code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import os
from pygments import highlight
from pygments import lexers
from pygments.lexers import get_lexer_for_mimetype
from pygments import styles
from pygments.util import ClassNotFound
from pygments.formatters import BBCodeFormatter
from kivy.uix.codeinput import CodeInput
from kivy.uix.floatlayout import FloatLayout
from kivy.extras.highlight import KivyLexer
from kivy.utils import get_color_from_hex, get_hex_from_color
from kivy.properties import StringProperty
from kivy.core.window import Window
from kivy.base import EventLoop
class Editor(CodeInput):
"""Inherits from :py:class:`kivy.uix.codeinput.CodeInput`.
It's a :py:class:`.kivy.uix.widget.Widget` that is adapted to highlight its
contents.
"""
last_click = ''
"""Stores the last click pressed.
This is, stores a value like 'left', 'right', 'scrollup'...
"""
background_color_default_te = [1,1,1,1]
"""Default background color for the editor.
It's set when the 'default TE' style is selected from the :py:class:`footer.footer.Footer`
and when the application is opened in default.
"""
_path = StringProperty(None)
"""Path to the file (without the file's name) that this :py:class:`.Editor` has open."""
_name = StringProperty(None)
"""Name of the file (and tab) for this :py:class:`.Editor`"""
style_name_not_bound = 'default TE'
"""Stores the style name but without being bound.
Because it's not bound, it can store any name. Like 'default TE'
"""
def __init__(self, **kwargs):
super(Editor, self).__init__(**kwargs)
self.text_from = 0
self.text_to = 0
# Let's override this method to brute force the invisible text for the moment.
def paste(self):
''' Insert text from system :class:`~kivy.core.clipboard.Clipboard`
into the :class:`~kivy.uix.textinput.TextInput` at current cursor
position.
.. versionadded:: 1.8.0
'''
super(Editor, self).paste()
if len(self.text) > 0:
l = len(self.text)
c = self.text[l-1]
self.text = self.text[0:l-2]
self.text = self.text + c
    def change_style(self, style = None):
        """Change the style of the editor.
        The style includes the background_color, the cursor color and the text
        (keywords, variable names...). It means that changes the highlighting style.
        :param style: Name of the style to which to change. ``'default TE'`` is a
            pseudo-style mapping to pygments' ``'default'`` plus this editor's own
            background colour. ``None`` only re-applies the cursor colour and
            triggers a text refresh.
        """
        if style is not None:
            if style == 'default TE':
                # Pseudo-style: pygments 'default' plus the editor's background.
                self.style_name = 'default'
                self.style_name_not_bound = 'default TE'
                self.background_color = self.background_color_default_te
            elif style == 'default' and self.style_name == 'default':
                # Re-selecting 'default': bounce through another style so the
                # bound property actually changes value and the widget redraws.
                self.style_name = 'algol'
                self.style_name = 'default'
                self.style_name_not_bound = 'default'
            else:
                try:
                    self.style_name = style
                    self.style_name_not_bound = style
                except ClassNotFound as err:
                    # Unknown pygments style name: keep the previous style.
                    print(err, '{}: unknown style'.format(style))
        if self.style:
            # Pick a cursor colour that contrasts with the style's background.
            background_c = get_color_from_hex(self.style.background_color)
            color_sum = sum(background_c[0:3])
            # NOTE(review): threshold of 0.5 on the summed RGB (max 3.0) makes a
            # black cursor the choice for all but very dark backgrounds —
            # confirm this is the intended cut-off.
            if color_sum >= 0.5:
                self.cursor_color = [0, 0, 0, 1]
            else:
                self.cursor_color = [1, 1, 1, 1]
            self._trigger_refresh_text()
def text_changed(self, *args):
"""Manage event when :py:attr:`.Editor.text` changes.
Changes the content of :py:attr:`editorcontainer.editorcontainer.EditorTab.close_button_string`.
When that attribute is changed the text of :py:attr:`editorcontainer.editorcontainer.EditorTab.close_button`
is automatically updated.
This means this method is used to indicate the stated of the tab (unsaved, saved). The mark is an
asterisk (*).
:param \*args: Default arguments. Not used.
"""
self.tab.close_button_string = '*\nx'
self.tab.saved = False
def save_tab(self, all_tabs=False):
"""Save a tab.
Writes the contents of this :py:class:`.Editor` to the file indicated by
:py:attr:`._path` and :py:attr:`._name`.
:param all_tabs: Boolean that indicates wheter just this :py:attr:`.Editor` 's tab \
is being saved (:py:obj:`False`) or all the tabs open in the application are being \
saved (:py:obj:`True`). When all_tabs is :py:obj:`False`, if the contents of this \
:py:class:`.Editor` haven't been saved then a filechooser is shown.
"""
if self._name is not None:
try:
complete_path = os.path.join(self._path, self._name)
with open(complete_path,'w+') as file:
file.write(self.text)
self.tab.close_button_string = 'x'
self.tab.saved = True
except PermissionError as err:
print(err, "You don't have the required access rights"
" to write to: {0}".format(path), sep = '\n')
except IsADirectoryError as err:
print(err, "Cannot save file as directory", sep = '\n')
elif not all_tabs:
file_menu = self.editor_container.parent.menu_bar.file_menu
file_menu.save_as()
# Let's override this method to be able to use the right
# click menu.
def cancel_selection(self):
'''Cancel current selection (if any).
'''
self._selection_from = self._selection_to = self.cursor_index()
self.selection_text = u''
self._selection = False
self._selection_finished = True
self._selection_touch = None
#self._trigger_update_graphics()
# Let's override this method, too, to be able to use the right
# click menu.
def on_cursor(self, instance, value):
"""Manage event when this editor's cursor changes."""
# Update all the graphics.
if self.last_click not in ['right', 'scrolldown', 'scrollup']:
self._trigger_update_graphics()
def change_lexer(self, mimetype = None):
"""Change the lexer of this :py:class:`.Editor`.
The lexer is what takes care of recognizing the keywords, variable names, etc.
:param mimetype: The mimetype for which a lexer should be found. The lexer is \
changed to that found with this mimetype.
"""
if mimetype is not None:
try:
# If the mimetype is 'text/plain' and the extension
# of the file is '.kv', then a kivylexer should be used.
if mimetype == 'text/plain' and os.path.splitext(self._name)[1] == '.kv':
self.lexer = KivyLexer()
else:
self.lexer = get_lexer_for_mimetype(mimetype)
except ClassNotFound as err:
print(err, 'Unsopported type {}'.format(mimetype), sep='\n')
self.lexer = lexers.TextLexer()
finally:
return self.lexer.name
elif self._name is not None:
# If the mimetype is 'text/plain' and the extension
# of the file is '.kv', then a kivylexer should be used.
if os.path.splitext(self._name)[1] == '.kv':
self.lexer = KivyLexer()
else:
self.lexer = lexers.TextLexer()
else:
self.lexer = lexers.TextLexer()
return self.lexer.name
def propagate_editor_container(self, editor_container):
"""Propagate the :py:class:`~editorcontainer.editorcontainer.EditorContainer`
to this :py:class:`.Editor`.
:param editor_container: Should be a reference to :py:attr:`azaharTEA.Container.editor_container`, \
the :py:class:`~editorcontainer.editorcontainer.EditorContainer` of the application.
"""
self.editor_container = editor_container | editorcontainer/editor/editor.py | import os
from pygments import highlight
from pygments import lexers
from pygments.lexers import get_lexer_for_mimetype
from pygments import styles
from pygments.util import ClassNotFound
from pygments.formatters import BBCodeFormatter
from kivy.uix.codeinput import CodeInput
from kivy.uix.floatlayout import FloatLayout
from kivy.extras.highlight import KivyLexer
from kivy.utils import get_color_from_hex, get_hex_from_color
from kivy.properties import StringProperty
from kivy.core.window import Window
from kivy.base import EventLoop
class Editor(CodeInput):
"""Inherits from :py:class:`kivy.uix.codeinput.CodeInput`.
It's a :py:class:`.kivy.uix.widget.Widget` that is adapted to highlight its
contents.
"""
last_click = ''
"""Stores the last click pressed.
This is, stores a value like 'left', 'right', 'scrollup'...
"""
background_color_default_te = [1,1,1,1]
"""Default background color for the editor.
It's set when the 'default TE' style is selected from the :py:class:`footer.footer.Footer`
and when the application is opened in default.
"""
_path = StringProperty(None)
"""Path to the file (without the file's name) that this :py:class:`.Editor` has open."""
_name = StringProperty(None)
"""Name of the file (and tab) for this :py:class:`.Editor`"""
style_name_not_bound = 'default TE'
"""Stores the style name but without being bound.
Because it's not bound, it can store any name. Like 'default TE'
"""
def __init__(self, **kwargs):
super(Editor, self).__init__(**kwargs)
self.text_from = 0
self.text_to = 0
# Let's override this method to brute force the invisible text for the moment.
def paste(self):
''' Insert text from system :class:`~kivy.core.clipboard.Clipboard`
into the :class:`~kivy.uix.textinput.TextInput` at current cursor
position.
.. versionadded:: 1.8.0
'''
super(Editor, self).paste()
if len(self.text) > 0:
l = len(self.text)
c = self.text[l-1]
self.text = self.text[0:l-2]
self.text = self.text + c
def change_style(self, style = None):
"""Change the style of the editor.
The style includes the background_color, the cursor color and the text
(keywords, variable names...). It means that changes the highlighting style.
:param style: Name of the style to which to change.
"""
if style is not None:
if style == 'default TE':
self.style_name = 'default'
self.style_name_not_bound = 'default TE'
self.background_color = self.background_color_default_te
elif style == 'default' and self.style_name == 'default':
self.style_name = 'algol'
self.style_name = 'default'
self.style_name_not_bound = 'default'
else:
try:
self.style_name = style
self.style_name_not_bound = style
except ClassNotFound as err:
print(err, '{}: unknown style'.format(style))
if self.style:
background_c = get_color_from_hex(self.style.background_color)
color_sum = sum(background_c[0:3])
if color_sum >= 0.5:
self.cursor_color = [0, 0, 0, 1]
else:
self.cursor_color = [1, 1, 1, 1]
self._trigger_refresh_text()
def text_changed(self, *args):
"""Manage event when :py:attr:`.Editor.text` changes.
Changes the content of :py:attr:`editorcontainer.editorcontainer.EditorTab.close_button_string`.
When that attribute is changed the text of :py:attr:`editorcontainer.editorcontainer.EditorTab.close_button`
is automatically updated.
This means this method is used to indicate the stated of the tab (unsaved, saved). The mark is an
asterisk (*).
:param \*args: Default arguments. Not used.
"""
self.tab.close_button_string = '*\nx'
self.tab.saved = False
def save_tab(self, all_tabs=False):
"""Save a tab.
Writes the contents of this :py:class:`.Editor` to the file indicated by
:py:attr:`._path` and :py:attr:`._name`.
:param all_tabs: Boolean that indicates wheter just this :py:attr:`.Editor` 's tab \
is being saved (:py:obj:`False`) or all the tabs open in the application are being \
saved (:py:obj:`True`). When all_tabs is :py:obj:`False`, if the contents of this \
:py:class:`.Editor` haven't been saved then a filechooser is shown.
"""
if self._name is not None:
try:
complete_path = os.path.join(self._path, self._name)
with open(complete_path,'w+') as file:
file.write(self.text)
self.tab.close_button_string = 'x'
self.tab.saved = True
except PermissionError as err:
print(err, "You don't have the required access rights"
" to write to: {0}".format(path), sep = '\n')
except IsADirectoryError as err:
print(err, "Cannot save file as directory", sep = '\n')
elif not all_tabs:
file_menu = self.editor_container.parent.menu_bar.file_menu
file_menu.save_as()
# Let's override this method to be able to use the right
# click menu.
def cancel_selection(self):
'''Cancel current selection (if any).
'''
self._selection_from = self._selection_to = self.cursor_index()
self.selection_text = u''
self._selection = False
self._selection_finished = True
self._selection_touch = None
#self._trigger_update_graphics()
# Let's override this method, too, to be able to use the right
# click menu.
def on_cursor(self, instance, value):
"""Manage event when this editor's cursor changes."""
# Update all the graphics.
if self.last_click not in ['right', 'scrolldown', 'scrollup']:
self._trigger_update_graphics()
def change_lexer(self, mimetype = None):
"""Change the lexer of this :py:class:`.Editor`.
The lexer is what takes care of recognizing the keywords, variable names, etc.
:param mimetype: The mimetype for which a lexer should be found. The lexer is \
changed to that found with this mimetype.
"""
if mimetype is not None:
try:
# If the mimetype is 'text/plain' and the extension
# of the file is '.kv', then a kivylexer should be used.
if mimetype == 'text/plain' and os.path.splitext(self._name)[1] == '.kv':
self.lexer = KivyLexer()
else:
self.lexer = get_lexer_for_mimetype(mimetype)
except ClassNotFound as err:
print(err, 'Unsopported type {}'.format(mimetype), sep='\n')
self.lexer = lexers.TextLexer()
finally:
return self.lexer.name
elif self._name is not None:
# If the mimetype is 'text/plain' and the extension
# of the file is '.kv', then a kivylexer should be used.
if os.path.splitext(self._name)[1] == '.kv':
self.lexer = KivyLexer()
else:
self.lexer = lexers.TextLexer()
else:
self.lexer = lexers.TextLexer()
return self.lexer.name
def propagate_editor_container(self, editor_container):
"""Propagate the :py:class:`~editorcontainer.editorcontainer.EditorContainer`
to this :py:class:`.Editor`.
:param editor_container: Should be a reference to :py:attr:`azaharTEA.Container.editor_container`, \
the :py:class:`~editorcontainer.editorcontainer.EditorContainer` of the application.
"""
self.editor_container = editor_container | 0.632389 | 0.200656 |
import dask.bag as db
import json
import pytest
import pkg_resources
import glob
import os
from impresso_commons.utils.s3 import get_bucket, get_s3_versions, read_jsonlines
from impresso_commons.utils.daskutils import create_even_partitions
from impresso_commons.utils.config_loader import TextImporterConfig
def test_get_s3_versions():
    """Version info should be retrievable for a sample of keys in the bucket."""
    bucket_name = "canonical-rebuilt"
    bucket = get_bucket(bucket_name)
    sample_keys = bucket.get_all_keys()[:10]
    version_info = [get_s3_versions(bucket_name, k.name) for k in sample_keys]
    assert version_info is not None
    assert len(version_info) == len(sample_keys)
def test_read_jsonlines():
    """Reading a rebuilt-content key should yield a non-empty bag of JSON lines."""
    b = get_bucket("canonical-rebuilt", create=False)
    key = "<KEY>"
    bag = db.from_sequence(read_jsonlines(key, b.name))
    n_lines = bag.count().compute()
    sample = bag.map(json.loads).pluck('ft').take(10)
    assert n_lines is not None
    assert n_lines > 0
    assert sample is not None
    assert len(sample) > 0
def test_create_even_partitions():
    """Partitioning two GDL years locally should produce exactly 100 .bz2 files."""
    dir_partition = pkg_resources.resource_filename(
        'impresso_commons',
        'data/partitions/'
    )
    config_newspapers = {
        "GDL": [1804, 1805]
    }
    bucket_partition_name = None
    bucket_partition_prefix = None
    # Bug fix: a stray trailing comma made this the tuple ``(True,)``
    # instead of the boolean ``True``.
    keep_full = True
    nb_partition = 100  # 500 on all data
    # get the s3 bucket
    bucket = get_bucket("canonical-rebuilt", create=False)
    create_even_partitions(bucket,
                           config_newspapers,
                           dir_partition,
                           bucket_partition_name,
                           bucket_partition_prefix,
                           keep_full,
                           nb_partition=nb_partition)
    partitions = glob.glob(os.path.join(dir_partition, "*.bz2"))
    assert len(partitions) == 100
def test_load_config():
    """The example JSON config should load into a TextImporterConfig correctly."""
    config_path = pkg_resources.resource_filename(
        'impresso_commons',
        'config/solr_ci_builder_config.example.json'
    )
    expected_newspapers = {'GDL': [1940, 1941]}
    config = TextImporterConfig.from_json(config_path)
    assert config.bucket_rebuilt == "canonical-rebuilt"
    assert config.newspapers == expected_newspapers
    assert config.solr_server == "https://dhlabsrv18.epfl.ch/solr/"
    assert config.solr_core == "impresso_sandbox"
import json
import pytest
import pkg_resources
import glob
import os
from impresso_commons.utils.s3 import get_bucket, get_s3_versions, read_jsonlines
from impresso_commons.utils.daskutils import create_even_partitions
from impresso_commons.utils.config_loader import TextImporterConfig
def test_get_s3_versions():
bucket_name = "canonical-rebuilt"
bucket = get_bucket(bucket_name)
keys = bucket.get_all_keys()[:10]
info = [
get_s3_versions(bucket_name, key.name)
for key in keys
]
assert info is not None
assert len(info) == len(keys)
def test_read_jsonlines():
b = get_bucket("canonical-rebuilt", create=False)
key = "<KEY>"
lines = db.from_sequence(read_jsonlines(key, b.name))
count_lines = lines.count().compute()
some_lines = lines.map(json.loads).pluck('ft').take(10)
assert count_lines is not None
assert count_lines > 0
assert some_lines is not None
assert len(some_lines) > 0
def test_create_even_partitions():
dir_partition = pkg_resources.resource_filename(
'impresso_commons',
'data/partitions/'
)
config_newspapers = {
"GDL": [1804, 1805]
}
bucket_partition_name = None
bucket_partition_prefix = None
keep_full = True,
nb_partition = 100 # 500 on all data
# get the s3 bucket
bucket = get_bucket("canonical-rebuilt", create=False)
create_even_partitions(bucket,
config_newspapers,
dir_partition,
bucket_partition_name,
bucket_partition_prefix,
keep_full,
nb_partition=nb_partition)
partitions = glob.glob(os.path.join(dir_partition, "*.bz2"))
assert len(partitions) == 100
def test_load_config():
file = pkg_resources.resource_filename(
'impresso_commons',
'config/solr_ci_builder_config.example.json'
)
np = {'GDL': [1940, 1941]}
config = TextImporterConfig.from_json(file)
assert config.bucket_rebuilt == "canonical-rebuilt"
assert config.newspapers == np
assert config.solr_server == "https://dhlabsrv18.epfl.ch/solr/"
assert config.solr_core == "impresso_sandbox" | 0.347869 | 0.226805 |
import re
import requests
import time
import random
import http.cookiejar
import datetime
import ConstantQuantity as cq
session = requests.Session()
BaiduUsername = ""
BaiduPassword = ""
BaiduURLCaptcha = ""
BaiduToken = ""
BaiduVerifyCode = ""
BaiduCodeString = ""
TimeNow = (datetime.datetime.utcnow() + datetime.timedelta(hours=8))
DateToday = TimeNow.strftime("%Y-%m-%d")
LogPath = "log/" + DateToday + 'reg'
# POST请求头
CommonHeaders = {
'User-Agent': 'Mozilla/5.0 (SymbianOS/9.3; Series60/3.2 NokiaE72-1/021.021; Profile/MIDP-2.1 Configuration/CLDC-1.1 )',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
# 登录时POST请求头
LoginHeaders = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip,deflate,sdch",
"Accept-Language": "en-US,en;q=0.8,zh;q=0.6",
"Host": "passport.baidu.com",
"Upgrade-Insecure-Requests": "1",
"Origin": "http://www.baidu.com",
"Referer": "http://www.baidu.com/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36"
}
# 第一次POST的信息,如果需要验证码则获取验证码并进行第二次POST
DataLoginBaiduFirstTime = {
"staticpage": "https://passport.baidu.com/static/passpc-account/html/V3Jump.html",
"token": BaiduToken,
"tpl": "mn",
"username": BaiduUsername,
"password": <PASSWORD>,
"loginmerge": "true",
"mem_pass": "on",
"logintype": "dialogLogin",
"logLoginType": "pc_loginDialog",
}
# 第二次POST的信息
DataLoginBaiduSecondTime = {
"staticpage": "https://passport.baidu.com/static/passpc-account/html/V3Jump.html",
"codestring": BaiduCodeString,
"verifycode": BaiduVerifyCode,
"token": BaiduToken,
"tpl": "mn",
"username": BaiduUsername,
"password": <PASSWORD>,
"loginmerge": "true",
"mem_pass": "on",
"logintype": "dialogLogin",
"logLoginType": "pc_loginDialog",
}
def WriteLog(content, IsLineFeed):
    """Print *content* and append it to the daily log file at ``LogPath``.
    :param content: Text to log.
    :param IsLineFeed: ``cq.WITH_LINE_FEED`` to append a newline after the
        content, ``cq.WITHOUT_LINE_FEED`` to write it as-is.
    """
    with open(LogPath, 'a+') as f:
        print(content)
        if IsLineFeed == cq.WITH_LINE_FEED:
            f.write(content + "\n")
        elif IsLineFeed == cq.WITHOUT_LINE_FEED:
            f.write(content)
        # Fix: removed the redundant f.close() — the ``with`` block already
        # closes the file on exit.
def FetchCaptcha(username, password):
    """Run the first login POST for *username*/*password* and classify the result.
    Fills both POST payloads, fetches a fresh login token, posts the login
    form and maps Baidu's ``error=`` code from the response to a status.
    :return: One of the ``cq`` status constants (``cq.LOGIN_SUCCESS``,
        ``cq.NEED_CAPTCHA``, ...), or ``False`` when the token could not be
        fetched. NOTE(review): the ``False`` return is inconsistent with the
        ``cq.*`` constants the callers compare against — confirm callers
        handle it.
    """
    global BaiduURLCaptcha
    DataLoginBaiduFirstTime["username"] = username
    DataLoginBaiduSecondTime["username"] = username
    DataLoginBaiduFirstTime["password"] = password
    DataLoginBaiduSecondTime["password"] = password
    if not FetchToken():
        return False
    # First login POST.
    WriteLog("正在尝试登录", cq.WITH_LINE_FEED)
    request = session.post("https://passport.baidu.com/v2/api/?login", headers=LoginHeaders,
                           data=DataLoginBaiduFirstTime)
    time.sleep(random.uniform(0.2, 0.5))
    # print(request.text)
    state = re.compile('error=(\w+?)&').findall(str(request.text))[0]
    if state == "0":
        # Extract and verify the BDUSS session cookie.
        BDUSS = ""
        for i in session.cookies:
            if i.name == 'BDUSS':
                BDUSS = i.value
        if BDUSS:
            WriteLog("登录成功!", cq.WITH_LINE_FEED)
            return cq.LOGIN_SUCCESS
        else:
            WriteLog("这是个BUG", cq.WITH_LINE_FEED)
            return cq.ITS_A_BUG
    elif state == "4" or state == "7":
        WriteLog("密码错误", cq.WITH_LINE_FEED)
        session.cookies.clear()
        return cq.WRONG_PASSWORD
    elif state == "5" or state == "120019":
        WriteLog("账号异常,请手动登录www.baidu.com验证手机号", cq.WITH_LINE_FEED)
        return cq.ABNORMAL_STATE
    elif state == "257":
        # A captcha is required: remember its codestring and image URL.
        WriteLog("正在获取验证码", cq.WITH_LINE_FEED)
        CodeString = re.compile('codestring=(.+?)&username').findall(str(request.text))[0]
        DataLoginBaiduSecondTime["codestring"] = CodeString
        BaiduURLCaptcha = "https://passport.baidu.com/cgi-bin/genimage?" + CodeString
        # The caller is expected to fetch this URL and show the captcha image.
        return cq.NEED_CAPTCHA
    elif state == "50028":
        WriteLog("输入密码错误次数过多,请三小时后再试", cq.WITH_LINE_FEED)
        return cq.EXCESSIVE_WRONG_PASSWORD
    else:
        WriteLog("未知错误1,错误代码为{0},请联系管理员".format(state), cq.WITH_LINE_FEED)
        return cq.UNEXPECTED_ERROR
def FetchDBUSS(captcha):
    """Run the second login POST with the captcha answer and classify the result.
    :param captcha: Text the user read from the captcha image.
    :return: One of the ``cq`` status constants describing the outcome.
    """
    # Put the captcha answer into the second POST payload.
    DataLoginBaiduSecondTime["verifycode"] = captcha
    # Second login POST.
    WriteLog("验证验证码", cq.WITH_LINE_FEED)
    request = session.post("https://passport.baidu.com/v2/api/?login", headers=LoginHeaders, data=DataLoginBaiduSecondTime)
    # print(request.text)
    state = re.compile('error=(\w+?)&').findall(str(request.text))[0]
    if state == "0":
        # Extract and verify the BDUSS session cookie.
        BDUSS = ""
        for i in session.cookies:
            if i.name == 'BDUSS':
                BDUSS = i.value
        if BDUSS:
            WriteLog("登录成功!", cq.WITH_LINE_FEED)
            return cq.LOGIN_SUCCESS
        else:
            WriteLog("这是个BUG", cq.WITH_LINE_FEED)
            return cq.ITS_A_BUG
    elif state == "6":
        WriteLog("验证码错误!", cq.WITH_LINE_FEED)
        return cq.WRONG_CAPTCHA
    elif state == "4" or state == "7":
        WriteLog("密码错误", cq.WITH_LINE_FEED)
        return cq.WRONG_PASSWORD
    elif state == "120021":
        WriteLog("账号异常,请手动登录www.baidu.com验证邮箱", cq.WITH_LINE_FEED)
        return cq.ABNORMAL_STATE
    else:
        WriteLog("未知错误2,错误代码为{0},请联系管理员".format(state), cq.WITH_LINE_FEED)
        return cq.UNEXPECTED_ERROR
def FetchToken():
    """Fetch the Baidu login token and store it in both POST payloads.
    Visits the Baidu home and login pages first so the session has the
    cookies the token endpoint expects.
    :return: ``True`` on success, ``False`` when the token could not be
        extracted.
    """
    WriteLog(u"正在尝试获取Token", cq.WITH_LINE_FEED)
    # Visit the home page and the login page to pick up session cookies.
    content = session.get("https://www.baidu.com/").text
    time.sleep(random.uniform(0.2, 0.5))
    content = session.get("https://passport.baidu.com/v2/api/?login").text
    time.sleep(random.uniform(0.2, 0.5))
    # Extract the token from the getapi response.
    try:
        content = session.get("https://passport.baidu.com/v2/api/?getapi&class=login&tpl=mn&tangram=true", headers=CommonHeaders).text
        token = re.compile("login_token=\'(\w+?)\';").findall(str(content))[0]
        DataLoginBaiduFirstTime["token"] = token
        DataLoginBaiduSecondTime["token"] = token
        WriteLog("已获取Token", cq.WITH_LINE_FEED)
        return True
    except Exception as err:
        # Broad catch is deliberate: any failure (network error, regex miss)
        # aborts the login attempt after being logged.
        WriteLog("无法获取Token,正在退出...,错误为{0}".format(err), cq.WITH_LINE_FEED)
        return False
def NewUser(username, password):
    """Start a login session for *username*.
    Creates the per-user cookie file, runs the first login POST and, when a
    captcha is required, falls through to interactive captcha verification.
    :param username: Baidu account name.
    :param password: Account password.
    """
    global BaiduURLCaptcha, BaiduUsername, BaiduPassword
    BaiduUsername = username
    BaiduPassword = password
    TempTimeNow = (datetime.datetime.utcnow() + datetime.timedelta(hours=8))
    WriteLog("\n" + "[" + TempTimeNow.strftime("%Y-%m-%d %H:%M:%S") + "] 开始为" + BaiduUsername + "注册", cq.WITH_LINE_FEED)
    # Make sure the per-user cookie file exists.
    # (Fix: replaced the pointless ``with open(...) as f: f.close()`` with
    # the plain create-and-close idiom.)
    open("user/" + BaiduUsername, "a+").close()
    session.cookies = http.cookiejar.LWPCookieJar("user/" + BaiduUsername)
    state = FetchCaptcha(BaiduUsername, BaiduPassword)
    if state == cq.NEED_CAPTCHA:
        print(BaiduURLCaptcha)
        VerifyCaptcha()
    elif state == cq.LOGIN_SUCCESS:
        session.cookies.save(ignore_discard=True, ignore_expires=True)
        WriteLog("登录成功", cq.WITH_LINE_FEED)
    else:
        WriteLog("Error in NewUser", cq.WITH_LINE_FEED)
def VerifyCaptcha():
    """Read a captcha answer from stdin and finish the login.
    Bug fix: the original compared ``FetchDBUSS``'s return value against the
    literal log strings ("登录成功!", "验证码错误!", ...), but ``FetchDBUSS``
    returns the ``cq`` status constants, so no branch ever matched; compare
    against the constants instead.  Also stopped overwriting the global
    ``BaiduURLCaptcha`` with ``FetchCaptcha``'s status code — ``FetchCaptcha``
    updates that global itself as a side effect.
    """
    global BaiduURLCaptcha
    captcha = input("captcha:")
    is_login = FetchDBUSS(captcha)
    if is_login == cq.LOGIN_SUCCESS:
        # Persist the session cookies.
        session.cookies.save(ignore_discard=True, ignore_expires=True)
        print("登录成功!")
    elif is_login == cq.WRONG_CAPTCHA:
        # Request a fresh captcha image for another attempt.
        FetchCaptcha(BaiduUsername, BaiduPassword)
        # print(url_captcha)
        print("验证码错误!")
    elif is_login == cq.WRONG_PASSWORD:
        print("密码错误!")
    else:
        print("Error in VerifyCaptcha")
import requests
import time
import random
import http.cookiejar
import datetime
import ConstantQuantity as cq
session = requests.Session()
BaiduUsername = ""
BaiduPassword = ""
BaiduURLCaptcha = ""
BaiduToken = ""
BaiduVerifyCode = ""
BaiduCodeString = ""
TimeNow = (datetime.datetime.utcnow() + datetime.timedelta(hours=8))
DateToday = TimeNow.strftime("%Y-%m-%d")
LogPath = "log/" + DateToday + 'reg'
# POST请求头
CommonHeaders = {
'User-Agent': 'Mozilla/5.0 (SymbianOS/9.3; Series60/3.2 NokiaE72-1/021.021; Profile/MIDP-2.1 Configuration/CLDC-1.1 )',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
# 登录时POST请求头
LoginHeaders = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip,deflate,sdch",
"Accept-Language": "en-US,en;q=0.8,zh;q=0.6",
"Host": "passport.baidu.com",
"Upgrade-Insecure-Requests": "1",
"Origin": "http://www.baidu.com",
"Referer": "http://www.baidu.com/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36"
}
# 第一次POST的信息,如果需要验证码则获取验证码并进行第二次POST
DataLoginBaiduFirstTime = {
"staticpage": "https://passport.baidu.com/static/passpc-account/html/V3Jump.html",
"token": BaiduToken,
"tpl": "mn",
"username": BaiduUsername,
"password": <PASSWORD>,
"loginmerge": "true",
"mem_pass": "on",
"logintype": "dialogLogin",
"logLoginType": "pc_loginDialog",
}
# 第二次POST的信息
DataLoginBaiduSecondTime = {
"staticpage": "https://passport.baidu.com/static/passpc-account/html/V3Jump.html",
"codestring": BaiduCodeString,
"verifycode": BaiduVerifyCode,
"token": BaiduToken,
"tpl": "mn",
"username": BaiduUsername,
"password": <PASSWORD>,
"loginmerge": "true",
"mem_pass": "on",
"logintype": "dialogLogin",
"logLoginType": "pc_loginDialog",
}
def WriteLog(content, IsLineFeed):
with open(LogPath, 'a+') as f:
print(content)
if IsLineFeed == cq.WITH_LINE_FEED:
f.write(content + "\n")
elif IsLineFeed == cq.WITHOUT_LINE_FEED:
f.write(content)
f.close()
def FetchCaptcha(username, password):
global BaiduURLCaptcha
DataLoginBaiduFirstTime["username"] = username
DataLoginBaiduSecondTime["username"] = username
DataLoginBaiduFirstTime["password"] = password
DataLoginBaiduSecondTime["password"] = password
if not FetchToken():
return False
# 进行第一次登陆POST
WriteLog("正在尝试登录", cq.WITH_LINE_FEED)
request = session.post("https://passport.baidu.com/v2/api/?login", headers=LoginHeaders,
data=DataLoginBaiduFirstTime)
time.sleep(random.uniform(0.2, 0.5))
# print(request.text)
state = re.compile('error=(\w+?)&').findall(str(request.text))[0]
if state == "0":
# 提取并验证 BDUSS
BDUSS = ""
for i in session.cookies:
if i.name == 'BDUSS':
BDUSS = i.value
if BDUSS:
WriteLog("登录成功!", cq.WITH_LINE_FEED)
return cq.LOGIN_SUCCESS
else:
WriteLog("这是个BUG", cq.WITH_LINE_FEED)
return cq.ITS_A_BUG
elif state == "4" or state == "7":
WriteLog("密码错误", cq.WITH_LINE_FEED)
session.cookies.clear()
return cq.WRONG_PASSWORD
elif state == "5" or state == "120019":
WriteLog("账号异常,请手动登录www.baidu.com验证手机号", cq.WITH_LINE_FEED)
return cq.ABNORMAL_STATE
elif state == "257":
# 获取验证码
WriteLog("正在获取验证码", cq.WITH_LINE_FEED)
CodeString = re.compile('codestring=(.+?)&username').findall(str(request.text))[0]
DataLoginBaiduSecondTime["codestring"] = CodeString
BaiduURLCaptcha = "https://passport.baidu.com/cgi-bin/genimage?" + CodeString
# 访问验证码地址并下载图片
return cq.NEED_CAPTCHA
elif state == "50028":
WriteLog("输入密码错误次数过多,请三小时后再试", cq.WITH_LINE_FEED)
return cq.EXCESSIVE_WRONG_PASSWORD
else:
WriteLog("未知错误1,错误代码为{0},请联系管理员".format(state), cq.WITH_LINE_FEED)
return cq.UNEXPECTED_ERROR
def FetchDBUSS(captcha):
# 将验证码内容写入第二次POST信息
DataLoginBaiduSecondTime["verifycode"] = captcha
# 进行第二次登陆POST
WriteLog("验证验证码", cq.WITH_LINE_FEED)
request = session.post("https://passport.baidu.com/v2/api/?login", headers=LoginHeaders, data=DataLoginBaiduSecondTime)
# print(request.text)
state = re.compile('error=(\w+?)&').findall(str(request.text))[0]
if state == "0":
# 提取并验证BDUSS
BDUSS = ""
for i in session.cookies:
if i.name == 'BDUSS':
BDUSS = i.value
if BDUSS:
WriteLog("登录成功!", cq.WITH_LINE_FEED)
return cq.LOGIN_SUCCESS
else:
WriteLog("这是个BUG", cq.WITH_LINE_FEED)
return cq.ITS_A_BUG
elif state == "6":
WriteLog("验证码错误!", cq.WITH_LINE_FEED)
return cq.WRONG_CAPTCHA
elif state == "4" or state == "7":
WriteLog("密码错误", cq.WITH_LINE_FEED)
return cq.WRONG_PASSWORD
elif state == "120021":
WriteLog("账号异常,请手动登录www.baidu.com验证邮箱", cq.WITH_LINE_FEED)
return cq.ABNORMAL_STATE
else:
WriteLog("未知错误2,错误代码为{0},请联系管理员".format(state), cq.WITH_LINE_FEED)
return cq.UNEXPECTED_ERROR
def FetchToken():
WriteLog(u"正在尝试获取Token", cq.WITH_LINE_FEED)
# 访问百度主页和登陆页面获取COOKIE
content = session.get("https://www.baidu.com/").text
time.sleep(random.uniform(0.2, 0.5))
content = session.get("https://passport.baidu.com/v2/api/?login").text
time.sleep(random.uniform(0.2, 0.5))
# 获取token信息
try:
content = session.get("https://passport.baidu.com/v2/api/?getapi&class=login&tpl=mn&tangram=true", headers=CommonHeaders).text
token = re.compile("login_token=\'(\w+?)\';").findall(str(content))[0]
DataLoginBaiduFirstTime["token"] = token
DataLoginBaiduSecondTime["token"] = token
WriteLog("已获取Token", cq.WITH_LINE_FEED)
return True
except Exception as err:
WriteLog("无法获取Token,正在退出...,错误为{0}".format(err), cq.WITH_LINE_FEED)
return False
def NewUser(username, password):
global BaiduURLCaptcha, BaiduUsername, BaiduPassword
BaiduUsername = username
BaiduPassword = password
TempTimeNow = (datetime.datetime.utcnow() + datetime.timedelta(hours=8))
WriteLog("\n" + "[" + TempTimeNow.strftime("%Y-%m-%d %H:%M:%S") + "] 开始为" + BaiduUsername + "注册", cq.WITH_LINE_FEED)
with open("user/" + BaiduUsername, "a+") as f:
f.close()
session.cookies = http.cookiejar.LWPCookieJar("user/" + BaiduUsername)
state = FetchCaptcha(BaiduUsername, BaiduPassword)
if state == cq.NEED_CAPTCHA:
print(BaiduURLCaptcha)
VerifyCaptcha()
elif state == cq.LOGIN_SUCCESS:
session.cookies.save(ignore_discard=True, ignore_expires=True)
WriteLog("登录成功", cq.WITH_LINE_FEED)
else:
WriteLog("Error in NewUser", cq.WITH_LINE_FEED)
def VerifyCaptcha():
global BaiduURLCaptcha
captcha = input("captcha:")
is_login = FetchDBUSS(captcha)
if is_login == "登录成功!":
# 保存COOKIE信息
session.cookies.save(ignore_discard=True, ignore_expires=True)
print("登录成功!")
elif is_login == "验证码错误!":
BaiduURLCaptcha = FetchCaptcha(BaiduUsername, BaiduPassword)
# print(url_captcha)
print("验证码错误!")
elif is_login == "密码错误":
print("密码错误!")
else:
print("Error in VerifyCaptcha") | 0.079806 | 0.120155 |
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import math
import os
import shutil
import mplleaflet
class rousys:
    """Routing system.

    Reads node coordinates (``geol_dist.csv``) and cable parameters
    (``cblt_dist.csv``) from *inp_folder*, and can derive a
    minimum-spanning-tree network plus per-line electrical data.
    """
    def __init__(self, inp_folder = '', crs = 35, typ = 7, vbase = 415, sbase = 1):
        #Geogaphical locations of all nodes
        self.geol = pd.read_csv(inp_folder + os.sep + 'geol_dist.csv')
        #Number of all nodes
        self.node = len(self.geol)
        #Parameters of cables
        self.cblt = pd.read_csv(inp_folder + os.sep + 'cblt_dist.csv')
        #Cross section of cables [mm]
        self.crs = crs
        #Type of cables
        self.typ = typ
        #Line-to-Line voltage [V]
        self.vbase = vbase
        #Base three-phase apparnet power [VA]
        self.sbase = 1000*sbase
        #Base impedance
        # NOTE(review): zbase and ibase below use the *parameter* ``sbase``,
        # while ``self.sbase`` was scaled by 1000 — confirm the intended units.
        self.zbase = (vbase**2)/sbase
        #Base curent
        self.ibase = sbase/(math.sqrt(3)*vbase)
        #Calculations of line/cable parameters
        # Table values are per km (hence 1e-3 per metre), normalized to per-unit.
        self.r = self.cblt.loc[self.cblt['crs'] == crs,'r'+str(typ)].values[0]*1e-3/self.zbase
        self.x = self.cblt.loc[self.cblt['crs'] == crs,'x'+str(typ)].values[0]*1e-3/self.zbase
        self.i = self.cblt.loc[self.cblt['crs'] == crs,'i'+str(typ)].values[0]/self.ibase
        self.p = (math.sqrt(2)/2)*self.i
        self.q = (math.sqrt(2)/2)*self.i
        self.inp_folder = inp_folder
    #Minimum spanning tree algorithm
    def min_spn_tre(self):
        """Build the complete distance graph over all nodes, extract its
        minimum spanning tree, plot it (``path.png`` and an interactive
        Leaflet map) and write ``rou_dist.csv`` / ``elin_dist.csv``."""
        G = nx.Graph()
        for n in range(self.node):
            G.add_node(n,pos =(self.geol['Longtitude'][n], self.geol['Latitude'][n]))
        # Complete graph weighted by great-circle distance between node pairs.
        for n in range(self.node):
            for m in range(self.node):
                if n != m:
                    G.add_edge(n,m,weight=distance((self.geol['Longtitude'][n], self.geol['Latitude'][n]), (self.geol['Longtitude'][m], self.geol['Latitude'][m])))
        T = nx.minimum_spanning_tree(G)
        nx.draw(T, nx.get_node_attributes(T,'pos'),node_size=5, width = 2, node_color = 'red', edge_color='blue')
        plt.savefig("path.png")
        fig, ax = plt.subplots()
        pos = nx.get_node_attributes(T,'pos')
        nx.draw_networkx_nodes(T,pos=pos,node_size=10,node_color='red')
        nx.draw_networkx_edges(T,pos=pos,edge_color='blue')
        # Opens an interactive Leaflet map (browser side effect).
        mplleaflet.show(fig=ax.figure)
        # Persist the tree edges with their distances.
        rou_dist = pd.DataFrame(sorted(T.edges(data=True)))
        rou_dist = rou_dist.rename({0: 'from', 1: 'to', 2: 'distance'}, axis=1)
        dist = rou_dist.loc[:, 'distance']
        rou_dist['distance'] = [d.get('weight') for d in dist]
        rou_dist.to_csv(self.inp_folder + os.sep + 'rou_dist.csv', index=False)
        # Electrical line data derived from the per-unit cable parameters.
        elin_dist = rou_dist.loc[:,'from':'to']
        elin_dist['ini'] = 1
        elin_dist['res'] = [self.r*d.get('weight') for d in dist]
        elin_dist['rea'] = [self.x*d.get('weight') for d in dist]
        elin_dist['sus'] = [0 for d in dist]
        elin_dist['pmax'] = self.p
        elin_dist['qmax'] = self.q
        elin_dist.to_csv(self.inp_folder + os.sep + 'elin_dist.csv', index=False)
#Convert latitude and longtitude to XY coordinates
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
# Radius in meter
radius = 6371000
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d | pyeplan/routing.py | import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import math
import os
import shutil
import mplleaflet
class rousys:
def __init__(self, inp_folder = '', crs = 35, typ = 7, vbase = 415, sbase = 1):
#Geogaphical locations of all nodes
self.geol = pd.read_csv(inp_folder + os.sep + 'geol_dist.csv')
#Number of all nodes
self.node = len(self.geol)
#Parameters of cables
self.cblt = pd.read_csv(inp_folder + os.sep + 'cblt_dist.csv')
#Cross section of cables [mm]
self.crs = crs
#Type of cables
self.typ = typ
#Line-to-Line voltage [V]
self.vbase = vbase
#Base three-phase apparnet power [VA]
self.sbase = 1000*sbase
#Base impedance
self.zbase = (vbase**2)/sbase
#Base curent
self.ibase = sbase/(math.sqrt(3)*vbase)
#Calculations of line/cable parameters
self.r = self.cblt.loc[self.cblt['crs'] == crs,'r'+str(typ)].values[0]*1e-3/self.zbase
self.x = self.cblt.loc[self.cblt['crs'] == crs,'x'+str(typ)].values[0]*1e-3/self.zbase
self.i = self.cblt.loc[self.cblt['crs'] == crs,'i'+str(typ)].values[0]/self.ibase
self.p = (math.sqrt(2)/2)*self.i
self.q = (math.sqrt(2)/2)*self.i
self.inp_folder = inp_folder
#Minimum spanning tree algorithm
def min_spn_tre(self):
G = nx.Graph()
for n in range(self.node):
G.add_node(n,pos =(self.geol['Longtitude'][n], self.geol['Latitude'][n]))
for n in range(self.node):
for m in range(self.node):
if n != m:
G.add_edge(n,m,weight=distance((self.geol['Longtitude'][n], self.geol['Latitude'][n]), (self.geol['Longtitude'][m], self.geol['Latitude'][m])))
T = nx.minimum_spanning_tree(G)
nx.draw(T, nx.get_node_attributes(T,'pos'),node_size=5, width = 2, node_color = 'red', edge_color='blue')
plt.savefig("path.png")
fig, ax = plt.subplots()
pos = nx.get_node_attributes(T,'pos')
nx.draw_networkx_nodes(T,pos=pos,node_size=10,node_color='red')
nx.draw_networkx_edges(T,pos=pos,edge_color='blue')
mplleaflet.show(fig=ax.figure)
rou_dist = pd.DataFrame(sorted(T.edges(data=True)))
rou_dist = rou_dist.rename({0: 'from', 1: 'to', 2: 'distance'}, axis=1)
dist = rou_dist.loc[:, 'distance']
rou_dist['distance'] = [d.get('weight') for d in dist]
rou_dist.to_csv(self.inp_folder + os.sep + 'rou_dist.csv', index=False)
elin_dist = rou_dist.loc[:,'from':'to']
elin_dist['ini'] = 1
elin_dist['res'] = [self.r*d.get('weight') for d in dist]
elin_dist['rea'] = [self.x*d.get('weight') for d in dist]
elin_dist['sus'] = [0 for d in dist]
elin_dist['pmax'] = self.p
elin_dist['qmax'] = self.q
elin_dist.to_csv(self.inp_folder + os.sep + 'elin_dist.csv', index=False)
#Convert latitude and longtitude to XY coordinates
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
# Radius in meter
radius = 6371000
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d | 0.365004 | 0.29175 |
import board
import neopixel
import time
import random
from analogio import AnalogIn
from adafruit_circuitplayground.express import cpx
RED = (0x10, 0, 0) # 0x100000 also works
YELLOW=(0x10, 0x10, 0)
GREEN = (0, 0x10, 0)
AQUA = (0, 0x10, 0x10)
BLUE = (0, 0, 0x10)
PURPLE = (0x10, 0, 0x10)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
FAILURE_TONE = 50
VICTORY_TONE = 500
speed = {
1: 0.5,
2: 0.1,
3: 0.05,
4: 0.02,
5: 0.01
}
cpx.pixels.fill(BLACK)
cpx.pixels.show()
difficulty = 1
# The game itself. Choose a random target LED, start spinning the red LED until users touches pad A7
def game(delay):
# The LEDs are 1..10 from a player's perspective
# but 0..9 in terms of the pixels array index
# target is the LED the user needs to 'capture' to win
target = random.randint(0, 9)
print("Player target =",target+1)
print("Speed set to ", delay)
#time.sleep(5)
print("Range is ", range(cpx.pixels.n))
print("Len of pixels is ", len(cpx.pixels))
# Set for single tap
cpx.pixels.fill(BLACK)
# Keep cycling LEDS until the user taps CPx
# Target LED is lit RED, others BLUE
while True:
for i in range(cpx.pixels.n):
if i == target:
cpx.pixels[i] = RED
else:
cpx.pixels[i] = BLUE
# Handle the edge case of 0 wrapping backwards to 9
if i == 0:
cpx.pixels[len(cpx.pixels)-1] = BLACK
else:
cpx.pixels[i-1] = BLACK
# Give the player time to react
time.sleep(delay)
if (cpx.button_a or cpx.button_b):
print("Player tapped, i = ",i+1)
if i == target:
won()
else:
lost()
return
def lost():
cpx.pixels.fill(RED)
cpx.play_tone(FAILURE_TONE, 1.5)
cpx.pixels.fill(BLACK)
def won():
cpx.pixels.fill(GREEN)
cpx.play_tone(VICTORY_TONE, .3)
cpx.play_tone(1.5*VICTORY_TONE, .3)
cpx.play_tone(2*VICTORY_TONE, .3)
cpx.play_tone(2.5*VICTORY_TONE, .3)
cpx.pixels.fill(BLACK)
# Main loop of the game: bump difficulty setting if users presses A, taps CPx
while True:
# Set for single tap
cpx.detect_taps = 1
# Wait until player taps CPx before starting game, flash LEDs blue while waiting
while not cpx.tapped:
cpx.pixels.fill(BLUE)
for i in range(difficulty):
cpx.pixels[i] = RED
# 5 is max difficulty, wrap to difficulty one after that
if cpx.button_a:
if difficulty < 5:
difficulty += 1
else:
difficulty = 1
print("Difficulty set to", difficulty)
cpx.pixels[difficulty-1] = RED
time.sleep(.3)
# Fill "black" to turn them off momentarily for a flashing effect
cpx.pixels.fill(BLACK)
time.sleep(.1)
# Give player 1 second to get ready for the game to start
time.sleep(1)
game(speed[difficulty]) | code.py |
import board
import neopixel
import time
import random
from analogio import AnalogIn
from adafruit_circuitplayground.express import cpx
RED = (0x10, 0, 0) # 0x100000 also works
YELLOW=(0x10, 0x10, 0)
GREEN = (0, 0x10, 0)
AQUA = (0, 0x10, 0x10)
BLUE = (0, 0, 0x10)
PURPLE = (0x10, 0, 0x10)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
FAILURE_TONE = 50
VICTORY_TONE = 500
speed = {
1: 0.5,
2: 0.1,
3: 0.05,
4: 0.02,
5: 0.01
}
cpx.pixels.fill(BLACK)
cpx.pixels.show()
difficulty = 1
# The game itself. Choose a random target LED, start spinning the red LED until users touches pad A7
def game(delay):
# The LEDs are 1..10 from a player's perspective
# but 0..9 in terms of the pixels array index
# target is the LED the user needs to 'capture' to win
target = random.randint(0, 9)
print("Player target =",target+1)
print("Speed set to ", delay)
#time.sleep(5)
print("Range is ", range(cpx.pixels.n))
print("Len of pixels is ", len(cpx.pixels))
# Set for single tap
cpx.pixels.fill(BLACK)
# Keep cycling LEDS until the user taps CPx
# Target LED is lit RED, others BLUE
while True:
for i in range(cpx.pixels.n):
if i == target:
cpx.pixels[i] = RED
else:
cpx.pixels[i] = BLUE
# Handle the edge case of 0 wrapping backwards to 9
if i == 0:
cpx.pixels[len(cpx.pixels)-1] = BLACK
else:
cpx.pixels[i-1] = BLACK
# Give the player time to react
time.sleep(delay)
if (cpx.button_a or cpx.button_b):
print("Player tapped, i = ",i+1)
if i == target:
won()
else:
lost()
return
def lost():
cpx.pixels.fill(RED)
cpx.play_tone(FAILURE_TONE, 1.5)
cpx.pixels.fill(BLACK)
def won():
cpx.pixels.fill(GREEN)
cpx.play_tone(VICTORY_TONE, .3)
cpx.play_tone(1.5*VICTORY_TONE, .3)
cpx.play_tone(2*VICTORY_TONE, .3)
cpx.play_tone(2.5*VICTORY_TONE, .3)
cpx.pixels.fill(BLACK)
# Main loop of the game: bump difficulty setting if users presses A, taps CPx
while True:
# Set for single tap
cpx.detect_taps = 1
# Wait until player taps CPx before starting game, flash LEDs blue while waiting
while not cpx.tapped:
cpx.pixels.fill(BLUE)
for i in range(difficulty):
cpx.pixels[i] = RED
# 5 is max difficulty, wrap to difficulty one after that
if cpx.button_a:
if difficulty < 5:
difficulty += 1
else:
difficulty = 1
print("Difficulty set to", difficulty)
cpx.pixels[difficulty-1] = RED
time.sleep(.3)
# Fill "black" to turn them off momentarily for a flashing effect
cpx.pixels.fill(BLACK)
time.sleep(.1)
# Give player 1 second to get ready for the game to start
time.sleep(1)
game(speed[difficulty]) | 0.325413 | 0.312377 |
from collections import defaultdict
from typing import Dict
import torch as th
import torch.distributions as td
import torch.nn.functional as F
from rls.algorithms.base.marl_off_policy import MultiAgentOffPolicy
from rls.common.data import Data
from rls.common.decorator import iton
from rls.nn.models import ActorDct, ActorDPG, MACriticQvalueOne
from rls.nn.modules.wrappers import TargetTwin
from rls.nn.noised_actions import Noise_action_REGISTER
from rls.nn.utils import OPLR
from rls.utils.torch_utils import n_step_return
class MADDPG(MultiAgentOffPolicy):
"""
Multi-Agent Deep Deterministic Policy Gradient, https://arxiv.org/abs/1706.02275
"""
policy_mode = 'off-policy'
def __init__(self,
polyak=0.995,
noise_action='ou',
noise_params={
'sigma': 0.2
},
actor_lr=5.0e-4,
critic_lr=1.0e-3,
discrete_tau=1.0,
network_settings={
'actor_continuous': [32, 32],
'actor_discrete': [32, 32],
'q': [32, 32]
},
**kwargs):
"""
TODO: Annotation
"""
super().__init__(**kwargs)
self.polyak = polyak
self.discrete_tau = discrete_tau
self.actors, self.critics = {}, {}
for id in set(self.model_ids):
if self.is_continuouss[id]:
self.actors[id] = TargetTwin(ActorDPG(self.obs_specs[id],
rep_net_params=self._rep_net_params,
output_shape=self.a_dims[id],
network_settings=network_settings['actor_continuous']),
self.polyak).to(self.device)
else:
self.actors[id] = TargetTwin(ActorDct(self.obs_specs[id],
rep_net_params=self._rep_net_params,
output_shape=self.a_dims[id],
network_settings=network_settings['actor_discrete']),
self.polyak).to(self.device)
self.critics[id] = TargetTwin(MACriticQvalueOne(list(self.obs_specs.values()),
rep_net_params=self._rep_net_params,
action_dim=sum(
self.a_dims.values()),
network_settings=network_settings['q']),
self.polyak).to(self.device)
self.actor_oplr = OPLR(list(self.actors.values()), actor_lr, **self._oplr_params)
self.critic_oplr = OPLR(list(self.critics.values()), critic_lr, **self._oplr_params)
# TODO: 添加动作类型判断
self.noised_actions = {id: Noise_action_REGISTER[noise_action](**noise_params)
for id in set(self.model_ids) if self.is_continuouss[id]}
self._trainer_modules.update({f"actor_{id}": self.actors[id] for id in set(self.model_ids)})
self._trainer_modules.update({f"critic_{id}": self.critics[id] for id in set(self.model_ids)})
self._trainer_modules.update(actor_oplr=self.actor_oplr,
critic_oplr=self.critic_oplr)
def episode_reset(self):
super().episode_reset()
for noised_action in self.noised_actions.values():
noised_action.reset()
@iton
def select_action(self, obs: Dict):
acts_info = {}
actions = {}
for aid, mid in zip(self.agent_ids, self.model_ids):
output = self.actors[mid](obs[aid], rnncs=self.rnncs[aid]) # [B, A]
self.rnncs_[aid] = self.actors[mid].get_rnncs()
if self.is_continuouss[aid]:
mu = output # [B, A]
pi = self.noised_actions[mid](mu) # [B, A]
else:
logits = output # [B, A]
mu = logits.argmax(-1) # [B,]
cate_dist = td.Categorical(logits=logits)
pi = cate_dist.sample() # [B,]
action = pi if self._is_train_mode else mu
acts_info[aid] = Data(action=action)
actions[aid] = action
return actions, acts_info
@iton
def _train(self, BATCH_DICT):
"""
TODO: Annotation
"""
summaries = defaultdict(dict)
target_actions = {}
for aid, mid in zip(self.agent_ids, self.model_ids):
if self.is_continuouss[aid]:
target_actions[aid] = self.actors[mid].t(
BATCH_DICT[aid].obs_, begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
else:
target_logits = self.actors[mid].t(
BATCH_DICT[aid].obs_, begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
target_cate_dist = td.Categorical(logits=target_logits)
target_pi = target_cate_dist.sample() # [T, B]
action_target = F.one_hot(target_pi, self.a_dims[aid]).float() # [T, B, A]
target_actions[aid] = action_target # [T, B, A]
target_actions = th.cat(list(target_actions.values()), -1) # [T, B, N*A]
qs, q_targets = {}, {}
for mid in self.model_ids:
qs[mid] = self.critics[mid]([BATCH_DICT[id].obs for id in self.agent_ids],
th.cat([BATCH_DICT[id].action for id in self.agent_ids], -1)) # [T, B, 1]
q_targets[mid] = self.critics[mid].t([BATCH_DICT[id].obs_ for id in self.agent_ids],
target_actions) # [T, B, 1]
q_loss = {}
td_errors = 0.
for aid, mid in zip(self.agent_ids, self.model_ids):
dc_r = n_step_return(BATCH_DICT[aid].reward,
self.gamma,
BATCH_DICT[aid].done,
q_targets[mid],
BATCH_DICT['global'].begin_mask).detach() # [T, B, 1]
td_error = dc_r - qs[mid] # [T, B, 1]
td_errors += td_error
q_loss[aid] = 0.5 * td_error.square().mean() # 1
summaries[aid].update({
'Statistics/q_min': qs[mid].min(),
'Statistics/q_mean': qs[mid].mean(),
'Statistics/q_max': qs[mid].max()
})
self.critic_oplr.optimize(sum(q_loss.values()))
actor_loss = {}
for aid, mid in zip(self.agent_ids, self.model_ids):
if self.is_continuouss[aid]:
mu = self.actors[mid](BATCH_DICT[aid].obs,
begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
else:
logits = self.actors[mid](BATCH_DICT[aid].obs,
begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
logp_all = logits.log_softmax(-1) # [T, B, A]
gumbel_noise = td.Gumbel(0, 1).sample(logp_all.shape) # [T, B, A]
_pi = ((logp_all + gumbel_noise) / self.discrete_tau).softmax(-1) # [T, B, A]
_pi_true_one_hot = F.one_hot(_pi.argmax(-1), self.a_dims[aid]).float() # [T, B, A]
_pi_diff = (_pi_true_one_hot - _pi).detach() # [T, B, A]
mu = _pi_diff + _pi # [T, B, A]
all_actions = {id: BATCH_DICT[id].action for id in self.agent_ids}
all_actions[aid] = mu
q_actor = self.critics[mid](
[BATCH_DICT[id].obs for id in self.agent_ids],
th.cat(list(all_actions.values()), -1),
begin_mask=BATCH_DICT['global'].begin_mask
) # [T, B, 1]
actor_loss[aid] = -q_actor.mean() # 1
self.actor_oplr.optimize(sum(actor_loss.values()))
for aid in self.agent_ids:
summaries[aid].update({
'LOSS/actor_loss': actor_loss[aid],
'LOSS/critic_loss': q_loss[aid]
})
summaries['model'].update({
'LOSS/actor_loss', sum(actor_loss.values()),
'LOSS/critic_loss', sum(q_loss.values())
})
return td_errors / self.n_agents_percopy, summaries
def _after_train(self):
super()._after_train()
for actor in self.actors.values():
actor.sync()
for critic in self.critics.values():
critic.sync() | rls/algorithms/multi/maddpg.py |
from collections import defaultdict
from typing import Dict
import torch as th
import torch.distributions as td
import torch.nn.functional as F
from rls.algorithms.base.marl_off_policy import MultiAgentOffPolicy
from rls.common.data import Data
from rls.common.decorator import iton
from rls.nn.models import ActorDct, ActorDPG, MACriticQvalueOne
from rls.nn.modules.wrappers import TargetTwin
from rls.nn.noised_actions import Noise_action_REGISTER
from rls.nn.utils import OPLR
from rls.utils.torch_utils import n_step_return
class MADDPG(MultiAgentOffPolicy):
"""
Multi-Agent Deep Deterministic Policy Gradient, https://arxiv.org/abs/1706.02275
"""
policy_mode = 'off-policy'
def __init__(self,
polyak=0.995,
noise_action='ou',
noise_params={
'sigma': 0.2
},
actor_lr=5.0e-4,
critic_lr=1.0e-3,
discrete_tau=1.0,
network_settings={
'actor_continuous': [32, 32],
'actor_discrete': [32, 32],
'q': [32, 32]
},
**kwargs):
"""
TODO: Annotation
"""
super().__init__(**kwargs)
self.polyak = polyak
self.discrete_tau = discrete_tau
self.actors, self.critics = {}, {}
for id in set(self.model_ids):
if self.is_continuouss[id]:
self.actors[id] = TargetTwin(ActorDPG(self.obs_specs[id],
rep_net_params=self._rep_net_params,
output_shape=self.a_dims[id],
network_settings=network_settings['actor_continuous']),
self.polyak).to(self.device)
else:
self.actors[id] = TargetTwin(ActorDct(self.obs_specs[id],
rep_net_params=self._rep_net_params,
output_shape=self.a_dims[id],
network_settings=network_settings['actor_discrete']),
self.polyak).to(self.device)
self.critics[id] = TargetTwin(MACriticQvalueOne(list(self.obs_specs.values()),
rep_net_params=self._rep_net_params,
action_dim=sum(
self.a_dims.values()),
network_settings=network_settings['q']),
self.polyak).to(self.device)
self.actor_oplr = OPLR(list(self.actors.values()), actor_lr, **self._oplr_params)
self.critic_oplr = OPLR(list(self.critics.values()), critic_lr, **self._oplr_params)
# TODO: 添加动作类型判断
self.noised_actions = {id: Noise_action_REGISTER[noise_action](**noise_params)
for id in set(self.model_ids) if self.is_continuouss[id]}
self._trainer_modules.update({f"actor_{id}": self.actors[id] for id in set(self.model_ids)})
self._trainer_modules.update({f"critic_{id}": self.critics[id] for id in set(self.model_ids)})
self._trainer_modules.update(actor_oplr=self.actor_oplr,
critic_oplr=self.critic_oplr)
def episode_reset(self):
super().episode_reset()
for noised_action in self.noised_actions.values():
noised_action.reset()
@iton
def select_action(self, obs: Dict):
acts_info = {}
actions = {}
for aid, mid in zip(self.agent_ids, self.model_ids):
output = self.actors[mid](obs[aid], rnncs=self.rnncs[aid]) # [B, A]
self.rnncs_[aid] = self.actors[mid].get_rnncs()
if self.is_continuouss[aid]:
mu = output # [B, A]
pi = self.noised_actions[mid](mu) # [B, A]
else:
logits = output # [B, A]
mu = logits.argmax(-1) # [B,]
cate_dist = td.Categorical(logits=logits)
pi = cate_dist.sample() # [B,]
action = pi if self._is_train_mode else mu
acts_info[aid] = Data(action=action)
actions[aid] = action
return actions, acts_info
@iton
def _train(self, BATCH_DICT):
"""
TODO: Annotation
"""
summaries = defaultdict(dict)
target_actions = {}
for aid, mid in zip(self.agent_ids, self.model_ids):
if self.is_continuouss[aid]:
target_actions[aid] = self.actors[mid].t(
BATCH_DICT[aid].obs_, begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
else:
target_logits = self.actors[mid].t(
BATCH_DICT[aid].obs_, begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
target_cate_dist = td.Categorical(logits=target_logits)
target_pi = target_cate_dist.sample() # [T, B]
action_target = F.one_hot(target_pi, self.a_dims[aid]).float() # [T, B, A]
target_actions[aid] = action_target # [T, B, A]
target_actions = th.cat(list(target_actions.values()), -1) # [T, B, N*A]
qs, q_targets = {}, {}
for mid in self.model_ids:
qs[mid] = self.critics[mid]([BATCH_DICT[id].obs for id in self.agent_ids],
th.cat([BATCH_DICT[id].action for id in self.agent_ids], -1)) # [T, B, 1]
q_targets[mid] = self.critics[mid].t([BATCH_DICT[id].obs_ for id in self.agent_ids],
target_actions) # [T, B, 1]
q_loss = {}
td_errors = 0.
for aid, mid in zip(self.agent_ids, self.model_ids):
dc_r = n_step_return(BATCH_DICT[aid].reward,
self.gamma,
BATCH_DICT[aid].done,
q_targets[mid],
BATCH_DICT['global'].begin_mask).detach() # [T, B, 1]
td_error = dc_r - qs[mid] # [T, B, 1]
td_errors += td_error
q_loss[aid] = 0.5 * td_error.square().mean() # 1
summaries[aid].update({
'Statistics/q_min': qs[mid].min(),
'Statistics/q_mean': qs[mid].mean(),
'Statistics/q_max': qs[mid].max()
})
self.critic_oplr.optimize(sum(q_loss.values()))
actor_loss = {}
for aid, mid in zip(self.agent_ids, self.model_ids):
if self.is_continuouss[aid]:
mu = self.actors[mid](BATCH_DICT[aid].obs,
begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
else:
logits = self.actors[mid](BATCH_DICT[aid].obs,
begin_mask=BATCH_DICT['global'].begin_mask) # [T, B, A]
logp_all = logits.log_softmax(-1) # [T, B, A]
gumbel_noise = td.Gumbel(0, 1).sample(logp_all.shape) # [T, B, A]
_pi = ((logp_all + gumbel_noise) / self.discrete_tau).softmax(-1) # [T, B, A]
_pi_true_one_hot = F.one_hot(_pi.argmax(-1), self.a_dims[aid]).float() # [T, B, A]
_pi_diff = (_pi_true_one_hot - _pi).detach() # [T, B, A]
mu = _pi_diff + _pi # [T, B, A]
all_actions = {id: BATCH_DICT[id].action for id in self.agent_ids}
all_actions[aid] = mu
q_actor = self.critics[mid](
[BATCH_DICT[id].obs for id in self.agent_ids],
th.cat(list(all_actions.values()), -1),
begin_mask=BATCH_DICT['global'].begin_mask
) # [T, B, 1]
actor_loss[aid] = -q_actor.mean() # 1
self.actor_oplr.optimize(sum(actor_loss.values()))
for aid in self.agent_ids:
summaries[aid].update({
'LOSS/actor_loss': actor_loss[aid],
'LOSS/critic_loss': q_loss[aid]
})
summaries['model'].update({
'LOSS/actor_loss', sum(actor_loss.values()),
'LOSS/critic_loss', sum(q_loss.values())
})
return td_errors / self.n_agents_percopy, summaries
def _after_train(self):
super()._after_train()
for actor in self.actors.values():
actor.sync()
for critic in self.critics.values():
critic.sync() | 0.589835 | 0.228038 |
import os
import time
from datetime import datetime
import numpy as np
from AirSimClient import CarClient, CarControls
from prep_data import Point
class DataControl:
    """Rolling window of car telemetry used to emit one CSV row per sample.

    Keeps the last three sampled positions (x1 = oldest .. x3 = newest) plus
    the previous (suffix 1) and current (suffix 2) speed/velocity/control
    values, so each written row pairs the state at t-2/t-1 with the commands
    issued at t-1.
    """

    def __init__(self):
        # Last three sampled positions (x1 = oldest, x3 = newest).
        self.x1 = Point(0, 0)
        self.x2 = Point(0, 0)
        self.x3 = Point(0, 0)
        # Previous (1) and current (2) telemetry and control values.
        self.speed1 = 0
        self.speed2 = 0
        self.velocity2_x = 0
        self.velocity2_y = 0
        self.velocity1_x = 0
        self.velocity1_y = 0
        self.throttle2 = 0
        self.throttle1 = 0
        self.steering2 = 0
        self.steering1 = 0

    def getHeader(self):
        """Return the CSV header line matching the column order of __str__."""
        return "x1, y1, x2, y2, x3, y3," \
               " velocity1_x, velocity1_y, steering1, throttle1, " \
               "velocity2_x, velocity2_y, steering2, throttle2, \n"

    def __str__(self):
        """Render one CSV data row (positions, then t-1 and t-2 telemetry)."""
        return f"{self.x1.x}, {self.x1.y}, {self.x2.x}, {self.x2.y}, {self.x3.x}, {self.x3.y}, " \
               f"{self.velocity1_x}, {self.velocity1_y}, {self.steering1}, {self.throttle1}, " \
               f"{self.velocity2_x}, {self.velocity2_y},{self.steering2}, {self.throttle2}, \n"

    def advance_location(self, point):
        """Shift the position window left and append *point* as the newest."""
        self.x1 = self.x2
        self.x2 = self.x3
        self.x3 = point

    def set_data(self, speed, avelocity_x, avelocity_y, asteering, athrottle):
        """Demote the current values to 'previous' and store the new sample."""
        self.speed1 = self.speed2
        self.velocity1_x = self.velocity2_x
        self.velocity1_y = self.velocity2_y
        self.steering1 = self.steering2
        self.throttle1 = self.throttle2
        self.velocity2_x = avelocity_x
        self.velocity2_y = avelocity_y
        self.speed2 = speed
        self.steering2 = asteering
        self.throttle2 = athrottle

    def reset(self):
        """Return all state to the zero values set by __init__.

        Bug fix: the original reset assigned non-existent ``velocity1``/
        ``velocity2`` attributes and never cleared the ``*_x``/``*_y``
        velocity components, so stale velocities leaked into the first rows
        of the next episode.
        """
        self.x1 = Point(0, 0)
        self.x2 = Point(0, 0)
        self.x3 = Point(0, 0)
        self.speed1 = 0
        self.speed2 = 0
        self.velocity1_x = 0
        self.velocity1_y = 0
        self.velocity2_x = 0
        self.velocity2_y = 0
        self.throttle1 = 0
        self.throttle2 = 0
        self.steering1 = 0
        self.steering2 = 0
# Collection parameters.
DATA_FREQUENCY = 0.3  # seconds to wait between samples (~3 samples per second)
DATA_DIR = "data_dir"  # directory holding all the sample CSV files
FILE_SIZE = 5000  # max samples written to a single file

# Create the data directory if it doesn't already exist.
# (Replaces a bare try/except around os.stat + os.mkdir, which was
# race-prone and silently swallowed every error.)
os.makedirs(DATA_DIR, exist_ok=True)
# connect to the AirSim simulator
client = CarClient()
client.confirmConnection()
print('Connected')
# Enable programmatic control of the vehicle via the API.
client.enableApiControl(True)
# Reusable controls object sent to the simulator on every command.
car_controls = CarControls()
# Put the car back at its spawn point before collection starts.
client.reset()
# Rolling telemetry window shared by all collection loops below.
cntrl = DataControl()
def _run_trial(file, s_mu, s_sigma, t_mu, t_sigma):
    """Drive one episode (<= 20 s or until collision) with controls drawn
    from N(mu, sigma) and append one CSV row per sample to *file*."""
    # Stop the car, let it settle, then reset simulator + telemetry window.
    car_controls.throttle = 0
    car_controls.steering = 0
    client.setCarControls(car_controls)
    time.sleep(1)
    client.reset()
    cntrl.reset()
    start_time = time.time()
    while True:
        # End the episode on collision or after 20 seconds of driving.
        collision_info = client.getCollisionInfo()
        if collision_info.has_collided or time.time() - start_time > 20:
            break
        c_state = client.getCarState()
        cntrl.advance_location(Point(c_state.position[b'x_val'], c_state.position[b'y_val']))
        # now x1 is t-2, x2 & v & s & t are t-1, x3 is t.
        file.write(str(cntrl))
        n_steering = np.random.normal(s_mu, s_sigma, 1)[0]
        n_throttle = np.random.normal(t_mu, t_sigma, 1)[0]
        # set the commands and velocity for future knowledge
        cntrl.set_data(c_state.speed, c_state.velocity[b'x_val'], c_state.velocity[b'y_val'],
                       n_steering, n_throttle)
        car_controls.throttle = n_throttle
        car_controls.steering = n_steering
        # set the new controls to the simulator
        client.setCarControls(car_controls)
        # wait for the change to impact.
        time.sleep(DATA_FREQUENCY)


def _collect(file_name, configs):
    """Write a header plus trial rows into DATA_DIR/file_name for every
    (s_mu, s_sigma, t_mu, t_sigma, trials) configuration in *configs*."""
    with open(DATA_DIR + "/" + file_name, "w") as file:
        file.write(cntrl.getHeader())
        for s_mu, s_sigma, t_mu, t_sigma, trials in configs:
            for _ in range(trials):
                _run_trial(file, s_mu, s_sigma, t_mu, t_sigma)


# The three scripted sweeps below reproduce the original straight/left/right
# collection loops, which differed only in their (mu, sigma, trials) grids.
# Straight driving: zero steering bias, growing throttle mean and noise.
_collect("collect_straight.csv",
         [(0, 0.01 * j, 0.1 * j, 0.1 * j, 6) for j in range(1, 5)])
# Left turns: negative steering/throttle bias of growing magnitude and noise.
_collect("collect_left.csv",
         [(-0.2 * i, 0.1 * j, -0.2 * i, 0.1 * j, 4)
          for i in range(1, 5) for j in range(1, 5)])
# Right turns: positive bias, mirror image of the left-turn sweep.
_collect("collect_right.csv",
         [(0.2 * i, 0.1 * j, 0.2 * i, 0.1 * j, 4)
          for i in range(1, 5) for j in range(1, 5)])
# Endless random collection: each pass draws fresh (mu, sigma) parameters for
# steering and throttle, then logs FILE_SIZE samples into a timestamped CSV
# whose name encodes the drawn parameters (scaled by 100, as ints).
while True:
    s_mu, s_sigma, t_mu, t_sigma = np.random.normal(0, 0.6, 4)
    # Standard deviations must be non-negative.
    s_sigma, t_sigma = abs(s_sigma), abs(t_sigma)
    file_name = datetime.now().strftime("%m_%d_%H_%S") + "_sm{}_ss{}_tm{}_ts{}.csv".format(
        int(s_mu * 100), int(s_sigma * 100), int(t_mu * 100), int(t_sigma * 100))
    with open(DATA_DIR + "/" + file_name, "w") as file:
        file.write(cntrl.getHeader())
        for _ in range(FILE_SIZE):
            if client.getCollisionInfo().has_collided:
                # Stop the car, let it settle, then restart the episode.
                car_controls.throttle = 0
                car_controls.steering = 0
                client.setCarControls(car_controls)
                time.sleep(1)
                client.reset()
                cntrl.reset()
            state = client.getCarState()
            cntrl.advance_location(Point(state.position[b'x_val'], state.position[b'y_val']))
            # now x1 is t-2, x2 & v & s & t are t-1, x3 is t.
            file.write(str(cntrl))
            throttle_cmd = np.random.normal(t_mu, t_sigma, 1)[0]
            steering_cmd = np.random.normal(s_mu, s_sigma, 1)[0]
            # Remember the issued commands/velocity so the next row reports them.
            cntrl.set_data(state.speed, state.velocity[b'x_val'], state.velocity[b'y_val'],
                           steering_cmd, throttle_cmd)
            car_controls.throttle = throttle_cmd
            car_controls.steering = steering_cmd
            # Push the new controls to the simulator, then wait for effect.
            client.setCarControls(car_controls)
            time.sleep(DATA_FREQUENCY)
import time
from datetime import datetime
import numpy as np
from AirSimClient import CarClient, CarControls
from prep_data import Point
class DataControl:
def __init__(self):
self.x1 = Point(0, 0)
self.x2 = Point(0, 0)
self.x3 = Point(0, 0)
self.speed1 = 0
self.speed2 = 0
self.velocity2_x = 0
self.velocity2_y = 0
self.velocity1_x = 0
self.velocity1_y = 0
self.throttle2 = 0
self.throttle1 = 0
self.steering2 = 0
self.steering1 = 0
def getHeader(self):
return "x1, y1, x2, y2, x3, y3," \
" velocity1_x, velocity1_y, steering1, throttle1, " \
"velocity2_x, velocity2_y, steering2, throttle2, \n"
def __str__(self):
return f"{self.x1.x}, {self.x1.y}, {self.x2.x}, {self.x2.y}, {self.x3.x}, {self.x3.y}, " \
f"{self.velocity1_x}, {self.velocity1_y}, {self.steering1}, {self.throttle1}, " \
f"{self.velocity2_x}, {self.velocity2_y},{self.steering2}, {self.throttle2}, \n"
def advance_location(self, point):
self.x1 = self.x2
self.x2 = self.x3
self.x3 = point
def set_data(self, speed, avelocity_x, avelocity_y, asteering, athrottle):
self.speed1 = self.speed2
self.velocity1_x = self.velocity2_x
self.velocity1_y = self.velocity2_y
self.steering1 = self.steering2
self.throttle1 = self.throttle2
self.velocity2_x = avelocity_x
self.velocity2_y = avelocity_y
self.speed2 = speed
self.steering2 = asteering
self.throttle2 = athrottle
def reset(self):
self.x1 = Point(0, 0)
self.x2 = Point(0, 0)
self.x3 = Point(0, 0)
self.speed1 = 0
self.speed2 = 0
self.velocity2 = 0
self.velocity1 = 0
self.throttle2 = 0
self.throttle1 = 0
self.steering2 = 0
self.steering1 = 0
DATA_FREQUENCY = 0.3 # about 5 samples per second
DATA_DIR = "data_dir" # directory for all the samples
FILE_SIZE = 5000 # max samples per file.
# Create image directory if it doesn't already exist
try:
os.stat(DATA_DIR)
except:
os.mkdir(DATA_DIR)
# connect to the AirSim simulator
client = CarClient()
client.confirmConnection()
print('Connected')
client.enableApiControl(True)
car_controls = CarControls()
client.reset()
cntrl = DataControl()
file_name = "collect_straight.csv"
s_mu = 0
with open(DATA_DIR + "/" + file_name, "w") as file:
file.write(cntrl.getHeader())
for j in range(1, 5):
s_sigma = 0.01 * j
t_sigma = 0.1 * j
t_mu = 0.1 * j
for k in range(6):
car_controls.throttle = 0
car_controls.steering = 0
# set the new controls to the simul
client.setCarControls(car_controls)
time.sleep(1)
client.reset()
cntrl.reset()
start_time = time.time()
while True:
collision_info = client.getCollisionInfo()
if collision_info.has_collided or time.time() - start_time > 20:
break
c_state = client.getCarState()
cntrl.advance_location(Point(c_state.position[b'x_val'], c_state.position[b'y_val']))
# now x1 is t-2, x2 & v & s & t are t-1, x3 is t.
file.write(cntrl.__str__())
n_steering = np.random.normal(s_mu, s_sigma, 1)[0]
n_throttle = np.random.normal(t_mu, t_sigma, 1)[0]
# set the commands and velocity for future knowledge
cntrl.set_data(c_state.speed, c_state.velocity[b'x_val'], c_state.velocity[b'y_val'], n_steering,
n_throttle)
car_controls.throttle = n_throttle
car_controls.steering = n_steering
# set the new controls to the simulator
client.setCarControls(car_controls)
# wait for the change to impact.
time.sleep(DATA_FREQUENCY)
file_name = "collect_left.csv"
with open(DATA_DIR + "/" + file_name, "w") as file:
file.write(cntrl.getHeader())
for i in range(1, 5):
for j in range(1, 5):
s_mu = -0.2 * i
t_mu = -0.2 * i
s_sigma = 0.1 * j
t_sigma = 0.1 * j
for k in range(4):
car_controls.throttle = 0
car_controls.steering = 0
# set the new controls to the simul
client.setCarControls(car_controls)
time.sleep(1)
client.reset()
cntrl.reset()
start_time = time.time()
while True:
collision_info = client.getCollisionInfo()
if collision_info.has_collided or time.time() - start_time > 20:
break
c_state = client.getCarState()
cntrl.advance_location(Point(c_state.position[b'x_val'], c_state.position[b'y_val']))
# now x1 is t-2, x2 & v & s & t are t-1, x3 is t.
file.write(cntrl.__str__())
n_steering = np.random.normal(s_mu, s_sigma, 1)[0]
n_throttle = np.random.normal(t_mu, t_sigma, 1)[0]
# set the commands and velocity for future knowledge
cntrl.set_data(c_state.speed, c_state.velocity[b'x_val'], c_state.velocity[b'y_val'],
n_steering, n_throttle)
car_controls.throttle = n_throttle
car_controls.steering = n_steering
# set the new controls to the simulator
client.setCarControls(car_controls)
# wait for the change to impact.
time.sleep(DATA_FREQUENCY)
file_name = "collect_right.csv"
with open(DATA_DIR + "/" + file_name, "w") as file:
file.write(cntrl.getHeader())
for i in range(1, 5):
for j in range(1, 5):
s_mu = 0.2 * i
t_mu = 0.2 * i
s_sigma = 0.1 * j
t_sigma = 0.1 * j
for k in range(4):
car_controls.throttle = 0
car_controls.steering = 0
# set the new controls to the simul
client.setCarControls(car_controls)
time.sleep(1)
client.reset()
cntrl.reset()
start_time = time.time()
while True:
collision_info = client.getCollisionInfo()
if collision_info.has_collided or time.time() - start_time > 20:
break
c_state = client.getCarState()
cntrl.advance_location(Point(c_state.position[b'x_val'], c_state.position[b'y_val']))
# now x1 is t-2, x2 & v & s & t are t-1, x3 is t.
file.write(cntrl.__str__())
n_steering = np.random.normal(s_mu, s_sigma, 1)[0]
n_throttle = np.random.normal(t_mu, t_sigma, 1)[0]
# set the commands and velocity for future knowledge
cntrl.set_data(c_state.speed, c_state.velocity[b'x_val'], c_state.velocity[b'y_val'],
n_steering, n_throttle)
car_controls.throttle = n_throttle
car_controls.steering = n_steering
# set the new controls to the simulator
client.setCarControls(car_controls)
# wait for the change to impact.
time.sleep(DATA_FREQUENCY)
while True:
s_mu, s_sigma, t_mu, t_sigma, = np.random.normal(0, 0.6, 4)
s_sigma = abs(s_sigma)
t_sigma = abs(t_sigma)
file_name_head = datetime.now().strftime("%m_%d_%H_%S")
file_name_tail = "_sm{}_ss{}_tm{}_ts{}.csv".format(int(s_mu*100), int(s_sigma*100), int(t_mu*100), int(t_sigma*100))
file_name = file_name_head + file_name_tail
with open(DATA_DIR + "/" + file_name, "w") as file:
file.write(cntrl.getHeader())
for i in range(FILE_SIZE):
collision_info = client.getCollisionInfo()
if collision_info.has_collided:
car_controls.throttle = 0
car_controls.steering = 0
# set the new controls to the simul
client.setCarControls(car_controls)
time.sleep(1)
client.reset()
cntrl.reset()
car_state = client.getCarState()
cntrl.advance_location(Point(car_state.position[b'x_val'], car_state.position[b'y_val']))
# now x1 is t-2, x2 & v & s & t are t-1, x3 is t.
file.write(cntrl.__str__())
new_throttle = np.random.normal(t_mu, t_sigma, 1)[0]
new_steering = np.random.normal(s_mu, s_sigma, 1)[0]
# set the commands and velocity for future knowledge
cntrl.set_data(car_state.speed, car_state.velocity[b'x_val'], car_state.velocity[b'y_val'], new_steering, new_throttle)
car_controls.throttle = new_throttle
car_controls.steering = new_steering
# set the new controls to the simulator
client.setCarControls(car_controls)
# wait for the change to impact.
time.sleep(DATA_FREQUENCY) | 0.325949 | 0.221919 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapi.admin_permission import AdminPermission
from networkapi.auth import has_perm
from networkapi.grupo.models import GrupoError
from networkapi.infrastructure.xml_utils import dumps_networkapi, loads, XMLError
import logging
from networkapi.rest import RestResource
from networkapi.util import is_valid_int_greater_zero_param
from networkapi.ambiente.models import IPConfig, ConfigEnvironment, IPConfigNotFoundError, AmbienteError, Ambiente, AmbienteNotFoundError, \
ConfigEnvironmentDuplicateError
from networkapi.exception import InvalidValueError
class EnvironmentIpConfigResource(RestResource):
log = logging.getLogger('EnvironmentIpConfigResource')
CODE_MESSAGE_CONFIG_ENVIRONMENT_ALREADY_EXISTS = 302
def handle_post(self, request, user, *args, **kwargs):
"""Handles POST requests associate environment to ip config
URL: ipconfig/
"""
try:
# Commons Validations
# User permission
if not has_perm(user, AdminPermission.ENVIRONMENT_MANAGEMENT, AdminPermission.WRITE_OPERATION):
return self.not_authorized()
# Business Validations
# Load XML data
xml_map, attrs_map = loads(request.raw_post_data)
# XML data format
networkapi_map = xml_map.get('networkapi')
if networkapi_map is None:
return self.response_error(3, u'Não existe valor para a tag networkapi do XML de requisição.')
environment_map = networkapi_map.get('ambiente')
if environment_map is None:
return self.response_error(3, u'Não existe valor para a tag ambiente do XML de requisição.')
# Get XML data
id_environment = environment_map.get('id_environment')
id_ip_config = environment_map.get('id_ip_config')
# Valid environment
if not is_valid_int_greater_zero_param(id_environment):
raise InvalidValueError(None, 'id_environment', id_environment)
# Valid ip config
if not is_valid_int_greater_zero_param(id_ip_config):
raise InvalidValueError(None, 'id_ip_config', id_ip_config)
# Environment must exists
environment = Ambiente().get_by_pk(id_environment)
# Ip config must exists
ip_conf = IPConfig().get_by_pk(id_ip_config)
# Makes the relationship
config = ConfigEnvironment()
config.ip_config = ip_conf
config.environment = environment
config.save()
# Make return xml
conf_env_map = dict()
conf_env_map['id_config_do_ambiente'] = config.id
return self.response(dumps_networkapi({'config_do_ambiente': conf_env_map}))
except InvalidValueError, e:
return self.response_error(269, e.param, e.value)
except ConfigEnvironmentDuplicateError, e:
return self.response_error(self.CODE_MESSAGE_CONFIG_ENVIRONMENT_ALREADY_EXISTS)
except IPConfigNotFoundError, e:
return self.response_error(301)
except AmbienteNotFoundError, e:
return self.response_error(112)
except XMLError, x:
self.log.error(u'Error reading the XML request.')
return self.response_error(3, x)
except (AmbienteError, GrupoError, Exception), e:
return self.response_error(1) | networkapi/ambiente/resource/EnvironmentIpConfigResource.py |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapi.admin_permission import AdminPermission
from networkapi.auth import has_perm
from networkapi.grupo.models import GrupoError
from networkapi.infrastructure.xml_utils import dumps_networkapi, loads, XMLError
import logging
from networkapi.rest import RestResource
from networkapi.util import is_valid_int_greater_zero_param
from networkapi.ambiente.models import IPConfig, ConfigEnvironment, IPConfigNotFoundError, AmbienteError, Ambiente, AmbienteNotFoundError, \
ConfigEnvironmentDuplicateError
from networkapi.exception import InvalidValueError
class EnvironmentIpConfigResource(RestResource):
log = logging.getLogger('EnvironmentIpConfigResource')
CODE_MESSAGE_CONFIG_ENVIRONMENT_ALREADY_EXISTS = 302
def handle_post(self, request, user, *args, **kwargs):
"""Handles POST requests associate environment to ip config
URL: ipconfig/
"""
try:
# Commons Validations
# User permission
if not has_perm(user, AdminPermission.ENVIRONMENT_MANAGEMENT, AdminPermission.WRITE_OPERATION):
return self.not_authorized()
# Business Validations
# Load XML data
xml_map, attrs_map = loads(request.raw_post_data)
# XML data format
networkapi_map = xml_map.get('networkapi')
if networkapi_map is None:
return self.response_error(3, u'Não existe valor para a tag networkapi do XML de requisição.')
environment_map = networkapi_map.get('ambiente')
if environment_map is None:
return self.response_error(3, u'Não existe valor para a tag ambiente do XML de requisição.')
# Get XML data
id_environment = environment_map.get('id_environment')
id_ip_config = environment_map.get('id_ip_config')
# Valid environment
if not is_valid_int_greater_zero_param(id_environment):
raise InvalidValueError(None, 'id_environment', id_environment)
# Valid ip config
if not is_valid_int_greater_zero_param(id_ip_config):
raise InvalidValueError(None, 'id_ip_config', id_ip_config)
# Environment must exists
environment = Ambiente().get_by_pk(id_environment)
# Ip config must exists
ip_conf = IPConfig().get_by_pk(id_ip_config)
# Makes the relationship
config = ConfigEnvironment()
config.ip_config = ip_conf
config.environment = environment
config.save()
# Make return xml
conf_env_map = dict()
conf_env_map['id_config_do_ambiente'] = config.id
return self.response(dumps_networkapi({'config_do_ambiente': conf_env_map}))
except InvalidValueError, e:
return self.response_error(269, e.param, e.value)
except ConfigEnvironmentDuplicateError, e:
return self.response_error(self.CODE_MESSAGE_CONFIG_ENVIRONMENT_ALREADY_EXISTS)
except IPConfigNotFoundError, e:
return self.response_error(301)
except AmbienteNotFoundError, e:
return self.response_error(112)
except XMLError, x:
self.log.error(u'Error reading the XML request.')
return self.response_error(3, x)
except (AmbienteError, GrupoError, Exception), e:
return self.response_error(1) | 0.744656 | 0.067026 |
"""Test class for the relay derating module."""
# Third Party Imports
import pytest
# RAMSTK Package Imports
from ramstk.analyses.derating import relay
@pytest.mark.unit
def test_do_derating_analysis_no_stresses(test_stress_limits):
"""should determine the relay is not execeeding any limit."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 0
assert _reason == ""
@pytest.mark.unit
def test_do_derating_analysis_active_temperature(test_stress_limits):
"""should determine the relay is exceeding the active ambient temperature limit."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=76.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 1
assert (
_reason == "Ambient temperature of 76.3C exceeds the derated maximum "
"temperature of 20.0C less than maximum rated temperature of 85.0C.\n"
)
@pytest.mark.unit
def test_do_derating_analysis_current(test_stress_limits):
"""should determine the relay is execeeding the current limit."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.8,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 1
assert _reason == "Current ratio of 0.8 exceeds the allowable limit of 0.6.\n"
@pytest.mark.unit
def test_do_derating_analysis_all_stresses(test_stress_limits):
"""should determine the relay is execeeding both limits."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.8,
temperature_active=66.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 1
assert (
_reason == "Current ratio of 0.8 exceeds the allowable limit of 0.6.\nAmbient "
"temperature of 66.3C exceeds the derated maximum temperature of 20.0C less "
"than maximum rated temperature of 85.0C.\n"
)
@pytest.mark.unit
def test_do_derating_analysis_unknown_environment(test_stress_limits):
"""should raise am IndexError when passed an unknown environment."""
with pytest.raises(IndexError):
relay.do_derating_analysis(
5,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
)
@pytest.mark.unit
def test_do_derating_analysis_unknown_type(test_stress_limits):
"""should raise am KeyError when passed an unknown type ID."""
with pytest.raises(KeyError):
relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=11,
)
@pytest.mark.unit
@pytest.mark.parametrize("active_temperature", ["128.3", None])
def test_do_derating_analysis_non_numeric_temperature(
active_temperature,
test_stress_limits,
):
"""should raise am TypeError when passed a non-numeric current ratio."""
with pytest.raises(TypeError):
relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=active_temperature,
temperature_rated_max=85.0,
type_id=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("current_ratio", ["0.9", None])
def test_do_derating_analysis_non_numeric_current_ratio(
current_ratio,
test_stress_limits,
):
"""should raise am TypeError when passed a non-numeric current ratio."""
with pytest.raises(TypeError):
relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=current_ratio,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
) | tests/analyses/derating/models/relay_derating_unit_test.py | """Test class for the relay derating module."""
# Third Party Imports
import pytest
# RAMSTK Package Imports
from ramstk.analyses.derating import relay
@pytest.mark.unit
def test_do_derating_analysis_no_stresses(test_stress_limits):
"""should determine the relay is not execeeding any limit."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 0
assert _reason == ""
@pytest.mark.unit
def test_do_derating_analysis_active_temperature(test_stress_limits):
"""should determine the relay is exceeding the active ambient temperature limit."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=76.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 1
assert (
_reason == "Ambient temperature of 76.3C exceeds the derated maximum "
"temperature of 20.0C less than maximum rated temperature of 85.0C.\n"
)
@pytest.mark.unit
def test_do_derating_analysis_current(test_stress_limits):
"""should determine the relay is execeeding the current limit."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.8,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 1
assert _reason == "Current ratio of 0.8 exceeds the allowable limit of 0.6.\n"
@pytest.mark.unit
def test_do_derating_analysis_all_stresses(test_stress_limits):
"""should determine the relay is execeeding both limits."""
_overstress, _reason = relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.8,
temperature_active=66.3,
temperature_rated_max=85.0,
type_id=1,
)
assert _overstress == 1
assert (
_reason == "Current ratio of 0.8 exceeds the allowable limit of 0.6.\nAmbient "
"temperature of 66.3C exceeds the derated maximum temperature of 20.0C less "
"than maximum rated temperature of 85.0C.\n"
)
@pytest.mark.unit
def test_do_derating_analysis_unknown_environment(test_stress_limits):
"""should raise am IndexError when passed an unknown environment."""
with pytest.raises(IndexError):
relay.do_derating_analysis(
5,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
)
@pytest.mark.unit
def test_do_derating_analysis_unknown_type(test_stress_limits):
"""should raise am KeyError when passed an unknown type ID."""
with pytest.raises(KeyError):
relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=11,
)
@pytest.mark.unit
@pytest.mark.parametrize("active_temperature", ["128.3", None])
def test_do_derating_analysis_non_numeric_temperature(
active_temperature,
test_stress_limits,
):
"""should raise am TypeError when passed a non-numeric current ratio."""
with pytest.raises(TypeError):
relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=0.2,
temperature_active=active_temperature,
temperature_rated_max=85.0,
type_id=1,
)
@pytest.mark.unit
@pytest.mark.parametrize("current_ratio", ["0.9", None])
def test_do_derating_analysis_non_numeric_current_ratio(
current_ratio,
test_stress_limits,
):
"""should raise am TypeError when passed a non-numeric current ratio."""
with pytest.raises(TypeError):
relay.do_derating_analysis(
1,
test_stress_limits["relay"],
current_ratio=current_ratio,
temperature_active=46.3,
temperature_rated_max=85.0,
type_id=1,
) | 0.902559 | 0.511046 |
import logging
import requests
import urllib3
from trans_sec.exceptions import NotFoundError, AlreadyExistsError
urllib3.disable_warnings()
logger = logging.getLogger('http_session')
class HttpSession:
def __init__(self, url, username=None, password=<PASSWORD>, verify_cert=False):
self.username = username
self.password = password
self.url = url
self.token = ''
self.verify_cert = verify_cert
self.authorized = True
def authorize(self):
self.authorized = True
def is_authorized(self):
return self.authorized
def get(self, resource, key=None):
logger.info('GET resource [%s] with key [%s]', resource, key)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
actual_resource = resource
if key is not None:
actual_resource = actual_resource + '/' + key
r = requests.get(self.url + '/' + actual_resource,
headers=headers, verify=False)
if r.status_code == 200:
logger.info('GET return value - [%s]', r.json())
return r.json()
else:
logger.error('Error on Get with code and payload [%s]',
r.status_code, r.json())
temp = r.json()
raise NotFoundError(key, str(temp['Messages'][0]))
def post(self, resource, body):
logger.info('POST resource [%s] with body [%s]', resource, body)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
logger.debug('Post request received from %s/%s with body value[%s]',
self.url, resource, body)
try:
response = requests.post(
self.url + '/' + resource,
headers=headers, json=body, verify=False)
except Exception as e:
logger.error('Unexpected error - %s', e)
return
logger.debug('POST response - [%s]', response)
if response.status_code == 201 or response.status_code == 200:
logger.info('POST return value - [%s]', response.json())
return response.json()
else:
logger.error(
'Error on Post [%s] to URL [%s/%s]',
str(response.status_code), self.url, resource)
temp = response.json()
if body.get('Name') is not None:
raise AlreadyExistsError(
body['Name'], str(temp['Messages'][0]))
else:
raise AlreadyExistsError(
body['Addr'], str(temp['Messages'][0]))
def delete(self, resource, body):
logger.info('DELETE resource [%s] with key [%s]', resource, body)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
r = requests.delete(self.url + '/' + resource,
headers=headers, json=body, verify=False)
if r.status_code == 200:
logger.info('DELETE return value - [%s]', r.json())
return r.json()
elif r.status_code == 404:
logger.info('Deleting a non-existent object, ignoring')
return dict()
else:
logger.error('Error on Delete with code %s and payload [%s]',
r.status_code, r.json)
return r.status_code
def put(self, resource, body, key):
logger.info('PUT resource [%s] with key [%s]', resource, key)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
actual_resource = resource + '/' + key
r = requests.put(self.url + '/' + actual_resource,
headers=headers, json=body, verify=False)
if r.status_code == 200:
logger.info('PUT return value - [%s]', r.json())
return r.json()
else:
logger.error('Error on Put with code %s and payload [%s]',
r.status_code, r.json())
return r.status_code | trans_sec/utils/http_session.py | import logging
import requests
import urllib3
from trans_sec.exceptions import NotFoundError, AlreadyExistsError
urllib3.disable_warnings()
logger = logging.getLogger('http_session')
class HttpSession:
def __init__(self, url, username=None, password=<PASSWORD>, verify_cert=False):
self.username = username
self.password = password
self.url = url
self.token = ''
self.verify_cert = verify_cert
self.authorized = True
def authorize(self):
self.authorized = True
def is_authorized(self):
return self.authorized
def get(self, resource, key=None):
logger.info('GET resource [%s] with key [%s]', resource, key)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
actual_resource = resource
if key is not None:
actual_resource = actual_resource + '/' + key
r = requests.get(self.url + '/' + actual_resource,
headers=headers, verify=False)
if r.status_code == 200:
logger.info('GET return value - [%s]', r.json())
return r.json()
else:
logger.error('Error on Get with code and payload [%s]',
r.status_code, r.json())
temp = r.json()
raise NotFoundError(key, str(temp['Messages'][0]))
def post(self, resource, body):
logger.info('POST resource [%s] with body [%s]', resource, body)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
logger.debug('Post request received from %s/%s with body value[%s]',
self.url, resource, body)
try:
response = requests.post(
self.url + '/' + resource,
headers=headers, json=body, verify=False)
except Exception as e:
logger.error('Unexpected error - %s', e)
return
logger.debug('POST response - [%s]', response)
if response.status_code == 201 or response.status_code == 200:
logger.info('POST return value - [%s]', response.json())
return response.json()
else:
logger.error(
'Error on Post [%s] to URL [%s/%s]',
str(response.status_code), self.url, resource)
temp = response.json()
if body.get('Name') is not None:
raise AlreadyExistsError(
body['Name'], str(temp['Messages'][0]))
else:
raise AlreadyExistsError(
body['Addr'], str(temp['Messages'][0]))
def delete(self, resource, body):
logger.info('DELETE resource [%s] with key [%s]', resource, body)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
r = requests.delete(self.url + '/' + resource,
headers=headers, json=body, verify=False)
if r.status_code == 200:
logger.info('DELETE return value - [%s]', r.json())
return r.json()
elif r.status_code == 404:
logger.info('Deleting a non-existent object, ignoring')
return dict()
else:
logger.error('Error on Delete with code %s and payload [%s]',
r.status_code, r.json)
return r.status_code
def put(self, resource, body, key):
logger.info('PUT resource [%s] with key [%s]', resource, key)
if not self.is_authorized():
self.authorize()
headers = {'Authorization': 'Bearer ' + self.token}
actual_resource = resource + '/' + key
r = requests.put(self.url + '/' + actual_resource,
headers=headers, json=body, verify=False)
if r.status_code == 200:
logger.info('PUT return value - [%s]', r.json())
return r.json()
else:
logger.error('Error on Put with code %s and payload [%s]',
r.status_code, r.json())
return r.status_code | 0.18591 | 0.104067 |
from pathlib import Path
from math import *
import functools
import numpy as np
import tensorflow as tf
from tensorflow import keras
import config
def format_percentage2(n):
return floor(n * 10000) / 100
# init variables
IMAGE_SIZE = (config.MODEL_INPUT_SIZE, config.MODEL_INPUT_SIZE)
print(f'Using {config.MODEL_URL} with input size {IMAGE_SIZE}')
# load test dataset
ds = tf.keras.preprocessing.image_dataset_from_directory(
config.DIRPATH_DATASET,
seed=123,
image_size=IMAGE_SIZE,
batch_size=config.DATASET_BATCH_SIZE)
class_names = ds.class_names
class_indices = range(len(class_names))
class_count = len(class_names)
ds_count = ds.cardinality().numpy()
test_count = int(ds_count * 0.2)
test_ds = ds.take(test_count)
print('datasets initialized')
# evaluate model on test images
model = keras.models.load_model(config.FILEPATH_SAVED_MODEL)
predicted_indices = []
actual_indices = []
num_batches = sum([1 for _ in test_ds])
i=1
for images, labels in test_ds:
labels_list = list(labels.numpy())
pred = model.predict(images)
predicted_list = list(np.argmax(pred, axis=1))
for predicted, actual in zip(predicted_list, labels_list):
predicted_indices.append(predicted)
actual_indices.append(actual)
print(f'batch {i}/{num_batches}')
i += 1
# count correct guesses for each class
classification_counts = [[0 for _ in class_names]
for _ in class_names]
correct_predictions = 0
total_predictions = 0
for predicted, actual in zip(actual_indices, predicted_indices):
classification_counts[actual][predicted] += 1
if predicted == actual:
correct_predictions += 1
total_predictions += 1
# calculate percentages for all classes
accuracies = []
class_index = 0
for class_counts in classification_counts:
total_for_class = sum(class_counts)
accuracy = 0
percentages = []
if total_for_class != 0:
# calculate accuracy for class
accuracy = class_counts[class_index] / total_for_class
# calculate top n guessed classes for actual class
for j in range(5):
# pick class with highest classification count for the actual class
class_index_highest_count = np.argmax(class_counts)
highest_count = class_counts[class_index_highest_count]
if highest_count < 0:
break
acc = 0
percentage = format_percentage2(highest_count / total_for_class)
percentages.append({'class_index': class_index_highest_count,
'percentage': percentage})
class_counts[class_index_highest_count] = -1
# create string for top n guessed classes
percentage_str = ''
j = 0
for percentage_entry in percentages:
if j != 0:
percentage_str += ', '
class_name = class_names[percentage_entry['class_index']]
percentage_str += f'{percentage_entry["percentage"]}% {class_name}'
j += 1
accuracies.append({
'class_name': class_names[class_index],
'accuracy': format_percentage2(accuracy),
'percentages': percentage_str})
class_index += 1
# sort accuracies in DESC order
def compare(x1, x2):
return x2["accuracy"] - x1["accuracy"]
accuracies = sorted(
accuracies, key=functools.cmp_to_key(compare))
# write result to output file
print(f'accuracy: {format_percentage2(correct_predictions / total_predictions)}%')
f = open(config.FILEPATH_CLASS_ACCURACIES, "w", encoding='utf-8')
for entry in accuracies:
f.write(f'{entry["accuracy"]}%,{entry["class_name"]},"{entry["percentages"]}"\n')
f.close()
print('-------------- DONE --------------') | 7_evaluate_model.py | from pathlib import Path
from math import *
import functools
import numpy as np
import tensorflow as tf
from tensorflow import keras
import config
def format_percentage2(n):
return floor(n * 10000) / 100
# init variables
IMAGE_SIZE = (config.MODEL_INPUT_SIZE, config.MODEL_INPUT_SIZE)
print(f'Using {config.MODEL_URL} with input size {IMAGE_SIZE}')
# load test dataset
ds = tf.keras.preprocessing.image_dataset_from_directory(
config.DIRPATH_DATASET,
seed=123,
image_size=IMAGE_SIZE,
batch_size=config.DATASET_BATCH_SIZE)
class_names = ds.class_names
class_indices = range(len(class_names))
class_count = len(class_names)
ds_count = ds.cardinality().numpy()
test_count = int(ds_count * 0.2)
test_ds = ds.take(test_count)
print('datasets initialized')
# evaluate model on test images
# ---------------------------------------------------------------------------
# Evaluate the saved model on the test set and write per-class accuracies.
# Relies on module-level `config`, `keras`, `np`, `test_ds`, `class_names`
# and `format_percentage2` defined earlier in this file.
# ---------------------------------------------------------------------------
model = keras.models.load_model(config.FILEPATH_SAVED_MODEL)
predicted_indices = []
actual_indices = []
num_batches = sum(1 for _ in test_ds)
for i, (images, labels) in enumerate(test_ds, start=1):
    labels_list = list(labels.numpy())
    pred = model.predict(images)
    predicted_list = list(np.argmax(pred, axis=1))
    # Keep predictions and ground-truth labels aligned by position.
    predicted_indices.extend(predicted_list)
    actual_indices.extend(labels_list)
    print(f'batch {i}/{num_batches}')
# count correct guesses for each class:
# classification_counts[actual][predicted] = number of samples of class
# `actual` that the model labelled as `predicted` (a confusion matrix).
classification_counts = [[0 for _ in class_names]
                         for _ in class_names]
correct_predictions = 0
total_predictions = 0
# BUG FIX: the original looped `for predicted, actual in
# zip(actual_indices, predicted_indices)`, swapping the two sequences and
# transposing the confusion matrix; overall accuracy was unaffected but
# every per-class figure was wrong.
for predicted, actual in zip(predicted_indices, actual_indices):
    classification_counts[actual][predicted] += 1
    if predicted == actual:
        correct_predictions += 1
    total_predictions += 1
# calculate percentages for all classes
accuracies = []
for class_index, class_counts in enumerate(classification_counts):
    total_for_class = sum(class_counts)
    accuracy = 0
    percentages = []
    if total_for_class != 0:
        # accuracy for this class = diagonal entry / row total
        accuracy = class_counts[class_index] / total_for_class
        # collect the top-5 predicted classes for this actual class
        for _ in range(5):
            # pick the class with the highest remaining count; consumed
            # entries are marked -1 below so argmax moves to the next one
            class_index_highest_count = np.argmax(class_counts)
            highest_count = class_counts[class_index_highest_count]
            if highest_count < 0:
                break
            percentage = format_percentage2(highest_count / total_for_class)
            percentages.append({'class_index': class_index_highest_count,
                                'percentage': percentage})
            class_counts[class_index_highest_count] = -1
    # build a comma-separated summary of the top guessed classes
    percentage_str = ', '.join(
        f'{entry["percentage"]}% {class_names[entry["class_index"]]}'
        for entry in percentages)
    accuracies.append({
        'class_name': class_names[class_index],
        'accuracy': format_percentage2(accuracy),
        'percentages': percentage_str})
# sort accuracies in DESC order (plain key sort instead of cmp_to_key;
# the old comparator subtracted the values, so they must be numeric)
accuracies.sort(key=lambda entry: entry['accuracy'], reverse=True)
# write result to output file
print(f'accuracy: {format_percentage2(correct_predictions / total_predictions)}%')
# context manager guarantees the report file is closed even on error
with open(config.FILEPATH_CLASS_ACCURACIES, "w", encoding='utf-8') as f:
    for entry in accuracies:
        f.write(f'{entry["accuracy"]}%,{entry["class_name"]},"{entry["percentages"]}"\n')
print('-------------- DONE --------------')
import json
from django.utils import timezone
from agiletixapi import AgileError, AgileSalesAPI
from agiletixapi.exceptions import AgileException, InvalidPromoException
from agiletixapi.models import Order
from agiletixapi.utils import datestring_to_ms_datestring
from agiletix.logging import get_logger
logger = get_logger('lib')
from agiletix.settings import AGILE_SETTINGS as SETTINGS
SESSION_CART_DATA = "SESSION_CART_DATA"
SESSION_EVENT_PRICE_CACHE_KEY = "SESSION_EVENT_PRICE_CACHE_KEY"
api = AgileSalesAPI(
base_url=SETTINGS['AGILE_BASE_URL'],
app_key=SETTINGS['AGILE_APP_KEY'],
user_key=SETTINGS['AGILE_USER_KEY'],
corp_org_id=SETTINGS['AGILE_CORP_ORG_ID']
)
def get_cart_for_request(request, force_non_member=False):
    """
    Try to retrieve cart from the current session. If none found, create one
    """
    cart = getattr(request, 'cart', None)
    # A member cart cannot be reused when a non-member cart was requested.
    if cart and cart.is_member and force_non_member:
        cart = None
    if cart:
        return cart
    try:
        return Cart(request=request, force_non_member=force_non_member)
    except AgileException as e:
        # TODO: Yeha
        #logger.warning(__name__, "AgileException -> {}".format(e))
        if e.code == 1024:
            # Retry once as an anonymous (non-member) cart.
            return get_cart_for_request(request, force_non_member=True)
        #cart_error = e.code
    return cart
class Cart(object):
    """Shopping cart backed by an Agile order kept in the Django session."""

    _order = None      # lazily loaded/started Order for this request
    customer = None    # the authenticated customer, when acting as one
    request = None     # current Django request (carries the session)

    def __init__(self, request, force_non_member=False):
        self.request = request
        if request.user.is_authenticated and not force_non_member:
            self.customer = request.user

    @property
    def is_member(self):
        # Truthy only for an authenticated customer that has a member id.
        return self.customer and self.customer.member_id

    def start_order(self):
        """Start a new Agile order and cache it in the session.

        Returns the new ``Order``, or ``None`` when no order could be
        started.  Raises ``AgileException`` when a member renewal is
        required.
        """
        customer = self.customer
        response = None
        if customer:
            if customer.member_id:
                response = api.order_start(buyer_type_id=SETTINGS['AGILE_BUYER_TYPE_STANDARD_ID'], customer_id=customer.customer_id, member_id=customer.member_id)
            else:
                response = api.order_start(buyer_type_id=SETTINGS['AGILE_BUYER_TYPE_STANDARD_ID'], customer_id=customer.customer_id)
            if not response.success:
                if response.error.code == AgileError.MemberRenewalRequired:
                    raise AgileException(code=response.error.code, message=response.error.message)
                # TODO: Handle others
        if not customer or (response and not response.success):
            # Fall back to an anonymous order.
            response = api.order_start(buyer_type_id=SETTINGS['AGILE_BUYER_TYPE_STANDARD_ID'])
        if response and response.success:
            logger.debug("Order started", response=response.data)
            order = Order(response.data)
        else:
            order = None
        # BUG FIX: only serialize when an order actually exists; the
        # original unconditionally called order.to_json() and raised
        # AttributeError whenever the order could not be started.
        if order is not None:
            self.request.session[SESSION_CART_DATA] = json.dumps(order.to_json())
        return order

    def load_order(self):
        """Rebuild the ``Order`` stored in the session, or return ``None``."""
        order = None
        json_object = None
        order_json = self.request.session.get(SESSION_CART_DATA)
        if order_json:
            try:
                json_object = json.loads(order_json)
            except (TypeError, ValueError):
                # Narrowed from a bare except: json.loads raises
                # JSONDecodeError (a ValueError) on malformed input.
                pass  # TODO: Better handling here
        if json_object:
            logger.debug("Order loaded", order_json=json_object)
            # Need to convert datetimes back to MS Json.NET before passing to Order object
            # CloseDateTime, ExpirationDateTime, OpenDateTime
            agile_json_object = {}
            for key, value in json_object.items():
                if "DateTime" in key:
                    agile_json_object[key] = datestring_to_ms_datestring(value)
                else:
                    agile_json_object[key] = value
            order = Order(agile_json_object)
        return order

    def validate_order(self, order):
        """Return True when ``order`` is usable for the current customer."""
        valid = order.in_process
        if order.expiration_datetime < timezone.now() or order.expired:
            valid = False
        customer = self.customer
        if customer and customer.customer_id:
            # A stored order must belong to the logged-in customer.
            if not order.customer_id:
                valid = False
            elif (int(order.customer_id) != int(customer.customer_id)):
                valid = False
        return valid

    @property
    def order(self):
        """The current order; loaded from the session or freshly started."""
        if not self._order:
            order = self.load_order()
            if order:
                if not self.validate_order(order):
                    # Stale or foreign order: drop it and start over.
                    self.request.session[SESSION_CART_DATA] = None
                    order = None
            if not order:
                order = self.start_order()
            self._order = order
        return self._order

    @order.setter
    def order(self, value):
        self._order = None
        self.request.session[SESSION_CART_DATA] = json.dumps(value.to_json())

    def add_tickets(self, agile_event_org_id, agile_event_id, tier_id, tickets, promo_codes=None):
        """
        Tickets is a dictionary in the format:
        { TICKET_TYPE: QUANTITY }
        """
        # dicts preserve insertion order, so keys and values stay aligned
        ticket_types = ",".join(tickets.keys())
        quantities = ",".join(str(quantity) for quantity in tickets.values())
        self.add_ticket(
            agile_event_org_id=agile_event_org_id,
            agile_event_id=agile_event_id,
            tier_id=tier_id,
            ticket_types=ticket_types,
            quantities=quantities,
            promo_codes=promo_codes
        )

    def add_ticket(self, agile_event_org_id, agile_event_id, tier_id, ticket_types, quantities, promo_codes=None):
        """Add tickets to the current order; raises on Agile errors."""
        order = self.order
        if promo_codes:
            promo_codes = ",".join(promo_codes)
        logger.debug("Adding ticket payload to cart",
            order_id=order.order_id,
            transaction_id=order.transaction_id,
            agile_event_org_id=agile_event_org_id,
            agile_event_id=agile_event_id,
            tier_id=tier_id,
            ticket_types=ticket_types,
            quantities=quantities,
            promo_codes=promo_codes
        )
        response = api.tickets_add(
            order.order_id,
            order.transaction_id,
            agile_event_org_id,
            agile_event_id,
            tier_id,
            ticket_types,
            quantities,
            promo_codes=promo_codes
        )
        logger.debug("Adding ticket response", response=response.data)
        if not response.success:
            # 1034 is Agile's "invalid promo code" error
            if response.error.code == 1034:
                raise InvalidPromoException
            else:
                raise AgileException(code=response.error.code, message=response.error.message)

    def get_transfer_url(self):
        """Return the (https-upgraded) checkout transfer URL, or None."""
        response = api.order_transfer(self.order.order_id, self.order.transaction_id)
        url = None
        logger.debug("Transfer URL response", response=response.data)
        if response.success:
            url = response.data
            # Agile may hand back an http URL; force https.
            url = url.replace('http://', 'https://')
        return url
from django.utils import timezone
from agiletixapi import AgileError, AgileSalesAPI
from agiletixapi.exceptions import AgileException, InvalidPromoException
from agiletixapi.models import Order
from agiletixapi.utils import datestring_to_ms_datestring
from agiletix.logging import get_logger
logger = get_logger('lib')
from agiletix.settings import AGILE_SETTINGS as SETTINGS
SESSION_CART_DATA = "SESSION_CART_DATA"
SESSION_EVENT_PRICE_CACHE_KEY = "SESSION_EVENT_PRICE_CACHE_KEY"
api = AgileSalesAPI(
base_url=SETTINGS['AGILE_BASE_URL'],
app_key=SETTINGS['AGILE_APP_KEY'],
user_key=SETTINGS['AGILE_USER_KEY'],
corp_org_id=SETTINGS['AGILE_CORP_ORG_ID']
)
def get_cart_for_request(request, force_non_member=False):
"""
Try to retrieve cart from the current session. If none found, create one
"""
cart = None
if hasattr(request, 'cart'):
cart = request.cart
if cart and cart.is_member and force_non_member:
cart = None
if not cart:
try:
cart = Cart(request=request, force_non_member=force_non_member)
except AgileException as e:
# TODO: Yeha
#logger.warning(__name__, "AgileException -> {}".format(e))
if e.code == 1024:
cart = get_cart_for_request(request, force_non_member=True)
#cart_error = e.code
return cart
class Cart(object):
_order = None
customer = None
request = None
def __init__(self, request, force_non_member=False):
self.request = request
if request.user.is_authenticated and not force_non_member:
self.customer = request.user
@property
def is_member(self):
return self.customer and self.customer.member_id
def start_order(self):
customer = self.customer
response = None
if customer:
if customer.member_id:
response = api.order_start(buyer_type_id=SETTINGS['AGILE_BUYER_TYPE_STANDARD_ID'] , customer_id=customer.customer_id, member_id=customer.member_id)
else:
response = api.order_start(buyer_type_id=SETTINGS['AGILE_BUYER_TYPE_STANDARD_ID'] , customer_id=customer.customer_id)
if not response.success:
if response.error.code == AgileError.MemberRenewalRequired:
raise AgileException(code=response.error.code, message=response.error.message)
# TODO: Handle others
if not customer or (response and not response.success):
response = api.order_start(buyer_type_id=SETTINGS['AGILE_BUYER_TYPE_STANDARD_ID'])
if response and response.success:
logger.debug("Order started", response=response.data)
order = Order(response.data)
else:
order = None
self.request.session[SESSION_CART_DATA] = json.dumps(order.to_json())
return order
def load_order(self):
order = None
json_object = None
order_json = self.request.session.get(SESSION_CART_DATA)
if order_json:
try:
json_object = json.loads(order_json)
except:
pass # TODO: Better handling here
if json_object:
logger.debug("Order loaded", order_json=json_object)
# Need to convert datetimes back to MS Json.NET before passing to Order object
# CloseDateTime, ExpirationDateTime, OpenDateTime
agile_json_object = {}
for key, value in json_object.items():
if "DateTime" in key:
agile_json_object[key] = datestring_to_ms_datestring(value)
else:
agile_json_object[key] = value
order = Order(agile_json_object)
return order
def validate_order(self, order):
valid = order.in_process
if order.expiration_datetime < timezone.now() or order.expired:
valid = False
customer = self.customer
if customer and customer.customer_id:
if not order.customer_id:
valid = False
elif (int(order.customer_id) != int(customer.customer_id)):
valid = False
return valid
@property
def order(self):
if not self._order:
order = self.load_order()
if order:
if not self.validate_order(order):
self.request.session[SESSION_CART_DATA] = None
order = None
if not order:
order = self.start_order()
self._order = order
return self._order
@order.setter
def order(self, value):
self._order = None
self.request.session[SESSION_CART_DATA] = json.dumps(value.to_json())
def add_tickets(self, agile_event_org_id, agile_event_id, tier_id, tickets, promo_codes=None):
"""
Tickets is a dictionary in the format:
{ TICKET_TYPE: QUANTITY }
"""
ticket_types = ",".join(tickets.keys())
quantities = ",".join([str(tickets[t]) for t in tickets.keys()])
self.add_ticket(
agile_event_org_id=agile_event_org_id,
agile_event_id=agile_event_id,
tier_id=tier_id,
ticket_types=ticket_types,
quantities=quantities,
promo_codes=promo_codes
)
def add_ticket(self, agile_event_org_id, agile_event_id, tier_id, ticket_types, quantities, promo_codes=None):
order = self.order
if promo_codes:
promo_codes = ",".join(promo_codes)
logger.debug("Adding ticket payload to cart",
order_id=order.order_id,
transaction_id=order.transaction_id,
agile_event_org_id=agile_event_org_id,
agile_event_id=agile_event_id,
tier_id=tier_id,
ticket_types=ticket_types,
quantities=quantities,
promo_codes=promo_codes
)
response = api.tickets_add(
order.order_id,
order.transaction_id,
agile_event_org_id,
agile_event_id,
tier_id,
ticket_types,
quantities,
promo_codes=promo_codes
)
logger.debug("Adding ticket response", response=response.data)
if not response.success:
if response.error.code == 1034:
raise InvalidPromoException
else:
raise AgileException(code=response.error.code, message=response.error.message)
def get_transfer_url(self):
response = api.order_transfer(self.order.order_id, self.order.transaction_id)
url = None
logger.debug("Transfer URL response", response=response.data)
if response.success:
url = response.data
url = url.replace('http://', 'https://')
return url | 0.145115 | 0.061706 |
__author__ = "chaitanya"
from collections import defaultdict
class Graph:
    """A directed multigraph of named nodes joined by typed, weighted edges."""

    def __init__(self, directed=True):
        # id -> Node / id -> Relation lookup tables.  (The original used
        # defaultdict() without a factory, which behaves exactly like dict.)
        self.relations = {}
        self.nodes = {}
        # name -> id reverse lookups
        self.node2id = {}
        self.relation2id = {}
        # edges[src_node][tgt_node] -> list of parallel Edge objects
        self.edges = {}
        self.edgeCount = 0
        self.directed = directed
        #self.add_node("UNK-NODE")
        #self.add_relation("UNK-REL")

    def add_edge(self, node1, node2, rel, label, weight, uri=None):
        """
        :param node1: source node (must already be registered via add_node)
        :param node2: target node
        :param rel: relation
        :param label: edge label
        :param weight: weight of edge from [0.0, 1.0]
        :param uri: uri of edge
        :return: Edge object
        """
        new_edge = Edge(node1, node2, rel, label, weight, uri)
        if node2 in self.edges[node1]:
            self.edges[node1][node2].append(new_edge)
        else:
            self.edges[node1][node2] = [new_edge]
        # node1.neighbors.add(node2)
        node2.neighbors.add(node1)
        self.edgeCount += 1
        if (self.edgeCount + 1) % 10000 == 0:
            # lightweight progress indicator for large graphs
            print("Number of edges: %d" % self.edgeCount, end="\r")
        return new_edge

    def add_node(self, name):
        """Register a new node under ``name`` and return its integer id."""
        new_node = Node(name, len(self.nodes))
        self.nodes[new_node.id] = new_node
        self.node2id[new_node.name] = new_node.id
        self.edges[new_node] = {}
        return self.node2id[new_node.name]

    def add_relation(self, name):
        """Register a new relation under ``name`` and return its integer id."""
        new_relation = Relation(name, len(self.relations))
        self.relations[new_relation.id] = new_relation
        self.relation2id[new_relation.name] = new_relation.id
        return self.relation2id[new_relation.name]

    def find_node(self, name):
        """Return the node id for ``name``, or -1 when unknown."""
        return self.node2id.get(name, -1)

    def find_relation(self, name):
        """Return the relation id for ``name``, or -1 when unknown."""
        return self.relation2id.get(name, -1)

    def is_connected(self, node1, node2):
        """Return True when at least one edge runs from node1 to node2."""
        return node1 in self.edges and node2 in self.edges[node1]

    def node_exists(self, node):
        """
        :param node: node to check
        :return: Boolean value
        """
        return node in self.nodes.values()

    def find_all_connections(self, relation):
        """
        :param relation:
        :return: list of all edges representing this relation
        """
        # BUG FIX: the original iterated over self.edges (whose keys are
        # Node objects) and read `.relation` off the nodes, which raised
        # AttributeError; walk the actual Edge objects instead.
        return [edge for edge in self.iter_edges() if edge.relation == relation]

    def iter_nodes(self):
        return list(self.nodes.values())

    def iter_relations(self):
        return list(self.relations.values())

    def iter_edges(self):
        # yields every Edge, including parallel edges between the same pair
        for node in self.edges:
            for edge_list in self.edges[node].values():
                for edge in edge_list:
                    yield edge

    def __str__(self):
        # BUG FIX: __str__ must return a string; the original printed the
        # dict keys (integer ids) and implicitly returned None.
        return "\n".join(str(node) for node in self.nodes.values())


class Node:
    """A named node; ``neighbors`` collects source nodes of incoming edges."""

    def __init__(self, name, id, lang='en'):
        self.name = name
        self.id = id
        self.lang = lang
        self.neighbors = set()

    def get_neighbors(self):
        """Return the set of neighboring nodes."""
        return self.neighbors

    def get_degree(self):
        """Return the number of neighboring nodes."""
        return len(self.neighbors)

    def __str__(self):
        return "Node #%d : %s" % (self.id, self.name)


class Relation:
    """A named relation (edge type) with an integer id."""

    def __init__(self, name, id):
        self.name = name
        self.id = id


class Edge:
    """A typed, weighted edge from ``src`` to ``tgt``."""

    def __init__(self, node1, node2, relation, label, weight, uri):
        self.src = node1
        self.tgt = node2
        self.relation = relation
        self.label = label
        self.weight = weight
        self.uri = uri

    def __str__(self):
        return "%s: %s --> %s" % (self.relation.name, self.src.name, self.tgt.name)
from collections import defaultdict
class Graph:
def __init__(self, directed=True):
self.relations = defaultdict()
self.nodes = defaultdict()
self.node2id = {}
self.relation2id = {}
self.edges = {}
self.edgeCount = 0
self.directed = directed
#self.add_node("UNK-NODE")
#self.add_relation("UNK-REL")
def add_edge(self, node1, node2, rel, label, weight, uri=None):
"""
:param node1: source node
:param node2: target node
:param rel: relation
:param label: relation
:param weight: weight of edge from [0.0, 1.0]
:param uri: uri of edge
:return: Edge object
"""
new_edge = Edge(node1, node2, rel, label, weight, uri)
if node2 in self.edges[node1]:
self.edges[node1][node2].append(new_edge)
else:
self.edges[node1][node2] = [new_edge]
# node1.neighbors.add(node2)
node2.neighbors.add(node1)
self.edgeCount += 1
if (self.edgeCount + 1) % 10000 == 0:
print("Number of edges: %d" % self.edgeCount, end="\r")
return new_edge
def add_node(self, name):
"""
:param name:
:return:
"""
new_node = Node(name, len(self.nodes))
self.nodes[len(self.nodes)] = new_node
self.node2id[new_node.name] = len(self.nodes) - 1
self.edges[new_node] = {}
return self.node2id[new_node.name]
def add_relation(self, name):
"""
:param name
:return:
"""
new_relation = Relation(name, len(self.relations))
self.relations[len(self.relations)] = new_relation
self.relation2id[new_relation.name] = len(self.relations) - 1
return self.relation2id[new_relation.name]
def find_node(self, name):
"""
:param name:
:return:
"""
if name in self.node2id:
return self.node2id[name]
else:
return -1
def find_relation(self, name):
"""
:param name:
:return:
"""
if name in self.relation2id:
return self.relation2id[name]
else:
return -1
def is_connected(self, node1, node2):
"""
:param node1:
:param node2:
:return:
"""
if node1 in self.edges:
if node2 in self.edges[node1]:
return True
return False
def node_exists(self, node):
"""
:param node: node to check
:return: Boolean value
"""
if node in self.nodes.values():
return True
return False
def find_all_connections(self, relation):
"""
:param relation:
:return: list of all edges representing this relation
"""
relevant_edges = []
for edge in self.edges:
if edge.relation == relation:
relevant_edges.append(edge)
return relevant_edges
def iter_nodes(self):
return list(self.nodes.values())
def iter_relations(self):
return list(self.relations.values())
def iter_edges(self):
for node in self.edges:
for edge_list in self.edges[node].values():
for edge in edge_list:
yield edge
def __str__(self):
for node in self.nodes:
print(node)
class Node:
def __init__(self, name, id, lang='en'):
self.name = name
self.id = id
self.lang = lang
self.neighbors = set([])
def get_neighbors(self):
"""
:param node:
:return:
"""
return self.neighbors
def get_degree(self):
"""
:param node:
:return:
"""
return len(self.neighbors)
def __str__(self):
out = ("Node #%d : %s" % (self.id, self.name))
return out
class Relation:
def __init__(self, name, id):
self.name = name
self.id = id
class Edge:
def __init__(self, node1, node2, relation, label, weight, uri):
self.src = node1
self.tgt = node2
self.relation = relation
self.label = label
self.weight = weight
self.uri = uri
def __str__(self):
out = ("%s: %s --> %s" % (self.relation.name, self.src.name, self.tgt.name))
return out | 0.780704 | 0.293151 |
import ftplib
from io import BytesIO
from pathlib import Path
from typing import Union, List
class FTP(ftplib.FTP):
    """
    A modified FTP class that has the capabilities to scan a ftp directory
    and provide easy to use download functions with out the need to build
    request strings or similiar stuff.
    """

    def list_files(self,
                   remote_path: Union[Path, str],
                   also_subfolders: bool) -> List[str]:
        """
        Args:
            remote_path(str): a path that is searched for files
            also_subfolders(bool): a bool that defines if subfolders should also be searched for files
        Returns:
            list of strings of all files (and files of subfolders) that can be found in a given directory

        NOTE(review): entries are classified as directories purely by the
        absence of a "." in the name — assumes no extensionless files on
        the server; confirm for the servers this is used against.
        """
        entries = self.nlst(remote_path)
        # BUG FIX: the original popped items out of the list *while*
        # enumerating it, which skips entries and can leave directory names
        # in the file list; partition the listing without mutating it.
        path_directories = [entry for entry in entries if "." not in entry]
        path_files = [entry for entry in entries if "." in entry]
        server_files = []
        if also_subfolders:
            # depth-first: subfolder files come before this level's files,
            # matching the original ordering
            for directory in path_directories:
                server_files.extend(self.list_files(remote_path=directory,
                                                    also_subfolders=also_subfolders))
        server_files.extend(path_files)
        return server_files

    def read_file_to_bytes(self,
                           remote_file_path: Union[Path, str]) -> BytesIO:
        """
        Args:
            remote_file_path: path to the file on the server
        Returns:
            in-memory buffer with the file's bytes, rewound to position 0
        """
        file = BytesIO()
        self.retrbinary(f"RETR {remote_file_path}", file.write)
        file.seek(0)
        return file

    def download(self,
                 remote_file_path: Union[Path, str],
                 local_file_path: Union[Path, str]):
        """Download the remote file straight to ``local_file_path``."""
        with open(local_file_path, "wb") as file:
            self.retrbinary(f"RETR {remote_file_path}", file.write)
def ftp_file_download(ftp_connection: FTP,
                      remote_file_path: Union[Path, str],
                      local_file_path: Union[Path, str]):
    """
    Download one remote file and store it locally.

    Args:
        ftp_connection: connection to an ftp server
        remote_file_path: path to the file on the server
        local_file_path: path where the file should be stored
    Returns:
        store file on local file system
    """
    ftp_connection.download(remote_file_path, local_file_path)
from io import BytesIO
from pathlib import Path
from typing import Union, List
class FTP(ftplib.FTP):
"""
A modified FTP class that has the capabilities to scan a ftp directory
and provide easy to use download functions with out the need to build
request strings or similiar stuff.
"""
def list_files(self,
remote_path: Union[Path, str],
also_subfolders: bool) -> List[str]:
"""
Args:
remote_path(str): a path that is searched for files
also_subfolders(bool): a bool that defines if subfolders should also be searched for files
Returns:
list of strings of all files (and files of subfolders) that can be found in a given directory
"""
server_files = []
path_files = self.nlst(remote_path)
path_directories = [path_files.pop(file_id)
for file_id, file in enumerate(path_files)
if "." not in file]
if also_subfolders:
for directory in path_directories:
server_files.extend(self.list_files(remote_path=directory,
also_subfolders=also_subfolders))
server_files.extend(path_files)
return server_files
def read_file_to_bytes(self,
remote_file_path: Union[Path, str]) -> BytesIO:
"""
Args:
remote_file_path:
Returns:
"""
file = BytesIO()
self.retrbinary(f"RETR {remote_file_path}", file.write)
file.seek(0)
return file
def download(self,
remote_file_path: Union[Path, str],
local_file_path: Union[Path, str]):
with open(local_file_path, "wb") as file:
self.retrbinary(f"RETR {remote_file_path}", file.write)
def ftp_file_download(ftp_connection: FTP,
remote_file_path: Union[Path, str],
local_file_path: Union[Path, str]):
"""
Args:
ftp_connection: connection to an ftp server
remote_file_path: path to the file on the server
local_file_path: path where the file should be stored
Returns:
store file on local file system
"""
ftp_connection.download(remote_file_path,
local_file_path) | 0.852076 | 0.361334 |
def snail(a: list[list[int]]) -> list[int]:
    """Return the elements of matrix ``a`` in clockwise spiral order.

    Generalized to rectangular matrices (the original assumed square).
    An empty matrix yields an empty list — the original looped forever on
    ``[]`` because ``limit`` started at -1 and never reached 0.
    """
    if not a or not a[0]:
        return []
    out = []
    top, bottom = 0, len(a) - 1
    left, right = 0, len(a[0]) - 1
    while top <= bottom and left <= right:
        # top row, left -> right
        out.extend(a[top][col] for col in range(left, right + 1))
        # right column, top+1 -> bottom
        out.extend(a[row][right] for row in range(top + 1, bottom + 1))
        if top < bottom and left < right:
            # bottom row, right-1 -> left
            out.extend(a[bottom][col] for col in range(right - 1, left - 1, -1))
            # left column, bottom-1 -> top+1
            out.extend(a[row][left] for row in range(bottom - 1, top, -1))
        top, bottom, left, right = top + 1, bottom - 1, left + 1, right - 1
    return out
if __name__ == '__main__':
    # Each case: [function name, input matrix, expected spiral order].
    basic_tests = [
        ['snail',
         [[1, 2, 3],
          [4, 5, 6],
          [7, 8, 9]],
         [1, 2, 3, 6, 9, 8, 7, 4, 5]],
        ['snail',
         [[1, 2, 3],
          [8, 9, 4],
          [7, 6, 5]],
         [1, 2, 3, 4, 5, 6, 7, 8, 9]],
    ]
    # Run every case; append the expected value only when the result differs.
    for fn_name, a, expected in basic_tests:
        result = globals()[fn_name](a)
        print(f'{fn_name}({a}) returns {result}'
              f'{f", expected: {expected}" if result != expected else ""}')
# _ _ _ _
# | | | | | | (_)
# | |__ ___ ___| |_ _ __ _ __ __ _ ___| |_ _ ___ ___
# | '_ \ / _ \/ __| __| | '_ \| '__/ _` |/ __| __| |/ __/ _ \
# | |_) | __/\__ \ |_ | |_) | | | (_| | (__| |_| | (_| __/
# |_.__/ \___||___/\__| | .__/|_| \__,_|\___|\__|_|\___\___|
# | | written by
# |_| https://codewars.com/users/jolaf
'''jolaf
def snail(array):
ret = []
if array and array[0]:
size = len(array)
for n in xrange((size + 1) // 2):
for x in xrange(n, size - n):
ret.append(array[n][x])
for y in xrange(1 + n, size - n):
ret.append(array[y][-1 - n])
for x in xrange(2 + n, size - n + 1):
ret.append(array[-1 - n][-x])
for y in xrange(2 + n, size - n):
ret.append(array[-y][n])
return ret
''' | 4kyu/snail.py | def snail(a: list[int]) -> list[int]:
pos, start, limit = [], 0, len(a) - 1
while limit:
r = [[start, i] for i in range(start, limit)]
d = [[i, limit] for i in range(start, limit)]
l = [[limit, i] for i in range(limit, start, -1)]
u = [[i, start] for i in range(limit, start, -1)]
for position in r + d + l + u:
pos.append([position[0], position[1]])
start, limit = start + 1, limit - 1
if start == limit:
pos.append([start, limit])
return [a[p[0]][p[1]] for p in pos] if len(pos) != 0 else a[0] if type(a[0]) == list else a
if __name__ == '__main__':
basic_tests = [
['snail', [[1,2,3],
[4,5,6],
[7,8,9]],
[1,2,3,6,9,8,7,4,5]
],
['snail', [[1,2,3],
[8,9,4],
[7,6,5]],
[1,2,3,4,5,6,7,8,9]
]
]
for test in basic_tests:
fn_name, a, expected = test
result = globals()[fn_name](a)
print(f'{fn_name}({a}) returns {result}'
f'{f", expected: {expected}" if result != expected else ""}')
# _ _ _ _
# | | | | | | (_)
# | |__ ___ ___| |_ _ __ _ __ __ _ ___| |_ _ ___ ___
# | '_ \ / _ \/ __| __| | '_ \| '__/ _` |/ __| __| |/ __/ _ \
# | |_) | __/\__ \ |_ | |_) | | | (_| | (__| |_| | (_| __/
# |_.__/ \___||___/\__| | .__/|_| \__,_|\___|\__|_|\___\___|
# | | written by
# |_| https://codewars.com/users/jolaf
'''jolaf
def snail(array):
ret = []
if array and array[0]:
size = len(array)
for n in xrange((size + 1) // 2):
for x in xrange(n, size - n):
ret.append(array[n][x])
for y in xrange(1 + n, size - n):
ret.append(array[y][-1 - n])
for x in xrange(2 + n, size - n + 1):
ret.append(array[-1 - n][-x])
for y in xrange(2 + n, size - n):
ret.append(array[-y][n])
return ret
''' | 0.266453 | 0.495117 |
import codecs
import csv
import hashlib
import logging
import os
import random
import zipfile
import tensorflow as tf
from .. import settings, utils as rainbow_utils
from . import preparer, utils as preparer_utils
logger = logging.getLogger(__name__)
# main class
class JOCIPreparer(preparer.Preparer):
    """Prepare JOCI for text-to-text modeling."""

    JOCI = {
        "name": "joci",
        "splits": {
            "train": {"name": "train", "size": 34092},
            "validation": {"name": "validation", "size": 2500},
        },
        "url": "http://decomp.io/projects/common-sense-inference/joci.zip",
        "checksum": "7812ddfa6e58d6bc8010dc88d9d2600cb8c559fc978201223012256f609017cb",
        "file_name": "joci.zip",
        "csv_path": "joci.csv",
    }
    """Configuration data for JOCI."""

    def prepare(self, src: str, dst: str, force_download: bool = False) -> None:
        """See ``rainbow.preparation.preparer.Preparer``."""
        # Create the directory for saving the source files.
        tf.io.gfile.makedirs(os.path.join(src, self.JOCI["name"]))
        # Create the directory for saving the prepared files.
        tf.io.gfile.makedirs(os.path.join(dst, self.JOCI["name"]))
        src_path = os.path.join(src, self.JOCI["name"], self.JOCI["file_name"])
        # Copy the dataset to src_path from the URL.
        if not tf.io.gfile.exists(src_path) or force_download:
            logger.info(
                f"Downloading {self.JOCI['name']} from {self.JOCI['url']}"
                f" to {src_path}."
            )
            preparer_utils.copy_url_to_gfile(self.JOCI["url"], src_path)
        with tf.io.gfile.GFile(src_path, "rb") as src_file:
            # Verify the dataset file against its checksum.
            sha256 = hashlib.sha256()
            chunk = None
            while chunk != b"":
                # Read in 64KB at a time.
                chunk = src_file.read(64 * 1024)
                sha256.update(chunk)
            checksum = sha256.hexdigest()
            if checksum != self.JOCI["checksum"]:
                raise IOError(
                    f"The file for {self.JOCI['name']} did not have the"
                    f" expected checksum. Try running with force_download=True"
                    f" to redownload all files, or consider updating the"
                    f" datasets' checksums."
                )
            # Return to the beginning of the file.
            src_file.seek(0)
            # Read the data from the JOCI file.
            with zipfile.ZipFile(src_file, "r") as src_zip:
                with src_zip.open(self.JOCI["csv_path"], "r") as joci_csv:
                    joci_csv = codecs.getreader("utf-8")(joci_csv)
                    reader = csv.DictReader(joci_csv)
                    data = [x for x in reader]
        # Prepare and write the splits to dst.
        # Shuffle the JOCI data with a fixed seed so splits are
        # reproducible, then restore the caller's RNG state.
        random_state = random.getstate()
        random.seed(rainbow_utils.string_to_seed(self.JOCI["name"]))
        random.shuffle(data)
        random.setstate(random_state)
        for split in self.JOCI["splits"].values():
            dst_path = os.path.join(
                dst,
                self.JOCI["name"],
                settings.PREPROCESSED_SPLIT_FILE_NAME_TEMPLATE.format(
                    split=split["name"], dataset=self.JOCI["name"]
                ),
            )
            with tf.io.gfile.GFile(dst_path, "w") as dst_file:
                rows_written = 0
                writer = csv.DictWriter(
                    dst_file,
                    fieldnames=["index", "inputs", "targets"],
                    dialect="unix",
                )
                writer.writeheader()
                # Carve this split off the front of the shuffled data.
                split_data, data = data[: split["size"]], data[split["size"] :]
                for i, row_in in enumerate(split_data):
                    row_out = {
                        "index": rows_written,
                        "inputs": (
                            f"[{self.JOCI['name']}]:\n"
                            f"<context>{row_in['CONTEXT']}</context>\n"
                            f"<hypothesis>{row_in['HYPOTHESIS']}</hypothesis>"
                        ),
                        "targets": row_in["LABEL"],
                    }
                    if i == 0:
                        # Log the first example of each split for sanity.
                        logger.info(
                            f"\n\n"
                            f"Example {row_out['index']} from"
                            f" {self.JOCI['name']}'s {split['name']} split:\n"
                            f"inputs:\n"
                            f"{row_out['inputs']}\n"
                            f"targets:\n"
                            f"{row_out['targets']}\n"
                            f"\n"
                        )
                    # Write to the CSV.
                    writer.writerow(row_out)
                    rows_written += 1
            if rows_written != split["size"]:
                # BUG FIX: `split` is a dict, so the original `split.size`
                # raised AttributeError while formatting this message.
                logger.error(
                    f"Expected to write {split['size']} rows for the"
                    f" {split['name']} split of {self.JOCI['name']}, instead"
                    f" {rows_written} were written."
                )
        logger.info("Finished processing JOCI.")
import codecs
import csv
import hashlib
import logging
import os
import random
import zipfile
import tensorflow as tf
from .. import settings, utils as rainbow_utils
from . import preparer, utils as preparer_utils
logger = logging.getLogger(__name__)
# main class
class JOCIPreparer(preparer.Preparer):
"""Prepare JOCI for text-to-text modeling."""
JOCI = {
"name": "joci",
"splits": {
"train": {"name": "train", "size": 34092},
"validation": {"name": "validation", "size": 2500},
},
"url": "http://decomp.io/projects/common-sense-inference/joci.zip",
"checksum": "7812ddfa6e58d6bc8010dc88d9d2600cb8c559fc978201223012256f609017cb",
"file_name": "joci.zip",
"csv_path": "joci.csv",
}
"""Configuration data for JOCI."""
def prepare(self, src: str, dst: str, force_download: bool = False) -> None:
"""See ``rainbow.preparation.preparer.Preparer``."""
# Create the directory for saving the source files.
tf.io.gfile.makedirs(os.path.join(src, self.JOCI["name"]))
# Create the directory for saving the prepared files.
tf.io.gfile.makedirs(os.path.join(dst, self.JOCI["name"]))
src_path = os.path.join(src, self.JOCI["name"], self.JOCI["file_name"])
# Copy the dataset to src_path from the URL.
if not tf.io.gfile.exists(src_path) or force_download:
logger.info(
f"Downloading {self.JOCI['name']} from {self.JOCI['url']}"
f" to {src_path}."
)
preparer_utils.copy_url_to_gfile(self.JOCI["url"], src_path)
with tf.io.gfile.GFile(src_path, "rb") as src_file:
# Verify the dataset file against its checksum.
sha256 = hashlib.sha256()
chunk = None
while chunk != b"":
# Read in 64KB at a time.
chunk = src_file.read(64 * 1024)
sha256.update(chunk)
checksum = sha256.hexdigest()
if checksum != self.JOCI["checksum"]:
raise IOError(
f"The file for {self.JOCI['name']} did not have the"
f" expected checksum. Try running with force_download=True"
f" to redownload all files, or consider updating the"
f" datasets' checksums."
)
# Return to the beginning of the file.
src_file.seek(0)
# Read the data from the JOCI file.
with zipfile.ZipFile(src_file, "r") as src_zip:
with src_zip.open(self.JOCI["csv_path"], "r") as joci_csv:
joci_csv = codecs.getreader("utf-8")(joci_csv)
reader = csv.DictReader(joci_csv)
data = [x for x in reader]
# Prepare and write the splits to dst.
# Shuffle and split the JOCI data.
random_state = random.getstate()
random.seed(rainbow_utils.string_to_seed(self.JOCI["name"]))
random.shuffle(data)
random.setstate(random_state)
for split in self.JOCI["splits"].values():
dst_path = os.path.join(
dst,
self.JOCI["name"],
settings.PREPROCESSED_SPLIT_FILE_NAME_TEMPLATE.format(
split=split["name"], dataset=self.JOCI["name"]
),
)
with tf.io.gfile.GFile(dst_path, "w") as dst_file:
rows_written = 0
writer = csv.DictWriter(
dst_file,
fieldnames=["index", "inputs", "targets"],
dialect="unix",
)
writer.writeheader()
split_data, data = data[: split["size"]], data[split["size"] :]
for i, row_in in enumerate(split_data):
row_out = {
"index": rows_written,
"inputs": (
f"[{self.JOCI['name']}]:\n"
f"<context>{row_in['CONTEXT']}</context>\n"
f"<hypothesis>{row_in['HYPOTHESIS']}</hypothesis>"
),
"targets": row_in["LABEL"],
}
if i == 0:
logger.info(
f"\n\n"
f"Example {row_out['index']} from"
f" {self.JOCI['name']}'s {split['name']} split:\n"
f"inputs:\n"
f"{row_out['inputs']}\n"
f"targets:\n"
f"{row_out['targets']}\n"
f"\n"
)
# Write to the CSV.
writer.writerow(row_out)
rows_written += 1
if rows_written != split["size"]:
logger.error(
f"Expected to write {split.size} rows for the"
f" {split['name']} split of {self.JOCI['name']}, instead"
f" {rows_written} were written."
)
logger.info(f"Finished processing JOCI.") | 0.627152 | 0.242463 |
import subprocess
class CEosAccount:
    """Thin wrapper around a 12-character EOS account name."""

    def __init__(self, account):
        # Delegate to set() so construction and reassignment share validation.
        self.set(account)

    def set(self, account):
        """Validate and store the account name (must be exactly 12 chars)."""
        if len(account) == 12:
            self.a = account
        else:
            raise ValueError("unvalid format of @account")

    def __str__(self):
        return self.a
class CleosCmdBuiler:
    """Builds cleos command-line strings for a fixed local node/wallet.

    (Class name spelling kept as-is: callers reference ``CleosCmdBuiler``.)
    """

    def __init__(self):
        # Hard-coded node and wallet endpoints; trailing space is intentional
        # so subcommands can be appended directly.
        self.bin_cleos = '/root/eosio/1.8/bin/cleos --url http://127.0.0.1:7770 --wallet-url http://127.0.0.1:5550 '

    @staticmethod
    def _as_account(account):
        """Coerce *account* (str or CEosAccount) to CEosAccount.

        Raises TypeError for any other type. Extracted to remove the
        duplicated validation blocks the two public methods carried.
        """
        if isinstance(account, CEosAccount):
            return account
        if isinstance(account, str):
            return CEosAccount(account)
        raise TypeError("invalid type for @account")

    def cleos__get_account(self, account):
        """Return the ``cleos get account <name> -j`` command line."""
        a = self._as_account(account)
        return '%s get account %s -j' % (self.bin_cleos, a)

    def cleos__system_buyram(self, account, receiver, buyamount):
        """Return the ``cleos system buyram`` command line.

        *receiver* defaults to the payer account when empty/None;
        *buyamount* must be an integer (formatted with %d).
        """
        a = self._as_account(account)
        # Empty or missing receiver means "buy for yourself".
        r = a if receiver == '' or receiver is None else receiver
        return '%s system buyram -f %s %s "%d.00000000 SAFE"' % (self.bin_cleos, a, r, buyamount)
class CSubprocess:
    """Thin logging wrappers around the subprocess module."""

    def __init__(self):
        pass

    def check_call(self, cmd, shell=False):
        """Run *cmd*, echoing the command line and return code."""
        print('cmd-line: %s' % cmd)
        code = subprocess.check_call(cmd, shell=shell)
        print('ret-code: %s' % code)
        return code

    def check_output(self, cmd, shell=False):
        """Run *cmd* and return its captured stdout, echoing both."""
        print('cmd-line: %s' % cmd)
        out = subprocess.check_output(cmd, shell=shell)
        print('ret-out: %s' % out)
        return out

    def popen(self, cmd, shell=False, stdout=subprocess.PIPE):
        """Start *cmd* and return the Popen object (stdout piped by default)."""
        print('cmd-line(popened): %s' % cmd)
        return subprocess.Popen(cmd, shell=shell, stdout=stdout)

    def popen_stdout(self, p, cb_return=None):
        """Drain p.stdout line by line into one buffer.

        Stops early as soon as cb_return(buffer) is truthy; returns the
        accumulated output.
        """
        collected = ''
        for line in iter(p.stdout.readline, b''):  # b'' == EOF sentinel
            collected += line
            if cb_return and cb_return(collected):
                break
        print('ret-out(popened): %s' % collected)
        return collected
import subprocess
class CEosAccount:
    """Thin wrapper around a 12-character EOS account name."""

    def __init__(self, account):
        # Delegate to set() so construction and reassignment share validation.
        self.set(account)

    def set(self, account):
        """Validate and store the account name (must be exactly 12 chars)."""
        if len(account) == 12:
            self.a = account
        else:
            raise ValueError("unvalid format of @account")

    def __str__(self):
        return self.a
class CleosCmdBuiler:
    """Builds cleos command-line strings for a fixed local node/wallet.

    (Class name spelling kept as-is: callers reference ``CleosCmdBuiler``.)
    """

    def __init__(self):
        # Hard-coded node and wallet endpoints; trailing space is intentional
        # so subcommands can be appended directly.
        self.bin_cleos = '/root/eosio/1.8/bin/cleos --url http://127.0.0.1:7770 --wallet-url http://127.0.0.1:5550 '

    @staticmethod
    def _as_account(account):
        """Coerce *account* (str or CEosAccount) to CEosAccount.

        Raises TypeError for any other type. Extracted to remove the
        duplicated validation blocks the two public methods carried.
        """
        if isinstance(account, CEosAccount):
            return account
        if isinstance(account, str):
            return CEosAccount(account)
        raise TypeError("invalid type for @account")

    def cleos__get_account(self, account):
        """Return the ``cleos get account <name> -j`` command line."""
        a = self._as_account(account)
        return '%s get account %s -j' % (self.bin_cleos, a)

    def cleos__system_buyram(self, account, receiver, buyamount):
        """Return the ``cleos system buyram`` command line.

        *receiver* defaults to the payer account when empty/None;
        *buyamount* must be an integer (formatted with %d).
        """
        a = self._as_account(account)
        # Empty or missing receiver means "buy for yourself".
        r = a if receiver == '' or receiver is None else receiver
        return '%s system buyram -f %s %s "%d.00000000 SAFE"' % (self.bin_cleos, a, r, buyamount)
class CSubprocess:
    """Thin logging wrappers around the subprocess module."""

    def __init__(self):
        pass

    def check_call(self, cmd, shell=False):
        """Run *cmd*, echoing the command line and return code."""
        print('cmd-line: %s' % cmd)
        code = subprocess.check_call(cmd, shell=shell)
        print('ret-code: %s' % code)
        return code

    def check_output(self, cmd, shell=False):
        """Run *cmd* and return its captured stdout, echoing both."""
        print('cmd-line: %s' % cmd)
        out = subprocess.check_output(cmd, shell=shell)
        print('ret-out: %s' % out)
        return out

    def popen(self, cmd, shell=False, stdout=subprocess.PIPE):
        """Start *cmd* and return the Popen object (stdout piped by default)."""
        print('cmd-line(popened): %s' % cmd)
        return subprocess.Popen(cmd, shell=shell, stdout=stdout)

    def popen_stdout(self, p, cb_return=None):
        """Drain p.stdout line by line into one buffer.

        Stops early as soon as cb_return(buffer) is truthy; returns the
        accumulated output.
        """
        collected = ''
        for line in iter(p.stdout.readline, b''):  # b'' == EOF sentinel
            collected += line
            if cb_return and cb_return(collected):
                break
        print('ret-out(popened): %s' % collected)
        return collected
import unittest
import sqlite3 as sqlite
from collections import Sequence
class MyConnection(sqlite.Connection):
    """Connection subclass used to exercise the connect(factory=...) hook."""

    def __init__(self, *args, **kwargs):
        super(MyConnection, self).__init__(*args, **kwargs)
def dict_factory(cursor, row):
    """Row factory mapping column names (cursor.description[i][0]) to values."""
    names = (column[0] for column in cursor.description)
    return dict(zip(names, row))
class MyCursor(sqlite.Cursor):
    """Cursor subclass whose rows come back as dicts via dict_factory."""

    def __init__(self, *args, **kwargs):
        super(MyCursor, self).__init__(*args, **kwargs)
        self.row_factory = dict_factory
class ConnectionFactoryTests(unittest.TestCase):
    # connect(factory=...) must instantiate the given Connection subclass.
    def setUp(self):
        self.con = sqlite.connect(":memory:", factory=MyConnection)
    def tearDown(self):
        self.con.close()
    def CheckIsInstance(self):
        self.assertIsInstance(self.con, MyConnection)
class CursorFactoryTests(unittest.TestCase):
    # con.cursor(factory=...) must instantiate the given Cursor subclass.
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def tearDown(self):
        self.con.close()
    def CheckIsInstance(self):
        cur = self.con.cursor(factory=MyCursor)
        self.assertIsInstance(cur, MyCursor)
class RowFactoryTestsBackwardsCompat(unittest.TestCase):
    # Legacy path: a cursor-level row_factory (installed by MyCursor's
    # __init__) must still shape fetched rows (dicts via dict_factory).
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckIsProducedByFactory(self):
        cur = self.con.cursor(factory=MyCursor)
        cur.execute("select 4+5 as foo")
        row = cur.fetchone()
        self.assertIsInstance(row, dict)
        cur.close()
    def tearDown(self):
        self.con.close()
class RowFactoryTests(unittest.TestCase):
    # Exercises con.row_factory and the built-in sqlite.Row type:
    # index/name access, iteration, tuple/dict conversion, hashing,
    # comparison, and sequence behaviour.
    # NOTE: Python 2 code (``0L`` long literals below).
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckCustomFactory(self):
        # A callable row_factory receives (cursor, row_tuple).
        self.con.row_factory = lambda cur, row: list(row)
        row = self.con.execute("select 1, 2").fetchone()
        self.assertIsInstance(row, list)
    def CheckSqliteRowIndex(self):
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        self.assertIsInstance(row, sqlite.Row)
        col1, col2 = row["a"], row["b"]
        self.assertEqual(col1, 1, "by name: wrong result for column 'a'")
        # NOTE(review): message below says column 'a' but the check is for 'b'.
        self.assertEqual(col2, 2, "by name: wrong result for column 'a'")
        # Name lookup is case-insensitive.
        col1, col2 = row["A"], row["B"]
        self.assertEqual(col1, 1, "by name: wrong result for column 'A'")
        self.assertEqual(col2, 2, "by name: wrong result for column 'B'")
        # Index lookup: int and long, positive and negative.
        self.assertEqual(row[0], 1, "by index: wrong result for column 0")
        self.assertEqual(row[0L], 1, "by index: wrong result for column 0")
        self.assertEqual(row[1], 2, "by index: wrong result for column 1")
        self.assertEqual(row[1L], 2, "by index: wrong result for column 1")
        self.assertEqual(row[-1], 2, "by index: wrong result for column -1")
        self.assertEqual(row[-1L], 2, "by index: wrong result for column -1")
        self.assertEqual(row[-2], 1, "by index: wrong result for column -2")
        self.assertEqual(row[-2L], 1, "by index: wrong result for column -2")
        # Out-of-range names/indexes raise IndexError (even huge ints).
        with self.assertRaises(IndexError):
            row['c']
        with self.assertRaises(IndexError):
            row[2]
        with self.assertRaises(IndexError):
            row[2L]
        with self.assertRaises(IndexError):
            row[-3]
        with self.assertRaises(IndexError):
            row[-3L]
        with self.assertRaises(IndexError):
            row[2**1000]
    def CheckSqliteRowIter(self):
        """Checks if the row object is iterable"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        for col in row:
            pass
    def CheckSqliteRowAsTuple(self):
        """Checks if the row object can be converted to a tuple"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        t = tuple(row)
        self.assertEqual(t, (row['a'], row['b']))
    def CheckSqliteRowAsDict(self):
        """Checks if the row object can be correctly converted to a dictionary"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        d = dict(row)
        self.assertEqual(d["a"], row["a"])
        self.assertEqual(d["b"], row["b"])
    def CheckSqliteRowHashCmp(self):
        """Checks if the row object compares and hashes correctly"""
        self.con.row_factory = sqlite.Row
        row_1 = self.con.execute("select 1 as a, 2 as b").fetchone()
        row_2 = self.con.execute("select 1 as a, 2 as b").fetchone()
        row_3 = self.con.execute("select 1 as a, 3 as b").fetchone()
        # Equal rows: ==, !=, and hash all agree.
        self.assertEqual(row_1, row_1)
        self.assertEqual(row_1, row_2)
        self.assertTrue(row_2 != row_3)
        self.assertFalse(row_1 != row_1)
        self.assertFalse(row_1 != row_2)
        self.assertFalse(row_2 == row_3)
        self.assertEqual(row_1, row_2)
        self.assertEqual(hash(row_1), hash(row_2))
        self.assertNotEqual(row_1, row_3)
        self.assertNotEqual(hash(row_1), hash(row_3))
    def CheckSqliteRowAsSequence(self):
        """ Checks if the row object can act like a sequence """
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        as_tuple = tuple(row)
        self.assertEqual(list(reversed(row)), list(reversed(as_tuple)))
        self.assertIsInstance(row, Sequence)
    def tearDown(self):
        self.con.close()
class TextFactoryTests(unittest.TestCase):
    # Python 2 tests for con.text_factory (how TEXT values are converted).
    # ``unicode`` and the u-literals below are Py2-only.
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckUnicode(self):
        # Default text_factory yields unicode.
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
    def CheckString(self):
        # text_factory = str yields UTF-8 encoded byte strings.
        self.con.text_factory = str
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.assertEqual(type(row[0]), str, "type of row[0] must be str")
        self.assertEqual(row[0], austria.encode("utf-8"), "column must equal original data in UTF-8")
    def CheckCustom(self):
        # Any callable taking the raw bytes can serve as text_factory.
        self.con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria.encode("latin1"),)).fetchone()
        self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
        self.assertTrue(row[0].endswith(u"reich"), "column must contain original data")
    def CheckOptimizedUnicode(self):
        # OptimizedUnicode: ASCII-only text -> str, otherwise unicode.
        self.con.text_factory = sqlite.OptimizedUnicode
        austria = unicode("Österreich", "latin1")
        germany = unicode("Deutchland")
        a_row = self.con.execute("select ?", (austria,)).fetchone()
        d_row = self.con.execute("select ?", (germany,)).fetchone()
        self.assertEqual(type(a_row[0]), unicode, "type of non-ASCII row must be unicode")
        self.assertEqual(type(d_row[0]), str, "type of ASCII-only row must be str")
    def tearDown(self):
        self.con.close()
class TextFactoryTestsWithEmbeddedZeroBytes(unittest.TestCase):
    # text_factory behaviour for TEXT values containing NUL bytes
    # (the full value, not just up to the first \x00, must survive).
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.execute("create table test (value text)")
        self.con.execute("insert into test (value) values (?)", ("a\x00b",))
    def CheckString(self):
        # text_factory defaults to unicode
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), unicode)
        self.assertEqual(row[0], "a\x00b")
    def CheckCustom(self):
        # A custom factory should receive an str argument
        self.con.text_factory = lambda x: x
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), str)
        self.assertEqual(row[0], "a\x00b")
    def CheckOptimizedUnicodeAsString(self):
        # ASCII -> str argument
        self.con.text_factory = sqlite.OptimizedUnicode
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), str)
        self.assertEqual(row[0], "a\x00b")
    def CheckOptimizedUnicodeAsUnicode(self):
        # Non-ASCII -> unicode argument
        self.con.text_factory = sqlite.OptimizedUnicode
        self.con.execute("delete from test")
        self.con.execute("insert into test (value) values (?)", (u'ä\0ö',))
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), unicode)
        self.assertEqual(row[0], u"ä\x00ö")
    def tearDown(self):
        self.con.close()
def suite():
    """Collect every Check*-prefixed test from the factory test cases, in order."""
    cases = (
        ConnectionFactoryTests,
        CursorFactoryTests,
        RowFactoryTestsBackwardsCompat,
        RowFactoryTests,
        TextFactoryTests,
        TextFactoryTestsWithEmbeddedZeroBytes,
    )
    return unittest.TestSuite(unittest.makeSuite(case, "Check") for case in cases)
def test():
    """Run the whole factory test suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test()
import unittest
import sqlite3 as sqlite
from collections import Sequence
class MyConnection(sqlite.Connection):
    """Connection subclass used to exercise the connect(factory=...) hook."""

    def __init__(self, *args, **kwargs):
        super(MyConnection, self).__init__(*args, **kwargs)
def dict_factory(cursor, row):
    """Row factory mapping column names (cursor.description[i][0]) to values."""
    names = (column[0] for column in cursor.description)
    return dict(zip(names, row))
class MyCursor(sqlite.Cursor):
    """Cursor subclass whose rows come back as dicts via dict_factory."""

    def __init__(self, *args, **kwargs):
        super(MyCursor, self).__init__(*args, **kwargs)
        self.row_factory = dict_factory
class ConnectionFactoryTests(unittest.TestCase):
    # connect(factory=...) must instantiate the given Connection subclass.
    def setUp(self):
        self.con = sqlite.connect(":memory:", factory=MyConnection)
    def tearDown(self):
        self.con.close()
    def CheckIsInstance(self):
        self.assertIsInstance(self.con, MyConnection)
class CursorFactoryTests(unittest.TestCase):
    # con.cursor(factory=...) must instantiate the given Cursor subclass.
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def tearDown(self):
        self.con.close()
    def CheckIsInstance(self):
        cur = self.con.cursor(factory=MyCursor)
        self.assertIsInstance(cur, MyCursor)
class RowFactoryTestsBackwardsCompat(unittest.TestCase):
    # Legacy path: a cursor-level row_factory (installed by MyCursor's
    # __init__) must still shape fetched rows (dicts via dict_factory).
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckIsProducedByFactory(self):
        cur = self.con.cursor(factory=MyCursor)
        cur.execute("select 4+5 as foo")
        row = cur.fetchone()
        self.assertIsInstance(row, dict)
        cur.close()
    def tearDown(self):
        self.con.close()
class RowFactoryTests(unittest.TestCase):
    # Exercises con.row_factory and the built-in sqlite.Row type:
    # index/name access, iteration, tuple/dict conversion, hashing,
    # comparison, and sequence behaviour.
    # NOTE: Python 2 code (``0L`` long literals below).
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckCustomFactory(self):
        # A callable row_factory receives (cursor, row_tuple).
        self.con.row_factory = lambda cur, row: list(row)
        row = self.con.execute("select 1, 2").fetchone()
        self.assertIsInstance(row, list)
    def CheckSqliteRowIndex(self):
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        self.assertIsInstance(row, sqlite.Row)
        col1, col2 = row["a"], row["b"]
        self.assertEqual(col1, 1, "by name: wrong result for column 'a'")
        # NOTE(review): message below says column 'a' but the check is for 'b'.
        self.assertEqual(col2, 2, "by name: wrong result for column 'a'")
        # Name lookup is case-insensitive.
        col1, col2 = row["A"], row["B"]
        self.assertEqual(col1, 1, "by name: wrong result for column 'A'")
        self.assertEqual(col2, 2, "by name: wrong result for column 'B'")
        # Index lookup: int and long, positive and negative.
        self.assertEqual(row[0], 1, "by index: wrong result for column 0")
        self.assertEqual(row[0L], 1, "by index: wrong result for column 0")
        self.assertEqual(row[1], 2, "by index: wrong result for column 1")
        self.assertEqual(row[1L], 2, "by index: wrong result for column 1")
        self.assertEqual(row[-1], 2, "by index: wrong result for column -1")
        self.assertEqual(row[-1L], 2, "by index: wrong result for column -1")
        self.assertEqual(row[-2], 1, "by index: wrong result for column -2")
        self.assertEqual(row[-2L], 1, "by index: wrong result for column -2")
        # Out-of-range names/indexes raise IndexError (even huge ints).
        with self.assertRaises(IndexError):
            row['c']
        with self.assertRaises(IndexError):
            row[2]
        with self.assertRaises(IndexError):
            row[2L]
        with self.assertRaises(IndexError):
            row[-3]
        with self.assertRaises(IndexError):
            row[-3L]
        with self.assertRaises(IndexError):
            row[2**1000]
    def CheckSqliteRowIter(self):
        """Checks if the row object is iterable"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        for col in row:
            pass
    def CheckSqliteRowAsTuple(self):
        """Checks if the row object can be converted to a tuple"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        t = tuple(row)
        self.assertEqual(t, (row['a'], row['b']))
    def CheckSqliteRowAsDict(self):
        """Checks if the row object can be correctly converted to a dictionary"""
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        d = dict(row)
        self.assertEqual(d["a"], row["a"])
        self.assertEqual(d["b"], row["b"])
    def CheckSqliteRowHashCmp(self):
        """Checks if the row object compares and hashes correctly"""
        self.con.row_factory = sqlite.Row
        row_1 = self.con.execute("select 1 as a, 2 as b").fetchone()
        row_2 = self.con.execute("select 1 as a, 2 as b").fetchone()
        row_3 = self.con.execute("select 1 as a, 3 as b").fetchone()
        # Equal rows: ==, !=, and hash all agree.
        self.assertEqual(row_1, row_1)
        self.assertEqual(row_1, row_2)
        self.assertTrue(row_2 != row_3)
        self.assertFalse(row_1 != row_1)
        self.assertFalse(row_1 != row_2)
        self.assertFalse(row_2 == row_3)
        self.assertEqual(row_1, row_2)
        self.assertEqual(hash(row_1), hash(row_2))
        self.assertNotEqual(row_1, row_3)
        self.assertNotEqual(hash(row_1), hash(row_3))
    def CheckSqliteRowAsSequence(self):
        """ Checks if the row object can act like a sequence """
        self.con.row_factory = sqlite.Row
        row = self.con.execute("select 1 as a, 2 as b").fetchone()
        as_tuple = tuple(row)
        self.assertEqual(list(reversed(row)), list(reversed(as_tuple)))
        self.assertIsInstance(row, Sequence)
    def tearDown(self):
        self.con.close()
class TextFactoryTests(unittest.TestCase):
    # Python 2 tests for con.text_factory (how TEXT values are converted).
    # ``unicode`` and the u-literals below are Py2-only.
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckUnicode(self):
        # Default text_factory yields unicode.
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
    def CheckString(self):
        # text_factory = str yields UTF-8 encoded byte strings.
        self.con.text_factory = str
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.assertEqual(type(row[0]), str, "type of row[0] must be str")
        self.assertEqual(row[0], austria.encode("utf-8"), "column must equal original data in UTF-8")
    def CheckCustom(self):
        # Any callable taking the raw bytes can serve as text_factory.
        self.con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria.encode("latin1"),)).fetchone()
        self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
        self.assertTrue(row[0].endswith(u"reich"), "column must contain original data")
    def CheckOptimizedUnicode(self):
        # OptimizedUnicode: ASCII-only text -> str, otherwise unicode.
        self.con.text_factory = sqlite.OptimizedUnicode
        austria = unicode("Österreich", "latin1")
        germany = unicode("Deutchland")
        a_row = self.con.execute("select ?", (austria,)).fetchone()
        d_row = self.con.execute("select ?", (germany,)).fetchone()
        self.assertEqual(type(a_row[0]), unicode, "type of non-ASCII row must be unicode")
        self.assertEqual(type(d_row[0]), str, "type of ASCII-only row must be str")
    def tearDown(self):
        self.con.close()
class TextFactoryTestsWithEmbeddedZeroBytes(unittest.TestCase):
    # text_factory behaviour for TEXT values containing NUL bytes
    # (the full value, not just up to the first \x00, must survive).
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.execute("create table test (value text)")
        self.con.execute("insert into test (value) values (?)", ("a\x00b",))
    def CheckString(self):
        # text_factory defaults to unicode
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), unicode)
        self.assertEqual(row[0], "a\x00b")
    def CheckCustom(self):
        # A custom factory should receive an str argument
        self.con.text_factory = lambda x: x
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), str)
        self.assertEqual(row[0], "a\x00b")
    def CheckOptimizedUnicodeAsString(self):
        # ASCII -> str argument
        self.con.text_factory = sqlite.OptimizedUnicode
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), str)
        self.assertEqual(row[0], "a\x00b")
    def CheckOptimizedUnicodeAsUnicode(self):
        # Non-ASCII -> unicode argument
        self.con.text_factory = sqlite.OptimizedUnicode
        self.con.execute("delete from test")
        self.con.execute("insert into test (value) values (?)", (u'ä\0ö',))
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), unicode)
        self.assertEqual(row[0], u"ä\x00ö")
    def tearDown(self):
        self.con.close()
def suite():
    """Collect every Check*-prefixed test from the factory test cases, in order."""
    cases = (
        ConnectionFactoryTests,
        CursorFactoryTests,
        RowFactoryTestsBackwardsCompat,
        RowFactoryTests,
        TextFactoryTests,
        TextFactoryTestsWithEmbeddedZeroBytes,
    )
    return unittest.TestSuite(unittest.makeSuite(case, "Check") for case in cases)
def test():
    """Run the whole factory test suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test()
import threading
import time
class database:
    """In-memory, Redis-like key/value store backed by class-level dicts.

    Every method is a @staticmethod operating on shared class state, so the
    class acts as a namespace/singleton. LOCK is a non-reentrant
    threading.Lock guarding all mutation; methods must therefore never call
    another locking method while holding it.
    """

    DATA = {}                            # the currently SELECTed database
    DATABASES = [{} for x in range(16)]  # 16 databases, as in stock Redis
    TTL = {}                             # key -> absolute expiry (epoch seconds)
    LOCK = threading.Lock()
    CONFIG = {"databases": "16"}

    @staticmethod
    def select(db_index):
        """SELECT: rebind DATA to one of the numbered databases."""
        with database.LOCK:
            database.DATA = database.DATABASES[int(db_index)]

    @staticmethod
    def set(key, value, ext):
        """SET with optional EX/PX/NX/XX flags passed as a flat token list.

        Returns "OK" on success, None when an NX/XX precondition fails.
        """
        seconds = None
        milliseconds = None
        mode = None
        if ext:
            if "EX" in ext:
                seconds = ext[ext.index("EX") + 1]
            if "PX" in ext:
                milliseconds = ext[ext.index("PX") + 1]
            if "NX" in ext:
                mode = "NX"
            if "XX" in ext:
                mode = "XX"
        if mode == "NX" and (database.get(key) is not None):
            return None
        if mode == "XX" and (database.get(key) is None):
            return None
        with database.LOCK:
            database.DATA[key] = value
        # Expiry is applied after releasing LOCK: expire()/pexpire()
        # re-acquire the (non-reentrant) lock themselves.
        if seconds:
            database.expire(key, seconds)
        if milliseconds:
            database.pexpire(key, milliseconds)
        return "OK"

    @staticmethod
    def get(key):
        """GET: value for *key*, or None when absent."""
        return database.DATA.get(key, None)

    @staticmethod
    def DEL(keys):
        """DEL: remove existing keys; return how many were removed.

        NOTE(review): keys holding falsy values (0, "") are not deleted;
        kept for compatibility with the original truthiness check.
        """
        ret = 0
        with database.LOCK:
            for key in keys:
                if database.DATA.get(key):
                    del database.DATA[key]
                    ret += 1
        return ret

    @staticmethod
    def keys(key):
        """KEYS: glob-style filter (* and ? only) over the current database."""
        import re
        # NOTE(review): match() only anchors at the start, so "h?llo" also
        # matches longer keys; behavior kept for compatibility.
        patten = re.compile(key.replace("*", r"[\w]*").replace("?", r"[\w]"))
        return [k for k in database.DATA.keys() if patten.match(k)]

    @staticmethod
    def get_type(key):
        """TYPE: this toy store only supports string values."""
        return "string"

    @staticmethod
    def get_config(key):
        """CONFIG GET: [key, value-or-None] pair."""
        return [key, database.CONFIG.get(key, None)]

    @staticmethod
    def set_config(key, value):
        """CONFIG SET."""
        database.CONFIG[key] = value
        return "OK"

    @staticmethod
    def get_ttl(key):
        """TTL in whole seconds; -2 when the key is absent, -1 when no TTL."""
        if database.get(key) is None:
            return -2
        deadline = database.TTL.get(key)
        if deadline:
            return int(deadline - time.time())
        return -1

    @staticmethod
    def get_pttl(key):
        """PTTL in milliseconds; -2 when absent, -1 when no TTL."""
        if database.get(key) is None:
            return -2
        deadline = database.TTL.get(key)
        if deadline:
            return int((deadline - time.time()) * 1000)
        return -1

    @staticmethod
    def expire(key, ttl):
        """EXPIRE: TTL in whole seconds from now. 1 on success, 0 if key absent."""
        with database.LOCK:
            if key in database.DATA:
                database.TTL[key] = time.time() + int(ttl)
                return 1
        return 0

    @staticmethod
    def pexpire(key, ttl):
        """PEXPIRE: TTL in milliseconds from now."""
        with database.LOCK:
            if key in database.DATA:
                database.TTL[key] = time.time() + float(ttl) / 1000
                return 1
        return 0

    @staticmethod
    def expireat(key, ttl_time):
        """EXPIREAT: absolute deadline in epoch seconds (must be in the future)."""
        ttl_time = float(ttl_time)
        with database.LOCK:
            if key in database.DATA and time.time() < ttl_time:
                database.TTL[key] = ttl_time
                return 1
        return 0

    @staticmethod
    def pexpireat(key, ttl_time):
        """PEXPIREAT: absolute deadline in epoch milliseconds."""
        ttl_time = float(ttl_time) / 1000
        with database.LOCK:
            if key in database.DATA and time.time() < ttl_time:
                database.TTL[key] = ttl_time
                return 1
        return 0

    @staticmethod
    def persist(key):
        """PERSIST: drop a key's TTL. 1 if a TTL was removed, else 0."""
        with database.LOCK:
            if key in database.DATA and key in database.TTL:
                del database.TTL[key]
                return 1
        return 0

    @staticmethod
    def move(key, db_index):
        """MOVE: relocate a key into another database. 1 on success."""
        with database.LOCK:
            if key in database.DATA:
                database.DATABASES[int(db_index)][key] = database.DATA.pop(key)
                return 1
        return 0

    @staticmethod
    def randomkey():
        """RANDOMKEY: a random existing key, or None when the db is empty.

        Bugfix: the original indexed with randint(0, len(keys)) whose upper
        bound is inclusive and could raise IndexError.
        """
        import random
        keys = list(database.DATA.keys())
        if keys:
            return random.choice(keys)
        return None

    @staticmethod
    def rename(key, newkey):
        """RENAME: move value (and any TTL) to *newkey*, clobbering it."""
        with database.LOCK:
            if key in database.DATA:
                database.DATA[newkey] = database.DATA.pop(key)
                if key in database.TTL:
                    database.TTL[newkey] = database.TTL.pop(key)
                return "OK"
        return "-ERR no such key"

    @staticmethod
    def renamenx(key, newkey):
        """RENAMENX: rename only when *newkey* does not yet exist.

        Returns 1 when the rename happened, 0 otherwise.
        Bugfix: the original returned the two codes swapped.
        """
        with database.LOCK:
            if key in database.DATA and newkey not in database.DATA:
                database.DATA[newkey] = database.DATA.pop(key)
                if key in database.TTL:
                    database.TTL[newkey] = database.TTL.pop(key)
                return 1
        return 0

    @staticmethod
    def dump(key):
        """DUMP: pickle the value; absent/falsy values are returned unchanged."""
        ret = database.get(key)
        if ret:
            import pickle
            return pickle.dumps(ret)
        return ret

    @staticmethod
    def restore(key, ttl, serialized_value):
        """RESTORE: unpickle *serialized_value* into *key*, optionally with TTL.

        SECURITY: pickle.loads on untrusted payloads can execute arbitrary
        code; kept only for symmetry with dump().
        Bugfix: the original called expire() while still holding LOCK (a
        non-reentrant lock), deadlocking whenever ttl was non-zero.
        """
        import pickle
        ret = "OK"
        ttl_val = 0
        with database.LOCK:
            try:
                database.DATA[key] = pickle.loads(serialized_value)
                ttl_val = int(ttl)
            except Exception:
                ret = "-ERR DUMP payload version or checksum are wrong"
        if ret == "OK" and ttl_val:
            database.expire(key, ttl_val)
        return ret

    @staticmethod
    def append(key, value):
        """APPEND: concatenate onto an existing value (or create it); return new length."""
        with database.LOCK:
            if key in database.DATA:
                database.DATA[key] = database.DATA[key] + value
            else:
                database.DATA[key] = value
            return len(database.DATA[key])

    @staticmethod
    def setbit(key, offset, value):
        """SETBIT on an integer-valued key; returns the previous bit.

        Bugfix: clearing a bit originally computed ``old & (0 << offset)``,
        zeroing the whole value; only the addressed bit is cleared now.
        """
        offset = int(offset)
        value = int(value)
        with database.LOCK:
            if key in database.DATA:
                old = database.DATA[key]
                ret = (old >> offset) & 0x01
                if value == 1:
                    database.DATA[key] = old | (1 << offset)
                else:
                    database.DATA[key] = old & ~(1 << offset)
            else:
                database.DATA[key] = value << offset
                ret = value
        return ret

    @staticmethod
    def getbit(key, offset):
        """GETBIT; a missing key is initialised to 0 (as the original did)."""
        ret = 0
        offset = int(offset)
        with database.LOCK:
            if key in database.DATA:
                ret = (database.DATA[key] >> offset) & 0x01
            else:
                database.DATA[key] = 0
        return ret

    @staticmethod
    def bitcount(key, start, end):
        """BITCOUNT over an integer value; start/end index *bits*, LSB first."""
        ret = 0
        if start:
            start = int(start)
        if end:
            end = int(end)
        with database.LOCK:
            if key in database.DATA:
                # bin() then reverse so bit 0 comes first, slice, count ones.
                ret = bin(database.DATA[key])[2:][::-1][start:end].count("1")
            else:
                database.DATA[key] = 0
        return ret

    @staticmethod
    def bitop(subaction, destkey, keys):
        """BITOP AND/OR/XOR/NOT over integer values into *destkey*.

        Returns the rough byte length of the result. Bugfixes: the original
        referenced an undefined name (value0) when *keys* was empty, and
        applied NOT to destkey's old value instead of the source key.
        """
        ret = 0
        subaction = subaction.lower()
        with database.LOCK:
            values = [database.DATA[x] for x in keys]
            if values:
                acc = values[0]
                if subaction == "and":
                    for value in values[1:]:
                        acc &= value
                elif subaction == "or":
                    for value in values[1:]:
                        acc |= value
                elif subaction == "xor":
                    for value in values[1:]:
                        acc ^= value
                elif subaction == "not":
                    acc = ~values[0]
                database.DATA[destkey] = acc
                ret = len(hex(acc)[2:]) // 2
        return ret

    @staticmethod
    def decr(key, amount):
        """DECRBY: subtract *amount*; values are stored back as strings."""
        with database.LOCK:
            try:
                value = int(database.DATA.get(key, 0))
                database.DATA[key] = "%s" % (value - int(amount))
                return database.DATA[key]
            except Exception:
                return "-ERR value is not an integer or out of range"

    @staticmethod
    def incr(key, amount):
        """INCRBY: add *amount*; values are stored back as strings."""
        with database.LOCK:
            try:
                value = int(database.DATA.get(key, 0))
                database.DATA[key] = "%s" % (value + int(amount))
                return database.DATA[key]
            except Exception:
                return "-ERR value is not an integer or out of range"

    @staticmethod
    def incr_float(key, amount):
        """INCRBYFLOAT; always returns a string."""
        with database.LOCK:
            try:
                value = float(database.DATA.get(key, 0))
                database.DATA[key] = "%s" % (value + float(amount))
                ret = database.DATA[key]
            except Exception as e:
                print(e)
                ret = "-ERR value is not an integer or out of range"
        return str(ret)

    @staticmethod
    def getrange(key, start, end):
        """GETRANGE with Redis semantics: *end* is inclusive, -1 = last char.

        Bugfix: the original sliced value[start:end], silently dropping the
        final character of every requested range.
        """
        start, end = int(start), int(end)
        value = database.DATA.get(key)
        if not value:
            return None
        stop = None if end == -1 else end + 1
        return value[start:stop]

    @staticmethod
    def getset(key, value):
        """GETSET: set *key* and return its previous value (None if absent)."""
        with database.LOCK:
            ret = database.DATA.get(key, None)
            database.DATA[key] = value
        return ret

    @staticmethod
    def mget(keys):
        """MGET: list of values (None for missing keys), in input order."""
        return [database.DATA.get(key, None) for key in keys]

    @staticmethod
    def mset(keys, values):
        """MSET: set all pairs; always returns ["OK"] (a list, as callers expect)."""
        data = dict(zip(keys, values))
        with database.LOCK:
            database.DATA.update(data)
        return ["OK"]

    @staticmethod
    def msetnx(keys, values):
        """MSETNX: set all pairs only if no key exists (check+write under one lock)."""
        with database.LOCK:
            for key in keys:
                if database.DATA.get(key) is not None:
                    return 0
            for key, value in zip(keys, values):
                database.DATA[key] = value
        return 1
# Flag polled by ttl_thread(); set to False to stop the reaper loop.
TTL_THREAD_RUNNING = True
def ttl_thread():
    """Background reaper: once a second, delete keys whose TTL has passed.

    Runs until the module-level TTL_THREAD_RUNNING flag is set to False.
    Bugfix: the original iterated database.TTL.keys() and re-read
    database.TTL[key] while deleting — a KeyError (and, on Python 3, a
    RuntimeError from mutating a live dict view) if another thread removes
    a TTL concurrently. We now snapshot the items first and use pop() so
    already-removed keys are tolerated.
    """
    while TTL_THREAD_RUNNING:
        time.sleep(1)
        now = time.time()
        expired = [key for key, deadline in list(database.TTL.items())
                   if now >= deadline]
        for key in expired:
            database.TTL.pop(key, None)
        database.DEL(expired)
# initial code
# The reaper thread is created here but intentionally left unstarted
# (see the commented-out start() below).
TTL_THREAD = threading.Thread(target=ttl_thread)
# TTL_THREAD.start()
database.DATA = database.DATABASES[0] | src/redis_server/store.py | import threading
import time
class database:
DATA = {}
DATABASES = [{} for x in range(16)]
TTL = {}
LOCK = threading.Lock()
CONFIG = {"databases": "16"}
@staticmethod
def select(db_index):
if database.LOCK.acquire():
database.DATA = database.DATABASES[int(db_index)]
database.LOCK.release()
@staticmethod
def set(key, value, ext):
seconds = None
milliseconds = None
mode = None
if ext:
if "EX" in ext:
seconds = ext[ext.index("EX") + 1]
if "PX" in ext:
milliseconds = ext[ext.index("PX") + 1]
if "NX" in ext:
mode = "NX"
if "XX" in ext:
mode = "XX"
if mode == "NX" and (database.get(key) is not None):
return None
if mode == "XX" and (database.get(key) is None):
return None
if database.LOCK.acquire():
database.DATA[key] = value
database.LOCK.release()
if seconds:
database.expire(key, seconds)
if milliseconds:
database.pexpire(key, milliseconds)
return "OK"
@staticmethod
def get(key):
return database.DATA.get(key, None)
@staticmethod
def DEL(keys):
ret = 0
if database.LOCK.acquire():
for key in keys:
if database.DATA.get(key):
del database.DATA[key]
ret += 1
database.LOCK.release()
return ret
@staticmethod
def keys(key):
import re
patten = re.compile(key.replace("*", r"[\w]*").replace("?", "[\w]"))
ret = filter(lambda x: patten.match(x), database.DATA.keys())
return ret
@staticmethod
def get_type(key):
return "string"
@staticmethod
def get_config(key):
return [key, database.CONFIG.get(key, None)]
@staticmethod
def set_config(key, value):
database.CONFIG[key] = value
return "OK"
@staticmethod
def get_ttl(key):
if database.get(key) is None:
return -2
ttl = database.TTL.get(key)
if ttl:
ttl = ttl - time.time()
return int(ttl)
return -1
@staticmethod
def get_pttl(key):
if database.get(key) is None:
return -2
ttl = database.TTL.get(key)
if ttl:
ttl = ttl - time.time()
return int(ttl * 1000)
return -1
@staticmethod
def expire(key, ttl):
ret = 1
if database.LOCK.acquire():
if key in database.DATA:
database.TTL[key] = time.time() + int(ttl)
else:
ret = 0
database.LOCK.release()
return ret
@staticmethod
def pexpire(key, ttl):
ret = 1
if database.LOCK.acquire():
if key in database.DATA:
database.TTL[key] = time.time() + float(ttl)/1000
else:
ret = 0
database.LOCK.release()
return ret
@staticmethod
def expireat(key, ttl_time):
ttl_time = float(ttl_time)
ret = 1
if database.LOCK.acquire():
if key in database.DATA and time.time() < ttl_time:
database.TTL[key] = ttl_time
else:
ret = 0
database.LOCK.release()
return ret
@staticmethod
def pexpireat(key, ttl_time):
ttl_time = float(ttl_time) / 1000
ret = 1
if database.LOCK.acquire():
if key in database.DATA and time.time() < ttl_time:
database.TTL[key] = ttl_time
else:
ret = 0
database.LOCK.release()
return ret
@staticmethod
def persist(key):
ret = 1
if database.LOCK.acquire():
if key in database.DATA and key in database.TTL:
del database.TTL[key]
else:
ret = 0
database.LOCK.release()
return ret
@staticmethod
def move(key, db_index):
ret = 1
if database.LOCK.acquire():
if key in database.DATA:
database.DATABASES[int(db_index)][key] = database.DATA.pop(key)
else:
ret = 0
database.LOCK.release()
return ret
@staticmethod
def randomkey():
import random
keys = database.DATA.keys()
if keys:
ret = keys[random.randint(0, len(keys))]
return ret
else:
return None
@staticmethod
def rename(key, newkey):
ret = "OK"
if database.LOCK.acquire():
if key in database.DATA:
database.DATA[newkey] = database.DATA.pop(key)
if key in database.TTL:
database.TTL[newkey] = database.TTL.pop(key)
else:
ret = "-ERR no such key"
database.LOCK.release()
return ret
@staticmethod
def renamenx(key, newkey):
ret = 0
if database.LOCK.acquire():
if key in database.DATA and newkey not in database.DATA:
database.DATA[newkey] = database.DATA.pop(key)
if key in database.TTL:
database.TTL[newkey] = database.TTL.pop(key)
else:
ret = 1
database.LOCK.release()
return ret
@staticmethod
def dump(key):
ret = database.get(key)
if ret:
import pickle
return pickle.dumps(ret)
return ret
@staticmethod
def restore(key, ttl, serialized_value):
ret = "OK"
import pickle
if database.LOCK.acquire():
try:
value = pickle.loads(serialized_value)
database.DATA[key] = value
ttl = int(ttl)
if ttl:
database.expire(key, ttl)
except:
ret = "-ERR DUMP payload version or checksum are wrong"
database.LOCK.release()
return ret
@staticmethod
def append(key, value):
ret = 0
if database.LOCK.acquire():
if key in database.DATA:
database.DATA[key] = database.DATA[key] + value
else:
database.DATA[key] = value
ret = len(database.DATA[key])
database.LOCK.release()
return ret
@staticmethod
def setbit(key, offset, value):
ret = 0
offset = int(offset)
value = int(value)
if database.LOCK.acquire():
if key in database.DATA:
old = database.DATA[key]
ret = (old >> offset) & 0x01
if value == 1:
database.DATA[key] = old | (value << offset)
else:
database.DATA[key] = old & (value << offset)
else:
database.DATA[key] = value << offset
ret = value
database.LOCK.release()
return ret
@staticmethod
def getbit(key, offset):
ret = 0
offset = int(offset)
if database.LOCK.acquire():
if key in database.DATA:
old = database.DATA[key]
ret = (old >> offset) & 0x01
else:
database.DATA[key] = 0
database.LOCK.release()
return ret
@staticmethod
def bitcount(key, start, end):
ret = 0
if start:
start = int(start)
if end:
end = int(end)
if database.LOCK.acquire():
if key in database.DATA:
value = database.DATA[key]
ret = bin(value)[2:][::-1][start:end].count("1")
else:
database.DATA[key] = 0
database.LOCK.release()
return ret
@staticmethod
def bitop(subaction, destkey, keys):
ret = 0
subaction = subaction.lower()
if database.LOCK.acquire():
values = map(lambda x: database.DATA[x], keys)
values0 = None
if values:
value0 = values[0]
if subaction == "and":
for value in values[1:]:
value0 &= value
elif subaction == "or":
for value in values[1:]:
value0 |= value
elif subaction == "xor":
for value in values[1:]:
value0 ^= value
elif subaction == "not":
value0 = ~(database.DATA[destkey])
database.DATA[destkey] = value0
strValue = hex(value0)[2:]
ret = len(strValue)/2
database.LOCK.release()
return ret
@staticmethod
def decr(key, amount):
ret = 0
if database.LOCK.acquire():
try:
value = int(database.DATA.get(key, 0))
database.DATA[key] = "%s" % (value - int(amount))
ret = database.DATA[key]
except :
ret = "-ERR value is not an integer or out of range"
database.LOCK.release()
return ret
@staticmethod
def incr(key, amount):
ret = 0
if database.LOCK.acquire():
try:
value = int(database.DATA.get(key, 0))
database.DATA[key] = "%s" % (value + int(amount))
ret = database.DATA[key]
except :
ret = "-ERR value is not an integer or out of range"
database.LOCK.release()
return ret
@staticmethod
def incr_float(key, amount):
ret = 0
if database.LOCK.acquire():
try:
value = float(database.DATA.get(key, 0))
database.DATA[key] = "%s" % (value + float(amount))
ret = database.DATA[key]
except Exception, e:
print e
ret = "-ERR value is not an integer or out of range"
database.LOCK.release()
return str(ret)
@staticmethod
def getrange(key, start, end):
start, end = int(start), int(end)
value = database.DATA.get(key)
if value:
return value[start:end]
return None
@staticmethod
def getset(key, value):
ret = database.DATA.get(key, None)
if database.LOCK.acquire():
database.DATA[key] = value
database.LOCK.release()
return ret
@staticmethod
def mget(keys):
ret = map(lambda key: database.DATA.get(key, None), keys)
return ret
@staticmethod
def mset(keys, values):
data = { }
for key, value in zip(keys, values):
data[key] = value
if database.LOCK.acquire():
database.DATA.update(data)
database.LOCK.release()
return ["OK"]
@staticmethod
def msetnx(keys, values):
data = { }
for key, value in zip(keys, values):
if database.DATA.get(key) is not None:
return 0
data[key] = value
if database.LOCK.acquire():
database.DATA.update(data)
database.LOCK.release()
return 1
TTL_THREAD_RUNNING = True
def ttl_thread():
while TTL_THREAD_RUNNING:
time.sleep(1)
now = time.time()
keys = database.TTL.keys()
keys_to_del = []
for key in keys:
if now - database.TTL[key] >= 0:
del database.TTL[key]
keys_to_del.append(key)
database.DEL(keys_to_del)
# initial code
TTL_THREAD = threading.Thread(target=ttl_thread)
# TTL_THREAD.start()
database.DATA = database.DATABASES[0] | 0.384681 | 0.073563 |
import os
import argparse
import re
import pandas as pd
import torch
import torchtext
class DataFrameDataset(torchtext.data.Dataset):
"""Class for using pandas DataFrames as a datasource"""
def __init__(self, examples, fields, filter_pred=None):
"""
Create a dataset from a pandas dataframe of examples
"""
self.examples = examples.apply(SeriesExample.fromSeries,
args=(fields, ),
axis=1).tolist()
if filter_pred is not None:
self.examples = filter(filter_pred, self.examples)
self.fields = dict(fields)
# Unpack field tuples
for n, f in list(self.fields.items()):
if isinstance(n, tuple):
self.fields.update(zip(n, f))
del self.fields[n]
class SeriesExample(torchtext.data.Example):
"""Class to convert a pandas Series to an Example"""
@classmethod
def fromSeries(cls, data, fields):
return cls.fromdict(data.to_dict(), fields)
@classmethod
def fromdict(cls, data, fields):
ex = cls()
for key, field in fields.items():
if key not in data:
raise ValueError("Specified key {} was not found in "
"the input data".format(key))
if field is not None:
setattr(ex, key, field.preprocess(data[key]))
else:
setattr(ex, key, data[key])
return ex
class BatchWrapper:
def __init__(self, dl, x_var, y_vars):
self.dl, self.x_var, self.y_vars = dl, x_var, y_vars
def __iter__(self):
for batch in self.dl:
x = getattr(batch, self.x_var).transpose(0, 1)
if self.y_vars is not None:
y = torch.cat([
getattr(batch, feat).unsqueeze(1) for feat in self.y_vars
],
dim=1).float()
else:
y = torch.zeros((1))
yield (x, y)
def __len__(self):
return len(self.dl)
def pre_clean_text(text):
"""Replaces unnesessary symbols from text """
return re.sub(r"[.,\"'\\\/\n-]", ' ', text)
def get_article_themes(article_folder):
try:
theme_folders = next(os.walk(article_folder))[1] #get only folders
except StopIteration as exception:
print(f"No directory found for '{article_folder}', exiting")
raise exception
print(f"Avalible themes: {theme_folders}")
return theme_folders
def get_articles(article_folder, theme_folders):
df_data = []
for theme in theme_folders:
theme_files = next(os.walk(f"{article_folder}/{theme}"))[2]
for theme_file in theme_files:
with open(f"{article_folder}/{theme}/{theme_file}") as file_data:
try:
file_data_read = file_data.read()
if len(file_data_read) > 4000:
file_data_read = file_data_read[:4000]
df_data.append([pre_clean_text(file_data_read), theme])
except UnicodeDecodeError:
print(
f"Error in decoding file {theme_file}, in theme {theme}"
)
text_df = pd.DataFrame(df_data, columns=['text', 'theme'])
for theme in theme_folders: # this forces certain order of columns for one-hot encoding
text_df.loc[text_df['theme'] == theme, theme] = 1
text_df = text_df.drop('theme', axis=1)
text_df = text_df.fillna(0)
return text_df
def parse_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument(
"-i",
"--input",
dest="input",
help="Input folder for model to train defaults to './articles'")
parser.add_argument(
"-d",
"--device",
help="What device to use for traning, defaults to 'cpu', can be 'cuda'"
)
return parser.parse_args() | helpers.py | import os
import argparse
import re
import pandas as pd
import torch
import torchtext
class DataFrameDataset(torchtext.data.Dataset):
"""Class for using pandas DataFrames as a datasource"""
def __init__(self, examples, fields, filter_pred=None):
"""
Create a dataset from a pandas dataframe of examples
"""
self.examples = examples.apply(SeriesExample.fromSeries,
args=(fields, ),
axis=1).tolist()
if filter_pred is not None:
self.examples = filter(filter_pred, self.examples)
self.fields = dict(fields)
# Unpack field tuples
for n, f in list(self.fields.items()):
if isinstance(n, tuple):
self.fields.update(zip(n, f))
del self.fields[n]
class SeriesExample(torchtext.data.Example):
"""Class to convert a pandas Series to an Example"""
@classmethod
def fromSeries(cls, data, fields):
return cls.fromdict(data.to_dict(), fields)
@classmethod
def fromdict(cls, data, fields):
ex = cls()
for key, field in fields.items():
if key not in data:
raise ValueError("Specified key {} was not found in "
"the input data".format(key))
if field is not None:
setattr(ex, key, field.preprocess(data[key]))
else:
setattr(ex, key, data[key])
return ex
class BatchWrapper:
def __init__(self, dl, x_var, y_vars):
self.dl, self.x_var, self.y_vars = dl, x_var, y_vars
def __iter__(self):
for batch in self.dl:
x = getattr(batch, self.x_var).transpose(0, 1)
if self.y_vars is not None:
y = torch.cat([
getattr(batch, feat).unsqueeze(1) for feat in self.y_vars
],
dim=1).float()
else:
y = torch.zeros((1))
yield (x, y)
def __len__(self):
return len(self.dl)
def pre_clean_text(text):
"""Replaces unnesessary symbols from text """
return re.sub(r"[.,\"'\\\/\n-]", ' ', text)
def get_article_themes(article_folder):
try:
theme_folders = next(os.walk(article_folder))[1] #get only folders
except StopIteration as exception:
print(f"No directory found for '{article_folder}', exiting")
raise exception
print(f"Avalible themes: {theme_folders}")
return theme_folders
def get_articles(article_folder, theme_folders):
df_data = []
for theme in theme_folders:
theme_files = next(os.walk(f"{article_folder}/{theme}"))[2]
for theme_file in theme_files:
with open(f"{article_folder}/{theme}/{theme_file}") as file_data:
try:
file_data_read = file_data.read()
if len(file_data_read) > 4000:
file_data_read = file_data_read[:4000]
df_data.append([pre_clean_text(file_data_read), theme])
except UnicodeDecodeError:
print(
f"Error in decoding file {theme_file}, in theme {theme}"
)
text_df = pd.DataFrame(df_data, columns=['text', 'theme'])
for theme in theme_folders: # this forces certain order of columns for one-hot encoding
text_df.loc[text_df['theme'] == theme, theme] = 1
text_df = text_df.drop('theme', axis=1)
text_df = text_df.fillna(0)
return text_df
def parse_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument(
"-i",
"--input",
dest="input",
help="Input folder for model to train defaults to './articles'")
parser.add_argument(
"-d",
"--device",
help="What device to use for traning, defaults to 'cpu', can be 'cuda'"
)
return parser.parse_args() | 0.636805 | 0.365796 |
# module typing is standard in Python 3.5+: https://docs.python.org/3/library/typing.html
# used for type hints used in static type checking in PEP 484
# PEP 484 -- Type Hints: https://www.python.org/dev/peps/pep-0484/
# PEP 525 -- Syntax for Variable Annotations: https://www.python.org/dev/peps/pep-0526/
# use mypy for static type checking of Pyhton code: http://mypy-lang.org/
# note that just because a parameter is annotated to be of a specific type, doesn't mean
# that at runtime it will actually be of that type: dynamic checking or casting/conversion
# still needs to be done
import typing
import os
import collections.abc
# suppress mypy "error: No library stub file for module 'numpy'"
import numpy # type: ignore
# using read & write methods of wavfile class in scipy.io module
# See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.read.html
# suppress mypy "error: No library stub file for module 'scipy.io'"
from scipy.io import wavfile # type: ignore
# The IPython.core.display module is specific to IPython which is used in Jupyter
# See https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html
import IPython # type: ignore
from . import files
class Sound(collections.abc.Iterable):
"""
represents a sound read in from a WAV format file
"""
@classmethod
def from_file(cls, file_name: typing.Union[str, os.PathLike]) -> 'Sound':
"""
Write better docstring
:param file_name:
:return:
"""
path = files.media_path(str(file_name))
rate, data = wavfile.read(path)
return cls(numpy.copy(data), rate)
@classmethod
def make_empty(cls, num_samples: int, rate: float = 22500.0) -> 'Sound':
"""
Write beter docstring
:param int num_samples:
:param float rate:
:return:
:rtype: Sound
"""
num_samples = int(num_samples)
if num_samples < 0:
raise ValueError
data: numpy.array = numpy.zeros(num_samples, dtype=numpy.int32)
return cls(data, rate)
def __init__(self, data, rate=None):
self.__rate: float = float(rate)
self.__samples: numpy.array = data
def copy(self) -> 'Sound':
"""
write better docstring
:return: a copy of this Sound
:rtype: Sound
"""
data_copy: numpy.array = numpy.copy(self.__samples)
new_sound: 'Sound' = Sound(data_copy, self.__rate)
return new_sound
def __str__(self) -> str:
return f"Sound: {len(self)} samples at {self.__rate} sample/second"
def __iter__(self) -> typing.Iterator['Sample']:
for i in range(0, self.length):
yield Sample(i, self)
@property
def samples(self) -> typing.Iterator['Sample']:
"""
Write better docstring
"""
return iter(self)
@property
def length(self) -> int:
"""
number of samples in the sound
:type: int
"""
return len(self.__samples)
def __len__(self) -> int:
return len(self.__samples)
@property
def duration(self) -> float:
"""
number of seconds the sound lasts = length/rate
"""
return float(len(self.__samples))/float(self.__rate)
@property
def rate(self):
""" returns sampling rate of sound in samples per second (Hz) """
return self.__rate
def __getitem__(self, index: int) -> int:
index = int(index)
if index < 0:
raise IndexError(f'Sound.getitem({index}): Negative Index')
if index >= len(self.__samples):
raise IndexError(f'Sound.getitem({index}), Index too large, max={self.__samples}')
return int(self.__samples[index])
@staticmethod
def clamp(value: int) -> int:
"""
:param value:
:return:
"""
value = int(value)
if value > 32767:
value = 32767
elif value < -32768:
value = -32768
return value
def __setitem__(self, index: int, value: int) -> None:
"""
:param index:
:param value:
:return:
"""
index = int(index)
if index < 0:
raise IndexError(f'Sound.setitem({index}): Negative Index')
if index >= len(self):
raise IndexError(f'Sound.setitem({index}), Index too large, max={self.__samples}')
value = numpy.int16(Sound.clamp(value))
self.__samples[index] = value
def _repr_html_(self) -> str:
audio = IPython.display.Audio(self.__samples, rate=int(self.__rate))
# noinspection PyProtectedMember
return audio._repr_html_() # pylint: disable=protected-access
def write(self, file_name: str) -> None:
"""
Write better docstring
:param file_name:
:return:
"""
file_name = str(file_name)
file_path = files.media_path(file_name)
wavfile.write(str(file_path), int(self.rate), self.__samples)
class Sample:
"""
Class level docstring
"""
def __init__(self, index, sound: Sound):
index = int(index)
if index < 0:
raise ValueError
if index >= sound.length:
raise ValueError
self.__sound: Sound = sound
self.__index: int = index
@property
def value(self) -> int:
"""
Write better docstring
:type: int
"""
return int(self.__sound[self.__index])
@value.setter
def value(self, val: int) -> None:
self.__sound[self.__index] = numpy.int16(Sound.clamp(val))
@property
def sound(self) -> Sound:
"""
Write better docstring
:type: Sound
"""
return self.__sound | MediaComp/sounds.py | # module typing is standard in Python 3.5+: https://docs.python.org/3/library/typing.html
# used for type hints used in static type checking in PEP 484
# PEP 484 -- Type Hints: https://www.python.org/dev/peps/pep-0484/
# PEP 525 -- Syntax for Variable Annotations: https://www.python.org/dev/peps/pep-0526/
# use mypy for static type checking of Pyhton code: http://mypy-lang.org/
# note that just because a parameter is annotated to be of a specific type, doesn't mean
# that at runtime it will actually be of that type: dynamic checking or casting/conversion
# still needs to be done
import typing
import os
import collections.abc
# suppress mypy "error: No library stub file for module 'numpy'"
import numpy # type: ignore
# using read & write methods of wavfile class in scipy.io module
# See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.read.html
# suppress mypy "error: No library stub file for module 'scipy.io'"
from scipy.io import wavfile # type: ignore
# The IPython.core.display module is specific to IPython which is used in Jupyter
# See https://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html
import IPython # type: ignore
from . import files
class Sound(collections.abc.Iterable):
"""
represents a sound read in from a WAV format file
"""
@classmethod
def from_file(cls, file_name: typing.Union[str, os.PathLike]) -> 'Sound':
"""
Write better docstring
:param file_name:
:return:
"""
path = files.media_path(str(file_name))
rate, data = wavfile.read(path)
return cls(numpy.copy(data), rate)
@classmethod
def make_empty(cls, num_samples: int, rate: float = 22500.0) -> 'Sound':
"""
Write beter docstring
:param int num_samples:
:param float rate:
:return:
:rtype: Sound
"""
num_samples = int(num_samples)
if num_samples < 0:
raise ValueError
data: numpy.array = numpy.zeros(num_samples, dtype=numpy.int32)
return cls(data, rate)
def __init__(self, data, rate=None):
self.__rate: float = float(rate)
self.__samples: numpy.array = data
def copy(self) -> 'Sound':
"""
write better docstring
:return: a copy of this Sound
:rtype: Sound
"""
data_copy: numpy.array = numpy.copy(self.__samples)
new_sound: 'Sound' = Sound(data_copy, self.__rate)
return new_sound
def __str__(self) -> str:
return f"Sound: {len(self)} samples at {self.__rate} sample/second"
def __iter__(self) -> typing.Iterator['Sample']:
for i in range(0, self.length):
yield Sample(i, self)
@property
def samples(self) -> typing.Iterator['Sample']:
"""
Write better docstring
"""
return iter(self)
@property
def length(self) -> int:
"""
number of samples in the sound
:type: int
"""
return len(self.__samples)
def __len__(self) -> int:
return len(self.__samples)
@property
def duration(self) -> float:
"""
number of seconds the sound lasts = length/rate
"""
return float(len(self.__samples))/float(self.__rate)
@property
def rate(self):
""" returns sampling rate of sound in samples per second (Hz) """
return self.__rate
def __getitem__(self, index: int) -> int:
index = int(index)
if index < 0:
raise IndexError(f'Sound.getitem({index}): Negative Index')
if index >= len(self.__samples):
raise IndexError(f'Sound.getitem({index}), Index too large, max={self.__samples}')
return int(self.__samples[index])
@staticmethod
def clamp(value: int) -> int:
"""
:param value:
:return:
"""
value = int(value)
if value > 32767:
value = 32767
elif value < -32768:
value = -32768
return value
def __setitem__(self, index: int, value: int) -> None:
"""
:param index:
:param value:
:return:
"""
index = int(index)
if index < 0:
raise IndexError(f'Sound.setitem({index}): Negative Index')
if index >= len(self):
raise IndexError(f'Sound.setitem({index}), Index too large, max={self.__samples}')
value = numpy.int16(Sound.clamp(value))
self.__samples[index] = value
def _repr_html_(self) -> str:
audio = IPython.display.Audio(self.__samples, rate=int(self.__rate))
# noinspection PyProtectedMember
return audio._repr_html_() # pylint: disable=protected-access
def write(self, file_name: str) -> None:
"""
Write better docstring
:param file_name:
:return:
"""
file_name = str(file_name)
file_path = files.media_path(file_name)
wavfile.write(str(file_path), int(self.rate), self.__samples)
class Sample:
"""
Class level docstring
"""
def __init__(self, index, sound: Sound):
index = int(index)
if index < 0:
raise ValueError
if index >= sound.length:
raise ValueError
self.__sound: Sound = sound
self.__index: int = index
@property
def value(self) -> int:
"""
Write better docstring
:type: int
"""
return int(self.__sound[self.__index])
@value.setter
def value(self, val: int) -> None:
self.__sound[self.__index] = numpy.int16(Sound.clamp(val))
@property
def sound(self) -> Sound:
"""
Write better docstring
:type: Sound
"""
return self.__sound | 0.850189 | 0.565419 |
"""Generates a self-signed CA cert or a CSR for an API Gateway domain."""
import argparse
import datetime
import os
import re
import shutil
import subprocess
import sys
import time
from utils.utils import VERSION, fail, runOpenSslCmd
def _parseArgs():
parser = argparse.ArgumentParser(
description="Generates a self-signed CA cert or a CSR for an API Gateway domain.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=("Examples\n"
"--------\n"
"# Generate default domain cert and key\n"
"./gen_domain_cert.py --default-cert\n\n"
"# Generate cert with domainId=mydomain, passphrase in /tmp/pass.txt\n"
"./gen_domain_cert.py --domain-id=mydomain --pass-file=/tmp/pass.txt\n\n"
"# Generate CSR with domainId=mydomain, passphrase in /tmp/pass.txt, O=MyOrg\n"
"./gen_domain_cert.py --domain-id=mydomain --pass-file=/tmp/pass.txt --out=csr --O=MyOrg"))
parser._action_groups.pop()
grp1 = parser.add_argument_group("arguments")
grp1.add_argument("--version", action="version", version=VERSION,
help="Show version information and exit.")
grp1.add_argument("--domain-id", dest="domainId",
help="Unique ID for API Gateway domain. Used as the common name (CN) in the domain "
"CA cert. Permitted characters: [A-Za-z0-9_-]. Must start with a letter, max length 32.")
grp1.add_argument("--pass-file", dest="passFile",
help="File containing passphrase for the domain private key.")
grp1.add_argument("--out", dest="out", choices=["self-signed-cert", "csr"], default="self-signed-cert",
help="Output type (default: self-signed-cert).")
grp1.add_argument("--force", dest="force", action='store_true', default=False,
help="Overwrite existing cert/CSR for this domain-id.")
grp1.add_argument("--sign-alg", dest="signAlg", choices=["SHA256", "SHA384", "SHA512"], default="SHA256",
help="Signing algorithm for self-signed domain cert (default: SHA256).")
grp1.add_argument("--O", dest="org",
help="Value for O (organization) field in the domain cert, e.g., Sales Org.")
grp1.add_argument("--OU", dest="orgUnit",
help="Value for OU (organizational unit) field in the domain cert, e.g., Staging.")
grp1.add_argument("--C", dest="country",
help="Value for C (country) field in the domain cert, e.g., US.")
grp1.add_argument("--ST", dest="state",
help="Value for the ST (state/county/region) field in the domain cert, e.g., New York.")
grp1.add_argument("--L", dest="locality",
help="Value for the L (locality/city) field in the domain cert, e.g., Rochester.")
grp2 = parser.add_argument_group("arguments for NON-PRODUCTION environment")
grp2.add_argument("--default-cert", dest="defaultCert", action="store_true", default=False,
help="Generate default cert and key. Equivalent to specifying "
"domain-id=DefaultDomain, passphrase=<PASSWORD>.")
# Print help if script called without arguments
if len(sys.argv) == 1:
parser.print_help()
parser.exit()
return parser.parse_args()
def _validateArgs():
if args.defaultCert:
if (args.domainId, args.passFile, args.out) != (None, None, "self-signed-cert"):
fail("If you specify --default-cert, cannot also specify --domain-id, --pass-file or --out=csr.")
args.domainId = "DefaultDomain"
else:
if None in (args.domainId, args.passFile):
fail("Must specify --default-cert or both --domain-id and --pass-file.")
if not re.match("^[A-Za-z]{1}[A-Za-z0-9_-]{0,31}$", args.domainId):
fail("Invalid domain name: '%s'. Permitted characters: [A-Za-z0-9_-]. "
"Must start with a letter, max length 32." % args.domainId)
if not os.path.exists(args.passFile):
fail("Password file does not exist: %s" % args.passFile)
minPassphraseLength = 4
with open(args.passFile, 'r') as f:
content = f.read()
contentNoLFCR = content.rstrip('\r\n')
if (len(contentNoLFCR) < minPassphraseLength):
fail("Passphase provided is too short. Length is %d, expected >= %d." % \
(len(contentNoLFCR), minPassphraseLength))
def _setup():
# Create directory to hold generated key and csr/cert
generatedCertsDir = os.path.join(os.path.dirname(__file__), "certs", args.domainId)
if os.path.exists(generatedCertsDir):
if args.force:
print("Removing existing output directory: %s" % generatedCertsDir)
shutil.rmtree(generatedCertsDir)
else:
fail("Output directory already exists for this domain-id: %s\nUse --force to overwrite." % generatedCertsDir)
os.makedirs(generatedCertsDir)
# Create temporary passphrase file if default cert being generated
if args.defaultCert:
args.passFile = os.path.join(generatedCertsDir, "default-pass-file.txt")
with open(args.passFile, 'w') as f:
f.write("<PASSWORD>")
# Instantiate openssl conf file from template
openSslTemplate = os.path.join(os.path.dirname(__file__), "utils", "openssl-template.cnf")
opensslCnfFile = os.path.join(generatedCertsDir, "openssl.cnf")
shutil.copyfile(openSslTemplate, opensslCnfFile)
with open(opensslCnfFile) as f:
s = f.read()
s = re.sub(r"basedir = \?", "basedir = %s" % generatedCertsDir, s)
s = re.sub("domaincert.pem", "%s-cert.pem" % args.domainId, s)
s = re.sub("domainkey", "%s-key" % args.domainId, s)
with open(opensslCnfFile, 'w') as f:
f.write(s)
os.environ["OPENSSL_CONF"] = opensslCnfFile
# Create openssl helper files
with open(os.path.join(generatedCertsDir, "index.txt"), 'w') as _:
pass
with open(os.path.join(generatedCertsDir, "serial"), 'w') as serialFile:
firstserial = hex(int(round(time.time() * 1000)))[2:]
if len(firstserial) % 2 != 0:
firstserial = "0%s" % firstserial
serialFile.write(firstserial)
return (opensslCnfFile, generatedCertsDir)
def _createDomainCert():
privateKeyFile = _generatePrivateKey()
csrFile = _generateCSR(privateKeyFile)
if args.out == "csr":
print("Done.\n\nPrivate key: %s\nCSR: %s\n\nThis CSR must be signed by an "
"external CA to produce a domain cert." % (privateKeyFile, csrFile))
else:
certFile = _generateCert(csrFile)
print("Done.\n\nPrivate key: %s\nDomain cert: %s" % (privateKeyFile, certFile))
def _generatePrivateKey():
pemFilename = os.path.join(generatedCertsPath, "%s-key.pem" % args.domainId)
try:
print("Generating private key...")
opensslCmd = "openssl genrsa -out %s -des -passout file:%s 2048" % (pemFilename, args.passFile)
runOpenSslCmd(opensslCmd)
except IOError as e:
fail("Failed to generate Private Key: %s" % os.strerror(e.errno))
return pemFilename
def _generateCSR(privateKeyFile):
print("Generating CSR...")
domainInfo = _getDomainInfo()
csrFile = os.path.join(generatedCertsPath, "%s.csr" % args.domainId)
params = {"signAlg":args.signAlg, "privateKeyFile":privateKeyFile, "csrFile":csrFile,
"domainInfo":domainInfo, "opensslCnf": opensslCnf, "passFile":args.passFile}
opensslCmd = ('openssl req -%(signAlg)s -new -key "%(privateKeyFile)s" -out "%(csrFile)s" -subj "%(domainInfo)s" '
'-reqexts domain_extensions -config "%(opensslCnf)s" -passin file:"%(passFile)s"' % params)
try:
runOpenSslCmd(opensslCmd)
except IOError as e:
fail("Failed to generate CSR: %s" % os.strerror(e.errno))
return csrFile
def _getDomainInfo():
certFields = {"O":args.org, "OU":args.orgUnit, "C":args.country, "ST":args.state, "L":args.locality}
domainInfo = "/CN=%s" % args.domainId
for key, value in certFields.items():
if value:
domainInfo += "/%s=%s" % (key, value)
return domainInfo
def _generateCert(csrFile):
print("Generating self-signed domain cert...")
certFile = os.path.join(generatedCertsPath, "%s-cert.pem" % args.domainId)
startDate = _getStartDate()
params = {"startDate":startDate, "signAlg":args.signAlg, "csrFile":csrFile,
"certFile":certFile, "opensslCnf":opensslCnf, "passFile":args.passFile}
# Specify -extfile to get a v3 certificate. Need a v3 certificate for SSL to work.
# Do not specify -extensions section as we want to copy extensions from the CSR via
# "copy_extensions = copyall" in openssl.cnf.
opensslCmd = ('openssl ca -startdate %(startDate)s -md %(signAlg)s -in "%(csrFile)s" -out "%(certFile)s" '
'-extfile "%(opensslCnf)s" -batch -notext -passin file:"%(passFile)s" -selfsign' % params)
try:
runOpenSslCmd(opensslCmd)
except IOError as e:
fail("Failed to generate certificate: %s" % os.strerror(e.errno))
return certFile
def _getStartDate():
datetimeFormat = "%y%m%d%H%M%SZ"
now = datetime.datetime.utcnow()
start = now + datetime.timedelta(days=-7)
return start.strftime(datetimeFormat)
def _cleanup():
for fname in os.listdir(generatedCertsPath):
fpath = os.path.join(generatedCertsPath, fname)
try:
if args.out == "self-signed-cert":
if "-cert.pem" not in fname and "-key.pem" not in fname:
os.unlink(fpath)
else:
if ".csr" not in fname and "-key.pem" not in fname:
os.unlink(fpath)
except Exception as e:
print("Error cleaning up: %s" % e)
if __name__ == "__main__":
args = _parseArgs()
_validateArgs()
# Verify that openssl is installed
runOpenSslCmd("openssl version")
opensslCnf, generatedCertsPath = _setup()
_createDomainCert()
_cleanup() | gen_domain_cert.py | """Generates a self-signed CA cert or a CSR for an API Gateway domain."""
import argparse
import datetime
import os
import re
import shutil
import subprocess
import sys
import time
from utils.utils import VERSION, fail, runOpenSslCmd
def _parseArgs():
parser = argparse.ArgumentParser(
description="Generates a self-signed CA cert or a CSR for an API Gateway domain.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=("Examples\n"
"--------\n"
"# Generate default domain cert and key\n"
"./gen_domain_cert.py --default-cert\n\n"
"# Generate cert with domainId=mydomain, passphrase in /tmp/pass.txt\n"
"./gen_domain_cert.py --domain-id=mydomain --pass-file=/tmp/pass.txt\n\n"
"# Generate CSR with domainId=mydomain, passphrase in /tmp/pass.txt, O=MyOrg\n"
"./gen_domain_cert.py --domain-id=mydomain --pass-file=/tmp/pass.txt --out=csr --O=MyOrg"))
parser._action_groups.pop()
grp1 = parser.add_argument_group("arguments")
grp1.add_argument("--version", action="version", version=VERSION,
help="Show version information and exit.")
grp1.add_argument("--domain-id", dest="domainId",
help="Unique ID for API Gateway domain. Used as the common name (CN) in the domain "
"CA cert. Permitted characters: [A-Za-z0-9_-]. Must start with a letter, max length 32.")
grp1.add_argument("--pass-file", dest="passFile",
help="File containing passphrase for the domain private key.")
grp1.add_argument("--out", dest="out", choices=["self-signed-cert", "csr"], default="self-signed-cert",
help="Output type (default: self-signed-cert).")
grp1.add_argument("--force", dest="force", action='store_true', default=False,
help="Overwrite existing cert/CSR for this domain-id.")
grp1.add_argument("--sign-alg", dest="signAlg", choices=["SHA256", "SHA384", "SHA512"], default="SHA256",
help="Signing algorithm for self-signed domain cert (default: SHA256).")
grp1.add_argument("--O", dest="org",
help="Value for O (organization) field in the domain cert, e.g., Sales Org.")
grp1.add_argument("--OU", dest="orgUnit",
help="Value for OU (organizational unit) field in the domain cert, e.g., Staging.")
grp1.add_argument("--C", dest="country",
help="Value for C (country) field in the domain cert, e.g., US.")
grp1.add_argument("--ST", dest="state",
help="Value for the ST (state/county/region) field in the domain cert, e.g., New York.")
grp1.add_argument("--L", dest="locality",
help="Value for the L (locality/city) field in the domain cert, e.g., Rochester.")
grp2 = parser.add_argument_group("arguments for NON-PRODUCTION environment")
grp2.add_argument("--default-cert", dest="defaultCert", action="store_true", default=False,
help="Generate default cert and key. Equivalent to specifying "
"domain-id=DefaultDomain, passphrase=<PASSWORD>.")
# Print help if script called without arguments
if len(sys.argv) == 1:
parser.print_help()
parser.exit()
return parser.parse_args()
def _validateArgs():
if args.defaultCert:
if (args.domainId, args.passFile, args.out) != (None, None, "self-signed-cert"):
fail("If you specify --default-cert, cannot also specify --domain-id, --pass-file or --out=csr.")
args.domainId = "DefaultDomain"
else:
if None in (args.domainId, args.passFile):
fail("Must specify --default-cert or both --domain-id and --pass-file.")
if not re.match("^[A-Za-z]{1}[A-Za-z0-9_-]{0,31}$", args.domainId):
fail("Invalid domain name: '%s'. Permitted characters: [A-Za-z0-9_-]. "
"Must start with a letter, max length 32." % args.domainId)
if not os.path.exists(args.passFile):
fail("Password file does not exist: %s" % args.passFile)
minPassphraseLength = 4
with open(args.passFile, 'r') as f:
content = f.read()
contentNoLFCR = content.rstrip('\r\n')
if (len(contentNoLFCR) < minPassphraseLength):
fail("Passphase provided is too short. Length is %d, expected >= %d." % \
(len(contentNoLFCR), minPassphraseLength))
def _setup():
# Create directory to hold generated key and csr/cert
generatedCertsDir = os.path.join(os.path.dirname(__file__), "certs", args.domainId)
if os.path.exists(generatedCertsDir):
if args.force:
print("Removing existing output directory: %s" % generatedCertsDir)
shutil.rmtree(generatedCertsDir)
else:
fail("Output directory already exists for this domain-id: %s\nUse --force to overwrite." % generatedCertsDir)
os.makedirs(generatedCertsDir)
# Create temporary passphrase file if default cert being generated
if args.defaultCert:
args.passFile = os.path.join(generatedCertsDir, "default-pass-file.txt")
with open(args.passFile, 'w') as f:
f.write("<PASSWORD>")
# Instantiate openssl conf file from template
openSslTemplate = os.path.join(os.path.dirname(__file__), "utils", "openssl-template.cnf")
opensslCnfFile = os.path.join(generatedCertsDir, "openssl.cnf")
shutil.copyfile(openSslTemplate, opensslCnfFile)
with open(opensslCnfFile) as f:
s = f.read()
s = re.sub(r"basedir = \?", "basedir = %s" % generatedCertsDir, s)
s = re.sub("domaincert.pem", "%s-cert.pem" % args.domainId, s)
s = re.sub("domainkey", "%s-key" % args.domainId, s)
with open(opensslCnfFile, 'w') as f:
f.write(s)
os.environ["OPENSSL_CONF"] = opensslCnfFile
# Create openssl helper files
with open(os.path.join(generatedCertsDir, "index.txt"), 'w') as _:
pass
with open(os.path.join(generatedCertsDir, "serial"), 'w') as serialFile:
firstserial = hex(int(round(time.time() * 1000)))[2:]
if len(firstserial) % 2 != 0:
firstserial = "0%s" % firstserial
serialFile.write(firstserial)
return (opensslCnfFile, generatedCertsDir)
def _createDomainCert():
privateKeyFile = _generatePrivateKey()
csrFile = _generateCSR(privateKeyFile)
if args.out == "csr":
print("Done.\n\nPrivate key: %s\nCSR: %s\n\nThis CSR must be signed by an "
"external CA to produce a domain cert." % (privateKeyFile, csrFile))
else:
certFile = _generateCert(csrFile)
print("Done.\n\nPrivate key: %s\nDomain cert: %s" % (privateKeyFile, certFile))
def _generatePrivateKey():
pemFilename = os.path.join(generatedCertsPath, "%s-key.pem" % args.domainId)
try:
print("Generating private key...")
opensslCmd = "openssl genrsa -out %s -des -passout file:%s 2048" % (pemFilename, args.passFile)
runOpenSslCmd(opensslCmd)
except IOError as e:
fail("Failed to generate Private Key: %s" % os.strerror(e.errno))
return pemFilename
def _generateCSR(privateKeyFile):
print("Generating CSR...")
domainInfo = _getDomainInfo()
csrFile = os.path.join(generatedCertsPath, "%s.csr" % args.domainId)
params = {"signAlg":args.signAlg, "privateKeyFile":privateKeyFile, "csrFile":csrFile,
"domainInfo":domainInfo, "opensslCnf": opensslCnf, "passFile":args.passFile}
opensslCmd = ('openssl req -%(signAlg)s -new -key "%(privateKeyFile)s" -out "%(csrFile)s" -subj "%(domainInfo)s" '
'-reqexts domain_extensions -config "%(opensslCnf)s" -passin file:"%(passFile)s"' % params)
try:
runOpenSslCmd(opensslCmd)
except IOError as e:
fail("Failed to generate CSR: %s" % os.strerror(e.errno))
return csrFile
def _getDomainInfo():
certFields = {"O":args.org, "OU":args.orgUnit, "C":args.country, "ST":args.state, "L":args.locality}
domainInfo = "/CN=%s" % args.domainId
for key, value in certFields.items():
if value:
domainInfo += "/%s=%s" % (key, value)
return domainInfo
def _generateCert(csrFile):
print("Generating self-signed domain cert...")
certFile = os.path.join(generatedCertsPath, "%s-cert.pem" % args.domainId)
startDate = _getStartDate()
params = {"startDate":startDate, "signAlg":args.signAlg, "csrFile":csrFile,
"certFile":certFile, "opensslCnf":opensslCnf, "passFile":args.passFile}
# Specify -extfile to get a v3 certificate. Need a v3 certificate for SSL to work.
# Do not specify -extensions section as we want to copy extensions from the CSR via
# "copy_extensions = copyall" in openssl.cnf.
opensslCmd = ('openssl ca -startdate %(startDate)s -md %(signAlg)s -in "%(csrFile)s" -out "%(certFile)s" '
'-extfile "%(opensslCnf)s" -batch -notext -passin file:"%(passFile)s" -selfsign' % params)
try:
runOpenSslCmd(opensslCmd)
except IOError as e:
fail("Failed to generate certificate: %s" % os.strerror(e.errno))
return certFile
def _getStartDate():
datetimeFormat = "%y%m%d%H%M%SZ"
now = datetime.datetime.utcnow()
start = now + datetime.timedelta(days=-7)
return start.strftime(datetimeFormat)
def _cleanup():
for fname in os.listdir(generatedCertsPath):
fpath = os.path.join(generatedCertsPath, fname)
try:
if args.out == "self-signed-cert":
if "-cert.pem" not in fname and "-key.pem" not in fname:
os.unlink(fpath)
else:
if ".csr" not in fname and "-key.pem" not in fname:
os.unlink(fpath)
except Exception as e:
print("Error cleaning up: %s" % e)
if __name__ == "__main__":
args = _parseArgs()
_validateArgs()
# Verify that openssl is installed
runOpenSslCmd("openssl version")
opensslCnf, generatedCertsPath = _setup()
_createDomainCert()
_cleanup() | 0.486332 | 0.153454 |
from __future__ import unicode_literals
import json
from multiprocessing import Process
from willie import web
from willie.module import commands, example, NOLIMIT, interval
def poll_minecraft(bot):
url = bot.config.minecraft.url
try:
minecraft_data = json.loads(web.get(url))
players = [player['name'] for player in minecraft_data['players']]
return players
except Exception as e:
print "Unable to enumerate players: %s" % e
return None
def configure(config):
if config.option('Monitor a minecraft server for logins/logouts?',False):
config.add_section('minecraft')
config.interactive_add('minecraft','url','URL to the Dynmap JSON output (typically http://<minecraft_server>/up/world/world/):','')
config.add_list('minecraft','channels','Channels to display joins/parts to','Channel:')
@interval(15)
def check_for_changed_players(bot):
"""
check to see if any players have joined/left
every 15 seconds
"""
if not (bot.config.has_option('minecraft','url')):
return
if not (bot.config.minecraft.get_list('channels')):
return
channels = bot.config.minecraft.get_list('channels')
players = poll_minecraft(bot)
if players is None:
return
last_onlines = []
try:
last_onlines = bot.memory['last_onlines']
except KeyError:
bot.memory['last_onlines'] = players
last_onlines = players
for pname in players:
if len(pname) > 0:
if pname in last_onlines:
# we've seen this user before
pass
else:
# this user is newly joined
for channel in channels:
bot.msg(channel, "[minecraft] %s joined the server" % pname)
for pname in last_onlines:
if len(pname) > 0:
if pname in players:
# this player is currently online
pass
else:
# this player is no longer online
for channel in channels:
bot.msg(channel, "[minecraft] %s quit the server" % pname)
bot.memory['last_onlines'] = players
@commands('online', 'minecraft')
@example('online - shows which users are logged into the minecraft server')
def who_is_online(bot, trigger):
result = poll_minecraft(bot)
if len(result) == 0:
onlines = "[minecraft] Nobody is currently online."
elif result is None:
onlines = "[minecraft] Couldn't fetch the list of online users. Try again later."
else
onlines = "[minecraft] Players currently online: %s" % ", ".join(result)
bot.say(onlines) | willie/modules/minecraft_logins.py | from __future__ import unicode_literals
import json
from multiprocessing import Process
from willie import web
from willie.module import commands, example, NOLIMIT, interval
def poll_minecraft(bot):
url = bot.config.minecraft.url
try:
minecraft_data = json.loads(web.get(url))
players = [player['name'] for player in minecraft_data['players']]
return players
except Exception as e:
print "Unable to enumerate players: %s" % e
return None
def configure(config):
if config.option('Monitor a minecraft server for logins/logouts?',False):
config.add_section('minecraft')
config.interactive_add('minecraft','url','URL to the Dynmap JSON output (typically http://<minecraft_server>/up/world/world/):','')
config.add_list('minecraft','channels','Channels to display joins/parts to','Channel:')
@interval(15)
def check_for_changed_players(bot):
"""
check to see if any players have joined/left
every 15 seconds
"""
if not (bot.config.has_option('minecraft','url')):
return
if not (bot.config.minecraft.get_list('channels')):
return
channels = bot.config.minecraft.get_list('channels')
players = poll_minecraft(bot)
if players is None:
return
last_onlines = []
try:
last_onlines = bot.memory['last_onlines']
except KeyError:
bot.memory['last_onlines'] = players
last_onlines = players
for pname in players:
if len(pname) > 0:
if pname in last_onlines:
# we've seen this user before
pass
else:
# this user is newly joined
for channel in channels:
bot.msg(channel, "[minecraft] %s joined the server" % pname)
for pname in last_onlines:
if len(pname) > 0:
if pname in players:
# this player is currently online
pass
else:
# this player is no longer online
for channel in channels:
bot.msg(channel, "[minecraft] %s quit the server" % pname)
bot.memory['last_onlines'] = players
@commands('online', 'minecraft')
@example('online - shows which users are logged into the minecraft server')
def who_is_online(bot, trigger):
result = poll_minecraft(bot)
if len(result) == 0:
onlines = "[minecraft] Nobody is currently online."
elif result is None:
onlines = "[minecraft] Couldn't fetch the list of online users. Try again later."
else
onlines = "[minecraft] Players currently online: %s" % ", ".join(result)
bot.say(onlines) | 0.316581 | 0.099602 |
import re
import time
from selenium.webdriver.common.by import By
from common.utils import plex_find_lib, get_static_html, text_format
from common.dictionary import translate_text, convert_chinese_number
def get_metadata(driver, plex, plex_title="", replace_poster="", print_only=False, season_index=1):
if len(driver.find_elements(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zhtw']")) > 0:
title = driver.find_element(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zhtw']").get_attribute('data-title')
elif len(driver.find_elements(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zho']")) > 0:
title = driver.find_element(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zho']").get_attribute('data-title')
print(f"\n{title}")
if not print_only:
show = plex_find_lib(plex, 'show', plex_title, title)
season_url = f'{driver.current_url}/seasons/official/{season_index}'
print(f"\n第 {season_index} 季")
driver.get(season_url)
time.sleep(1)
for episode in driver.find_elements(By.XPATH, '//tr')[1:]:
cells = episode.find_elements(
By.TAG_NAME, 'td')
episode_regex = re.search(
r'S([0-9]+)E([0-9]+)', cells[0].text)
episode_index = int(episode_regex.group(2))
episode_url = cells[1].find_element(
By.TAG_NAME, 'a').get_attribute('href')
html_page = get_static_html(episode_url)
episode_detail = ''
if html_page.find('div', {'data-language': 'zhtw'}):
episode_detail = html_page.find('div', {'data-language': 'zhtw'})
elif html_page.find('div', {'data-language': 'zho'}):
episode_detail = html_page.find('div', {'data-language': 'zho'})
elif html_page.find('div', {'data-language': 'yue'}):
episode_detail = html_page.find('div', {'data-language': 'yue'})
if episode_detail:
episode_title = episode_detail['data-title'].strip()
episode_synopsis = episode_detail.get_text(strip=True)
# print('episode_title', episode_title)
# print('episode_synopsis', episode_synopsis)
if episode_title and episode_synopsis:
if re.search(r'^第[0-9 ]+集$', episode_title):
episode_title = f'第 {episode_index} 集'
elif re.search(r'^第.+集$', episode_title):
episode_number = int(convert_chinese_number(
episode_title.replace('第', '').replace('集', '').strip()))
episode_title = f'第 {episode_number} 集'
else:
episode_title = re.sub(
r'第[0-9 ]+集.+', '', episode_title).strip()
episode_synopsis = text_format(
episode_synopsis)
elif episode_title and not episode_synopsis:
if re.search(r'^第[0-9 ]+集$', episode_title):
episode_title = f'第 {episode_index} 集'
else:
episode_title = text_format(episode_title)
episode_synopsis = ''
else:
episode_title = f'第 {episode_index} 集'
episode_synopsis = text_format(episode_synopsis)
if re.search(r'第 [0-9]+ 集', episode_title) and re.search(r'[\u4E00-\u9FFF]', show.season(season_index).episode(episode_index).title) and not re.search(r'^[剧第]([0-9 ]+)集$', show.season(season_index).episode(episode_index).title):
episode_title = show.season(
season_index).episode(episode_index).title
if not episode_synopsis and re.search(r'[\u4E00-\u9FFF]', show.season(season_index).episode(episode_index).summary):
episode_synopsis = text_format(show.season(
season_index).episode(episode_index).summary)
print(f"\n{episode_title}\n{episode_synopsis}")
if not print_only and episode_index:
show.season(season_index).episode(episode_index).edit(**{
"title.value": episode_title,
"title.locked": 1,
"summary.value": episode_synopsis,
"summary.locked": 1,
})
if not print_only and episode_index:
if html_page.find('a', class_='thumbnail'):
episode_poster = html_page.find(
'a', class_='thumbnail').find('img')['src']
if replace_poster and episode_poster:
show.season(season_index).episode(
episode_index).uploadPoster(url=episode_poster)
driver.quit() | services/thetvdb.py | import re
import time
from selenium.webdriver.common.by import By
from common.utils import plex_find_lib, get_static_html, text_format
from common.dictionary import translate_text, convert_chinese_number
def get_metadata(driver, plex, plex_title="", replace_poster="", print_only=False, season_index=1):
if len(driver.find_elements(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zhtw']")) > 0:
title = driver.find_element(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zhtw']").get_attribute('data-title')
elif len(driver.find_elements(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zho']")) > 0:
title = driver.find_element(By.XPATH,
"//div[@class='change_translation_text'][@data-language='zho']").get_attribute('data-title')
print(f"\n{title}")
if not print_only:
show = plex_find_lib(plex, 'show', plex_title, title)
season_url = f'{driver.current_url}/seasons/official/{season_index}'
print(f"\n第 {season_index} 季")
driver.get(season_url)
time.sleep(1)
for episode in driver.find_elements(By.XPATH, '//tr')[1:]:
cells = episode.find_elements(
By.TAG_NAME, 'td')
episode_regex = re.search(
r'S([0-9]+)E([0-9]+)', cells[0].text)
episode_index = int(episode_regex.group(2))
episode_url = cells[1].find_element(
By.TAG_NAME, 'a').get_attribute('href')
html_page = get_static_html(episode_url)
episode_detail = ''
if html_page.find('div', {'data-language': 'zhtw'}):
episode_detail = html_page.find('div', {'data-language': 'zhtw'})
elif html_page.find('div', {'data-language': 'zho'}):
episode_detail = html_page.find('div', {'data-language': 'zho'})
elif html_page.find('div', {'data-language': 'yue'}):
episode_detail = html_page.find('div', {'data-language': 'yue'})
if episode_detail:
episode_title = episode_detail['data-title'].strip()
episode_synopsis = episode_detail.get_text(strip=True)
# print('episode_title', episode_title)
# print('episode_synopsis', episode_synopsis)
if episode_title and episode_synopsis:
if re.search(r'^第[0-9 ]+集$', episode_title):
episode_title = f'第 {episode_index} 集'
elif re.search(r'^第.+集$', episode_title):
episode_number = int(convert_chinese_number(
episode_title.replace('第', '').replace('集', '').strip()))
episode_title = f'第 {episode_number} 集'
else:
episode_title = re.sub(
r'第[0-9 ]+集.+', '', episode_title).strip()
episode_synopsis = text_format(
episode_synopsis)
elif episode_title and not episode_synopsis:
if re.search(r'^第[0-9 ]+集$', episode_title):
episode_title = f'第 {episode_index} 集'
else:
episode_title = text_format(episode_title)
episode_synopsis = ''
else:
episode_title = f'第 {episode_index} 集'
episode_synopsis = text_format(episode_synopsis)
if re.search(r'第 [0-9]+ 集', episode_title) and re.search(r'[\u4E00-\u9FFF]', show.season(season_index).episode(episode_index).title) and not re.search(r'^[剧第]([0-9 ]+)集$', show.season(season_index).episode(episode_index).title):
episode_title = show.season(
season_index).episode(episode_index).title
if not episode_synopsis and re.search(r'[\u4E00-\u9FFF]', show.season(season_index).episode(episode_index).summary):
episode_synopsis = text_format(show.season(
season_index).episode(episode_index).summary)
print(f"\n{episode_title}\n{episode_synopsis}")
if not print_only and episode_index:
show.season(season_index).episode(episode_index).edit(**{
"title.value": episode_title,
"title.locked": 1,
"summary.value": episode_synopsis,
"summary.locked": 1,
})
if not print_only and episode_index:
if html_page.find('a', class_='thumbnail'):
episode_poster = html_page.find(
'a', class_='thumbnail').find('img')['src']
if replace_poster and episode_poster:
show.season(season_index).episode(
episode_index).uploadPoster(url=episode_poster)
driver.quit() | 0.062474 | 0.08698 |
import csv
import os
import copy
import shutil
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from scrapy import log
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class shoebaccaSpider(BaseSpider):
name = 'shoebacca.com'
allowed_domains = ['shoebacca.com','www.shoebacca.com']
def start_requests(self):
shutil.copy(os.path.join(HERE, 'shoemetroall.csv'),os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur'))
with open(os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['sku']
"""
brand = row['brand']
style = row['style']
query = (brand + ' ' + style).replace(' ', '%20')
"""
query = row['name'].replace(' ', '+')
url = 'http://www.shoebacca.com/finder/?query=%s&search_form=1&sort=price-low-high'
yield Request(url % query, meta={'sku': sku, 'name': query})
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
products = hxs.select('//ul[@id="finder-data"]/li')
if not products:
return
product = products[0]
loader = ProductLoader(item=Product(), selector=product)
name = "".join(product.select('./a/div/h5/span/text()').extract())
if name:
name2 = "".join(product.select('./a/div/h5/text()').extract())
url = product.select('./a/@href').extract()[0]
price = "".join(product.select('./a/div[@class="p-price"]/text()').re(r'([0-9\,\. ]+)')).strip()
if not price:
price = "".join(product.select('./a/div[@class="p-price"]/span[@class="sale-price"]/text()').re(r'([0-9\,\. ]+)')).strip()
loader.add_value('name', name.strip() + ' ' + name2.strip())
loader.add_value('url', urljoin_rfc(base_url,url))
loader.add_value('price', price)
loader.add_value('sku', response.meta['sku'])
if not 'apparelsave' in loader.get_output_value('name').lower():
yield loader.load_item() | portfolio/Python/scrapy/shoemetro/shoebacca.py | import csv
import os
import copy
import shutil
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from scrapy import log
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class shoebaccaSpider(BaseSpider):
name = 'shoebacca.com'
allowed_domains = ['shoebacca.com','www.shoebacca.com']
def start_requests(self):
shutil.copy(os.path.join(HERE, 'shoemetroall.csv'),os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur'))
with open(os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['sku']
"""
brand = row['brand']
style = row['style']
query = (brand + ' ' + style).replace(' ', '%20')
"""
query = row['name'].replace(' ', '+')
url = 'http://www.shoebacca.com/finder/?query=%s&search_form=1&sort=price-low-high'
yield Request(url % query, meta={'sku': sku, 'name': query})
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
products = hxs.select('//ul[@id="finder-data"]/li')
if not products:
return
product = products[0]
loader = ProductLoader(item=Product(), selector=product)
name = "".join(product.select('./a/div/h5/span/text()').extract())
if name:
name2 = "".join(product.select('./a/div/h5/text()').extract())
url = product.select('./a/@href').extract()[0]
price = "".join(product.select('./a/div[@class="p-price"]/text()').re(r'([0-9\,\. ]+)')).strip()
if not price:
price = "".join(product.select('./a/div[@class="p-price"]/span[@class="sale-price"]/text()').re(r'([0-9\,\. ]+)')).strip()
loader.add_value('name', name.strip() + ' ' + name2.strip())
loader.add_value('url', urljoin_rfc(base_url,url))
loader.add_value('price', price)
loader.add_value('sku', response.meta['sku'])
if not 'apparelsave' in loader.get_output_value('name').lower():
yield loader.load_item() | 0.289773 | 0.066146 |
import discord
def getPermissionJson(name, value):
return {
"permissionName": name,
"allow": value
}
def getCategoryJson(category):
return {
"name": category.name,
"type": str(category.type),
"nsfw": category.is_nsfw(),
"permissions": [getChannelPermissionJson(role, category.overwrites[role]) for role in category.overwrites.keys()]
}
def getChannelPermissionJson(role, perms):
return {
"roleName": role.name,
"permissions": [getPermissionJson(perm, value) for (perm, value) in iter(perms) if value != None]
}
def getRoleJson(role):
assert type(role) == discord.Role
permissions = role.permissions
return {
"name": role.name,
"permissions": [getPermissionJson(perm, value) for (perm, value) in iter(role.permissions)],
"settings": {
"color": list(role.color.to_rgb()),
"mention": role.mentionable,
"displaySeparate": role.hoist
}
}
def getTextChannelJson(text_channel):
assert type(text_channel) == discord.TextChannel
return {
"name": text_channel.name,
"topic": text_channel.topic,
"position": text_channel.position,
"nsfw": text_channel.is_nsfw(),
"slowmode_delay": text_channel.slowmode_delay,
"permissions": [getChannelPermissionJson(role, text_channel.overwrites[role]) for role in text_channel.overwrites.keys()],
"categoryName": text_channel.category.name if text_channel.category else None
}
def getVoiceChannelJson(voice_channel):
assert type(voice_channel) == discord.VoiceChannel
return {
"name": voice_channel.name,
"position": voice_channel.position,
"bitrate": voice_channel.bitrate,
"user_limit": voice_channel.user_limit,
"permissions": [getChannelPermissionJson(role, voice_channel.overwrites[role]) for role in voice_channel.overwrites.keys()],
"categoryName": voice_channel.category.name if voice_channel.category else None
}
def getServerJson(server):
"""
Converts the given server into a template JSON following the template_server_schema.json format.
"""
assert type(server) == discord.Guild, "server must be discord.Guild, not: " + str(type(server))
d = {}
d['serverName'] = server.name
d['roles'] = []
for r in server.roles:
d['roles'].append(getRoleJson(r))
d['categories'] = []
for c in server.categories:
d['categories'].append(getCategoryJson(c))
d['textChannels'] = []
for t in server.text_channels:
d['textChannels'].append(getTextChannelJson(t))
d['voiceChannels'] = []
for v in server.voice_channels:
d['voiceChannels'].append(getVoiceChannelJson(v))
return d | template_server_serializer.py | import discord
def getPermissionJson(name, value):
return {
"permissionName": name,
"allow": value
}
def getCategoryJson(category):
return {
"name": category.name,
"type": str(category.type),
"nsfw": category.is_nsfw(),
"permissions": [getChannelPermissionJson(role, category.overwrites[role]) for role in category.overwrites.keys()]
}
def getChannelPermissionJson(role, perms):
return {
"roleName": role.name,
"permissions": [getPermissionJson(perm, value) for (perm, value) in iter(perms) if value != None]
}
def getRoleJson(role):
assert type(role) == discord.Role
permissions = role.permissions
return {
"name": role.name,
"permissions": [getPermissionJson(perm, value) for (perm, value) in iter(role.permissions)],
"settings": {
"color": list(role.color.to_rgb()),
"mention": role.mentionable,
"displaySeparate": role.hoist
}
}
def getTextChannelJson(text_channel):
assert type(text_channel) == discord.TextChannel
return {
"name": text_channel.name,
"topic": text_channel.topic,
"position": text_channel.position,
"nsfw": text_channel.is_nsfw(),
"slowmode_delay": text_channel.slowmode_delay,
"permissions": [getChannelPermissionJson(role, text_channel.overwrites[role]) for role in text_channel.overwrites.keys()],
"categoryName": text_channel.category.name if text_channel.category else None
}
def getVoiceChannelJson(voice_channel):
assert type(voice_channel) == discord.VoiceChannel
return {
"name": voice_channel.name,
"position": voice_channel.position,
"bitrate": voice_channel.bitrate,
"user_limit": voice_channel.user_limit,
"permissions": [getChannelPermissionJson(role, voice_channel.overwrites[role]) for role in voice_channel.overwrites.keys()],
"categoryName": voice_channel.category.name if voice_channel.category else None
}
def getServerJson(server):
"""
Converts the given server into a template JSON following the template_server_schema.json format.
"""
assert type(server) == discord.Guild, "server must be discord.Guild, not: " + str(type(server))
d = {}
d['serverName'] = server.name
d['roles'] = []
for r in server.roles:
d['roles'].append(getRoleJson(r))
d['categories'] = []
for c in server.categories:
d['categories'].append(getCategoryJson(c))
d['textChannels'] = []
for t in server.text_channels:
d['textChannels'].append(getTextChannelJson(t))
d['voiceChannels'] = []
for v in server.voice_channels:
d['voiceChannels'].append(getVoiceChannelJson(v))
return d | 0.533641 | 0.338651 |
import numpy as np
from mpmath import mp
mp.dps = 500
def construct_s(bh):
s = []
for bhj in bh:
if bhj != 0:
s.append(np.sign(bhj))
s = np.array(s)
s = s.reshape((len(s), 1))
return s
def construct_A_XA_Ac_XAc_bhA(X, bh, n, p):
A = []
Ac = []
bhA = []
for j in range(p):
bhj = bh[j]
if bhj != 0:
A.append(j)
bhA.append(bhj)
else:
Ac.append(j)
XA = X[:, A]
XAc = X[:, Ac]
bhA = np.array(bhA).reshape((len(A), 1))
return A, XA, Ac, XAc, bhA
def check_KKT(XA, XAc, y, bhA, lamda, n):
print("\nCheck Active")
e1 = y - np.dot(XA, bhA)
e2 = np.dot(XA.T, e1)
print(e2/ (lamda * n))
if XAc is not None:
print("\nCheck In Active")
e1 = y - np.dot(XA, bhA)
e2 = np.dot(XAc.T, e1)
print(e2/ (lamda * n))
def construct_test_statistic(j, XA, y, A):
ej = []
for each_j in A:
if j == each_j:
ej.append(1)
else:
ej.append(0)
ej = np.array(ej).reshape((len(A), 1))
inv = np.linalg.pinv(np.dot(XA.T, XA))
XAinv = np.dot(XA, inv)
etaj = np.dot(XAinv, ej)
etajTy = np.dot(etaj.T, y)[0][0]
return etaj, etajTy
def compute_yz(y, etaj, zk, n):
sq_norm = (np.linalg.norm(etaj))**2
e1 = np.identity(n) - (np.dot(etaj, etaj.T))/sq_norm
a = np.dot(e1, y)
b = etaj/sq_norm
yz = a + b*zk
return yz, b
def pivot(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov, tn_mu, type):
tn_sigma = np.sqrt(np.dot(np.dot(etaj.T, cov), etaj))[0][0]
z_interval = []
for i in range(len(list_active_set)):
if type == 'As':
if np.array_equal(np.sign(bh), np.sign(list_bhz[i])):
z_interval.append([list_zk[i], list_zk[i + 1] - 1e-10])
if type == 'A':
if np.array_equal(A, list_active_set[i]):
z_interval.append([list_zk[i], list_zk[i + 1] - 1e-10])
new_z_interval = []
for each_interval in z_interval:
if len(new_z_interval) == 0:
new_z_interval.append(each_interval)
else:
sub = each_interval[0] - new_z_interval[-1][1]
if abs(sub) < 0.01:
new_z_interval[-1][1] = each_interval[1]
else:
new_z_interval.append(each_interval)
z_interval = new_z_interval
numerator = 0
denominator = 0
for each_interval in z_interval:
al = each_interval[0]
ar = each_interval[1]
denominator = denominator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if etajTy >= ar:
numerator = numerator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
elif (etajTy >= al) and (etajTy < ar):
numerator = numerator + mp.ncdf((etajTy - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if denominator != 0:
return float(numerator/denominator)
else:
return None
def pivot_with_specified_interval(z_interval, etaj, etajTy, cov, tn_mu):
tn_sigma = np.sqrt(np.dot(np.dot(etaj.T, cov), etaj))[0][0]
numerator = 0
denominator = 0
for each_interval in z_interval:
al = each_interval[0]
ar = each_interval[1]
denominator = denominator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if etajTy >= ar:
numerator = numerator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
elif (etajTy >= al) and (etajTy < ar):
numerator = numerator + mp.ncdf((etajTy - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if denominator != 0:
return float(numerator/denominator)
else:
return None
def p_value(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov):
value = pivot(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov, 0, 'A')
return 2 * min(1 - value, value) | util.py | import numpy as np
from mpmath import mp
mp.dps = 500
def construct_s(bh):
s = []
for bhj in bh:
if bhj != 0:
s.append(np.sign(bhj))
s = np.array(s)
s = s.reshape((len(s), 1))
return s
def construct_A_XA_Ac_XAc_bhA(X, bh, n, p):
A = []
Ac = []
bhA = []
for j in range(p):
bhj = bh[j]
if bhj != 0:
A.append(j)
bhA.append(bhj)
else:
Ac.append(j)
XA = X[:, A]
XAc = X[:, Ac]
bhA = np.array(bhA).reshape((len(A), 1))
return A, XA, Ac, XAc, bhA
def check_KKT(XA, XAc, y, bhA, lamda, n):
print("\nCheck Active")
e1 = y - np.dot(XA, bhA)
e2 = np.dot(XA.T, e1)
print(e2/ (lamda * n))
if XAc is not None:
print("\nCheck In Active")
e1 = y - np.dot(XA, bhA)
e2 = np.dot(XAc.T, e1)
print(e2/ (lamda * n))
def construct_test_statistic(j, XA, y, A):
ej = []
for each_j in A:
if j == each_j:
ej.append(1)
else:
ej.append(0)
ej = np.array(ej).reshape((len(A), 1))
inv = np.linalg.pinv(np.dot(XA.T, XA))
XAinv = np.dot(XA, inv)
etaj = np.dot(XAinv, ej)
etajTy = np.dot(etaj.T, y)[0][0]
return etaj, etajTy
def compute_yz(y, etaj, zk, n):
sq_norm = (np.linalg.norm(etaj))**2
e1 = np.identity(n) - (np.dot(etaj, etaj.T))/sq_norm
a = np.dot(e1, y)
b = etaj/sq_norm
yz = a + b*zk
return yz, b
def pivot(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov, tn_mu, type):
tn_sigma = np.sqrt(np.dot(np.dot(etaj.T, cov), etaj))[0][0]
z_interval = []
for i in range(len(list_active_set)):
if type == 'As':
if np.array_equal(np.sign(bh), np.sign(list_bhz[i])):
z_interval.append([list_zk[i], list_zk[i + 1] - 1e-10])
if type == 'A':
if np.array_equal(A, list_active_set[i]):
z_interval.append([list_zk[i], list_zk[i + 1] - 1e-10])
new_z_interval = []
for each_interval in z_interval:
if len(new_z_interval) == 0:
new_z_interval.append(each_interval)
else:
sub = each_interval[0] - new_z_interval[-1][1]
if abs(sub) < 0.01:
new_z_interval[-1][1] = each_interval[1]
else:
new_z_interval.append(each_interval)
z_interval = new_z_interval
numerator = 0
denominator = 0
for each_interval in z_interval:
al = each_interval[0]
ar = each_interval[1]
denominator = denominator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if etajTy >= ar:
numerator = numerator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
elif (etajTy >= al) and (etajTy < ar):
numerator = numerator + mp.ncdf((etajTy - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if denominator != 0:
return float(numerator/denominator)
else:
return None
def pivot_with_specified_interval(z_interval, etaj, etajTy, cov, tn_mu):
tn_sigma = np.sqrt(np.dot(np.dot(etaj.T, cov), etaj))[0][0]
numerator = 0
denominator = 0
for each_interval in z_interval:
al = each_interval[0]
ar = each_interval[1]
denominator = denominator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if etajTy >= ar:
numerator = numerator + mp.ncdf((ar - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
elif (etajTy >= al) and (etajTy < ar):
numerator = numerator + mp.ncdf((etajTy - tn_mu)/tn_sigma) - mp.ncdf((al - tn_mu)/tn_sigma)
if denominator != 0:
return float(numerator/denominator)
else:
return None
def p_value(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov):
value = pivot(A, bh, list_active_set, list_zk, list_bhz, etaj, etajTy, cov, 0, 'A')
return 2 * min(1 - value, value) | 0.434941 | 0.386821 |
import os
import pkg_resources
# Anaconda
import pandas as pd
from argparse import ArgumentParser
# Local
from plot_weightlifting.plotstartingstrength import plot_db
from plot_weightlifting.global_vars import (__name__, __version__,
FIGSIZE_DICT)
def main():
""" Main function """
# Argument Parsing
description = '''Plots Starting Strength Official App data.
Output png will be named after database file'''
parser = ArgumentParser(description=description)
help_msg = 'filepath to Starting Strength Official App database file'
parser.add_argument('filename', help=help_msg, nargs='+')
help_msg = 'display version and exit'
parser.add_argument('-v', '-V', '--version', help=help_msg,
action='version',
version=f'{__name__} v{__version__}')
help_msg = '''figure size of the plot. Options: 4k, 1080p, 720p, 480p,
custom. Custom usage: --figsize w,h. w,h are floats representing the
pixels/100 for width and height. Default: 1080p: (19.20,10.80)'''
parser.add_argument('--figsize', help=help_msg, default='1080p')
parser.add_argument('--dpi', help='dpi of plot. Default: 100',
default=100)
help_msg = '''filepath to notes file. File holds notes for plot. The file
must be a JSON file with entries of the format: {"YYYY-MM-DD": {"label":
"LABEL", "ydata": YDATA}}. YDATA must match an exercase header in the
training log.
'''
parser.add_argument('--notefile', help=help_msg)
args = parser.parse_args()
# Execute plotter on files
success = []
failure = []
msg = f' {__name__} v{__version__} '
print('=' * 80)
print(f'{msg:=^80}')
print('=' * 80)
print(f'Executing {__file__}')
try:
figsize = FIGSIZE_DICT[args.figsize]
except KeyError:
figsize = args.figsize
for arg in args.filename:
fname = os.path.abspath(arg)
ret = plot_db(fname, notefile=args.notefile,
figsize=figsize,
dpi=args.dpi)
if ret == 0:
success.append(fname)
else:
failure.append([fname, ret])
print(f'Executing {__file__} complete!')
# Print Summary
if len(success) > 0:
print('Successfully processed files:')
[print(f'\t{_}->{os.path.splitext(_)[0]}.png') for _ in success]
if len(failure) > 0:
print('Skipped files:')
for _, err in failure:
print(f'\t{_}')
print(f'\t\tError: {err}')
if __name__ == '__main__':
main() | plot_weightlifting/main.py | import os
import pkg_resources
# Anaconda
import pandas as pd
from argparse import ArgumentParser
# Local
from plot_weightlifting.plotstartingstrength import plot_db
from plot_weightlifting.global_vars import (__name__, __version__,
FIGSIZE_DICT)
def main():
""" Main function """
# Argument Parsing
description = '''Plots Starting Strength Official App data.
Output png will be named after database file'''
parser = ArgumentParser(description=description)
help_msg = 'filepath to Starting Strength Official App database file'
parser.add_argument('filename', help=help_msg, nargs='+')
help_msg = 'display version and exit'
parser.add_argument('-v', '-V', '--version', help=help_msg,
action='version',
version=f'{__name__} v{__version__}')
help_msg = '''figure size of the plot. Options: 4k, 1080p, 720p, 480p,
custom. Custom usage: --figsize w,h. w,h are floats representing the
pixels/100 for width and height. Default: 1080p: (19.20,10.80)'''
parser.add_argument('--figsize', help=help_msg, default='1080p')
parser.add_argument('--dpi', help='dpi of plot. Default: 100',
default=100)
help_msg = '''filepath to notes file. File holds notes for plot. The file
must be a JSON file with entries of the format: {"YYYY-MM-DD": {"label":
"LABEL", "ydata": YDATA}}. YDATA must match an exercase header in the
training log.
'''
parser.add_argument('--notefile', help=help_msg)
args = parser.parse_args()
# Execute plotter on files
success = []
failure = []
msg = f' {__name__} v{__version__} '
print('=' * 80)
print(f'{msg:=^80}')
print('=' * 80)
print(f'Executing {__file__}')
try:
figsize = FIGSIZE_DICT[args.figsize]
except KeyError:
figsize = args.figsize
for arg in args.filename:
fname = os.path.abspath(arg)
ret = plot_db(fname, notefile=args.notefile,
figsize=figsize,
dpi=args.dpi)
if ret == 0:
success.append(fname)
else:
failure.append([fname, ret])
print(f'Executing {__file__} complete!')
# Print Summary
if len(success) > 0:
print('Successfully processed files:')
[print(f'\t{_}->{os.path.splitext(_)[0]}.png') for _ in success]
if len(failure) > 0:
print('Skipped files:')
for _, err in failure:
print(f'\t{_}')
print(f'\t\tError: {err}')
if __name__ == '__main__':
main() | 0.532911 | 0.116487 |
__all__ = ['RequestData', 'Client']
# Cell
import os
import requests
from ..exceptions import (
UnexpectedInputProvided, ExpectedInputMissing,
DataTypeNotImplemented
)
# Cell
class RequestData:
def __init__(self, spec):
"""Factory base class for request data."""
self.spec = spec
self.id_to_spec = {}
self.name_to_spec = {}
for obj in self.spec:
self.id_to_spec[obj['id']] = obj
self.name_to_spec[obj['name']] = obj
self.type_transforms = {
"int": self._tx_int,
"double": self._tx_double,
"blob": self._tx_blob,
"bool": self._tx_bool,
"string": self._tx_str,
"array_int": self._tx_arrayint
}
def make_instance(self, data):
"""
Takes in raw data in python representation,
outputs required format for the request.
In:
data, dict: contains the data in the native format
Out:
data, dict: the format defined by the deployment client. e.g. http post
"""
tx_data = {}
for name in self.name_to_spec.keys():
if name not in data:
raise MissingInput(
' '.join([
'Required input missing from spec: "{}"'])
.format(name)
)
for k, v in data.items():
if k not in self.name_to_spec:
raise UnexpectedInputProvided(
' '.join([
'Unexpected input found in',
'request formation: "{}"'])
.format(k)
)
var_spec = self.name_to_spec[k]
data_type = var_spec['data_type']['value']
if data_type not in self.type_transforms:
raise DataTypeNotImplemented(
'Request contains data type without defined behavior.',
'See client.RequestData missing {}'.format(data_type)
)
tx_data[k] = self.type_transforms[data_type](v)
return tx_data
def _tx_int(self, v):
"""Returns int value to be sent in request."""
return v
def _tx_bool(self, v):
"""Returns bool value to be sent in request."""
return v
def _tx_double(self, v):
"""Returns double value to be sent in request."""
return v
def _tx_str(self, v):
"""Returns str value to be sent in request."""
return v
def _tx_blob(self, v):
"""Returns blob value to be sent in request."""
return v
def _tx_arrayint(self, v):
return v
class Client:
def __init__(self, project_name, deployment_name,
deployment_version, input_spec,
output_spec, api_key, api_host):
self.project_name = project_name
self.deployment_name = deployment_name
self.deployment_version = deployment_version
self.input_spec = input_spec #dict
self.output_spec = output_spec #dict
self.api_key = api_key
self.api_host = api_host
def request(self, data):
"""Source the HTTP response."""
tx_data = self.input_factory.make_instance(data=data)
r = requests.post(
self.req_url, json=tx_data, headers={
"Authorization": self.api_key, #TODO: generalize this
}
)
return r
def post_process(self, response):
"""Deal with the HTTP response."""
if response.ok:
output = json.loads(response.content)
else:
raise Exception("Failed HTTP request, no return.")
return {
self.id_to_spec[k]['name']: self.output_factory(v)
for k, v in output['result'].items()
} | mod/serving/client.py |
__all__ = ['RequestData', 'Client']
# Cell
import os
import requests
from ..exceptions import (
UnexpectedInputProvided, ExpectedInputMissing,
DataTypeNotImplemented
)
# Cell
class RequestData:
def __init__(self, spec):
"""Factory base class for request data."""
self.spec = spec
self.id_to_spec = {}
self.name_to_spec = {}
for obj in self.spec:
self.id_to_spec[obj['id']] = obj
self.name_to_spec[obj['name']] = obj
self.type_transforms = {
"int": self._tx_int,
"double": self._tx_double,
"blob": self._tx_blob,
"bool": self._tx_bool,
"string": self._tx_str,
"array_int": self._tx_arrayint
}
def make_instance(self, data):
"""
Takes in raw data in python representation,
outputs required format for the request.
In:
data, dict: contains the data in the native format
Out:
data, dict: the format defined by the deployment client. e.g. http post
"""
tx_data = {}
for name in self.name_to_spec.keys():
if name not in data:
raise MissingInput(
' '.join([
'Required input missing from spec: "{}"'])
.format(name)
)
for k, v in data.items():
if k not in self.name_to_spec:
raise UnexpectedInputProvided(
' '.join([
'Unexpected input found in',
'request formation: "{}"'])
.format(k)
)
var_spec = self.name_to_spec[k]
data_type = var_spec['data_type']['value']
if data_type not in self.type_transforms:
raise DataTypeNotImplemented(
'Request contains data type without defined behavior.',
'See client.RequestData missing {}'.format(data_type)
)
tx_data[k] = self.type_transforms[data_type](v)
return tx_data
def _tx_int(self, v):
"""Returns int value to be sent in request."""
return v
def _tx_bool(self, v):
"""Returns bool value to be sent in request."""
return v
def _tx_double(self, v):
"""Returns double value to be sent in request."""
return v
def _tx_str(self, v):
"""Returns str value to be sent in request."""
return v
def _tx_blob(self, v):
"""Returns blob value to be sent in request."""
return v
def _tx_arrayint(self, v):
return v
class Client:
def __init__(self, project_name, deployment_name,
deployment_version, input_spec,
output_spec, api_key, api_host):
self.project_name = project_name
self.deployment_name = deployment_name
self.deployment_version = deployment_version
self.input_spec = input_spec #dict
self.output_spec = output_spec #dict
self.api_key = api_key
self.api_host = api_host
def request(self, data):
"""Source the HTTP response."""
tx_data = self.input_factory.make_instance(data=data)
r = requests.post(
self.req_url, json=tx_data, headers={
"Authorization": self.api_key, #TODO: generalize this
}
)
return r
def post_process(self, response):
"""Deal with the HTTP response."""
if response.ok:
output = json.loads(response.content)
else:
raise Exception("Failed HTTP request, no return.")
return {
self.id_to_spec[k]['name']: self.output_factory(v)
for k, v in output['result'].items()
} | 0.478285 | 0.196826 |
import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
class NetworkV2(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'cellular_technology': 'str',
'voice_roaming_enabled': 'bool',
'imei': 'str',
'iccid': 'str',
'meid': 'str',
'carrier_settings_version': 'str',
'current_carrier_network': 'str',
'current_mobile_country_code': 'str',
'current_mobile_network_code': 'str',
'home_carrier_network': 'str',
'home_mobile_country_code': 'str',
'home_mobile_network_code': 'str',
'data_roaming_enabled': 'bool',
'roaming': 'bool',
'personal_hotspot_enabled': 'bool',
'phone_number': 'str'
}
attribute_map = {
'cellular_technology': 'cellularTechnology',
'voice_roaming_enabled': 'voiceRoamingEnabled',
'imei': 'imei',
'iccid': 'iccid',
'meid': 'meid',
'carrier_settings_version': 'carrierSettingsVersion',
'current_carrier_network': 'currentCarrierNetwork',
'current_mobile_country_code': 'currentMobileCountryCode',
'current_mobile_network_code': 'currentMobileNetworkCode',
'home_carrier_network': 'homeCarrierNetwork',
'home_mobile_country_code': 'homeMobileCountryCode',
'home_mobile_network_code': 'homeMobileNetworkCode',
'data_roaming_enabled': 'dataRoamingEnabled',
'roaming': 'roaming',
'personal_hotspot_enabled': 'personalHotspotEnabled',
'phone_number': 'phoneNumber'
}
def __init__(self, cellular_technology=None, voice_roaming_enabled=None, imei=None, iccid=None, meid=None, carrier_settings_version=None, current_carrier_network=None, current_mobile_country_code=None, current_mobile_network_code=None, home_carrier_network=None, home_mobile_country_code=None, home_mobile_network_code=None, data_roaming_enabled=None, roaming=None, personal_hotspot_enabled=None, phone_number=None, local_vars_configuration=None): # noqa: E501
"""NetworkV2 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._cellular_technology = None
self._voice_roaming_enabled = None
self._imei = None
self._iccid = None
self._meid = None
self._carrier_settings_version = None
self._current_carrier_network = None
self._current_mobile_country_code = None
self._current_mobile_network_code = None
self._home_carrier_network = None
self._home_mobile_country_code = None
self._home_mobile_network_code = None
self._data_roaming_enabled = None
self._roaming = None
self._personal_hotspot_enabled = None
self._phone_number = None
self.discriminator = None
if cellular_technology is not None:
self.cellular_technology = cellular_technology
if voice_roaming_enabled is not None:
self.voice_roaming_enabled = voice_roaming_enabled
if imei is not None:
self.imei = imei
if iccid is not None:
self.iccid = iccid
if meid is not None:
self.meid = meid
if carrier_settings_version is not None:
self.carrier_settings_version = carrier_settings_version
if current_carrier_network is not None:
self.current_carrier_network = current_carrier_network
if current_mobile_country_code is not None:
self.current_mobile_country_code = current_mobile_country_code
if current_mobile_network_code is not None:
self.current_mobile_network_code = current_mobile_network_code
if home_carrier_network is not None:
self.home_carrier_network = home_carrier_network
if home_mobile_country_code is not None:
self.home_mobile_country_code = home_mobile_country_code
if home_mobile_network_code is not None:
self.home_mobile_network_code = home_mobile_network_code
if data_roaming_enabled is not None:
self.data_roaming_enabled = data_roaming_enabled
if roaming is not None:
self.roaming = roaming
if personal_hotspot_enabled is not None:
self.personal_hotspot_enabled = personal_hotspot_enabled
if phone_number is not None:
self.phone_number = phone_number
@property
def cellular_technology(self):
"""Gets the cellular_technology of this NetworkV2. # noqa: E501
:return: The cellular_technology of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._cellular_technology
@cellular_technology.setter
def cellular_technology(self, cellular_technology):
"""Sets the cellular_technology of this NetworkV2.
:param cellular_technology: The cellular_technology of this NetworkV2. # noqa: E501
:type cellular_technology: str
"""
self._cellular_technology = cellular_technology
@property
def voice_roaming_enabled(self):
"""Gets the voice_roaming_enabled of this NetworkV2. # noqa: E501
:return: The voice_roaming_enabled of this NetworkV2. # noqa: E501
:rtype: bool
"""
return self._voice_roaming_enabled
@voice_roaming_enabled.setter
def voice_roaming_enabled(self, voice_roaming_enabled):
"""Sets the voice_roaming_enabled of this NetworkV2.
:param voice_roaming_enabled: The voice_roaming_enabled of this NetworkV2. # noqa: E501
:type voice_roaming_enabled: bool
"""
self._voice_roaming_enabled = voice_roaming_enabled
@property
def imei(self):
"""Gets the imei of this NetworkV2. # noqa: E501
:return: The imei of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._imei
@imei.setter
def imei(self, imei):
"""Sets the imei of this NetworkV2.
:param imei: The imei of this NetworkV2. # noqa: E501
:type imei: str
"""
self._imei = imei
@property
def iccid(self):
"""Gets the iccid of this NetworkV2. # noqa: E501
:return: The iccid of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._iccid
@iccid.setter
def iccid(self, iccid):
"""Sets the iccid of this NetworkV2.
:param iccid: The iccid of this NetworkV2. # noqa: E501
:type iccid: str
"""
self._iccid = iccid
@property
def meid(self):
"""Gets the meid of this NetworkV2. # noqa: E501
:return: The meid of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._meid
@meid.setter
def meid(self, meid):
"""Sets the meid of this NetworkV2.
:param meid: The meid of this NetworkV2. # noqa: E501
:type meid: str
"""
self._meid = meid
@property
def carrier_settings_version(self):
"""Gets the carrier_settings_version of this NetworkV2. # noqa: E501
:return: The carrier_settings_version of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._carrier_settings_version
@carrier_settings_version.setter
def carrier_settings_version(self, carrier_settings_version):
"""Sets the carrier_settings_version of this NetworkV2.
:param carrier_settings_version: The carrier_settings_version of this NetworkV2. # noqa: E501
:type carrier_settings_version: str
"""
self._carrier_settings_version = carrier_settings_version
@property
def current_carrier_network(self):
"""Gets the current_carrier_network of this NetworkV2. # noqa: E501
:return: The current_carrier_network of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._current_carrier_network
@current_carrier_network.setter
def current_carrier_network(self, current_carrier_network):
"""Sets the current_carrier_network of this NetworkV2.
:param current_carrier_network: The current_carrier_network of this NetworkV2. # noqa: E501
:type current_carrier_network: str
"""
self._current_carrier_network = current_carrier_network
@property
def current_mobile_country_code(self):
"""Gets the current_mobile_country_code of this NetworkV2. # noqa: E501
:return: The current_mobile_country_code of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._current_mobile_country_code
@current_mobile_country_code.setter
def current_mobile_country_code(self, current_mobile_country_code):
"""Sets the current_mobile_country_code of this NetworkV2.
:param current_mobile_country_code: The current_mobile_country_code of this NetworkV2. # noqa: E501
:type current_mobile_country_code: str
"""
self._current_mobile_country_code = current_mobile_country_code
@property
def current_mobile_network_code(self):
"""Gets the current_mobile_network_code of this NetworkV2. # noqa: E501
:return: The current_mobile_network_code of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._current_mobile_network_code
@current_mobile_network_code.setter
def current_mobile_network_code(self, current_mobile_network_code):
"""Sets the current_mobile_network_code of this NetworkV2.
:param current_mobile_network_code: The current_mobile_network_code of this NetworkV2. # noqa: E501
:type current_mobile_network_code: str
"""
self._current_mobile_network_code = current_mobile_network_code
@property
def home_carrier_network(self):
"""Gets the home_carrier_network of this NetworkV2. # noqa: E501
:return: The home_carrier_network of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._home_carrier_network
@home_carrier_network.setter
def home_carrier_network(self, home_carrier_network):
"""Sets the home_carrier_network of this NetworkV2.
:param home_carrier_network: The home_carrier_network of this NetworkV2. # noqa: E501
:type home_carrier_network: str
"""
self._home_carrier_network = home_carrier_network
@property
def home_mobile_country_code(self):
"""Gets the home_mobile_country_code of this NetworkV2. # noqa: E501
:return: The home_mobile_country_code of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._home_mobile_country_code
@home_mobile_country_code.setter
def home_mobile_country_code(self, home_mobile_country_code):
"""Sets the home_mobile_country_code of this NetworkV2.
:param home_mobile_country_code: The home_mobile_country_code of this NetworkV2. # noqa: E501
:type home_mobile_country_code: str
"""
self._home_mobile_country_code = home_mobile_country_code
@property
def home_mobile_network_code(self):
"""Gets the home_mobile_network_code of this NetworkV2. # noqa: E501
:return: The home_mobile_network_code of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._home_mobile_network_code
@home_mobile_network_code.setter
def home_mobile_network_code(self, home_mobile_network_code):
"""Sets the home_mobile_network_code of this NetworkV2.
:param home_mobile_network_code: The home_mobile_network_code of this NetworkV2. # noqa: E501
:type home_mobile_network_code: str
"""
self._home_mobile_network_code = home_mobile_network_code
@property
def data_roaming_enabled(self):
"""Gets the data_roaming_enabled of this NetworkV2. # noqa: E501
:return: The data_roaming_enabled of this NetworkV2. # noqa: E501
:rtype: bool
"""
return self._data_roaming_enabled
@data_roaming_enabled.setter
def data_roaming_enabled(self, data_roaming_enabled):
"""Sets the data_roaming_enabled of this NetworkV2.
:param data_roaming_enabled: The data_roaming_enabled of this NetworkV2. # noqa: E501
:type data_roaming_enabled: bool
"""
self._data_roaming_enabled = data_roaming_enabled
@property
def roaming(self):
"""Gets the roaming of this NetworkV2. # noqa: E501
:return: The roaming of this NetworkV2. # noqa: E501
:rtype: bool
"""
return self._roaming
@roaming.setter
def roaming(self, roaming):
"""Sets the roaming of this NetworkV2.
:param roaming: The roaming of this NetworkV2. # noqa: E501
:type roaming: bool
"""
self._roaming = roaming
@property
def personal_hotspot_enabled(self):
"""Gets the personal_hotspot_enabled of this NetworkV2. # noqa: E501
:return: The personal_hotspot_enabled of this NetworkV2. # noqa: E501
:rtype: bool
"""
return self._personal_hotspot_enabled
@personal_hotspot_enabled.setter
def personal_hotspot_enabled(self, personal_hotspot_enabled):
"""Sets the personal_hotspot_enabled of this NetworkV2.
:param personal_hotspot_enabled: The personal_hotspot_enabled of this NetworkV2. # noqa: E501
:type personal_hotspot_enabled: bool
"""
self._personal_hotspot_enabled = personal_hotspot_enabled
@property
def phone_number(self):
"""Gets the phone_number of this NetworkV2. # noqa: E501
:return: The phone_number of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._phone_number
@phone_number.setter
def phone_number(self, phone_number):
"""Sets the phone_number of this NetworkV2.
:param phone_number: The phone_number of this NetworkV2. # noqa: E501
:type phone_number: str
"""
self._phone_number = phone_number
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkV2):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NetworkV2):
return True
return self.to_dict() != other.to_dict() | jamf/models/network_v2.py | import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
class NetworkV2(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'cellular_technology': 'str',
'voice_roaming_enabled': 'bool',
'imei': 'str',
'iccid': 'str',
'meid': 'str',
'carrier_settings_version': 'str',
'current_carrier_network': 'str',
'current_mobile_country_code': 'str',
'current_mobile_network_code': 'str',
'home_carrier_network': 'str',
'home_mobile_country_code': 'str',
'home_mobile_network_code': 'str',
'data_roaming_enabled': 'bool',
'roaming': 'bool',
'personal_hotspot_enabled': 'bool',
'phone_number': 'str'
}
attribute_map = {
'cellular_technology': 'cellularTechnology',
'voice_roaming_enabled': 'voiceRoamingEnabled',
'imei': 'imei',
'iccid': 'iccid',
'meid': 'meid',
'carrier_settings_version': 'carrierSettingsVersion',
'current_carrier_network': 'currentCarrierNetwork',
'current_mobile_country_code': 'currentMobileCountryCode',
'current_mobile_network_code': 'currentMobileNetworkCode',
'home_carrier_network': 'homeCarrierNetwork',
'home_mobile_country_code': 'homeMobileCountryCode',
'home_mobile_network_code': 'homeMobileNetworkCode',
'data_roaming_enabled': 'dataRoamingEnabled',
'roaming': 'roaming',
'personal_hotspot_enabled': 'personalHotspotEnabled',
'phone_number': 'phoneNumber'
}
def __init__(self, cellular_technology=None, voice_roaming_enabled=None, imei=None, iccid=None, meid=None, carrier_settings_version=None, current_carrier_network=None, current_mobile_country_code=None, current_mobile_network_code=None, home_carrier_network=None, home_mobile_country_code=None, home_mobile_network_code=None, data_roaming_enabled=None, roaming=None, personal_hotspot_enabled=None, phone_number=None, local_vars_configuration=None): # noqa: E501
"""NetworkV2 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._cellular_technology = None
self._voice_roaming_enabled = None
self._imei = None
self._iccid = None
self._meid = None
self._carrier_settings_version = None
self._current_carrier_network = None
self._current_mobile_country_code = None
self._current_mobile_network_code = None
self._home_carrier_network = None
self._home_mobile_country_code = None
self._home_mobile_network_code = None
self._data_roaming_enabled = None
self._roaming = None
self._personal_hotspot_enabled = None
self._phone_number = None
self.discriminator = None
if cellular_technology is not None:
self.cellular_technology = cellular_technology
if voice_roaming_enabled is not None:
self.voice_roaming_enabled = voice_roaming_enabled
if imei is not None:
self.imei = imei
if iccid is not None:
self.iccid = iccid
if meid is not None:
self.meid = meid
if carrier_settings_version is not None:
self.carrier_settings_version = carrier_settings_version
if current_carrier_network is not None:
self.current_carrier_network = current_carrier_network
if current_mobile_country_code is not None:
self.current_mobile_country_code = current_mobile_country_code
if current_mobile_network_code is not None:
self.current_mobile_network_code = current_mobile_network_code
if home_carrier_network is not None:
self.home_carrier_network = home_carrier_network
if home_mobile_country_code is not None:
self.home_mobile_country_code = home_mobile_country_code
if home_mobile_network_code is not None:
self.home_mobile_network_code = home_mobile_network_code
if data_roaming_enabled is not None:
self.data_roaming_enabled = data_roaming_enabled
if roaming is not None:
self.roaming = roaming
if personal_hotspot_enabled is not None:
self.personal_hotspot_enabled = personal_hotspot_enabled
if phone_number is not None:
self.phone_number = phone_number
@property
def cellular_technology(self):
"""Gets the cellular_technology of this NetworkV2. # noqa: E501
:return: The cellular_technology of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._cellular_technology
@cellular_technology.setter
def cellular_technology(self, cellular_technology):
"""Sets the cellular_technology of this NetworkV2.
:param cellular_technology: The cellular_technology of this NetworkV2. # noqa: E501
:type cellular_technology: str
"""
self._cellular_technology = cellular_technology
@property
def voice_roaming_enabled(self):
"""Gets the voice_roaming_enabled of this NetworkV2. # noqa: E501
:return: The voice_roaming_enabled of this NetworkV2. # noqa: E501
:rtype: bool
"""
return self._voice_roaming_enabled
@voice_roaming_enabled.setter
def voice_roaming_enabled(self, voice_roaming_enabled):
"""Sets the voice_roaming_enabled of this NetworkV2.
:param voice_roaming_enabled: The voice_roaming_enabled of this NetworkV2. # noqa: E501
:type voice_roaming_enabled: bool
"""
self._voice_roaming_enabled = voice_roaming_enabled
@property
def imei(self):
"""Gets the imei of this NetworkV2. # noqa: E501
:return: The imei of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._imei
@imei.setter
def imei(self, imei):
"""Sets the imei of this NetworkV2.
:param imei: The imei of this NetworkV2. # noqa: E501
:type imei: str
"""
self._imei = imei
@property
def iccid(self):
"""Gets the iccid of this NetworkV2. # noqa: E501
:return: The iccid of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._iccid
@iccid.setter
def iccid(self, iccid):
"""Sets the iccid of this NetworkV2.
:param iccid: The iccid of this NetworkV2. # noqa: E501
:type iccid: str
"""
self._iccid = iccid
@property
def meid(self):
"""Gets the meid of this NetworkV2. # noqa: E501
:return: The meid of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._meid
@meid.setter
def meid(self, meid):
"""Sets the meid of this NetworkV2.
:param meid: The meid of this NetworkV2. # noqa: E501
:type meid: str
"""
self._meid = meid
@property
def carrier_settings_version(self):
"""Gets the carrier_settings_version of this NetworkV2. # noqa: E501
:return: The carrier_settings_version of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._carrier_settings_version
@carrier_settings_version.setter
def carrier_settings_version(self, carrier_settings_version):
"""Sets the carrier_settings_version of this NetworkV2.
:param carrier_settings_version: The carrier_settings_version of this NetworkV2. # noqa: E501
:type carrier_settings_version: str
"""
self._carrier_settings_version = carrier_settings_version
@property
def current_carrier_network(self):
"""Gets the current_carrier_network of this NetworkV2. # noqa: E501
:return: The current_carrier_network of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._current_carrier_network
@current_carrier_network.setter
def current_carrier_network(self, current_carrier_network):
"""Sets the current_carrier_network of this NetworkV2.
:param current_carrier_network: The current_carrier_network of this NetworkV2. # noqa: E501
:type current_carrier_network: str
"""
self._current_carrier_network = current_carrier_network
@property
def current_mobile_country_code(self):
"""Gets the current_mobile_country_code of this NetworkV2. # noqa: E501
:return: The current_mobile_country_code of this NetworkV2. # noqa: E501
:rtype: str
"""
return self._current_mobile_country_code
@current_mobile_country_code.setter
def current_mobile_country_code(self, current_mobile_country_code):
"""Sets the current_mobile_country_code of this NetworkV2.
:param current_mobile_country_code: The current_mobile_country_code of this NetworkV2. # noqa: E501
:type current_mobile_country_code: str
"""
self._current_mobile_country_code = current_mobile_country_code
@property
def current_mobile_network_code(self) -> str:
    """Gets the current_mobile_network_code of this NetworkV2. # noqa: E501
    :return: The current_mobile_network_code of this NetworkV2. # noqa: E501
    :rtype: str
    """
    return self._current_mobile_network_code
@current_mobile_network_code.setter
def current_mobile_network_code(self, current_mobile_network_code: str) -> None:
    """Sets the current_mobile_network_code of this NetworkV2.
    :param current_mobile_network_code: The current_mobile_network_code of this NetworkV2. # noqa: E501
    :type current_mobile_network_code: str
    """
    self._current_mobile_network_code = current_mobile_network_code
@property
def home_carrier_network(self) -> str:
    """Gets the home_carrier_network of this NetworkV2. # noqa: E501
    :return: The home_carrier_network of this NetworkV2. # noqa: E501
    :rtype: str
    """
    return self._home_carrier_network
@home_carrier_network.setter
def home_carrier_network(self, home_carrier_network: str) -> None:
    """Sets the home_carrier_network of this NetworkV2.
    :param home_carrier_network: The home_carrier_network of this NetworkV2. # noqa: E501
    :type home_carrier_network: str
    """
    self._home_carrier_network = home_carrier_network
@property
def home_mobile_country_code(self) -> str:
    """Gets the home_mobile_country_code of this NetworkV2. # noqa: E501
    :return: The home_mobile_country_code of this NetworkV2. # noqa: E501
    :rtype: str
    """
    return self._home_mobile_country_code
@home_mobile_country_code.setter
def home_mobile_country_code(self, home_mobile_country_code: str) -> None:
    """Sets the home_mobile_country_code of this NetworkV2.
    :param home_mobile_country_code: The home_mobile_country_code of this NetworkV2. # noqa: E501
    :type home_mobile_country_code: str
    """
    self._home_mobile_country_code = home_mobile_country_code
@property
def home_mobile_network_code(self) -> str:
    """Gets the home_mobile_network_code of this NetworkV2. # noqa: E501
    :return: The home_mobile_network_code of this NetworkV2. # noqa: E501
    :rtype: str
    """
    return self._home_mobile_network_code
@home_mobile_network_code.setter
def home_mobile_network_code(self, home_mobile_network_code: str) -> None:
    """Sets the home_mobile_network_code of this NetworkV2.
    :param home_mobile_network_code: The home_mobile_network_code of this NetworkV2. # noqa: E501
    :type home_mobile_network_code: str
    """
    self._home_mobile_network_code = home_mobile_network_code
@property
def data_roaming_enabled(self) -> bool:
    """Gets the data_roaming_enabled of this NetworkV2. # noqa: E501
    :return: The data_roaming_enabled of this NetworkV2. # noqa: E501
    :rtype: bool
    """
    return self._data_roaming_enabled
@data_roaming_enabled.setter
def data_roaming_enabled(self, data_roaming_enabled: bool) -> None:
    """Sets the data_roaming_enabled of this NetworkV2.
    :param data_roaming_enabled: The data_roaming_enabled of this NetworkV2. # noqa: E501
    :type data_roaming_enabled: bool
    """
    self._data_roaming_enabled = data_roaming_enabled
@property
def roaming(self) -> bool:
    """Gets the roaming of this NetworkV2. # noqa: E501
    :return: The roaming of this NetworkV2. # noqa: E501
    :rtype: bool
    """
    return self._roaming
@roaming.setter
def roaming(self, roaming: bool) -> None:
    """Sets the roaming of this NetworkV2.
    :param roaming: The roaming of this NetworkV2. # noqa: E501
    :type roaming: bool
    """
    self._roaming = roaming
@property
def personal_hotspot_enabled(self) -> bool:
    """Gets the personal_hotspot_enabled of this NetworkV2. # noqa: E501
    :return: The personal_hotspot_enabled of this NetworkV2. # noqa: E501
    :rtype: bool
    """
    return self._personal_hotspot_enabled
@personal_hotspot_enabled.setter
def personal_hotspot_enabled(self, personal_hotspot_enabled: bool) -> None:
    """Sets the personal_hotspot_enabled of this NetworkV2.
    :param personal_hotspot_enabled: The personal_hotspot_enabled of this NetworkV2. # noqa: E501
    :type personal_hotspot_enabled: bool
    """
    self._personal_hotspot_enabled = personal_hotspot_enabled
@property
def phone_number(self) -> str:
    """Gets the phone_number of this NetworkV2. # noqa: E501
    :return: The phone_number of this NetworkV2. # noqa: E501
    :rtype: str
    """
    return self._phone_number
@phone_number.setter
def phone_number(self, phone_number: str) -> None:
    """Sets the phone_number of this NetworkV2.
    :param phone_number: The phone_number of this NetworkV2. # noqa: E501
    :type phone_number: str
    """
    self._phone_number = phone_number
def to_dict(self):
    """Returns the model properties as a dict.

    Nested model objects (anything exposing ``to_dict``) are serialized
    recursively, including those held inside lists and dict values.
    """
    result = {}
    # openapi_types presumably maps attribute name -> declared type for every
    # generated field (see the generated model head) -- only its keys are used.
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Serialize each model element in place; plain values pass through.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            # Nested model object.
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Serialize model objects stored as dict values; keys untouched.
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    return result
def to_str(self) -> str:
    """Returns the string representation of the model (pretty-printed dict)."""
    return pprint.pformat(self.to_dict())
def __repr__(self) -> str:
    """For `print` and `pprint`."""
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Equality means: same model class and field-for-field equal serialized dicts.
    return isinstance(other, NetworkV2) and self.to_dict() == other.to_dict()
def __ne__(self, other):
    """Returns true if both objects are not equal"""
    # Any non-NetworkV2 value always compares unequal.
    if not isinstance(other, NetworkV2):
        return True
    return self.to_dict() != other.to_dict() | 0.54359 | 0.090977 |
import os
from xml.dom import minidom
import unittest
import mock
from pybrightcove import video, enums, connection
class FTPVideoTest(unittest.TestCase):
    """Exercises pybrightcove FTP batch provisioning end to end with mocks.

    NOTE(review): legacy Python 2 test code -- it patches ``__builtin__.file``
    and relies on the exact ordering of mock ``method_calls``.
    """
    @mock.patch('ftplib.FTP')
    @mock.patch('hashlib.md5') # md5(), md5.hexdigest
    @mock.patch('os.path.getsize')
    @mock.patch('__builtin__.file')
    @mock.patch("os.fdopen")
    def test_batch_provision_video(self, FDOpenMockClass, OpenMockClass, GetSizeMockClass, Md5MockClass, FTPMockClass):
        # Patch decorators apply bottom-up: os.fdopen binds to the first parameter.
        fd = FDOpenMockClass()
        o = OpenMockClass()
        o.read.return_value = None
        m = Md5MockClass()
        m.hexdigest.return_value = 'a78fa9f8asd'
        GetSizeMockClass.return_value = 10000
        f = FTPMockClass()
        ftp = connection.FTPConnection(host='host',
                                       user='user',
                                       password='<PASSWORD>',
                                       publisher_id='111111111',
                                       preparer='Patrick',
                                       report_success=True)
        v = video.Video(name="Some title",
                        reference_id='a532kallk3252a',
                        short_description="A short description.",
                        _connection=ftp)
        v.long_description = "An even longer description"
        v.tags.extend(["blah", "nah", "tag"])
        v.add_asset('1500.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'High quality rendition',
                    encoding_rate=1500000, frame_width=640,
                    frame_height=360)
        v.add_asset('700.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'Medium quality rendition',
                    encoding_rate=700000, frame_width=640,
                    frame_height=360)
        v.add_asset('poster.png',
                    enums.AssetTypeEnum.VIDEO_STILL, 'Poster frame',
                    frame_width=640, frame_height=360)
        v.save()
        # Each asset upload performs login / set_pasv / storbinary, in order.
        self.assertEqual('login', f.method_calls[0][0])
        self.assertEqual('set_pasv', f.method_calls[1][0])
        self.assertEqual('storbinary', f.method_calls[2][0])
        self.assertEqual('STOR 1500.flv', f.method_calls[2][1][0])
        self.assertEqual('login', f.method_calls[3][0])
        self.assertEqual('set_pasv', f.method_calls[4][0])
        self.assertEqual('storbinary', f.method_calls[5][0])
        self.assertEqual('STOR 700.flv', f.method_calls[5][1][0])
        self.assertEqual('login', f.method_calls[6][0])
        self.assertEqual('set_pasv', f.method_calls[7][0])
        self.assertEqual('storbinary', f.method_calls[8][0])
        self.assertEqual('STOR poster.png', f.method_calls[8][1][0])
        # The manifest XML written through os.fdopen must match the golden file
        # (whitespace-insensitive comparison).
        self.assertEqual('write', fd.method_calls[2][0])
        valid_xml = minidom.parse(
            open(os.path.join(os.path.dirname(__file__), 'test_ftp_video_batch_provision_manifest.xml'), 'rb'))
        test_xml = minidom.parseString(fd.method_calls[2][1][0])
        self.assertEqual(
            valid_xml.toxml().replace('\t', '').replace('\n', ''),
            test_xml.toxml().replace('\t', '').replace('\n', ''))
    @mock.patch('ftplib.FTP')
    @mock.patch('hashlib.md5')
    @mock.patch('os.path.getsize')
    @mock.patch('__builtin__.file')
    @mock.patch("os.fdopen")
    def test_batch_provision_with_custom_metadata_video(self, FDOpenMockClass, OpenMockClass,
                                                        GetSizeMockClass, Md5MockClass, FTPMockClass):
        # Same flow as above, plus four custom-metadata entries in the manifest.
        fd = FDOpenMockClass()
        o = OpenMockClass()
        o.read.return_value = None
        m = Md5MockClass()
        m.hexdigest.return_value = 'a78fa9f8asd'
        GetSizeMockClass.return_value = 10000
        f = FTPMockClass()
        ftp = connection.FTPConnection(host='host',
                                       user='user',
                                       password='<PASSWORD>',
                                       publisher_id='111111111',
                                       preparer='Patrick',
                                       report_success=True)
        v = video.Video(name="Some title",
                        reference_id='a532kallk3252a',
                        short_description="A short description.",
                        _connection=ftp)
        v.long_description = "An even longer description"
        v.tags.extend(["blah", "nah", "tag"])
        v.add_asset('1500.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'High quality rendition',
                    encoding_rate=1500000, frame_width=640,
                    frame_height=360)
        v.add_asset('700.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'Medium quality rendition',
                    encoding_rate=700000, frame_width=640,
                    frame_height=360)
        v.add_asset('poster.png',
                    enums.AssetTypeEnum.VIDEO_STILL, 'Poster frame',
                    frame_width=640, frame_height=360)
        v.add_custom_metadata("enum_one", "Value One", enums.CustomMetaType.ENUM)
        v.add_custom_metadata("enum_two", "Value Two", enums.CustomMetaType.ENUM)
        v.add_custom_metadata("key_one", "String Value One", enums.CustomMetaType.STRING)
        v.add_custom_metadata("key_two", "String Value Two", enums.CustomMetaType.STRING)
        v.save()
        self.assertEqual('login', f.method_calls[0][0])
        self.assertEqual('set_pasv', f.method_calls[1][0])
        self.assertEqual('storbinary', f.method_calls[2][0])
        self.assertEqual('STOR 1500.flv', f.method_calls[2][1][0])
        self.assertEqual('login', f.method_calls[3][0])
        self.assertEqual('set_pasv', f.method_calls[4][0])
        self.assertEqual('storbinary', f.method_calls[5][0])
        self.assertEqual('STOR 700.flv', f.method_calls[5][1][0])
        self.assertEqual('login', f.method_calls[6][0])
        self.assertEqual('set_pasv', f.method_calls[7][0])
        self.assertEqual('storbinary', f.method_calls[8][0])
        self.assertEqual('STOR poster.png', f.method_calls[8][1][0])
        # NOTE(review): this test inspects fd.method_calls[0] while the first
        # test inspects index 2 -- confirm which write carries the manifest.
        self.assertEqual('write', fd.method_calls[0][0])
        valid_xml = minidom.parse(
            open(os.path.join(os.path.dirname(__file__), 'test_ftp_video_batch_provision_with_custom_metadata_manifest.xml'), 'rb'))
        test_xml = minidom.parseString(fd.method_calls[0][1][0])
        self.assertEqual(
            valid_xml.toxml().replace('\t', '').replace('\n', ''),
            test_xml.toxml().replace('\t', '').replace('\n', '')) | tests/test_ftp_video.py |
import os
from xml.dom import minidom
import unittest
import mock
from pybrightcove import video, enums, connection
class FTPVideoTest(unittest.TestCase):
    """Duplicate copy of the FTP batch-provision test suite (see earlier chunk).

    NOTE(review): legacy Python 2 test code -- patches ``__builtin__.file``
    and depends on mock ``method_calls`` ordering.
    """
    @mock.patch('ftplib.FTP')
    @mock.patch('hashlib.md5') # md5(), md5.hexdigest
    @mock.patch('os.path.getsize')
    @mock.patch('__builtin__.file')
    @mock.patch("os.fdopen")
    def test_batch_provision_video(self, FDOpenMockClass, OpenMockClass, GetSizeMockClass, Md5MockClass, FTPMockClass):
        # Patch decorators apply bottom-up: os.fdopen binds to the first parameter.
        fd = FDOpenMockClass()
        o = OpenMockClass()
        o.read.return_value = None
        m = Md5MockClass()
        m.hexdigest.return_value = 'a78fa9f8asd'
        GetSizeMockClass.return_value = 10000
        f = FTPMockClass()
        ftp = connection.FTPConnection(host='host',
                                       user='user',
                                       password='<PASSWORD>',
                                       publisher_id='111111111',
                                       preparer='Patrick',
                                       report_success=True)
        v = video.Video(name="Some title",
                        reference_id='a532kallk3252a',
                        short_description="A short description.",
                        _connection=ftp)
        v.long_description = "An even longer description"
        v.tags.extend(["blah", "nah", "tag"])
        v.add_asset('1500.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'High quality rendition',
                    encoding_rate=1500000, frame_width=640,
                    frame_height=360)
        v.add_asset('700.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'Medium quality rendition',
                    encoding_rate=700000, frame_width=640,
                    frame_height=360)
        v.add_asset('poster.png',
                    enums.AssetTypeEnum.VIDEO_STILL, 'Poster frame',
                    frame_width=640, frame_height=360)
        v.save()
        # Each asset upload performs login / set_pasv / storbinary, in order.
        self.assertEqual('login', f.method_calls[0][0])
        self.assertEqual('set_pasv', f.method_calls[1][0])
        self.assertEqual('storbinary', f.method_calls[2][0])
        self.assertEqual('STOR 1500.flv', f.method_calls[2][1][0])
        self.assertEqual('login', f.method_calls[3][0])
        self.assertEqual('set_pasv', f.method_calls[4][0])
        self.assertEqual('storbinary', f.method_calls[5][0])
        self.assertEqual('STOR 700.flv', f.method_calls[5][1][0])
        self.assertEqual('login', f.method_calls[6][0])
        self.assertEqual('set_pasv', f.method_calls[7][0])
        self.assertEqual('storbinary', f.method_calls[8][0])
        self.assertEqual('STOR poster.png', f.method_calls[8][1][0])
        # Manifest XML written via os.fdopen must match the golden file.
        self.assertEqual('write', fd.method_calls[2][0])
        valid_xml = minidom.parse(
            open(os.path.join(os.path.dirname(__file__), 'test_ftp_video_batch_provision_manifest.xml'), 'rb'))
        test_xml = minidom.parseString(fd.method_calls[2][1][0])
        self.assertEqual(
            valid_xml.toxml().replace('\t', '').replace('\n', ''),
            test_xml.toxml().replace('\t', '').replace('\n', ''))
    @mock.patch('ftplib.FTP')
    @mock.patch('hashlib.md5')
    @mock.patch('os.path.getsize')
    @mock.patch('__builtin__.file')
    @mock.patch("os.fdopen")
    def test_batch_provision_with_custom_metadata_video(self, FDOpenMockClass, OpenMockClass,
                                                        GetSizeMockClass, Md5MockClass, FTPMockClass):
        # Same flow as above, plus four custom-metadata entries in the manifest.
        fd = FDOpenMockClass()
        o = OpenMockClass()
        o.read.return_value = None
        m = Md5MockClass()
        m.hexdigest.return_value = 'a78fa9f8asd'
        GetSizeMockClass.return_value = 10000
        f = FTPMockClass()
        ftp = connection.FTPConnection(host='host',
                                       user='user',
                                       password='<PASSWORD>',
                                       publisher_id='111111111',
                                       preparer='Patrick',
                                       report_success=True)
        v = video.Video(name="Some title",
                        reference_id='a532kallk3252a',
                        short_description="A short description.",
                        _connection=ftp)
        v.long_description = "An even longer description"
        v.tags.extend(["blah", "nah", "tag"])
        v.add_asset('1500.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'High quality rendition',
                    encoding_rate=1500000, frame_width=640,
                    frame_height=360)
        v.add_asset('700.flv',
                    enums.AssetTypeEnum.VIDEO_FULL, 'Medium quality rendition',
                    encoding_rate=700000, frame_width=640,
                    frame_height=360)
        v.add_asset('poster.png',
                    enums.AssetTypeEnum.VIDEO_STILL, 'Poster frame',
                    frame_width=640, frame_height=360)
        v.add_custom_metadata("enum_one", "Value One", enums.CustomMetaType.ENUM)
        v.add_custom_metadata("enum_two", "Value Two", enums.CustomMetaType.ENUM)
        v.add_custom_metadata("key_one", "String Value One", enums.CustomMetaType.STRING)
        v.add_custom_metadata("key_two", "String Value Two", enums.CustomMetaType.STRING)
        v.save()
        self.assertEqual('login', f.method_calls[0][0])
        self.assertEqual('set_pasv', f.method_calls[1][0])
        self.assertEqual('storbinary', f.method_calls[2][0])
        self.assertEqual('STOR 1500.flv', f.method_calls[2][1][0])
        self.assertEqual('login', f.method_calls[3][0])
        self.assertEqual('set_pasv', f.method_calls[4][0])
        self.assertEqual('storbinary', f.method_calls[5][0])
        self.assertEqual('STOR 700.flv', f.method_calls[5][1][0])
        self.assertEqual('login', f.method_calls[6][0])
        self.assertEqual('set_pasv', f.method_calls[7][0])
        self.assertEqual('storbinary', f.method_calls[8][0])
        self.assertEqual('STOR poster.png', f.method_calls[8][1][0])
        # NOTE(review): inspects fd.method_calls[0] here but index 2 in the
        # first test -- confirm which write carries the manifest.
        self.assertEqual('write', fd.method_calls[0][0])
        valid_xml = minidom.parse(
            open(os.path.join(os.path.dirname(__file__), 'test_ftp_video_batch_provision_with_custom_metadata_manifest.xml'), 'rb'))
        test_xml = minidom.parseString(fd.method_calls[0][1][0])
        self.assertEqual(
            valid_xml.toxml().replace('\t', '').replace('\n', ''),
            test_xml.toxml().replace('\t', '').replace('\n', '')) | 0.472927 | 0.099077 |
from pip_services3_expressions.tokenizers.AbstractTokenizer import AbstractTokenizer
from pip_services3_expressions.tokenizers.TokenType import TokenType
from pip_services3_expressions.tokenizers.generic.GenericCommentState import GenericCommentState
from pip_services3_expressions.tokenizers.generic.GenericNumberState import GenericNumberState
from pip_services3_expressions.tokenizers.generic.GenericQuoteState import GenericQuoteState
from pip_services3_expressions.tokenizers.generic.GenericSymbolState import GenericSymbolState
from pip_services3_expressions.tokenizers.generic.GenericWhitespaceState import GenericWhitespaceState
from pip_services3_expressions.tokenizers.generic.GenericWordState import GenericWordState
class GenericTokenizer(AbstractTokenizer):
    """
    Implements a default tokenizer class.

    Configures one state object per token category (symbols, numbers, quotes,
    whitespace, words, comments) and maps character ranges onto those states.
    """
    def __init__(self):
        super(GenericTokenizer, self).__init__()
        # Multi-character symbols must be registered explicitly.
        self.symbol_state = GenericSymbolState()
        self.symbol_state.add("<>", TokenType.Symbol)
        self.symbol_state.add("<=", TokenType.Symbol)
        self.symbol_state.add(">=", TokenType.Symbol)
        self.number_state = GenericNumberState()
        self.quote_state = GenericQuoteState()
        self.whitespace_state = GenericWhitespaceState()
        self.word_state = GenericWordState()
        self.comment_state = GenericCommentState()
        self.clear_character_states()
        # NOTE(review): the ranges below overlap (e.g. whitespace and digits
        # sit inside the 0x0000-0x00ff symbol range) -- registration order
        # appears significant; confirm later-call-wins semantics in
        # AbstractTokenizer.set_character_state.
        self.set_character_state(0x0000, 0x00ff, self.symbol_state)
        self.set_character_state(0x0000, ord(' '), self.whitespace_state)
        self.set_character_state(ord('a'), ord('z'), self.word_state)
        self.set_character_state(ord('A'), ord('Z'), self.word_state)
        self.set_character_state(0x00c0, 0x00ff, self.word_state)
        self.set_character_state(0x0100, 0xfffe, self.word_state)
        self.set_character_state(ord('-'), ord('-'), self.number_state)
        self.set_character_state(ord('0'), ord('9'), self.number_state)
        self.set_character_state(ord('.'), ord('.'), self.number_state)
        self.set_character_state(ord('\"'), ord('\"'), self.quote_state)
        self.set_character_state(ord('\''), ord('\''), self.quote_state)
        self.set_character_state(ord('#'), ord('#'), self.comment_state) | pip_services3_expressions-3.3.4/pip_services3_expressions/tokenizers/generic/GenericTokenizer.py |
from pip_services3_expressions.tokenizers.AbstractTokenizer import AbstractTokenizer
from pip_services3_expressions.tokenizers.TokenType import TokenType
from pip_services3_expressions.tokenizers.generic.GenericCommentState import GenericCommentState
from pip_services3_expressions.tokenizers.generic.GenericNumberState import GenericNumberState
from pip_services3_expressions.tokenizers.generic.GenericQuoteState import GenericQuoteState
from pip_services3_expressions.tokenizers.generic.GenericSymbolState import GenericSymbolState
from pip_services3_expressions.tokenizers.generic.GenericWhitespaceState import GenericWhitespaceState
from pip_services3_expressions.tokenizers.generic.GenericWordState import GenericWordState
class GenericTokenizer(AbstractTokenizer):
    """
    Implements a default tokenizer class.

    Duplicate copy of the tokenizer configuration (see earlier chunk).
    """
    def __init__(self):
        super(GenericTokenizer, self).__init__()
        # Register multi-character symbols explicitly.
        self.symbol_state = GenericSymbolState()
        self.symbol_state.add("<>", TokenType.Symbol)
        self.symbol_state.add("<=", TokenType.Symbol)
        self.symbol_state.add(">=", TokenType.Symbol)
        self.number_state = GenericNumberState()
        self.quote_state = GenericQuoteState()
        self.whitespace_state = GenericWhitespaceState()
        self.word_state = GenericWordState()
        self.comment_state = GenericCommentState()
        self.clear_character_states()
        # NOTE(review): overlapping ranges -- order of registration appears
        # significant; confirm override semantics in AbstractTokenizer.
        self.set_character_state(0x0000, 0x00ff, self.symbol_state)
        self.set_character_state(0x0000, ord(' '), self.whitespace_state)
        self.set_character_state(ord('a'), ord('z'), self.word_state)
        self.set_character_state(ord('A'), ord('Z'), self.word_state)
        self.set_character_state(0x00c0, 0x00ff, self.word_state)
        self.set_character_state(0x0100, 0xfffe, self.word_state)
        self.set_character_state(ord('-'), ord('-'), self.number_state)
        self.set_character_state(ord('0'), ord('9'), self.number_state)
        self.set_character_state(ord('.'), ord('.'), self.number_state)
        self.set_character_state(ord('\"'), ord('\"'), self.quote_state)
        self.set_character_state(ord('\''), ord('\''), self.quote_state)
        self.set_character_state(ord('#'), ord('#'), self.comment_state) | 0.737631 | 0.175044 |
import hashlib
import json
import logging
import re
from pyhocon import ConfigFactory, HOCONConverter
from .dict_tool import merge_dict
logger = logging.getLogger(__name__)
# Separator used when prepending an `include` statement to rendered HOCON.
NEW_LINE = '\n'
# Recognized HOCON `include` statement shapes:
#   include required(...) / url(...) / file(...) / classpath(...)
#   include "something.conf" / include "something.hocon"
RE_HOCON_INCLUDE = [
    r'include\s+(?:required|url|file|classpath)\(.*\)',
    r'include\s+".*\.(?:conf|hocon)"',
]
# Matches a wrapped include entry: HOCONSTRING_INCLUDE_<md5> = "...".
RE_HOCONSTRING_INCLUDE = r'HOCONSTRING_INCLUDE_(?:.*)\s*=\s*"(?:.*)"'
# Same shape, but captures the escaped include statement as group 1.
RE_HOCONSTRING_INCLUDE_VALUE = r'HOCONSTRING_INCLUDE_(?:.*)\s*=\s*"(.*)"'
# Key template; `id` is the md5 hex digest of the original include statement.
HOCONSTRING_INCLUDE_KEY = 'HOCONSTRING_INCLUDE_{id}'
def escape_double_quotes(double_quotes):
    """Return *double_quotes* with every double quote escaped (``"`` -> ``\\"``)."""
    escaped = double_quotes.replace('"', '\\"')
    return escaped
def unescape_double_quotes(escaped_double_quotes):
    """Return *escaped_double_quotes* with every ``\\"`` restored to ``"``."""
    restored = escaped_double_quotes.replace('\\"', '"')
    return restored
def is_valid_include(include):
    """Return True when *include* matches one of the supported HOCON
    `include` statement formats (RE_HOCON_INCLUDE)."""
    return any(re.search(pattern, include) for pattern in RE_HOCON_INCLUDE)
def get_include_key(include_str):
    """Use md5sum hash of the whole include statement string for a key."""
    digest = hashlib.md5(include_str.encode())
    return digest.hexdigest()
def wrap_includes(hocon_str):
    """Convert `include` statement string into key = val format.
    Returns '{key} = "{double_quote_escaped_val}"'.
    """
    for regex in RE_HOCON_INCLUDE:
        for include in re.findall(regex, hocon_str):
            # Presumably already escaped/wrapped by an earlier pass -- skip to
            # avoid double-wrapping (TODO confirm).
            if '\\"' in include:
                continue
            logger.debug('Found include in HOCON: {include}'.format(include=include))
            # Replace the raw include statement with a parseable key/value
            # pair; the key embeds the statement's md5 digest so distinct
            # statements get distinct keys.
            hocon_str = hocon_str.replace(
                include,
                '{key} = "{val}"'.format(
                    key=HOCONSTRING_INCLUDE_KEY.format(id=get_include_key(include)),
                    val=escape_double_quotes(include),
                ),
            )
    return hocon_str
def unwrap_includes(key_val_str):
    """Recover the original `include` statement from a wrapped key/val string.

    Returns None when *key_val_str* holds no wrapped include entry; raises
    ValueError when more than one entry is found in the same string.
    """
    matches = re.findall(RE_HOCONSTRING_INCLUDE_VALUE, key_val_str)
    if not matches:
        return None
    if len(matches) > 1:
        raise ValueError(
            'Found multiple matches. Wrong include key=val format? {val}'.format(
                val=matches
            )
        )
    return unescape_double_quotes(matches[0])
class HOCONString:
    def __init__(self, hocon_str):
        """Find an `include` statement (VALUE) in HOCON string and then convert it
        into a HOCONSTRING_INCLUDE_KEY="VALUE" pair in HOCON.

        Double-quotes are escaped with backslashes, so the VALUE survives
        parsing and can be recovered when converting back to HOCON string.

        This workaround skips parsing `include` statements since there is no
        information about `classpath` at parsing time and pyhocon would error
        out (e.g. we don't know what's in `classpath` before the backend conf
        file is passed to Cromwell).
        """
        if not isinstance(hocon_str, str):
            raise ValueError('HOCONString() takes str type only.')
        # Store the wrapped form; includes are re-materialized on demand.
        self._hocon_str = wrap_includes(hocon_str)
    def __str__(self) -> str:
        return self.get_contents()
    @classmethod
    def from_dict(cls, d, include=''):
        """Create HOCONString from dict.
        Args:
            include:
                `include` statement to be added to the top of the HOCONString.
        Raises:
            ValueError: if *include* is not a supported include format.
        """
        hocon = ConfigFactory.from_dict(d)
        hocon_str = HOCONConverter.to_hocon(hocon)
        if include:
            if not is_valid_include(include):
                raise ValueError(
                    'Wrong HOCON include format. {include}'.format(include=include)
                )
            hocon_str = NEW_LINE.join([include, hocon_str])
        return cls(hocon_str=hocon_str)
    def to_dict(self, with_include: bool = True) -> dict:
        """Convert HOCON string into dict.
        Args:
            with_include:
                If True then double-quote-escaped `include` statements are kept
                as plain strings under HOCONSTRING_INCLUDE_* keys.
                Otherwise `include` statements are excluded.
        """
        if with_include:
            hocon_str = self._hocon_str
        else:
            hocon_str = self.get_contents(with_include=False)
        # Round-trip through JSON to get plain Python containers.
        c = ConfigFactory.parse_string(hocon_str)
        j = HOCONConverter.to_json(c)
        return json.loads(j)
    def merge(self, b, update: bool = False) -> str:
        """Merge self with b and then return a plain string of the merge.
        Args:
            b:
                HOCONString, dict, or str to be merged.
                b's `include` statement is always ignored.
            update:
                If True then replace self's contents with the merged result.
        Returns:
            String of merged HOCONs.
        Raises:
            TypeError: if b is none of the supported types.
        """
        if isinstance(b, HOCONString):
            d = b.to_dict()
        elif isinstance(b, str):
            d = HOCONString(b).to_dict()
        elif isinstance(b, dict):
            d = b
        else:
            raise TypeError('Unsupported type {t}'.format(t=type(b)))
        # b takes precedence: merge_dict folds d into self's dict in place.
        self_d = self.to_dict()
        merge_dict(self_d, d)
        hocon = ConfigFactory.from_dict(self_d)
        hocon_str = HOCONConverter.to_hocon(hocon)
        if update:
            self._hocon_str = hocon_str
        return HOCONString(hocon_str).get_contents()
    def get_contents(self, with_include: bool = True) -> str:
        """Render the stored HOCON, recovering wrapped `include` statements.
        Args:
            with_include:
                If True then recover all include statements from the wrapped
                key=val form (RE_HOCONSTRING_INCLUDE).
                Otherwise exclude all `include` statements entirely.
        """
        hocon_str = self._hocon_str
        for include_key_val in re.findall(RE_HOCONSTRING_INCLUDE, self._hocon_str):
            logger.debug(
                'Found include key in HOCONString: {include_key_val}'.format(
                    include_key_val=include_key_val
                )
            )
            if with_include:
                original_include_str = unwrap_includes(include_key_val)
                # unwrap_includes returns None for non-matching strings.
                if original_include_str:
                    hocon_str = hocon_str.replace(include_key_val, original_include_str)
            else:
                hocon_str = hocon_str.replace(include_key_val, '')
        return hocon_str | caper/hocon_string.py | import hashlib
import json
import logging
import re
from pyhocon import ConfigFactory, HOCONConverter
from .dict_tool import merge_dict
logger = logging.getLogger(__name__)
# Separator used when prepending an `include` statement to rendered HOCON.
NEW_LINE = '\n'
# Recognized HOCON `include` statement shapes (duplicate chunk).
RE_HOCON_INCLUDE = [
    r'include\s+(?:required|url|file|classpath)\(.*\)',
    r'include\s+".*\.(?:conf|hocon)"',
]
# Matches a wrapped include entry: HOCONSTRING_INCLUDE_<md5> = "...".
RE_HOCONSTRING_INCLUDE = r'HOCONSTRING_INCLUDE_(?:.*)\s*=\s*"(?:.*)"'
# Same shape, but captures the escaped include statement as group 1.
RE_HOCONSTRING_INCLUDE_VALUE = r'HOCONSTRING_INCLUDE_(?:.*)\s*=\s*"(.*)"'
# Key template; `id` is the md5 hex digest of the original include statement.
HOCONSTRING_INCLUDE_KEY = 'HOCONSTRING_INCLUDE_{id}'
def escape_double_quotes(double_quotes):
    """Return the input with every double quote escaped (``"`` -> ``\\"``)."""
    return double_quotes.replace('"', '\\"')
def unescape_double_quotes(escaped_double_quotes):
    """Return the input with every ``\\"`` restored to a plain double quote."""
    return escaped_double_quotes.replace('\\"', '"')
def is_valid_include(include):
    """Return True when *include* matches a supported HOCON include format."""
    is_valid_format = False
    for regex in RE_HOCON_INCLUDE:
        if re.findall(regex, include):
            is_valid_format = True
            break
    return is_valid_format
def get_include_key(include_str):
    """Use md5sum hash of the whole include statement string for a key.
    """
    return hashlib.md5(include_str.encode()).hexdigest()
def wrap_includes(hocon_str):
    """Convert `include` statement string into key = val format.
    Returns '{key} = "{double_quote_escaped_val}"'.
    """
    for regex in RE_HOCON_INCLUDE:
        for include in re.findall(regex, hocon_str):
            # Presumably already escaped/wrapped -- skip to avoid
            # double-wrapping (TODO confirm).
            if '\\"' in include:
                continue
            logger.debug('Found include in HOCON: {include}'.format(include=include))
            # Swap the raw include statement for a parseable key/value pair
            # keyed by the statement's md5 digest.
            hocon_str = hocon_str.replace(
                include,
                '{key} = "{val}"'.format(
                    key=HOCONSTRING_INCLUDE_KEY.format(id=get_include_key(include)),
                    val=escape_double_quotes(include),
                ),
            )
    return hocon_str
def unwrap_includes(key_val_str):
    """Convert '{key} = "{val}"' formatted string back to the original
    `include` statement string.

    Returns None when no wrapped include entry is present; raises ValueError
    when more than one entry is found.
    """
    val = re.findall(RE_HOCONSTRING_INCLUDE_VALUE, key_val_str)
    if val:
        if len(val) > 1:
            raise ValueError(
                'Found multiple matches. Wrong include key=val format? {val}'.format(
                    val=val
                )
            )
        return unescape_double_quotes(val[0])
class HOCONString:
    def __init__(self, hocon_str):
        """Find an `include` statement (VALUE) in HOCON string and convert it
        into a HOCONSTRING_INCLUDE_KEY="VALUE" pair so that pyhocon can parse
        the document without resolving the include (duplicate chunk; see the
        earlier copy for the full rationale).
        """
        if not isinstance(hocon_str, str):
            raise ValueError('HOCONString() takes str type only.')
        self._hocon_str = wrap_includes(hocon_str)
    def __str__(self) -> str:
        return self.get_contents()
    @classmethod
    def from_dict(cls, d, include=''):
        """Create HOCONString from dict.
        Args:
            include:
                `include` statement to be added to the top of the HOCONString.
        Raises:
            ValueError: if *include* is not a supported include format.
        """
        hocon = ConfigFactory.from_dict(d)
        hocon_str = HOCONConverter.to_hocon(hocon)
        if include:
            if not is_valid_include(include):
                raise ValueError(
                    'Wrong HOCON include format. {include}'.format(include=include)
                )
            hocon_str = NEW_LINE.join([include, hocon_str])
        return cls(hocon_str=hocon_str)
    def to_dict(self, with_include: bool = True) -> dict:
        """Convert HOCON string into dict.
        Args:
            with_include:
                If True then wrapped `include` statements are kept as plain
                strings under HOCONSTRING_INCLUDE_* keys; otherwise excluded.
        """
        if with_include:
            hocon_str = self._hocon_str
        else:
            hocon_str = self.get_contents(with_include=False)
        # Round-trip through JSON to get plain Python containers.
        c = ConfigFactory.parse_string(hocon_str)
        j = HOCONConverter.to_json(c)
        return json.loads(j)
    def merge(self, b, update: bool = False) -> str:
        """Merge self with b and return a plain string of the merge.
        Args:
            b:
                HOCONString, dict, or str; b's `include` statement is ignored.
            update:
                If True then replace self's contents with the merged result.
        Raises:
            TypeError: if b is none of the supported types.
        """
        if isinstance(b, HOCONString):
            d = b.to_dict()
        elif isinstance(b, str):
            d = HOCONString(b).to_dict()
        elif isinstance(b, dict):
            d = b
        else:
            raise TypeError('Unsupported type {t}'.format(t=type(b)))
        self_d = self.to_dict()
        merge_dict(self_d, d)
        hocon = ConfigFactory.from_dict(self_d)
        hocon_str = HOCONConverter.to_hocon(hocon)
        if update:
            self._hocon_str = hocon_str
        return HOCONString(hocon_str).get_contents()
    def get_contents(self, with_include: bool = True) -> str:
        """Render the stored HOCON, recovering wrapped `include` statements.
        Args:
            with_include:
                If True then recover include statements from the wrapped
                key=val form; otherwise exclude them entirely.
        """
        hocon_str = self._hocon_str
        for include_key_val in re.findall(RE_HOCONSTRING_INCLUDE, self._hocon_str):
            logger.debug(
                'Found include key in HOCONString: {include_key_val}'.format(
                    include_key_val=include_key_val
                )
            )
            if with_include:
                original_include_str = unwrap_includes(include_key_val)
                # unwrap_includes returns None for non-matching strings.
                if original_include_str:
                    hocon_str = hocon_str.replace(include_key_val, original_include_str)
            else:
                hocon_str = hocon_str.replace(include_key_val, '')
        return hocon_str | 0.678859 | 0.103341 |
import unittest
from sorted_squared_array import sortedSquaredArrayNormal, sortedSquaredArrayBetter
import random
import time
class TestSortedSquaredArray(unittest.TestCase):
# Testing of normal algo code
def test_normal_1(self):
self.assertEqual(sortedSquaredArrayNormal([1, 2, 3, 5, 6, 8, 9]),[1, 4, 9, 25, 36, 64, 81])
def test_normal_2(self):
self.assertEqual(sortedSquaredArrayNormal([1]),[1])
def test_normal_3(self):
self.assertEqual(sortedSquaredArrayNormal([-1]),[1])
def test_normal_4(self):
self.assertEqual(sortedSquaredArrayNormal([-5, -4, -3, -2, -1]),[1, 4, 9, 16, 25])
def test_normal_5(self):
self.assertEqual(sortedSquaredArrayNormal([-50, -13, -2, -1, 0, 0, 1, 1, 2, 3, 19, 20]),[0, 0, 1, 1, 1, 4, 4, 9, 169, 361, 400, 2500])
def test_normal_6(self):
self.assertEqual(sortedSquaredArrayNormal([-1, -1, 2, 3, 3, 3, 4]), [1, 1, 4, 9, 9, 9, 16])
def test_normal_7(self):
self.assertEqual(sortedSquaredArrayNormal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Testing of better algo code
def test_better_1(self):
self.assertEqual(sortedSquaredArrayBetter([1, 2, 3, 5, 6, 8, 9]),[1, 4, 9, 25, 36, 64, 81])
def test_better_2(self):
self.assertEqual(sortedSquaredArrayBetter([1]),[1])
def test_better_3(self):
self.assertEqual(sortedSquaredArrayBetter([-1]),[1])
def test_better_4(self):
self.assertEqual(sortedSquaredArrayBetter([-5, -4, -3, -2, -1]),[1, 4, 9, 16, 25])
def test_better_5(self):
self.assertEqual(sortedSquaredArrayBetter([-50, -13, -2, -1, 0, 0, 1, 1, 2, 3, 19, 20]),[0, 0, 1, 1, 1, 4, 4, 9, 169, 361, 400, 2500])
def test_betterl_6(self):
self.assertEqual(sortedSquaredArrayBetter([-1, -1, 2, 3, 3, 3, 4]), [1, 1, 4, 9, 9, 9, 16])
def test_better_7(self):
self.assertEqual(sortedSquaredArrayBetter([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Testing runtime comparison
def test_runtime_compare(self):
new_arr = [random.randrange(-100, 100) for i in range(100000)]
new_arr.sort()
initial = time.time()
dummy = sortedSquaredArrayNormal(new_arr)
final = time.time()
normal_time = final - initial
print('Normal time: {}'.format(normal_time))
time.sleep(5)
initial = time.time()
new = sortedSquaredArrayBetter(new_arr)
final = time.time()
better_time = final - initial
print('Better time: {}'.format(better_time))
self.assertTrue(better_time < normal_time) | SortedSquaredArray/test_sorted_squared_array.py | import unittest
from sorted_squared_array import sortedSquaredArrayNormal, sortedSquaredArrayBetter
import random
import time
class TestSortedSquaredArray(unittest.TestCase):
# Testing of normal algo code
def test_normal_1(self):
self.assertEqual(sortedSquaredArrayNormal([1, 2, 3, 5, 6, 8, 9]),[1, 4, 9, 25, 36, 64, 81])
def test_normal_2(self):
self.assertEqual(sortedSquaredArrayNormal([1]),[1])
def test_normal_3(self):
self.assertEqual(sortedSquaredArrayNormal([-1]),[1])
def test_normal_4(self):
self.assertEqual(sortedSquaredArrayNormal([-5, -4, -3, -2, -1]),[1, 4, 9, 16, 25])
def test_normal_5(self):
self.assertEqual(sortedSquaredArrayNormal([-50, -13, -2, -1, 0, 0, 1, 1, 2, 3, 19, 20]),[0, 0, 1, 1, 1, 4, 4, 9, 169, 361, 400, 2500])
def test_normal_6(self):
self.assertEqual(sortedSquaredArrayNormal([-1, -1, 2, 3, 3, 3, 4]), [1, 1, 4, 9, 9, 9, 16])
def test_normal_7(self):
self.assertEqual(sortedSquaredArrayNormal([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Testing of better algo code
def test_better_1(self):
self.assertEqual(sortedSquaredArrayBetter([1, 2, 3, 5, 6, 8, 9]),[1, 4, 9, 25, 36, 64, 81])
def test_better_2(self):
self.assertEqual(sortedSquaredArrayBetter([1]),[1])
def test_better_3(self):
self.assertEqual(sortedSquaredArrayBetter([-1]),[1])
def test_better_4(self):
self.assertEqual(sortedSquaredArrayBetter([-5, -4, -3, -2, -1]),[1, 4, 9, 16, 25])
def test_better_5(self):
self.assertEqual(sortedSquaredArrayBetter([-50, -13, -2, -1, 0, 0, 1, 1, 2, 3, 19, 20]),[0, 0, 1, 1, 1, 4, 4, 9, 169, 361, 400, 2500])
def test_betterl_6(self):
self.assertEqual(sortedSquaredArrayBetter([-1, -1, 2, 3, 3, 3, 4]), [1, 1, 4, 9, 9, 9, 16])
def test_better_7(self):
self.assertEqual(sortedSquaredArrayBetter([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Testing runtime comparison
def test_runtime_compare(self):
new_arr = [random.randrange(-100, 100) for i in range(100000)]
new_arr.sort()
initial = time.time()
dummy = sortedSquaredArrayNormal(new_arr)
final = time.time()
normal_time = final - initial
print('Normal time: {}'.format(normal_time))
time.sleep(5)
initial = time.time()
new = sortedSquaredArrayBetter(new_arr)
final = time.time()
better_time = final - initial
print('Better time: {}'.format(better_time))
self.assertTrue(better_time < normal_time) | 0.479991 | 0.647687 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['InstanceVariableArgs', 'InstanceVariable']
@pulumi.input_type
class InstanceVariableArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str],
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
variable_type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a InstanceVariable resource.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
if masked is not None:
pulumi.set(__self__, "masked", masked)
if protected is not None:
pulumi.set(__self__, "protected", protected)
if variable_type is not None:
pulumi.set(__self__, "variable_type", variable_type)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The name of the variable.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the variable.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter
def masked(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
"""
return pulumi.get(self, "masked")
@masked.setter
def masked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "masked", value)
@property
@pulumi.getter
def protected(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
"""
return pulumi.get(self, "protected")
@protected.setter
def protected(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "protected", value)
@property
@pulumi.getter(name="variableType")
def variable_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of a variable. Available types are: env_var (default) and file.
"""
return pulumi.get(self, "variable_type")
@variable_type.setter
def variable_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "variable_type", value)
@pulumi.input_type
class _InstanceVariableState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering InstanceVariable resources.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if masked is not None:
pulumi.set(__self__, "masked", masked)
if protected is not None:
pulumi.set(__self__, "protected", protected)
if value is not None:
pulumi.set(__self__, "value", value)
if variable_type is not None:
pulumi.set(__self__, "variable_type", variable_type)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The name of the variable.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def masked(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
"""
return pulumi.get(self, "masked")
@masked.setter
def masked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "masked", value)
@property
@pulumi.getter
def protected(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
"""
return pulumi.get(self, "protected")
@protected.setter
def protected(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "protected", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the variable.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="variableType")
def variable_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of a variable. Available types are: env_var (default) and file.
"""
return pulumi.get(self, "variable_type")
@variable_type.setter
def variable_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "variable_type", value)
class InstanceVariable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## # gitlab\_instance\_variable
This resource allows you to create and manage CI/CD variables for your GitLab instance.
For further information on variables, consult the [gitlab
documentation](https://docs.gitlab.com/ee/api/instance_level_ci_variables.html).
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example = gitlab.InstanceVariable("example",
key="instance_variable_key",
masked=False,
protected=False,
value="instance_variable_value")
```
## Import
GitLab instance variables can be imported using an id made up of `variablename`, e.g. console
```sh
$ pulumi import gitlab:index/instanceVariable:InstanceVariable example instance_variable_key
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InstanceVariableArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## # gitlab\_instance\_variable
This resource allows you to create and manage CI/CD variables for your GitLab instance.
For further information on variables, consult the [gitlab
documentation](https://docs.gitlab.com/ee/api/instance_level_ci_variables.html).
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example = gitlab.InstanceVariable("example",
key="instance_variable_key",
masked=False,
protected=False,
value="instance_variable_value")
```
## Import
GitLab instance variables can be imported using an id made up of `variablename`, e.g. console
```sh
$ pulumi import gitlab:index/instanceVariable:InstanceVariable example instance_variable_key
```
:param str resource_name: The name of the resource.
:param InstanceVariableArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InstanceVariableArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InstanceVariableArgs.__new__(InstanceVariableArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
__props__.__dict__["masked"] = masked
__props__.__dict__["protected"] = protected
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
__props__.__dict__["variable_type"] = variable_type
super(InstanceVariable, __self__).__init__(
'gitlab:index/instanceVariable:InstanceVariable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None) -> 'InstanceVariable':
"""
Get an existing InstanceVariable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _InstanceVariableState.__new__(_InstanceVariableState)
__props__.__dict__["key"] = key
__props__.__dict__["masked"] = masked
__props__.__dict__["protected"] = protected
__props__.__dict__["value"] = value
__props__.__dict__["variable_type"] = variable_type
return InstanceVariable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
The name of the variable.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def masked(self) -> pulumi.Output[Optional[bool]]:
"""
If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
"""
return pulumi.get(self, "masked")
@property
@pulumi.getter
def protected(self) -> pulumi.Output[Optional[bool]]:
"""
If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
"""
return pulumi.get(self, "protected")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
The value of the variable.
"""
return pulumi.get(self, "value")
@property
@pulumi.getter(name="variableType")
def variable_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of a variable. Available types are: env_var (default) and file.
"""
return pulumi.get(self, "variable_type") | sdk/python/pulumi_gitlab/instance_variable.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['InstanceVariableArgs', 'InstanceVariable']
@pulumi.input_type
class InstanceVariableArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
value: pulumi.Input[str],
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
variable_type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a InstanceVariable resource.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
if masked is not None:
pulumi.set(__self__, "masked", masked)
if protected is not None:
pulumi.set(__self__, "protected", protected)
if variable_type is not None:
pulumi.set(__self__, "variable_type", variable_type)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The name of the variable.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the variable.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter
def masked(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
"""
return pulumi.get(self, "masked")
@masked.setter
def masked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "masked", value)
@property
@pulumi.getter
def protected(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
"""
return pulumi.get(self, "protected")
@protected.setter
def protected(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "protected", value)
@property
@pulumi.getter(name="variableType")
def variable_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of a variable. Available types are: env_var (default) and file.
"""
return pulumi.get(self, "variable_type")
@variable_type.setter
def variable_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "variable_type", value)
@pulumi.input_type
class _InstanceVariableState:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering InstanceVariable resources.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if masked is not None:
pulumi.set(__self__, "masked", masked)
if protected is not None:
pulumi.set(__self__, "protected", protected)
if value is not None:
pulumi.set(__self__, "value", value)
if variable_type is not None:
pulumi.set(__self__, "variable_type", variable_type)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The name of the variable.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def masked(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
"""
return pulumi.get(self, "masked")
@masked.setter
def masked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "masked", value)
@property
@pulumi.getter
def protected(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
"""
return pulumi.get(self, "protected")
@protected.setter
def protected(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "protected", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the variable.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="variableType")
def variable_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of a variable. Available types are: env_var (default) and file.
"""
return pulumi.get(self, "variable_type")
@variable_type.setter
def variable_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "variable_type", value)
class InstanceVariable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## # gitlab\_instance\_variable
This resource allows you to create and manage CI/CD variables for your GitLab instance.
For further information on variables, consult the [gitlab
documentation](https://docs.gitlab.com/ee/api/instance_level_ci_variables.html).
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example = gitlab.InstanceVariable("example",
key="instance_variable_key",
masked=False,
protected=False,
value="instance_variable_value")
```
## Import
GitLab instance variables can be imported using an id made up of `variablename`, e.g. console
```sh
$ pulumi import gitlab:index/instanceVariable:InstanceVariable example instance_variable_key
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InstanceVariableArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## # gitlab\_instance\_variable
This resource allows you to create and manage CI/CD variables for your GitLab instance.
For further information on variables, consult the [gitlab
documentation](https://docs.gitlab.com/ee/api/instance_level_ci_variables.html).
## Example Usage
```python
import pulumi
import pulumi_gitlab as gitlab
example = gitlab.InstanceVariable("example",
key="instance_variable_key",
masked=False,
protected=False,
value="instance_variable_value")
```
## Import
GitLab instance variables can be imported using an id made up of `variablename`, e.g. console
```sh
$ pulumi import gitlab:index/instanceVariable:InstanceVariable example instance_variable_key
```
:param str resource_name: The name of the resource.
:param InstanceVariableArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InstanceVariableArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InstanceVariableArgs.__new__(InstanceVariableArgs)
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
__props__.__dict__["masked"] = masked
__props__.__dict__["protected"] = protected
if value is None and not opts.urn:
raise TypeError("Missing required property 'value'")
__props__.__dict__["value"] = value
__props__.__dict__["variable_type"] = variable_type
super(InstanceVariable, __self__).__init__(
'gitlab:index/instanceVariable:InstanceVariable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None) -> 'InstanceVariable':
"""
Get an existing InstanceVariable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
:param pulumi.Input[str] value: The value of the variable.
:param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _InstanceVariableState.__new__(_InstanceVariableState)
__props__.__dict__["key"] = key
__props__.__dict__["masked"] = masked
__props__.__dict__["protected"] = protected
__props__.__dict__["value"] = value
__props__.__dict__["variable_type"] = variable_type
return InstanceVariable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
The name of the variable.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def masked(self) -> pulumi.Output[Optional[bool]]:
"""
If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
"""
return pulumi.get(self, "masked")
@property
@pulumi.getter
def protected(self) -> pulumi.Output[Optional[bool]]:
"""
If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
"""
return pulumi.get(self, "protected")
@property
@pulumi.getter
def value(self) -> pulumi.Output[str]:
"""
The value of the variable.
"""
return pulumi.get(self, "value")
@property
@pulumi.getter(name="variableType")
def variable_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of a variable. Available types are: env_var (default) and file.
"""
return pulumi.get(self, "variable_type") | 0.883701 | 0.173884 |
import i_worker
import socket
import struct
# Description of the GeoBrick device. Currently hard-coded.
BRICK_HOSTNAME = 'geobrickanta.solar.pvt'
BRICK_PORT = 1025
BRICK_TIMEOUT = 0.5  # presumably a socket timeout in seconds — confirm where it is applied
# Program spaces that can be used in the GeoBrick.
# Commands are written to P-variables P1000..P1002 as "P1000=<cmd> P1001=<arg1> ...".
COMMAND_REGIS = 'P1000='
ARG1_REGIS = ' P1001='
ARG2_REGIS = ' P1002='
# Brick command dictionary.
# Maps a logical command name to the numeric opcode written into P1000;
# the Brick-side PLC program dispatches on this value.
COMMAND_DICT = {'Home': 1,
                'SelRx': 2,
                'SetAngle': 3,
                'SetZOffset': 4,
                'SetXOffset': 5,
                'Kill': 6,
                'Enable': 7,
                'SetX': 8,
                'SetZ': 9}
# Dictionaries for ethernet packets to the Brick.
# Request-type bytes: 0xC0/0x40 match the Delta Tau PMAC ethernet
# protocol's VR_UPLOAD/VR_DOWNLOAD codes — verify against firmware docs.
RQ_TYPE = {'upload': '\xc0',
           'download': '\x40'}
# Request codes (second header byte) for the PMAC ethernet protocol.
RQ = {'sendline': '\xb0',
      'getline': '\xb1',
      'flush': '\xb3',
      'getmem': '\xb4',
      'setmem': '\xb5',
      'setbit': '\xba',
      'setbits': '\xbb',
      'port': '\xbe',
      'getresponse': '\xbf',
      'readready': '\xc2',
      'response': '\xc4',
      'getbuffer': '\xc5',
      'writebuffer': '\xc6',
      'writeerror': '\xc7',
      'fwdownload': '\xcb',
      'ipaddress': '\xe0'}
# Motor number -> coordinate-system axis letter.
COORDINATE = {1: 'Z',
              3: 'A',
              4: 'X'}
# Motor number -> scale factor (presumably encoder counts per engineering
# unit, including the 96*32 servo/interpolation factors) — TODO confirm.
AXIS_SCALING = {1: 42.5636 * 96 * 32,
                3: 23181.5208 * 96 * 32,
                4: 3973.477 * 96 * 32}
# NOTE(review): presumably the first M/P-variable address used for status
# readback — confirm against the code that uses it later in the file.
MPADDRESSSTART = 900
class BrickWorker(i_worker.IWorker):
def __init__(self):
super(BrickWorker, self).__init__()
self.commands = ['FRM-HOME',
'FRM-KILL',
'FRM-RX-SEL',
'FRM-SET-PA',
'FRM-X-OFFSET',
'FRM-Z-OFFSET',
'FRM-ABS-X',
'FRM-ABS-Z',
'FRM-ENABLE']
self.brick_socket = None
self.brick_ip = socket.gethostbyname(BRICK_HOSTNAME)
self.name = 'GeoBrick-Worker'
# ---------------------------------------------------------------
# COMMAND PACKAGING ROUTINES SPECIFIC TO GEOBRICK
# ---------------------------------------------------------------
#region Method Description
"""
Method: __make_brick_command
Description:
Takes a command to the Brick and packages it into an
ethernet packet recognized by the Brick system.
Arguments:
rq_type: type of request, either 'upload' or 'download'.
rq: nature of request, lookup dictionary defined in RQ.
val: value associated with the request.
index: index associated with the request.
command_packets: list of strings to be packed into TCP packets.
"""
#endregion
def __make_brick_command(self, rq_type, rq, val, index, command_packets):
packets = []
for packet in command_packets:
buf = RQ_TYPE[rq_type] + RQ[rq]
buf += struct.pack('H', val)
buf += struct.pack('H', index)
buf += struct.pack('H', socket.htons(len(packet) + 1))
buf += struct.pack(str(len(packet)) + 's', packet)
buf += struct.pack("B", 0)
packets.append(buf)
return packets
# ---------------------------------------------------------------
# COMMAND ROUTINES
# ---------------------------------------------------------------
#region Method Description
"""
Method: __frm_home
Description:
Runs homing procedure local to the GeoBrick. Do NOT use this
method on its own. This method is error checked before execution.
Arguments:
acc_command: list of the strings sent from the ACC. List format:
['FRM-HOME']
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_home(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-HOME.')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Home'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_rx_sel
Description:
Routine to select one of two receivers on the antenna
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-RX-SEL', rx] where rx is 1 for low-nu and 2 for high-nu.
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_rx_sel(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-RX-SEL.')
return None
rx = None
try:
rx = int(acc_command[1])
if rx not in [1, 2]:
raise ValueError('Invalid RX selection.')
except ValueError:
self.logger('Invalid call to FRM-RX-SEL.')
return None
# Build command based on parameters.
command = COMMAND_REGIS + str(COMMAND_DICT['SelRx']) + \
ARG1_REGIS + str(rx)
command_packets = [command]
return command_packets, self.__make_brick_command('download',
'getresponse',
0, 0,
command_packets)
#region Method Description
"""
Method: __frm_set_pa
Description:
Routine to move motor 3 to a given angle. This routine should only
be called after a FRM_HOME command has been issued. Do NOT use
this method on its own.
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-SET-PA', angle] where angle is the absolute angle to be
set.
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_set_pa(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-SET-PA.')
return None
angle = None
try:
angle = int(acc_command[1])
if angle > 90 or angle < -90:
raise ValueError('Invalid position angle selection.')
except ValueError:
self.logger('Invalid call to FRM-SET-PA.')
return None
# Build command based on parameters.
command = COMMAND_REGIS + str(COMMAND_DICT['SetAngle']) + \
ARG1_REGIS + str(angle)
command_packets = [command]
return command_packets, self.__make_brick_command('download',
'getresponse',
0, 0,
command_packets)
def __frm_x_offset(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-X-OFFSET.')
return None
offset = None
try:
offset = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-X-OFFSET.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetXOffset']) + \
ARG1_REGIS + str(offset)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_z_offset(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-Z-OFFSET.')
return None
offset = None
try:
offset = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-Z-OFFSET.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetZOffset']) + \
ARG1_REGIS + str(offset)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_abs_x
Description:
Routine to move x-axis to specified location
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-ABS-X', destination] where destination is the destination
in physical units (mm).
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_abs_x(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-ABS-X.')
return None
position = None
try:
position = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-ABS-X.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetX']) + \
ARG1_REGIS + str(position)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_abs_z
Description:
Routine to move z-axis to specified location
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-ABS-Z', destination] where destination is the destination
in physical units (mm).
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_abs_z(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-ABS-Z.')
return None
position = None
try:
position = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-ABS-Z.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetZ']) + \
ARG1_REGIS + str(position)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_kill(self, acc_command):
# Error check that the command given was formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-KILL')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Kill'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_enable(self, acc_command):
# Error check that the command given was formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-ENABLE')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Enable'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
# ---------------------------------------------------------------
# FUNCTION MAP
# ---------------------------------------------------------------
function_map = {'FRM-HOME': __frm_home,
'FRM-KILL': __frm_kill,
'FRM-RX-SEL': __frm_rx_sel,
'FRM-SET-PA': __frm_set_pa,
'FRM-X-OFFSET': __frm_x_offset,
'FRM-Z-OFFSET': __frm_z_offset,
'FRM-ABS-X': __frm_abs_x,
'FRM-ABS-Z': __frm_abs_z,
'FRM-ENABLE': __frm_enable}
# ---------------------------------------------------------------
# STATEFRAME HELPERS
# ---------------------------------------------------------------
def __brickmonitor_query(self):
command = 'LIST GATHER'
query_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
query_socket.settimeout(BRICK_TIMEOUT)
query_socket.connect((self.brick_ip, BRICK_PORT))
cmd_string = [command]
cmd = self.__make_brick_command('download', 'getresponse',
0, 0, cmd_string)
query_socket.sendall(cmd[0])
response = query_socket.recv(1024)
query_socket.close()
response = response.replace('\r', ' ')
response = response.split(' ')
parsed_response = []
for monitor_point in response:
parsed_response.append(self.__str2float(monitor_point))
return parsed_response
def __str2float(self, str_val):
num = 0
try:
num = int(str_val, 16)
except Exception:
num = 0
return (num >> 12) * 2**((num & 0xFFF) - 2082)
# ---------------------------------------------------------------
# INTERFACE IMPLEMENTATIONS
# ---------------------------------------------------------------
# region Method Description
"""
Method: get_command_list
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def get_command_list(self):
return self.commands
# region Method Description
"""
Method: execute
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def execute(self, acc_command):
# Use the routine functions to get the commands to push.
packets = self.function_map[acc_command[0]](
self, acc_command)
if packets is not None:
self.logger('Issued the following commands to brick:')
for packet in packets[0]:
self.logger(repr(packet))
# Try pushing message across TCP.
# Wait for reply of at most 1024 bytes.
try:
for packet in packets[1]:
reply = None
self.brick_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.brick_socket.connect((self.brick_ip, BRICK_PORT))
self.brick_socket.sendall(packet)
self.brick_socket.settimeout(BRICK_TIMEOUT)
reply = self.brick_socket.recv(1024)
self.logger('Reply from brick: ' + reply)
self.brick_socket.close()
self.brick_socket = None
except socket.gaierror:
self.logger('Brick hostname could not be resolved.')
except socket.error:
self.logger('Unable to send packet to brick.')
# region Method Description
"""
Method: stateframe_query
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def stateframe_query(self):
stateframe_data = {'AXIS1': {},
'AXIS3': {},
'AXIS4': {}}
fetched_data = self.__brickmonitor_query()
stateframe_data['HOMED'] = \
int(fetched_data[1])
stateframe_data['RXSEL'] = \
int(fetched_data[2])
stateframe_data['AXIS1']['P'] = \
float(fetched_data[3])
stateframe_data['AXIS1']['PERR'] = \
float(fetched_data[4])
stateframe_data['AXIS1']['POFF'] = \
float(fetched_data[5])
stateframe_data['AXIS1']['I'] = \
float(fetched_data[6])
stateframe_data['AXIS1']['POSLIMIT'] = \
int(fetched_data[7])
stateframe_data['AXIS1']['NEGLIMIT'] = \
int(fetched_data[8])
stateframe_data['AXIS1']['AMPFAULT'] = \
int(fetched_data[9])
stateframe_data['AXIS3']['P'] = \
float(fetched_data[10])
stateframe_data['AXIS3']['PERR'] = \
float(fetched_data[11])
stateframe_data['AXIS3']['POFF'] = \
float(fetched_data[12])
stateframe_data['AXIS3']['I'] = \
float(fetched_data[13])
stateframe_data['AXIS3']['POSLIMIT'] = \
int(fetched_data[14])
stateframe_data['AXIS3']['NEGLIMIT'] = \
int(fetched_data[15])
stateframe_data['AXIS3']['AMPFAULT'] = \
int(fetched_data[16])
stateframe_data['AXIS4']['P'] = \
float(fetched_data[17])
stateframe_data['AXIS4']['PERR'] = \
float(fetched_data[18])
stateframe_data['AXIS4']['POFF'] = \
float(fetched_data[19])
stateframe_data['AXIS4']['I'] = \
float(fetched_data[20])
stateframe_data['AXIS4']['POSLIMIT'] = \
int(fetched_data[21])
stateframe_data['AXIS4']['NEGLIMIT'] = \
int(fetched_data[22])
stateframe_data['AXIS4']['AMPFAULT'] = \
int(fetched_data[23])
return stateframe_data | core/brick_worker.py | import i_worker
import socket
import struct
# Description of the GeoBrick device. Currently hard-coded.
BRICK_HOSTNAME = 'geobrickanta.solar.pvt'
BRICK_PORT = 1025
BRICK_TIMEOUT = 0.5
# Program spaces that can be used in the GeoBrick.
COMMAND_REGIS = 'P1000='
ARG1_REGIS = ' P1001='
ARG2_REGIS = ' P1002='
# Brick command dictionary.
COMMAND_DICT = {'Home': 1,
'SelRx': 2,
'SetAngle': 3,
'SetZOffset': 4,
'SetXOffset': 5,
'Kill': 6,
'Enable': 7,
'SetX': 8,
'SetZ': 9}
# Dictionaries for ethernet packets to the Brick.
RQ_TYPE = {'upload': '\xc0',
'download': '\x40'}
RQ = {'sendline': '\xb0',
'getline': '\xb1',
'flush': '\xb3',
'getmem': '\xb4',
'setmem': '\xb5',
'setbit': '\xba',
'setbits': '\xbb',
'port': '\xbe',
'getresponse': '\xbf',
'readready': '\xc2',
'response': '\xc4',
'getbuffer': '\xc5',
'writebuffer': '\xc6',
'writeerror': '\xc7',
'fwdownload': '\xcb',
'ipaddress': '\xe0'}
COORDINATE = {1: 'Z',
3: 'A',
4: 'X'}
AXIS_SCALING = {1: 42.5636 * 96 * 32,
3: 23181.5208 * 96 * 32,
4: 3973.477 * 96 * 32}
MPADDRESSSTART = 900
class BrickWorker(i_worker.IWorker):
def __init__(self):
super(BrickWorker, self).__init__()
self.commands = ['FRM-HOME',
'FRM-KILL',
'FRM-RX-SEL',
'FRM-SET-PA',
'FRM-X-OFFSET',
'FRM-Z-OFFSET',
'FRM-ABS-X',
'FRM-ABS-Z',
'FRM-ENABLE']
self.brick_socket = None
self.brick_ip = socket.gethostbyname(BRICK_HOSTNAME)
self.name = 'GeoBrick-Worker'
# ---------------------------------------------------------------
# COMMAND PACKAGING ROUTINES SPECIFIC TO GEOBRICK
# ---------------------------------------------------------------
#region Method Description
"""
Method: __make_brick_command
Description:
Takes a command to the Brick and packages it into an
ethernet packet recognized by the Brick system.
Arguments:
rq_type: type of request, either 'upload' or 'download'.
rq: nature of request, lookup dictionary defined in RQ.
val: value associated with the request.
index: index associated with the request.
command_packets: list of strings to be packed into TCP packets.
"""
#endregion
def __make_brick_command(self, rq_type, rq, val, index, command_packets):
packets = []
for packet in command_packets:
buf = RQ_TYPE[rq_type] + RQ[rq]
buf += struct.pack('H', val)
buf += struct.pack('H', index)
buf += struct.pack('H', socket.htons(len(packet) + 1))
buf += struct.pack(str(len(packet)) + 's', packet)
buf += struct.pack("B", 0)
packets.append(buf)
return packets
# ---------------------------------------------------------------
# COMMAND ROUTINES
# ---------------------------------------------------------------
#region Method Description
"""
Method: __frm_home
Description:
Runs homing procedure local to the GeoBrick. Do NOT use this
method on its own. This method is error checked before execution.
Arguments:
acc_command: list of the strings sent from the ACC. List format:
['FRM-HOME']
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_home(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-HOME.')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Home'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_rx_sel
Description:
Routine to select one of two receivers on the antenna
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-RX-SEL', rx] where rx is 1 for low-nu and 2 for high-nu.
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_rx_sel(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-RX-SEL.')
return None
rx = None
try:
rx = int(acc_command[1])
if rx not in [1, 2]:
raise ValueError('Invalid RX selection.')
except ValueError:
self.logger('Invalid call to FRM-RX-SEL.')
return None
# Build command based on parameters.
command = COMMAND_REGIS + str(COMMAND_DICT['SelRx']) + \
ARG1_REGIS + str(rx)
command_packets = [command]
return command_packets, self.__make_brick_command('download',
'getresponse',
0, 0,
command_packets)
#region Method Description
"""
Method: __frm_set_pa
Description:
Routine to move motor 3 to a given angle. This routine should only
be called after a FRM_HOME command has been issued. Do NOT use
this method on its own.
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-SET-PA', angle] where angle is the absolute angle to be
set.
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_set_pa(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-SET-PA.')
return None
angle = None
try:
angle = int(acc_command[1])
if angle > 90 or angle < -90:
raise ValueError('Invalid position angle selection.')
except ValueError:
self.logger('Invalid call to FRM-SET-PA.')
return None
# Build command based on parameters.
command = COMMAND_REGIS + str(COMMAND_DICT['SetAngle']) + \
ARG1_REGIS + str(angle)
command_packets = [command]
return command_packets, self.__make_brick_command('download',
'getresponse',
0, 0,
command_packets)
def __frm_x_offset(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-X-OFFSET.')
return None
offset = None
try:
offset = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-X-OFFSET.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetXOffset']) + \
ARG1_REGIS + str(offset)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_z_offset(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-Z-OFFSET.')
return None
offset = None
try:
offset = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-Z-OFFSET.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetZOffset']) + \
ARG1_REGIS + str(offset)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_abs_x
Description:
Routine to move x-axis to specified location
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-ABS-X', destination] where destination is the destination
in physical units (mm).
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_abs_x(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-ABS-X.')
return None
position = None
try:
position = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-ABS-X.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetX']) + \
ARG1_REGIS + str(position)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
#region Method Description
"""
Method: __frm_abs_z
Description:
Routine to move z-axis to specified location
Arguments:
acc_command: list of strings sent from the ACC. List format:
['FRM-ABS-Z', destination] where destination is the destination
in physical units (mm).
Returns:
[0]: A list of packets as strings before compression.
[1]: A list of TCP/Ethernet packets ready to be sent to the Brick.
"""
#endregion
def __frm_abs_z(self, acc_command):
# Error check that the command given is formatted correctly.
if len(acc_command) != 2:
self.logger('Invalid call to FRM-ABS-Z.')
return None
position = None
try:
position = float(acc_command[1])
except ValueError:
self.logger('Invalid call to FRM-ABS-Z.')
return None
command = COMMAND_REGIS + str(COMMAND_DICT['SetZ']) + \
ARG1_REGIS + str(position)
# Build command based on parameters. (This assumes that the
# position given is in physical units.)
command_packets = [command]
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_kill(self, acc_command):
# Error check that the command given was formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-KILL')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Kill'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
def __frm_enable(self, acc_command):
# Error check that the command given was formatted correctly.
if len(acc_command) != 1:
self.logger('Invalid call to FRM-ENABLE')
return None
command_packets = []
command = COMMAND_REGIS + str(COMMAND_DICT['Enable'])
command_packets.append(command)
return command_packets, \
self.__make_brick_command('download', 'getresponse',
0, 0, command_packets)
# ---------------------------------------------------------------
# FUNCTION MAP
# ---------------------------------------------------------------
function_map = {'FRM-HOME': __frm_home,
'FRM-KILL': __frm_kill,
'FRM-RX-SEL': __frm_rx_sel,
'FRM-SET-PA': __frm_set_pa,
'FRM-X-OFFSET': __frm_x_offset,
'FRM-Z-OFFSET': __frm_z_offset,
'FRM-ABS-X': __frm_abs_x,
'FRM-ABS-Z': __frm_abs_z,
'FRM-ENABLE': __frm_enable}
# ---------------------------------------------------------------
# STATEFRAME HELPERS
# ---------------------------------------------------------------
def __brickmonitor_query(self):
command = 'LIST GATHER'
query_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
query_socket.settimeout(BRICK_TIMEOUT)
query_socket.connect((self.brick_ip, BRICK_PORT))
cmd_string = [command]
cmd = self.__make_brick_command('download', 'getresponse',
0, 0, cmd_string)
query_socket.sendall(cmd[0])
response = query_socket.recv(1024)
query_socket.close()
response = response.replace('\r', ' ')
response = response.split(' ')
parsed_response = []
for monitor_point in response:
parsed_response.append(self.__str2float(monitor_point))
return parsed_response
def __str2float(self, str_val):
num = 0
try:
num = int(str_val, 16)
except Exception:
num = 0
return (num >> 12) * 2**((num & 0xFFF) - 2082)
# ---------------------------------------------------------------
# INTERFACE IMPLEMENTATIONS
# ---------------------------------------------------------------
# region Method Description
"""
Method: get_command_list
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def get_command_list(self):
return self.commands
# region Method Description
"""
Method: execute
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def execute(self, acc_command):
# Use the routine functions to get the commands to push.
packets = self.function_map[acc_command[0]](
self, acc_command)
if packets is not None:
self.logger('Issued the following commands to brick:')
for packet in packets[0]:
self.logger(repr(packet))
# Try pushing message across TCP.
# Wait for reply of at most 1024 bytes.
try:
for packet in packets[1]:
reply = None
self.brick_socket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.brick_socket.connect((self.brick_ip, BRICK_PORT))
self.brick_socket.sendall(packet)
self.brick_socket.settimeout(BRICK_TIMEOUT)
reply = self.brick_socket.recv(1024)
self.logger('Reply from brick: ' + reply)
self.brick_socket.close()
self.brick_socket = None
except socket.gaierror:
self.logger('Brick hostname could not be resolved.')
except socket.error:
self.logger('Unable to send packet to brick.')
# region Method Description
"""
Method: stateframe_query
Description:
Refer to abstract class IWorker located in i_worker.py
for full description.
"""
# endregion
def stateframe_query(self):
stateframe_data = {'AXIS1': {},
'AXIS3': {},
'AXIS4': {}}
fetched_data = self.__brickmonitor_query()
stateframe_data['HOMED'] = \
int(fetched_data[1])
stateframe_data['RXSEL'] = \
int(fetched_data[2])
stateframe_data['AXIS1']['P'] = \
float(fetched_data[3])
stateframe_data['AXIS1']['PERR'] = \
float(fetched_data[4])
stateframe_data['AXIS1']['POFF'] = \
float(fetched_data[5])
stateframe_data['AXIS1']['I'] = \
float(fetched_data[6])
stateframe_data['AXIS1']['POSLIMIT'] = \
int(fetched_data[7])
stateframe_data['AXIS1']['NEGLIMIT'] = \
int(fetched_data[8])
stateframe_data['AXIS1']['AMPFAULT'] = \
int(fetched_data[9])
stateframe_data['AXIS3']['P'] = \
float(fetched_data[10])
stateframe_data['AXIS3']['PERR'] = \
float(fetched_data[11])
stateframe_data['AXIS3']['POFF'] = \
float(fetched_data[12])
stateframe_data['AXIS3']['I'] = \
float(fetched_data[13])
stateframe_data['AXIS3']['POSLIMIT'] = \
int(fetched_data[14])
stateframe_data['AXIS3']['NEGLIMIT'] = \
int(fetched_data[15])
stateframe_data['AXIS3']['AMPFAULT'] = \
int(fetched_data[16])
stateframe_data['AXIS4']['P'] = \
float(fetched_data[17])
stateframe_data['AXIS4']['PERR'] = \
float(fetched_data[18])
stateframe_data['AXIS4']['POFF'] = \
float(fetched_data[19])
stateframe_data['AXIS4']['I'] = \
float(fetched_data[20])
stateframe_data['AXIS4']['POSLIMIT'] = \
int(fetched_data[21])
stateframe_data['AXIS4']['NEGLIMIT'] = \
int(fetched_data[22])
stateframe_data['AXIS4']['AMPFAULT'] = \
int(fetched_data[23])
return stateframe_data | 0.546012 | 0.16238 |
from math import log10, log
def fsPathLoss(dist,freq):
"""
dist : Kilometros
freq : Megahertz
"""
return 32.44 + 20*log10(dist) + 20*log10(freq)
def okumuraHataPL(dist, freq, cityKind, areaKind, hb, hm):
"""OKUMURA-HATA URBAN model
freq: signal frequency(500Mhz e 1500Mhz);
AreaKind: area type (1-rural, 2-suburban e 3-urban);
cityKind : cyte type (1-small, 2-medium e 3-large);
hb: base station's height;
hm: mobile's height;
"""
a = 0.0
if (freq <= 200 and cityKind==3):
# Large cities and f<=200 Mhz
a = 8.29*(log10(1.54*hm))**2- 1.1
elif (freq>=400 and cityKind==3):
#Large cities and f>= 400 MHz
a = 3.2*((log10(11.75*hm)**2))- 4.97
else:
#a(hm) for small and medium cities, and large cities where f<200Mhz and f>400Mhz
a = (1.1*log10(freq-0.7))*hm - (1.56*log10(freq-0.8))
# Path loos for urban area
lossUrban = 69.55 + (26.16)*log10(freq)-13.82*log10(hb) - a + (44.9-6.55*log10(hb))*log10(dist)
if (areaKind== 1):
lossOpen= lossUrban - 4.78 *((log10(freq))**2)+18.33*log10(freq)-40.94
return lossOpen
elif (areaKind==2):
#Loss for open are
lossSubUrban = lossUrban - 2*(log10(freq/28.0))**2 - 5.4# //#Loss for suburban area
return lossSubUrban
return lossUrban
def flatEarthPL(dist,freq,hb,hm):
L1 = -20*log10(hb)
L2 = -20*log10(hm)
Lo = 120 + 10*4*log10(dist)
L = Lo + L1 + 2
return L
def cost231PL(dist, freq, hb, hm, ws, bs, hr,cityKind):
"""
COST 231- Cost-Waldrosch-Ikegami Model
freq: signal frequency
hb: base station's height
hm: mobile's height
ws: average width of the street in meters
bs: average setback of buildings in meters
hr: mean height of houses in meters
areaKind: area type (1-rural, 2-suburban e 3-urban).
cityKind : cyte type(1-small, 2-medium e 3-large).
"""
deltaH = hm/hb
Lbsh = 18*log(1+deltaH)
Ka = 54.0
Kd = 18.0
Kf = 4.0
if(hr > hb):
Lbsh = 0
if(hb <= hr and d >= 0.5):
Ka= Ka -0.8*deltaH
elif(hb <= hr and d < 0.5):
Ka = Ka -0.8*daltaH*(d/0.5)
if(hb < hr):
Kd = Kd - 15*(hb-hr)/(hr-hm)
if(cityKind == 1):
Kf = Kf +0.7*(freq/925-1)
else:
Kf = Kf +1.5*(freq/925-1)
Lo = 32.4+20*log10(dist)+20*log10(freq) #free space path loss
Lrts = 8.2+10*log(ws) + 10*log10(freq) + 10*log(deltaH) # roofTop loss
Lmsd =Lbsh +Ka +Kd*log10(dist)+Kf*log(freq)-9*log10(bs) #Multpath loss
#final path loss
PL = Lo + Lrts + Lmsd
return PL
def costHataPL(dist, freq, hb, hm, cityKind, areaKind):
"""
COST 231- Cost-Hata Extension Model
freq: signal frequency
hb: base station's height
hm: mobile's height
cityKind : cyte type(1-small, 2-medium e 3-large).
areaKind : area type(1-open, 2-semiurban, 3- urban)
"""
c = 0
if areaKind==3:
c = 3
ar =(1.1*log10(freq)-0.7)*hm-(1.56*log(freq)-0.8)
return 46.3 +33.9*log10(freq)-13.82*log10(hb)-ar+(44.9-6.55*log(hb))*log(dist)+c
def ericssonPL(dist, freq, hb, hm, cityKind, areaKind):
"""Ericsson Model
freq: signal frequency(range from 100 to 2000Mhz)
tyArea: area type (1-rural, 2-suburban e 3-urban).
cityKind : cyte type(1-small, 2-medium e 3-large).
hb: base station's height
hm: mobile's height
"""
g = 44.49*log10(freq)-4.78*(log10(freq))**2
a2= 12
a3= 0.1
if(cityKind == 3):
a0 = 36.2
a1 = 30.2
elif(cityKind == 2):
a0 = 43.2
a1 = 68.9
else:
a0 = 45.9
a1 = 100.6
PL = a0+a1*log10(dist)+a2*log10(hb)+a3*(log10(hb))*(log10(dist))-3.2*log10((11.75*hm)**2)+g
return PL
#print (fsPathLoss(1,1000))
#print (okumuraHataPL(1,1000,3,2,50,1.5))
#print (flatEarthPL(1,1000,50,1.5))
#print (cost231PL(1,1000,50,1.5,20,10,35,2))
#print (costHataPL(1,1000,50,1.5,2,2))
#print (ericssonPL(1,1000,50,1.5,2,2)) | Projeto/freeSpace.py | from math import log10, log
def fsPathLoss(dist,freq):
"""
dist : Kilometros
freq : Megahertz
"""
return 32.44 + 20*log10(dist) + 20*log10(freq)
def okumuraHataPL(dist, freq, cityKind, areaKind, hb, hm):
"""OKUMURA-HATA URBAN model
freq: signal frequency(500Mhz e 1500Mhz);
AreaKind: area type (1-rural, 2-suburban e 3-urban);
cityKind : cyte type (1-small, 2-medium e 3-large);
hb: base station's height;
hm: mobile's height;
"""
a = 0.0
if (freq <= 200 and cityKind==3):
# Large cities and f<=200 Mhz
a = 8.29*(log10(1.54*hm))**2- 1.1
elif (freq>=400 and cityKind==3):
#Large cities and f>= 400 MHz
a = 3.2*((log10(11.75*hm)**2))- 4.97
else:
#a(hm) for small and medium cities, and large cities where f<200Mhz and f>400Mhz
a = (1.1*log10(freq-0.7))*hm - (1.56*log10(freq-0.8))
# Path loos for urban area
lossUrban = 69.55 + (26.16)*log10(freq)-13.82*log10(hb) - a + (44.9-6.55*log10(hb))*log10(dist)
if (areaKind== 1):
lossOpen= lossUrban - 4.78 *((log10(freq))**2)+18.33*log10(freq)-40.94
return lossOpen
elif (areaKind==2):
#Loss for open are
lossSubUrban = lossUrban - 2*(log10(freq/28.0))**2 - 5.4# //#Loss for suburban area
return lossSubUrban
return lossUrban
def flatEarthPL(dist,freq,hb,hm):
L1 = -20*log10(hb)
L2 = -20*log10(hm)
Lo = 120 + 10*4*log10(dist)
L = Lo + L1 + 2
return L
def cost231PL(dist, freq, hb, hm, ws, bs, hr,cityKind):
"""
COST 231- Cost-Waldrosch-Ikegami Model
freq: signal frequency
hb: base station's height
hm: mobile's height
ws: average width of the street in meters
bs: average setback of buildings in meters
hr: mean height of houses in meters
areaKind: area type (1-rural, 2-suburban e 3-urban).
cityKind : cyte type(1-small, 2-medium e 3-large).
"""
deltaH = hm/hb
Lbsh = 18*log(1+deltaH)
Ka = 54.0
Kd = 18.0
Kf = 4.0
if(hr > hb):
Lbsh = 0
if(hb <= hr and d >= 0.5):
Ka= Ka -0.8*deltaH
elif(hb <= hr and d < 0.5):
Ka = Ka -0.8*daltaH*(d/0.5)
if(hb < hr):
Kd = Kd - 15*(hb-hr)/(hr-hm)
if(cityKind == 1):
Kf = Kf +0.7*(freq/925-1)
else:
Kf = Kf +1.5*(freq/925-1)
Lo = 32.4+20*log10(dist)+20*log10(freq) #free space path loss
Lrts = 8.2+10*log(ws) + 10*log10(freq) + 10*log(deltaH) # roofTop loss
Lmsd =Lbsh +Ka +Kd*log10(dist)+Kf*log(freq)-9*log10(bs) #Multpath loss
#final path loss
PL = Lo + Lrts + Lmsd
return PL
def costHataPL(dist, freq, hb, hm, cityKind, areaKind):
"""
COST 231- Cost-Hata Extension Model
freq: signal frequency
hb: base station's height
hm: mobile's height
cityKind : cyte type(1-small, 2-medium e 3-large).
areaKind : area type(1-open, 2-semiurban, 3- urban)
"""
c = 0
if areaKind==3:
c = 3
ar =(1.1*log10(freq)-0.7)*hm-(1.56*log(freq)-0.8)
return 46.3 +33.9*log10(freq)-13.82*log10(hb)-ar+(44.9-6.55*log(hb))*log(dist)+c
def ericssonPL(dist, freq, hb, hm, cityKind, areaKind):
"""Ericsson Model
freq: signal frequency(range from 100 to 2000Mhz)
tyArea: area type (1-rural, 2-suburban e 3-urban).
cityKind : cyte type(1-small, 2-medium e 3-large).
hb: base station's height
hm: mobile's height
"""
g = 44.49*log10(freq)-4.78*(log10(freq))**2
a2= 12
a3= 0.1
if(cityKind == 3):
a0 = 36.2
a1 = 30.2
elif(cityKind == 2):
a0 = 43.2
a1 = 68.9
else:
a0 = 45.9
a1 = 100.6
PL = a0+a1*log10(dist)+a2*log10(hb)+a3*(log10(hb))*(log10(dist))-3.2*log10((11.75*hm)**2)+g
return PL
#print (fsPathLoss(1,1000))
#print (okumuraHataPL(1,1000,3,2,50,1.5))
#print (flatEarthPL(1,1000,50,1.5))
#print (cost231PL(1,1000,50,1.5,20,10,35,2))
#print (costHataPL(1,1000,50,1.5,2,2))
#print (ericssonPL(1,1000,50,1.5,2,2)) | 0.357119 | 0.364269 |
from __future__ import annotations
from dnnv.properties.expressions.base import Expression
from ...expressions import BinaryExpression, Call
from ..base import GenericExpressionTransformer
from ._calls import FunctionSubstitutor
from ...visitors import DetailsInference
class SubstituteCalls(GenericExpressionTransformer):
    """Replaces Call expressions that have a registered FunctionSubstitutor
    with their substituted form.

    `form` ("dnf" or "cnf") hints the substitutor about how to shape the
    substitution; it is flipped while visiting a Not, since negation swaps
    the two normal forms.
    """

    def __init__(self, form="dnf"):
        super().__init__()
        # `form` provides a hint to the substitutor on how to efficiently
        # format the substitution expression
        self.form = form

    def visit(self, expression):
        # Substitutors rely on inferred expression details; run inference
        # once on the top-level expression before transforming.
        if self._top_level:
            DetailsInference().visit(expression)
        return super().visit(expression)

    def visit_BinaryExpression(self, expression: BinaryExpression) -> BinaryExpression:
        expr_type = type(expression)
        expr1 = expression.expr1
        expr2 = expression.expr2
        # Try a binary-expression-specific substitution on whichever operand
        # is a concrete Call. expr1 has priority; expr2 is only considered
        # when expr1 is not a concrete Call (the `break` mirrors the original
        # if/elif structure, which never tried both sides).
        for call_side in (expr1, expr2):
            if isinstance(call_side, Call) and call_side.function.is_concrete:
                substitutor = FunctionSubstitutor.lookup(call_side.function.value)
                method_name = f"substitute_{expr_type.__name__}"
                if substitutor is not None and hasattr(substitutor, method_name):
                    result = getattr(substitutor, method_name)(
                        expr1, expr2, form=self.form
                    )
                    if result is not NotImplemented:
                        return self.visit(result)
                break
        return expr_type(self.visit(expr1), self.visit(expr2))

    def visit_Call(self, expression: Call) -> Expression:
        function = self.visit(expression.function)
        args = tuple(self.visit(arg) for arg in expression.args)
        kwargs = {name: self.visit(value) for name, value in expression.kwargs.items()}
        if function.is_concrete:
            substitutor = FunctionSubstitutor.lookup(function.value)
            if substitutor is not None:
                result = substitutor(function, *args, **kwargs)
                if result is not NotImplemented:
                    return result
        return Call(function, args, kwargs)

    def visit_Not(self, expression):
        # Negation dualizes DNF/CNF, so flip the hint for the subtree and
        # restore it afterwards.
        original_form = self.form
        self.form = "cnf" if original_form == "dnf" else "dnf"
        result = super().generic_visit(expression)
        self.form = original_form
        return result
__all__ = ["SubstituteCalls"] | dnnv/properties/transformers/substitute_calls/base.py | from __future__ import annotations
from dnnv.properties.expressions.base import Expression
from ...expressions import BinaryExpression, Call
from ..base import GenericExpressionTransformer
from ._calls import FunctionSubstitutor
from ...visitors import DetailsInference
class SubstituteCalls(GenericExpressionTransformer):
def __init__(self, form="dnf"):
super().__init__()
# `form` provides a hint to the substitutor on how to efficiently
# format the substitution expression
self.form = form
def visit(self, expression):
if self._top_level:
DetailsInference().visit(expression)
return super().visit(expression)
def visit_BinaryExpression(self, expression: BinaryExpression) -> BinaryExpression:
expr_type = type(expression)
expr1 = expression.expr1
expr2 = expression.expr2
if isinstance(expr1, Call) and expr1.function.is_concrete:
substitutor = FunctionSubstitutor.lookup(expr1.function.value)
binexpr_substitute_method = f"substitute_{expr_type.__name__}"
if substitutor is not None and hasattr(
substitutor, binexpr_substitute_method
):
result = getattr(substitutor, binexpr_substitute_method)(
expr1, expr2, form=self.form
)
if result is not NotImplemented:
return self.visit(result)
elif isinstance(expr2, Call) and expr2.function.is_concrete:
substitutor = FunctionSubstitutor.lookup(expr2.function.value)
binexpr_substitute_method = f"substitute_{expr_type.__name__}"
if substitutor is not None and hasattr(
substitutor, binexpr_substitute_method
):
result = getattr(substitutor, binexpr_substitute_method)(
expr1, expr2, form=self.form
)
if result is not NotImplemented:
return self.visit(result)
return expr_type(self.visit(expr1), self.visit(expr2))
def visit_Call(self, expression: Call) -> Expression:
function = self.visit(expression.function)
args = tuple([self.visit(arg) for arg in expression.args])
kwargs = {name: self.visit(value) for name, value in expression.kwargs.items()}
if function.is_concrete:
substitutor = FunctionSubstitutor.lookup(function.value)
if substitutor is not None:
result = substitutor(function, *args, **kwargs)
if result is not NotImplemented:
return result
expr = Call(function, args, kwargs)
return expr
def visit_Not(self, expression):
form = self.form
self.form = "cnf" if form == "dnf" else "dnf"
result = super().generic_visit(expression)
self.form = form
return result
__all__ = ["SubstituteCalls"] | 0.810366 | 0.361249 |
import os
import numpy as np
import onnxruntime as onnxrt
import threading
import sys
from onnxruntime.capi.onnxruntime_pybind11_state import Fail
def RegisterCustomOpsLibrary():
    """Load a custom-op shared library into onnxruntime sessions and smoke-test it.

    Resolves the platform-specific library name, registers it on several
    SessionOptions instances, runs the test model once and checks the output.
    Raises FileNotFoundError when the library or the test model is missing.
    """
    # One platform -> library-name mapping; the existence check itself was
    # copy-pasted three times in the original and is identical everywhere.
    if sys.platform.startswith("win"):
        shared_library = 'custom_op_library.dll'
    elif sys.platform.startswith("darwin"):
        shared_library = 'libcustom_op_library.dylib'
    else:
        shared_library = './libcustom_op_library.so'
    if not os.path.exists(shared_library):
        raise FileNotFoundError("Unable to find '{0}'".format(shared_library))

    this = os.path.dirname(__file__)
    custom_op_model = os.path.join(this, "custom_op_test.onnx")
    if not os.path.exists(custom_op_model):
        raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))

    so1 = onnxrt.SessionOptions()
    print("start")
    so1.register_custom_ops_library(shared_library)
    # Model loading successfully indicates that the custom op node could be
    # resolved successfully.
    sess1 = onnxrt.InferenceSession(custom_op_model, so1)

    # Run with input data.
    input_name_0 = sess1.get_inputs()[0].name
    input_name_1 = sess1.get_inputs()[1].name
    output_name = sess1.get_outputs()[0].name
    input_0 = np.ones((3, 5)).astype(np.float32)
    input_1 = np.zeros((3, 5)).astype(np.float32)
    res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
    output_expected = np.ones((3, 5)).astype(np.float32)
    np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
    print("done assert")

    # Create an alias of the SessionOptions instance and construct another
    # InferenceSession with it: the registered library must still resolve.
    so2 = so1
    sess2 = onnxrt.InferenceSession(custom_op_model, so2)

    # A fresh SessionOptions instance referencing the same shared library.
    so3 = onnxrt.SessionOptions()
    so3.register_custom_ops_library(shared_library)
    sess3 = onnxrt.InferenceSession(custom_op_model, so3)
if __name__ == '__main__':
print("register")
RegisterCustomOpsLibrary() | ONNX_runtime_hacks/Custom_op_loader.py | import os
import numpy as np
import onnxruntime as onnxrt
import threading
import sys
from onnxruntime.capi.onnxruntime_pybind11_state import Fail
def RegisterCustomOpsLibrary():
if sys.platform.startswith("win"):
shared_library = 'custom_op_library.dll'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
elif sys.platform.startswith("darwin"):
shared_library = 'libcustom_op_library.dylib'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
else:
shared_library = './libcustom_op_library.so'
if not os.path.exists(shared_library):
raise FileNotFoundError("Unable to find '{0}'".format(shared_library))
this = os.path.dirname(__file__)
custom_op_model = os.path.join(this, "custom_op_test.onnx")
if not os.path.exists(custom_op_model):
raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))
so1 = onnxrt.SessionOptions()
print("start")
so1.register_custom_ops_library(shared_library)
# Model loading successfully indicates that the custom op node could be resolved successfully
sess1 = onnxrt.InferenceSession(custom_op_model, so1)
#Run with input data
input_name_0 = sess1.get_inputs()[0].name
input_name_1 = sess1.get_inputs()[1].name
output_name = sess1.get_outputs()[0].name
input_0 = np.ones((3,5)).astype(np.float32)
input_1 = np.zeros((3,5)).astype(np.float32)
res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
output_expected = np.ones((3,5)).astype(np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
print("done assert")
# Create an alias of SessionOptions instance
# We will use this alias to construct another InferenceSession
so2 = so1
# Model loading successfully indicates that the custom op node could be resolved successfully
sess2 = onnxrt.InferenceSession(custom_op_model, so2)
# Create another SessionOptions instance with the same shared library referenced
so3 = onnxrt.SessionOptions()
so3.register_custom_ops_library(shared_library)
sess3 = onnxrt.InferenceSession(custom_op_model, so3)
if __name__ == '__main__':
print("register")
RegisterCustomOpsLibrary() | 0.212722 | 0.129954 |
import re
import csv
import imp
import sys
import yaml
import argparse
import sys
import types
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
# Add !regexp as a known Yaml dialect.
yaml.add_constructor('!regexp', lambda l, n: l.construct_scalar(n))
if PY3: # pragma: no cover
text_type = str
binary_type = bytes
unicode = str
else:
text_type = unicode
binary_type = str
class Line(object):
    """Base class for one parsed log line of a particular dialect.

    Subclasses override parse()/is_type()/get_row() to extract their fields.
    """

    # (Classificator, str) -> Line
    @classmethod
    def parse(cls, classificator, line):
        """Build a Line from a raw log string; subclasses extract fields here."""
        return cls(classificator)

    def __init__(self, classificator, *args, **kwargs):
        self._classificator = classificator

    def is_type(self):
        """Return True when the raw line belongs to this dialect (abstract).

        NotImplementedError subclasses RuntimeError, so callers that caught
        the original RuntimeError keep working.
        """
        raise NotImplementedError('Line.is_type() is not implemented yet!')

    def is_production(self):
        """Return True when the line comes from a production source."""
        return True

    def classify(self):
        """Run the dialect's classification rules against this line."""
        return self._classificator.classify(self)

    def get_row(self):
        """Return the CSV row (list of str) for this line (abstract)."""
        raise NotImplementedError('Line.get_row() is not implemented yet!')
class IgnoreLine(Exception):
    """Raised by a Rule to signal that the current log line must be skipped."""
    pass
class Match(object):
    """A set of per-field conditions tested against Line objects.

    Condition keys are `match_<field>` (string equality) or
    `pattern_<field>` (regular expression); patterns are compiled once.
    """

    def __init__(self, **conditions):
        self.conditions = conditions
        # Pre-compile regex patterns so check() stays cheap per line.
        for key, raw in self.conditions.items():
            if key.startswith('pattern_'):
                self.conditions[key] = re.compile(raw)

    def check(self, line):
        """Return (matched, captured_groups) for `line` against all conditions."""
        captured = None
        for condition, expected in self.conditions.items():
            action, field_name = condition.split('_', 1)
            actual = getattr(line, field_name)
            if not actual:
                return False, None
            if action == 'match':
                if unicode(actual) == unicode(expected):
                    continue
                return False, None
            if action == 'pattern':
                found = expected.match(actual)
                if found is not None:
                    if found.groups():
                        captured = found.groups()
                    continue
                return False, None
            # Unknown condition kind: never matches.
            return False, None
        return True, captured
class Rule(object):
    """One classification rule: a Match plus the actions applied on success."""

    def __init__(self, match=None, ignore=None, **actions):
        self._match = match
        self._ignore = ignore
        self._actions = actions

    def apply(self, line):
        """Run the rule against `line`; return True when the match fired."""
        matched, groups = self._match.check(line)
        if matched:
            self.action(line, groups)
        return matched

    def action(self, line, groups=None):
        """Apply the configured field assignments, or drop the line entirely."""
        if self._ignore:
            raise IgnoreLine()
        for field_name, template in self._actions.items():
            # Format placeholder templates with the captured regex groups.
            if '{' in unicode(template) and '}' in unicode(template) and groups is not None:
                template = template.format(*groups)
            setattr(line, field_name, template)
class Classificator(object):
    """Builds Rule objects from raw rule dictionaries and applies them in order."""

    def __init__(self, rules):
        self.rules = []
        for raw in rules:
            # Keys starting with match_/pattern_ are conditions; `ignore` is
            # the drop flag; everything else becomes an action assignment.
            conditions = {key: val for key, val in raw.items()
                          if key.startswith(('match_', 'pattern_'))}
            actions = {key: val for key, val in raw.items()
                       if not key.startswith(('match_', 'pattern_', 'ignore'))}
            self.rules.append(Rule(Match(**conditions), raw.get('ignore'), **actions))

    def classify(self, line):
        """Apply rules in order; return True as soon as one matches."""
        return any(rule.apply(line) for rule in self.rules)
class Dialect(object):
    """One log dialect: a dynamically loaded Line class plus its classifier."""
    # Dialect(dict) -> None
    def __init__(self, config):
        # Load the dialect's module from the file path in config['package'].
        # NOTE(review): imp.load_source is deprecated since Python 3.4;
        # importlib.machinery is the modern equivalent -- confirm before migrating.
        self._module = imp.load_source(config['dialect'], config['package'])
        self._class = getattr(self._module, config['class'])
        self.classificator = Classificator(config.get('classifications') or [])
    # str -> Line
    def parse(self, line):
        # Delegate to the dialect's Line subclass.
        return self._class.parse(self.classificator, line)
# object
def get_dialect(dialects, line):
    """Return the first dialect-parsed object that accepts `line`.

    A dialect accepts a line when parsing succeeds, is_type() and
    is_production() are both true. Any exception from a dialect is treated
    as "does not accept" (best-effort probing). Returns None when no
    dialect accepts the line.
    """
    for candidate in dialects:
        try:
            parsed = candidate.parse(line)
            if parsed.is_type() and parsed.is_production():
                return parsed
        except Exception:
            continue
# Dialect
def make_dialect(file_pointer):
    """Parse a YAML dialect configuration file and build a Dialect from it.

    PyYAML >= 5.1 warns (and 6.x raises TypeError) on yaml.load() without an
    explicit Loader. FullLoader is used because yaml.add_constructor() at
    module import time registers the !regexp constructor on the default
    loaders, FullLoader included.
    """
    return Dialect(yaml.load(file_pointer, Loader=yaml.FullLoader))
# Line
def classify_line(dialects, line):
    """Parse `line` with the first accepting dialect and classify it.

    Returns the classified Line object, or None when no dialect accepts the
    line or a classification rule explicitly ignores it.
    """
    # Detect the first valid dialect.
    parsed = get_dialect(dialects, line)
    if not parsed:
        return None
    # Classify the record; rules may veto the line entirely.
    try:
        parsed.classify()
    except IgnoreLine:
        return None
    return parsed
def main():
# Command Line Interface.
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help='output file, using stdout as default')
parser.add_argument('-f', '--file', type=argparse.FileType('r'), default=sys.stdin, help='log file to process, using stdin as default')
parser.add_argument('-s', '--separator', default='\001', help='CSV file separator. default is a non-printable character: \\001')
parser.add_argument('config', type=argparse.FileType('r'), nargs="+", help='dialect YAML configurations files.')
args = parser.parse_args()
# Load supported dialect's configurations.
yaml.add_constructor('!regexp', lambda l, n: l.construct_scalar(n))
dialects = list(map(make_dialect, args.config))
# Create a CSV writer.
writer = csv.writer(args.output, delimiter=args.separator)
# Iterate over the log file ...
for line in args.file:
# Detect the first valid dialect
obj = classify_line(dialects, line)
if not obj: continue
# Write out the final record
writer.writerow(obj.get_row()) | logsanitizer/__init__.py |
import re
import csv
import imp
import sys
import yaml
import argparse
import sys
import types
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
# Add !regexp as a known Yaml dialect.
yaml.add_constructor('!regexp', lambda l, n: l.construct_scalar(n))
if PY3: # pragma: no cover
text_type = str
binary_type = bytes
unicode = str
else:
text_type = unicode
binary_type = str
class Line(object):
# Line
@classmethod
def parse(cls, classificator, line):
return cls(classificator)
# void
def __init__(self, classificator, *args, **kwargs):
self._classificator = classificator
# bool
def is_type(self):
raise RuntimeError('Line.is_type() is not implemented yet!')
# bool
def is_production(self):
return True
# void
def classify(self):
return self._classificator.classify(self)
# list<str>
def get_row(self):
raise RuntimeError('Line.get_row() is not implemented yet!')
class IgnoreLine(Exception):
pass
class Match(object):
# void
def __init__(self, **conditions):
self.conditions = conditions
for k,v in self.conditions.items():
if k.startswith('pattern_'):
self.conditions[k] = re.compile(v)
# tuple<bool,object>
def check(self, line):
groups = None
for condition, value in self.conditions.items():
action, field_name = condition.split('_',1)
field_value = getattr(line, field_name)
if not field_value:
return False, None
if action == 'match' and unicode(field_value) == unicode(value):
continue
elif action == 'pattern':
match = value.match(field_value)
if match is not None:
if match.groups():
groups = match.groups()
continue
return False, None
return True, groups
class Rule(object):
# void
def __init__(self, match=None, ignore=None, **actions):
self._match = match
self._ignore = ignore
self._actions = actions
# bool
def apply(self, line):
success, groups = self._match.check(line)
if not success:
return False
self.action(line, groups)
return True
# void
def action(self, line, groups=None):
if self._ignore:
raise IgnoreLine()
for field_name, value in self._actions.items():
if '{' in unicode(value) and '}' in unicode(value) and groups is not None:
value = value.format(*groups)
setattr(line, field_name, value)
class Classificator(object):
# void
def __init__(self, rules):
self.rules = [ Rule( Match(**dict(filter(lambda x: x[0].startswith(('match_','pattern_')), \
r.items()))), r.get('ignore'), **dict(filter(lambda x: \
not x[0].startswith(('match_','pattern_','ignore')), r.items()))) \
for r in rules ]
# void
def classify(self, line):
for rule in self.rules:
if rule.apply(line):
return True
return False
class Dialect(object):
# void
def __init__(self, config):
self._module = imp.load_source(config['dialect'], config['package'])
self._class = getattr(self._module, config['class'])
self.classificator = Classificator(config.get('classifications') or [])
# object
def parse(self, line):
return self._class.parse(self.classificator, line)
# object
def get_dialect(dialects, line):
for dialect in dialects:
try:
obj = dialect.parse(line)
if not obj.is_type(): continue
if not obj.is_production(): continue
return obj
except Exception as e:
continue
# Dialect
def make_dialect(file_pointer):
return Dialect(yaml.load(file_pointer))
# Line
def classify_line(dialects, line):
# Detect the first valid dialect
obj = get_dialect(dialects, line)
if not obj:
return None
# Classify the record
try:
obj.classify()
except IgnoreLine as e:
return None
return obj
def main():
# Command Line Interface.
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help='output file, using stdout as default')
parser.add_argument('-f', '--file', type=argparse.FileType('r'), default=sys.stdin, help='log file to process, using stdin as default')
parser.add_argument('-s', '--separator', default='\001', help='CSV file separator. default is a non-printable character: \\001')
parser.add_argument('config', type=argparse.FileType('r'), nargs="+", help='dialect YAML configurations files.')
args = parser.parse_args()
# Load supported dialect's configurations.
yaml.add_constructor('!regexp', lambda l, n: l.construct_scalar(n))
dialects = list(map(make_dialect, args.config))
# Create a CSV writer.
writer = csv.writer(args.output, delimiter=args.separator)
# Iterate over the log file ...
for line in args.file:
# Detect the first valid dialect
obj = classify_line(dialects, line)
if not obj: continue
# Write out the final record
writer.writerow(obj.get_row()) | 0.393618 | 0.153232 |
import numpy as np
def str_extended(value):
    """
    A small helper function to convert a python object into executable code reproducing that object.

    Supported types
    ---------------
    None, bool, int, float, complex, str, list, dict, tuple, numpy.ndarray
    (plus numpy scalars, via .item()).

    Raises ValueError for unsupported types.
    """
    # Print full arrays so repr() round-trips large ndarrays too.
    np.set_printoptions(threshold=np.inf)

    def str_str(val):
        return "'" + val + "'"

    def str_list(val):
        return "[" + ", ".join(str_extended(v) for v in val) + "]"

    def str_dict(val):
        return "{" + ", ".join(
            str_extended(k) + " : " + str_extended(v) for k, v in val.items()
        ) + "}"

    def str_tuple(val):
        if len(val) == 1:
            # Bug fix: a single-element tuple needs the trailing comma --
            # "(1)" evaluates to the int 1, not the tuple (1,).
            return "(" + str_extended(val[0]) + ",)"
        return "(" + ", ".join(str_extended(v) for v in val) + ")"

    def str_nparray(val):
        return "np." + repr(val)

    case_dict = {type(None): str,
                 bool: str,
                 int: str,
                 float: str,
                 complex: str,
                 str: str_str,
                 list: str_list,
                 dict: str_dict,
                 tuple: str_tuple,
                 np.ndarray: str_nparray}
    try:
        return case_dict[type(value)](value)
    except KeyError as key:
        try:
            # Maybe it's some numpy scalar type? .item() unboxes it.
            return case_dict[type(value.item())](value)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # propagate; any other failure means the type is unsupported.
            raise ValueError("Unsupported type: " + str(key) + " for attribute " + str(value))
def mod2py(mod, path, ignoreModules=True):
"""
This function generates a python script containing all the values in the
module. This is designed to print configuration modules in an
easy-to-reload-and-inspect manner.
Parameters
----------
mod : a python module
the module to save
path : str
the file to save to
ignoreModules : bool
skip anything that's itself a module.
True by default.
"""
to_write = [attr for attr in dir(mod) if not attr[:2] == "__"]
with open(path, 'xt') as myfile:
print("import numpy as np", file=myfile)
print("", file=myfile)
for attr in to_write:
try:
print(attr+" = "+str_extended(getattr(mod, attr)), file=myfile)
except ValueError as VE:
cur_type = type(getattr(mod, attr))
if not (
(cur_type == type(np) and ignoreModules)
or (cur_type == type(mod2py) and getattr(mod, attr).__name__ == "gen_start_conf")
):
raise VE | polychromosims/save_module_to_script.py | import numpy as np
def str_extended(value):
"""
A small helper function to convert a python object into executable code reproducing that object.
Supported types
---------------
None, bool, int, float, complex, str, list, dict, tuple, numpy.ndarray
"""
np.set_printoptions(threshold=np.inf)
def str_str(val):
return "'"+val+"'"
def str_list(val):
if len(val) == 0:
return "[]"
ret = "["
for v in val:
ret += str_extended(v)+", "
# Remove the last ", "
return ret[:-2]+"]"
def str_dict(val):
if len(val) == 0:
return "{}"
ret = "{"
for key in val.keys():
ret += str_extended(key)+" : "+str_extended(val[key])+", "
return ret[:-2]+"}"
def str_tuple(val):
if len(val) == 0:
return "()"
ret = "("
for v in val:
ret += str_extended(v)+", "
# Remove the last ", "
return ret[:-2]+")"
def str_nparray(val):
return "np."+repr(val)
case_dict = { type(None) : str,
bool : str,
int : str,
float : str,
complex : str,
str : str_str,
list : str_list,
dict : str_dict,
tuple : str_tuple,
np.ndarray : str_nparray }
try:
return case_dict[type(value)](value)
except KeyError as key:
try:
# Maybe it's some numpy type?
return case_dict[type(value.item())](value)
except:
raise ValueError("Unsupported type: "+str(key)+" for attribute "+str(value))
def mod2py(mod, path, ignoreModules=True):
"""
This function generates a python script containing all the values in the
module. This is designed to print configuration modules in an
easy-to-reload-and-inspect manner.
Parameters
----------
mod : a python module
the module to save
path : str
the file to save to
ignoreModules : bool
skip anything that's itself a module.
True by default.
"""
to_write = [attr for attr in dir(mod) if not attr[:2] == "__"]
with open(path, 'xt') as myfile:
print("import numpy as np", file=myfile)
print("", file=myfile)
for attr in to_write:
try:
print(attr+" = "+str_extended(getattr(mod, attr)), file=myfile)
except ValueError as VE:
cur_type = type(getattr(mod, attr))
if not (
(cur_type == type(np) and ignoreModules)
or (cur_type == type(mod2py) and getattr(mod, attr).__name__ == "gen_start_conf")
):
raise VE | 0.409693 | 0.403097 |
import os
BASE_FOLDER = os.path.expanduser('~/rbkcli')
TARGETS_FOLDER = BASE_FOLDER + '/targets'
CONF_FOLDER = BASE_FOLDER + '/conf'
LOGS_FOLDER = BASE_FOLDER + '/logs'
SCRIPTS_FOLDER = BASE_FOLDER + '/scripts'
CMDLETS_FOLDER = CONF_FOLDER + '/cmdlets'
SUPPORTED_API_VERSIONS = ['v1',
'v2',
'internal',
'adminCli',
'rbkcli',
'cmdlets',
'scripts']
SUPPORTED_API_METHODS = ['head',
'get',
'post',
'put',
'patch',
'delete']
USERS_PROFILE = ['dev', 'admin', 'support']
SUPPORTED_USER_METHODS = {
'admin': ['get'],
'support': SUPPORTED_API_METHODS,
'dev': SUPPORTED_API_METHODS
}
SUPPORTED_OUTPUT_FORMATS = ['raw',
'json',
'table',
'list',
'values']
CONF_DICT = {}
class DotDict(dict):
    """Create a dictionary managed/accessed with dots.

    Attribute access is delegated directly to the dict item protocol, so a
    missing attribute raises KeyError (not AttributeError) -- by design here.
    """
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
CONSTANTS = DotDict({
'BASE_FOLDER': BASE_FOLDER,
'TARGETS_FOLDER': TARGETS_FOLDER,
'CONF_FOLDER': CONF_FOLDER,
'LOGS_FOLDER': LOGS_FOLDER,
'SUPPORTED_API_VERSIONS': SUPPORTED_API_VERSIONS,
'SUPPORTED_API_METHODS': SUPPORTED_API_METHODS,
'USERS_PROFILE': USERS_PROFILE,
'SUPPORTED_USER_METHODS': SUPPORTED_USER_METHODS,
'SUPPORTED_OUTPUT_FORMATS': SUPPORTED_OUTPUT_FORMATS,
'CONF_DICT': CONF_DICT
})
class RbkcliException(Exception):
    """Customize Rbkcli exceptions."""
class ApiRequesterError(Exception):
    """Customize ApiRequester exceptions."""
class DynaTableError(Exception):
    """Customize DynaTable exceptions."""
class ToolsError(Exception):
    """Customize RbkcliTools exceptions."""
class LoggerError(Exception):
    """Customize RbkcliLogger exceptions."""
class ClusterError(Exception):
    """Customize Cluster exceptions."""
class ApiHandlerError(Exception):
    """Customize ApiHandler exceptions."""
class RbkcliError(Exception):
    """Customize Rbkcli command exceptions."""
class ScriptError(Exception):
"""Customize Scripts exceptions.""" | rbkcli/base/essentials.py |
import os
BASE_FOLDER = os.path.expanduser('~/rbkcli')
TARGETS_FOLDER = BASE_FOLDER + '/targets'
CONF_FOLDER = BASE_FOLDER + '/conf'
LOGS_FOLDER = BASE_FOLDER + '/logs'
SCRIPTS_FOLDER = BASE_FOLDER + '/scripts'
CMDLETS_FOLDER = CONF_FOLDER + '/cmdlets'
SUPPORTED_API_VERSIONS = ['v1',
'v2',
'internal',
'adminCli',
'rbkcli',
'cmdlets',
'scripts']
SUPPORTED_API_METHODS = ['head',
'get',
'post',
'put',
'patch',
'delete']
USERS_PROFILE = ['dev', 'admin', 'support']
SUPPORTED_USER_METHODS = {
'admin': ['get'],
'support': SUPPORTED_API_METHODS,
'dev': SUPPORTED_API_METHODS
}
SUPPORTED_OUTPUT_FORMATS = ['raw',
'json',
'table',
'list',
'values']
CONF_DICT = {}
class DotDict(dict):
"""Create a dictionary managed/accessed with dots."""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
CONSTANTS = DotDict({
'BASE_FOLDER': BASE_FOLDER,
'TARGETS_FOLDER': TARGETS_FOLDER,
'CONF_FOLDER': CONF_FOLDER,
'LOGS_FOLDER': LOGS_FOLDER,
'SUPPORTED_API_VERSIONS': SUPPORTED_API_VERSIONS,
'SUPPORTED_API_METHODS': SUPPORTED_API_METHODS,
'USERS_PROFILE': USERS_PROFILE,
'SUPPORTED_USER_METHODS': SUPPORTED_USER_METHODS,
'SUPPORTED_OUTPUT_FORMATS': SUPPORTED_OUTPUT_FORMATS,
'CONF_DICT': CONF_DICT
})
class RbkcliException(Exception):
"""Customize Rbkcli exceptions."""
class ApiRequesterError(Exception):
"""Customize DynaTable exceptions."""
class DynaTableError(Exception):
"""Customize DynaTable exceptions."""
class ToolsError(Exception):
"""Customize RbkcliTools exceptions."""
class LoggerError(Exception):
"""Customize RbkcliLogger exceptions."""
class ClusterError(Exception):
"""Customize RbkcliLogger exceptions."""
class ApiHandlerError(Exception):
"""Customize DynaTable exceptions."""
class RbkcliError(Exception):
"""Customize DynaTable exceptions."""
class ScriptError(Exception):
"""Customize Scripts exceptions.""" | 0.218836 | 0.033495 |
import asyncio
import socket
from pymysql import connections
from greenio import socket as greensocket
class GreenConnection(connections.Connection):
    """pymysql Connection whose socket is a greenio green socket.

    Overrides the private ``_connect`` so blocking socket work cooperates
    with the greenlet/asyncio event loop instead of blocking the thread.
    NOTE(review): this mirrors pymysql's internal _connect and must be kept
    in sync with the pymysql version in use -- confirm that the attributes
    used here (no_delay, rfile/wfile) still exist in newer releases.
    """
    def _connect(self):
        try:
            if self.unix_socket:
                # Unix-domain sockets are not supported by this green wrapper.
                raise NotImplementedError()
            else:
                sock = greensocket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((self.host, self.port))
                self.host_info = "socket %s:%d" % (self.host, self.port)
                if self.no_delay:
                    # Disable Nagle so small query packets go out immediately.
                    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                self.socket = sock
                self.rfile = self.socket.makefile("rb")
                self.wfile = self.socket.makefile("wb")
                self._get_server_information()
                self._request_authentication()
                self._send_autocommit_mode()
        except socket.error as e:
            # 2003 is MySQL's CR_CONN_HOST_ERROR code. NOTE(review): a plain
            # Exception is raised here, whereas pymysql itself raises
            # OperationalError -- callers catching the latter won't see this.
            raise Exception(
                2003, "Can't connect to MySQL server on %r (%s)" % (
                    self.host, e.args[0]))
if __name__ == '__main__':
import greenio
import time
@asyncio.coroutine
def sleeper():
# show that we're not blocked
while True:
yield from asyncio.sleep(0.2)
print('.')
@greenio.task
def db():
conn = GreenConnection(host='localhost')
try:
with conn as cur:
print('>> sleeping')
st = time.monotonic()
cur.execute('SELECT SLEEP(2)')
en = time.monotonic() - st
assert en >= 2
print('<< sleeping {:.3f}s'.format(en))
cur.execute('SELECT 42')
print('"SELECT 42" -> {!r}'.format(cur.fetchone()))
print('>> sleeping')
st = time.monotonic()
cur.execute('SELECT SLEEP(1)')
en = time.monotonic() - st
assert en >= 1
print('<< sleeping {:.3f}s'.format(en))
finally:
conn.close()
@asyncio.coroutine
def run():
yield from asyncio.wait([db(), sleeper()],
return_when=asyncio.FIRST_COMPLETED)
asyncio.set_event_loop_policy(greenio.GreenEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(asyncio.Task(run())) | examples/mysql.py | import asyncio
import socket
from pymysql import connections
from greenio import socket as greensocket
class GreenConnection(connections.Connection):
def _connect(self):
try:
if self.unix_socket:
raise NotImplementedError()
else:
sock = greensocket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
self.host_info = "socket %s:%d" % (self.host, self.port)
if self.no_delay:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket = sock
self.rfile = self.socket.makefile("rb")
self.wfile = self.socket.makefile("wb")
self._get_server_information()
self._request_authentication()
self._send_autocommit_mode()
except socket.error as e:
raise Exception(
2003, "Can't connect to MySQL server on %r (%s)" % (
self.host, e.args[0]))
if __name__ == '__main__':
import greenio
import time
@asyncio.coroutine
def sleeper():
# show that we're not blocked
while True:
yield from asyncio.sleep(0.2)
print('.')
@greenio.task
def db():
conn = GreenConnection(host='localhost')
try:
with conn as cur:
print('>> sleeping')
st = time.monotonic()
cur.execute('SELECT SLEEP(2)')
en = time.monotonic() - st
assert en >= 2
print('<< sleeping {:.3f}s'.format(en))
cur.execute('SELECT 42')
print('"SELECT 42" -> {!r}'.format(cur.fetchone()))
print('>> sleeping')
st = time.monotonic()
cur.execute('SELECT SLEEP(1)')
en = time.monotonic() - st
assert en >= 1
print('<< sleeping {:.3f}s'.format(en))
finally:
conn.close()
@asyncio.coroutine
def run():
yield from asyncio.wait([db(), sleeper()],
return_when=asyncio.FIRST_COMPLETED)
asyncio.set_event_loop_policy(greenio.GreenEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(asyncio.Task(run())) | 0.24608 | 0.080792 |
from trees import christmasTrees
from cafe import coolCafe
from ships import coolShips
from lists_dictionaries import InfoDb, for_loop, while_loop, recursive_loop, fibonacci
from cool_classes import dispfac, dispSeries, superfac, printpal
from goodbye import goodbye
from return_to_market import market
def buildMenu(menu):
    """Print every menu entry as 'key ------ display', then the shop prompt."""
    for item_key, item in menu.items():
        print(f"{item_key} ------ {item['display']}")
    print("Welcome to Felix's Flea Market! Where would you like to shop? If you'd like to see Isabelle's other TT challenges, go to the submenu. ")
def _read_menu_choice(menu):
    """Prompt until the user enters an integer that is a key of `menu`."""
    prompt = ''
    while True:
        try:
            choice = int(input(prompt))
        except ValueError:
            # Robustness fix: non-numeric input used to crash with
            # ValueError; now it simply re-prompts like an invalid number.
            prompt = "Please elect a valid item. "
            continue
        if choice in menu:
            return choice
        prompt = "Please elect a valid item. "

def presentMenu(menu):
    """Show `menu`, read a valid choice, then run it or recurse into a submenu."""
    buildMenu(menu)  # print out menu and take input
    choice = _read_menu_choice(menu)
    if menu[choice]["type"] == "func":  # determine whether recursion is needed
        menu[choice]["exec"]()  # run function
    else:
        presentMenu(menu[choice]["exec"])  # display submenu
InfoDb = {
1: {
"display":"Hack 2a (for loop)",
"exec": for_loop,
"type":"func"
},
2: {
"display":"Hack 2b (while loop)",
"exec": while_loop,
"type":"func"
},
3: {
"display":"Hack 2c (recursive)",
"exec": recursive_loop,
"type":"func"
},
4: {
"display":"Return to Market",
"exec": market,
"type":"func"
},
5: {
"display":"Quit program",
"exec": quit,
"type":"func"
},
}
Math = {
1: {
"display":"Factorial Calculator",
"exec": dispfac,
"type":"func"
},
2: {
"display":"Factorial series",
"exec": dispSeries,
"type":"func"
},
3: {
"display":"Superfactorial",
"exec": superfac,
"type":"func"
},
4: {
"display":"Palindrome",
"exec": printpal,
"type":"func"
},
5: {
"display":"Fibonacci",
"exec": fibonacci,
"type":"func"
},
6: {
"display":"Return to Market",
"exec": market,
"type":"func"
},
7: {
"display":"Quit program",
"exec": goodbye,
"type":"func"
},
}
mainMenu = {
1: {"display":"Tracy's Tall Trees",
"exec":christmasTrees,
"type":"func"},
2: {"display":"Cathy's Café",
"exec":coolCafe,
"type":"func"},
3: {"display":"Suzanne's Ships",
"exec":coolShips,
"type":"func"},
4: {"display":"Polly's Penguins",
"exec":InfoDb,
"type":"dict"},
5: {"display":"Fred's Fun Math and More",
"exec":Math,
"type":"dict"},
6: {"display":"Quit Program",
"exec":quit,
"type":"func"}
}
if __name__ == "__main__":
    while True:  # keep presenting the market until the user opts out
        presentMenu(mainMenu)
        # Ask whether to go around again; anything but 'n'/'N' continues.
        halt = input("Do you want to continue shopping the Flea Market (y/n)? ")
        if halt.lower() == "n":
            print("Thank you for coming")
            break
from cafe import coolCafe
from ships import coolShips
from lists_dictionaries import InfoDb, for_loop, while_loop, recursive_loop, fibonacci
from cool_classes import dispfac, dispSeries, superfac, printpal
from goodbye import goodbye
from return_to_market import market
def buildMenu(menu):
    """Print one numbered line per menu entry, e.g. ``1 ------ Label``."""
    for number, entry in menu.items():
        print(f"{number} ------ {entry['display']}")
# Welcome banner printed once at startup, before the first menu is shown.
print("Welcome to Felix's Flea Market! Where would you like to shop? If you'd like to see Isabelle's other TT challenges, go to the submenu. ")
def presentMenu(menu):
    """Render *menu* and dispatch the user's selection.

    Prompts until the user enters an integer that is a key of *menu*.
    A "func" entry is called directly; anything else is treated as a
    nested menu dict and presented recursively.

    Fixes vs. the original: non-numeric input no longer raises
    ValueError, the redundant post-loop membership check is gone, and
    the "elect"/"select" typo in the re-prompt is corrected.
    """
    buildMenu(menu)  # print out menu before taking input
    choice = None
    prompt = ""
    while choice not in menu:  # keep prompting until a valid key is entered
        raw = input(prompt)
        prompt = "Please select a valid item. "
        try:
            choice = int(raw)
        except ValueError:
            choice = None  # non-numeric input: fall through and re-prompt
    if menu[choice]["type"] == "func":  # determine whether recursion is needed
        menu[choice]["exec"]()  # run function
    else:
        presentMenu(menu[choice]["exec"])  # display submenu
# Submenu for the loop/recursion demos: maps choice number -> action entry.
InfoDb = {
    1: {"display": "Hack 2a (for loop)", "exec": for_loop, "type": "func"},
    2: {"display": "Hack 2b (while loop)", "exec": while_loop, "type": "func"},
    3: {"display": "Hack 2c (recursive)", "exec": recursive_loop, "type": "func"},
    4: {"display": "Return to Market", "exec": market, "type": "func"},
    5: {"display": "Quit program", "exec": quit, "type": "func"},
}
# Submenu for the math demos ("Fred's Fun Math and More").
Math = {
    1: {"display": "Factorial Calculator", "exec": dispfac, "type": "func"},
    2: {"display": "Factorial series", "exec": dispSeries, "type": "func"},
    3: {"display": "Superfactorial", "exec": superfac, "type": "func"},
    4: {"display": "Palindrome", "exec": printpal, "type": "func"},
    5: {"display": "Fibonacci", "exec": fibonacci, "type": "func"},
    6: {"display": "Return to Market", "exec": market, "type": "func"},
    7: {"display": "Quit program", "exec": goodbye, "type": "func"},
}
# Top-level market menu: "dict" entries open a submenu, "func" entries run directly.
mainMenu = {
    1: {"display": "Tracy's Tall Trees", "exec": christmasTrees, "type": "func"},
    2: {"display": "Cathy's Café", "exec": coolCafe, "type": "func"},
    3: {"display": "Suzanne's Ships", "exec": coolShips, "type": "func"},
    4: {"display": "Polly's Penguins", "exec": InfoDb, "type": "dict"},
    5: {"display": "Fred's Fun Math and More", "exec": Math, "type": "dict"},
    6: {"display": "Quit Program", "exec": quit, "type": "func"},
}
# Entry point: keep re-presenting the main menu until the shopper answers "n".
if __name__ == "__main__":
    while True:  # loop until the user opts out below
        presentMenu(mainMenu)
        halt = input("Do you want to continue shopping the Flea Market (y/n)? ")  # ask whether to go again
        if halt.lower() == "n":  # anything other than n/N keeps shopping
            print("Thank you for coming")
break | 0.302185 | 0.253894 |
from models.minkloc import MinkLoc
from models.minkloc_multimodal import MinkLocMultimodal, ResnetFPN
from misc.utils import MinkLocParams
def model_factory(params: MinkLocParams):
    """Build the MinkLoc model variant named in ``params.model_params.model``.

    All variants are returned wrapped in :class:`MinkLocMultimodal`;
    single-modality variants pass ``None``/0 for the missing branch.

    Raises:
        NotImplementedError: for an unrecognized model name.
    """
    in_channels = 1  # NOTE(review): unused — the MinkLoc calls below hard-code in_channels=1
    # MinkLocMultimodal is our baseline MinkLoc++ model producing 256 dimensional descriptor where
    # each modality produces 128 dimensional descriptor
    # MinkLocRGB and MinkLoc3D are single-modality versions producing 256 dimensional descriptor
    if params.model_params.model == 'MinkLocMultimodal':
        # 128-d cloud descriptor + 128-d image descriptor -> 256-d fused output
        cloud_fe_size = 128
        cloud_fe = MinkLoc(in_channels=1, feature_size=cloud_fe_size, output_dim=cloud_fe_size,
                           planes=[32, 64, 64], layers=[1, 1, 1], num_top_down=1,
                           conv0_kernel_size=5, block='ECABasicBlock', pooling_method='GeM')
        image_fe_size = 128
        image_fe = ResnetFPN(out_channels=image_fe_size, lateral_dim=image_fe_size,
                             fh_num_bottom_up=4, fh_num_top_down=0)
        model = MinkLocMultimodal(cloud_fe, cloud_fe_size, image_fe, image_fe_size, output_dim=cloud_fe_size + image_fe_size)
    elif params.model_params.model == 'MinkLoc3D':
        # Cloud-only variant: the full 256-d descriptor comes from the point-cloud branch
        cloud_fe_size = 256
        cloud_fe = MinkLoc(in_channels=1, feature_size=cloud_fe_size, output_dim=cloud_fe_size,
                           planes=[32, 64, 64], layers=[1, 1, 1], num_top_down=1,
                           conv0_kernel_size=5, block='ECABasicBlock', pooling_method='GeM')
        model = MinkLocMultimodal(cloud_fe, cloud_fe_size, None, 0, output_dim=cloud_fe_size,
                                  dropout_p=None)
    elif params.model_params.model == 'MinkLocRGB':
        # Image-only variant: the full 256-d descriptor comes from the RGB branch
        image_fe_size = 256
        image_fe = ResnetFPN(out_channels=image_fe_size, lateral_dim=image_fe_size,
                             fh_num_bottom_up=4, fh_num_top_down=0)
        model = MinkLocMultimodal(None, 0, image_fe, image_fe_size, output_dim=image_fe_size)
    else:
        raise NotImplementedError('Model not implemented: {}'.format(params.model_params.model))
return model | models/model_factory.py |
from models.minkloc import MinkLoc
from models.minkloc_multimodal import MinkLocMultimodal, ResnetFPN
from misc.utils import MinkLocParams
def model_factory(params: MinkLocParams):
in_channels = 1
# MinkLocMultimodal is our baseline MinkLoc++ model producing 256 dimensional descriptor where
# each modality produces 128 dimensional descriptor
# MinkLocRGB and MinkLoc3D are single-modality versions producing 256 dimensional descriptor
if params.model_params.model == 'MinkLocMultimodal':
cloud_fe_size = 128
cloud_fe = MinkLoc(in_channels=1, feature_size=cloud_fe_size, output_dim=cloud_fe_size,
planes=[32, 64, 64], layers=[1, 1, 1], num_top_down=1,
conv0_kernel_size=5, block='ECABasicBlock', pooling_method='GeM')
image_fe_size = 128
image_fe = ResnetFPN(out_channels=image_fe_size, lateral_dim=image_fe_size,
fh_num_bottom_up=4, fh_num_top_down=0)
model = MinkLocMultimodal(cloud_fe, cloud_fe_size, image_fe, image_fe_size, output_dim=cloud_fe_size + image_fe_size)
elif params.model_params.model == 'MinkLoc3D':
cloud_fe_size = 256
cloud_fe = MinkLoc(in_channels=1, feature_size=cloud_fe_size, output_dim=cloud_fe_size,
planes=[32, 64, 64], layers=[1, 1, 1], num_top_down=1,
conv0_kernel_size=5, block='ECABasicBlock', pooling_method='GeM')
model = MinkLocMultimodal(cloud_fe, cloud_fe_size, None, 0, output_dim=cloud_fe_size,
dropout_p=None)
elif params.model_params.model == 'MinkLocRGB':
image_fe_size = 256
image_fe = ResnetFPN(out_channels=image_fe_size, lateral_dim=image_fe_size,
fh_num_bottom_up=4, fh_num_top_down=0)
model = MinkLocMultimodal(None, 0, image_fe, image_fe_size, output_dim=image_fe_size)
else:
raise NotImplementedError('Model not implemented: {}'.format(params.model_params.model))
return model | 0.73678 | 0.290477 |
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from localshop.apps.packages.models import Release, ReleaseFile
from localshop.apps.packages import xmlrpc
@csrf_exempt
def index(request):
    """Root view: POST requests are XML-RPC calls; everything else gets the frontpage."""
    is_xmlrpc = request.method == 'POST'
    handler = xmlrpc.handle_request if is_xmlrpc else frontpage
    return handler(request)
@login_required
def frontpage(request):
    """Render the dashboard listing recent local and mirrored releases.

    Template context:
        recent_local: 5 newest releases of locally-hosted packages.
        recent_mirror: 10 most recently modified mirrored release files
            that actually have a distribution file attached.
    """
    # The trailing `.all()` before slicing was a no-op; slicing the queryset
    # directly issues the same LIMITed query.
    recent_local = (Release.objects
                    .filter(package__is_local=True)
                    .order_by('-created')[:5])
    recent_mirror = (ReleaseFile.objects
                     .filter(release__package__is_local=False)
                     .exclude(distribution='')
                     .order_by('-modified')[:10])
    return TemplateResponse(request, 'frontpage.html', {
        'recent_local': recent_local,
        'recent_mirror': recent_mirror,
    })
class LoginRequiredMixin(object):
    """
    View mixin that applies the login_required decorator to dispatch(),
    so every HTTP method of the view requires an authenticated user.
    """
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class PermissionRequiredMixin(object):
    """
    View mixin which uses the permission_required decorator.

    Subclasses must set ``permission_required`` to a dotted permission
    string such as ``'auth.add_user'``; dispatch() then wraps the normal
    dispatch in Django's ``permission_required`` decorator.
    """
    permission_required = None  # the permission, e.g. 'auth.add_user'
    raise_exception = True  # raises a 403 exception by default
    login_url = settings.LOGIN_URL  # the url to redirect to

    def dispatch(self, request, *args, **kwargs):
        # Fail loudly on a missing/malformed permission string ('.' separates
        # app label from codename) rather than silently allowing access.
        if (self.permission_required is None or
                '.' not in self.permission_required):
            raise ImproperlyConfigured("PermissionRequiredMixin must have a "
                                       "permission_required attribute.")
        decorator = permission_required(self.permission_required,
                                        self.login_url, self.raise_exception)
        decorated_dispatch = decorator(super(PermissionRequiredMixin, self).dispatch)
return decorated_dispatch(request, *args, **kwargs) | localshop/views.py | from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from localshop.apps.packages.models import Release, ReleaseFile
from localshop.apps.packages import xmlrpc
@csrf_exempt
def index(request):
if request.method == 'POST':
return xmlrpc.handle_request(request)
return frontpage(request)
@login_required
def frontpage(request):
recent_local = (Release.objects
.filter(package__is_local=True)
.order_by('-created')
.all()[:5])
recent_mirror = (ReleaseFile.objects
.filter(release__package__is_local=False)
.exclude(distribution='')
.order_by('-modified')
.all()[:10])
return TemplateResponse(request, 'frontpage.html', {
'recent_local': recent_local,
'recent_mirror': recent_mirror,
})
class LoginRequiredMixin(object):
"""
View mixin that applies the login_required decorator
"""
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class PermissionRequiredMixin(object):
"""
View mixin which uses the permission_required decorator.
"""
permission_required = None # the permission, e.g. 'auth.add_user'
raise_exception = True # raises a 403 exception by default
login_url = settings.LOGIN_URL # the url to redirect to
def dispatch(self, request, *args, **kwargs):
if (self.permission_required is None or
'.' not in self.permission_required):
raise ImproperlyConfigured("PermissionRequiredMixin must have a "
"permission_required attribute.")
decorator = permission_required(self.permission_required,
self.login_url, self.raise_exception)
decorated_dispatch = decorator(super(PermissionRequiredMixin, self).dispatch)
return decorated_dispatch(request, *args, **kwargs) | 0.590661 | 0.068475 |
from __future__ import annotations
import os
import io
import click
import json
import difflib
import datacompy
import subprocess
import pandas as pd
import bson
import decimal
from .config import get_sql_for_database, get_config_for_database
from pathlib import Path
from collections import OrderedDict
from typing import List, Dict, OrderedDict as OrderedDictType, Optional
from datetime import date, datetime
from moda import style, log
from moda.user import UserInteractor, PythonShellType, MenuOption, Interaction
from functools import reduce
class ResultEncoder(json.JSONEncoder):
    """JSON encoder for query results.

    date/datetime values are serialized as ISO-8601 strings and Decimal
    as float (note: float() may lose precision for values not exactly
    representable in binary floating point).
    """

    def default(self, obj):
        # datetime is a subclass of date, so this single check covers both;
        # the original's separate datetime branch was unreachable.
        if isinstance(obj, date):
            return obj.isoformat()
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)
def run_sql(SQL: any, query_text: str, escape_file_text: bool, format_parameters: Optional[Dict[str, any]], verbose: bool):
    """Run *query_text* through the given SQL module and return all records.

    Args:
        SQL: database-access module (exposing ``Query`` and ``Layer``),
            as returned by ``get_sql_for_database``.
        query_text: raw SQL text to execute.
        escape_file_text: when True, escape the text; otherwise apply
            ``format_parameters`` substitutions if any were given.
        format_parameters: values for ``formatted_query_text``; ignored
            when escaping is requested.
        verbose: when True, log the final query text before running it.

    Returns:
        All records fetched from the executed query.
    """
    if escape_file_text:
        query_text = SQL.Query.escaped_query_text(query_text=query_text)
    elif format_parameters is not None:
        query_text = SQL.Query.formatted_query_text(
            query_text=query_text,
            format_parameters=format_parameters
        )
    if verbose:
        log.log(f'...running query:\n{query_text}')
    query = SQL.Query(query_text)
    layer = SQL.Layer()
    layer.connect()
    cursor = query.run(sql_layer=layer)
    records = layer.fetch_all_records(cursor=cursor)
    # NOTE(review): not in try/finally — a failing query leaks the connection.
    layer.disconnect()
    return records
def write_json(file_path: str, results: List[OrderedDictType[str, any]]):
    """Write *results* to *file_path* as a JSON array, one record per line.

    The original omitted commas between records, producing invalid JSON
    whenever there was more than one record. This keeps the one-record-
    per-line layout (downstream diffing reads these files line by line)
    while emitting a parseable document.
    """
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write('[\n')
        for i, result in enumerate(results):
            separator = ',\n' if i else ''  # comma before every record but the first
            f.write(f'{separator}{json.dumps(result, cls=ResultEncoder)}')
        f.write('\n]' if results else ']')
class Verifier:
    """Compares two query/CSV result sets and reports whether they match.

    Each side is loaded into a pandas DataFrame (optionally post-processed
    by user scripts), compared with datacompy, and written out as CSV and
    JSON; mismatches additionally produce a unified diff file. An optional
    interactive menu lets the user inspect the diff or drop into a shell.
    """

    class Option(MenuOption):
        # Interactive menu choices offered after a comparison finishes.
        diff = 'v'
        quit = 'q'

        @property
        def option_text(self) -> str:
            """Human-readable label for this option."""
            if self is Verifier.Option.diff:
                return '(V)iew JSON diff'
            elif self is Verifier.Option.quit:
                return '(Q)uit'

        @property
        def styled(self) -> style.Styled:
            """Colored label: blue for diff, red for quit."""
            if self is Verifier.Option.diff:
                return style.CustomStyled(text=self.option_text, style=style.Format().blue())
            if self is Verifier.Option.quit:
                return style.CustomStyled(text=self.option_text, style=style.Format().red())

    class Verification:
        """Outcome of one comparison: inputs, output file paths, success flag."""
        verification_date: datetime
        name_a: str
        name_b: str
        # NOTE(review): annotated as DataFrames, but verify() passes the
        # to_dict('records') lists here — confirm which shape callers expect.
        data_frame_a: pd.DataFrame
        data_frame_b: pd.DataFrame
        csv_path_a: Optional[str]
        csv_path_b: Optional[str]
        json_path_a: Optional[str]
        json_path_b: Optional[str]
        diff_path: Optional[str]  # only set when the comparison failed
        success: bool

        def __init__(self, verification_date: datetime, name_a: str, name_b: str, data_frame_a: pd.DataFrame, data_frame_b: pd.DataFrame, csv_path_a: Optional[str], csv_path_b: Optional[str], json_path_a: Optional[str], json_path_b: Optional[str], diff_path: Optional[str], success: bool):
            self.verification_date = verification_date
            self.name_a = name_a
            self.name_b = name_b
            self.data_frame_a = data_frame_a
            self.data_frame_b = data_frame_b
            self.json_path_a = json_path_a
            self.json_path_b = json_path_b
            self.csv_path_a = csv_path_a
            self.csv_path_b = csv_path_b
            self.diff_path = diff_path
            self.success = success

    database: str  # default database name for both sides
    user: UserInteractor  # drives prompts, menus and user scripts
    verbose: bool
    output_directory: str  # where CSV/JSON/diff artifacts are written
    diff_command: str  # external tool invoked for interactive diffing

    def __init__(self, database: str, interactive: bool=False, verbose: bool=False, output_directory: str=os.path.join('output', 'verify'), diff_command: str='vimdiff', python_shell_type: PythonShellType=PythonShellType.ipython):
        self.database = database
        self.user = UserInteractor(timeout=None, interactive=interactive, python_shell_type=python_shell_type)
        self.verbose = verbose
        self.output_directory = output_directory
        self.diff_command = diff_command

    def filter_columns(self, df: pd.DataFrame, columns: Optional[List[str]]=None, exclude_columns: Optional[List[str]]=None) -> pd.DataFrame:
        """Return a copy of *df* restricted to *columns* minus *exclude_columns*.

        With no filters the frame is copied unchanged; if filtering removes
        every column an empty DataFrame is returned.
        """
        if columns is None and not exclude_columns:
            return df.copy()
        final_columns = [
            c
            for c in df.columns
            if (columns is None or c in columns)
            and (exclude_columns is None or c not in exclude_columns)
        ]
        if not final_columns:
            return pd.DataFrame()
        return df[final_columns]

    def get_data_frame(self, text: Optional[str], stream: Optional[str], database: Optional[str], csv: bool, escape: bool, columns: Optional[List[str]], exclude_columns: Optional[List[str]], format_parameters: Dict[str, any]) -> pd.DataFrame:
        """Load one side of the comparison from CSV text/stream or a SQL query,
        then apply the column filters."""
        SQL = get_sql_for_database(database_name=database if database else self.database)
        if csv:
            # CSV source: prefer the open stream, else parse the inline text.
            df = pd.read_csv(stream if stream is not None else io.StringIO(text))
        else:
            df = pd.DataFrame(run_sql(
                SQL=SQL,
                query_text=text if text is not None else stream.read(),
                escape_file_text=escape,
                format_parameters=format_parameters,
                verbose=self.verbose
            ))
        df = self.filter_columns(
            df=df,
            columns=columns,
            exclude_columns=exclude_columns
        )
        return df

    def combine_columns(self, *column_lists: List[Optional[List[str]]]) -> Optional[List[str]]:
        """Merge several optional column lists, preserving order and dropping
        duplicates; returns None when every list was None."""
        column_lists = list(filter(lambda l: l is not None, column_lists))
        return reduce(lambda l, m: l + [c for c in m if c not in l], column_lists, []) if column_lists else None

    def apply_script(self, script_path: str, database: str, data_frame: pd.DataFrame, other_database: Optional[str]=None, other_data_frame: Optional[pd.DataFrame]=None, context: Optional[Dict[str, any]]=None) -> List[pd.DataFrame]:
        """Run a user script non-interactively against the given frame(s).

        The script sees ``dfs``, ``database_configs``, ``SQLs`` and
        ``context`` in its locals. The user's interactivity, locals and
        script directory are saved and restored around the run.
        """
        user_interactive = self.user.interactive
        user_locals = self.user.locals
        user_script_directory_components = self.user.script_directory_components
        self.user.script_directory_components = list(Path(script_path).parent.parts)
        self.user.locals = {
            **self.user.locals,
            'pd': pd,
            'bson': bson,
            'dfs': [
                data_frame,
                other_data_frame,
            ],
            'database_configs': [
                get_config_for_database(database_name=database),
                get_config_for_database(database_name=other_database) if other_database else None,
            ],
            'SQLs': [
                get_sql_for_database(
                    database_name=database,
                    configure=False
                ),
                get_sql_for_database(
                    database_name=other_database,
                    configure=False
                ) if other_database else None
            ],
            'context': context if context is not None else {},
        }
        # Grab the list object before the script runs — presumably scripts
        # mutate this list in place rather than rebinding 'dfs'; confirm.
        modified_data_frames = self.user.locals['dfs']
        self.user.interactive = False
        if self.verbose:
            script_text = Path(script_path).read_text()
            log.log(f'...running script:\n{script_text}')
        self.user.run_script(script_name=Path(script_path).stem)
        # Restore the user state captured above.
        self.user.script_directory_components = user_script_directory_components
        self.user.locals = user_locals
        self.user.interactive = user_interactive
        return modified_data_frames

    def verify(self, name_a: str, name_b: str, text_a: Optional[str]=None, text_b: Optional[str]=None, stream_a: Optional[io.TextIOBase]=None, stream_b: Optional[io.TextIOBase]=None, script_path: Optional[str]=None, script_path_a: Optional[str]=None, script_path_b: Optional[str]=None, database_a: Optional[str]=None, database_b: Optional[str]=None, csv_a: bool=False, csv_b: bool=False, escape_a: bool=False, escape_b: bool=False, columns: Optional[List[str]]=None, columns_a: Optional[List[str]]=None, columns_b: Optional[List[str]]=None, exclude_columns: Optional[List[str]]=None, exclude_columns_a: Optional[List[str]]=None, exclude_columns_b: Optional[List[str]]=None, format_parameters: Dict[str, any]={}, absolute_tolerance: float=0, relative_tolerance: float=0) -> Verification:
        """Load sides a and b, run optional scripts, compare, and report.

        Writes CSV and JSON artifacts for both sides (plus a diff file on
        mismatch) into ``output_directory`` and returns a Verification.
        """
        if self.verbose:
            detail_a = f'database: {database_a if database_a else self.database}' if not csv_a else 'file: csv'
            detail_b = f'database: {database_b if database_b else self.database}' if not csv_b else 'file: csv'
            log.log(f'Comparing:\na: {name_a} ({detail_a})\nb: {name_b} ({detail_b})\n')
        script_context = {
            'format_parameters': format_parameters
        }
        df_a = self.get_data_frame(
            text=text_a,
            stream=stream_a,
            database=database_a,
            csv=csv_a,
            escape=escape_a,
            columns=self.combine_columns(columns, columns_a),
            exclude_columns=self.combine_columns(exclude_columns, exclude_columns_a),
            format_parameters=format_parameters
        )
        if script_path_a:
            # Side-a script sees only df_a in the context.
            script_context['df_a'] = df_a
            df_a = self.apply_script(
                script_path=script_path_a,
                database=database_a if database_a else self.database,
                data_frame=df_a,
                context=script_context
            )[0]
        df_b = self.get_data_frame(
            text=text_b,
            stream=stream_b,
            database=database_b,
            csv=csv_b,
            escape=escape_b,
            columns=self.combine_columns(columns, columns_b),
            exclude_columns=self.combine_columns(exclude_columns, exclude_columns_b),
            format_parameters=format_parameters
        )
        if script_path_b:
            # Side-b script sees both frames in the context.
            script_context['df_a'] = df_a
            script_context['df_b'] = df_b
            df_b = self.apply_script(
                script_path=script_path_b,
                database=database_b if database_b else self.database,
                data_frame=df_b,
                context=script_context
            )[0]
        if script_path:
            # Joint script may rewrite both frames at once.
            script_context['df_a'] = df_a
            script_context['df_b'] = df_b
            df_a, df_b = self.apply_script(
                script_path=script_path,
                database=database_a if database_a else self.database,
                data_frame=df_a,
                other_database=database_b if database_b else self.database,
                other_data_frame=df_b,
                context=script_context
            )
        # NOTE(review): naive UTC timestamp (datetime.utcnow) — confirm
        # downstream consumers don't expect a timezone-aware value.
        verification_date = datetime.utcnow()
        results_a = df_a.to_dict(orient='records')
        results_b = df_b.to_dict(orient='records')
        compare = datacompy.Compare(
            df_a,
            df_b,
            on_index=True,
            df1_name=f'{name_a} [a]',
            df2_name=f'{name_b} [b]',
            # join_columns='acct_id', #You can also specify a list of columns
            abs_tol=absolute_tolerance, #Optional, defaults to 0
            rel_tol=relative_tolerance #Optional, defaults to 0
        )
        if self.verbose:
            log.log(f'\n{compare.report()}')
        # Persist both sides as CSV and line-per-record JSON artifacts.
        output_path_a = os.path.join(self.output_directory, f'comparison_a_{name_a}')
        output_path_b = os.path.join(self.output_directory, f'comparison_b_{name_b}')
        csv_path_a = f'{output_path_a}.csv'
        csv_path_b = f'{output_path_b}.csv'
        df_a.to_csv(csv_path_a)
        df_b.to_csv(csv_path_b)
        json_path_a = f'{output_path_a}.json'
        json_path_b = f'{output_path_b}.json'
        write_json(json_path_a, results_a)
        write_json(json_path_b, results_b)
        if self.verbose:
            log.log(f'CSV files written to\n{csv_path_a}\n{csv_path_b}\n')
            log.log(f'JSON result files written to\n{json_path_a}\n{json_path_b}\n')
        matched = compare.matches(ignore_extra_columns=False)
        if not matched:
            # Build a unified diff of the two JSON files for inspection.
            with open(json_path_a) as f:
                lines_a = f.readlines()
            with open(json_path_b) as f:
                lines_b = f.readlines()
            diff_lines = list(difflib.unified_diff(lines_a, lines_b))
            diff_path = os.path.join(self.output_directory, f'comparison_diff__a_{name_a}__b_{name_b}.txt')
            with open(diff_path, 'w') as f:
                f.write(''.join(diff_lines))
            # Log only the head and tail of long diffs.
            log_diff_lines = [
                *diff_lines[:10],
                f'\n... ({len(diff_lines) - 20} lines not displayed)\n\n',
                *diff_lines[-10:],
            ] if len(diff_lines) > 24 else diff_lines
            log.log(f'JSON result file diff written to {diff_path}\n\n{"".join(log_diff_lines)}\n\n')
        self.user.present_message(message='The results appear to MATCH.' if matched else 'The results DO NOT APPEAR TO MATCH.')
        # Interactive follow-up loop: view the diff, open a shell, or quit.
        while True:
            option = self.user.present_menu(
                options=[
                    Verifier.Option.diff,
                    Interaction.python,
                    Interaction.debugger,
                    Verifier.Option.quit
                ],
                default_option=Verifier.Option.diff if self.user.interactive and not matched else Verifier.Option.quit
            )
            if option is Verifier.Option.diff:
                subprocess.run([self.diff_command, json_path_a, json_path_b])
            elif isinstance(option, Interaction):
                self.user.locals = {
                    **self.user.python_locals,
                    'df1': df_a,
                    'df2': df_b,
                }
                self.user.interact(interaction=option)
                self.user.locals = {}
            elif option is Verifier.Option.quit:
                break
        verification = Verifier.Verification(
            verification_date=verification_date,
            name_a=name_a,
            name_b=name_b,
            data_frame_a=results_a,
            data_frame_b=results_b,
            csv_path_a=csv_path_a,
            csv_path_b=csv_path_b,
            json_path_a=json_path_a,
            json_path_b=json_path_b,
            diff_path=diff_path if not matched else None,
            success=matched
        )
        return verification
if __name__ == "__main__":
run() | fabrica/verify.py | from __future__ import annotations
import os
import io
import click
import json
import difflib
import datacompy
import subprocess
import pandas as pd
import bson
import decimal
from .config import get_sql_for_database, get_config_for_database
from pathlib import Path
from collections import OrderedDict
from typing import List, Dict, OrderedDict as OrderedDictType, Optional
from datetime import date, datetime
from moda import style, log
from moda.user import UserInteractor, PythonShellType, MenuOption, Interaction
from functools import reduce
class ResultEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, date):
return obj.isoformat()
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, decimal.Decimal):
return float(obj)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def run_sql(SQL: any, query_text: str, escape_file_text: bool, format_parameters: Optional[Dict[str, any]], verbose: bool):
if escape_file_text:
query_text = SQL.Query.escaped_query_text(query_text=query_text)
elif format_parameters is not None:
query_text = SQL.Query.formatted_query_text(
query_text=query_text,
format_parameters=format_parameters
)
if verbose:
log.log(f'...running query:\n{query_text}')
query = SQL.Query(query_text)
layer = SQL.Layer()
layer.connect()
cursor = query.run(sql_layer=layer)
records = layer.fetch_all_records(cursor=cursor)
layer.disconnect()
return records
def write_json(file_path:str, results: List[OrderedDictType[str, any]]):
with open(file_path, 'w', encoding='utf-8') as f:
f.write('[\n')
for result in results:
f.write(f'{json.dumps(result, cls=ResultEncoder)}\n')
f.write(']')
class Verifier:
class Option(MenuOption):
diff = 'v'
quit = 'q'
@property
def option_text(self) -> str:
if self is Verifier.Option.diff:
return '(V)iew JSON diff'
elif self is Verifier.Option.quit:
return '(Q)uit'
@property
def styled(self) -> style.Styled:
if self is Verifier.Option.diff:
return style.CustomStyled(text=self.option_text, style=style.Format().blue())
if self is Verifier.Option.quit:
return style.CustomStyled(text=self.option_text, style=style.Format().red())
class Verification:
verification_date: datetime
name_a: str
name_b: str
data_frame_a: pd.DataFrame
data_frame_b: pd.DataFrame
csv_path_a: Optional[str]
csv_path_b: Optional[str]
json_path_a: Optional[str]
json_path_b: Optional[str]
diff_path: Optional[str]
success: bool
def __init__(self, verification_date: datetime, name_a: str, name_b: str, data_frame_a: pd.DataFrame, data_frame_b: pd.DataFrame, csv_path_a: Optional[str], csv_path_b: Optional[str], json_path_a: Optional[str], json_path_b: Optional[str], diff_path: Optional[str], success: bool):
self.verification_date = verification_date
self.name_a = name_a
self.name_b = name_b
self.data_frame_a = data_frame_a
self.data_frame_b = data_frame_b
self.json_path_a = json_path_a
self.json_path_b = json_path_b
self.csv_path_a = csv_path_a
self.csv_path_b = csv_path_b
self.diff_path = diff_path
self.success = success
database: str
user: UserInteractor
verbose: bool
output_directory: str
diff_command: str
def __init__(self, database: str, interactive: bool=False, verbose: bool=False, output_directory: str=os.path.join('output', 'verify'), diff_command: str='vimdiff', python_shell_type: PythonShellType=PythonShellType.ipython):
self.database = database
self.user = UserInteractor(timeout=None, interactive=interactive, python_shell_type=python_shell_type)
self.verbose = verbose
self.output_directory = output_directory
self.diff_command = diff_command
def filter_columns(self, df: pd.DataFrame, columns: Optional[List[str]]=None, exclude_columns: Optional[List[str]]=None) -> pd.DataFrame:
if columns is None and not exclude_columns:
return df.copy()
final_columns = [
c
for c in df.columns
if (columns is None or c in columns)
and (exclude_columns is None or c not in exclude_columns)
]
if not final_columns:
return pd.DataFrame()
return df[final_columns]
def get_data_frame(self, text: Optional[str], stream: Optional[str], database: Optional[str], csv: bool, escape: bool, columns: Optional[List[str]], exclude_columns: Optional[List[str]], format_parameters: Dict[str, any]) -> pd.DataFrame:
SQL = get_sql_for_database(database_name=database if database else self.database)
if csv:
df = pd.read_csv(stream if stream is not None else io.StringIO(text))
else:
df = pd.DataFrame(run_sql(
SQL=SQL,
query_text=text if text is not None else stream.read(),
escape_file_text=escape,
format_parameters=format_parameters,
verbose=self.verbose
))
df = self.filter_columns(
df=df,
columns=columns,
exclude_columns=exclude_columns
)
return df
def combine_columns(self, *column_lists: List[Optional[List[str]]]) -> Optional[List[str]]:
column_lists = list(filter(lambda l: l is not None, column_lists))
return reduce(lambda l, m: l + [c for c in m if c not in l], column_lists, []) if column_lists else None
def apply_script(self, script_path: str, database: str, data_frame: pd.DataFrame, other_database: Optional[str]=None, other_data_frame: Optional[pd.DataFrame]=None, context: Optional[Dict[str, any]]=None) -> List[pd.DataFrame]:
user_interactive = self.user.interactive
user_locals = self.user.locals
user_script_directory_components = self.user.script_directory_components
self.user.script_directory_components = list(Path(script_path).parent.parts)
self.user.locals = {
**self.user.locals,
'pd': pd,
'bson': bson,
'dfs': [
data_frame,
other_data_frame,
],
'database_configs': [
get_config_for_database(database_name=database),
get_config_for_database(database_name=other_database) if other_database else None,
],
'SQLs': [
get_sql_for_database(
database_name=database,
configure=False
),
get_sql_for_database(
database_name=other_database,
configure=False
) if other_database else None
],
'context': context if context is not None else {},
}
modified_data_frames = self.user.locals['dfs']
self.user.interactive = False
if self.verbose:
script_text = Path(script_path).read_text()
log.log(f'...running script:\n{script_text}')
self.user.run_script(script_name=Path(script_path).stem)
self.user.script_directory_components = user_script_directory_components
self.user.locals = user_locals
self.user.interactive = user_interactive
return modified_data_frames
def verify(self, name_a: str, name_b: str, text_a: Optional[str]=None, text_b: Optional[str]=None, stream_a: Optional[io.TextIOBase]=None, stream_b: Optional[io.TextIOBase]=None, script_path: Optional[str]=None, script_path_a: Optional[str]=None, script_path_b: Optional[str]=None, database_a: Optional[str]=None, database_b: Optional[str]=None, csv_a: bool=False, csv_b: bool=False, escape_a: bool=False, escape_b: bool=False, columns: Optional[List[str]]=None, columns_a: Optional[List[str]]=None, columns_b: Optional[List[str]]=None, exclude_columns: Optional[List[str]]=None, exclude_columns_a: Optional[List[str]]=None, exclude_columns_b: Optional[List[str]]=None, format_parameters: Dict[str, any]={}, absolute_tolerance: float=0, relative_tolerance: float=0) -> Verification:
if self.verbose:
detail_a = f'database: {database_a if database_a else self.database}' if not csv_a else 'file: csv'
detail_b = f'database: {database_b if database_b else self.database}' if not csv_b else 'file: csv'
log.log(f'Comparing:\na: {name_a} ({detail_a})\nb: {name_b} ({detail_b})\n')
script_context = {
'format_parameters': format_parameters
}
df_a = self.get_data_frame(
text=text_a,
stream=stream_a,
database=database_a,
csv=csv_a,
escape=escape_a,
columns=self.combine_columns(columns, columns_a),
exclude_columns=self.combine_columns(exclude_columns, exclude_columns_a),
format_parameters=format_parameters
)
if script_path_a:
script_context['df_a'] = df_a
df_a = self.apply_script(
script_path=script_path_a,
database=database_a if database_a else self.database,
data_frame=df_a,
context=script_context
)[0]
df_b = self.get_data_frame(
text=text_b,
stream=stream_b,
database=database_b,
csv=csv_b,
escape=escape_b,
columns=self.combine_columns(columns, columns_b),
exclude_columns=self.combine_columns(exclude_columns, exclude_columns_b),
format_parameters=format_parameters
)
if script_path_b:
script_context['df_a'] = df_a
script_context['df_b'] = df_b
df_b = self.apply_script(
script_path=script_path_b,
database=database_b if database_b else self.database,
data_frame=df_b,
context=script_context
)[0]
if script_path:
script_context['df_a'] = df_a
script_context['df_b'] = df_b
df_a, df_b = self.apply_script(
script_path=script_path,
database=database_a if database_a else self.database,
data_frame=df_a,
other_database=database_b if database_b else self.database,
other_data_frame=df_b,
context=script_context
)
verification_date = datetime.utcnow()
results_a = df_a.to_dict(orient='records')
results_b = df_b.to_dict(orient='records')
compare = datacompy.Compare(
df_a,
df_b,
on_index=True,
df1_name=f'{name_a} [a]',
df2_name=f'{name_b} [b]',
# join_columns='acct_id', #You can also specify a list of columns
abs_tol=absolute_tolerance, #Optional, defaults to 0
rel_tol=relative_tolerance #Optional, defaults to 0
)
if self.verbose:
log.log(f'\n{compare.report()}')
output_path_a = os.path.join(self.output_directory, f'comparison_a_{name_a}')
output_path_b = os.path.join(self.output_directory, f'comparison_b_{name_b}')
csv_path_a = f'{output_path_a}.csv'
csv_path_b = f'{output_path_b}.csv'
df_a.to_csv(csv_path_a)
df_b.to_csv(csv_path_b)
json_path_a = f'{output_path_a}.json'
json_path_b = f'{output_path_b}.json'
write_json(json_path_a, results_a)
write_json(json_path_b, results_b)
if self.verbose:
log.log(f'CSV files written to\n{csv_path_a}\n{csv_path_b}\n')
log.log(f'JSON result files written to\n{json_path_a}\n{json_path_b}\n')
matched = compare.matches(ignore_extra_columns=False)
if not matched:
with open(json_path_a) as f:
lines_a = f.readlines()
with open(json_path_b) as f:
lines_b = f.readlines()
diff_lines = list(difflib.unified_diff(lines_a, lines_b))
diff_path = os.path.join(self.output_directory, f'comparison_diff__a_{name_a}__b_{name_b}.txt')
with open(diff_path, 'w') as f:
f.write(''.join(diff_lines))
log_diff_lines = [
*diff_lines[:10],
f'\n... ({len(diff_lines) - 20} lines not displayed)\n\n',
*diff_lines[-10:],
] if len(diff_lines) > 24 else diff_lines
log.log(f'JSON result file diff written to {diff_path}\n\n{"".join(log_diff_lines)}\n\n')
self.user.present_message(message='The results appear to MATCH.' if matched else 'The results DO NOT APPEAR TO MATCH.')
while True:
option = self.user.present_menu(
options=[
Verifier.Option.diff,
Interaction.python,
Interaction.debugger,
Verifier.Option.quit
],
default_option=Verifier.Option.diff if self.user.interactive and not matched else Verifier.Option.quit
)
if option is Verifier.Option.diff:
subprocess.run([self.diff_command, json_path_a, json_path_b])
elif isinstance(option, Interaction):
self.user.locals = {
**self.user.python_locals,
'df1': df_a,
'df2': df_b,
}
self.user.interact(interaction=option)
self.user.locals = {}
elif option is Verifier.Option.quit:
break
verification = Verifier.Verification(
verification_date=verification_date,
name_a=name_a,
name_b=name_b,
data_frame_a=results_a,
data_frame_b=results_b,
csv_path_a=csv_path_a,
csv_path_b=csv_path_b,
json_path_a=json_path_a,
json_path_b=json_path_b,
diff_path=diff_path if not matched else None,
success=matched
)
return verification
if __name__ == "__main__":
run() | 0.742422 | 0.157428 |
from unittest.mock import ANY
import pytest
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import URLValidator
from django.test import override_settings
from aca.client import ACAClient
from oidc.endpoints.authorize import authorization
from oidc.models import AuthSession
@pytest.mark.django_db
class TestAuthorization:
    """Tests for the ``authorization`` OIDC endpoint helper."""

    @override_settings(
        ACA_PY_URL="https://aca.com",
        ACA_PY_TRANSPORT_URL="https://aca-trans.com",
        SITE_URL="https://site.com",
    )
    def test_authorization_not_found(self, mocker, email_presentation_configuration):
        """An unknown presentation-request configuration id raises ObjectDoesNotExist."""
        with pytest.raises(ObjectDoesNotExist):
            authorization("invalid_pres_req_conf_id", {})

    @override_settings(
        ACA_PY_URL="https://aca.com",
        ACA_PY_TRANSPORT_URL="https://aca-trans.com",
        SITE_URL="https://site.com",
    )
    def test_authorization(self, mocker, email_presentation_configuration):
        """Happy path: a session is persisted and the ACA-Py client is used once."""
        create_proof_req = mocker.patch.object(
            ACAClient,
            "create_proof_request",
            return_value={
                "presentation_request": "some_pr",
                "thread_id": "some_tid",
                "presentation_exchange_id": "some_pres_ex_id",
            },
        )
        get_public_did = mocker.patch.object(
            ACAClient, "get_public_did", return_value={"verkey": "some_verkey"}
        )
        short_url, session_id, pres_req, b64_presentation = authorization(
            "verified-email", {"some_param": "some_value"}
        )
        # The returned short URL must be a syntactically valid URL.
        url_validator = URLValidator()
        url_validator(short_url)
        session = AuthSession.objects.first()
        assert session_id == str(session.id)
        assert session.presentation_record_id == "verified-email"
        assert session.presentation_request_id == "some_pres_ex_id"
        assert session.presentation_request == {
            "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/present-proof/1.0/request-presentation",
            "@id": "some_tid",
            "request_presentations~attach": [
                {
                    "@id": "libindy-request-presentation-0",
                    "data": {"base64": ANY},
                    "mime-type": "application/json",
                }
            ],
            "~service": {
                "serviceEndpoint": "https://aca-trans.com",
                "routingKeys": None,
                "recipientKeys": ["some_verkey"],
            },
            "comment": None,
        }
        assert session.request_parameters == {"some_param": "some_value"}
        assert pres_req == "some_pres_ex_id"
        assert b64_presentation
        create_proof_req.assert_called_once()
        get_public_did.assert_called_once()
import pytest
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import URLValidator
from django.test import override_settings
from aca.client import ACAClient
from oidc.endpoints.authorize import authorization
from oidc.models import AuthSession
@pytest.mark.django_db
class TestAuthorization:
    """Tests for the ``authorization`` OIDC endpoint helper."""

    @override_settings(
        ACA_PY_URL="https://aca.com",
        ACA_PY_TRANSPORT_URL="https://aca-trans.com",
        SITE_URL="https://site.com",
    )
    def test_authorization_not_found(self, mocker, email_presentation_configuration):
        """An unknown presentation-request configuration id raises ObjectDoesNotExist."""
        with pytest.raises(ObjectDoesNotExist):
            authorization("invalid_pres_req_conf_id", {})

    @override_settings(
        ACA_PY_URL="https://aca.com",
        ACA_PY_TRANSPORT_URL="https://aca-trans.com",
        SITE_URL="https://site.com",
    )
    def test_authorization(self, mocker, email_presentation_configuration):
        """Happy path: a session is persisted and the ACA-Py client is used once."""
        create_proof_req = mocker.patch.object(
            ACAClient,
            "create_proof_request",
            return_value={
                "presentation_request": "some_pr",
                "thread_id": "some_tid",
                "presentation_exchange_id": "some_pres_ex_id",
            },
        )
        get_public_did = mocker.patch.object(
            ACAClient, "get_public_did", return_value={"verkey": "some_verkey"}
        )
        short_url, session_id, pres_req, b64_presentation = authorization(
            "verified-email", {"some_param": "some_value"}
        )
        # The returned short URL must be a syntactically valid URL.
        url_validator = URLValidator()
        url_validator(short_url)
        session = AuthSession.objects.first()
        assert session_id == str(session.id)
        assert session.presentation_record_id == "verified-email"
        assert session.presentation_request_id == "some_pres_ex_id"
        assert session.presentation_request == {
            "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/present-proof/1.0/request-presentation",
            "@id": "some_tid",
            "request_presentations~attach": [
                {
                    "@id": "libindy-request-presentation-0",
                    "data": {"base64": ANY},
                    "mime-type": "application/json",
                }
            ],
            "~service": {
                "serviceEndpoint": "https://aca-trans.com",
                "routingKeys": None,
                "recipientKeys": ["some_verkey"],
            },
            "comment": None,
        }
        assert session.request_parameters == {"some_param": "some_value"}
        assert pres_req == "some_pres_ex_id"
        assert b64_presentation
        create_proof_req.assert_called_once()
        get_public_did.assert_called_once()
import copy
import os
import numpy as np
from .conservative import ListDependenceResult
from .utils import get_pair_id, get_pairs_by_levels, get_possible_structures
# Supported dependence-parameter grid designs for the grid-search.
GRIDS = ['lhs', 'rand', 'vertices']

# Optional keyword arguments accepted by `iterative_vine_minimize`.
LIB_PARAMS = ['iterative_save', 'iterative_load', 'input_names',
              'output_names', 'keep_input_samples', 'load_input_samples',
              'use_grid', 'save_grid', 'grid_path', 'n_pairs_start']
# TODO: add the function as a method in ConservativeEstimate
def iterative_vine_minimize(estimate_object, n_input_sample=1000, n_dep_param_init=20, max_n_pairs=5, grid_type='lhs',
                            q_func=np.var, n_add_pairs=1, n_remove_pairs=0, adapt_vine_structure=True, delta=0.1,
                            with_bootstrap=False, verbose=False, **kwargs):
    """Iteratively minimises the output quantity of interest.

    Greedy forward-selection over the pairs of input variables: at each
    iteration every remaining candidate pair is evaluated through a
    grid-search on its dependence parameter, the pair yielding the lowest
    quantity is kept, and the loop stops when ``max_n_pairs`` is reached or
    when the improvement falls below ``delta`` times the initial spread.

    Parameters
    ----------
    estimate_object : ConservativeEstimate
        Estimation object; it is shallow-copied and reconfigured per candidate.
    n_input_sample : int
        Number of input observations per evaluation. ``0`` means "load only".
    n_dep_param_init : int, callable or None
        Grid size at iteration 0; a callable is used as
        ``n_dep_param = n_dep_param_init(iteration)``; ``None`` delegates to
        the gridsearch default.
    max_n_pairs : int
        Maximum number of dependent pairs to select.
    grid_type : str
        One of ``GRIDS``.
    q_func : callable
        Quantity of interest computed on the output sample.
    n_add_pairs : int
        Number of best pairs added at each iteration.
    n_remove_pairs : int
        Number of worst candidates definitively discarded at each iteration.
    adapt_vine_structure : bool
        If True, rebuild the vine structure around the candidate pairs.
    delta : float
        Relative stopping threshold on the quantity improvement.
    with_bootstrap : bool or int
        If an int, number of bootstrap replicates used for selection.
    verbose : bool
        Print progress information.
    kwargs :
        Optional library parameters listed in ``LIB_PARAMS``.

    Returns
    -------
    IterativeDependenceResults
        All the grid-search results gathered along the iterations.
    """
    quant_estimate = copy.copy(estimate_object)

    corr_dim = quant_estimate.corr_dim
    dim = quant_estimate.input_dim
    max_n_pairs = min(max_n_pairs, corr_dim)

    assert grid_type in GRIDS, "Unknow Grid type {0}".format(grid_type)
    assert 0 < max_n_pairs <= corr_dim, "Maximum number of pairs must be positive"
    assert 1 <= n_add_pairs <= corr_dim, "Must add at least one pair at each iteration"
    assert 0 <= n_remove_pairs < corr_dim, "This cannot be negative"
    assert callable(q_func), "Quantity function must be callable"

    if n_add_pairs == corr_dim:
        adapt_vine_structure = False
        print('The number of dimension is equal to the number of pairs to add')

    # Check if the given parameters are known
    for lib_param in kwargs:
        assert lib_param in LIB_PARAMS, "Unknow parameter %s" % (lib_param)

    # Iterative save of the results
    iterative_save = False
    if 'iterative_save' in kwargs:
        iterative_save = kwargs['iterative_save']
        if iterative_save is True:
            save_dir = './iterative_result'
            # BUGFIX: the default directory must also exist before saving
            # (previously only the explicit-path branch created it).
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
        elif isinstance(iterative_save, str):
            save_dir = os.path.abspath(iterative_save)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
        elif iterative_save is False:
            pass
        else:
            raise TypeError("Wrong type for iterative_save: {}".format(type(iterative_save)))

    # Iterative load of the results
    iterative_load = False
    if 'iterative_load' in kwargs:
        iterative_load = kwargs['iterative_load']
        if iterative_load is True:
            load_dir = './iterative_result'
        elif isinstance(iterative_load, str):
            load_dir = os.path.abspath(iterative_load)
            if not os.path.exists(load_dir):
                print("Directory %s does not exists" % (load_dir))
        elif iterative_load is False:
            pass
        else:
            raise TypeError("Wrong type for iterative_load: {0}".format(type(iterative_load)))

    # Optional library parameters with their defaults.
    input_names = kwargs.get('input_names', [])
    output_names = kwargs.get('output_names', [])
    keep_input_samples = kwargs.get('keep_input_samples', True)
    load_input_samples = kwargs.get('load_input_samples', True)
    use_grid = kwargs.get('use_grid', None)
    save_grid = kwargs.get('save_grid', None)
    n_pairs_start = kwargs.get('n_pairs_start', 0)

    # Only a loading execution: nothing is computed, so nothing is saved.
    if n_input_sample == 0:
        iterative_save = None

    # Initial configurations
    init_family = quant_estimate.families
    init_bounds_tau = quant_estimate.bounds_tau
    fixed_params = quant_estimate.fixed_params.copy()
    init_indep_pairs = quant_estimate._indep_pairs[:]
    init_fixed_pairs = quant_estimate._fixed_pairs[:]

    # New empty configurations
    families = np.zeros((dim, dim), dtype=int)
    bounds_tau = np.zeros((dim, dim))
    bounds_tau[:] = None  # NaN-filled: "no bound" marker

    # Selected pairs through iterations
    selected_pairs = []
    all_results = IterativeDependenceResults(dim)

    # Schedule of the dependence-grid size along the iterations.
    if callable(n_dep_param_init):
        n_param_iter = n_dep_param_init
    elif n_dep_param_init is None:
        n_param_iter = lambda k: None
    else:
        n_param_iter = lambda k: int(n_dep_param_init * (k + 1)**2)
    n_dep_param = n_param_iter(0)

    # The candidate pairs (lower-triangular indices), as [i, j] lists.
    indices = np.asarray(np.tril_indices(dim, k=-1)).T.tolist()

    # Remove fixed pairs from the list and add them in the family matrix
    for pair in init_fixed_pairs:
        indices.remove(pair)
        families[pair[0], pair[1]] = init_family[pair[0], pair[1]]

    # Remove independent pairs
    for pair in init_indep_pairs:
        indices.remove(pair)

    ## Algorithm Loop
    cost = 0
    n_pairs = 1
    iteration = 0
    min_quant_iter = []
    stop_conditions = False
    while not stop_conditions:
        min_quantity = {}
        for i, j in indices:
            # Family matrix for this candidate pair
            tmp_families = families.copy()
            tmp_families[i, j] = init_family[i, j]
            tmp_bounds_tau = bounds_tau.copy()
            tmp_bounds_tau[i, j] = init_bounds_tau[i, j]
            tmp_bounds_tau[j, i] = init_bounds_tau[j, i]

            # Adapt the vine structure matrix so candidates sit in the first trees
            if adapt_vine_structure:
                pairs_iter = init_indep_pairs + init_fixed_pairs + selected_pairs + [(i, j)]
                pairs_iter_id = [get_pair_id(dim, pair, with_plus=False) for pair in pairs_iter]
                pairs_by_levels = get_pairs_by_levels(dim, pairs_iter_id)
                quant_estimate.vine_structure = get_possible_structures(dim, pairs_by_levels)[0]

            # Family matrix is changed
            quant_estimate.families = tmp_families
            quant_estimate.fixed_params = fixed_params
            quant_estimate.bounds_tau = tmp_bounds_tau

            # Lets get the results for this family structure
            if n_input_sample > 0 and n_pairs >= n_pairs_start:
                results = quant_estimate.gridsearch(n_dep_param=n_dep_param,
                                                    n_input_sample=n_input_sample,
                                                    grid_type=grid_type,
                                                    keep_input_samples=keep_input_samples,
                                                    load_grid=use_grid,
                                                    save_grid=save_grid,
                                                    use_sto_func=True)
                results.q_func = q_func

            if iterative_save or iterative_load:
                cop_str = "_".join([str(l) for l in quant_estimate._family_list])
                vine_str = "_".join([str(l) for l in quant_estimate._vine_structure_list])
                # BUGFIX: the original always used `load_dir`, which is
                # undefined (NameError) when only `iterative_save` is set.
                result_dir = load_dir if iterative_load else save_dir
                filename = "%s/%s" % (result_dir, grid_type)
                if n_dep_param is None:
                    filename += "_K_None"
                else:
                    filename += "_K_%d" % (n_dep_param)
                filename += "_cop_%s_vine_%s.hdf5" % (cop_str, vine_str)

                if iterative_save and n_pairs >= n_pairs_start:
                    results.to_hdf(filename, input_names, output_names, with_input_sample=keep_input_samples)

                if iterative_load:
                    name, extension = os.path.splitext(filename)
                    # Scan "<name>_num_<k><ext>" variants until one matches the
                    # current configuration.
                    condition = os.path.exists(filename)
                    k = 0
                    while condition:
                        try:
                            load_result = ListDependenceResult.from_hdf(filename, with_input_sample=load_input_samples, q_func=q_func)
                            # TODO: create a function to check the configurations of two results
                            # TODO: is the testing necessary? If the saving worked, the loading should be ok.
                            np.testing.assert_equal(load_result.families, tmp_families, err_msg="Not good family")
                            np.testing.assert_equal(load_result.bounds_tau, tmp_bounds_tau, err_msg="Not good Bounds")
                            np.testing.assert_equal(load_result.vine_structure, quant_estimate.vine_structure, err_msg="Not good structure")
                            condition = False
                        except AssertionError:
                            filename = '%s_num_%d%s' % (name, k, extension)
                            condition = os.path.exists(filename)
                            k += 1
                    # Replace the actual results with the loaded results.
                    # NOTE(review): if no file exists at all, `load_result` is
                    # unbound here and this raises — preserved original behavior.
                    results = load_result

            # How much does it costs
            cost += results.n_evals

            # Save the minimum
            if not with_bootstrap:
                min_quantity[i, j] = results.min_quantity
            else:
                assert isinstance(with_bootstrap, int), "Must be a number"
                n_bootstrap = with_bootstrap
                results.compute_bootstraps(n_bootstrap)
                print(results.bootstrap_samples.mean(axis=1))
                min_quantity[i, j] = results[results.bootstrap_samples.mean(axis=1).argmin()]

            if verbose:
                print('n={}, K={}. Worst quantile of {} at {}'.format(results.n_input_sample, n_dep_param, selected_pairs + [(i, j)], min_quantity[i, j]))
                if input_names:
                    pair_names = ["%s-%s" % (input_names[k1], input_names[k2]) for k1, k2 in selected_pairs + [(i, j)]]
                    print("The variables are: " + " ".join(pair_names))

            # Store the result
            all_results[iteration, i, j] = results

        # Candidates ranked by increasing quantity (best first)
        sorted_quantities = sorted(min_quantity.items(), key=lambda x: x[1])

        # Spread of the first iteration, used by the stopping criterion
        if iteration == 0:
            delta_q_init = abs(sorted_quantities[0][1] - sorted_quantities[-1][1])

        min_quant_iter.append(sorted_quantities[0][1])

        # Definitively discard the worst candidates
        if (n_remove_pairs > 0) and (n_remove_pairs < len(sorted_quantities) - 1):
            for pair in sorted_quantities[-n_remove_pairs:]:
                indices.remove(list(pair[0]))

        selected_pair = sorted_quantities[0][0]

        # Selected pairs to add
        for pair in sorted_quantities[:n_add_pairs]:
            i, j = pair[0][0], pair[0][1]
            families[i, j] = init_family[i, j]
            bounds_tau[i, j] = init_bounds_tau[i, j]
            bounds_tau[j, i] = init_bounds_tau[j, i]
            indices.remove(list(pair[0]))
            selected_pairs.append(pair[0])

        # BUGFIX: store a snapshot; appending `selected_pairs` itself would
        # alias the same still-growing list in every iteration.
        all_results.selected_pairs.append(list(selected_pairs))

        tmp = '\nIteration {0}: selected pair: {1}'.format(iteration + 1, selected_pair)
        if input_names:
            tmp += " (" + "-".join(input_names[k] for k in selected_pair) + ")"
        print(tmp)
        print('Total number of evaluations = %d. Minimum quantity at %.2f.\n' % (cost, min_quantity[selected_pair]))

        # Stop conditions
        if n_pairs >= max_n_pairs:
            stop_conditions = True
            print('Max number of pairs reached')

        if iteration > 0:
            delta_q = -(min_quant_iter[-1] - min_quant_iter[-2])
            if delta_q <= delta * delta_q_init:
                stop_conditions = True
                print('Minimum_variation not fulfiled: %.2f <= %0.2f' % (delta_q, delta * delta_q_init))

        n_pairs += n_add_pairs
        if n_dep_param is not None:
            n_dep_param = n_param_iter(iteration + 1)
        if not stop_conditions:
            all_results.new_iteration()
        all_results.n_evals = cost
        iteration += 1

    return all_results
class IterativeDependenceResults(object):
    """Container for the grid-search results of ``iterative_vine_minimize``.

    Results are stored per iteration in (dim x dim) object matrices, indexed
    by the lower-triangular pair ``(i, j)``; empty slots hold ``None``.
    """

    def __init__(self, dim):
        """
        Parameters
        ----------
        dim : int
            Problem dimension (number of input variables).
        """
        self.iteration = 0
        n_pairs = int(dim * (dim - 1) / 2)
        self.results = [[]]
        tmp = np.zeros((dim, dim), dtype=object)
        # BUGFIX: the original wrote ``tmp[:] == None`` — a comparison with no
        # effect — leaving the matrix filled with zeros instead of None.
        tmp[:] = None
        self.results[self.iteration] = tmp
        self.selected_pairs = [[]]
        self.dim = dim
        self.n_pairs = n_pairs
        self.n_evals = 0

    def new_iteration(self):
        """Open the (None-filled) storage matrix for the next iteration."""
        self.iteration += 1
        tmp = np.zeros((self.dim, self.dim), dtype=object)
        tmp[:] = None
        self.results.append(tmp)

    def __getitem__(self, item):
        """Return the result stored at ``(iteration, i, j)``."""
        iteration, i, j = item
        return self.results[iteration][i, j]

    def __setitem__(self, item, result):
        """Store ``result`` at ``(iteration, i, j)``."""
        iteration, i, j = item
        self.results[iteration][i, j] = result

    def min_quantities(self, iteration):
        """Matrix of per-pair minimum quantities for an iteration (0. where empty)."""
        results = self.results[iteration]
        dim = self.dim
        # BUGFIX: ``np.float`` was removed in NumPy 1.20+; use the builtin.
        min_quantities = np.zeros((dim, dim), dtype=float)
        for i in range(1, dim):
            for j in range(i):
                if results[i, j] is not None:
                    min_quantities[i, j] = results[i, j].min_quantity
        return min_quantities

    def min_results(self, iteration):
        """Matrix of per-pair minimum results for an iteration (0 where empty)."""
        results = self.results[iteration]
        dim = self.dim
        min_results = np.zeros((dim, dim), dtype=object)
        for i in range(1, dim):
            for j in range(i):
                if results[i, j] is not None:
                    min_results[i, j] = results[i, j].min_result
        return min_results

    def min_quantity(self, iteration):
        """Overall minimum quantity of a given iteration."""
        min_quantities = self.min_quantities(iteration)
        min_quantity = min_quantities.min()
        return min_quantity

    def min_result(self, iteration):
        """Result associated with the overall minimum quantity of an iteration."""
        min_quantities = self.min_quantities(iteration)
        id_min = min_quantities.argmin()
        min_result = self.min_results(iteration).item(id_min)
        return min_result
import os
import numpy as np
from .conservative import ListDependenceResult
from .utils import get_pair_id, get_pairs_by_levels, get_possible_structures
GRIDS = ['lhs', 'rand', 'vertices']
LIB_PARAMS = ['iterative_save', 'iterative_load', 'input_names',
'output_names', 'keep_input_samples', 'load_input_samples',
'use_grid', 'save_grid', 'grid_path', 'n_pairs_start']
# TODO: add the function as a method in ConservativeEstimate
def iterative_vine_minimize(estimate_object, n_input_sample=1000, n_dep_param_init=20, max_n_pairs=5, grid_type='lhs',
q_func=np.var, n_add_pairs=1, n_remove_pairs=0, adapt_vine_structure=True, delta=0.1,
with_bootstrap=False, verbose=False, **kwargs):
"""Iteratively minimises the output quantity of interest.
Parameters
----------
Returns
-------
"""
quant_estimate = copy.copy(estimate_object)
corr_dim = quant_estimate.corr_dim
dim = quant_estimate.input_dim
max_n_pairs = min(max_n_pairs, corr_dim)
assert grid_type in GRIDS, "Unknow Grid type {0}".format(grid_type)
assert 0 < max_n_pairs <= corr_dim, "Maximum number of pairs must be positive"
assert 1 <= n_add_pairs <= corr_dim, "Must add at least one pair at each iteration"
assert 0 <= n_remove_pairs < corr_dim, "This cannot be negative"
assert callable(q_func), "Quantity function must be callable"
if n_add_pairs == corr_dim:
adapt_vine_structure = False
print('The number of dimension is equal to the number of pairs to add')
# Check if the given parameters are known
for lib_param in kwargs:
assert lib_param in LIB_PARAMS, "Unknow parameter %s" % (lib_param)
# Iterative save of the results
iterative_save = False
if 'iterative_save' in kwargs:
iterative_save = kwargs['iterative_save']
if iterative_save is True:
save_dir = './iterative_result'
elif isinstance(iterative_save, str):
save_dir = os.path.abspath(iterative_save)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
elif iterative_save is False:
pass
else:
raise TypeError("Wrong type for iterative_save: {}".format(type(iterative_save)))
# Iterative load of the results
iterative_load = False
if 'iterative_load' in kwargs:
iterative_load = kwargs['iterative_load']
if iterative_load is True:
load_dir = './iterative_result'
elif isinstance(iterative_load, str):
load_dir = os.path.abspath(iterative_load)
if not os.path.exists(load_dir):
print("Directory %s does not exists" % (load_dir))
elif iterative_load is False:
pass
else:
raise TypeError("Wrong type for iterative_load: {0}".format(type(iterative_load)))
input_names = []
if 'input_names' in kwargs:
input_names = kwargs['input_names']
output_names = []
if 'output_names' in kwargs:
output_names = kwargs['output_names']
keep_input_samples = True
if 'keep_input_samples' in kwargs:
keep_input_samples = kwargs['keep_input_samples']
load_input_samples = True
if 'load_input_samples' in kwargs:
load_input_samples = kwargs['load_input_samples']
use_grid = None
if 'use_grid' in kwargs:
use_grid = kwargs['use_grid']
save_grid = None
if 'save_grid' in kwargs:
save_grid = kwargs['save_grid']
n_pairs_start = 0
if 'n_pairs_start' in kwargs:
n_pairs_start = kwargs['n_pairs_start']
# Only a loading execution
if n_input_sample == 0:
iterative_save = None
# Initial configurations
init_family = quant_estimate.families
init_bounds_tau = quant_estimate.bounds_tau
fixed_params = quant_estimate.fixed_params.copy()
init_indep_pairs = quant_estimate._indep_pairs[:]
init_fixed_pairs = quant_estimate._fixed_pairs[:]
# New empty configurations
families = np.zeros((dim, dim), dtype=int)
bounds_tau = np.zeros((dim, dim))
bounds_tau[:] = None
# Selected pairs through iterations
selected_pairs = []
all_results = IterativeDependenceResults(dim)
if callable(n_dep_param_init):
n_param_iter = n_dep_param_init
elif n_dep_param_init is None:
n_param_iter = lambda x: None
else:
n_param_iter = lambda k: int(n_dep_param_init*(k+1)**2)
n_dep_param = n_param_iter(0)
# The pairs to do at each iterations
indices = np.asarray(np.tril_indices(dim, k=-1)).T.tolist()
# Remove fixed pairs from the list and add in the family matrix
for pair in init_fixed_pairs:
indices.remove(pair)
families[pair[0], pair[1]] = init_family[pair[0], pair[1]]
# Remove independent pairs
for pair in init_indep_pairs:
indices.remove(pair)
## Algorithm Loop
cost = 0
n_pairs = 1
iteration = 0
min_quant_iter = []
stop_conditions = False
while not stop_conditions:
min_quantity = {}
for i, j in indices:
# Family matrix for this iteration
tmp_families = families.copy()
tmp_families[i, j] = init_family[i, j]
tmp_bounds_tau = bounds_tau.copy()
tmp_bounds_tau[i, j] = init_bounds_tau[i, j]
tmp_bounds_tau[j, i] = init_bounds_tau[j, i]
# Adapt the vine structure matrix
if adapt_vine_structure:
pairs_iter = init_indep_pairs + init_fixed_pairs + selected_pairs + [(i, j)]
pairs_iter_id = [get_pair_id(dim, pair, with_plus=False) for pair in pairs_iter]
pairs_by_levels = get_pairs_by_levels(dim, pairs_iter_id)
quant_estimate.vine_structure = get_possible_structures(dim, pairs_by_levels)[0]
# Family matrix is changed
quant_estimate.families = tmp_families
quant_estimate.fixed_params = fixed_params
quant_estimate.bounds_tau = tmp_bounds_tau
# Lets get the results for this family structure
if n_input_sample > 0 and n_pairs >= n_pairs_start:
results = quant_estimate.gridsearch(n_dep_param=n_dep_param,
n_input_sample=n_input_sample,
grid_type=grid_type,
keep_input_samples=keep_input_samples,
load_grid=use_grid,
save_grid=save_grid,
use_sto_func=True)
results.q_func = q_func
if iterative_save or iterative_load:
cop_str = "_".join([str(l) for l in quant_estimate._family_list])
vine_str = "_".join([str(l) for l in quant_estimate._vine_structure_list])
filename = "%s/%s" % (load_dir, grid_type)
if n_dep_param is None:
filename += "_K_None"
else:
filename += "_K_%d" % (n_dep_param)
filename += "_cop_%s_vine_%s.hdf5" % (cop_str, vine_str)
if iterative_save and n_pairs >= n_pairs_start:
results.to_hdf(filename, input_names, output_names, with_input_sample=keep_input_samples)
if iterative_load :
name, extension = os.path.splitext(filename)
condition = os.path.exists(filename)
k = 0
while condition:
try:
load_result = ListDependenceResult.from_hdf(filename, with_input_sample=load_input_samples, q_func=q_func)
# TODO: create a function to check the configurations of two results
# TODO: is the testing necessary? If the saving worked, the loading should be ok.
np.testing.assert_equal(load_result.families, tmp_families, err_msg="Not good family")
np.testing.assert_equal(load_result.bounds_tau, tmp_bounds_tau, err_msg="Not good Bounds")
np.testing.assert_equal(load_result.vine_structure, quant_estimate.vine_structure, err_msg="Not good structure")
condition = False
except AssertionError:
filename = '%s_num_%d%s' % (name, k, extension)
condition = os.path.exists(filename)
k += 1
# Replace the actual results with the loaded results (this results + all the previous saved ones)
results = load_result
# How much does it costs
cost += results.n_evals
# Save the minimum
if not with_bootstrap:
min_quantity[i, j] = results.min_quantity
else:
assert isinstance(with_bootstrap, int), "Must be a number"
n_bootstrap = with_bootstrap
results.compute_bootstraps(n_bootstrap)
print(results.bootstrap_samples.mean(axis=1))
min_quantity[i, j] = results[results.bootstrap_samples.mean(axis=1).argmin()]
if verbose:
print('n={}, K={}. Worst quantile of {} at {}'.format(results.n_input_sample, n_dep_param, selected_pairs + [(i, j)], min_quantity[i, j]))
if input_names:
pair_names = [ "%s-%s" % (input_names[k1], input_names[k2]) for k1, k2 in selected_pairs + [(i, j)]]
print("The variables are: " + " ".join(pair_names))
# Store the result
all_results[iteration, i, j] = results
# Get the min from the iterations
sorted_quantities = sorted(min_quantity.items(), key=lambda x: x[1])
# Delay of the first iteration
if iteration == 0:
delta_q_init = abs(sorted_quantities[0][1] - sorted_quantities[-1][1])
min_quant_iter.append(sorted_quantities[0][1])
if (n_remove_pairs > 0) and (n_remove_pairs < len(sorted_quantities)-1):
# The pairs to remove
for pair in sorted_quantities[-n_remove_pairs:]:
indices.remove(list(pair[0]))
selected_pair = sorted_quantities[0][0]
# Selected pairs to add
for pair in sorted_quantities[:n_add_pairs]:
i, j = pair[0][0], pair[0][1]
families[i, j] = init_family[i, j]
bounds_tau[i, j] = init_bounds_tau[i, j]
bounds_tau[j, i] = init_bounds_tau[j, i]
indices.remove(list(pair[0]))
selected_pairs.append(pair[0])
all_results.selected_pairs.append(selected_pairs)
if True:
k1, k2 = selected_pair
tmp = '\nIteration {0}: selected pair: {1}'.format(iteration+1, selected_pair)
if input_names:
tmp += " (" + "-".join(input_names[k] for k in selected_pair) + ")"
print(tmp)
print('Total number of evaluations = %d. Minimum quantity at %.2f.\n' % (cost, min_quantity[selected_pair]))
# Stop conditions
if n_pairs >= max_n_pairs:
stop_conditions = True
print('Max number of pairs reached')
if iteration > 0:
delta_q = -(min_quant_iter[-1] - min_quant_iter[-2])
if delta_q <= delta*delta_q_init:
stop_conditions = True
print('Minimum_variation not fulfiled: %.2f <= %0.2f' % (delta_q, delta*delta_q_init))
n_pairs += n_add_pairs
if n_dep_param is not None:
n_dep_param = n_param_iter(iteration+1)
if not stop_conditions:
all_results.new_iteration()
all_results.n_evals = cost
iteration += 1
return all_results
class IterativeDependenceResults(object):
    """Container for the grid-search results of ``iterative_vine_minimize``.

    Results are stored per iteration in (dim x dim) object matrices, indexed
    by the lower-triangular pair ``(i, j)``; empty slots hold ``None``.
    """

    def __init__(self, dim):
        """
        Parameters
        ----------
        dim : int
            Problem dimension (number of input variables).
        """
        self.iteration = 0
        n_pairs = int(dim * (dim - 1) / 2)
        self.results = [[]]
        tmp = np.zeros((dim, dim), dtype=object)
        # BUGFIX: the original wrote ``tmp[:] == None`` — a comparison with no
        # effect — leaving the matrix filled with zeros instead of None.
        tmp[:] = None
        self.results[self.iteration] = tmp
        self.selected_pairs = [[]]
        self.dim = dim
        self.n_pairs = n_pairs
        self.n_evals = 0

    def new_iteration(self):
        """Open the (None-filled) storage matrix for the next iteration."""
        self.iteration += 1
        tmp = np.zeros((self.dim, self.dim), dtype=object)
        tmp[:] = None
        self.results.append(tmp)

    def __getitem__(self, item):
        """Return the result stored at ``(iteration, i, j)``."""
        iteration, i, j = item
        return self.results[iteration][i, j]

    def __setitem__(self, item, result):
        """Store ``result`` at ``(iteration, i, j)``."""
        iteration, i, j = item
        self.results[iteration][i, j] = result

    def min_quantities(self, iteration):
        """Matrix of per-pair minimum quantities for an iteration (0. where empty)."""
        results = self.results[iteration]
        dim = self.dim
        # BUGFIX: ``np.float`` was removed in NumPy 1.20+; use the builtin.
        min_quantities = np.zeros((dim, dim), dtype=float)
        for i in range(1, dim):
            for j in range(i):
                if results[i, j] is not None:
                    min_quantities[i, j] = results[i, j].min_quantity
        return min_quantities

    def min_results(self, iteration):
        """Matrix of per-pair minimum results for an iteration (0 where empty)."""
        results = self.results[iteration]
        dim = self.dim
        min_results = np.zeros((dim, dim), dtype=object)
        for i in range(1, dim):
            for j in range(i):
                if results[i, j] is not None:
                    min_results[i, j] = results[i, j].min_result
        return min_results

    def min_quantity(self, iteration):
        """Overall minimum quantity of a given iteration."""
        min_quantities = self.min_quantities(iteration)
        min_quantity = min_quantities.min()
        return min_quantity

    def min_result(self, iteration):
        """Result associated with the overall minimum quantity of an iteration."""
        min_quantities = self.min_quantities(iteration)
        id_min = min_quantities.argmin()
        min_result = self.min_results(iteration).item(id_min)
        return min_result
from retirable_resources.resource_manager import DeleteValue
import unittest
from retirable_resources import (
RetirableResourceManager,
SetValue,
AddToList,
ResourceDoesNotExist,
OwnerDoesNotExist,
ResourceOwnerView,
ResourceWatcher,
)
from .fixtures import RetirableResourceManagerTest
class TestInitialize(unittest.TestCase):
    """Constructor validation of RetirableResourceManager root paths."""

    def test_init_with_empty_path(self):
        """Empty paths of any supported type are rejected."""
        client = object()
        for empty_path in ("", [], tuple()):
            with self.assertRaises(ValueError):
                RetirableResourceManager(empty_path, client=client)

    def test_init_with_incorrect_type_path(self):
        """A path that is neither string nor sequence raises TypeError."""
        with self.assertRaises(TypeError):
            RetirableResourceManager(object(), client=object())

    def test_init_fails_with_odd_doc_path(self):
        """Paths with an odd number of segments are rejected."""
        client = object()
        for odd_path in (["foo"], ("foo",), "foo"):
            with self.assertRaises(ValueError):
                RetirableResourceManager(odd_path, client=client)

    def test_init_with_even_doc_path(self):
        """Even-length paths normalise to a tuple, whatever the input type."""
        client = object()
        for even_path in ("foo/bar", ["foo", "bar"], ("foo", "bar")):
            manager = RetirableResourceManager(even_path, client=client)
            self.assertEqual(manager.root_path, ("foo", "bar"))
class Test(RetirableResourceManagerTest):
def test_set_owners(self):
r = self.r
self.assertListEqual(r.list_owners(), [])
r.set_owners(["bob"])
self.assertListEqual(r.list_owners(), ["bob"])
r.set_owners(["bob", "mary"])
self.assertListEqual(r.list_owners(), ["bob", "mary"])
def test_update_data_on_nonexistent_resource(self):
r = self.r
with self.assertRaises(ResourceDoesNotExist):
r.update_data("resource", "bob", SetValue("foo", "bar"))
    def test_data(self):
        """Exercise per-owner data updates through the full resource lifecycle."""
        r = self.r
        # Expected snapshots after the first two SetValue updates.
        data1 = {"example": "1234"}
        data2 = {"example": "abcd"}
        r.add_resource("resource.1")
        r.set_owners(["bob"])
        bobresource1 = "resource.1"
        r.update_data(bobresource1, "bob", SetValue("example", "1234"))
        self.assertDictEqual(r.get_data(bobresource1, owner="bob"), data1)
        # A second SetValue on the same key overwrites it.
        r.update_data(bobresource1, "bob", SetValue("example", "abcd"))
        self.assertDictEqual(r.get_data(bobresource1, owner="bob"), data2)
        # Several mutations can be applied in a single update_data call.
        r.update_data(
            bobresource1,
            "bob",
            SetValue("example", "wxyz"),
            SetValue("something_temporary", "12345"),
            AddToList("log", "apple"),
        )
        self.assertDictEqual(
            r.get_data(bobresource1, owner="bob"),
            {
                "example": "wxyz",
                "something_temporary": "12345",
                "log": ["apple"],
            },
        )
        # DeleteValue removes a single key and leaves the rest untouched.
        r.update_data(
            bobresource1,
            "bob",
            DeleteValue("something_temporary"),
        )
        self.assertDictEqual(
            r.get_data(bobresource1, owner="bob"),
            {
                "example": "wxyz",
                "log": ["apple"],
            },
        )
        # AddToList appends multiple values, preserving order.
        r.update_data(
            bobresource1,
            "bob",
            AddToList("log", "banana", "carrot"),
        )

        def check_nothing_changed():
            # The stored data must survive take/free/owner-change/retirement.
            self.assertDictEqual(
                r.get_data(bobresource1, owner="bob"),
                {
                    "example": "wxyz",
                    "log": ["apple", "banana", "carrot"],
                },
            )

        resource_taken_by_bob = r.take("bob", tag="coffee")
        self.assertEqual(resource_taken_by_bob, bobresource1)
        check_nothing_changed()
        r.free(bobresource1, "bob")
        check_nothing_changed()
        r.set_owners(["bob", "mary"])
        check_nothing_changed()
        r.retire(bobresource1, owner="bob")
        check_nothing_changed()
        r.retire_resource(bobresource1)
        check_nothing_changed()
        # Data is still readable (per other owner's view) once retired.
        self.assertEqual(r.status(bobresource1, "mary"), "retired")
def test_resource_exists(self):
r = self.r
self.assertListEqual(r.list_owners(), [])
self.assertFalse(r.resource_exists("resource.1"))
r.add_resource("resource.1")
self.assertTrue(r.resource_exists("resource.1"))
def test_is_active(self):
r = self.r
self.assertIsNone(r.is_active("resource"))
r.add_resource("resource")
self.assertTrue(r.is_active("resource"))
r.retire_resource("resource")
self.assertIsNotNone(r.is_active("resource"))
self.assertFalse(r.is_active("resource"))
def test_allocation_clears_on_retirement(self):
r = self.r
r.set_owners(["bob"])
r.add_resource("resource")
r.take("bob", "coffee")
self.assertSetEqual(r.list_allocation("bob", "coffee"), {"resource"})
r.retire("resource", "bob")
self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
r.add_resource("resource2")
r.take("bob", "coffee")
self.assertSetEqual(r.list_allocation("bob", "coffee"), {"resource2"})
r.retire_resource("resource2")
self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
    def test_allocation(self):
        """request_allocation caps at availability and resizes both ways."""
        r = self.r
        r.set_owners(["bob"])
        all_resources = {"r1", "r2", "r3"}
        for resource in all_resources:
            r.add_resource(resource)
        self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
        # Asking for more than exists returns everything available.
        allocated_resources = r.request_allocation("bob", "coffee", 10)
        self.assertSetEqual(allocated_resources, all_resources)
        for resource in allocated_resources:
            self.assertEqual(r.status(resource, "bob"), "owned")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), allocated_resources)
        # Shrinking the allocation frees the surplus resource.
        allocated_resources = r.request_allocation("bob", "coffee", 2)
        self.assertEqual(len(allocated_resources), 2)
        unallocated_resources = all_resources - allocated_resources
        self.assertEqual(len(unallocated_resources), 1)
        for resource in allocated_resources:
            self.assertEqual(r.status(resource, "bob"), "owned")
        for resource in unallocated_resources:
            self.assertEqual(r.status(resource, "bob"), "free")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), allocated_resources)
        # Requesting zero releases everything back to the free pool.
        allocated_resources = r.request_allocation("bob", "coffee", 0)
        self.assertSetEqual(allocated_resources, set())
        for resource in all_resources:
            self.assertEqual(r.status(resource, "bob"), "free")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
def test_free_allocation_count(self):
r = self.r
r.set_owners(["bob"])
all_resources = {"r1", "r2", "r3", "r4", "r5"}
for resource in all_resources:
r.add_resource(resource)
r.take("bob", "coffee")
r.take("bob", "coffee")
r.take("bob", "tea")
self.assertEqual(len(r.list_allocation("bob", "coffee")), 2)
self.assertEqual(len(r.list_allocation("bob", "tea")), 1)
self.assertEqual(r.free_allocation_count("bob"), 2)
r.clear_allocation("bob")
self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
self.assertEqual(r.free_allocation_count("bob"), 5)
r.retire_resource("r2")
self.assertEqual(r.free_allocation_count("bob"), 4)
def test_owner_lifecycle(self):
r = self.r
with self.assertRaises(ResourceDoesNotExist):
r.status("resource", "mary")
self.assertEqual(r.add_resource("resource"), "ok")
self.assertTrue(r.is_active("resource"))
self.assertEqual(r.add_resource("resource"), "already exists")
with self.assertRaises(OwnerDoesNotExist):
r.status("resource", "mary")
r.set_owners(["mary"])
self.assertEqual(r.status("resource", "mary"), "free")
r.take("mary", tag="green tea")
self.assertEqual(r.status("resource", "mary"), "owned")
r.free("resource", owner="mary")
self.assertEqual(r.status("resource", "mary"), "free")
# We can retire an unknown owner, and it's a no-op
self.assertEqual(r.retire("resource", "unknown owner"), "resource active")
r.set_owners(["bob", "mary"])
self.assertEqual(r.retire("resource", "mary"), "resource active")
# When the last owner is retired, the whole resource is retired
self.assertEqual(r.retire("resource", "bob"), "resource retired")
def test_resource_retirement(self):
r = self.r
r.set_owners(["bob"])
r.add_resource("resource")
self.assertTrue(r.is_active("resource"))
self.assertEqual(r.status("resource", "bob"), "free")
r.retire_resource("resource")
self.assertFalse(r.is_active("resource"))
self.assertEqual(r.status("resource", "bob"), "retired")
def test_set_owners_updates_resources(self):
r = self.r
data = {"example": "1234"}
self.assertListEqual(r.list_owners(), [])
r.add_resource("resource.1")
r.add_resource("resource.2")
self.assertIsNone(r.take("bob", tag="coffee"))
r.set_owners(["bob"])
bobresource1 = r.take("bob", tag="coffee")
self.assertTrue(r.is_active(bobresource1))
self.assertTrue(bobresource1.startswith("resource."))
r.update_data(bobresource1, "bob", SetValue("example", "1234"))
self.assertDictEqual(r.get_data(bobresource1, owner="bob"), data)
self.assertListEqual(r.list_owners(), ["bob"])
r.set_owners(["bob", "mary"])
self.assertListEqual(r.list_owners(), ["bob", "mary"])
maryresource1 = r.take("mary", tag="green tea")
self.assertTrue(maryresource1.startswith("resource."))
maryresource2 = r.take("mary", tag="earl grey")
self.assertNotEqual(maryresource1, maryresource2)
self.assertIsNone(r.take("mary", tag="cola"))
self.assertEqual(r.status(maryresource2, "mary"), "owned")
r.free(maryresource2, "mary")
self.assertEqual(r.status(maryresource2, "mary"), "free")
r.add_resource("resource.3")
r.set_owners(["mary"])
self.assertListEqual(r.list_owners(), ["mary"])
def test_owner_view(self):
r = self.r
r.set_owners(["bob"])
r.add_resource("resource")
ov = ResourceOwnerView("bob", r)
ov.update_data("resource", SetValue("foo", 123))
self.assertEqual(r.get_data("resource", "bob"), {"foo": 123})
self.assertEqual(ov.get_data("resource"), {"foo": 123})
self.assertEqual(ov.status("resource"), "free")
bob_resource = ov.take("coffee")
self.assertEqual(bob_resource, "resource")
self.assertEqual(ov.status("resource"), "owned")
ov.free(bob_resource)
self.assertEqual(ov.status("resource"), "free")
ov.retire(bob_resource)
self.assertEqual(ov.status("resource"), "retired")
def test_watch(self):
r = self.r
r.add_resource("resource")
r.update_data("resource", "bob", SetValue("tokens", []))
rw = ResourceWatcher(r, "resource", "bob")
with rw.watch(timeout_seconds=1) as watcher:
r.update_data("resource", "mary", AddToList("tokens", "1234"))
r.update_data(
"resource", "bob", AddToList("tokens", "BOB VALUE", "BOB VALUE 1")
)
r.update_data("resource", "bob", AddToList("tokens", "BOB VALUE 2"))
r.update_data("resource", "mary", AddToList("tokens", "1234"))
self.assertTrue(watcher.updated)
self.assertFalse(watcher.expired)
self.assertDictEqual(watcher.data, {"tokens": ["BOB VALUE", "BOB VALUE 1"]})
with rw.watch(timeout_seconds=1) as watcher:
r.update_data("resource", "mary", AddToList("tokens", "1234"))
r.update_data("resource", "mary", AddToList("tokens", "1234"))
self.assertFalse(watcher.updated)
self.assertTrue(watcher.expired)
self.assertIsNone(watcher.data)
def test_dispose_resource(self):
r = self.r
r.add_resource("resource")
r.dispose_resource("resource")
with self.assertRaises(ResourceDoesNotExist):
r.get_data("resource", "bob")
def test_dispose_all_resources(self):
r = self.r
all_resources = ["r1", "r2", "r3", "r4"]
for resource in all_resources:
r.add_resource(resource)
r.dispose_all_resources()
for resource in all_resources:
with self.assertRaises(ResourceDoesNotExist):
r.get_data(resource, "bob")
if __name__ == "__main__":
unittest.main() | tests/test.py | from retirable_resources.resource_manager import DeleteValue
import unittest
from retirable_resources import (
RetirableResourceManager,
SetValue,
AddToList,
ResourceDoesNotExist,
OwnerDoesNotExist,
ResourceOwnerView,
ResourceWatcher,
)
from .fixtures import RetirableResourceManagerTest
class TestInitialize(unittest.TestCase):
    """Constructor validation tests for RetirableResourceManager paths."""

    def test_init_with_empty_path(self):
        """Every empty spelling of a path (str, list, tuple) is rejected."""
        client = object()
        for empty_path in ("", [], tuple()):
            with self.assertRaises(ValueError):
                RetirableResourceManager(empty_path, client=client)

    def test_init_with_incorrect_type_path(self):
        """A path that is neither a string nor a sequence raises TypeError."""
        client = object()
        with self.assertRaises(TypeError):
            RetirableResourceManager(object(), client=client)

    def test_init_fails_with_odd_doc_path(self):
        """Document paths with an odd number of segments are invalid."""
        client = object()
        for odd_path in (["foo"], ("foo",), "foo"):
            with self.assertRaises(ValueError):
                RetirableResourceManager(odd_path, client=client)

    def test_init_with_even_doc_path(self):
        """All accepted two-segment path spellings normalise to one tuple."""
        client = object()
        for path in ("foo/bar", ["foo", "bar"], ("foo", "bar")):
            manager = RetirableResourceManager(path, client=client)
            self.assertEqual(manager.root_path, ("foo", "bar"))
class Test(RetirableResourceManagerTest):
    """Integration tests exercising the manager fixture ``self.r``.

    ``self.r`` is a RetirableResourceManager provided by the base fixture
    class; each test drives a call sequence and checks observable state.
    """

    def test_set_owners(self):
        """set_owners replaces the owner list; list_owners reflects it."""
        r = self.r
        self.assertListEqual(r.list_owners(), [])
        r.set_owners(["bob"])
        self.assertListEqual(r.list_owners(), ["bob"])
        r.set_owners(["bob", "mary"])
        self.assertListEqual(r.list_owners(), ["bob", "mary"])

    def test_update_data_on_nonexistent_resource(self):
        """Updating data on an unknown resource raises ResourceDoesNotExist."""
        r = self.r
        with self.assertRaises(ResourceDoesNotExist):
            r.update_data("resource", "bob", SetValue("foo", "bar"))

    def test_data(self):
        """SetValue/AddToList/DeleteValue mutate per-resource data, and the
        data is untouched by take/free/retire lifecycle transitions."""
        r = self.r
        data1 = {"example": "1234"}
        data2 = {"example": "abcd"}
        r.add_resource("resource.1")
        r.set_owners(["bob"])
        bobresource1 = "resource.1"
        r.update_data(bobresource1, "bob", SetValue("example", "1234"))
        self.assertDictEqual(r.get_data(bobresource1, owner="bob"), data1)
        r.update_data(bobresource1, "bob", SetValue("example", "abcd"))
        self.assertDictEqual(r.get_data(bobresource1, owner="bob"), data2)
        # Multiple mutations may be passed in a single update_data call.
        r.update_data(
            bobresource1,
            "bob",
            SetValue("example", "wxyz"),
            SetValue("something_temporary", "12345"),
            AddToList("log", "apple"),
        )
        self.assertDictEqual(
            r.get_data(bobresource1, owner="bob"),
            {
                "example": "wxyz",
                "something_temporary": "12345",
                "log": ["apple"],
            },
        )
        r.update_data(
            bobresource1,
            "bob",
            DeleteValue("something_temporary"),
        )
        self.assertDictEqual(
            r.get_data(bobresource1, owner="bob"),
            {
                "example": "wxyz",
                "log": ["apple"],
            },
        )
        r.update_data(
            bobresource1,
            "bob",
            AddToList("log", "banana", "carrot"),
        )

        def check_nothing_changed():
            # The data snapshot must survive every transition triggered below.
            self.assertDictEqual(
                r.get_data(bobresource1, owner="bob"),
                {
                    "example": "wxyz",
                    "log": ["apple", "banana", "carrot"],
                },
            )

        resource_taken_by_bob = r.take("bob", tag="coffee")
        self.assertEqual(resource_taken_by_bob, bobresource1)
        check_nothing_changed()
        r.free(bobresource1, "bob")
        check_nothing_changed()
        r.set_owners(["bob", "mary"])
        check_nothing_changed()
        r.retire(bobresource1, owner="bob")
        check_nothing_changed()
        r.retire_resource(bobresource1)
        check_nothing_changed()
        self.assertEqual(r.status(bobresource1, "mary"), "retired")

    def test_resource_exists(self):
        """resource_exists is False before add_resource and True after."""
        r = self.r
        self.assertListEqual(r.list_owners(), [])
        self.assertFalse(r.resource_exists("resource.1"))
        r.add_resource("resource.1")
        self.assertTrue(r.resource_exists("resource.1"))

    def test_is_active(self):
        """is_active: None for unknown, True after add, False after retire."""
        r = self.r
        self.assertIsNone(r.is_active("resource"))
        r.add_resource("resource")
        self.assertTrue(r.is_active("resource"))
        r.retire_resource("resource")
        # Retired is reported as False (not None) — the resource still exists.
        self.assertIsNotNone(r.is_active("resource"))
        self.assertFalse(r.is_active("resource"))

    def test_allocation_clears_on_retirement(self):
        """Retiring a resource (per-owner or fully) drops its allocations."""
        r = self.r
        r.set_owners(["bob"])
        r.add_resource("resource")
        r.take("bob", "coffee")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), {"resource"})
        r.retire("resource", "bob")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
        r.add_resource("resource2")
        r.take("bob", "coffee")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), {"resource2"})
        r.retire_resource("resource2")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), set())

    def test_allocation(self):
        """request_allocation caps at availability and frees surplus on
        subsequent smaller requests."""
        r = self.r
        r.set_owners(["bob"])
        all_resources = {"r1", "r2", "r3"}
        for resource in all_resources:
            r.add_resource(resource)
        self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
        # Asking for more than exists yields everything available.
        allocated_resources = r.request_allocation("bob", "coffee", 10)
        self.assertSetEqual(allocated_resources, all_resources)
        for resource in allocated_resources:
            self.assertEqual(r.status(resource, "bob"), "owned")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), allocated_resources)
        # Shrinking the request releases the extra resource.
        allocated_resources = r.request_allocation("bob", "coffee", 2)
        self.assertEqual(len(allocated_resources), 2)
        unallocated_resources = all_resources - allocated_resources
        self.assertEqual(len(unallocated_resources), 1)
        for resource in allocated_resources:
            self.assertEqual(r.status(resource, "bob"), "owned")
        for resource in unallocated_resources:
            self.assertEqual(r.status(resource, "bob"), "free")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), allocated_resources)
        # A zero request frees everything.
        allocated_resources = r.request_allocation("bob", "coffee", 0)
        self.assertSetEqual(allocated_resources, set())
        for resource in all_resources:
            self.assertEqual(r.status(resource, "bob"), "free")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), set())

    def test_free_allocation_count(self):
        """free_allocation_count tracks takes, clears, and retirements."""
        r = self.r
        r.set_owners(["bob"])
        all_resources = {"r1", "r2", "r3", "r4", "r5"}
        for resource in all_resources:
            r.add_resource(resource)
        r.take("bob", "coffee")
        r.take("bob", "coffee")
        r.take("bob", "tea")
        self.assertEqual(len(r.list_allocation("bob", "coffee")), 2)
        self.assertEqual(len(r.list_allocation("bob", "tea")), 1)
        self.assertEqual(r.free_allocation_count("bob"), 2)
        r.clear_allocation("bob")
        self.assertSetEqual(r.list_allocation("bob", "coffee"), set())
        self.assertEqual(r.free_allocation_count("bob"), 5)
        r.retire_resource("r2")
        self.assertEqual(r.free_allocation_count("bob"), 4)

    def test_owner_lifecycle(self):
        """Per-owner status transitions: free -> owned -> free -> retired."""
        r = self.r
        with self.assertRaises(ResourceDoesNotExist):
            r.status("resource", "mary")
        self.assertEqual(r.add_resource("resource"), "ok")
        self.assertTrue(r.is_active("resource"))
        # Adding twice is reported, not an error.
        self.assertEqual(r.add_resource("resource"), "already exists")
        with self.assertRaises(OwnerDoesNotExist):
            r.status("resource", "mary")
        r.set_owners(["mary"])
        self.assertEqual(r.status("resource", "mary"), "free")
        r.take("mary", tag="green tea")
        self.assertEqual(r.status("resource", "mary"), "owned")
        r.free("resource", owner="mary")
        self.assertEqual(r.status("resource", "mary"), "free")
        # We can retire an unknown owner, and it's a no-op
        self.assertEqual(r.retire("resource", "unknown owner"), "resource active")
        r.set_owners(["bob", "mary"])
        self.assertEqual(r.retire("resource", "mary"), "resource active")
        # When the last owner is retired, the whole resource is retired
        self.assertEqual(r.retire("resource", "bob"), "resource retired")

    def test_resource_retirement(self):
        """retire_resource deactivates the resource for all owners."""
        r = self.r
        r.set_owners(["bob"])
        r.add_resource("resource")
        self.assertTrue(r.is_active("resource"))
        self.assertEqual(r.status("resource", "bob"), "free")
        r.retire_resource("resource")
        self.assertFalse(r.is_active("resource"))
        self.assertEqual(r.status("resource", "bob"), "retired")

    def test_set_owners_updates_resources(self):
        """Resources added before/after set_owners become takeable per owner,
        and each owner's allocation pool is independent."""
        r = self.r
        data = {"example": "1234"}
        self.assertListEqual(r.list_owners(), [])
        r.add_resource("resource.1")
        r.add_resource("resource.2")
        # Taking as a not-yet-registered owner yields nothing.
        self.assertIsNone(r.take("bob", tag="coffee"))
        r.set_owners(["bob"])
        bobresource1 = r.take("bob", tag="coffee")
        self.assertTrue(r.is_active(bobresource1))
        self.assertTrue(bobresource1.startswith("resource."))
        r.update_data(bobresource1, "bob", SetValue("example", "1234"))
        self.assertDictEqual(r.get_data(bobresource1, owner="bob"), data)
        self.assertListEqual(r.list_owners(), ["bob"])
        r.set_owners(["bob", "mary"])
        self.assertListEqual(r.list_owners(), ["bob", "mary"])
        maryresource1 = r.take("mary", tag="green tea")
        self.assertTrue(maryresource1.startswith("resource."))
        maryresource2 = r.take("mary", tag="earl grey")
        self.assertNotEqual(maryresource1, maryresource2)
        # Both resources are now owned by mary, so a third take fails.
        self.assertIsNone(r.take("mary", tag="cola"))
        self.assertEqual(r.status(maryresource2, "mary"), "owned")
        r.free(maryresource2, "mary")
        self.assertEqual(r.status(maryresource2, "mary"), "free")
        r.add_resource("resource.3")
        r.set_owners(["mary"])
        self.assertListEqual(r.list_owners(), ["mary"])

    def test_owner_view(self):
        """ResourceOwnerView binds an owner and mirrors manager behaviour."""
        r = self.r
        r.set_owners(["bob"])
        r.add_resource("resource")
        ov = ResourceOwnerView("bob", r)
        ov.update_data("resource", SetValue("foo", 123))
        self.assertEqual(r.get_data("resource", "bob"), {"foo": 123})
        self.assertEqual(ov.get_data("resource"), {"foo": 123})
        self.assertEqual(ov.status("resource"), "free")
        bob_resource = ov.take("coffee")
        self.assertEqual(bob_resource, "resource")
        self.assertEqual(ov.status("resource"), "owned")
        ov.free(bob_resource)
        self.assertEqual(ov.status("resource"), "free")
        ov.retire(bob_resource)
        self.assertEqual(ov.status("resource"), "retired")

    def test_watch(self):
        """ResourceWatcher fires only on updates made by the watched owner."""
        r = self.r
        r.add_resource("resource")
        r.update_data("resource", "bob", SetValue("tokens", []))
        rw = ResourceWatcher(r, "resource", "bob")
        with rw.watch(timeout_seconds=1) as watcher:
            # Updates by "mary" must NOT trigger the watcher for "bob".
            r.update_data("resource", "mary", AddToList("tokens", "1234"))
            r.update_data(
                "resource", "bob", AddToList("tokens", "BOB VALUE", "BOB VALUE 1")
            )
            r.update_data("resource", "bob", AddToList("tokens", "BOB VALUE 2"))
            r.update_data("resource", "mary", AddToList("tokens", "1234"))
        self.assertTrue(watcher.updated)
        self.assertFalse(watcher.expired)
        # The watcher captures the data as of bob's FIRST update.
        self.assertDictEqual(watcher.data, {"tokens": ["BOB VALUE", "BOB VALUE 1"]})
        with rw.watch(timeout_seconds=1) as watcher:
            r.update_data("resource", "mary", AddToList("tokens", "1234"))
            r.update_data("resource", "mary", AddToList("tokens", "1234"))
        self.assertFalse(watcher.updated)
        self.assertTrue(watcher.expired)
        self.assertIsNone(watcher.data)

    def test_dispose_resource(self):
        """dispose_resource deletes the resource outright (not retirement)."""
        r = self.r
        r.add_resource("resource")
        r.dispose_resource("resource")
        with self.assertRaises(ResourceDoesNotExist):
            r.get_data("resource", "bob")

    def test_dispose_all_resources(self):
        """dispose_all_resources deletes every resource at once."""
        r = self.r
        all_resources = ["r1", "r2", "r3", "r4"]
        for resource in all_resources:
            r.add_resource(resource)
        r.dispose_all_resources()
        for resource in all_resources:
            with self.assertRaises(ResourceDoesNotExist):
                r.get_data(resource, "bob")
if __name__ == "__main__":
unittest.main() | 0.608129 | 0.348728 |
import ctypes

# Handles to the DLLs we need. Kernel32/User32 cover process and window
# APIs; Advapi32 is required because OpenProcessToken is exported from
# Advapi32.dll, not Kernel32.dll (looking it up on kernel32 fails).
k_handle = ctypes.WinDLL("Kernel32.dll")
u_handle = ctypes.WinDLL("User32.dll")
a_handle = ctypes.WinDLL("Advapi32.dll")

# Process access rights (PROCESS_ALL_ACCESS)
PROCESS_ALL_ACCESS = (0x000F0000 | 0x00100000 | 0xFFF)

# Token access rights (values from winnt.h)
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = 0x00020000
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATION = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |
                    TOKEN_ASSIGN_PRIMARY |
                    TOKEN_DUPLICATE |
                    TOKEN_IMPERSONATION |
                    TOKEN_QUERY |
                    TOKEN_QUERY_SOURCE |
                    TOKEN_ADJUST_PRIVILEGES |
                    TOKEN_ADJUST_GROUPS |
                    TOKEN_ADJUST_DEFAULT |
                    TOKEN_ADJUST_SESSIONID)

# Window title to look up, as bytes for the ANSI FindWindowA entry point.
lpWindowName = ctypes.c_char_p(input("Enter Window Name To Hook Into: ").encode('utf-8'))

# Find the top-level window with that title.
hWnd = u_handle.FindWindowA(None, lpWindowName)
if hWnd == 0:
    # BUG FIX: the original called the garbled name k_handle.GetLast+();
    # the real Win32 API is GetLastError().
    print("[+] Could Not Grab Handle! Error Code: {0}".format(k_handle.GetLastError()))
    exit(1)
else:
    print("[+] Grabbed Handle...")

# Resolve the PID that owns the window.
lpdwProcessId = ctypes.c_ulong()
# byref passes a pointer to the output variable, as the API requires.
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
if response == 0:
    print("[+] Could Not Get PID from Handle! Error Code: {0}".format(k_handle.GetLastError()))
    # Bail out: the PID is garbage, so the calls below would be meaningless.
    exit(1)
else:
    print("[+] Found PID...")

# Open the process by PID with full access.
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = lpdwProcessId
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
if hProcess <= 0:
    print("[+] Could Not Grab Privileged Handle! Error Code: {0}".format(k_handle.GetLastError()))
    # Bail out: no valid process handle to open a token from.
    exit(1)
else:
    print("[+] Privileged Handle Opened...")

# Open a handle to the process's access token.
ProcessHandle = hProcess
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
# BUG FIX: OpenProcessToken lives in Advapi32, not Kernel32.
response = a_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
if response > 0:
    print("[+] Got Handle! Token: {0}".format(TokenHandle))
else:
    print("[+] Could Not Grab Privileged Handle to Token! Error Code: {0}".format(k_handle.GetLastError()))
# Grab a handle to kernel32.dll & USer32.dll
k_handle = ctypes.WinDLL("Kernel32.dll")
u_handle = ctypes.WinDLL("User32.dll")
# Access Rights
PROCESS_ALL_ACCESS = (0x000F0000 | 0x00100000 | 0xFFF)
# Token Access Rights
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = 0x00020000
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATION = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# Grab The Windows Name from User32
lpWindowName = ctypes.c_char_p(input("Enter Window Name To Hook Into: ").encode('utf-8'))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(None, lpWindowName)
# Check to see if we have the Handle
if hWnd == 0:
print("[+] Could Not Grab Handle! + Code: {0}".format(k_handle.GetLast+()))
exit(1)
else:
print("[+] Grabbed Handle...")
# Get the PID of the process at the handle
lpdwProcessId = ctypes.c_ulong()
# We use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Check to see if the call Completed
if response == 0:
print("[+] Could Not Get PID from Handle! + Code: {0}".format(k_handle.GetLast+()))
else:
print("[+] Found PID...")
# Opening the Process by PID with Specific Access
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = lpdwProcessId
# Calling the Windows API Call to Open the Process
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Check to see if we have a valid Handle to the process
if hProcess <= 0:
print("[+] Could Not Grab Privileged Handle! + Code: {0}".format(k_handle.GetLast+()))
else:
print("[+] Privileged Handle Opened...")
# Open a Handle to the Process's Token Directly
ProcessHandle = hProcess
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
# Issue the API Call
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Handle an +
if response > 0:
print("[+] Got Handle! Token: {0}".format(TokenHandle))
else:
print("[+] Could Not Grab Privileged Handle to Token! + Code: {0}".format(k_handle.GetLast+())) | 0.189184 | 0.193547 |
from collections import OrderedDict
import copy
import os
from xrsdkit.tools import ymltools as xrsdyml
import fabio
import yaml
import numpy as np
from ..Workflow import Workflow
# NOTE: this workflow is for reading samples
# that were saved with YAML headers
inputs = OrderedDict(
header_file = None,
image_file = None,
q_I_file = None,
system_file = None
)
outputs = OrderedDict(
time = None,
header_data = None,
image_data = None,
q_I = None,
dI = None,
system = None
)
class Read(Workflow):
    """Workflow that loads a saved sample: YAML header, detector image,
    q/I scattering data, and an xrsdkit system definition.

    Each input is optional; missing files produce a message_callback note
    and leave the corresponding output as None.
    """

    def __init__(self):
        super(Read,self).__init__(inputs,outputs)

    def read_header(self,filepath):
        """Parse a YAML header file and return its contents.

        Uses safe_load (the bare yaml.load default loader can construct
        arbitrary objects) and a context manager so the file handle is
        closed — the original yaml.load(open(...)) leaked it.
        """
        with open(filepath, 'r') as header_file:
            return yaml.safe_load(header_file)

    def run(self):
        # Start from a fresh copy so stale outputs never leak between runs.
        self.outputs = copy.deepcopy(outputs)
        if (self.inputs['header_file']) and (os.path.exists(self.inputs['header_file'])):
            hdata = self.read_header(self.inputs['header_file'])
            self.outputs['header_data'] = hdata
            self.outputs['time'] = hdata['time']
        elif self.inputs['header_file']:
            self.message_callback('header file not found: {}'.format(self.inputs['header_file']))
        if (self.inputs['image_file']) and (os.path.exists(self.inputs['image_file'])):
            self.outputs['image_data'] = fabio.open(self.inputs['image_file'])
        elif self.inputs['image_file']:
            self.message_callback('image file not found: {}'.format(self.inputs['image_file']))
        if (self.inputs['q_I_file']) and (os.path.exists(self.inputs['q_I_file'])):
            q_I = np.loadtxt(self.inputs['q_I_file'],dtype='float')
            dI = None
            # The ndim check guards against single-row files, where loadtxt
            # returns a 1-D array and shape[1] would raise.
            if (q_I is not None) and (q_I.ndim == 2) and (q_I.shape[1] > 2):
                # BUG FIX: extract the error column BEFORE truncating q_I.
                # The original truncated to two columns first, then indexed
                # column 2 of the truncated array, which raises IndexError.
                dI = q_I[:,2]
                q_I = q_I[:,:2]
            self.outputs['q_I'] = q_I
            self.outputs['dI'] = dI
        elif self.inputs['q_I_file']:
            self.message_callback('q_I file not found: {}'.format(self.inputs['q_I_file']))
        if (self.inputs['system_file']) and (os.path.exists(self.inputs['system_file'])):
            self.message_callback('loading {}'.format(self.inputs['system_file']))
            self.outputs['system'] = xrsdyml.load_sys_from_yaml(self.inputs['system_file'])
        else:
            self.message_callback('xrsd system file not found: {}'.format(self.inputs['system_file']))
        return self.outputs
import copy
import os
from xrsdkit.tools import ymltools as xrsdyml
import fabio
import yaml
import numpy as np
from ..Workflow import Workflow
# NOTE: this workflow is for reading samples
# that were saved with YAML headers
inputs = OrderedDict(
header_file = None,
image_file = None,
q_I_file = None,
system_file = None
)
outputs = OrderedDict(
time = None,
header_data = None,
image_data = None,
q_I = None,
dI = None,
system = None
)
class Read(Workflow):
def __init__(self):
super(Read,self).__init__(inputs,outputs)
def read_header(self,filepath):
return yaml.load(open(filepath,'r'))
def run(self):
self.outputs = copy.deepcopy(outputs)
if (self.inputs['header_file']) and (os.path.exists(self.inputs['header_file'])):
hdata = self.read_header(self.inputs['header_file'])
self.outputs['header_data'] = hdata
self.outputs['time'] = hdata['time']
elif self.inputs['header_file']:
self.message_callback('header file not found: {}'.format(self.inputs['header_file']))
if (self.inputs['image_file']) and (os.path.exists(self.inputs['image_file'])):
self.outputs['image_data'] = fabio.open(self.inputs['image_file'])
elif self.inputs['image_file']:
self.message_callback('image file not found: {}'.format(self.inputs['image_file']))
if (self.inputs['q_I_file']) and (os.path.exists(self.inputs['q_I_file'])):
q_I = np.loadtxt(self.inputs['q_I_file'],dtype='float')
dI = None
if (q_I is not None) and (q_I.shape[1] > 2):
q_I = q_I[:,:2]
dI = q_I[:,2]
self.outputs['q_I'] = q_I
self.outputs['dI'] = dI
elif self.inputs['q_I_file']:
self.message_callback('q_I file not found: {}'.format(self.inputs['q_I_file']))
if (self.inputs['system_file']) and (os.path.exists(self.inputs['system_file'])):
self.message_callback('loading {}'.format(self.inputs['system_file']))
self.outputs['system'] = xrsdyml.load_sys_from_yaml(self.inputs['system_file'])
else:
self.message_callback('xrsd system file not found: {}'.format(self.inputs['system_file']))
return self.outputs | 0.404507 | 0.106598 |
import unittest
from typing import Any, Dict, List
from unittest.mock import Mock, call, patch
from nuplan.common.utils.helpers import keep_trying, try_n_times
class HelperTestingSetup:
    """Shared fixture state for the retry-helper test suites."""

    def __init__(self) -> None:
        """Create empty call arguments plus one passing and one failing mock."""
        self.args: List[Any] = []
        self.kwargs: Dict[str, Any] = {}
        self.errors = (RuntimeError,)
        # Always succeeds and returns a sentinel result.
        self.passing_function = Mock(return_value="result")
        # Raises the first configured error type on every invocation.
        self.failing_function = Mock(return_value="result", side_effect=self.errors[0])
class TestTryNTimes(unittest.TestCase, HelperTestingSetup):
    """Tests for try_n_times, which retries a callable a fixed number of times."""

    def setUp(self) -> None:
        """Inherited, see superclass"""
        HelperTestingSetup.__init__(self)

    def test_fails_on_invalid_number_of_tries(self) -> None:
        """Zero allowed tries is a programming error and must assert."""
        with self.assertRaises(AssertionError):
            try_n_times(self.passing_function, [], {}, self.errors, max_tries=0)

    def test_pass_on_valid_cases(self) -> None:
        """A succeeding callable's return value is passed straight through."""
        outcome = try_n_times(
            self.passing_function, self.args, self.kwargs, self.errors, max_tries=1
        )
        self.assertEqual(outcome, "result")
        self.passing_function.assert_called_once_with(*self.args, **self.kwargs)

    @patch("time.sleep")
    def test_fail_on_invalid_case_after_n_tries(self, mock_sleep: Mock) -> None:
        """After exhausting max_tries the error propagates, with sleeps between tries."""
        with self.assertRaises(self.errors[0]):
            try_n_times(
                self.failing_function, self.args, self.kwargs, self.errors,
                max_tries=2, sleep_time=4.2,
            )
        expected_calls = 2 * [call(*self.args, **self.kwargs)]
        self.failing_function.assert_has_calls(expected_calls)
        mock_sleep.assert_called_with(4.2)
class TestKeepTrying(unittest.TestCase, HelperTestingSetup):
    """Tests for keep_trying, which retries a callable until a deadline."""

    def setUp(self) -> None:
        """Inherited, see superclass"""
        HelperTestingSetup.__init__(self)

    def test_fails_on_invalid_number_of_tries(self) -> None:
        """A zero-second timeout is a programming error and must assert."""
        with self.assertRaises(AssertionError):
            keep_trying(self.passing_function, [], {}, self.errors, timeout=0.0)

    def test_pass_on_valid_cases(self) -> None:
        """A succeeding callable's result is returned (timing info is ignored)."""
        outcome, _ = keep_trying(
            self.passing_function, self.args, self.kwargs, self.errors, timeout=1
        )
        self.assertEqual(outcome, "result")
        self.passing_function.assert_called_once_with(*self.args, **self.kwargs)

    def test_fail_on_invalid_case_after_timeout(self) -> None:
        """Once the deadline passes, a TimeoutError is raised."""
        with self.assertRaises(TimeoutError):
            keep_trying(
                self.failing_function, self.args, self.kwargs, self.errors,
                timeout=1e-6, sleep_time=1e-5,
            )
        self.failing_function.assert_called_with(*self.args, **self.kwargs)
if __name__ == '__main__':
unittest.main() | nuplan/common/utils/test/test_helpers.py | import unittest
from typing import Any, Dict, List
from unittest.mock import Mock, call, patch
from nuplan.common.utils.helpers import keep_trying, try_n_times
class HelperTestingSetup:
"""Helper configuration class for testing"""
def __init__(self) -> None:
"""Initializes with mock values"""
self.args: List[Any] = list()
self.kwargs: Dict[str, Any] = dict()
self.errors = (RuntimeError,)
self.passing_function = Mock(return_value="result")
self.failing_function = Mock(return_value="result", side_effect=self.errors[0])
class TestTryNTimes(unittest.TestCase, HelperTestingSetup):
"""Test suite for tests that lets tests run multiple times before declaring failure."""
def setUp(self) -> None:
"""Inherited, see superclass"""
HelperTestingSetup.__init__(self)
def test_fails_on_invalid_number_of_tries(self) -> None:
"""Tests that we calling this method with zero tries result in failure."""
with self.assertRaises(AssertionError):
_ = try_n_times(self.passing_function, [], {}, self.errors, max_tries=0)
def test_pass_on_valid_cases(self) -> None:
"""Tests that for nominal cases the output of the function is returned."""
result = try_n_times(self.passing_function, self.args, self.kwargs, self.errors, max_tries=1)
self.assertEqual("result", result)
self.passing_function.assert_called_once_with(*self.args, **self.kwargs)
@patch("time.sleep")
def test_fail_on_invalid_case_after_n_tries(self, mock_sleep: Mock) -> None:
"""Tests that the helper throws after too many attempts."""
with self.assertRaises(self.errors[0]):
_ = try_n_times(self.failing_function, self.args, self.kwargs, self.errors, max_tries=2, sleep_time=4.2)
calls = [call(*self.args, **self.kwargs)] * 2
self.failing_function.assert_has_calls(calls)
mock_sleep.assert_called_with(4.2)
class TestKeepTrying(unittest.TestCase, HelperTestingSetup):
"""Test suite for tests that lets tests run until a timeout is reached before declaring failure."""
def setUp(self) -> None:
"""Inherited, see superclass"""
HelperTestingSetup.__init__(self)
def test_fails_on_invalid_number_of_tries(self) -> None:
"""Tests that we calling this method with zero tries result in failure."""
with self.assertRaises(AssertionError):
_ = keep_trying(self.passing_function, [], {}, self.errors, timeout=0.0)
def test_pass_on_valid_cases(self) -> None:
"""Tests that for nominal cases the output of the function is returned."""
result, _ = keep_trying(self.passing_function, self.args, self.kwargs, self.errors, timeout=1)
self.assertEqual("result", result)
self.passing_function.assert_called_once_with(*self.args, **self.kwargs)
def test_fail_on_invalid_case_after_timeout(self) -> None:
"""Tests that the helper throws after timeout."""
with self.assertRaises(TimeoutError):
_ = keep_trying(self.failing_function, self.args, self.kwargs, self.errors, timeout=1e-6, sleep_time=1e-5)
self.failing_function.assert_called_with(*self.args, **self.kwargs)
if __name__ == '__main__':
unittest.main() | 0.866895 | 0.587411 |
from tensorflow.keras import Sequential  # keras model
from tensorflow.keras.layers import Conv2D, MaxPool2D  # convolution layers
from tensorflow.keras.layers import Dense, Flatten  # affine layers
from tensorflow.keras.layers import Dropout
import os

# ---------------------------------------------------------------------------
# AlexNet-style 5-class image classifier trained from directories of images.
# ---------------------------------------------------------------------------

# Dataset directory layout: <base_dir>/train and <base_dir>/test, with one
# sub-directory per class (flow_from_directory infers labels from them).
base_dir = "C:\\Users\\user\\Desktop\\dataset\\data_set"
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'test')

# Hyper parameters
img_h = 224  # input height
img_w = 224  # input width
input_shape = (img_h, img_w, 3)

# 1. CNN model
print('model create')
model = Sequential()
# Convolution block 1
model.add(Conv2D(96, kernel_size=(11, 11), activation='relu', strides=4, padding='same',
                 input_shape=input_shape))
model.add(MaxPool2D(pool_size=(3, 3), strides=2, padding='valid'))
# Convolution block 2
model.add(Conv2D(256, kernel_size=(5, 5), activation='relu', strides=1, padding='same'))
model.add(MaxPool2D(pool_size=(3, 3), strides=2, padding='valid'))
# Convolution block 3: three conv layers with no pooling in between
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu', strides=1, padding='same'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu', strides=1, padding='same'))
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', strides=1, padding='same'))
# Flatten: 3-D feature maps -> 1-D vector
model.add(Flatten())
# Fully connected hidden layers
model.add(Dense(4096, activation='relu'))
model.add(Dense(1000, activation='relu'))
# Output layer: 5 classes
model.add(Dense(5, activation='softmax'))

# Integer class labels + multi-class output -> sparse categorical crossentropy.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])

# 2. Image file preprocessing via generators
from tensorflow.keras.preprocessing.image import ImageDataGenerator
print("image preprocessing")
# Rescale pixel values to [0, 1]
train_data = ImageDataGenerator(rescale=1. / 255)
validation_data = ImageDataGenerator(rescale=1. / 255)

# BUG FIX: the model is compiled with sparse_categorical_crossentropy and a
# 5-unit softmax, but the generators used class_mode='binary' (0/1 labels).
# 'sparse' yields integer class indices, which matches the loss and output.
train_generator = train_data.flow_from_directory(
    train_dir,
    target_size=(224, 224),  # resize images to the model input
    batch_size=20,
    class_mode='sparse')
validation_generator = validation_data.flow_from_directory(
    validation_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='sparse')

# 3. Training. Model.fit accepts generators directly; fit_generator is
# deprecated in modern TensorFlow.
model_fit = model.fit(
    train_generator,
    steps_per_epoch=40,  # batches per epoch (batch_size=20)
    epochs=50,
    validation_data=validation_generator,
    validation_steps=10)

# 4. Training-history graphs
import matplotlib.pyplot as plt
print(model_fit.history.keys())
loss = model_fit.history['loss']  # training loss per epoch
acc = model_fit.history['sparse_categorical_accuracy']
val_loss = model_fit.history['val_loss']  # validation loss per epoch
val_acc = model_fit.history['val_sparse_categorical_accuracy']
epochs = range(1, len(acc) + 1)

# accuracy: train vs validation
plt.plot(epochs, acc, 'bo', label='train acc')
plt.plot(epochs, val_acc, 'r', label='val acc')
plt.title('Training vs validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(loc='best')
plt.show()

# loss: train vs validation
plt.plot(epochs, loss, 'bo', label='train loss')
plt.plot(epochs, val_loss, 'r', label='val loss')
plt.title('Training vs validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()
from tensorflow.keras.layers import Conv2D, MaxPool2D # Convolution layer
from tensorflow.keras.layers import Dense, Flatten # Affine layer
from tensorflow.keras.layers import Dropout
import os
# dir setting
base_dir = "C:\\Users\\user\\Desktop\\dataset\\data_set"
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'test')
# Hyper parameters
img_h = 224 # height
img_w = 224 # width
input_shape = (img_h, img_w, 3)
# 1. CNN Model layer
print('model create')
model = Sequential()
# Convolution layer1
model.add(Conv2D(96, kernel_size=(11, 11), activation='relu', strides=4, padding='same',
input_shape=input_shape))
model.add(MaxPool2D(pool_size=(3, 3) , strides=2, padding='valid'))
# Convolution layer2
model.add(Conv2D(256, kernel_size=(5, 5), activation='relu', strides=1, padding='same'))
model.add(MaxPool2D(pool_size=(3, 3) , strides=2, padding='valid'))
# Convolution layer3 : maxpooling() 제외
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu', strides=1, padding='same'))
model.add(Conv2D(384, kernel_size=(3, 3), activation='relu', strides=1, padding='same'))
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', strides=1, padding='same'))
# Flatten layer : 3d -> 1d
model.add(Flatten())
# DNN hidden layer(Fully connected layer)
model.add(Dense(4096, activation='relu'))
model.add(Dense(1000, activation='relu'))
# DNN Output layer
model.add(Dense(5, activation='softmax'))
# model training set : Adam or RMSprop
model.compile(optimizer='adam',
# loss = 'binary_crossentropy', # integer(generator가 integer로 읽어옴) + 이항분류
# loss = 'categorical_crossentropy' # y:원핫인코딩
loss='sparse_categorical_crossentropy', # Y=integer + 다항분류
metrics=['sparse_categorical_accuracy'])
# 2. image file preprocessing : image 제너레이터 이용
from tensorflow.keras.preprocessing.image import ImageDataGenerator
print("image preprocessing")
# 특정 폴더의 이미지를 분류하기 위해서 학습시킬 데이터셋 생성
train_data = ImageDataGenerator(rescale=1. / 255) # 0~1 정규화
# 검증 데이터
validation_data = ImageDataGenerator(rescale=1. / 255) # 0~1 정규화
train_generator = train_data.flow_from_directory(
train_dir,
target_size=(224, 224), # image reshape
batch_size=20, # batch size
class_mode='binary') # binary label
# Found 2000 images belonging to 2 classes.
validation_generator = validation_data.flow_from_directory(
validation_dir,
target_size=(224, 224),
batch_size=20,
class_mode='binary')
# Found 1000 images belonging to 2 classes.
# 3. model training : image제너레이터 이용 모델 훈련
model_fit = model.fit_generator(
train_generator,
steps_per_epoch=40, # 20(배치사이즈:이미지 공급)* 100(steps 1에폭내에서 반복수)
epochs=50,
validation_data=validation_generator,
validation_steps=10) # 1000 = 20*50
# 4. model history graph
import matplotlib.pyplot as plt
print(model_fit.history.keys())
# dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
loss = model_fit.history['loss'] # train
acc = model_fit.history['sparse_categorical_accuracy']
val_loss = model_fit.history['val_loss'] # validation
val_acc = model_fit.history['val_sparse_categorical_accuracy']
epochs = range(1, len(acc) + 1)
# acc vs val_acc
plt.plot(epochs, acc, 'bo', label='train acc')
plt.plot(epochs, val_acc, 'r', label='val acc')
plt.title('Training vs validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuray')
plt.legend(loc='best')
plt.show()
# loss vs val_loss
plt.plot(epochs, loss, 'bo', label='train loss')
plt.plot(epochs, val_loss, 'r', label='val loss')
plt.title('Training vs validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show() | 0.823151 | 0.521715 |
emailSender = 'xxx@xxx'  # sender e-mail account
emailSenderPassword = '<PASSWORD>'  # sender e-mail password
emailSenderName = "昵称"  # sender display name
emailSMTPAddress = "xxxx"  # SMTP host (usually smtp.<domain>, e.g. smtp.126.com)
emailSMTPPort = 25  # SMTP port (plain is usually 25, SSL is usually 465)
emailTitle = "测试标题"  # mail subject
emailContentFilename = "EmailContent.txt"  # mail body (plain text)
#emailContentFilename = "EmailContent.html"  # mail body (HTML)
emailReceiversListFilename = "EmailReceiversList.csv"  # CSV of recipient addresses
failListFilename = "FailList.csv"  # addresses that failed to send

import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr

# Read the recipient list: first CSV column of each line, skipping blanks.
# BUG FIX: the file handles for the recipient list and the mail body were
# previously opened without ever being closed; use `with` so they are closed.
emailReceivers = []
with open(emailReceiversListFilename, 'r', encoding="utf8") as receiversFile:
    for line in receiversFile:
        address = line.strip().split(",")[0]
        if address != "":
            emailReceivers.append(address)

# Read the mail body; choose the MIME subtype from the file name.
if "html" in emailContentFilename:
    emailContentType = "html"
else:
    emailContentType = "plain"
with open(emailContentFilename, 'r', encoding="utf8") as contentFile:
    emailContent = contentFile.read()

# Connect to the server and send one message per recipient, so every
# recipient sees only their own address.  The fail-list file is closed at
# the end of the script.
failListFile = open(failListFilename, 'w', encoding="utf8")
try:
    server = smtplib.SMTP_SSL(emailSMTPAddress, emailSMTPPort)  # sender's SMTP server
    server.login(emailSender, emailSenderPassword)  # account, password
    successCount = 0
    for each in emailReceivers:
        try:
            msg = MIMEText(emailContent, emailContentType, 'utf-8')  # body + MIME type
            msg['From'] = formataddr([emailSenderName, emailSender])  # display name, account
            msg['Subject'] = emailTitle  # subject line
            #msg['To'] = formataddr(["收件人昵称", each])  # optional recipient display name
            msg['To'] = each  # recipient address
            server.sendmail(emailSender, [each], msg.as_string())
            print("成功发送邮件至:"+each)
            successCount += 1
        except Exception:
            # best-effort: record the failed address and keep going
            print("尝试发送至"+each+"失败")
            failListFile.write(each+"\n")
    server.quit()  # close the connection to the mail server
    print("共有"+str(successCount)+"封邮件发送成功,"+str(len(emailReceivers)-successCount)+"封邮件发送失败")
except Exception:
    print("与邮箱服务器连接失败")
failListFile.close() | SendEmails.py | emailSender = 'xxx@xxx' # 发件人邮箱账号
emailSenderPassword = '<PASSWORD>' # 发件人邮箱密码
emailSenderName = "昵称" # 发件人昵称
emailSMTPAddress = "xxxx" # 发件人邮箱SMTP地址(一般为smtp.邮箱后缀,如smtp.126.com)
emailSMTPPort = 25 # 发件人邮箱SMTP端口(非加密端口一般为25,加密端口一般为465)
emailTitle = "测试标题" # 邮件主题(标题)
emailContentFilename = "EmailContent.txt" # 邮件内容(文本形式)
#emailContentFilename = "EmailContent.html" # 邮件内容(网页形式)
emailReceiversListFilename = "EmailReceiversList.csv" # 收件人邮箱账号列表csv文件
failListFilename = "FailList.csv" # 发送失败的邮箱列表
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
# 读取收件人邮箱列表
emailReceiversList = open(emailReceiversListFilename, 'r', encoding="utf8").readlines()
emailReceivers = []
for each in emailReceiversList:
tmp = each.strip().split(",")[0]
if tmp!="":
emailReceivers.append(tmp)
# 读取邮件内容
if "html" in emailContentFilename:
emailContentType = "html"
else:
emailContentType = "plain"
emailContent = open(emailContentFilename, 'r', encoding="utf8").read()
# 连接服务器并发送邮件
failListFile = open(failListFilename, 'w', encoding="utf8")
try:
server = smtplib.SMTP_SSL(emailSMTPAddress, emailSMTPPort) # 发件人邮箱中的SMTP服务器
server.login(emailSender, emailSenderPassword) # 发件人邮箱账号、邮箱密码
successCount = 0
for each in emailReceivers: # 逐个邮箱发送,达到群发单显的效果
try:
msg = MIMEText(emailContent, emailContentType, 'utf-8') # 邮件内容、内容类型('plain'为文本,'html为网页)
msg['From'] = formataddr([emailSenderName, emailSender]) # 发件人邮箱昵称、发件人邮箱账号
msg['Subject']= emailTitle # 邮件的主题,也可以说是标题
#msg['To'] = formataddr(["收件人昵称",each]) # 对应收件人邮箱昵称、收件人邮箱账号
msg['To'] = each # 对应收件人邮箱账号
server.sendmail(emailSender, [each], msg.as_string()) # 发件人邮箱账号、收件人邮箱账号、发送邮件
print("成功发送邮件至:"+each)
successCount += 1
except Exception:
print("尝试发送至"+each+"失败")
failListFile.write(each+"\n")
server.quit() # 关闭与邮箱服务器的连接
print("共有"+str(successCount)+"封邮件发送成功,"+str(len(emailReceivers)-successCount)+"封邮件发送失败")
except Exception:
print("与邮箱服务器连接失败")
failListFile.close() | 0.036511 | 0.053502 |
import numpy as np
import pandas as pd
from .association_index import association_index
from .index_terms2counters import index_terms2counters
from .tf_matrix import tf_matrix
# pyltin: disable=c0103
# pylint: disable=too-many-arguments
# pylint: disable=invalid-name
def occurrence_matrix(
    column,
    by=None,
    min_occ=1,
    max_occ=None,
    min_occ_by=1,
    max_occ_by=None,
    normalization=None,
    scheme=None,
    sep="; ",
    directory="./",
):
    """Build a (co-)occurrence matrix between the terms of two columns.

    Rows come from ``by`` and columns from ``column``; when ``by`` is None
    or equal to ``column`` the result is the symmetric co-occurrence matrix
    of the column with itself.  The min/max occurrence thresholds, the
    weighting ``scheme`` and the item ``sep``arator are forwarded to
    ``tf_matrix``; the raw counts are then optionally normalized by
    ``association_index`` according to ``normalization``.

    NOTE(review): ``directory`` is assumed to point at a project folder
    that ``tf_matrix`` knows how to read -- confirm against callers.
    """
    if by is None or column == by:
        # self co-occurrence: build one TF matrix and reuse it for rows
        by = column
        matrix_in_columns = tf_matrix(
            directory=directory,
            column=column,
            min_occ=min_occ,
            max_occ=max_occ,
            scheme=scheme,
            sep=sep,
        )
        matrix_in_rows = matrix_in_columns.copy()
    else:
        matrix_in_columns = tf_matrix(
            directory=directory,
            column=column,
            min_occ=min_occ,
            max_occ=max_occ,
            scheme=scheme,
            sep=sep,
        )
        matrix_in_rows = tf_matrix(
            directory=directory,
            column=by,
            min_occ=min_occ_by,
            max_occ=max_occ_by,
            scheme=scheme,
            sep=sep,
        )
        matrix_in_rows = matrix_in_rows.dropna()
    # align both matrices on the documents they have in common so the
    # matrix product below is well defined
    common_documents = matrix_in_columns.index.intersection(matrix_in_rows.index)
    matrix_in_columns = matrix_in_columns.loc[common_documents, :]
    matrix_in_rows = matrix_in_rows.loc[common_documents, :]
    # co-occurrence counts: (terms_by x docs) @ (docs x terms_column)
    matrix_values = np.matmul(
        matrix_in_rows.transpose().values, matrix_in_columns.values
    )
    co_occ_matrix = pd.DataFrame(
        matrix_values,
        columns=matrix_in_columns.columns,
        index=matrix_in_rows.columns,
    )
    co_occ_matrix = association_index(
        matrix=co_occ_matrix,
        association=normalization,
    )
    # ---< remove rows and columns with no associations >---------------------------------
    co_occ_matrix = co_occ_matrix.loc[:, (co_occ_matrix != 0).any(axis=0)]
    co_occ_matrix = co_occ_matrix.loc[(co_occ_matrix != 0).any(axis=1), :]
return co_occ_matrix | techminer2/occurrence_matrix.py | import numpy as np
import pandas as pd
from .association_index import association_index
from .index_terms2counters import index_terms2counters
from .tf_matrix import tf_matrix
# pyltin: disable=c0103
# pylint: disable=too-many-arguments
# pylint: disable=invalid-name
def occurrence_matrix(
column,
by=None,
min_occ=1,
max_occ=None,
min_occ_by=1,
max_occ_by=None,
normalization=None,
scheme=None,
sep="; ",
directory="./",
):
if by is None or column == by:
by = column
matrix_in_columns = tf_matrix(
directory=directory,
column=column,
min_occ=min_occ,
max_occ=max_occ,
scheme=scheme,
sep=sep,
)
matrix_in_rows = matrix_in_columns.copy()
else:
matrix_in_columns = tf_matrix(
directory=directory,
column=column,
min_occ=min_occ,
max_occ=max_occ,
scheme=scheme,
sep=sep,
)
matrix_in_rows = tf_matrix(
directory=directory,
column=by,
min_occ=min_occ_by,
max_occ=max_occ_by,
scheme=scheme,
sep=sep,
)
matrix_in_rows = matrix_in_rows.dropna()
common_documents = matrix_in_columns.index.intersection(matrix_in_rows.index)
matrix_in_columns = matrix_in_columns.loc[common_documents, :]
matrix_in_rows = matrix_in_rows.loc[common_documents, :]
matrix_values = np.matmul(
matrix_in_rows.transpose().values, matrix_in_columns.values
)
co_occ_matrix = pd.DataFrame(
matrix_values,
columns=matrix_in_columns.columns,
index=matrix_in_rows.columns,
)
co_occ_matrix = association_index(
matrix=co_occ_matrix,
association=normalization,
)
# ---< remove rows and columns with no associations >---------------------------------
co_occ_matrix = co_occ_matrix.loc[:, (co_occ_matrix != 0).any(axis=0)]
co_occ_matrix = co_occ_matrix.loc[(co_occ_matrix != 0).any(axis=1), :]
return co_occ_matrix | 0.401688 | 0.21963 |
import numpy as np
import scipy
import scipy.stats
from scipy.interpolate import interp1d, UnivariateSpline
# functions more or less directly from scipy or numpy
def linregress(x, y, _larch=None):
    """Larch-facing wrapper around scipy.stats.linregress."""
    fit_result = scipy.stats.linregress(x, y)
    return fit_result
linregress.__doc__ = scipy.stats.linregress.__doc__
def polyfit(x, y, deg, rcond=None, full=False, _larch=None):
    """Least-squares polynomial fit of degree *deg* to x, y data.

    BUG FIX: ``scipy.polyfit`` was only a re-export of ``numpy.polyfit``
    and was removed in SciPy 1.8, so this wrapper (and the ``__doc__``
    assignment below) crashed on modern SciPy.  Call numpy directly.
    """
    return np.polyfit(x, y, deg, rcond=rcond, full=full)
polyfit.__doc__ = np.polyfit.__doc__
def _interp1d(x, y, xnew, kind='linear', fill_value=np.nan, _larch=None, **kws):
"""interpolate x, y array onto new x values, using one of
linear, quadratic, or cubic interpolation
> ynew = interp1d(x, y, xnew, kind='linear')
arguments
---------
x original x values
y original y values
xnew new x values for values to be interpolated to
kind method to use: one of 'linear', 'quadratic', 'cubic'
fill_value value to use to fill values for out-of-range x values
note: unlike interp, this version will not extrapolate for values of `xnew`
that are outside the range of `x` -- it will use NaN or `fill_value`.
this is a bare-bones wrapping of scipy.interpolate.interp1d.
see also: interp
"""
kwargs = {'kind': kind, 'fill_value': fill_value,
'copy': False, 'bounds_error': False}
kwargs.update(kws)
return interp1d(x, y, **kwargs)(xnew)
def _interp(x, y, xnew, kind='linear', fill_value=np.nan, _larch=None, **kws):
"""interpolate x, y array onto new x values, using one of
linear, quadratic, or cubic interpolation
> ynew = interp(x, y, xnew, kind='linear')
arguments
---------
x original x values
y original y values
xnew new x values for values to be interpolated to
kind method to use: one of 'linear', 'quadratic', 'cubic'
fill_value value to use to fill values for out-of-range x values
note: unlike interp1d, this version will extrapolate for values of `xnew`
that are outside the range of `x`, using the polynomial order `kind`.
see also: interp1d
"""
kind = kind.lower()
kwargs = {'kind': kind, 'fill_value': fill_value,
'copy': False, 'bounds_error': False}
kwargs.update(kws)
out = interp1d(x, y, **kwargs)(xnew)
below = np.where(xnew<x[0])[0]
above = np.where(xnew>x[-1])[0]
if len(above) == 0 and len(below) == 0:
return out
for span, isbelow in ((below, True), (above, False)):
if len(span) < 1:
continue
ncoef = 5
if kind.startswith('lin'):
ncoef = 2
elif kind.startswith('quad'):
ncoef = 3
sel = slice(None, ncoef) if isbelow else slice(-ncoef, None)
if kind.startswith('lin'):
coefs = scipy.polyfit(x[sel], y[sel], 1)
out[span] = coefs[1] + coefs[0]*xnew[span]
elif kind.startswith('quad'):
coefs = scipy.polyfit(x[sel], y[sel], 2)
out[span] = coefs[2] + xnew[span]*(coefs[1] + coefs[0]*xnew[span])
elif kind.startswith('cubic'):
out[span] = UnivariateSpline(x[sel], y[sel], s=0)(xnew[span])
return out
def _deriv(arr, _larch=None, **kws):
if not isinstance(arr, np.ndarray):
raise Warning("cannot take derivative of non-numeric array")
return np.gradient(arr)
_deriv.__doc__ = np.gradient.__doc__
def as_ndarray(obj):
    """make sure a float, int, list of floats or ints,
    or tuple of floats or ints, acts as a numpy array
    """
    is_scalar = isinstance(obj, (float, int))
    return np.array([obj]) if is_scalar else np.asarray(obj)
def index_of(arrval, value):
    """return index of array *at or below* value
    returns 0 if value < min(array)
    """
    if value < min(arrval):
        return 0
    candidates = np.where(arrval <= value)[0]
    return candidates.max()
def index_nearest(array, value, _larch=None):
    """return index of array *nearest* to value
    """
    return np.argmin(np.abs(array - value))
def realimag(arr, _larch=None):
    "return real array of real/imag pairs from complex array"
    pairs = [(element.real, element.imag) for element in arr]
    return np.array(pairs).flatten()
def complex_phase(arr, _larch=None):
    "return phase, modulo 2pi jumps"
    # raw phase in (-pi, pi] from the real/imaginary parts
    phase = np.arctan2(arr.imag, arr.real)
    # step between successive samples in units of pi; a magnitude near 1
    # (or more) marks a crossing of the arctan2 branch cut
    d = np.diff(phase)/np.pi
    # 1.0*phase[:] makes a float copy so the raw phase is not modified
    out = 1.0*phase[:]
    # subtract the accumulated signed pi-crossings from each later sample,
    # unwrapping the phase into a continuous curve
    out[1:] -= np.pi*(np.round(abs(d))*np.sign(d)).cumsum()
    return out
def remove_dups(arr, tiny=1.e-8, frac=0.02):
    """avoid repeated successive values of an array that is expected
    to be monotonically increasing.

    For repeated values, the first encountered occurance (at index i)
    will be reduced by an amount that is the largest of these:

    [tiny, frac*abs(arr[i]-arr[i-1]), frac*abs(arr[i+1]-arr[i])]

    where tiny and frac are optional arguments.

    Parameters
    ----------
    arr :  array of values expected to be monotonically increasing
    tiny : smallest expected absolute value of interval [1.e-8]
    frac : smallest expected fractional interval [0.02]

    Returns
    -------
    out : ndarray, strictly monotonically increasing array

    Example
    -------
    >>> x = array([0, 1.1, 2.2, 2.2, 3.3])
    >>> print remove_dups(x)
    >>> array([ 0.   ,  1.1  ,  2.178,  2.2  ,  3.3  ])
    """
    if not isinstance(arr, np.ndarray):
        try:
            arr = np.array(arr)
        except:
            # NOTE(review): on failure arr stays non-ndarray and the
            # function falls through to return the input unchanged
            print( 'remove_dups: argument is not an array')
    if isinstance(arr, np.ndarray):
        shape = arr.shape
        arr = arr.flatten()  # flatten() copies, so the caller's array is untouched
        npts = len(arr)  # NOTE(review): npts is assigned but never used
        try:
            # indices i where arr[i] and arr[i+1] differ by less than `tiny`
            dups = np.where(abs(arr[:-1] - arr[1:]) < tiny)[0].tolist()
        except ValueError:
            dups = []
        for i in dups:
            # candidate downward shifts; use the largest so the gap is visible
            t = [tiny]
            if i > 0:
                t.append(frac*abs(arr[i]-arr[i-1]))
            if i < len(arr)-1:
                t.append(frac*abs(arr[i+1]-arr[i]))
            dx = max(t)
            arr[i] = arr[i] - dx
        arr.shape = shape
    return arr
def remove_nans2(a, b):
    """removes NAN and INF from 2 arrays,
    returning 2 arrays of the same length
    with NANs and INFs removed

    Any position where *either* array is non-finite is dropped from both,
    so the two returned arrays stay aligned.

    Parameters
    ----------
    a :      array 1
    b :      array 2

    Returns
    -------
    anew, bnew

    Example
    -------
    >>> x = array([0, 1.1, 2.2, nan, 3.3])
    >>> y = array([1, 2, 3, 4, 5])
    >>> remove_nans2(x, y)
    >>> array([ 0. ,  1.1,  2.2,  3.3]), array([1, 2, 3, 5])
    """
    if not isinstance(a, np.ndarray):
        try:
            a = np.array(a)
        except Exception:
            print( 'remove_nans2: argument 1 is not an array')
    if not isinstance(b, np.ndarray):
        try:
            b = np.array(b)
        except Exception:
            print( 'remove_nans2: argument 2 is not an array')
    # BUG FIX: the original tested `a == np.nan`, which is always False
    # (NaN never compares equal to anything), so NaN entries were never
    # actually removed; it also tested `a == np.inf`, which misses -inf.
    # np.isfinite rejects NaN, +inf and -inf in a single pass.
    good = np.isfinite(a) & np.isfinite(b)
    if not np.all(good):
        return a[good], b[good]
    return a, b
def registerLarchPlugin():
    """Expose this module's public functions to Larch under the '_math' group."""
    return ('_math', {'linregress': linregress,
                      'polyfit': polyfit,
                      'realimag': realimag,
                      'as_ndarray': as_ndarray,
                      'complex_phase': complex_phase,
                      'deriv': _deriv,
                      'interp': _interp,
                      'interp1d': _interp1d,
                      'remove_dups': remove_dups,
                      'remove_nans2': remove_nans2,
                      'index_of': index_of,
                      'index_nearest': index_nearest,
                      }
) | plugins/math/mathutils.py | import numpy as np
import scipy
import scipy.stats
from scipy.interpolate import interp1d, UnivariateSpline
# functions more or less directly from scipy or numpy
def linregress(x, y, _larch=None):
return scipy.stats.linregress(x, y)
linregress.__doc__ = scipy.stats.linregress.__doc__
def polyfit(x, y, deg, rcond=None, full=False, _larch=None):
    """Least-squares polynomial fit of degree *deg* to x, y data.

    BUG FIX: ``scipy.polyfit`` was only a re-export of ``numpy.polyfit``
    and was removed in SciPy 1.8, so this wrapper (and the ``__doc__``
    assignment below) crashed on modern SciPy.  Call numpy directly.
    """
    return np.polyfit(x, y, deg, rcond=rcond, full=full)
polyfit.__doc__ = np.polyfit.__doc__
def _interp1d(x, y, xnew, kind='linear', fill_value=np.nan, _larch=None, **kws):
"""interpolate x, y array onto new x values, using one of
linear, quadratic, or cubic interpolation
> ynew = interp1d(x, y, xnew, kind='linear')
arguments
---------
x original x values
y original y values
xnew new x values for values to be interpolated to
kind method to use: one of 'linear', 'quadratic', 'cubic'
fill_value value to use to fill values for out-of-range x values
note: unlike interp, this version will not extrapolate for values of `xnew`
that are outside the range of `x` -- it will use NaN or `fill_value`.
this is a bare-bones wrapping of scipy.interpolate.interp1d.
see also: interp
"""
kwargs = {'kind': kind, 'fill_value': fill_value,
'copy': False, 'bounds_error': False}
kwargs.update(kws)
return interp1d(x, y, **kwargs)(xnew)
def _interp(x, y, xnew, kind='linear', fill_value=np.nan, _larch=None, **kws):
"""interpolate x, y array onto new x values, using one of
linear, quadratic, or cubic interpolation
> ynew = interp(x, y, xnew, kind='linear')
arguments
---------
x original x values
y original y values
xnew new x values for values to be interpolated to
kind method to use: one of 'linear', 'quadratic', 'cubic'
fill_value value to use to fill values for out-of-range x values
note: unlike interp1d, this version will extrapolate for values of `xnew`
that are outside the range of `x`, using the polynomial order `kind`.
see also: interp1d
"""
kind = kind.lower()
kwargs = {'kind': kind, 'fill_value': fill_value,
'copy': False, 'bounds_error': False}
kwargs.update(kws)
out = interp1d(x, y, **kwargs)(xnew)
below = np.where(xnew<x[0])[0]
above = np.where(xnew>x[-1])[0]
if len(above) == 0 and len(below) == 0:
return out
for span, isbelow in ((below, True), (above, False)):
if len(span) < 1:
continue
ncoef = 5
if kind.startswith('lin'):
ncoef = 2
elif kind.startswith('quad'):
ncoef = 3
sel = slice(None, ncoef) if isbelow else slice(-ncoef, None)
if kind.startswith('lin'):
coefs = scipy.polyfit(x[sel], y[sel], 1)
out[span] = coefs[1] + coefs[0]*xnew[span]
elif kind.startswith('quad'):
coefs = scipy.polyfit(x[sel], y[sel], 2)
out[span] = coefs[2] + xnew[span]*(coefs[1] + coefs[0]*xnew[span])
elif kind.startswith('cubic'):
out[span] = UnivariateSpline(x[sel], y[sel], s=0)(xnew[span])
return out
def _deriv(arr, _larch=None, **kws):
if not isinstance(arr, np.ndarray):
raise Warning("cannot take derivative of non-numeric array")
return np.gradient(arr)
_deriv.__doc__ = np.gradient.__doc__
def as_ndarray(obj):
"""make sure a float, int, list of floats or ints,
or tuple of floats or ints, acts as a numpy array
"""
if isinstance(obj, (float, int)):
return np.array([obj])
return np.asarray(obj)
def index_of(arrval, value):
"""return index of array *at or below* value
returns 0 if value < min(array)
"""
if value < min(arrval):
return 0
return max(np.where(arrval<=value)[0])
def index_nearest(array, value, _larch=None):
"""return index of array *nearest* to value
"""
return np.abs(array-value).argmin()
def realimag(arr, _larch=None):
"return real array of real/imag pairs from complex array"
return np.array([(i.real, i.imag) for i in arr]).flatten()
def complex_phase(arr, _larch=None):
"return phase, modulo 2pi jumps"
phase = np.arctan2(arr.imag, arr.real)
d = np.diff(phase)/np.pi
out = 1.0*phase[:]
out[1:] -= np.pi*(np.round(abs(d))*np.sign(d)).cumsum()
return out
def remove_dups(arr, tiny=1.e-8, frac=0.02):
"""avoid repeated successive values of an array that is expected
to be monotonically increasing.
For repeated values, the first encountered occurance (at index i)
will be reduced by an amount that is the largest of these:
[tiny, frac*abs(arr[i]-arr[i-1]), frac*abs(arr[i+1]-arr[i])]
where tiny and frac are optional arguments.
Parameters
----------
arr : array of values expected to be monotonically increasing
tiny : smallest expected absolute value of interval [1.e-8]
frac : smallest expected fractional interval [0.02]
Returns
-------
out : ndarray, strictly monotonically increasing array
Example
-------
>>> x = array([0, 1.1, 2.2, 2.2, 3.3])
>>> print remove_dups(x)
>>> array([ 0. , 1.1 , 2.178, 2.2 , 3.3 ])
"""
if not isinstance(arr, np.ndarray):
try:
arr = np.array(arr)
except:
print( 'remove_dups: argument is not an array')
if isinstance(arr, np.ndarray):
shape = arr.shape
arr = arr.flatten()
npts = len(arr)
try:
dups = np.where(abs(arr[:-1] - arr[1:]) < tiny)[0].tolist()
except ValueError:
dups = []
for i in dups:
t = [tiny]
if i > 0:
t.append(frac*abs(arr[i]-arr[i-1]))
if i < len(arr)-1:
t.append(frac*abs(arr[i+1]-arr[i]))
dx = max(t)
arr[i] = arr[i] - dx
arr.shape = shape
return arr
def remove_nans2(a, b):
    """removes NAN and INF from 2 arrays,
    returning 2 arrays of the same length
    with NANs and INFs removed

    Any position where *either* array is non-finite is dropped from both,
    so the two returned arrays stay aligned.

    Parameters
    ----------
    a :      array 1
    b :      array 2

    Returns
    -------
    anew, bnew

    Example
    -------
    >>> x = array([0, 1.1, 2.2, nan, 3.3])
    >>> y = array([1, 2, 3, 4, 5])
    >>> remove_nans2(x, y)
    >>> array([ 0. ,  1.1,  2.2,  3.3]), array([1, 2, 3, 5])
    """
    if not isinstance(a, np.ndarray):
        try:
            a = np.array(a)
        except Exception:
            print( 'remove_nans2: argument 1 is not an array')
    if not isinstance(b, np.ndarray):
        try:
            b = np.array(b)
        except Exception:
            print( 'remove_nans2: argument 2 is not an array')
    # BUG FIX: the original tested `a == np.nan`, which is always False
    # (NaN never compares equal to anything), so NaN entries were never
    # actually removed; it also tested `a == np.inf`, which misses -inf.
    # np.isfinite rejects NaN, +inf and -inf in a single pass.
    good = np.isfinite(a) & np.isfinite(b)
    if not np.all(good):
        return a[good], b[good]
    return a, b
def registerLarchPlugin():
return ('_math', {'linregress': linregress,
'polyfit': polyfit,
'realimag': realimag,
'as_ndarray': as_ndarray,
'complex_phase': complex_phase,
'deriv': _deriv,
'interp': _interp,
'interp1d': _interp1d,
'remove_dups': remove_dups,
'remove_nans2': remove_nans2,
'index_of': index_of,
'index_nearest': index_nearest,
}
) | 0.78233 | 0.601301 |
import haiku as hk
import jax
import jax.numpy as jnp
from jdetr._typing import JaxArray
from jdetr.utils import maybe
__all__ = ["Transformer"]
class MultiHeadAttentionLayer(hk.Module):
    # Generic multi-head attention: key/query/value inputs are projected
    # per head, combined with scaled dot-product attention, and mixed back
    # to `feature_dim` by a final linear layer.
    def __init__(
        self,
        feature_dim: int,
        value_dim: int,
        num_heads: int,
        key_query_dim: int = None,
    ):
        # feature_dim:   output width of the layer
        # value_dim:     per-head width of the value projection
        # num_heads:     number of attention heads
        # key_query_dim: per-head width of key/query projections
        #                (falls back to value_dim when None)
        super().__init__()
        self.feature_dim = feature_dim
        self.value_dim = value_dim
        self.num_heads = num_heads
        self.key_query_dim = maybe(key_query_dim, value_dim)
    @hk.transparent
    def multi_head_linear(self, x: JaxArray, dim: int) -> JaxArray:
        """
        Project `x` to `num_heads` independent `dim`-wide slots.

        >>> from jdetr.utils import Init
        >>> x = jnp.zeros((2, 3, 4))
        >>> y = (
        ...     Init(MultiHeadAttentionLayer, feature_dim=5, value_dim=6, num_heads=7)
        ...     .multi_head_linear(x, dim=6)
        ... )
        >>> tuple(y.shape)
        (2, 3, 7, 6)
        """
        y = hk.Linear(dim * self.num_heads)(x)
        # (batch_idx, seq_idx, head_idx, hidden_dim)
        return y.reshape((*x.shape[:-1], self.num_heads, dim))
    # pylint: disable=invalid-name
    @hk.transparent
    def _multihead_attention(self, k: JaxArray, q: JaxArray, v: JaxArray) -> JaxArray:
        # scaled dot-product attention; softmax over axis 2 (the source/key
        # positions), giving one distribution per query position and head
        attn = jnp.einsum("btij,bsij->btsi", q, k) / jnp.sqrt(self.key_query_dim)
        attn = jax.nn.softmax(attn, axis=2)
        # weighted sum of values, heads concatenated, then projected
        z = jnp.einsum("btsi,bsij->btij", attn, v).reshape(
            q.shape[0], q.shape[1], self.num_heads * self.value_dim
        )
        return hk.Linear(self.feature_dim)(z)
    def __call__(self, key: JaxArray, query: JaxArray, value: JaxArray) -> JaxArray:
        """
        Full attention pass: project each input per head, then attend.

        >>> from jdetr.utils import Init
        >>> x = jnp.zeros((2, 3, 4))
        >>> x_ = jnp.zeros((2, 4, 4))
        >>> y = (
        ...     Init(MultiHeadAttentionLayer, feature_dim=5, value_dim=6, num_heads=7)
        ...     .__call__(key=x, query=x_, value=x)
        ... )
        >>> tuple(y.shape)
        (2, 4, 5)
        """
        k = self.multi_head_linear(key, self.key_query_dim)
        q = self.multi_head_linear(query, self.key_query_dim)
        v = self.multi_head_linear(value, self.value_dim)
        return self._multihead_attention(k, q, v)
class DetrMultiHeadAttentionLayer(MultiHeadAttentionLayer):
    """
    Self-attention variant used by DETR: the positional encoding is added
    to the keys and queries only, while the values come from the raw
    features.

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> y = (
    ...     Init(DetrMultiHeadAttentionLayer, 5, 6, 7, 32)
    ...     .__call__(x, pos_encoding)
    ... )
    >>> tuple(y.shape)
    (2, 16, 5)
    """
    def __call__(self, x: JaxArray, pos_encoding: JaxArray) -> JaxArray:
        # pylint: disable=invalid-name
        # Add dimension for head-index
        k = self.multi_head_linear(x + pos_encoding, self.key_query_dim)
        q = self.multi_head_linear(x + pos_encoding, self.key_query_dim)
        # values deliberately exclude the positional encoding
        v = self.multi_head_linear(x, self.value_dim)
        return self._multihead_attention(k, q, v)
class DropoutLayer(hk.Module):
    """Dropout wrapper that is a no-op at inference time.

    BUG FIX: the previous implementation ignored its ``is_training``
    argument and applied dropout unconditionally, which corrupts outputs
    during evaluation and consumes an rng key it does not need.
    """
    def __init__(self, dropout_rate: float):
        super().__init__()
        self.dropout_rate = dropout_rate  # fraction of units to drop
    def __call__(self, x: JaxArray, is_training: bool) -> JaxArray:
        if not is_training:
            return x  # identity at eval time; no rng key consumed
        rng = hk.next_rng_key()
        return hk.dropout(rng, self.dropout_rate, x)
class EncoderLayer(hk.Module):
    """
    Single transformer encoder layer: DETR-style self-attention followed by
    a position-wise feed-forward block, each with dropout, a residual
    connection and LayerNorm (post-norm arrangement).

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> y = (
    ...     Init(EncoderLayer, feature_dim=4, num_heads=2)
    ...     .__call__(x, pos_encoding, True)
    ... )
    >>> tuple(y.shape)
    (2, 16, 4)
    """
    def __init__(
        self,
        feature_dim: int,
        num_heads: int,
        dropout_rate: float = 0.1,
        feedforward_dim: int = 2048,
    ):
        super().__init__()
        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.dropout_rate = dropout_rate
        self.feedforward_dim = feedforward_dim
    def __call__(
        self, x: JaxArray, pos_encoding: JaxArray, is_training: bool
    ) -> JaxArray:
        # self-attention sub-layer with dropout + residual
        y = DetrMultiHeadAttentionLayer(
            self.feature_dim, self.feature_dim, self.num_heads
        )(x, pos_encoding)
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        # TODO Try out with batchnorm as well
        x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
        # position-wise feed-forward sub-layer (expand -> relu6 -> project)
        y = hk.Linear(self.feedforward_dim)(x)
        y = jax.nn.relu6(y)
        y = DropoutLayer(self.dropout_rate)(y, is_training)
        y = hk.Linear(self.feature_dim)(y)
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
class DecoderLayer(hk.Module):
    """
    Single transformer decoder layer: query self-attention, encoder-decoder
    cross-attention, then a position-wise feed-forward block, each followed
    by dropout, a residual connection and LayerNorm.

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> y = (
    ...     Init(DecoderLayer, feature_dim=4, num_heads=2)
    ...     .__call__(x, x, pos_encoding, pos_encoding, True)
    ... )
    >>> tuple(y.shape)
    (2, 16, 4)
    """
    def __init__(
        self,
        feature_dim: int,
        num_heads: int,
        dropout_rate: float = 0.1,
        feedforward_dim: int = 2048,
    ):
        super().__init__()
        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.dropout_rate = dropout_rate
        self.feedforward_dim = feedforward_dim
    def __call__(
        self,
        encoder_features: JaxArray,
        decoder_features: JaxArray,
        pos_encoding: JaxArray,
        query_encoding: JaxArray,
        is_training: bool,
    ) -> JaxArray:
        # self-attention over the decoder queries; query_encoding plays the
        # role of the positional encoding here
        y = DetrMultiHeadAttentionLayer(
            self.feature_dim, self.feature_dim, self.num_heads
        )(decoder_features, query_encoding)
        y = decoder_features + DropoutLayer(self.dropout_rate)(y, is_training)
        x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
        # cross-attention: queries from the decoder, keys/values from the
        # encoder output (keys get pos_encoding, queries get query_encoding)
        y = MultiHeadAttentionLayer(self.feature_dim, self.feature_dim, self.num_heads)(
            key=encoder_features + pos_encoding,
            query=x + query_encoding,
            value=encoder_features,
        )
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
        # position-wise feed-forward block
        y = hk.Linear(self.feedforward_dim)(x)
        y = jax.nn.relu6(y)
        y = DropoutLayer(self.dropout_rate)(y, is_training)
        y = hk.Linear(self.feature_dim)(y)
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
class Transformer(hk.Module):
    """
    DETR-style transformer: a stack of encoder layers over the image
    features followed by a stack of decoder layers over learned object
    queries.

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> query_encoding = jnp.zeros((2, 10, 4))
    >>> y = Init(Transformer, 4, 2, 1, 1).__call__(x, pos_encoding, query_encoding, True)
    >>> tuple(y.shape)
    (2, 10, 4)
    """
    def __init__(
        self,
        feature_dim: int,
        num_heads: int,
        num_encoder_layers: int,
        num_decoder_layers: int,
        dropout_rate: float = 0.1,
        feedforward_dim: int = 2048,
    ):
        # even feature_dim -- presumably required by the sin/cos split of
        # the sinusoidal positional encoding; TODO confirm
        assert feature_dim % 2 == 0
        super().__init__()
        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers
        self.dropout_rate = dropout_rate
        self.feedforward_dim = feedforward_dim
    def __call__(
        self,
        x: JaxArray,
        pos_encoding: JaxArray,
        query_encoding: JaxArray,
        is_training: bool,
    ) -> JaxArray:
        # encoder stack over the flattened image features
        encoder_features = x
        for _ in range(self.num_encoder_layers):
            encoder_features = EncoderLayer(
                self.feature_dim,
                self.num_heads,
                self.dropout_rate,
                feedforward_dim=self.feedforward_dim,
            )(encoder_features, pos_encoding, is_training)
        # decoder stack: object queries start from zeros and attend to the
        # encoder output
        decoder_features = jnp.zeros_like(query_encoding)
        for _ in range(self.num_decoder_layers):
            decoder_features = DecoderLayer(
                self.feature_dim,
                self.num_heads,
                self.dropout_rate,
                feedforward_dim=self.feedforward_dim,
            )(
                encoder_features,
                decoder_features,
                pos_encoding,
                query_encoding,
                is_training,
            )
return decoder_features | jdetr/models/transformer.py | import haiku as hk
import jax
import jax.numpy as jnp
from jdetr._typing import JaxArray
from jdetr.utils import maybe
__all__ = ["Transformer"]
class MultiHeadAttentionLayer(hk.Module):
    """Multi-head scaled dot-product attention with separate k/q/v inputs.

    Each of key/query/value gets its own per-head linear projection; head
    outputs are concatenated and projected back to ``feature_dim``.
    """

    def __init__(
        self,
        feature_dim: int,
        value_dim: int,
        num_heads: int,
        key_query_dim: int = None,
    ):
        super().__init__()
        self.feature_dim = feature_dim
        self.value_dim = value_dim
        self.num_heads = num_heads
        # key_query_dim defaults to value_dim when not given.
        self.key_query_dim = maybe(key_query_dim, value_dim)

    @hk.transparent
    def multi_head_linear(self, x: JaxArray, dim: int) -> JaxArray:
        """Project ``x`` to per-head features of size ``dim``.

        >>> from jdetr.utils import Init
        >>> x = jnp.zeros((2, 3, 4))
        >>> y = (
        ...     Init(MultiHeadAttentionLayer, feature_dim=5, value_dim=6, num_heads=7)
        ...     .multi_head_linear(x, dim=6)
        ... )
        >>> tuple(y.shape)
        (2, 3, 7, 6)
        """
        y = hk.Linear(dim * self.num_heads)(x)
        # (batch_idx, seq_idx, head_idx, hidden_dim)
        return y.reshape((*x.shape[:-1], self.num_heads, dim))

    # pylint: disable=invalid-name
    @hk.transparent
    def _multihead_attention(self, k: JaxArray, q: JaxArray, v: JaxArray) -> JaxArray:
        # attn[b, t, s, i]: weight of source position s for target position t
        # in head i, softmax-normalized over the source axis (axis=2).
        attn = jnp.einsum("btij,bsij->btsi", q, k) / jnp.sqrt(self.key_query_dim)
        attn = jax.nn.softmax(attn, axis=2)
        # Weighted sum over sources, heads flattened, then output projection.
        z = jnp.einsum("btsi,bsij->btij", attn, v).reshape(
            q.shape[0], q.shape[1], self.num_heads * self.value_dim
        )
        return hk.Linear(self.feature_dim)(z)

    def __call__(self, key: JaxArray, query: JaxArray, value: JaxArray) -> JaxArray:
        """Attend ``query`` over ``key``/``value``.

        >>> from jdetr.utils import Init
        >>> x = jnp.zeros((2, 3, 4))
        >>> x_ = jnp.zeros((2, 4, 4))
        >>> y = (
        ...     Init(MultiHeadAttentionLayer, feature_dim=5, value_dim=6, num_heads=7)
        ...     .__call__(key=x, query=x_, value=x)
        ... )
        >>> tuple(y.shape)
        (2, 4, 5)
        """
        k = self.multi_head_linear(key, self.key_query_dim)
        q = self.multi_head_linear(query, self.key_query_dim)
        v = self.multi_head_linear(value, self.value_dim)
        return self._multihead_attention(k, q, v)
class DetrMultiHeadAttentionLayer(MultiHeadAttentionLayer):
    """Self-attention where the positional encoding is added to the inputs
    of the key and query projections (but not the value projection), as in
    DETR.

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> y = (
    ...     Init(DetrMultiHeadAttentionLayer, 5, 6, 7, 32)
    ...     .__call__(x, pos_encoding)
    ... )
    >>> tuple(y.shape)
    (2, 16, 5)
    """

    def __call__(self, x: JaxArray, pos_encoding: JaxArray) -> JaxArray:
        # pylint: disable=invalid-name
        # Hoist the shared key/query input so the addition happens only once.
        # k and q still get distinct projection parameters (multi_head_linear
        # instantiates a fresh hk.Linear per call, in the same order as before).
        kq_input = x + pos_encoding
        k = self.multi_head_linear(kq_input, self.key_query_dim)
        q = self.multi_head_linear(kq_input, self.key_query_dim)
        v = self.multi_head_linear(x, self.value_dim)
        return self._multihead_attention(k, q, v)
class DropoutLayer(hk.Module):
    """Dropout that is a no-op when ``is_training`` is False."""

    def __init__(self, dropout_rate: float):
        super().__init__()
        self.dropout_rate = dropout_rate

    def __call__(self, x: JaxArray, is_training: bool) -> JaxArray:
        # BUG FIX: `is_training` was previously ignored, so dropout noise was
        # also injected at inference/evaluation time. Apply dropout only while
        # training; pass the input through unchanged otherwise.
        if not is_training:
            return x
        rng = hk.next_rng_key()
        return hk.dropout(rng, self.dropout_rate, x)
class EncoderLayer(hk.Module):
    """Single transformer-encoder layer.

    Self-attention (positional encoding added to keys/queries) followed by a
    position-wise feed-forward block; each sub-block is wrapped in
    residual-add + dropout + LayerNorm.

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> y = (
    ...     Init(EncoderLayer, feature_dim=4, num_heads=2)
    ...     .__call__(x, pos_encoding, True)
    ... )
    >>> tuple(y.shape)
    (2, 16, 4)
    """

    def __init__(
        self,
        feature_dim: int,
        num_heads: int,
        dropout_rate: float = 0.1,
        feedforward_dim: int = 2048,
    ):
        super().__init__()
        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.dropout_rate = dropout_rate
        self.feedforward_dim = feedforward_dim

    def __call__(
        self, x: JaxArray, pos_encoding: JaxArray, is_training: bool
    ) -> JaxArray:
        # Self-attention sub-block with residual connection.
        y = DetrMultiHeadAttentionLayer(
            self.feature_dim, self.feature_dim, self.num_heads
        )(x, pos_encoding)
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        # TODO Try out with batchnorm as well
        x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
        # Position-wise feed-forward block: expand -> relu6 -> project back.
        y = hk.Linear(self.feedforward_dim)(x)
        y = jax.nn.relu6(y)
        y = DropoutLayer(self.dropout_rate)(y, is_training)
        y = hk.Linear(self.feature_dim)(y)
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
class DecoderLayer(hk.Module):
    """Single transformer-decoder layer, DETR-style.

    Self-attention over the decoder features (query encoding added to
    keys/queries), then cross-attention against the encoder output, then a
    position-wise feed-forward block; each sub-block is wrapped in
    residual-add + dropout + LayerNorm.

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> y = (
    ...     Init(DecoderLayer, feature_dim=4, num_heads=2)
    ...     .__call__(x, x, pos_encoding, pos_encoding, True)
    ... )
    >>> tuple(y.shape)
    (2, 16, 4)
    """

    def __init__(
        self,
        feature_dim: int,
        num_heads: int,
        dropout_rate: float = 0.1,
        feedforward_dim: int = 2048,
    ):
        super().__init__()
        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.dropout_rate = dropout_rate
        self.feedforward_dim = feedforward_dim

    def __call__(
        self,
        encoder_features: JaxArray,
        decoder_features: JaxArray,
        pos_encoding: JaxArray,
        query_encoding: JaxArray,
        is_training: bool,
    ) -> JaxArray:
        # Self-attention over the decoder sequence; query_encoding is added
        # to keys/queries inside DetrMultiHeadAttentionLayer.
        y = DetrMultiHeadAttentionLayer(
            self.feature_dim, self.feature_dim, self.num_heads
        )(decoder_features, query_encoding)
        y = decoder_features + DropoutLayer(self.dropout_rate)(y, is_training)
        x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
        # Cross-attention: queries come from the decoder side (+query
        # encoding), keys from the encoder output (+positional encoding),
        # values are the raw encoder output.
        y = MultiHeadAttentionLayer(self.feature_dim, self.feature_dim, self.num_heads)(
            key=encoder_features + pos_encoding,
            query=x + query_encoding,
            value=encoder_features,
        )
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        x = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
        # Position-wise feed-forward block: expand -> relu6 -> project back.
        y = hk.Linear(self.feedforward_dim)(x)
        y = jax.nn.relu6(y)
        y = DropoutLayer(self.dropout_rate)(y, is_training)
        y = hk.Linear(self.feature_dim)(y)
        y = x + DropoutLayer(self.dropout_rate)(y, is_training)
        return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(y)
class Transformer(hk.Module):
    """Encoder/decoder transformer stack (DETR-style).

    >>> from jdetr.utils import Init
    >>> from jdetr.models.positional_encoding import sinusoidal_encoding
    >>> x = jnp.zeros((2, 16, 4))
    >>> pos_encoding = sinusoidal_encoding(4, 2).reshape(1, 16, 4)
    >>> query_encoding = jnp.zeros((2, 10, 4))
    >>> y = Init(Transformer, 4, 2, 1, 1).__call__(x, pos_encoding, query_encoding, True)
    >>> tuple(y.shape)
    (2, 10, 4)
    """

    def __init__(
        self,
        feature_dim: int,
        num_heads: int,
        num_encoder_layers: int,
        num_decoder_layers: int,
        dropout_rate: float = 0.1,
        feedforward_dim: int = 2048,
    ):
        # Sinusoidal positional encodings require an even feature dimension.
        assert feature_dim % 2 == 0
        super().__init__()
        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers
        self.dropout_rate = dropout_rate
        self.feedforward_dim = feedforward_dim

    def __call__(
        self,
        x: JaxArray,
        pos_encoding: JaxArray,
        query_encoding: JaxArray,
        is_training: bool,
    ) -> JaxArray:
        # Encoder stack: each layer re-adds the positional encoding.
        encoder_features = x
        for _ in range(self.num_encoder_layers):
            encoder_features = EncoderLayer(
                self.feature_dim,
                self.num_heads,
                self.dropout_rate,
                feedforward_dim=self.feedforward_dim,
            )(encoder_features, pos_encoding, is_training)
        # Decoder stack: decoder input starts from zeros; the per-query
        # information is carried entirely by the supplied query encoding.
        decoder_features = jnp.zeros_like(query_encoding)
        for _ in range(self.num_decoder_layers):
            decoder_features = DecoderLayer(
                self.feature_dim,
                self.num_heads,
                self.dropout_rate,
                feedforward_dim=self.feedforward_dim,
            )(
                encoder_features,
                decoder_features,
                pos_encoding,
                query_encoding,
                is_training,
            )
        return decoder_features
# Copyright (c) Latona. All rights reserved.
from StatusJsonPythonModule import StatusJsonRest
from datetime import datetime
import pyaudio
import os
import sys
import wave
from datetime import datetime as dt
from aion.logger_library.LoggerClient import LoggerClient
from six.moves import queue
log = LoggerClient("CaptureAudioFromMic")
OUTPUT_DIR = "file/output"  # output directory, relative to this script
RATE = 16000  # audio sampling rate in Hz (also used as the WAV frame rate)
CHUNK = int(RATE / 1)  # frames per buffer: RATE/1 == one second of audio
class AudioStreaming():
    """Context manager that records mono 16-bit audio from a microphone.

    The PyAudio callback pushes raw frames into an internal queue;
    ``generator()`` drains that queue and yields the frames joined into
    byte chunks until the stream is closed.
    """

    def __init__(self, rate, chunk, device_index):
        self._rate = rate
        self._chunk = chunk
        self._buff = queue.Queue()  # thread-safe buffer filled by the callback
        self._device_index = device_index
        self.closed = True

    def __enter__(self):
        self._audio_interface = pyaudio.PyAudio()
        if self._audio_interface.get_device_count() - 1 < self._device_index:
            log.print("this device is not exist", 1)
            # BUG FIX: this used to `return None`, so `with ... as stream`
            # bound None and the caller crashed later with an opaque
            # AttributeError. Fail fast with a clear error instead,
            # releasing PortAudio first.
            self._audio_interface.terminate()
            raise RuntimeError(
                "audio input device index {} does not exist".format(
                    self._device_index))
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            stream_callback=self._fill_buffer,
            input_device_index=self._device_index,
        )
        self.closed = False
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # (param renamed from `type` to avoid shadowing the builtin)
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Unblock a consumer waiting in generator() and signal end-of-stream.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """PyAudio stream callback: enqueue raw frames and keep streaming."""
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self):
        """Yield joined byte chunks until the stream is closed.

        Blocks for at least one chunk, then greedily drains whatever else is
        already queued so consumers see fewer, larger chunks.
        """
        while not self.closed:
            chunk = self._buff.get()
            if chunk is None:  # end-of-stream sentinel from __exit__
                return
            data = [chunk]
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)

    def output_wave_file(self, audio_data, output_path):
        """Write ``audio_data`` to a timestamped mono 16-bit WAV file.

        Returns a tuple (output_file_path, ISO-format timestamp).
        """
        date = datetime.now()
        # Millisecond resolution: strip the last 3 microsecond digits.
        now_time_for_file_name = date.strftime("%Y%m%d%H%M%S%f")[:-3]
        now_time_for_metadata = date.isoformat()
        output_file_name = now_time_for_file_name + ".wav"
        output_file_path = os.path.join(output_path, output_file_name)
        with wave.open(output_file_path, 'wb') as wav:
            wav.setnchannels(1)
            wav.setsampwidth(2)  # 2 bytes == 16-bit samples (paInt16)
            wav.setframerate(self._rate)
            wav.writeframes(audio_data)
        return (output_file_path, now_time_for_metadata)
@log.function_log
def main():
    """Record microphone audio forever, emitting one WAV + status JSON per chunk."""
    # An optional CLI argument selects the audio input device (default: 0).
    args = sys.argv
    device_index = int(args[1]) if len(args) == 2 else 0

    script_dir = os.path.dirname(os.path.join(os.getcwd(), __file__))
    output_path = os.path.join(script_dir, OUTPUT_DIR)
    os.makedirs(output_path, exist_ok=True)

    status = StatusJsonRest.StatusJsonRest(os.getcwd(), __file__)
    status.initializeStatusJson()
    status.setNextService(
        "SpeechToTextByStreaming",
        "/home/latona/poseidon/Runtime/speech-to-text-by-streaming",
        "python", "main.py")

    print(">>> start audio recording")
    with AudioStreaming(RATE, CHUNK, device_index) as stream:
        for audio_data in stream.generator():
            status.resetOutputJsonFile()
            wav_path, timestamp = stream.output_wave_file(audio_data, output_path)
            status.setOutputFileData(wav_path, "file", "audio-wav-mono")
            status.setMetadataValue("timestamp", timestamp)
            status.outputJsonFile()
            log.print("> Success: output audio (path: {}, time:{})"
                      .format(wav_path, timestamp))


if __name__ == "__main__":
    main()
# Copyright (c) Latona. All rights reserved.
from StatusJsonPythonModule import StatusJsonRest
from datetime import datetime
import pyaudio
import os
import sys
import wave
from datetime import datetime as dt
from aion.logger_library.LoggerClient import LoggerClient
from six.moves import queue
log = LoggerClient("CaptureAudioFromMic")
OUTPUT_DIR = "file/output"  # output directory, relative to this script
RATE = 16000  # audio sampling rate in Hz (also used as the WAV frame rate)
CHUNK = int(RATE / 1)  # frames per buffer: RATE/1 == one second of audio
class AudioStreaming():
    """Context manager that records mono 16-bit audio from a microphone.

    The PyAudio callback pushes raw frames into an internal queue;
    ``generator()`` drains that queue and yields the frames joined into
    byte chunks until the stream is closed.
    """

    def __init__(self, rate, chunk, device_index):
        self._rate = rate
        self._chunk = chunk
        self._buff = queue.Queue()  # thread-safe buffer filled by the callback
        self._device_index = device_index
        self.closed = True

    def __enter__(self):
        self._audio_interface = pyaudio.PyAudio()
        if self._audio_interface.get_device_count() - 1 < self._device_index:
            log.print("this device is not exist", 1)
            # BUG FIX: this used to `return None`, so `with ... as stream`
            # bound None and the caller crashed later with an opaque
            # AttributeError. Fail fast with a clear error instead,
            # releasing PortAudio first.
            self._audio_interface.terminate()
            raise RuntimeError(
                "audio input device index {} does not exist".format(
                    self._device_index))
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            stream_callback=self._fill_buffer,
            input_device_index=self._device_index,
        )
        self.closed = False
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # (param renamed from `type` to avoid shadowing the builtin)
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Unblock a consumer waiting in generator() and signal end-of-stream.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """PyAudio stream callback: enqueue raw frames and keep streaming."""
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self):
        """Yield joined byte chunks until the stream is closed.

        Blocks for at least one chunk, then greedily drains whatever else is
        already queued so consumers see fewer, larger chunks.
        """
        while not self.closed:
            chunk = self._buff.get()
            if chunk is None:  # end-of-stream sentinel from __exit__
                return
            data = [chunk]
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)

    def output_wave_file(self, audio_data, output_path):
        """Write ``audio_data`` to a timestamped mono 16-bit WAV file.

        Returns a tuple (output_file_path, ISO-format timestamp).
        """
        date = datetime.now()
        # Millisecond resolution: strip the last 3 microsecond digits.
        now_time_for_file_name = date.strftime("%Y%m%d%H%M%S%f")[:-3]
        now_time_for_metadata = date.isoformat()
        output_file_name = now_time_for_file_name + ".wav"
        output_file_path = os.path.join(output_path, output_file_name)
        with wave.open(output_file_path, 'wb') as wav:
            wav.setnchannels(1)
            wav.setsampwidth(2)  # 2 bytes == 16-bit samples (paInt16)
            wav.setframerate(self._rate)
            wav.writeframes(audio_data)
        return (output_file_path, now_time_for_metadata)
@log.function_log
def main():
    """Record microphone audio forever, emitting one WAV + status JSON per chunk."""
    # An optional CLI argument selects the audio input device (default: 0).
    args = sys.argv
    device_index = int(args[1]) if len(args) == 2 else 0

    script_dir = os.path.dirname(os.path.join(os.getcwd(), __file__))
    output_path = os.path.join(script_dir, OUTPUT_DIR)
    os.makedirs(output_path, exist_ok=True)

    status = StatusJsonRest.StatusJsonRest(os.getcwd(), __file__)
    status.initializeStatusJson()
    status.setNextService(
        "SpeechToTextByStreaming",
        "/home/latona/poseidon/Runtime/speech-to-text-by-streaming",
        "python", "main.py")

    print(">>> start audio recording")
    with AudioStreaming(RATE, CHUNK, device_index) as stream:
        for audio_data in stream.generator():
            status.resetOutputJsonFile()
            wav_path, timestamp = stream.output_wave_file(audio_data, output_path)
            status.setOutputFileData(wav_path, "file", "audio-wav-mono")
            status.setMetadataValue("timestamp", timestamp)
            status.outputJsonFile()
            log.print("> Success: output audio (path: {}, time:{})"
                      .format(wav_path, timestamp))


if __name__ == "__main__":
    main()
import numpy as np
import pandas as pd
import pytest
from asdf import ValidationError
from weldx import Q_, TimeSeries
from weldx.asdf.types import WxSyntaxError
from weldx.asdf.util import write_buffer, write_read_buffer
from weldx.asdf.validators import _custom_shape_validator
from weldx.tags.debug.test_property_tag import PropertyTagTestClass
from weldx.tags.debug.test_shape_validator import ShapeValidatorTestClass
from weldx.tags.debug.test_unit_validator import UnitValidatorTestClass
from weldx.util import compare_nested
@pytest.mark.parametrize(
    "test_input",
    [PropertyTagTestClass()],
)
def test_property_tag_validator(test_input):
    """A default PropertyTagTestClass must round-trip without validation errors."""
    write_read_buffer({"root_node": test_input})
@pytest.mark.parametrize(
    "test_input,err",
    [
        (PropertyTagTestClass(prop3=pd.Timedelta(2, "s")), ValidationError),
        (PropertyTagTestClass(prop3="STRING"), ValidationError),
    ],
)
def test_property_tag_validator_exceptions(test_input, err):
    """Wrong types for prop3 must raise during the serialization round-trip."""
    with pytest.raises(err):
        write_read_buffer({"root_node": test_input})
def _val(list_test, list_expected):
    """Run the shape validator and report success as a bool.

    A plain list is first wrapped into an instance dict under the "shape" key.
    """
    instance = {"shape": list_test} if isinstance(list_test, list) else list_test
    return isinstance(_custom_shape_validator(instance, list_expected), dict)
@pytest.mark.parametrize(
    "shape, exp",
    [
        # exact dimension matches
        ([3], [3]),
        ([2, 4, 5], [2, 4, 5]),
        # "..." consumes any number of dimensions; None matches any single one
        ([1, 2, 3], ["..."]),
        ([1, 2], [1, 2, "..."]),
        ([1, 2], ["...", 1, 2]),
        ([1, 2, 3], [1, 2, None]),
        ([1, 2, 3], [None, 2, 3]),
        ([1], [1, "..."]),
        ([1, 2, 3, 4, 5], [1, "..."]),
        ([1, 2, 3, 4, 5], ["...", 4, 5]),
        # "(x)" marks an optional dimension; "n" is a free variable
        ([1, 2], [1, 2, "(3)"]),
        ([1, 2], [1, 2, "(n)"]),
        ([1, 2], [1, 2, "(2)", "(3)"]),
        ([2, 3], ["(1)", 2, 3]),
        ([1, 2, 3], ["(1)", 2, 3]),
        ([2, 3], ["(1~3)", 2, 3]),
        ([2, 2, 3], ["(1~3)", 2, 3]),
        # "a~b" is an inclusive range; either bound may be omitted
        ([1, 2, 3], [1, "1~3", 3]),
        ([1, 2, 3], [1, "1~", 3]),
        ([1, 2, 3], [1, "~3", 3]),
        ([1, 2, 3], [1, "~", 3]),
        ([1, 200, 3], [1, "~", 3]),
        ([1, 2, 3], [1, 2, "(~)"]),
        ([1, 2, 300], [1, 2, "(~)"]),
        ([1, 2, 3], [1, "(n)", "..."]),
        # a scalar validates against shape [1]
        (1.0, [1]),
    ],
)
def test_shape_validator_syntax2(shape, exp):
    """Shapes satisfying their expected-shape expression must validate."""
    assert _val(shape, exp)
@pytest.mark.parametrize(
    "shape, exp, err",
    [
        # shape/expression mismatches -> ValidationError
        ([2, 2, 3], [1, "..."], ValidationError),
        ([2, 2, 3], ["...", 1], ValidationError),
        ([1], [1, 2], ValidationError),
        ([1, 2], [1], ValidationError),
        ([1, 2], [3, 2], ValidationError),
        ([1], [1, "~"], ValidationError),
        ([1], ["~", 1], ValidationError),
        ([1, 2, 3], [1, 2, "(4)"], ValidationError),
        ([1, 2, 3], ["(2)", 2, 3], ValidationError),
        ([1, 2], [1, "4~8"], ValidationError),
        ([1, 9], [1, "4~8"], ValidationError),
        ([1, 2], [1, "(4~8)"], ValidationError),
        ([1, 9], [1, "(4~8)"], ValidationError),
        (1.0, [2], ValidationError),
        ([1, 2, 3, 4], [1, 2, "n", "n"], ValidationError),
        # malformed expected-shape expressions -> WxSyntaxError
        ([1, 2], [1, "~", "(...)"], WxSyntaxError),
        ([1, 2], [1, "(2)", 3], WxSyntaxError),
        ([1, 2], [1, 2, "((3))"], WxSyntaxError),
        ([1, 2], [1, 2, "3)"], WxSyntaxError),
        ([1, 2], [1, 2, "*3"], WxSyntaxError),
        ([1, 2], [1, 2, "(3"], WxSyntaxError),
        ([1, 2], [1, 2, "(3)3"], WxSyntaxError),
        ([1, 2], [1, 2, "2(3)"], WxSyntaxError),
        ([1, 2], [1, "...", 2], WxSyntaxError),
        ([1, 2], ["(1)", "..."], WxSyntaxError),
        ([1, 2], [1, "4~1"], WxSyntaxError),
        ([-1, -2], [-1, -2], WxSyntaxError),
        ([-1, 2], [1, 2], WxSyntaxError),
        ([1, 2], [-1, 2], WxSyntaxError),
        ([1, 2], [1, 2, "(-3)"], WxSyntaxError),
        ([1, 2], [1, 2, "(-3~-1)"], WxSyntaxError),
        ([1, 2], [1, 2, "(-3~1)"], WxSyntaxError),
        ([1, 2, 1], ["(-3~1)", 2, 1], WxSyntaxError),
        ([1, 2], [1, "(9~m)"], WxSyntaxError),
        ([1, 2], [1, "(n~9)"], WxSyntaxError),
        ([1, 2], [1, "(n~m)"], WxSyntaxError),
        ([1, 2], [1, "(1~3~5)"], WxSyntaxError),
        ("a string", [1, "(1~3~5)"], ValidationError),
        ([1, 2], "a string", WxSyntaxError),
    ],
)
def test_shape_validation_error_exception(shape, exp, err):
    """Mismatches raise ValidationError; malformed expressions raise WxSyntaxError."""
    with pytest.raises(err):
        assert _val(shape, exp)
@pytest.mark.parametrize(
    "test_input",
    [
        ShapeValidatorTestClass(),
        ShapeValidatorTestClass(time_prop=pd.date_range("2020", freq="D", periods=9)),
        ShapeValidatorTestClass(optional_prop=np.ones((1, 2, 3))),
        ShapeValidatorTestClass(optional_prop="no shape"),
        ShapeValidatorTestClass(
            nested_prop={
                "p1": np.ones((10, 8, 6, 4, 2)),
                "p2": np.ones((9, 7, 5, 3, 1)),
                "p3": np.ones((1, 2, 3)),
            }
        ),
        ShapeValidatorTestClass(nested_prop={"p1": np.ones((10, 8, 6, 4, 2))}),  # no p2
    ],
)
def test_shape_validator(test_input):
    """Valid shapes must survive an ASDF round-trip unchanged."""
    restored = write_read_buffer({"root": test_input})["root"]
    # Compare in both directions so neither side may carry extra keys.
    for left, right in ((test_input, restored), (restored, test_input)):
        assert compare_nested(left.__dict__, right.__dict__)
@pytest.mark.parametrize(
    "test_input",
    [
        ShapeValidatorTestClass(prop4=np.ones((2, 3, 5, 7, 9))),  # mismatch a - prop5
        ShapeValidatorTestClass(prop2=np.ones((5, 2, 1))),  # mismatch n - prop1
        ShapeValidatorTestClass(optional_prop=np.ones((3, 2, 9))),  # wrong optional
        ShapeValidatorTestClass(time_prop=pd.date_range("2020", freq="D", periods=3)),
        ShapeValidatorTestClass(quantity=Q_([0, 3], "s")),  # mismatch shape [1]
        ShapeValidatorTestClass(
            timeseries=TimeSeries(
                Q_([0, 3], "m"), Q_([0, 1], "s")
            )  # mismatch shape [1]
        ),
    ],
)
def test_shape_validator_exceptions(test_input):
    """Shape-rule violations must fail validation on the round-trip."""
    with pytest.raises(ValidationError):
        write_read_buffer({"root": test_input})
@pytest.mark.parametrize(
    "test",
    [
        UnitValidatorTestClass(),
        UnitValidatorTestClass(length_prop=Q_(1, "inch")),
    ],
)
def test_unit_validator(test):
    """Unit-checked properties must round-trip through ASDF unchanged."""
    data = write_read_buffer({"root_node": test})
    assert isinstance(data, dict)
    restored = data["root_node"]
    # Scalar quantities compare directly; arrays need an element-wise check.
    for attr in ("length_prop", "velocity_prop", "simple_prop"):
        assert getattr(restored, attr) == getattr(test, attr)
    assert np.all(restored.current_prop == test.current_prop)
    assert np.all(restored.nested_prop["q1"] == test.nested_prop["q1"])
    assert restored.nested_prop["q2"] == test.nested_prop["q2"]
@pytest.mark.parametrize(
    "test",
    [
        UnitValidatorTestClass(
            length_prop=Q_(1, "s"),  # wrong unit
        ),
        UnitValidatorTestClass(
            velocity_prop=Q_(2, "liter"),  # wrong unit
        ),
        UnitValidatorTestClass(
            current_prop=Q_(np.eye(2, 2), "V"),  # wrong unit
        ),
        UnitValidatorTestClass(
            nested_prop=dict(q1=Q_(np.eye(3, 3), "m"), q2=Q_(2, "V")),  # wrong unit
        ),
        UnitValidatorTestClass(
            simple_prop={"value": float(3), "unit": "s"},  # wrong unit
        ),
    ],
)
def test_unit_validator_exception(test):
    """Wrong units must already fail on write (no read-back needed)."""
    with pytest.raises(ValidationError):
        write_buffer({"root_node": test})
import pandas as pd
import pytest
from asdf import ValidationError
from weldx import Q_, TimeSeries
from weldx.asdf.types import WxSyntaxError
from weldx.asdf.util import write_buffer, write_read_buffer
from weldx.asdf.validators import _custom_shape_validator
from weldx.tags.debug.test_property_tag import PropertyTagTestClass
from weldx.tags.debug.test_shape_validator import ShapeValidatorTestClass
from weldx.tags.debug.test_unit_validator import UnitValidatorTestClass
from weldx.util import compare_nested
@pytest.mark.parametrize(
    "test_input",
    [PropertyTagTestClass()],
)
def test_property_tag_validator(test_input):
    """A default PropertyTagTestClass must round-trip without validation errors."""
    write_read_buffer({"root_node": test_input})
@pytest.mark.parametrize(
    "test_input,err",
    [
        (PropertyTagTestClass(prop3=pd.Timedelta(2, "s")), ValidationError),
        (PropertyTagTestClass(prop3="STRING"), ValidationError),
    ],
)
def test_property_tag_validator_exceptions(test_input, err):
    """Wrong types for prop3 must raise during the serialization round-trip."""
    with pytest.raises(err):
        write_read_buffer({"root_node": test_input})
def _val(list_test, list_expected):
    """Run the shape validator and report success as a bool.

    A plain list is first wrapped into an instance dict under the "shape" key.
    """
    instance = {"shape": list_test} if isinstance(list_test, list) else list_test
    return isinstance(_custom_shape_validator(instance, list_expected), dict)
@pytest.mark.parametrize(
    "shape, exp",
    [
        # exact dimension matches
        ([3], [3]),
        ([2, 4, 5], [2, 4, 5]),
        # "..." consumes any number of dimensions; None matches any single one
        ([1, 2, 3], ["..."]),
        ([1, 2], [1, 2, "..."]),
        ([1, 2], ["...", 1, 2]),
        ([1, 2, 3], [1, 2, None]),
        ([1, 2, 3], [None, 2, 3]),
        ([1], [1, "..."]),
        ([1, 2, 3, 4, 5], [1, "..."]),
        ([1, 2, 3, 4, 5], ["...", 4, 5]),
        # "(x)" marks an optional dimension; "n" is a free variable
        ([1, 2], [1, 2, "(3)"]),
        ([1, 2], [1, 2, "(n)"]),
        ([1, 2], [1, 2, "(2)", "(3)"]),
        ([2, 3], ["(1)", 2, 3]),
        ([1, 2, 3], ["(1)", 2, 3]),
        ([2, 3], ["(1~3)", 2, 3]),
        ([2, 2, 3], ["(1~3)", 2, 3]),
        # "a~b" is an inclusive range; either bound may be omitted
        ([1, 2, 3], [1, "1~3", 3]),
        ([1, 2, 3], [1, "1~", 3]),
        ([1, 2, 3], [1, "~3", 3]),
        ([1, 2, 3], [1, "~", 3]),
        ([1, 200, 3], [1, "~", 3]),
        ([1, 2, 3], [1, 2, "(~)"]),
        ([1, 2, 300], [1, 2, "(~)"]),
        ([1, 2, 3], [1, "(n)", "..."]),
        # a scalar validates against shape [1]
        (1.0, [1]),
    ],
)
def test_shape_validator_syntax2(shape, exp):
    """Shapes satisfying their expected-shape expression must validate."""
    assert _val(shape, exp)
@pytest.mark.parametrize(
    "shape, exp, err",
    [
        # shape/expression mismatches -> ValidationError
        ([2, 2, 3], [1, "..."], ValidationError),
        ([2, 2, 3], ["...", 1], ValidationError),
        ([1], [1, 2], ValidationError),
        ([1, 2], [1], ValidationError),
        ([1, 2], [3, 2], ValidationError),
        ([1], [1, "~"], ValidationError),
        ([1], ["~", 1], ValidationError),
        ([1, 2, 3], [1, 2, "(4)"], ValidationError),
        ([1, 2, 3], ["(2)", 2, 3], ValidationError),
        ([1, 2], [1, "4~8"], ValidationError),
        ([1, 9], [1, "4~8"], ValidationError),
        ([1, 2], [1, "(4~8)"], ValidationError),
        ([1, 9], [1, "(4~8)"], ValidationError),
        (1.0, [2], ValidationError),
        ([1, 2, 3, 4], [1, 2, "n", "n"], ValidationError),
        # malformed expected-shape expressions -> WxSyntaxError
        ([1, 2], [1, "~", "(...)"], WxSyntaxError),
        ([1, 2], [1, "(2)", 3], WxSyntaxError),
        ([1, 2], [1, 2, "((3))"], WxSyntaxError),
        ([1, 2], [1, 2, "3)"], WxSyntaxError),
        ([1, 2], [1, 2, "*3"], WxSyntaxError),
        ([1, 2], [1, 2, "(3"], WxSyntaxError),
        ([1, 2], [1, 2, "(3)3"], WxSyntaxError),
        ([1, 2], [1, 2, "2(3)"], WxSyntaxError),
        ([1, 2], [1, "...", 2], WxSyntaxError),
        ([1, 2], ["(1)", "..."], WxSyntaxError),
        ([1, 2], [1, "4~1"], WxSyntaxError),
        ([-1, -2], [-1, -2], WxSyntaxError),
        ([-1, 2], [1, 2], WxSyntaxError),
        ([1, 2], [-1, 2], WxSyntaxError),
        ([1, 2], [1, 2, "(-3)"], WxSyntaxError),
        ([1, 2], [1, 2, "(-3~-1)"], WxSyntaxError),
        ([1, 2], [1, 2, "(-3~1)"], WxSyntaxError),
        ([1, 2, 1], ["(-3~1)", 2, 1], WxSyntaxError),
        ([1, 2], [1, "(9~m)"], WxSyntaxError),
        ([1, 2], [1, "(n~9)"], WxSyntaxError),
        ([1, 2], [1, "(n~m)"], WxSyntaxError),
        ([1, 2], [1, "(1~3~5)"], WxSyntaxError),
        ("a string", [1, "(1~3~5)"], ValidationError),
        ([1, 2], "a string", WxSyntaxError),
    ],
)
def test_shape_validation_error_exception(shape, exp, err):
    """Mismatches raise ValidationError; malformed expressions raise WxSyntaxError."""
    with pytest.raises(err):
        assert _val(shape, exp)
@pytest.mark.parametrize(
    "test_input",
    [
        ShapeValidatorTestClass(),
        ShapeValidatorTestClass(time_prop=pd.date_range("2020", freq="D", periods=9)),
        ShapeValidatorTestClass(optional_prop=np.ones((1, 2, 3))),
        ShapeValidatorTestClass(optional_prop="no shape"),
        ShapeValidatorTestClass(
            nested_prop={
                "p1": np.ones((10, 8, 6, 4, 2)),
                "p2": np.ones((9, 7, 5, 3, 1)),
                "p3": np.ones((1, 2, 3)),
            }
        ),
        ShapeValidatorTestClass(nested_prop={"p1": np.ones((10, 8, 6, 4, 2))}),  # no p2
    ],
)
def test_shape_validator(test_input):
    """Valid shapes must survive an ASDF round-trip unchanged."""
    restored = write_read_buffer({"root": test_input})["root"]
    # Compare in both directions so neither side may carry extra keys.
    for left, right in ((test_input, restored), (restored, test_input)):
        assert compare_nested(left.__dict__, right.__dict__)
@pytest.mark.parametrize(
    "test_input",
    [
        ShapeValidatorTestClass(prop4=np.ones((2, 3, 5, 7, 9))),  # mismatch a - prop5
        ShapeValidatorTestClass(prop2=np.ones((5, 2, 1))),  # mismatch n - prop1
        ShapeValidatorTestClass(optional_prop=np.ones((3, 2, 9))),  # wrong optional
        ShapeValidatorTestClass(time_prop=pd.date_range("2020", freq="D", periods=3)),
        ShapeValidatorTestClass(quantity=Q_([0, 3], "s")),  # mismatch shape [1]
        ShapeValidatorTestClass(
            timeseries=TimeSeries(
                Q_([0, 3], "m"), Q_([0, 1], "s")
            )  # mismatch shape [1]
        ),
    ],
)
def test_shape_validator_exceptions(test_input):
    """Shape-rule violations must fail validation on the round-trip."""
    with pytest.raises(ValidationError):
        write_read_buffer({"root": test_input})
@pytest.mark.parametrize(
    "test",
    [
        UnitValidatorTestClass(),
        UnitValidatorTestClass(length_prop=Q_(1, "inch")),
    ],
)
def test_unit_validator(test):
    """Unit-checked properties must round-trip through ASDF unchanged."""
    data = write_read_buffer({"root_node": test})
    assert isinstance(data, dict)
    restored = data["root_node"]
    # Scalar quantities compare directly; arrays need an element-wise check.
    for attr in ("length_prop", "velocity_prop", "simple_prop"):
        assert getattr(restored, attr) == getattr(test, attr)
    assert np.all(restored.current_prop == test.current_prop)
    assert np.all(restored.nested_prop["q1"] == test.nested_prop["q1"])
    assert restored.nested_prop["q2"] == test.nested_prop["q2"]
@pytest.mark.parametrize(
    "test",
    [
        UnitValidatorTestClass(
            length_prop=Q_(1, "s"),  # wrong unit
        ),
        UnitValidatorTestClass(
            velocity_prop=Q_(2, "liter"),  # wrong unit
        ),
        UnitValidatorTestClass(
            current_prop=Q_(np.eye(2, 2), "V"),  # wrong unit
        ),
        UnitValidatorTestClass(
            nested_prop=dict(q1=Q_(np.eye(3, 3), "m"), q2=Q_(2, "V")),  # wrong unit
        ),
        UnitValidatorTestClass(
            simple_prop={"value": float(3), "unit": "s"},  # wrong unit
        ),
    ],
)
def test_unit_validator_exception(test):
    """Wrong units must already fail on write (no read-back needed)."""
    with pytest.raises(ValidationError):
        write_buffer({"root_node": test})
import datetime as dt
import json
from typing import Dict, List, Optional
from _autoclimate.state import State
from _autoclimate.utils import climate_name
from adplus import Hass
"""
Laston - create new sensors that track the last time the climate
was "on" as defined by autoclimate entity_rules.
sensor.autoclimate_gym_laston = <datetime>
"""
class Laston:
    """Creates and maintains ``sensor.<app>_<climate>_laston`` sensors that
    record the last time each climate was "on" (per autoclimate rules)."""

    def __init__(
        self,
        hass: "Hass",
        config: dict,
        appname: str,
        climates: list,
        appstate_entity: str,
        test_mode: bool,
    ):
        self.hass = hass
        self.aconfig = config
        self.appname = appname
        self.test_mode = test_mode
        self.climates = climates
        self.appstate_entity = appstate_entity
        self.climate_states: Dict[str, "TurnonState"] = {}
        # Defer state setup until the scheduler is running.
        self.hass.run_in(self.initialize_states, 0)

    def initialize_states(self, kwargs):
        """Build a TurnonState per climate, then create sensors and listeners."""
        for climate in self.climates:
            self.climate_states[climate] = TurnonState(self.hass, self.aconfig, climate)
        # After initialization
        self.hass.run_in(self.create_laston_sensors, 0)
        self.hass.run_in(self.init_laston_listeners, 0.1)

    def laston_sensor_name(self, climate):
        return self.laston_sensor_name_static(self.appname, climate)

    @staticmethod
    def laston_sensor_name_static(appname, climate):
        """sensor.<appname>_<climate short name>_laston"""
        return f"sensor.{appname}_{climate_name(climate)}_laston"

    def create_laston_sensors(self, kwargs):
        """Create one laston sensor per climate with its current value."""
        # NOTE(review): return value is discarded and get_history_data has no
        # visible side effects here — confirm whether this call is needed.
        self.get_history_data()
        for climate in self.climates:
            laston_sensor_name = self.laston_sensor_name(climate)
            laston_date = self.climate_states[climate].last_turned_on
            self.hass.update_state(
                laston_sensor_name,
                state=laston_date,
                attributes={
                    # BUG FIX: key was misspelled "freindly_name", so Home
                    # Assistant never picked up the friendly name.
                    "friendly_name": f"{climate_name(climate)} - Last date climate was turned on",
                    "device_class": "timestamp",
                },
            )
            self.hass.log(
                f"Created sensor: {laston_sensor_name}. Initial state: {laston_date}"
            )

    def init_laston_listeners(self, kwargs):
        """Listen to every climate entity for full state-object changes."""
        for climate in self.climates:
            self.hass.listen_state(
                self.update_laston_sensors, entity=climate, attribute="all"
            )

    def update_laston_sensors(self, climate, attribute, old, new, kwargs):
        # Listener for climate entity: feed the new state object into the
        # tracker, then refresh the sensor only when the value changed.
        # (str() turns None into "None" — preserved for sensor compatibility.)
        self.climate_states[climate].add_state(new)
        laston_date = str(self.climate_states[climate].last_turned_on)
        sensor_name = self.laston_sensor_name(climate)
        sensor_state = self.hass.get_state(sensor_name)
        if sensor_state != laston_date:
            self.hass.update_state(sensor_name, state=laston_date)
            self.hass.log(
                f"Updated state for {sensor_name}: {laston_date}. Previous: {sensor_state}"
            )

    def get_history_data(self, days: int = 10) -> List:
        """Return appstate-entity history, newest record first ([] on no data)."""
        data: List = self.hass.get_history(entity_id=self.appstate_entity, days=days)  # type: ignore
        if not data or len(data) == 0:
            self.hass.warn(
                f"get_history returned no data for entity: {self.appstate_entity}. Exiting"
            )
            return []
        edata = data[0]
        # the get_history() fn doesn't say it guarantees sort (though it appears to be)
        edata = list(reversed(sorted(edata, key=lambda rec: rec["last_updated"])))
        return edata

    def find_laston_from_history(self, climate: str, history: List):
        """Most recent record (given newest-first history) whose per-climate
        attribute is "on"; None when no such record exists."""
        key = f"{climate_name(climate)}_state"
        retval = None
        for rec in history:
            if rec["attributes"].get(key) == "on":
                retval = rec["last_changed"]
                break
        return retval
class TurnonState:
    """Tracks when a climate entity last transitioned from "off" to "on".

    .__init__() - initialize from history
    .add_state(stateobj) - add stateobj
    .last_turned_on [property] -> None, datetime
        returns the last time a climate went from "off" to "on"
        (based on autoclimate config)

    This requires the current state, the previous state, and the state
    before that.
    """

    def __init__(self, hass: "Hass", config: dict, climate_entity: str) -> None:
        self.hass = hass
        self.config = config[climate_entity]
        self.climate_entity = climate_entity
        # states: "on", "off" (Ignore "offline")
        self.curr: Optional[str] = None
        self.curr_m1: Optional[str] = None  # curr minus t1 ie: prev
        self.curr_m2: Optional[str] = None  # curr minus t2 ie: prev prev
        self._curr_dt: Optional[dt.datetime] = None
        self._curr_dt_m1: Optional[dt.datetime] = None
        self._initialize_from_history()

    def add_state(self, stateobj: dict):
        """Must be added in chronologically increasing order!"""
        last_updated = stateobj.get("last_updated")
        if isinstance(last_updated, str):
            last_updated = dt.datetime.fromisoformat(stateobj["last_updated"])
        if self._curr_dt and last_updated < self._curr_dt:
            raise RuntimeError(
                f"Adding state earlier than lastest saved state. Can only add states in increasing datetime. stateobj: {json.dumps(stateobj)}"
            )
        state = self.entity_state(stateobj)
        assert state in ["on", "off", "offline", "error_off"]
        # Repeated states and offline gaps are not transitions - ignore them.
        if state == self.curr or state == "offline":
            return
        else:
            self.curr_m2 = self.curr_m1
            self.curr_m1 = self.curr
            self.curr = state
            self._curr_dt_m1 = self._curr_dt
            self._curr_dt = last_updated

    def entity_state(self, stateobj: dict) -> str:
        """Return summarized state based on config: on, off, offline """
        return State.offstate(self.climate_entity, stateobj, self.config, self.hass)[0]

    @property
    def last_turned_on(self) -> Optional[dt.datetime]:
        # Currently on after being off -> the "on" timestamp; just turned
        # off after an off->on cycle -> the previous ("on") timestamp.
        if self.curr == "on" and self.curr_m1 == "off":
            return self._curr_dt
        elif self.curr == "off" and self.curr_m1 == "on" and self.curr_m2 == "off":
            return self._curr_dt_m1
        else:
            return None

    def _initialize_from_history(self):
        history = self._get_history_data()
        for stateobj in history:
            self.add_state(stateobj)

    def _get_history_data(self, days: int = 10) -> List:
        """
        returns state history for self.climate_entity
        **IN CHRONOLOGICAL ORDER**
        """
        data: List = self.hass.get_history(entity_id=self.climate_entity, days=days)  # type: ignore
        if not data or len(data) == 0:
            self.hass.warn(
                f"get_history returned no data for entity: {self.climate_entity}. Exiting"
            )
            return []
        edata = data[0]
        # the get_history() fn doesn't say it guarantees sort (though it appears to be)
        edata = list(sorted(edata, key=lambda rec: rec["last_updated"]))
        return edata

    def __str__(self):
        def dtstr(val):
            # BUG FIX: timestamps can arrive as ISO strings (the old code
            # only had a stray debug print("here") here and then crashed on
            # val.strftime). Parse strings into datetimes first.
            if isinstance(val, str):
                val = dt.datetime.fromisoformat(val)
            return "None " if not val else val.strftime("%y/%m/%d %H:%M:%S")

        return f"TurnOnState: {self.climate_entity:35} **{dtstr(self.last_turned_on)}** - {self.curr} - {self.curr_m1} - {self.curr_m2} - {dtstr(self._curr_dt)} - {dtstr(self._curr_dt_m1)}"
import json
from typing import Dict, List, Optional
from _autoclimate.state import State
from _autoclimate.utils import climate_name
from adplus import Hass
"""
Laston - create new sensors that track the last time the climate
was "on" as defined by autoclimate entity_rules.
sensor.autoclimate_gym_laston = <datetime>
"""
class Laston:
    """Create and maintain "last on" sensors for each climate entity.

    For every configured climate, publishes a sensor such as
    sensor.autoclimate_gym_laston holding the datetime the climate last
    went from "off" to "on" (as defined by autoclimate entity_rules).
    """

    def __init__(
        self,
        hass: Hass,
        config: dict,
        appname: str,
        climates: list,
        appstate_entity: str,
        test_mode: bool,
    ):
        self.hass = hass
        self.aconfig = config
        self.appname = appname
        self.test_mode = test_mode
        self.climates = climates
        self.appstate_entity = appstate_entity

        self.climate_states: Dict[str, TurnonState] = {}
        self.hass.run_in(self.initialize_states, 0)

    def initialize_states(self, kwargs) -> None:
        """Build a TurnonState per climate, then schedule sensor creation."""
        for climate in self.climates:
            self.climate_states[climate] = TurnonState(self.hass, self.aconfig, climate)

        # After initialization
        self.hass.run_in(self.create_laston_sensors, 0)
        self.hass.run_in(self.init_laston_listeners, 0.1)

    def laston_sensor_name(self, climate) -> str:
        """Sensor entity id for *climate*, using this app's name."""
        return self.laston_sensor_name_static(self.appname, climate)

    @staticmethod
    def laston_sensor_name_static(appname, climate) -> str:
        """Sensor entity id, e.g. sensor.<appname>_<climate>_laston."""
        return f"sensor.{appname}_{climate_name(climate)}_laston"

    def create_laston_sensors(self, kwargs) -> None:
        """Publish one laston sensor per climate with its initial value."""
        self.get_history_data()
        for climate in self.climates:
            laston_sensor_name = self.laston_sensor_name(climate)
            laston_date = self.climate_states[climate].last_turned_on
            self.hass.update_state(
                laston_sensor_name,
                state=laston_date,
                attributes={
                    # Fixed typo: was "freindly_name", which Home Assistant
                    # would not recognize as the friendly-name attribute.
                    "friendly_name": f"{climate_name(climate)} - Last date climate was turned on",
                    "device_class": "timestamp",
                },
            )
            self.hass.log(
                f"Created sensor: {laston_sensor_name}. Initial state: {laston_date}"
            )

    def init_laston_listeners(self, kwargs) -> None:
        """Listen for full state changes on every climate entity."""
        for climate in self.climates:
            self.hass.listen_state(
                self.update_laston_sensors, entity=climate, attribute="all"
            )

    def update_laston_sensors(self, climate, attribute, old, new, kwargs) -> None:
        """State-change listener: fold *new* into history, refresh the sensor."""
        self.climate_states[climate].add_state(new)
        # NOTE(review): str(None) becomes the string "None" when no off->on
        # transition is known yet -- verify downstream consumers expect that.
        laston_date = str(self.climate_states[climate].last_turned_on)
        sensor_name = self.laston_sensor_name(climate)
        sensor_state = self.hass.get_state(sensor_name)
        if sensor_state != laston_date:
            self.hass.update_state(sensor_name, state=laston_date)
            self.hass.log(
                f"Updated state for {sensor_name}: {laston_date}. Previous: {sensor_state}"
            )

    def get_history_data(self, days: int = 10) -> List:
        """Return state history for the app-state entity, newest first."""
        data: List = self.hass.get_history(entity_id=self.appstate_entity, days=days)  # type: ignore
        if not data:
            self.hass.warn(
                f"get_history returned no data for entity: {self.appstate_entity}. Exiting"
            )
            return []
        edata = data[0]
        # the get_history() fn doesn't say it guarantees sort (though it appears to be)
        edata = list(reversed(sorted(edata, key=lambda rec: rec["last_updated"])))
        return edata

    def find_laston_from_history(self, climate: str, history: List):
        """Return the newest 'last_changed' where *climate* was "on", else None."""
        key = f"{climate_name(climate)}_state"
        retval = None
        for rec in history:
            if rec["attributes"].get(key) == "on":
                retval = rec["last_changed"]
                break
        return retval
class TurnonState:
    """Track the on/off transition history of one climate entity.

    .__init__() - initialize from history
    .add_state(stateobj) - add stateobj
    .last_turned_on [property] -> None, datetime
        returns the last time a climate went from "off" to "on"
        (based on autoclimate config)

    Detecting the last off->on transition requires the current state,
    the previous state, and the state before that.
    """

    def __init__(self, hass: Hass, config: dict, climate_entity: str) -> None:
        self.hass = hass
        self.config = config[climate_entity]
        self.climate_entity = climate_entity

        # Rolling window of the three most recent *distinct* states.
        # Values seen: "on", "off", "error_off" ("offline" records are skipped).
        self.curr: Optional[str] = None
        self.curr_m1: Optional[str] = None  # curr minus 1, ie: prev
        self.curr_m2: Optional[str] = None  # curr minus 2, ie: prev prev
        self._curr_dt: Optional[dt.datetime] = None  # when self.curr began
        self._curr_dt_m1: Optional[dt.datetime] = None  # when self.curr_m1 began
        self._initialize_from_history()

    def add_state(self, stateobj: dict) -> None:
        """Fold one state record into the rolling window.

        Must be added in chronologically increasing order!

        Raises:
            RuntimeError: if *stateobj* is older than the newest saved state.
        """
        last_updated = stateobj.get("last_updated")
        if isinstance(last_updated, str):
            last_updated = dt.datetime.fromisoformat(last_updated)
        if self._curr_dt and last_updated < self._curr_dt:
            # Fixed typo in message: "lastest" -> "latest".
            raise RuntimeError(
                f"Adding state earlier than latest saved state. Can only add states in increasing datetime. stateobj: {json.dumps(stateobj)}"
            )

        state = self.entity_state(stateobj)
        assert state in {"on", "off", "offline", "error_off"}
        # Repeats carry no transition information; "offline" is transient noise.
        if state == self.curr or state == "offline":
            return
        self.curr_m2 = self.curr_m1
        self.curr_m1 = self.curr
        self.curr = state
        self._curr_dt_m1 = self._curr_dt
        self._curr_dt = last_updated

    def entity_state(self, stateobj: dict) -> str:
        """Return summarized state based on config: on, off, offline."""
        return State.offstate(self.climate_entity, stateobj, self.config, self.hass)[0]

    @property
    def last_turned_on(self) -> Optional[dt.datetime]:
        """Datetime of the most recent off->on transition, or None if unknown."""
        if self.curr == "on" and self.curr_m1 == "off":
            return self._curr_dt
        if self.curr == "off" and self.curr_m1 == "on" and self.curr_m2 == "off":
            return self._curr_dt_m1
        return None

    def _initialize_from_history(self) -> None:
        """Seed the rolling window from the recorded entity history."""
        for stateobj in self._get_history_data():
            self.add_state(stateobj)

    def _get_history_data(self, days: int = 10) -> List:
        """Return state history for self.climate_entity **IN CHRONOLOGICAL ORDER**."""
        data: List = self.hass.get_history(entity_id=self.climate_entity, days=days)  # type: ignore
        if not data:
            self.hass.warn(
                f"get_history returned no data for entity: {self.climate_entity}. Exiting"
            )
            return []
        edata = data[0]
        # the get_history() fn doesn't say it guarantees sort (though it appears to be)
        return sorted(edata, key=lambda rec: rec["last_updated"])
def __str__(self):
def dtstr(val: Optional[dt.datetime]):
if type(val) is str:
print("here")
return "None " if not val else val.strftime("%y/%m/%d %H:%M:%S")
return f"TurnOnState: {self.climate_entity:35} **{dtstr(self.last_turned_on)}** - {self.curr} - {self.curr_m1} - {self.curr_m2} - {dtstr(self._curr_dt)} - {dtstr(self._curr_dt_m1)}" | 0.8308 | 0.26594 |
# Converted Jupyter notebook: counts each account's first transaction per
# month/year, converts to percentages, and plots credit-card payment trends.
# NOTE(review): cells were exported out of execution order -- `year_per`
# holds counts, then percentages, then a matrix re-read from tables.csv.

# In[1]:
import pandas as pd

# In[2]:
# Tab-separated transaction dump; one row per transaction.
data=pd.read_csv('analysis.mf_acc_xns.txt',sep="\t")

# In[3]:
data.info()

# In[4]:
data.head()

# In[43]:
# year[y][m] = number of accounts whose FIRST transaction fell in
# month m+1 of year 2016+y (rows 0..2 -> 2016..2018).
year=[[0 for i in range(12)]for _ in range(3)]
for i, row in enumerate(data.groupby(['acc_id'])['xn_date'].first()):
    temp=row.split('-')  # 'YYYY-MM-DD' -> [YYYY, MM, DD]
    if(temp[0]=='2016'):
        year[0][int(temp[1])-1]+=1
    if(temp[0]=='2017'):
        year[1][int(temp[1])-1]+=1
    if(temp[0]=='2018'):
        year[2][int(temp[1])-1]+=1

# In[44]:
year

# In[7]:
# HACK: fabricates 2018 values for Jun-Dec from the 2017 numbers.
for i in range(5,12):
    year[2][i]=year[1][i]-i

# In[8]:
# Month axis labels 1..12.
month=[i+1 for i in range(12)]

# In[9]:
import matplotlib.pyplot as plt
plt.plot( year[1], 'b', year[2], 'g')
# plt.set_xticklabels(months)
plt.show()

# In[45]:
# Per-year totals. NOTE(review): `sum` shadows the builtin.
sum=[0,0,0]
for i in range(3):
    for j in year[i]:
        sum[i]+=j

# In[46]:
sum

# In[47]:
year_per=[[0 for i in range(12)]for _ in range(3)]

# In[48]:
# Monthly counts as a percentage of that year's total.
for i in range(3):
    for j in range(len(year[i])):
        year_per[i][j]=year[i][j]/sum[i]*100

# In[49]:
year_per

# In[85]:
# Reload the table written by the cells further below.
year_p=pd.read_csv('tables.csv')

# In[86]:
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
# use .to_numpy() on modern pandas.
year_per=year_p.as_matrix()

# In[87]:
# Truncate every value to int for plotting.
for i in range(len(year_per)):
    for j in range(12):
        year_per[i][j]=int(year_per[i][j])

# In[88]:
import pylab
import numpy as np
# [1:] presumably skips the CSV's leading index column -- TODO confirm.
pylab.plot(month,year_per[0][1:],'r',label='BRAC Bank')
pylab.plot(month,year_per[1][1:],'b',label='Bank B')
pylab.plot(month,year_per[2][1:],'g',label='Bank C')
pylab.ylim([0,60])
pylab.xticks(np.arange(1,13))
pylab.legend(loc='upper right')
pylab.xlabel('Months (year 2017) ')
pylab.ylabel('CC Payments in million $')
pylab.title('Credit Card Payment outflow trend')
# NOTE(review): savefig() after show() writes an empty temp.png, because
# show() tears the figure down; save first.
pylab.show()
pylab.savefig('temp.png')
plt.close() # close the figure

# In[89]:
m=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sept","Oct","Nov","Dec"]

# In[52]:
# table=pd.DataFrame({'Year':m,'2016':year_per[0],'2017':year_per[1],'2018':year_per[1]})
table=pd.DataFrame(year_per,columns=m)

# In[53]:
table

# In[54]:
idx = 0
new_col = [2016,2017,2018] # can be a list, a Series, an array or a scalar
table.insert(loc=idx, column='Years', value=new_col)

# In[55]:
table.columns

# In[56]:
table.to_csv('tables.csv')

# In[23]:
get_ipython().system('ls')

# In[24]:
rt=pd.read_csv('tables.csv')

# In[25]:
rt | DataAnalysis/sliding window graph.py |
# Converted Jupyter notebook: counts each account's first transaction per
# month/year, converts to percentages, and plots credit-card payment trends.
# NOTE(review): cells were exported out of execution order -- `year_per`
# holds counts, then percentages, then a matrix re-read from tables.csv.

# In[1]:
import pandas as pd

# In[2]:
# Tab-separated transaction dump; one row per transaction.
data = pd.read_csv('analysis.mf_acc_xns.txt', sep="\t")

# In[3]:
data.info()

# In[4]:
data.head()

# In[43]:
# year[y][m] = number of accounts whose FIRST transaction fell in
# month m+1 of year 2016+y (rows 0..2 -> 2016..2018).
year = [[0 for i in range(12)] for _ in range(3)]
for first_date in data.groupby(['acc_id'])['xn_date'].first():
    parts = first_date.split('-')  # 'YYYY-MM-DD' -> [YYYY, MM, DD]
    if parts[0] == '2016':
        year[0][int(parts[1]) - 1] += 1
    if parts[0] == '2017':
        year[1][int(parts[1]) - 1] += 1
    if parts[0] == '2018':
        year[2][int(parts[1]) - 1] += 1

# In[44]:
year

# In[7]:
# HACK: fabricates 2018 values for Jun-Dec from the 2017 numbers.
for i in range(5, 12):
    year[2][i] = year[1][i] - i

# In[8]:
# Month axis labels 1..12.
month = [i + 1 for i in range(12)]

# In[9]:
import matplotlib.pyplot as plt
plt.plot(year[1], 'b', year[2], 'g')
plt.show()

# In[45]:
# Per-year totals (renamed from `sum`, which shadowed the builtin).
totals = [0, 0, 0]
for i in range(3):
    for j in year[i]:
        totals[i] += j

# In[46]:
totals

# In[47]:
year_per = [[0 for i in range(12)] for _ in range(3)]

# In[48]:
# Monthly counts as a percentage of that year's total.
for i in range(3):
    for j in range(len(year[i])):
        year_per[i][j] = year[i][j] / totals[i] * 100

# In[49]:
year_per

# In[85]:
# Reload the table written by the cells further below.
year_p = pd.read_csv('tables.csv')

# In[86]:
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 -- to_numpy() is
# the documented replacement and returns the same ndarray.
year_per = year_p.to_numpy()

# In[87]:
# Truncate every value to int for plotting.
for i in range(len(year_per)):
    for j in range(12):
        year_per[i][j] = int(year_per[i][j])

# In[88]:
import pylab
import numpy as np
# [1:] presumably skips the CSV's leading index column -- TODO confirm.
pylab.plot(month, year_per[0][1:], 'r', label='BRAC Bank')
pylab.plot(month, year_per[1][1:], 'b', label='Bank B')
pylab.plot(month, year_per[2][1:], 'g', label='Bank C')
pylab.ylim([0, 60])
pylab.xticks(np.arange(1, 13))
pylab.legend(loc='upper right')
pylab.xlabel('Months (year 2017) ')
pylab.ylabel('CC Payments in million $')
pylab.title('Credit Card Payment outflow trend')
# FIX: save BEFORE show() -- show() tears the figure down, so saving
# afterwards produced an empty temp.png.
pylab.savefig('temp.png')
pylab.show()
plt.close()  # close the figure

# In[89]:
m = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"]

# In[52]:
table = pd.DataFrame(year_per, columns=m)

# In[53]:
table

# In[54]:
idx = 0
new_col = [2016, 2017, 2018]  # can be a list, a Series, an array or a scalar
table.insert(loc=idx, column='Years', value=new_col)

# In[55]:
table.columns

# In[56]:
table.to_csv('tables.csv')

# In[23]:
get_ipython().system('ls')

# In[24]:
rt = pd.read_csv('tables.csv')

# In[25]:
rt | 0.19544 | 0.302359 |
import argparse
import base64
import traceback
import zlib
import flask
from flask import request, jsonify
from mailer import Mailer, Message
def parse_arguments(argv=None):
    """Parse command-line options for the feedback-mail service.

    Args:
        argv: Optional list of argument strings. Defaults to None, which
            makes argparse read sys.argv[1:], so existing callers are
            unaffected; passing a list makes the function testable.

    Returns:
        argparse.Namespace with port, host, user, password, receiver.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", default=5000, type=int, help="Port to bind the http api to")
    parser.add_argument("--host", default="smtp.gmail.com", help="Host of the smtp server")
    parser.add_argument("--user", required=True, help="Username to use to login into the smtp server")
    parser.add_argument("--password", required=True, help="Password for the smtp server")
    parser.add_argument("--receiver", required=True, help="Address of the receiver of feedback mails")
    return parser.parse_args(argv)
def make_app(args):
    """Build the Flask app exposing POST /post for in-app feedback.

    *args* is the parsed CLI namespace (smtp host/user/password, receiver);
    the handlers below close over it.
    """
    app = flask.Flask(__name__)

    @app.route("/post", methods=["POST"])
    def post():
        # "version" is mandatory (KeyError -> 400); name/feedback optional.
        version = request.form["version"]
        username = request.form.get("name", "")
        feedback = request.form.get("feedback", "")

        if "logcat64" in request.form:
            # Compressed variant: base64-encoded, then zlib/gzip compressed.
            # wbits=32+15 tells zlib to auto-detect the header format.
            logcat = base64.b64decode(request.form.get("logcat64"))
            logcat = zlib.decompress(logcat, 32+15).decode("utf8")
        else:
            logcat = request.form.get("logcat", "")

        send_feedback_mail(version, username, feedback, logcat)
        return jsonify(success=True)

    def send_feedback_mail(version, username, feedback, logcat):
        # Best effort: a mail failure is logged but must not fail the request.
        # noinspection PyBroadException
        try:
            msg = Message(From=args.user, To=args.receiver, charset="utf8")
            msg.Subject = u"Feedback {} ({})".format(version, username)
            msg.Body = u"User: {0} http://pr0gramm.com/user/{0}\nFeedback: {1}\n\nLogcat: {2}\n".format(username, feedback, logcat)

            # Port 587 with STARTTLS -- matches the default Gmail host.
            mailer = Mailer(args.host, port=587, use_tls=True, usr=args.user, pwd=args.password)
            mailer.send(msg)
        except:
            traceback.print_exc()

    return app
def main():
    """Entry point: parse CLI options and serve the HTTP API on all interfaces."""
    options = parse_arguments()
    feedback_app = make_app(options)
    feedback_app.run(host="0.0.0.0", port=options.port, debug=False)
if __name__ == '__main__':
main() | main.py | import argparse
import base64
import traceback
import zlib
import flask
from flask import request, jsonify
from mailer import Mailer, Message
def parse_arguments(argv=None):
    """Parse command-line options for the feedback-mail service.

    Args:
        argv: Optional list of argument strings. Defaults to None, which
            makes argparse read sys.argv[1:], so existing callers are
            unaffected; passing a list makes the function testable.

    Returns:
        argparse.Namespace with port, host, user, password, receiver.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", default=5000, type=int, help="Port to bind the http api to")
    parser.add_argument("--host", default="smtp.gmail.com", help="Host of the smtp server")
    parser.add_argument("--user", required=True, help="Username to use to login into the smtp server")
    parser.add_argument("--password", required=True, help="Password for the smtp server")
    parser.add_argument("--receiver", required=True, help="Address of the receiver of feedback mails")
    return parser.parse_args(argv)
def make_app(args):
    """Build the Flask app exposing POST /post for in-app feedback.

    *args* is the parsed CLI namespace (smtp host/user/password, receiver);
    the handlers below close over it.
    """
    app = flask.Flask(__name__)

    @app.route("/post", methods=["POST"])
    def post():
        # "version" is mandatory (KeyError -> 400); name/feedback optional.
        version = request.form["version"]
        username = request.form.get("name", "")
        feedback = request.form.get("feedback", "")

        if "logcat64" in request.form:
            # Compressed variant: base64-encoded, then zlib/gzip compressed.
            # wbits=32+15 tells zlib to auto-detect the header format.
            logcat = base64.b64decode(request.form.get("logcat64"))
            logcat = zlib.decompress(logcat, 32+15).decode("utf8")
        else:
            logcat = request.form.get("logcat", "")

        send_feedback_mail(version, username, feedback, logcat)
        return jsonify(success=True)

    def send_feedback_mail(version, username, feedback, logcat):
        # Best effort: a mail failure is logged but must not fail the request.
        # noinspection PyBroadException
        try:
            msg = Message(From=args.user, To=args.receiver, charset="utf8")
            msg.Subject = u"Feedback {} ({})".format(version, username)
            msg.Body = u"User: {0} http://pr0gramm.com/user/{0}\nFeedback: {1}\n\nLogcat: {2}\n".format(username, feedback, logcat)

            # Port 587 with STARTTLS -- matches the default Gmail host.
            mailer = Mailer(args.host, port=587, use_tls=True, usr=args.user, pwd=args.password)
            mailer.send(msg)
        except:
            traceback.print_exc()

    return app
def main():
    """Entry point: parse CLI options and serve the HTTP API on all interfaces."""
    options = parse_arguments()
    feedback_app = make_app(options)
    feedback_app.run(host="0.0.0.0", port=options.port, debug=False)
if __name__ == '__main__':
main() | 0.367951 | 0.057998 |
import pprint
import re # noqa: F401
import six
# Swagger-generated model: tenant/lease details on a listings query result.
# Treat as generated code -- regenerate rather than hand-edit logic.
class DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared type (used by the generated client
    # for (de)serialization).
    swagger_types = {
        'lease_date_variable': 'bool',
        'lease_options': 'str',
        'tenant_info_term_of_lease_from': 'int',
        'tenant_info_term_of_lease_to': 'int',
        'tenant_name': 'str',
        'tenant_rent_details': 'str',
        'lease_start_date': 'datetime',
        'lease_end_date': 'datetime'
    }

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'lease_date_variable': 'leaseDateVariable',
        'lease_options': 'leaseOptions',
        'tenant_info_term_of_lease_from': 'tenantInfoTermOfLeaseFrom',
        'tenant_info_term_of_lease_to': 'tenantInfoTermOfLeaseTo',
        'tenant_name': 'tenantName',
        'tenant_rent_details': 'tenantRentDetails',
        'lease_start_date': 'leaseStartDate',
        'lease_end_date': 'leaseEndDate'
    }

    def __init__(self, lease_date_variable=None, lease_options=None, tenant_info_term_of_lease_from=None, tenant_info_term_of_lease_to=None, tenant_name=None, tenant_rent_details=None, lease_start_date=None, lease_end_date=None):  # noqa: E501
        """DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails - a model defined in Swagger"""  # noqa: E501

        self._lease_date_variable = None
        self._lease_options = None
        self._tenant_info_term_of_lease_from = None
        self._tenant_info_term_of_lease_to = None
        self._tenant_name = None
        self._tenant_rent_details = None
        self._lease_start_date = None
        self._lease_end_date = None
        self.discriminator = None

        # Only assign fields that were explicitly provided; None means
        # "unset", and the setters are routed through the properties below.
        if lease_date_variable is not None:
            self.lease_date_variable = lease_date_variable
        if lease_options is not None:
            self.lease_options = lease_options
        if tenant_info_term_of_lease_from is not None:
            self.tenant_info_term_of_lease_from = tenant_info_term_of_lease_from
        if tenant_info_term_of_lease_to is not None:
            self.tenant_info_term_of_lease_to = tenant_info_term_of_lease_to
        if tenant_name is not None:
            self.tenant_name = tenant_name
        if tenant_rent_details is not None:
            self.tenant_rent_details = tenant_rent_details
        if lease_start_date is not None:
            self.lease_start_date = lease_start_date
        if lease_end_date is not None:
            self.lease_end_date = lease_end_date

    @property
    def lease_date_variable(self):
        """Gets the lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: bool
        """
        return self._lease_date_variable

    @lease_date_variable.setter
    def lease_date_variable(self, lease_date_variable):
        """Sets the lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param lease_date_variable: The lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: bool
        """

        self._lease_date_variable = lease_date_variable

    @property
    def lease_options(self):
        """Gets the lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: str
        """
        return self._lease_options

    @lease_options.setter
    def lease_options(self, lease_options):
        """Sets the lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param lease_options: The lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: str
        """

        self._lease_options = lease_options

    @property
    def tenant_info_term_of_lease_from(self):
        """Gets the tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: int
        """
        return self._tenant_info_term_of_lease_from

    @tenant_info_term_of_lease_from.setter
    def tenant_info_term_of_lease_from(self, tenant_info_term_of_lease_from):
        """Sets the tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param tenant_info_term_of_lease_from: The tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: int
        """

        self._tenant_info_term_of_lease_from = tenant_info_term_of_lease_from

    @property
    def tenant_info_term_of_lease_to(self):
        """Gets the tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: int
        """
        return self._tenant_info_term_of_lease_to

    @tenant_info_term_of_lease_to.setter
    def tenant_info_term_of_lease_to(self, tenant_info_term_of_lease_to):
        """Sets the tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param tenant_info_term_of_lease_to: The tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: int
        """

        self._tenant_info_term_of_lease_to = tenant_info_term_of_lease_to

    @property
    def tenant_name(self):
        """Gets the tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: str
        """
        return self._tenant_name

    @tenant_name.setter
    def tenant_name(self, tenant_name):
        """Sets the tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param tenant_name: The tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: str
        """

        self._tenant_name = tenant_name

    @property
    def tenant_rent_details(self):
        """Gets the tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: str
        """
        return self._tenant_rent_details

    @tenant_rent_details.setter
    def tenant_rent_details(self, tenant_rent_details):
        """Sets the tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param tenant_rent_details: The tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: str
        """

        self._tenant_rent_details = tenant_rent_details

    @property
    def lease_start_date(self):
        """Gets the lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: datetime
        """
        return self._lease_start_date

    @lease_start_date.setter
    def lease_start_date(self, lease_start_date):
        """Sets the lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param lease_start_date: The lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: datetime
        """

        self._lease_start_date = lease_start_date

    @property
    def lease_end_date(self):
        """Gets the lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501


        :return: The lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :rtype: datetime
        """
        return self._lease_end_date

    @lease_end_date.setter
    def lease_end_date(self, lease_end_date):
        """Sets the lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.


        :param lease_end_date: The lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
        :type: datetime
        """

        self._lease_end_date = lease_end_date

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails):
            return False

        # Attribute-wise comparison via the instance __dict__.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
return not self == other | src/domainClient/models/domain_listings_service_v1_model_domain_listings_api_model_query_results_listing_tenant_details.py | import pprint
import re # noqa: F401
import six
class DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'lease_date_variable': 'bool',
'lease_options': 'str',
'tenant_info_term_of_lease_from': 'int',
'tenant_info_term_of_lease_to': 'int',
'tenant_name': 'str',
'tenant_rent_details': 'str',
'lease_start_date': 'datetime',
'lease_end_date': 'datetime'
}
attribute_map = {
'lease_date_variable': 'leaseDateVariable',
'lease_options': 'leaseOptions',
'tenant_info_term_of_lease_from': 'tenantInfoTermOfLeaseFrom',
'tenant_info_term_of_lease_to': 'tenantInfoTermOfLeaseTo',
'tenant_name': 'tenantName',
'tenant_rent_details': 'tenantRentDetails',
'lease_start_date': 'leaseStartDate',
'lease_end_date': 'leaseEndDate'
}
def __init__(self, lease_date_variable=None, lease_options=None, tenant_info_term_of_lease_from=None, tenant_info_term_of_lease_to=None, tenant_name=None, tenant_rent_details=None, lease_start_date=None, lease_end_date=None): # noqa: E501
"""DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails - a model defined in Swagger""" # noqa: E501
self._lease_date_variable = None
self._lease_options = None
self._tenant_info_term_of_lease_from = None
self._tenant_info_term_of_lease_to = None
self._tenant_name = None
self._tenant_rent_details = None
self._lease_start_date = None
self._lease_end_date = None
self.discriminator = None
if lease_date_variable is not None:
self.lease_date_variable = lease_date_variable
if lease_options is not None:
self.lease_options = lease_options
if tenant_info_term_of_lease_from is not None:
self.tenant_info_term_of_lease_from = tenant_info_term_of_lease_from
if tenant_info_term_of_lease_to is not None:
self.tenant_info_term_of_lease_to = tenant_info_term_of_lease_to
if tenant_name is not None:
self.tenant_name = tenant_name
if tenant_rent_details is not None:
self.tenant_rent_details = tenant_rent_details
if lease_start_date is not None:
self.lease_start_date = lease_start_date
if lease_end_date is not None:
self.lease_end_date = lease_end_date
@property
def lease_date_variable(self):
"""Gets the lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:return: The lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:rtype: bool
"""
return self._lease_date_variable
@lease_date_variable.setter
def lease_date_variable(self, lease_date_variable):
"""Sets the lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.
:param lease_date_variable: The lease_date_variable of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:type: bool
"""
self._lease_date_variable = lease_date_variable
@property
def lease_options(self):
"""Gets the lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:return: The lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:rtype: str
"""
return self._lease_options
@lease_options.setter
def lease_options(self, lease_options):
"""Sets the lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.
:param lease_options: The lease_options of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:type: str
"""
self._lease_options = lease_options
@property
def tenant_info_term_of_lease_from(self):
"""Gets the tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:return: The tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:rtype: int
"""
return self._tenant_info_term_of_lease_from
@tenant_info_term_of_lease_from.setter
def tenant_info_term_of_lease_from(self, tenant_info_term_of_lease_from):
"""Sets the tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.
:param tenant_info_term_of_lease_from: The tenant_info_term_of_lease_from of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:type: int
"""
self._tenant_info_term_of_lease_from = tenant_info_term_of_lease_from
@property
def tenant_info_term_of_lease_to(self):
"""Gets the tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:return: The tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:rtype: int
"""
return self._tenant_info_term_of_lease_to
@tenant_info_term_of_lease_to.setter
def tenant_info_term_of_lease_to(self, tenant_info_term_of_lease_to):
"""Sets the tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.
:param tenant_info_term_of_lease_to: The tenant_info_term_of_lease_to of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:type: int
"""
self._tenant_info_term_of_lease_to = tenant_info_term_of_lease_to
@property
def tenant_name(self):
"""Gets the tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:return: The tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:rtype: str
"""
return self._tenant_name
@tenant_name.setter
def tenant_name(self, tenant_name):
"""Sets the tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.
:param tenant_name: The tenant_name of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails. # noqa: E501
:type: str
"""
self._tenant_name = tenant_name
@property
def tenant_rent_details(self):
    """Gets the tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501

    :return: The tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
    :rtype: str
    """
    return self._tenant_rent_details

@tenant_rent_details.setter
def tenant_rent_details(self, tenant_rent_details):
    """Sets the tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.

    Plain setter: the value is stored as-is, with no validation.

    :param tenant_rent_details: The tenant_rent_details of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
    :type: str
    """
    self._tenant_rent_details = tenant_rent_details
@property
def lease_start_date(self):
    """Gets the lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501

    :return: The lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
    :rtype: datetime
    """
    return self._lease_start_date

@lease_start_date.setter
def lease_start_date(self, lease_start_date):
    """Sets the lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.

    Plain setter: the value is stored as-is, with no validation.

    :param lease_start_date: The lease_start_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
    :type: datetime
    """
    self._lease_start_date = lease_start_date
@property
def lease_end_date(self):
    """Gets the lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501

    :return: The lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
    :rtype: datetime
    """
    return self._lease_end_date

@lease_end_date.setter
def lease_end_date(self, lease_end_date):
    """Sets the lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.

    Plain setter: the value is stored as-is, with no validation.

    :param lease_end_date: The lease_end_date of this DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails.  # noqa: E501
    :type: datetime
    """
    self._lease_end_date = lease_end_date
def to_dict(self):
    """Return the model properties as a dict.

    Recursively serializes attribute values: elements of lists and dict
    values that expose a ``to_dict`` method are converted; everything else
    passes through unchanged.

    :return: dict keyed by attribute name.
    """
    result = {}
    # swagger_types maps attribute name -> declared type. Iterating the dict
    # directly replaces the unnecessary six.iteritems() shim, and the
    # lambda/map constructions are replaced with plain comprehensions.
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                x.to_dict() if hasattr(x, "to_dict") else x
                for x in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    # Generated models may subclass dict; include the mapping's own entries.
    if issubclass(DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Returns the string representation of the model (pretty-printed dict)."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint` — delegates to :meth:`to_str`."""
    return self.to_str()
def __eq__(self, other):
    """Return True when *other* is the same model type with equal attributes."""
    same_type = isinstance(
        other,
        DomainListingsServiceV1ModelDomainListingsApiModelQueryResultsListingTenantDetails,
    )
    return same_type and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Returns true if both objects are not equal.

    Uses ``not self == other`` (rather than calling __eq__ directly) so the
    full equality protocol, including reflected comparisons, is honoured.
    """
    return not self == other
import json
import os
from os.path import join, isdir
from os import mkdir, makedirs
import cv2
import numpy as np
import re
def tryint(s):
    """Return *s* converted to int when possible, otherwise *s* unchanged.

    Used by alphanum_key() so numeric chunks compare numerically.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        # Bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only conversion failures should fall through.
        return s
def alphanum_key(s):
    """Split *s* into alternating text and integer chunks for natural sorting.

    "z23a" -> ["z", 23, "a"]
    """
    chunks = re.split('([0-9]+)', s)
    return [tryint(chunk) for chunk in chunks]
def natural_sort(given_list):
    """Sort the given list in place the way humans expect.

    Numeric runs compare as numbers ("img2" before "img10"). Returns None,
    mirroring list.sort().
    """
    given_list.sort(key=alphanum_key)
def get_immediate_childfile_paths(folder_path, ext=None, exclude=None):
    """Return naturally-sorted full paths of files directly under *folder_path*.

    :param ext: keep only names ending with this suffix.
    :param exclude: drop names ending with this suffix.
    """
    names = get_immediate_childfile_names(folder_path, ext, exclude)
    return [os.path.join(folder_path, name) for name in names]
def get_immediate_childfile_names(folder_path, ext=None, exclude=None):
    """Return naturally-sorted names of files directly under *folder_path*.

    :param ext: keep only names ending with this suffix (str or tuple).
    :param exclude: drop names ending with this suffix.
    """
    # os.walk yields (dirpath, dirnames, filenames); only the top-level
    # filenames are needed.  The original wrapped this in an identity list
    # comprehension; a plain copy is enough.
    files_names = list(next(os.walk(folder_path))[2])
    if ext is not None:
        files_names = [name for name in files_names if name.endswith(ext)]
    if exclude is not None:
        files_names = [name for name in files_names
                       if not name.endswith(exclude)]
    natural_sort(files_names)
    return files_names
def get_immediate_childimages_paths(folder_path):
    """Return naturally-sorted full paths of directories under *folder_path*.

    NOTE(review): despite the name, index [1] of the os.walk() tuple yields
    *directories* (the per-video image folders), not image files.
    """
    # Identity comprehension replaced with a plain copy of walk()'s dirnames.
    dir_names = list(next(os.walk(folder_path))[1])
    natural_sort(dir_names)
    return [os.path.join(folder_path, name) for name in dir_names]
def read_json_from_file(input_path):
    """Load and return the JSON document stored at *input_path*."""
    with open(input_path, "r") as fp:
        return json.load(fp)
def clip_bbox(bbox, img_shape):
    """Convert *bbox* from [x, y, w, h] to [x1, y1, x2, y2] in place, clamping
    the bottom-right corner to the image extent.

    :param img_shape: (height, width, ...) as returned by ndarray.shape.
    :return: the same (mutated) bbox list.
    """
    height, width = img_shape[0], img_shape[1]
    bbox[2] = min(bbox[2] + bbox[0], width)
    bbox[3] = min(bbox[3] + bbox[1], height)
    return bbox
def crop_hwc_coord(bbox, out_sz=511):
    """Return the 2x3 affine matrix mapping *bbox* onto an out_sz x out_sz crop.

    :param bbox: [x1, y1, x2, y2] source rectangle.
    :param out_sz: output side length in pixels.
    :return: float64 ndarray usable with cv2.warpAffine.
    """
    a = (out_sz - 1) / (bbox[2] - bbox[0])
    b = (out_sz - 1) / (bbox[3] - bbox[1])
    c = -a * bbox[0]
    d = -b * bbox[1]
    # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
    return np.array([[a, 0, c],
                     [0, b, d]], dtype=np.float64)
def crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):
    """Warp the [x1, y1, x2, y2] *bbox* region of *image* to out_sz x out_sz.

    Areas falling outside the image are filled with *padding* (BGR).

    :return: the warped out_sz x out_sz crop.
    """
    a = (out_sz - 1) / (bbox[2] - bbox[0])
    b = (out_sz - 1) / (bbox[3] - bbox[1])
    c = -a * bbox[0]
    d = -b * bbox[1]
    # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
    mapping = np.array([[a, 0, c],
                        [0, b, d]], dtype=np.float64)
    return cv2.warpAffine(image, mapping, (out_sz, out_sz),
                          borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
def pos_s_2_bbox(pos, s):
    """Return the square [x1, y1, x2, y2] of side *s* centred at *pos*."""
    half = s / 2
    return [pos[0] - half, pos[1] - half, pos[0] + half, pos[1] + half]
def affine_transform(pt, t):
    """Apply affine matrix *t* to 2-D point *pt*; return the mapped (x, y).

    The point is lifted to homogeneous coordinates [x, y, 1] before the
    multiplication; only the first two components are returned.
    """
    homogeneous = np.array([pt[0], pt[1], 1.], dtype=np.float32)
    mapped = np.dot(t, homogeneous)
    return mapped[:2]
def crop_like_SiamFC_coord(bbox, exemplar_size=127, context_amount=0.5, search_size=255):
    """Compute the SiamFC-style crop centre and side length for *bbox*.

    :param bbox: [x1, y1, x2, y2].
    :return: (target_pos, s_x) — crop centre [cx, cy] and the side of the
        context-padded square, both in original-image pixels.
    """
    cx = (bbox[0] + bbox[2]) / 2.
    cy = (bbox[1] + bbox[3]) / 2.
    width = bbox[2] - bbox[0] + 1
    height = bbox[3] - bbox[1] + 1
    # Add a context margin around the target, then take the geometric mean
    # of the padded extents.
    context = context_amount * (width + height)
    s_z = np.sqrt((height + context) * (width + context))
    # Extra padding so the search region maps to search_size pixels at the
    # exemplar's scale.
    scale_z = exemplar_size / s_z
    pad = ((search_size - exemplar_size) / 2) / scale_z
    return [cx, cy], s_z + 2 * pad
def crop_like_SiamFCx(image, bbox, context_amount=0.5, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):
    """Crop *image* around *bbox* the SiamFC way and warp to instanc_size.

    Note: unlike crop_like_SiamFC_coord(), the target size here does NOT add
    +1 to the box extents — kept as-is to preserve behaviour.
    """
    cx = (bbox[0] + bbox[2]) / 2.
    cy = (bbox[1] + bbox[3]) / 2.
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    context = context_amount * (width + height)
    s_z = np.sqrt((height + context) * (width + context))
    scale_z = exemplar_size / s_z
    pad = ((instanc_size - exemplar_size) / 2) / scale_z
    s_x = s_z + 2 * pad
    return crop_hwc(image, pos_s_2_bbox([cx, cy], s_x), instanc_size, padding)
def gen_json(json_list, data_subset):
    """Build a '<subset>_pose_siamfc.json' index from PoseTrack annotations.

    For every annotation file, every person track and frame, the person bbox
    and keypoints are mapped into the 511x511 SiamFC crop coordinate frame
    and stored as {video: {track_id: {frame: bbox, 'kp_'+frame: keypoints}}}.

    :param json_list: paths of per-video PoseTrack annotation JSON files.
    :param data_subset: subset name ('train'/'val') used in the output name.
    """
    snippets = dict()
    for js_file in json_list:
        js_data = read_json_from_file(js_file)
        ann = js_data['annotations']
        # Read one frame of the video just to learn the image dimensions.
        eg_img_path = join('.', js_data['images'][0]['file_name'])
        im = cv2.imread(eg_img_path)
        im_shape = im.shape
        video_name = js_file.split('.')[-2].split('/')[-2] + '/' + js_file.split('.')[-2].split('/')[-1]
        snippet = dict()
        for frame in ann:  # loop index was unused; enumerate() dropped
            if frame['category_id'] != 1:  # skip annotations that are not a person
                continue
            if 'bbox' not in frame:  # skip frames without a bbox (usually a fully occluded person, all keypoints zero)
                continue
            kp = frame['keypoints']
            if kp.count(0) >= 30:  # skip if 10+ keypoints are occluded (each keypoint is an (x, y, v) triple)
                continue
            trackid = "{:02d}".format(frame['track_id'])
            frame_name = "{:06d}".format(int(str(frame['image_id'])[-4:]))
            kp_name = "kp_" + frame_name
            bbox = clip_bbox(frame['bbox'], im_shape)
            pos, s = crop_like_SiamFC_coord(bbox, exemplar_size=127, context_amount=0.5, search_size=511)
            mapping_bbox = pos_s_2_bbox(pos, s)
            mapping = crop_hwc_coord(mapping_bbox, out_sz=511)
            affine_bbox = []
            affine_bbox[:2] = affine_transform(bbox[:2], mapping)  # map bbox into crop coordinates
            affine_bbox[2:] = affine_transform(bbox[2:], mapping)
            # np.float was removed in NumPy 1.24; np.float64 matches the alias.
            joints_3d = np.zeros((int(len(kp) / 3), 3), dtype=np.float64)
            for ipt in range(int(len(kp) / 3)):
                joints_3d[ipt, 0] = kp[ipt * 3 + 0]
                joints_3d[ipt, 1] = kp[ipt * 3 + 1]
                joints_3d[ipt, 2] = kp[ipt * 3 + 2]
            pts = joints_3d.copy()
            affine_kp = []
            for j in range(int(len(kp) / 3)):
                if pts[j, 2] > 0:
                    # Only visible keypoints are mapped into crop coordinates.
                    pts[j, :2] = affine_transform(pts[j, :2], mapping)
                for k in range(3):
                    affine_kp.append(pts[j][k])
            if trackid not in snippet:
                snippet[trackid] = dict()
            snippet[trackid][frame_name] = affine_bbox
            snippet[trackid][kp_name] = affine_kp
        snippets[video_name] = snippet
    print('save json (dataset), please wait 20 seconds~')
    # Close the output file deterministically (the original leaked the handle).
    with open('{}_pose_siamfc.json'.format(data_subset), 'w') as out_file:
        json.dump(snippets, out_file, indent=4, sort_keys=True)
    print('done!')
def main(instanc_size=511):
    """Generate '<subset>_pose_siamfc.json' for the train and val subsets.

    :param instanc_size: crop side length; also names the crop directory.
    """
    dataDir = '.'
    crop_path = './crop{:d}'.format(instanc_size)
    if not isdir(crop_path):
        mkdir(crop_path)
    for dataType in ['train', 'val']:
        # Unused set_crop_base_path / set_ann_base_path locals were removed;
        # append-loops were folded into comprehensions.
        set_img_base_path = join(dataDir, 'images', dataType)
        gt_json_folder_base = "./posetrack_data/annotations/{}".format(dataType)
        gt_json_file_paths = get_immediate_childfile_paths(gt_json_folder_base, ext=".json")
        gt_img_file_paths = get_immediate_childimages_paths(set_img_base_path)
        gt_json_file_video_names = [os.path.basename(p).split('.')[0]
                                    for p in gt_json_file_paths]
        gt_img_file_video_names = [os.path.basename(p)
                                   for p in gt_img_file_paths]
        # In PoseTrack one annotation file corresponds to one video, but the
        # number of annotation files differs from the number of videos; only
        # videos that have annotations are cropped / indexed.
        gt_img_with_anno_names = [x for x in gt_json_file_video_names
                                  if x in gt_img_file_video_names]
        json_list = [join(gt_json_folder_base, name + '.json')
                     for name in gt_img_with_anno_names]
        gen_json(json_list, dataType)
if __name__ == '__main__':
    # Crop side length used for the SiamFC coordinate mapping above.
    instanc_size = 511
    main(instanc_size)
import os
from os.path import join, isdir
from os import mkdir, makedirs
import cv2
import numpy as np
import re
def tryint(s):
    """Return *s* converted to int when possible, otherwise *s* unchanged.

    Used by alphanum_key() so numeric chunks compare numerically.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        # Bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only conversion failures should fall through.
        return s
def alphanum_key(s):
    """Split *s* into alternating text and integer chunks for natural sorting.

    "z23a" -> ["z", 23, "a"]
    """
    chunks = re.split('([0-9]+)', s)
    return [tryint(chunk) for chunk in chunks]
def natural_sort(given_list):
    """Sort the given list in place the way humans expect.

    Numeric runs compare as numbers ("img2" before "img10"). Returns None,
    mirroring list.sort().
    """
    given_list.sort(key=alphanum_key)
def get_immediate_childfile_paths(folder_path, ext=None, exclude=None):
    """Return naturally-sorted full paths of files directly under *folder_path*.

    :param ext: keep only names ending with this suffix.
    :param exclude: drop names ending with this suffix.
    """
    names = get_immediate_childfile_names(folder_path, ext, exclude)
    return [os.path.join(folder_path, name) for name in names]
def get_immediate_childfile_names(folder_path, ext=None, exclude=None):
    """Return naturally-sorted names of files directly under *folder_path*.

    :param ext: keep only names ending with this suffix (str or tuple).
    :param exclude: drop names ending with this suffix.
    """
    # os.walk yields (dirpath, dirnames, filenames); only the top-level
    # filenames are needed.  The original wrapped this in an identity list
    # comprehension; a plain copy is enough.
    files_names = list(next(os.walk(folder_path))[2])
    if ext is not None:
        files_names = [name for name in files_names if name.endswith(ext)]
    if exclude is not None:
        files_names = [name for name in files_names
                       if not name.endswith(exclude)]
    natural_sort(files_names)
    return files_names
def get_immediate_childimages_paths(folder_path):
    """Return naturally-sorted full paths of directories under *folder_path*.

    NOTE(review): despite the name, index [1] of the os.walk() tuple yields
    *directories* (the per-video image folders), not image files.
    """
    # Identity comprehension replaced with a plain copy of walk()'s dirnames.
    dir_names = list(next(os.walk(folder_path))[1])
    natural_sort(dir_names)
    return [os.path.join(folder_path, name) for name in dir_names]
def read_json_from_file(input_path):
    """Load and return the JSON document stored at *input_path*."""
    with open(input_path, "r") as fp:
        return json.load(fp)
def clip_bbox(bbox, img_shape):
    """Convert *bbox* from [x, y, w, h] to [x1, y1, x2, y2] in place, clamping
    the bottom-right corner to the image extent.

    :param img_shape: (height, width, ...) as returned by ndarray.shape.
    :return: the same (mutated) bbox list.
    """
    height, width = img_shape[0], img_shape[1]
    bbox[2] = min(bbox[2] + bbox[0], width)
    bbox[3] = min(bbox[3] + bbox[1], height)
    return bbox
def crop_hwc_coord(bbox, out_sz=511):
    """Return the 2x3 affine matrix mapping *bbox* onto an out_sz x out_sz crop.

    :param bbox: [x1, y1, x2, y2] source rectangle.
    :param out_sz: output side length in pixels.
    :return: float64 ndarray usable with cv2.warpAffine.
    """
    a = (out_sz - 1) / (bbox[2] - bbox[0])
    b = (out_sz - 1) / (bbox[3] - bbox[1])
    c = -a * bbox[0]
    d = -b * bbox[1]
    # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
    return np.array([[a, 0, c],
                     [0, b, d]], dtype=np.float64)
def crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):
    """Warp the [x1, y1, x2, y2] *bbox* region of *image* to out_sz x out_sz.

    Areas falling outside the image are filled with *padding* (BGR).

    :return: the warped out_sz x out_sz crop.
    """
    a = (out_sz - 1) / (bbox[2] - bbox[0])
    b = (out_sz - 1) / (bbox[3] - bbox[1])
    c = -a * bbox[0]
    d = -b * bbox[1]
    # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
    mapping = np.array([[a, 0, c],
                        [0, b, d]], dtype=np.float64)
    return cv2.warpAffine(image, mapping, (out_sz, out_sz),
                          borderMode=cv2.BORDER_CONSTANT, borderValue=padding)
def pos_s_2_bbox(pos, s):
    """Return the square [x1, y1, x2, y2] of side *s* centred at *pos*."""
    half = s / 2
    return [pos[0] - half, pos[1] - half, pos[0] + half, pos[1] + half]
def affine_transform(pt, t):
    """Apply affine matrix *t* to 2-D point *pt*; return the mapped (x, y).

    The point is lifted to homogeneous coordinates [x, y, 1] before the
    multiplication; only the first two components are returned.
    """
    homogeneous = np.array([pt[0], pt[1], 1.], dtype=np.float32)
    mapped = np.dot(t, homogeneous)
    return mapped[:2]
def crop_like_SiamFC_coord(bbox, exemplar_size=127, context_amount=0.5, search_size=255):
    """Compute the SiamFC-style crop centre and side length for *bbox*.

    :param bbox: [x1, y1, x2, y2].
    :return: (target_pos, s_x) — crop centre [cx, cy] and the side of the
        context-padded square, both in original-image pixels.
    """
    cx = (bbox[0] + bbox[2]) / 2.
    cy = (bbox[1] + bbox[3]) / 2.
    width = bbox[2] - bbox[0] + 1
    height = bbox[3] - bbox[1] + 1
    # Add a context margin around the target, then take the geometric mean
    # of the padded extents.
    context = context_amount * (width + height)
    s_z = np.sqrt((height + context) * (width + context))
    # Extra padding so the search region maps to search_size pixels at the
    # exemplar's scale.
    scale_z = exemplar_size / s_z
    pad = ((search_size - exemplar_size) / 2) / scale_z
    return [cx, cy], s_z + 2 * pad
def crop_like_SiamFCx(image, bbox, context_amount=0.5, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):
    """Crop *image* around *bbox* the SiamFC way and warp to instanc_size.

    Note: unlike crop_like_SiamFC_coord(), the target size here does NOT add
    +1 to the box extents — kept as-is to preserve behaviour.
    """
    cx = (bbox[0] + bbox[2]) / 2.
    cy = (bbox[1] + bbox[3]) / 2.
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    context = context_amount * (width + height)
    s_z = np.sqrt((height + context) * (width + context))
    scale_z = exemplar_size / s_z
    pad = ((instanc_size - exemplar_size) / 2) / scale_z
    s_x = s_z + 2 * pad
    return crop_hwc(image, pos_s_2_bbox([cx, cy], s_x), instanc_size, padding)
def gen_json(json_list, data_subset):
    """Build a '<subset>_pose_siamfc.json' index from PoseTrack annotations.

    For every annotation file, every person track and frame, the person bbox
    and keypoints are mapped into the 511x511 SiamFC crop coordinate frame
    and stored as {video: {track_id: {frame: bbox, 'kp_'+frame: keypoints}}}.

    :param json_list: paths of per-video PoseTrack annotation JSON files.
    :param data_subset: subset name ('train'/'val') used in the output name.
    """
    snippets = dict()
    for js_file in json_list:
        js_data = read_json_from_file(js_file)
        ann = js_data['annotations']
        # Read one frame of the video just to learn the image dimensions.
        eg_img_path = join('.', js_data['images'][0]['file_name'])
        im = cv2.imread(eg_img_path)
        im_shape = im.shape
        video_name = js_file.split('.')[-2].split('/')[-2] + '/' + js_file.split('.')[-2].split('/')[-1]
        snippet = dict()
        for frame in ann:  # loop index was unused; enumerate() dropped
            if frame['category_id'] != 1:  # skip annotations that are not a person
                continue
            if 'bbox' not in frame:  # skip frames without a bbox (usually a fully occluded person, all keypoints zero)
                continue
            kp = frame['keypoints']
            if kp.count(0) >= 30:  # skip if 10+ keypoints are occluded (each keypoint is an (x, y, v) triple)
                continue
            trackid = "{:02d}".format(frame['track_id'])
            frame_name = "{:06d}".format(int(str(frame['image_id'])[-4:]))
            kp_name = "kp_" + frame_name
            bbox = clip_bbox(frame['bbox'], im_shape)
            pos, s = crop_like_SiamFC_coord(bbox, exemplar_size=127, context_amount=0.5, search_size=511)
            mapping_bbox = pos_s_2_bbox(pos, s)
            mapping = crop_hwc_coord(mapping_bbox, out_sz=511)
            affine_bbox = []
            affine_bbox[:2] = affine_transform(bbox[:2], mapping)  # map bbox into crop coordinates
            affine_bbox[2:] = affine_transform(bbox[2:], mapping)
            # np.float was removed in NumPy 1.24; np.float64 matches the alias.
            joints_3d = np.zeros((int(len(kp) / 3), 3), dtype=np.float64)
            for ipt in range(int(len(kp) / 3)):
                joints_3d[ipt, 0] = kp[ipt * 3 + 0]
                joints_3d[ipt, 1] = kp[ipt * 3 + 1]
                joints_3d[ipt, 2] = kp[ipt * 3 + 2]
            pts = joints_3d.copy()
            affine_kp = []
            for j in range(int(len(kp) / 3)):
                if pts[j, 2] > 0:
                    # Only visible keypoints are mapped into crop coordinates.
                    pts[j, :2] = affine_transform(pts[j, :2], mapping)
                for k in range(3):
                    affine_kp.append(pts[j][k])
            if trackid not in snippet:
                snippet[trackid] = dict()
            snippet[trackid][frame_name] = affine_bbox
            snippet[trackid][kp_name] = affine_kp
        snippets[video_name] = snippet
    print('save json (dataset), please wait 20 seconds~')
    # Close the output file deterministically (the original leaked the handle).
    with open('{}_pose_siamfc.json'.format(data_subset), 'w') as out_file:
        json.dump(snippets, out_file, indent=4, sort_keys=True)
    print('done!')
def main(instanc_size=511):
    """Generate '<subset>_pose_siamfc.json' for the train and val subsets.

    :param instanc_size: crop side length; also names the crop directory.
    """
    dataDir = '.'
    crop_path = './crop{:d}'.format(instanc_size)
    if not isdir(crop_path):
        mkdir(crop_path)
    for dataType in ['train', 'val']:
        # Unused set_crop_base_path / set_ann_base_path locals were removed;
        # append-loops were folded into comprehensions.
        set_img_base_path = join(dataDir, 'images', dataType)
        gt_json_folder_base = "./posetrack_data/annotations/{}".format(dataType)
        gt_json_file_paths = get_immediate_childfile_paths(gt_json_folder_base, ext=".json")
        gt_img_file_paths = get_immediate_childimages_paths(set_img_base_path)
        gt_json_file_video_names = [os.path.basename(p).split('.')[0]
                                    for p in gt_json_file_paths]
        gt_img_file_video_names = [os.path.basename(p)
                                   for p in gt_img_file_paths]
        # In PoseTrack one annotation file corresponds to one video, but the
        # number of annotation files differs from the number of videos; only
        # videos that have annotations are cropped / indexed.
        gt_img_with_anno_names = [x for x in gt_json_file_video_names
                                  if x in gt_img_file_video_names]
        json_list = [join(gt_json_folder_base, name + '.json')
                     for name in gt_img_with_anno_names]
        gen_json(json_list, dataType)
if __name__ == '__main__':
    # Crop side length used for the SiamFC coordinate mapping above.
    instanc_size = 511
    main(instanc_size)
from absl import logging
import gin
from multi_representation_adversary import data
from multi_representation_adversary import helper
from multi_representation_adversary import resnet
from multi_representation_adversary import selectors
import tensorflow.compat.v2 as tf
@gin.configurable
def learning_rate_scheduler(epoch, values=(0.1, 0.01, 0.001),
                            breakpoints=(100, 150)):
  """Piecewise constant schedule for learning rate.

  Returns values[k], where k is the number of breakpoints strictly smaller
  than `epoch`; e.g. with the defaults, epochs 1..100 -> 0.1,
  101..150 -> 0.01, 151+ -> 0.001.
  """
  passed = 0
  for boundary in breakpoints:
    if epoch > boundary:
      passed += 1
  return values[passed]
@gin.configurable
def train(ckpt_dir=None,
          summary_dir=None,
          epochs=200,
          steps_per_epoch=351,  # 45000 / 128 for CIFAR-10
          global_batch_size=128,
          model_fn=resnet.build_resnet_v1,
          lr_scheduler=learning_rate_scheduler,
          representation_list=(("identity", "none"),)):
  """Train a model with adversarial training in multiple representation spaces.

  Args:
    ckpt_dir: The directory to store model checkpoints.
    summary_dir: The directory to store training summaries.
    epochs: Maximum number of epochs to train for.
    steps_per_epoch: Number of training steps in each epoch.
    global_batch_size: Batch size across all processors/accelerators for each
      training step.
    model_fn: A callable which builds the model structure.
    lr_scheduler: A callable which returns the learning rate at any given epoch.
    representation_list: A list of (transform, attack) tuples representing the
      adversaries that this model should consider.
  """
  # Set up distributed training strategy first because all variables (model,
  # optimizer, etc) have to be created in the strategy's scope.
  # NOTE(review): the whole training loop is kept inside the scope; the scope
  # is only required for variable creation and is harmless for the loop.
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    model = model_fn(return_logits=True)  # Other params are set in gin
    optimizer = tf.keras.optimizers.SGD(learning_rate=lr_scheduler(0),
                                        momentum=0.9)
    loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

    def loss_fn(label, logit):
      # Normalize by global_batch_size, which is different from usual
      # (per-replica) batch size in a distributed training environment.
      return tf.nn.compute_average_loss(loss_obj(label, logit),
                                        global_batch_size=global_batch_size)

    # metrics[0] is the crossentropy loss; the validation block below relies
    # on that ordering.
    metrics = [
        tf.keras.metrics.SparseCategoricalCrossentropy("loss",
                                                       from_logits=True),
        tf.keras.metrics.SparseCategoricalAccuracy("accuracy")]

    # Compile a tf.function for training and eval (validation) steps for each
    # (transform, attack) tuple.
    representation_names = []
    train_step_fns, eval_step_fns = [], []
    for transform_name, attack_name in representation_list:
      representation_names.append(f"{transform_name}_{attack_name}")
      attack_fn = helper.build_attack_fn(model, transform_name, attack_name)
      train_step_fns.append(helper.build_train_step_fn(
          model, optimizer, loss_fn, metrics, attack_fn))
      eval_step_fns.append(helper.build_eval_step_fn(model, metrics, attack_fn))
    selector = selectors.construct_representation_selector(representation_names)

    # Create checkpoint object for saving model weights and selector state.
    checkpoint = tf.train.Checkpoint(model=model, selector=selector)
    ckpt_mgr = tf.train.CheckpointManager(checkpoint, ckpt_dir,
                                          max_to_keep=None)
    restored_path = ckpt_mgr.restore_or_initialize()
    if restored_path:
      logging.info("Restored checkpoint %s", restored_path)
      # Checkpoints are saved once per epoch (ckpt_mgr.save(epoch) below),
      # so the numeric suffix is the epoch to resume from.
      start_epoch = int(restored_path.rsplit("-", 1)[-1])  # path like "ckpt-N"
      total_steps = start_epoch * steps_per_epoch
    else:
      logging.info("Model initialized")
      start_epoch, total_steps = 0, 0
      ckpt_mgr.save(0)

    train_dataset = data.get_training_dataset(global_batch_size)
    valid_dataset = data.get_validation_dataset(global_batch_size)

    with tf.summary.create_file_writer(summary_dir).as_default():
      for epoch in range(start_epoch + 1, epochs + 1):
        logging.info("Epoch %d", epoch)
        # Learning rate decay
        if lr_scheduler(epoch) != optimizer.learning_rate:
          optimizer.learning_rate = lr_scheduler(epoch)
          logging.info("New learning rate: %g", optimizer.learning_rate)

        # Training: each batch trains against the adversary chosen by the
        # selector for the current global step.
        dist_dataset = strategy.experimental_distribute_dataset(
            train_dataset.take(steps_per_epoch))
        for x, y in dist_dataset:
          selected_idx = selector.select(total_steps)
          train_step_fn = train_step_fns[selected_idx]
          per_replica_loss = strategy.run(train_step_fn, args=(x, y))
          loss_value = strategy.reduce(tf.distribute.ReduceOp.SUM,
                                       per_replica_loss, axis=None)
          if total_steps % 50 == 0:
            tf.summary.scalar("train/batch_loss", loss_value, step=total_steps)
          total_steps += 1
        for metric in metrics:
          tf.summary.scalar(f"train/{metric.name}", metric.result(), step=epoch)
          metric.reset_states()

        # Maybe update the selector's state: evaluate every representation's
        # adversary on the validation set and hand the losses to the selector.
        if selector.should_update(epoch):
          logging.info("Evaluate on validation set and update selector state")
          validation_losses = []
          dist_val_dataset = strategy.experimental_distribute_dataset(
              valid_dataset)
          for i, eval_step_fn in enumerate(eval_step_fns):
            for x, y in dist_val_dataset:
              strategy.run(eval_step_fn, args=(x, y))
            validation_losses.append(metrics[0].result())  # Crossentropy loss
            for metric in metrics:
              name = f"validation/{metric.name}/{representation_names[i]}"
              tf.summary.scalar(name, metric.result(), step=epoch)
              metric.reset_states()
          selector.update(epoch, validation_losses)

        # Save a checkpoint
        ckpt_mgr.save(epoch)
import gin
from multi_representation_adversary import data
from multi_representation_adversary import helper
from multi_representation_adversary import resnet
from multi_representation_adversary import selectors
import tensorflow.compat.v2 as tf
@gin.configurable
def learning_rate_scheduler(epoch, values=(0.1, 0.01, 0.001),
breakpoints=(100, 150)):
"""Piecewise constant schedule for learning rate."""
idx = sum(1 if epoch > b else 0 for b in breakpoints)
return values[idx]
@gin.configurable
def train(ckpt_dir=None,
summary_dir=None,
epochs=200,
steps_per_epoch=351, # 45000 / 128 for CIFAR-10
global_batch_size=128,
model_fn=resnet.build_resnet_v1,
lr_scheduler=learning_rate_scheduler,
representation_list=(("identity", "none"),)):
"""Train a model with adversarial training in multiple representation spaces.
Args:
ckpt_dir: The directory to store model checkpoints.
summary_dir: The directory to store training summaries.
epochs: Maximum number of epochs to train for.
steps_per_epoch: Number of training steps in each epoch.
global_batch_size: Batch size across all processors/accelerators for each
training step.
model_fn: A callable which builds the model structure.
lr_scheduler: A callable which returns the learning rate at any given epoch.
representation_list: A list of (transform, attack) tuples representing the
adversaries that this model should consider.
"""
# Set up distributed training strategy first because all variables (model,
# optimizer, etc) have to be created in the strategy's scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = model_fn(return_logits=True) # Other params are set in gin
optimizer = tf.keras.optimizers.SGD(learning_rate=lr_scheduler(0),
momentum=0.9)
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
def loss_fn(label, logit):
# Normalize by global_batch_size, which is different from usual
# (per-replica) batch size in a distributed training environment.
return tf.nn.compute_average_loss(loss_obj(label, logit),
global_batch_size=global_batch_size)
metrics = [
tf.keras.metrics.SparseCategoricalCrossentropy("loss",
from_logits=True),
tf.keras.metrics.SparseCategoricalAccuracy("accuracy")]
# Compile a tf.function for training and eval (validation) steps for each
# (transform, attack) tuple.
representation_names = []
train_step_fns, eval_step_fns = [], []
for transform_name, attack_name in representation_list:
representation_names.append(f"{transform_name}_{attack_name}")
attack_fn = helper.build_attack_fn(model, transform_name, attack_name)
train_step_fns.append(helper.build_train_step_fn(
model, optimizer, loss_fn, metrics, attack_fn))
eval_step_fns.append(helper.build_eval_step_fn(model, metrics, attack_fn))
selector = selectors.construct_representation_selector(representation_names)
# Create checkpoint object for saving model weights and selector state.
checkpoint = tf.train.Checkpoint(model=model, selector=selector)
ckpt_mgr = tf.train.CheckpointManager(checkpoint, ckpt_dir,
max_to_keep=None)
restored_path = ckpt_mgr.restore_or_initialize()
if restored_path:
logging.info("Restored checkpoint %s", restored_path)
start_epoch = int(restored_path.rsplit("-", 1)[-1]) # path like "ckpt-N"
total_steps = start_epoch * steps_per_epoch
else:
logging.info("Model initialized")
start_epoch, total_steps = 0, 0
ckpt_mgr.save(0)
train_dataset = data.get_training_dataset(global_batch_size)
valid_dataset = data.get_validation_dataset(global_batch_size)
with tf.summary.create_file_writer(summary_dir).as_default():
for epoch in range(start_epoch + 1, epochs + 1):
logging.info("Epoch %d", epoch)
# Learning rate decay
if lr_scheduler(epoch) != optimizer.learning_rate:
optimizer.learning_rate = lr_scheduler(epoch)
logging.info("New learning rate: %g", optimizer.learning_rate)
# Training
dist_dataset = strategy.experimental_distribute_dataset(
train_dataset.take(steps_per_epoch))
for x, y in dist_dataset:
selected_idx = selector.select(total_steps)
train_step_fn = train_step_fns[selected_idx]
per_replica_loss = strategy.run(train_step_fn, args=(x, y))
loss_value = strategy.reduce(tf.distribute.ReduceOp.SUM,
per_replica_loss, axis=None)
if total_steps % 50 == 0:
tf.summary.scalar("train/batch_loss", loss_value, step=total_steps)
total_steps += 1
for metric in metrics:
tf.summary.scalar(f"train/{metric.name}", metric.result(), step=epoch)
metric.reset_states()
# Maybe update the selector's state
if selector.should_update(epoch):
logging.info("Evaluate on validation set and update selector state")
validation_losses = []
dist_val_dataset = strategy.experimental_distribute_dataset(
valid_dataset)
for i, eval_step_fn in enumerate(eval_step_fns):
for x, y in dist_val_dataset:
strategy.run(eval_step_fn, args=(x, y))
validation_losses.append(metrics[0].result()) # Crossentropy loss
for metric in metrics:
name = f"validation/{metric.name}/{representation_names[i]}"
tf.summary.scalar(name, metric.result(), step=epoch)
metric.reset_states()
selector.update(epoch, validation_losses)
# Save a checkpoint
ckpt_mgr.save(epoch) | 0.896416 | 0.366731 |
from functools import wraps
from django.http import HttpResponse, JsonResponse, FileResponse, Http404
from django.shortcuts import get_object_or_404
from django.conf import settings
from rest_framework import viewsets
from . import models
from . import digests
from . import ocr
from . import collections
from . import serializers
from .analyzers import html
from django.db.models import Q
TEXT_LIMIT = 10 ** 6  # one million characters; longer extracted text is trimmed by trim_text()
def collection_view(func):
    """Decorator for Django views bound to a collection.

    The collection slug comes from an URL path parameter called "collection";
    unknown slugs raise Http404. The wrapped view runs with that collection
    set as the current one.
    """
    @wraps(func)
    def view(request, *args, collection, **kwargs):
        if collection not in collections.ALL:
            raise Http404(f"Collection {collection} does not exist")
        col = collections.ALL[collection]
        with col.set_current():
            return func(request, *args, **kwargs)
    return view
def drf_collection_view(func):
    """Decorator for Django Rest Framework viewset methods bound to a collection.

    Reads the collection slug from ``self.kwargs['collection']`` (populated by
    DRF from the URL path parameter), so the result is similar to
    `snoop.data.views.collection_view()` defined above. Unknown slugs (or a
    missing kwarg) raise Http404.
    """
    @wraps(func)
    def view(self, *args, **kwargs):
        try:
            # A missing 'collection' kwarg and an unknown slug both raise
            # KeyError, collapsing to the same 404.
            col = collections.ALL[self.kwargs['collection']]
        except KeyError:
            raise Http404("Collection does not exist")
        with col.set_current():
            return func(self, *args, **kwargs)
    return view
@collection_view
def collection(request):
    """Return basic stats for the current collection as JSON.

    Also loads the "stats" entry for this collection, as saved by
    `snoop.data.admin.get_stats`.
    """
    col = collections.current()
    stats, _ = models.Statistics.objects.get_or_create(key='stats')
    payload = {
        'name': col.name,
        'title': col.name,
        'description': col.name,
        'feed': 'feed',
        'data_urls': '{id}/json',
        'stats': stats.value,
        'max_result_window': col.max_result_window,
        'refresh_interval': col.refresh_interval,
    }
    return JsonResponse(payload)
@collection_view
def feed(request):
    """JSON view paginating the whole Digest table by last modification date.

    Historically used by another service to pull documents as they were
    processed; indexing now happens in a snoop Task instead (see
    `snoop.data.digests.index`).

    TODO: deprecate and remove this view.
    """
    limit = settings.SNOOP_FEED_PAGE_SIZE
    query = models.Digest.objects.order_by('-date_modified')
    lt = request.GET.get('lt')
    if lt:
        query = query.filter(date_modified__lt=lt)
    documents = [digests.get_document_data(d) for d in query[:limit]]
    next_page = None
    if len(documents) >= limit:
        # A full page means there may be more: link to everything older than
        # the last document returned.
        next_page = f"?lt={documents[-1]['version']}"
    return JsonResponse({
        'documents': documents,
        'next': next_page,
    })
@collection_view
def file_view(request, pk):
    """JSON view with data for a File, fetched by primary key.

    The response is different from, but very similar to, the `document()`
    view below. Children are paginated via the "children_page" GET parameter.
    """
    file = get_object_or_404(models.File.objects, pk=pk)
    page = int(request.GET.get('children_page', 1))
    data = digests.get_file_data(file, page)
    return JsonResponse(trim_text(data))
@collection_view
def directory(request, pk):
    """JSON view with data for a Directory, fetched by primary key.

    Children are paginated through the "children_page" GET parameter
    (1-based, defaults to the first page).
    """
    directory = get_object_or_404(models.Directory.objects, pk=pk)
    children_page = int(request.GET.get('children_page', 1))
    return JsonResponse(digests.get_directory_data(directory, children_page))
def trim_text(data, limit=None):
    """Trim the document text payload to at most ``limit`` characters.

    Parameters
    ----------
    data : dict
        Document payload; ``data['content']['text']`` is trimmed in place
        when present and too long.
    limit : int, optional
        Maximum text length; defaults to the module-level ``TEXT_LIMIT``.
        (New parameter — existing one-argument callers are unaffected.)

    Returns
    -------
    dict
        The same ``data`` object, possibly mutated in place.
    """
    if limit is None:
        limit = TEXT_LIMIT
    content = data.get('content')
    if not content:
        return data
    text = content.get('text')
    # For images and the like, text is None.
    if not text:
        return data
    if len(text) > limit:
        content['text'] = text[:limit] + "\n\n=== Long text trimmed by Hoover ===\n"
    return data
@collection_view
def document(request, hash):
    """JSON data for a Digest, fetched by the hash of its Blob.

    These are the de-duplicated variants of the objects served by
    `file_view()`; see `snoop.data.digests.get_document_data()` versus
    `snoop.data.digests.get_file_data()` for the differences.
    """
    digest = get_object_or_404(models.Digest.objects, blob__pk=hash)
    page = int(request.GET.get('children_page', 1))
    payload = digests.get_document_data(digest, page)
    return JsonResponse(trim_text(payload))
@collection_view
def document_download(request, hash, filename):
    """Download the ``.original`` Blob of the first File in a Digest's set.

    All post-conversion ``.blob``s are bound to the same `Digest`, so the
    ``.original`` Blobs are assumed equal and only the first one is served.
    In practice conversion tools produce different results per run, so files
    that require conversion are not de-duplicated properly; see
    `snoop.data.filesystem.handle_file()` for details.

    HTML originals are cleaned and served inline; everything else is sent as
    an attachment under its original filename.
    """
    digest = get_object_or_404(
        models.Digest.objects.only('blob'),
        blob__pk=hash,
    )
    first_file = digest.blob.file_set.first()
    blob = first_file.original
    if html.is_html(blob):
        return HttpResponse(html.clean(blob), content_type='text/html')
    original_name = first_file.name_bytes.tobytes().decode('utf-8', errors='replace')
    return FileResponse(
        blob.open(),
        content_type=blob.content_type,
        as_attachment=True,
        filename=original_name,
    )
@collection_view
def document_ocr(request, hash, ocrname):
    """Download the OCR result binary for a Document / OCR-source pair.

    The download is either a PDF with a selectable text layer or a plain
    text file.  The ``ocrname`` slug is first looked up in
    `snoop.data.models.OcrSource` (external OCR imports, created with the
    `createocrsource` management command or through the Admin).  If it is not
    there, we look among the dependencies of this document's
    ``digests.gather`` Task for the one whose name matches (internally
    managed OCR, slugs like ``tesseract_$LANG``).
    """
    digest = get_object_or_404(models.Digest.objects, blob__pk=hash)
    # Single query instead of the previous .exists() + get_object_or_404
    # pair, which hit the OcrSource table twice for the same row.
    ocr_source = models.OcrSource.objects.filter(name=ocrname).first()
    if ocr_source is not None:
        # serve file from external OCR import
        ocr_queryset = ocr.ocr_documents_for_blob(digest.blob)
        ocr_document = get_object_or_404(ocr_queryset, source=ocr_source)
        blob = ocr_document.ocr
    else:
        digest_task = get_object_or_404(models.Task.objects, func='digests.gather', args=[hash])
        tesseract_task = digest_task.prev_set.get(name=ocrname).prev
        blob = tesseract_task.result
    return FileResponse(blob.open(), content_type=blob.content_type, as_attachment=True,
                        filename=hash + '_' + ocrname)
@collection_view
def document_locations(request, hash):
    """Paginate all locations (filesystem apparitions) of a Digest as JSON.

    Fixed-size integer pages starting from 1, selected through the ``page``
    query parameter.
    """
    digest = get_object_or_404(models.Digest.objects, blob__pk=hash)
    page_number = int(request.GET.get('page', 1))
    locations, has_next = digests.get_document_locations(digest, page_number)
    return JsonResponse({
        'locations': locations,
        'page': page_number,
        'has_next_page': has_next,
    })
class TagViewSet(viewsets.ModelViewSet):
    """Django Rest Framework (DRF) View set for the Tags APIs.

    This is responsible for: capturing the various URL path arguments as the
    viewset context; setting the current collection with
    `drf_collection_view()`; restricting private Tags access to correct users.
    """

    serializer_class = serializers.DocumentUserTagSerializer
    # NOTE(review): no DRF permission classes — access control is done only by
    # get_queryset() filtering and check_ownership(); confirm this is intended.
    permission_classes = []

    @drf_collection_view
    def get_serializer(self, *args, **kwargs):
        """Set a context with the path arguments.

        Generates fake values when instantiated by Swagger.
        """
        fake = getattr(self, 'swagger_fake_view', False)
        if fake:
            # Placeholder values so schema generation does not need real URL
            # kwargs or database rows.
            context = {
                'collection': "some-collection",
                'blob': "0006660000000000000000000000000000000000000000000000000000000000",
                'user': "testuser",
                'digest_id': 666,
                'uuid': 'invalid',
            }
        else:
            # Path arguments captured by the router; digest_id costs one extra
            # query per call (raises if no Digest matches the hash).
            context = {
                'collection': self.kwargs['collection'],
                'blob': self.kwargs['hash'],
                'user': self.kwargs['username'],
                'digest_id': models.Digest.objects.filter(blob=self.kwargs['hash']).get().id,
                'uuid': self.kwargs['uuid'],
            }
        return super().get_serializer(*args, **kwargs, context=context)

    @drf_collection_view
    def dispatch(self, *args, **kwargs):
        """Collection-aware overload."""
        return super().dispatch(*args, **kwargs)

    @drf_collection_view
    def get_queryset(self):
        """Set this TagViewSet's queryset to tags that are private to the
        current user, or that are public.
        """
        user = self.kwargs['username']
        blob = self.kwargs['hash']
        # NOTE(review): assert statements are stripped under `python -O`;
        # consider raising Http404 / ValidationError instead.
        assert models.Digest.objects.filter(blob=blob).exists(), 'hash is not digest'
        return models.DocumentUserTag.objects.filter(Q(user=user) | Q(public=True), Q(digest__blob=blob))

    def check_ownership(self, pk):
        """Raise an error if the tag does not belong to the current user.

        To be used when doing write operations.
        """
        # NOTE(review): also stripped under `python -O`; and this compares the
        # URL username against the tag's `user` field — verify that field is a
        # plain string rather than a FK to a User object.
        assert self.kwargs['username'] == self.get_queryset().get(pk=pk).user, \
            "you can only modify your own tags"

    @drf_collection_view
    def update(self, request, pk=None, **kwargs):
        """Collection-aware overload that also checks permission to write tag."""
        self.check_ownership(pk)
        return super().update(request, pk, **kwargs)

    @drf_collection_view
    def partial_update(self, request, pk=None, **kwargs):
        """Collection-aware overload that also checks permission to write tag."""
        self.check_ownership(pk)
        return super().partial_update(request, pk, **kwargs)

    @drf_collection_view
    def destroy(self, request, pk=None, **kwargs):
        """Collection-aware overload that also checks permission to write tag."""
        self.check_ownership(pk)
        return super().destroy(request, pk, **kwargs)
@collection_view
def thumbnail(request, hash, size):
    """Serve the JPEG thumbnail of the given size for a Digest's blob.

    Returns 404 when no Thumbnail row exists for this (size, blob) pair.
    """
    thumbnail_entry = get_object_or_404(models.Thumbnail.objects, size=size, blob__pk=hash)
    return FileResponse(thumbnail_entry.thumbnail.open(), content_type='image/jpeg')
@collection_view
def pdf_preview(request, hash):
    """Serve the pre-rendered PDF preview for a Digest's blob.

    Returns 404 when no PdfPreview row exists for this blob.
    """
    # NOTE(review): the trailing "| ... |" text below is dataset-extraction
    # residue fused onto the source line, kept byte-identical here.
    pdf_preview_entry = get_object_or_404(models.PdfPreview.objects, blob__pk=hash)
    return FileResponse(pdf_preview_entry.pdf_preview.open(), content_type='application/pdf') | snoop/data/views.py | from functools import wraps
from django.http import HttpResponse, JsonResponse, FileResponse, Http404
from django.shortcuts import get_object_or_404
from django.conf import settings
from rest_framework import viewsets
from . import models
from . import digests
from . import ocr
from . import collections
from . import serializers
from .analyzers import html
from django.db.models import Q
TEXT_LIMIT = 10 ** 6 # one million characters
def collection_view(func):
    """Decorator that binds a plain Django view to a collection.

    The collection slug arrives through the "collection" URL path parameter;
    unknown slugs yield a 404.  The wrapped view runs with the matching
    collection set as current.
    """
    @wraps(func)
    def view(request, *args, collection, **kwargs):
        if collection not in collections.ALL:
            raise Http404(f"Collection {collection} does not exist")
        col = collections.ALL[collection]
        with col.set_current():
            return func(request, *args, **kwargs)
    return view
def drf_collection_view(func):
    """Decorator for Django Rest Framework viewset methods bound to a collection.

    The collection slug is taken from ``self.kwargs['collection']`` (filled in
    by DRF from the URL path parameter), so the effect mirrors
    `collection_view()` above.  Missing kwarg or unknown slug both yield 404.
    """
    @wraps(func)
    def view(self, *args, **kwargs):
        try:
            col = collections.ALL[self.kwargs['collection']]
        except KeyError:
            raise Http404("Collection does not exist")
        with col.set_current():
            return func(self, *args, **kwargs)
    return view
@collection_view
def collection(request):
"""View returns basic stats for a collection as JSON.
Also loads the "stats" for this collection, as saved by `snoop.data.admin.get_stats`.
"""
col = collections.current()
stats, _ = models.Statistics.objects.get_or_create(key='stats')
return JsonResponse({
'name': col.name,
'title': col.name,
'description': col.name,
'feed': 'feed',
'data_urls': '{id}/json',
'stats': stats.value,
'max_result_window': col.max_result_window,
'refresh_interval': col.refresh_interval,
})
@collection_view
def feed(request):
"""JSON view used to paginate through entire Digest database, sorted by last modification date.
This was used in the past by another service to pull documents as they are processed and index them
elsewhere. This is not used anymore by us, since we now index documents in a snoop Task. See
`snoop.data.digests.index` for the Task definition.
TODO: deprecate and remove this view.
"""
limit = settings.SNOOP_FEED_PAGE_SIZE
query = models.Digest.objects.order_by('-date_modified')
lt = request.GET.get('lt')
if lt:
query = query.filter(date_modified__lt=lt)
documents = [digests.get_document_data(d) for d in query[:limit]]
if len(documents) < limit:
next_page = None
else:
last_version = documents[-1]['version']
next_page = f'?lt={last_version}'
return JsonResponse({
'documents': documents,
'next': next_page,
})
@collection_view
def file_view(request, pk):
"""JSON view with data for a File.
The primary key of the File is used to fetch it.
Response is different from, but very similar to, the result of the `document()` view below.
"""
file = get_object_or_404(models.File.objects, pk=pk)
children_page = int(request.GET.get('children_page', 1))
return JsonResponse(trim_text(digests.get_file_data(file, children_page)))
@collection_view
def directory(request, pk):
directory = get_object_or_404(models.Directory.objects, pk=pk)
children_page = int(request.GET.get('children_page', 1))
return JsonResponse(digests.get_directory_data(directory, children_page))
def trim_text(data):
""" Trim the text fields to TEXT_LIMIT chars """
if not data.get('content'):
return data
text = data['content'].get('text')
# For images and the like, text is None.
if not text:
return data
if len(text) > TEXT_LIMIT:
text = text[:TEXT_LIMIT] + "\n\n=== Long text trimmed by Hoover ===\n"
data['content']['text'] = text
return data
@collection_view
def document(request, hash):
"""JSON view with data for a Digest.
The primary key of the Digest is used to fetch it.
These are the de-duplicated variants of the objects returned from `file_view()` above, with some
differences. See `snoop.data.digests.get_document_data()` versus `snoop.data.digests.get_file_data()`.
"""
digest = get_object_or_404(models.Digest.objects, blob__pk=hash)
children_page = int(request.GET.get('children_page', 1))
return JsonResponse(trim_text(digests.get_document_data(digest, children_page)))
@collection_view
def document_download(request, hash, filename):
"""View to download the `.original` Blob for the first File in a Digest's set.
Since all post-conversion `.blob`s are bound to the same `Digest` object, we assume the `.original`
Blobs are all equal too; so we present only the first one for downloading. This might cause problems
when this does not happen for various reasons; since users can't actually download all the different
original versions present in the dataset.
In practice, the conversion tools we use generally produce
different results every time they're run on the same file, so the chance of this happening are
non-existant. This also means we don't de-duplicate properly for files that require conversion.
See `snoop.data.filesystem.handle_file()` for more details.
"""
digest = get_object_or_404(
models.Digest.objects.only('blob'),
blob__pk=hash,
)
first_file = digest.blob.file_set.first()
blob = first_file.original
if html.is_html(blob):
clean_html = html.clean(blob)
return HttpResponse(clean_html, content_type='text/html')
real_filename = first_file.name_bytes.tobytes().decode('utf-8', errors='replace')
return FileResponse(blob.open(), content_type=blob.content_type, as_attachment=True,
filename=real_filename)
@collection_view
def document_ocr(request, hash, ocrname):
"""View to download the OCR result binary for a given Document and OCR source combination.
The file downloaded can either be a PDF document with selectable text imprinted in it, or a text file.
The OCR source can be either External OCR (added by management command
`snoop.data.management.commands.createocrsource` or through the Admin), or managed internally (with the
slug called `tesseract_$LANG`).
The given slug "ocrname" is first looked up in the `snoop.data.models.OcrSource` table. If it's not
there, then we look in the Tasks table for dependencies of this document's Digest task, and return the
one with name matching the slug.
"""
digest = get_object_or_404(models.Digest.objects, blob__pk=hash)
if models.OcrSource.objects.filter(name=ocrname).exists():
# serve file from external OCR import
ocr_source = get_object_or_404(models.OcrSource, name=ocrname)
ocr_queryset = ocr.ocr_documents_for_blob(digest.blob)
ocr_document = get_object_or_404(ocr_queryset, source=ocr_source)
blob = ocr_document.ocr
else:
digest_task = get_object_or_404(models.Task.objects, func='digests.gather', args=[hash])
tesseract_task = digest_task.prev_set.get(name=ocrname).prev
blob = tesseract_task.result
return FileResponse(blob.open(), content_type=blob.content_type, as_attachment=True,
filename=hash + '_' + ocrname)
@collection_view
def document_locations(request, hash):
"""JSON view to paginate through all locations for a Digest.
Used to browse between the different apparitions of a File in a dataset.
Paginated by integers with fixed length pages, starting from 1.
"""
digest = get_object_or_404(models.Digest.objects, blob__pk=hash)
page = int(request.GET.get('page', 1))
locations, has_next = digests.get_document_locations(digest, page)
return JsonResponse({'locations': locations, 'page': page, 'has_next_page': has_next})
class TagViewSet(viewsets.ModelViewSet):
"""Django Rest Framework (DRF) View set for the Tags APIs.
This is responsible for: capturing the various URL path arguments as the viewset context; setting the
current collection with `drf_collection_view()`; restricting private Tags access to correct users.
"""
serializer_class = serializers.DocumentUserTagSerializer
permission_classes = []
@drf_collection_view
def get_serializer(self, *args, **kwargs):
"""Set a context with the path arguments.
Generates fake values when instantiated by Swagger.
"""
fake = getattr(self, 'swagger_fake_view', False)
if fake:
context = {
'collection': "some-collection",
'blob': "0006660000000000000000000000000000000000000000000000000000000000",
'user': "testuser",
'digest_id': 666,
'uuid': 'invalid',
}
else:
context = {
'collection': self.kwargs['collection'],
'blob': self.kwargs['hash'],
'user': self.kwargs['username'],
'digest_id': models.Digest.objects.filter(blob=self.kwargs['hash']).get().id,
'uuid': self.kwargs['uuid'],
}
return super().get_serializer(*args, **kwargs, context=context)
@drf_collection_view
def dispatch(self, *args, **kwargs):
"""Collection-aware overload."""
return super().dispatch(*args, **kwargs)
@drf_collection_view
def get_queryset(self):
"""Sets this TagViewSet's queryset to tags that are private to the current user, or that are public.
"""
user = self.kwargs['username']
blob = self.kwargs['hash']
assert models.Digest.objects.filter(blob=blob).exists(), 'hash is not digest'
return models.DocumentUserTag.objects.filter(Q(user=user) | Q(public=True), Q(digest__blob=blob))
def check_ownership(self, pk):
"""Raises error if tag does not belong to current user.
To be used when doing write operations.
"""
assert self.kwargs['username'] == self.get_queryset().get(pk=pk).user, \
"you can only modify your own tags"
@drf_collection_view
def update(self, request, pk=None, **kwargs):
"""Collection-aware overload that also checks permission to write tag."""
self.check_ownership(pk)
return super().update(request, pk, **kwargs)
@drf_collection_view
def partial_update(self, request, pk=None, **kwargs):
"""Collection-aware overload that also checks permission to write tag."""
self.check_ownership(pk)
return super().partial_update(request, pk, **kwargs)
@drf_collection_view
def destroy(self, request, pk=None, **kwargs):
"""Collection-aware overload that also checks permission to write tag."""
self.check_ownership(pk)
return super().destroy(request, pk, **kwargs)
@collection_view
def thumbnail(request, hash, size):
thumbnail_entry = get_object_or_404(models.Thumbnail.objects, size=size, blob__pk=hash)
return FileResponse(thumbnail_entry.thumbnail.open(), content_type='image/jpeg')
@collection_view
def pdf_preview(request, hash):
pdf_preview_entry = get_object_or_404(models.PdfPreview.objects, blob__pk=hash)
return FileResponse(pdf_preview_entry.pdf_preview.open(), content_type='application/pdf') | 0.726717 | 0.20458 |
import sqlite3
import re
from urllib import parse
import hashlib
def url_pas(data):
    """Percent-encode ``data`` for use as a single URL path segment.

    Unlike a plain ``parse.quote`` (whose default leaves ``/`` unescaped),
    every ``/`` is encoded as ``%2F`` so the value cannot be mistaken for a
    path separator.
    """
    # quote(safe='') escapes '/' too — replaces the old quote-then-replace pair.
    return parse.quote(data, safe='')
def sha224(data):
    """Return the hex SHA-224 digest of the UTF-8 encoding of ``data``."""
    digest = hashlib.sha224(data.encode('utf-8'))
    return digest.hexdigest()
def link(conn, title, data, num, category, backlink):
    """Render wiki ``[[...]]`` markup in *data* into HTML.

    Handles, in order: category links (분류:), ``wiki:`` interwiki links,
    escaped ``[[:분류:...]]`` / ``[[:파일:...]]`` forms, relative ``../`` and
    ``/sub`` links, then the generic ``[[target|label]]`` form (file embeds
    파일:, external images 외부:, bare URLs, and internal page links).

    When ``num == 1`` every discovered link is appended to *backlink* as
    ``[title, target, kind]``.  Category HTML is accumulated into *category*.
    The function's ``return([data, category, backlink])`` sits on the
    following extraction-fused line.

    NOTE(review): this block was recovered from a dump that mangled escape
    sequences (``data.replace('\', '\\')`` is not valid Python as written)
    and stripped all indentation; the nesting below is a best-effort
    reconstruction — verify against the upstream source before relying on it.
    """
    curs = conn.cursor()
    # NOTE(review): mangled escapes — presumably doubles backslashes before parsing.
    data = data.replace('\', '\\')
    # --- category links: [[분류:...]] -----------------------------------
    m = re.findall("\[\[(분류:(?:(?:(?!\]\]).)*))\]\]", data)
    for g in m:
        if(title != g):
            if(num == 1):
                backlink += [[title, g, 'cat']]
        if(category == ''):
            curs.execute("select title from data where title = ?", [g])
            exists = curs.fetchall()
            if(exists):
                red = ""
            else:
                # Pages that do not exist yet get the "not_thing" style.
                red = 'class="not_thing"'
            category += '<a ' + red + ' href="/w/' + url_pas(g) + '">' + re.sub("분류:", "", g) + '</a>'
        else:
            curs.execute("select title from data where title = ?", [g])
            exists = curs.fetchall()
            if(exists):
                red = ""
            else:
                red = 'class="not_thing"'
            category += ' / ' + '<a ' + red + ' href="/w/' + url_pas(g) + '">' + re.sub("분류:", "", g) + '</a>'
        data = re.sub("\[\[(분류:(?:(?:(?!\]\]).)*))\]\]", '', data, 1)
    # --- interwiki links: [[wiki:page|label]] ---------------------------
    test = re.findall('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', data)
    if(test):
        for wiki in test:
            if(wiki[1]):
                data = re.sub('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', '<a id="inside" href="/' + wiki[0] + '">' + wiki[1] + '</a>', data, 1)
            else:
                data = re.sub('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', '<a id="inside" href="/' + wiki[0] + '">' + wiki[0] + '</a>', data, 1)
    # Unescape [[:분류:...]] / [[:파일:...]] into plain link form.
    data = re.sub("\[\[(?::(?P<in>(?:분류|파일):(?:(?:(?!\]\]).)*)))\]\]", "[[\g<in>]]", data)
    # --- relative links: [[../]] resolves to the parent page ------------
    a = re.findall('\[\[\.\.\/(\|(?:(?!]]).)+)?]]', data)
    for i in a:
        b = re.search('(.*)\/', title)
        if(b):
            m = b.groups()
            if(i):
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + i + ']]', data, 1)
            else:
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + ']]', data, 1)
        else:
            if(i):
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + i + ']]', data, 1)
            else:
                data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + ']]', data, 1)
    # [[/sub]] becomes [[title/sub]].
    data = re.sub('\[\[(?P<in>\/(?:(?!]]|\|).)+)(?P<out>\|(?:(?:(?!]]).)+))?]]', '[[' + title + '\g<in>\g<out>]]', data)
    # --- generic [[target|label]] links, resolved one match at a time ---
    link = re.compile('\[\[((?:(?!\[\[|\]\]|\|).)*)(?:\|((?:(?!\[\[|\]\]).)*))?\]\]')
    while(1):
        l_d = link.search(data)
        if(l_d):
            d = l_d.groups()
            if(re.search('^(?:파일|외부):', d[0])):
                # File (파일:) or external-image (외부:) embed with optional
                # width/height/align options after the '|'.
                width = ''
                height = ''
                align = ''
                span = ['', '']
                try:
                    # d[1] is None when no |options part was given; the bare
                    # except swallows the resulting TypeError.
                    w_d = re.search('width=([0-9]+(?:[a-z%]+)?)', d[1])
                    if(w_d):
                        width = 'width="' + w_d.groups()[0] + '" '
                    h_d = re.search('height=([0-9]+(?:[a-z%]+)?)', d[1])
                    if(h_d):
                        height = 'height="' + h_d.groups()[0] + '" '
                    a_d = re.search('align=(center|right)', d[1])
                    if(a_d):
                        span[0] = '<span style="display: block; text-align: ' + a_d.groups()[0] + ';">'
                        span[1] = '</span>'
                except:
                    pass
                f_d = re.search('^파일:([^.]+)\.(.+)$', d[0])
                if(f_d):
                    if(not re.search("^파일:([^\n]*)", title)):
                        if(num == 1):
                            backlink += [[title, d[0], 'file']]
                    file_name = f_d.groups()
                    curs.execute("select title from data where title = ?", ['파일:' + file_name[0] + '.' + file_name[1]])
                    if(not curs.fetchall()):
                        img = '<a class="not_thing" href="/w/' + url_pas('파일:' + file_name[0] + '.' + file_name[1]) + '">파일:' + file_name[0] + '.' + file_name[1] + '</a>'
                    else:
                        # Stored images are addressed by the sha224 of their name.
                        img = span[0] + '<img src="/image/' + sha224(file_name[0]) + '.' + file_name[1] + '" ' + width + height + '>' + span[1]
                    data = link.sub(img, data, 1)
                else:
                    img = span[0] + '<img src="' + re.sub('^외부:', '', d[0]) + '" ' + width + height + '>' + span[1]
                    data = link.sub(img, data, 1)
            elif(re.search('^https?:\/\/', re.sub('<([^>]*)>', '', d[0]))):
                # Bare external URL.
                view = d[0]
                try:
                    if(re.search('(.+)', d[1])):
                        view = d[1]
                except:
                    pass
                data = link.sub('<a class="out_link" rel="nofollow" href="' + re.sub('<([^>]*)>', '', d[0]) + '">' + view + '</a>', data, 1)
            else:
                # Internal page link; <slash> is a temporary token protecting
                # literal backslashes during the strip-and-restore dance.
                view = d[0].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
                try:
                    if(re.search('(.+)', d[1])):
                        view = d[1].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
                except:
                    pass
                sh = ''
                s_d = re.search('#((?:(?!x27;|#).)+)$', d[0])
                if(s_d):
                    href = re.sub('#((?:(?!x27;|#).)+)$', '', d[0])
                    sh = '#' + s_d.groups()[0]
                else:
                    href = d[0]
                if(d[0] == title):
                    # Self-link renders as bold text instead of an anchor.
                    data = link.sub('<b>' + view + '</b>', data, 1)
                elif(re.search('^#', d[0])):
                    data = link.sub('<a title="' + sh + '" href="' + sh + '">' + view + '</a>', data, 1)
                else:
                    # NOTE(review): the replace() arguments here look like
                    # HTML-entity forms mangled by extraction — verify upstream.
                    a = re.sub('<([^>]*)>', '', href.replace(''', "'").replace('"', '"').replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\'))
                    if(num == 1):
                        backlink += [[title, a, '']]
                    curs.execute("select title from data where title = ?", [a])
                    if(not curs.fetchall()):
                        no = 'class="not_thing"'
                        if(num == 1):
                            backlink += [[title, a, 'no']]
                    else:
                        no = ''
                    data = link.sub('<a ' + no + ' title="' + re.sub('<([^>]*)>', '', href) + sh + '" href="/w/' + url_pas(a) + sh + '">' + view.replace('\\', '\\\\') + '</a>', data, 1)
        else:
            break
    # NOTE(review): mangled-escape mirror of the replace() at the top.
    data = data.replace('\\', '\')
return([data, category, backlink]) | set_mark/link.py | import sqlite3
import re
from urllib import parse
import hashlib
def url_pas(data):
return(parse.quote(data).replace('/','%2F'))
def sha224(data):
return(hashlib.sha224(bytes(data, 'utf-8')).hexdigest())
def link(conn, title, data, num, category, backlink):
curs = conn.cursor()
data = data.replace('\', '\\')
m = re.findall("\[\[(분류:(?:(?:(?!\]\]).)*))\]\]", data)
for g in m:
if(title != g):
if(num == 1):
backlink += [[title, g, 'cat']]
if(category == ''):
curs.execute("select title from data where title = ?", [g])
exists = curs.fetchall()
if(exists):
red = ""
else:
red = 'class="not_thing"'
category += '<a ' + red + ' href="/w/' + url_pas(g) + '">' + re.sub("분류:", "", g) + '</a>'
else:
curs.execute("select title from data where title = ?", [g])
exists = curs.fetchall()
if(exists):
red = ""
else:
red = 'class="not_thing"'
category += ' / ' + '<a ' + red + ' href="/w/' + url_pas(g) + '">' + re.sub("분류:", "", g) + '</a>'
data = re.sub("\[\[(분류:(?:(?:(?!\]\]).)*))\]\]", '', data, 1)
test = re.findall('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', data)
if(test):
for wiki in test:
if(wiki[1]):
data = re.sub('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', '<a id="inside" href="/' + wiki[0] + '">' + wiki[1] + '</a>', data, 1)
else:
data = re.sub('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', '<a id="inside" href="/' + wiki[0] + '">' + wiki[0] + '</a>', data, 1)
data = re.sub("\[\[(?::(?P<in>(?:분류|파일):(?:(?:(?!\]\]).)*)))\]\]", "[[\g<in>]]", data)
a = re.findall('\[\[\.\.\/(\|(?:(?!]]).)+)?]]', data)
for i in a:
b = re.search('(.*)\/', title)
if(b):
m = b.groups()
if(i):
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + i + ']]', data, 1)
else:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + ']]', data, 1)
else:
if(i):
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + i + ']]', data, 1)
else:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + ']]', data, 1)
data = re.sub('\[\[(?P<in>\/(?:(?!]]|\|).)+)(?P<out>\|(?:(?:(?!]]).)+))?]]', '[[' + title + '\g<in>\g<out>]]', data)
link = re.compile('\[\[((?:(?!\[\[|\]\]|\|).)*)(?:\|((?:(?!\[\[|\]\]).)*))?\]\]')
while(1):
l_d = link.search(data)
if(l_d):
d = l_d.groups()
if(re.search('^(?:파일|외부):', d[0])):
width = ''
height = ''
align = ''
span = ['', '']
try:
w_d = re.search('width=([0-9]+(?:[a-z%]+)?)', d[1])
if(w_d):
width = 'width="' + w_d.groups()[0] + '" '
h_d = re.search('height=([0-9]+(?:[a-z%]+)?)', d[1])
if(h_d):
height = 'height="' + h_d.groups()[0] + '" '
a_d = re.search('align=(center|right)', d[1])
if(a_d):
span[0] = '<span style="display: block; text-align: ' + a_d.groups()[0] + ';">'
span[1] = '</span>'
except:
pass
f_d = re.search('^파일:([^.]+)\.(.+)$', d[0])
if(f_d):
if(not re.search("^파일:([^\n]*)", title)):
if(num == 1):
backlink += [[title, d[0], 'file']]
file_name = f_d.groups()
curs.execute("select title from data where title = ?", ['파일:' + file_name[0] + '.' + file_name[1]])
if(not curs.fetchall()):
img = '<a class="not_thing" href="/w/' + url_pas('파일:' + file_name[0] + '.' + file_name[1]) + '">파일:' + file_name[0] + '.' + file_name[1] + '</a>'
else:
img = span[0] + '<img src="/image/' + sha224(file_name[0]) + '.' + file_name[1] + '" ' + width + height + '>' + span[1]
data = link.sub(img, data, 1)
else:
img = span[0] + '<img src="' + re.sub('^외부:', '', d[0]) + '" ' + width + height + '>' + span[1]
data = link.sub(img, data, 1)
elif(re.search('^https?:\/\/', re.sub('<([^>]*)>', '', d[0]))):
view = d[0]
try:
if(re.search('(.+)', d[1])):
view = d[1]
except:
pass
data = link.sub('<a class="out_link" rel="nofollow" href="' + re.sub('<([^>]*)>', '', d[0]) + '">' + view + '</a>', data, 1)
else:
view = d[0].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
try:
if(re.search('(.+)', d[1])):
view = d[1].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
except:
pass
sh = ''
s_d = re.search('#((?:(?!x27;|#).)+)$', d[0])
if(s_d):
href = re.sub('#((?:(?!x27;|#).)+)$', '', d[0])
sh = '#' + s_d.groups()[0]
else:
href = d[0]
if(d[0] == title):
data = link.sub('<b>' + view + '</b>', data, 1)
elif(re.search('^#', d[0])):
data = link.sub('<a title="' + sh + '" href="' + sh + '">' + view + '</a>', data, 1)
else:
a = re.sub('<([^>]*)>', '', href.replace(''', "'").replace('"', '"').replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\'))
if(num == 1):
backlink += [[title, a, '']]
curs.execute("select title from data where title = ?", [a])
if(not curs.fetchall()):
no = 'class="not_thing"'
if(num == 1):
backlink += [[title, a, 'no']]
else:
no = ''
data = link.sub('<a ' + no + ' title="' + re.sub('<([^>]*)>', '', href) + sh + '" href="/w/' + url_pas(a) + sh + '">' + view.replace('\\', '\\\\') + '</a>', data, 1)
else:
break
data = data.replace('\\', '\')
return([data, category, backlink]) | 0.056894 | 0.193262 |
from typing import Optional, Tuple

import numpy as np
import scipy.linalg
import scipy.stats
def random_spd_eigendecomposition(
    N: int,
    dtype: Optional[np.dtype] = np.double,
    rng: Optional[np.random.Generator] = None,
) -> Tuple[np.ndarray, np.ndarray]:
    """Draw a random eigendecomposition of a symmetric positive-definite matrix.

    The eigenvalues are sampled from a gamma distribution shifted to be at
    least 1 and returned in ascending order, while the eigenbasis is a
    rotation drawn uniformly (Haar measure) via
    :func:`scipy.stats.special_ortho_group`.

    Parameters
    ----------
    N :
        Dimension of the matrix.
    dtype :
        Floating-point dtype of the returned arrays.
    rng :
        The random number generator to be used for sampling.

    Returns
    -------
    spectrum :
        Eigenvalues as a :class:`numpy.ndarray` of shape ``(N,)``, sorted
        ascending.
    basis :
        Orthonormal eigenbasis as the columns of a :class:`numpy.ndarray` of
        shape ``(N, N)``.
    """
    eigenvalues = scipy.stats.gamma.rvs(
        a=10.0,  # "shape" parameter of the gamma distribution
        loc=1.0,
        scale=1.0,
        size=N,
        random_state=rng,
    ).astype(dtype, copy=False)
    eigenvalues.sort()

    if N == 1:
        # special_ortho_group needs N >= 2; the only 1-D rotation is [1].
        eigenbasis = np.ones((1, 1), dtype=dtype)
    else:
        eigenbasis = scipy.stats.special_ortho_group.rvs(
            N, random_state=rng
        ).astype(dtype, copy=False)

    return eigenvalues, eigenbasis
def random_spd_matrix(
    N: int,
    fast: bool = False,
    dtype: Optional[np.dtype] = np.double,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """Draw a random symmetric positive-definite matrix.

    Parameters
    ----------
    N :
        Dimension of the matrix.
    fast :
        When ``True``, build the matrix from a Gaussian square-root (cheap,
        but with a biased spectrum); otherwise assemble it from a random
        eigendecomposition.
    dtype :
        Floating-point dtype of the returned matrix.
    rng :
        The random number generator to be used for sampling.

    Returns
    -------
    A random symmetric positive-definite ``(N, N)`` matrix.
    """
    if fast:
        root = scipy.stats.norm.rvs(size=(N, N), random_state=rng).astype(
            dtype, copy=False
        )
        # root @ root.T is positive-semidefinite; adding I makes it definite.
        result = root @ root.T
        result += np.eye(N, dtype=dtype)
        # Scale rows/columns by the square root of the diagonal.
        # NOTE(review): the original comment called this a Jacobi
        # preconditioner, but a preconditioner would *divide* by the diagonal
        # scale rather than multiply — behavior kept exactly as before.
        scale = np.sqrt(np.diag(result))
        result = scale[:, None] * result * scale[None, :]
    else:
        eigenvalues, basis = random_spd_eigendecomposition(N, dtype=dtype, rng=rng)
        result = basis @ np.diag(eigenvalues) @ basis.T
        # Remove floating-point asymmetry left by the matrix products.
        result = 0.5 * (result + result.T)
    return result
def random_rank_1_downdate(
    L: np.ndarray,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """Generate a random rank-1 downdate vector for a given Cholesky factor.

    The returned vector :math:`v` is constructed such that
    :math:`L L^T - v v^T` is still strictly positive-definite.

    Parameters
    ----------
    L :
        The lower-triangular Cholesky factor of the matrix to be downdated.
    rng :
        The random number generator to be used for sampling.

    Returns
    -------
    The vector :math:`v` which defines the downdate as a
    :class:`numpy.ndarray` of shape ``(N,)``, where ``(N, N)`` is the shape
    of ``L``.
    """
    N = L.shape[0]

    # Sample a uniformly random direction on the unit sphere.
    v_dir = scipy.stats.norm.rvs(size=N, random_state=rng).astype(L.dtype, copy=False)
    v_dir /= np.linalg.norm(v_dir, ord=2)

    # The downdated matrix is positive semi-definite if and only if p^T p < 1
    # for L p = v. Hence, a vector v = ||v||_2 * u, where `u` is a unit
    # vector, leads to a valid downdate if ||v||_2^2 < 1 / (p_dir^T p_dir),
    # where L p_dir = u.
    p_dir = scipy.linalg.solve_triangular(L, v_dir, lower=True).astype(
        L.dtype, copy=False
    )

    # Bug fix: draw ONE scalar squared norm in (0.2, 0.9) / (p_dir^T p_dir).
    # The previous code passed `size=N`, producing a *vector* of norms that
    # scaled each component of `v_dir` independently — that breaks the
    # p^T p < 1 derivation above and, with it, the positive-definiteness
    # guarantee of the downdate.
    v_norm_sq = float(
        scipy.stats.uniform.rvs(loc=0.2, scale=0.9 - 0.2, random_state=rng)
    )
    v_norm_sq /= float(np.dot(p_dir, p_dir))

    # Python-float exponent keeps the result in L.dtype via weak promotion.
    return (v_norm_sq ** 0.5) * v_dir
from typing import Optional, Tuple
import numpy as np
import scipy.stats
def random_spd_eigendecomposition(
N: int,
dtype: Optional[np.dtype] = np.double,
rng: Optional[np.random.Generator] = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generates a random eigendecomposition of a symmetric positive definite matrix.
The spectrum of the matrix will be drawn from a shifted gamma distribution, while
the eigenbasis is drawn uniformly from the Haar measure.
Parameters
----------
N :
Dimension of the matrix.
rng :
The random number generator to be used to sample the eigendecomposition.
Returns
-------
spectrum :
The spectrum of the matrix as a :class:`numpy.ndarray` of shape :code:`(N,)`.
basis :
The eigenbasis as the columns of a :class:`numpy.ndarray` of shape
:code:`(N, N)`.
"""
# Generate a random positive spectrum
spectrum = scipy.stats.gamma.rvs(
a=10.0, # "Shape" parameter
loc=1.0,
scale=1.0,
size=N,
random_state=rng,
).astype(dtype, copy=False)
spectrum.sort()
# Generate a random orthonormal eigenbasis
if N == 1:
basis = np.ones((1, 1), dtype=dtype)
else:
basis = scipy.stats.special_ortho_group.rvs(N, random_state=rng).astype(
dtype, copy=False
)
return spectrum, basis
def random_spd_matrix(
    N: int,
    fast: bool = False,
    dtype: Optional[np.dtype] = np.double,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """Draw a random symmetric positive-definite matrix.

    Parameters
    ----------
    N :
        Dimension of the matrix.
    fast :
        If :code:`True`, build the matrix from a random square root, which is
        fast but statistically biased. Otherwise assemble it from a randomly
        drawn eigendecomposition.
    dtype :
        Floating-point dtype of the returned matrix.
    rng :
        The random number generator to be used for sampling.

    Returns
    -------
    A random symmetric positive-definite :class:`numpy.ndarray` of shape
    :code:`(N, N)`.
    """
    if not fast:
        # Assemble M = Q diag(lambda) Q^T from a random eigendecomposition
        # with a strictly positive spectrum.
        eigvals, eigvecs = random_spd_eigendecomposition(N, dtype=dtype, rng=rng)
        result = eigvecs @ np.diag(eigvals) @ eigvecs.T
    else:
        # Square-root construction: A A^T is positive semidefinite.
        sqrt_factor = scipy.stats.norm.rvs(size=(N, N), random_state=rng).astype(
            dtype, copy=False
        )
        result = sqrt_factor @ sqrt_factor.T
        # Shift by the identity to make the matrix strictly positive definite.
        result += np.eye(N, dtype=dtype)
        # Rescale rows and columns by the square roots of the diagonal
        # (Jacobi-style scaling of the condition number).
        diag_scale = np.sqrt(np.diag(result))
        result = diag_scale[:, None] * result * diag_scale[None, :]
    # Enforce exact symmetry against floating-point round-off.
    return 0.5 * (result + result.T)
def random_rank_1_downdate(
    L: np.ndarray,
    rng: Optional[np.random.Generator] = None,
) -> np.ndarray:
    """Generates a random rank-1 downdate for a given Cholesky factor which, when
    applied, will result in a positive-definite matrix again.

    Parameters
    ----------
    L :
        The lower-triangular Cholesky factor of the matrix to be downdated.
    rng :
        The random number generator to be used to sample the vector.

    Returns
    -------
    The vector :math:`v` which defines the downdate as a :class:`numpy.ndarray` of
    shape :code:`(N,)`, where :code:`(N, N)` is the shape of :code:`L`.
    """
    N = L.shape[0]
    # Sample uniformly random direction (unit vector)
    v_dir = scipy.stats.norm.rvs(size=N, random_state=rng).astype(L.dtype, copy=False)
    v_dir /= np.linalg.norm(v_dir, ord=2)
    # The downdated matrix is positive semi-definite if and only if p^T p < 1 for
    # L * p = v. Hence, a vector v = ||v||_2 * u, where `u` is a unit vector leads to a
    # valid downdate if ||v||_2^2 < (1 / p^T p) for L * p = u.
    p_dir = scipy.linalg.solve_triangular(L, v_dir, lower=True).astype(
        L.dtype, copy=False
    )
    # Bug fix: this previously passed `size=N`, drawing N independent squared
    # norms and rescaling each component of `v_dir` differently -- the result
    # was no longer a scaled unit vector and the bound derived above did not
    # apply. A single scalar draw keeps p^T p = uniform(0.2, 0.9) < 1.
    v_norm_sq = scipy.stats.uniform.rvs(
        loc=0.2, scale=0.9 - 0.2, random_state=rng
    ).astype(L.dtype, copy=False)
    v_norm_sq /= np.dot(p_dir, p_dir)
    v_norm = np.sqrt(v_norm_sq)
    return v_norm * v_dir
import logging
import os
import sys
import time
import unittest
sys.path.insert(0, os.path.abspath('..'))
from osdp import *
log = logging.getLogger('osdp')
class ControlPanelTestCase(unittest.TestCase):
    """Hardware-in-the-loop test bus for the OSDP Python module.

    NOTE(review): this is an integration test, not a unit test -- it opens a
    real serial port and exchanges frames with an attached OSDP peripheral.
    It will fail when no device is connected to ``/dev/tty.wchusbserial1420``.
    """

    def setUp(self):
        """Reset per-test state."""
        self.last_reply = None

    def tearDown(self):
        """Teardown (nothing to clean up; the test shuts the panel down itself)."""

    def test_cp_checksum_unsecure(self):
        """Exercise a full command round-trip using plain-checksum framing
        (``use_crc=False``) and no secure channel."""
        conn = SerialPortOsdpConnection(port='/dev/tty.wchusbserial1420', baud_rate=9600)
        cp = ControlPanel()
        bus_id = cp.start_connection(conn)
        self.assertIsNotNone(bus_id)
        # 0x7F is presumably the OSDP broadcast/configuration address -- TODO confirm.
        cp.add_device(connection_id=bus_id, address=0x7F, use_crc=False, use_secure_channel=False)
        # Query a battery of reports; the replies are only printed, not asserted.
        id_report = cp.id_report(connection_id=bus_id, address=0x7F)
        print("\r\n")
        print(id_report)
        device_capabilities = cp.device_capabilities(connection_id=bus_id, address=0x7F)
        print("\r\n")
        print(device_capabilities)
        local_status = cp.local_status(connection_id=bus_id, address=0x7F)
        print("\r\n")
        print(local_status)
        input_status = cp.input_status(connection_id=bus_id, address=0x7F)
        print("\r\n")
        print(input_status)
        output_status = cp.output_status(connection_id=bus_id, address=0x7F)
        print("\r\n")
        print(output_status)
        reader_status = cp.reader_status(connection_id=bus_id, address=0x7F)
        print("\r\n")
        print(reader_status)
        # NOTE(review): output_status is queried a second time here -- possibly
        # an intentional re-check after reader_status, possibly a copy/paste
        # leftover. Verify against the device protocol expectations.
        output_status = cp.output_status(connection_id=bus_id, address=0x7F)
        print("\r\n")
        print(output_status)
        # Flash the reader LED green for a while (access-granted pattern).
        granted_led = [ReaderLedControl(
            reader_number = 0x0,
            led_number = 0x0,
            temporary_mode = TemporaryReaderControlCode.SetTemporaryAndStartTimer,
            temporary_on_time = 0x02,
            temporary_off_time = 0x01,
            temporary_on_color = LedColor.Green,
            temporary_off_color = LedColor.Black,
            temporary_timer = 0x000A,
            permanent_mode = PermanentReaderControlCode.Nop,
            permanent_on_time = 0x00,
            permanent_off_time = 0x00,
            permanent_on_color = LedColor.Black,
            permanent_off_color = LedColor.Black
        )]
        result = cp.reader_led_control(connection_id=bus_id, address=0x7F, reader_led_controls=ReaderLedControls(granted_led))
        print("\r\n")
        print(result)
        time.sleep(1.0)
        # Then flash it red (access-denied pattern).
        denied_led = [ReaderLedControl(
            reader_number = 0x0,
            led_number = 0x0,
            temporary_mode = TemporaryReaderControlCode.SetTemporaryAndStartTimer,
            temporary_on_time = 0x02,
            temporary_off_time = 0x01,
            temporary_on_color = LedColor.Red,
            temporary_off_color = LedColor.Black,
            temporary_timer = 0x000A,
            permanent_mode = PermanentReaderControlCode.Nop,
            permanent_on_time = 0x00,
            permanent_off_time = 0x00,
            permanent_on_color = LedColor.Black,
            permanent_off_color = LedColor.Black
        )]
        result = cp.reader_led_control(connection_id=bus_id, address=0x7F, reader_led_controls=ReaderLedControls(denied_led))
        print("\r\n")
        print(result)
        # Close the serial connection and stop the bus thread.
        cp.shutdown()
if __name__ == '__main__':
unittest.main() | tests/test_control_panel.py | import logging
import os
import sys
import time
import unittest
sys.path.insert(0, os.path.abspath('..'))
from osdp import *
log = logging.getLogger('osdp')
class ControlPanelTestCase(unittest.TestCase):
"""Test Bus for OSDP Python Module."""
def setUp(self):
"""Setup."""
self.last_reply = None
def tearDown(self):
"""Teardown."""
def test_cp_checksum_unsecure(self):
conn = SerialPortOsdpConnection(port='/dev/tty.wchusbserial1420', baud_rate=9600)
cp = ControlPanel()
bus_id = cp.start_connection(conn)
self.assertIsNotNone(bus_id)
cp.add_device(connection_id=bus_id, address=0x7F, use_crc=False, use_secure_channel=False)
id_report = cp.id_report(connection_id=bus_id, address=0x7F)
print("\r\n")
print(id_report)
device_capabilities = cp.device_capabilities(connection_id=bus_id, address=0x7F)
print("\r\n")
print(device_capabilities)
local_status = cp.local_status(connection_id=bus_id, address=0x7F)
print("\r\n")
print(local_status)
input_status = cp.input_status(connection_id=bus_id, address=0x7F)
print("\r\n")
print(input_status)
output_status = cp.output_status(connection_id=bus_id, address=0x7F)
print("\r\n")
print(output_status)
reader_status = cp.reader_status(connection_id=bus_id, address=0x7F)
print("\r\n")
print(reader_status)
output_status = cp.output_status(connection_id=bus_id, address=0x7F)
print("\r\n")
print(output_status)
granted_led = [ReaderLedControl(
reader_number = 0x0,
led_number = 0x0,
temporary_mode = TemporaryReaderControlCode.SetTemporaryAndStartTimer,
temporary_on_time = 0x02,
temporary_off_time = 0x01,
temporary_on_color = LedColor.Green,
temporary_off_color = LedColor.Black,
temporary_timer = 0x000A,
permanent_mode = PermanentReaderControlCode.Nop,
permanent_on_time = 0x00,
permanent_off_time = 0x00,
permanent_on_color = LedColor.Black,
permanent_off_color = LedColor.Black
)]
result = cp.reader_led_control(connection_id=bus_id, address=0x7F, reader_led_controls=ReaderLedControls(granted_led))
print("\r\n")
print(result)
time.sleep(1.0)
denied_led = [ReaderLedControl(
reader_number = 0x0,
led_number = 0x0,
temporary_mode = TemporaryReaderControlCode.SetTemporaryAndStartTimer,
temporary_on_time = 0x02,
temporary_off_time = 0x01,
temporary_on_color = LedColor.Red,
temporary_off_color = LedColor.Black,
temporary_timer = 0x000A,
permanent_mode = PermanentReaderControlCode.Nop,
permanent_on_time = 0x00,
permanent_off_time = 0x00,
permanent_on_color = LedColor.Black,
permanent_off_color = LedColor.Black
)]
result = cp.reader_led_control(connection_id=bus_id, address=0x7F, reader_led_controls=ReaderLedControls(denied_led))
print("\r\n")
print(result)
cp.shutdown()
if __name__ == '__main__':
unittest.main() | 0.224735 | 0.122786 |
import logging
from metadatadb_driver_interface.plugin import AbstractPlugin
from metadatadb_driver_interface.search_model import FullTextModel, QueryModel
from metadata_driver_elasticsearch.instance import get_database_instance
class Plugin(AbstractPlugin):
    """Elasticsearch ledger plugin for `Metadata DB's Python reference
    implementation <https://github.com/neveminedio/metadata-driver-elastic>`_.
    Plugs in a Elasticsearch instance as the persistence layer for Metadata Db
    related actions.
    """

    def __init__(self, config=None):
        """Initialize a :class:`~.Plugin` instance and connect to Elasticsearch.

        :param config: optional configuration forwarded to
            :func:`get_database_instance`.
        """
        self.driver = get_database_instance(config)
        self.logger = logging.getLogger('Plugin')
        # NOTE(review): configuring the root logger from inside a library is a
        # side effect on the host application; kept for backward compatibility.
        logging.basicConfig(level=logging.INFO)

    @property
    def type(self):
        """str: the type of this plugin (``'Elasticsearch'``)"""
        return 'Elasticsearch'

    def write(self, obj, resource_id=None):
        """Write obj in elasticsearch.

        :param obj: value to be written in elasticsearch.
        :param resource_id: id for the resource; if ``None``, Elasticsearch
            generates one.
        :return: id of the transaction.
        :raises ValueError: if ``resource_id`` is given and already exists.
        """
        self.logger.debug('elasticsearch::write::{}'.format(resource_id))
        if resource_id is not None and self.driver._es.exists(
                index=self.driver._index,
                id=resource_id,
                doc_type='_doc'):
            raise ValueError(
                "Resource \"{}\" already exists, use update instead".format(resource_id))
        return self.driver._es.index(
            index=self.driver._index,
            id=resource_id,
            body=obj,
            doc_type='_doc',
            refresh='wait_for'
        )['_id']

    def read(self, resource_id):
        """Read object in elasticsearch using the resource_id.

        :param resource_id: id of the object to be read.
        :return: object value from elasticsearch.
        """
        self.logger.debug('elasticsearch::read::{}'.format(resource_id))
        return self.driver._es.get(
            index=self.driver._index,
            id=resource_id,
            doc_type='_doc'
        )['_source']

    def update(self, obj, resource_id):
        """Update object in elasticsearch using the resource_id.

        :param obj: new value for the stored object.
        :param resource_id: id of the object to be updated.
        :return: id of the object.
        """
        self.logger.debug('elasticsearch::update::{}'.format(resource_id))
        return self.driver._es.index(
            index=self.driver._index,
            id=resource_id,
            body=obj,
            doc_type='_doc',
            refresh='wait_for'
        )['_id']

    def delete(self, resource_id):
        """Delete an object from elasticsearch.

        :param resource_id: id of the object to be deleted.
        :raises ValueError: if the resource does not exist.
        """
        self.logger.debug('elasticsearch::delete::{}'.format(resource_id))
        # Fix: idiomatic `not ...` instead of `== False`.
        if not self.driver._es.exists(
                index=self.driver._index,
                id=resource_id,
                doc_type='_doc'):
            # Fix: grammar in user-facing message ("does not exist").
            raise ValueError("Resource \"{}\" does not exist".format(resource_id))
        return self.driver._es.delete(
            index=self.driver._index,
            id=resource_id,
            doc_type='_doc'
        )

    def list(self, search_from=None, search_to=None, limit=None):
        """List all the objects saved in elasticsearch.

        :param search_from: start offset of objects to return.
        :param search_to: last offset of objects to return.
        :param limit: max number of values to be returned; overrides the
            ``search_to``/``search_from`` window when given.
        :return: list with transactions.
        """
        self.logger.debug('elasticsearch::list')
        body = {
            'sort': [
                {"_id": "asc"},
            ],
            'query': {
                'match_all': {}
            }
        }
        if search_from:
            body['from'] = search_from
        if search_to:
            # Robustness fix: tolerate `search_to` without `search_from`
            # (previously raised TypeError on `None - int`).
            body['size'] = search_to - (search_from or 0)
        if limit:
            body['size'] = limit
        page = self.driver._es.search(
            index=self.driver._index,
            body=body
        )
        return [hit['_source'] for hit in page['hits']['hits']]

    def query(self, search_model: QueryModel):
        """Query elasticsearch for objects.

        :param search_model: object of QueryModel.
        :return: tuple of (list of matching objects, total hit count).
        """
        assert search_model.page >= 1, 'page value %s is invalid' % search_model.page
        self.logger.debug(f'elasticsearch::query::{search_model.query}')
        if search_model.sort is not None:
            self._mapping_to_sort(search_model.sort.keys())
            sort = self._sort_object(search_model.sort)
        else:
            sort = [{"_id": "asc"}]
        # An empty query means "match everything". (The old `if query != {}`
        # guard was dead code -- `query` could never be empty at that point.)
        query = search_model.query if search_model.query != {} else {'match_all': {}}
        body = {
            'sort': sort,
            'from': (search_model.page - 1) * search_model.offset,
            'size': search_model.offset,
            'query': query,
        }
        page = self.driver._es.search(
            index=self.driver._index,
            body=body,
            q=search_model.text
        )
        object_list = [hit['_source'] for hit in page['hits']['hits']]
        return object_list, page['hits']['total']['value']

    def text_query(self, search_model: FullTextModel):
        """Full-text query against elasticsearch.

        :param search_model: object of FullTextModel
        :return: tuple of (list of matching objects, total hit count).
        """
        assert search_model.page >= 1, 'page value %s is invalid' % search_model.page
        self.logger.debug('elasticsearch::text_query::{}'.format(search_model.text))
        if search_model.sort is not None:
            self._mapping_to_sort(search_model.sort.keys())
            sort = self._sort_object(search_model.sort)
        else:
            sort = [{"_id": "asc"}]
        body = {
            'sort': sort,
            'from': (search_model.page - 1) * search_model.offset,
            'size': search_model.offset,
        }
        page = self.driver._es.search(
            index=self.driver._index,
            body=body,
            q=search_model.text
        )
        object_list = [hit['_source'] for hit in page['hits']['hits']]
        return object_list, page['hits']['total']['value']

    def _mapping_to_sort(self, keys):
        """Ensure each sort key has a ``keyword`` sub-field in the index mapping."""
        for i in keys:
            mapping = """{
                "properties": {
                    "%s" : {
                      "type": "text",
                      "fields": {
                        "keyword": {
                          "type": "keyword"
                        }
                      }
                    }
                }
            }
            """ % i
            # Only add the mapping when the field is not mapped yet.
            if self.driver._es.indices.get_field_mapping(i)[self.driver._index]['mappings'] == {}:
                self.driver._es.indices.put_mapping(index=self.driver._index, body=mapping,
                                                    doc_type='_doc', include_type_name=True)

    def _sort_object(self, sort):
        """Translate ``{field: 1|-1}`` into an Elasticsearch sort clause.

        Text fields are sorted on their ``.keyword`` sub-field.

        :raises Exception: if the sort spec or the field mappings are invalid.
        """
        try:
            clauses = []
            for field in sort.keys():
                direction = 'asc' if sort.get(field) == 1 else 'desc'
                field_type = self.driver._es.indices.get_field_mapping(field)[
                    self.driver._index]['mappings'][field]['mapping'][
                    field.split('.')[-1]]['type']
                if field_type == 'text':
                    clauses.append({field + ".keyword": direction})
                else:
                    clauses.append({field: direction})
            return clauses
        except Exception as exc:
            # Fix: chain the original error so the root cause is not lost.
            raise Exception(
                "Sort \"{}\" does not have a valid format.".format(sort)) from exc
from metadatadb_driver_interface.plugin import AbstractPlugin
from metadatadb_driver_interface.search_model import FullTextModel, QueryModel
from metadata_driver_elasticsearch.instance import get_database_instance
class Plugin(AbstractPlugin):
"""Elasticsearch ledger plugin for `Metadata DB's Python reference
implementation <https://github.com/neveminedio/metadata-driver-elastic>`_.
Plugs in a Elasticsearch instance as the persistence layer for Metadata Db
related actions.
"""
def __init__(self, config=None):
"""Initialize a :class:`~.Plugin` instance and connect to Elasticsearch.
"""
self.driver = get_database_instance(config)
self.logger = logging.getLogger('Plugin')
logging.basicConfig(level=logging.INFO)
@property
def type(self):
"""str: the type of this plugin (``'Elasticsearch'``)"""
return 'Elasticsearch'
def write(self, obj, resource_id=None):
"""Write obj in elasticsearch.
:param obj: value to be written in elasticsearch.
:param resource_id: id for the resource.
:return: id of the transaction.
"""
self.logger.debug('elasticsearch::write::{}'.format(resource_id))
if resource_id is not None:
if self.driver._es.exists(
index=self.driver._index,
id=resource_id,
doc_type='_doc'
):
raise ValueError(
"Resource \"{}\" already exists, use update instead".format(resource_id))
return self.driver._es.index(
index=self.driver._index,
id=resource_id,
body=obj,
doc_type='_doc',
refresh='wait_for'
)['_id']
def read(self, resource_id):
"""Read object in elasticsearch using the resource_id.
:param resource_id: id of the object to be read.
:return: object value from elasticsearch.
"""
self.logger.debug('elasticsearch::read::{}'.format(resource_id))
return self.driver._es.get(
index=self.driver._index,
id=resource_id,
doc_type='_doc'
)['_source']
def update(self, obj, resource_id):
"""Update object in elasticsearch using the resource_id.
:param metadata: new metadata for the transaction.
:param resource_id: id of the object to be updated.
:return: id of the object.
"""
self.logger.debug('elasticsearch::update::{}'.format(resource_id))
return self.driver._es.index(
index=self.driver._index,
id=resource_id,
body=obj,
doc_type='_doc',
refresh='wait_for'
)['_id']
def delete(self, resource_id):
"""Delete an object from elasticsearch.
:param resource_id: id of the object to be deleted.
:return:
"""
self.logger.debug('elasticsearch::delete::{}'.format(resource_id))
if self.driver._es.exists(
index=self.driver._index,
id=resource_id,
doc_type='_doc'
) == False:
raise ValueError("Resource \"{}\" does not exists".format(resource_id))
return self.driver._es.delete(
index=self.driver._index,
id=resource_id,
doc_type='_doc'
)
def list(self, search_from=None, search_to=None, limit=None):
"""List all the objects saved elasticsearch.
:param search_from: start offset of objects to return.
:param search_to: last offset of objects to return.
:param limit: max number of values to be returned.
:return: list with transactions.
"""
self.logger.debug('elasticsearch::list')
body = {
'sort': [
{"_id": "asc"},
],
'query': {
'match_all': {}
}
}
if search_from:
body['from'] = search_from
if search_to:
body['size'] = search_to - search_from
if limit:
body['size'] = limit
page = self.driver._es.search(
index=self.driver._index,
body=body
)
object_list = []
for x in page['hits']['hits']:
object_list.append(x['_source'])
return object_list
def query(self, search_model: QueryModel):
"""Query elasticsearch for objects.
:param search_model: object of QueryModel.
:return: list of objects that match the query.
"""
assert search_model.page >= 1, 'page value %s is invalid' % search_model.page
self.logger.debug(f'elasticsearch::query::{search_model.query}')
if search_model.sort is not None:
self._mapping_to_sort(search_model.sort.keys())
sort = self._sort_object(search_model.sort)
else:
sort = [{"_id": "asc"}]
if search_model.query == {}:
query = {'match_all': {}}
else:
query = search_model.query
body = {
'sort': sort,
'from': (search_model.page - 1) * search_model.offset,
'size': search_model.offset,
}
if query != {}:
body['query'] = query
page = self.driver._es.search(
index=self.driver._index,
body=body,
q=search_model.text
)
object_list = []
for x in page['hits']['hits']:
object_list.append(x['_source'])
return object_list, page['hits']['total']['value']
def text_query(self, search_model: FullTextModel):
"""Query elasticsearch for objects.
:param search_model: object of FullTextModel
:return: list of objects that match the query.
"""
assert search_model.page >= 1, 'page value %s is invalid' % search_model.page
self.logger.debug('elasticsearch::text_query::{}'.format(search_model.text))
if search_model.sort is not None:
self._mapping_to_sort(search_model.sort.keys())
sort = self._sort_object(search_model.sort)
else:
sort = [{"_id": "asc"}]
body = {
'sort': sort,
'from': (search_model.page - 1) * search_model.offset,
'size': search_model.offset,
}
page = self.driver._es.search(
index=self.driver._index,
body=body,
q=search_model.text
)
object_list = []
for x in page['hits']['hits']:
object_list.append(x['_source'])
return object_list, page['hits']['total']['value']
def _mapping_to_sort(self, keys):
for i in keys:
mapping = """{
"properties": {
"%s" : {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
}
""" % i
if self.driver._es.indices.get_field_mapping(i)[self.driver._index]['mappings'] == {}:
self.driver._es.indices.put_mapping(index=self.driver._index, body=mapping,
doc_type='_doc', include_type_name=True)
def _sort_object(self, sort):
try:
o = []
for i in sort.keys():
if self.driver._es.indices.get_field_mapping(i)[self.driver._index]['mappings'][
i]['mapping'][i.split('.')[-1]]['type'] == 'text':
o.append({i + ".keyword": ('asc' if sort.get(i) == 1 else 'desc')}, )
else:
o.append({i: ('asc' if sort.get(i) == 1 else 'desc')}, )
return o
except Exception:
raise Exception("Sort \"{}\" does not have a valid format.".format(sort)) | 0.752922 | 0.146392 |
import click
import yaml
import json
import re
# Module-level state.
# NOTE(review): `proxies` is shadowed by a local variable of the same name in
# `cli` and appears unused at module scope -- confirm before removing.
# `proxy_count` supplies monotonically increasing ids for make_new_proxy().
proxies = []
proxy_count = 1
def flatten_dict(dd, separator=".", prefix=""):
    """Flatten a nested dict into a single level, joining keys with *separator*.

    Example: ``{"a": {"b": 1}}`` becomes ``{"a.b": 1}``. A non-dict value is
    returned as ``{prefix: value}``.
    """
    if not isinstance(dd, dict):
        return {prefix: dd}
    flat = {}
    for outer_key, nested in dd.items():
        # Recurse into each value; the child's keys already carry `outer_key`.
        for inner_key, value in flatten_dict(nested, separator, outer_key).items():
            flat_key = prefix + separator + inner_key if prefix else inner_key
            flat[flat_key] = value
    return flat
def make_new_proxy(name):
    """Create an empty proxy dict of type *name* with a fresh sequential id.

    Relies on the module-level ``proxy_count`` counter, which is incremented
    on every call.
    """
    global proxy_count
    new_id, proxy_count = proxy_count, proxy_count + 1
    return {
        "id": new_id,
        "mtime": 1,
        "own": [],
        "name": name,
        "type": name,
        "tags": [],
        "properties": {},
    }
def clean_value(value):
    """Coerce a numeric-looking string to ``float``; return anything else unchanged.

    ``yaml.safe_load`` is not specific enough here (e.g. it parses ``"1e-4"``
    as a string), so strings that parse as floats are converted explicitly.
    """
    if isinstance(value, str):
        try:
            return float(value)
        except ValueError:
            # Fix: narrow the former bare `except:` -- float(str) only raises
            # ValueError, and a bare except also swallowed KeyboardInterrupt.
            pass
    return value
def add_proxies_for_dynamic_model(proxies, prefix, run, model_type_name, model_type):
    """Create one proxy per dynamically-named run entry for *model_type*.

    Run keys of the form ``<prefix>.<dynamic_name>.<exportSuffix>`` are
    grouped by ``dynamic_name``; each group becomes one proxy whose
    properties are filled from the matching run values.

    :param proxies: dict of model-type name -> list of proxies (mutated).
    :param prefix: the model type's ``_exportPrefix``.
    :param run: flattened run dict (dotted keys -> values).
    :param model_type_name: name under which the new proxies are stored.
    :param model_type: the model-type description (property name -> spec).
    """
    proxies_by_name = {}
    for (prop_name, prop) in model_type.items():
        # Skip meta entries ("name", "_*") and templated property names.
        if prop_name == "name" or prop_name.startswith("_") or "{" in prop_name:
            continue
        export_suffix = prop["_exportSuffix"]
        for run_key in run.keys():
            # Build up regex to match "<prefix>.<dynamic_name>.<suffix>".
            # Fix: raw string for the regex -- "\." in a plain string is an
            # invalid escape sequence (SyntaxWarning on modern Python).
            # NOTE(review): `prefix` and `export_suffix` are interpolated
            # unescaped, so regex metacharacters in them (e.g. ".") match
            # loosely -- presumably acceptable for these dotted keys.
            dynamic_name_group = r"\.([^.]+)\."
            regex = prefix + dynamic_name_group + export_suffix + "$"
            match = re.match(regex, run_key)
            if match:
                # Collect proxies of this type by their dynamic name.
                dynamic_name = match.group(1)
                if dynamic_name not in proxies_by_name:
                    proxies_by_name[dynamic_name] = make_new_proxy(model_type_name)
                proxies_by_name[dynamic_name]["properties"]["name"] = dynamic_name
                value = clean_value(run[run_key])
                proxies_by_name[dynamic_name]["properties"][prop_name] = value
    proxies[model_type_name] = list(proxies_by_name.values())
def add_key_to_proxy(new_proxy, prop_name, run, run_key, prop):
    """Copy ``run[run_key]`` into *new_proxy* when *prop* exports that key.

    Meta properties ("name", "_exportPrefix") and properties carrying a
    ProxyBuilder domain are skipped.
    """
    if prop_name in ("name", "_exportPrefix"):
        return
    if any(domain.get("type") == "ProxyBuilder" for domain in prop.get("domains", [])):
        return
    if prop["_exportSuffix"] == run_key:
        new_proxy["properties"][prop_name] = clean_value(run[run_key])
@click.command()
@click.option(
    "-r",
    "--run-file",
    required=True,
    help="A flat map of keys to value from a previous parflow run.",
)
@click.option(
    "-m",
    "--model-file",
    required=True,
    help="A pysimput model whose modeltypes will extract values from the run into proxies.",
)
@click.option(
    "-o",
    "--output",
    default="pf_settings.yaml",
    help="location to write the output to.",
)
def cli(run_file, model_file, output):
    # Convert a flat parflow run map plus a pysimput model description into a
    # pf_settings.yaml file containing serialized proxies.
    with open(run_file) as run_handle:
        run = flatten_dict(yaml.safe_load(run_handle))
    with open(model_file) as model_handle:
        model_types = yaml.safe_load(model_handle)

    proxies = {}
    for model_type_name, model_type in model_types.items():
        prefix = model_type.get("_exportPrefix")
        if prefix:
            # Dynamic model: one proxy per dynamically named run entry.
            add_proxies_for_dynamic_model(
                proxies, prefix, run, model_type_name, model_type
            )
        else:
            # Static model: a single proxy filled from matching run keys.
            proxy = make_new_proxy(model_type_name)
            for prop_name, prop in model_type.items():
                for run_key in run.keys():
                    add_key_to_proxy(proxy, prop_name, run, run_key, prop)
            proxies[model_type_name] = [proxy]

    all_proxies = [proxy for group in proxies.values() for proxy in group]
    pf_settings = {
        "save": json.dumps({"model": model_types, "proxies": all_proxies})
    }
    with open(output, "w") as output_handle:
        yaml.dump(pf_settings, output_handle)
if __name__ == "__main__":
cli() | scripts/parflow/read_run.py | import click
import yaml
import json
import re
proxies = []
proxy_count = 1
def flatten_dict(dd, separator=".", prefix=""):
return (
{
prefix + separator + k if prefix else k: v
for kk, vv in dd.items()
for k, v in flatten_dict(vv, separator, kk).items()
}
if isinstance(dd, dict)
else {prefix: dd}
)
def make_new_proxy(name):
global proxy_count
prox = {
"id": proxy_count,
"mtime": 1,
"own": [],
"name": name,
"type": name,
"tags": [],
"properties": {},
}
proxy_count += 1
return prox
def clean_value(value):
# yaml.safe_load not specific enough (eg parses "1e-4" as string)
if isinstance(value, str):
try:
value = float(value)
except:
pass
return value
def add_proxies_for_dynamic_model(proxies, prefix, run, model_type_name, model_type):
proxies_by_name = {}
for (prop_name, prop) in model_type.items():
if prop_name == "name" or prop_name.startswith("_") or "{" in prop_name:
continue
exportSuffix = prop["_exportSuffix"]
for run_key in run.keys():
# Build up regex to match dynamic run key
dynamic_name_group = "\.([^.]+)\."
regex = prefix + dynamic_name_group + exportSuffix + "$"
match = re.match(regex, run_key)
if match:
# Collect proxies of this type by name
dynamic_name = match.group(1)
if dynamic_name not in proxies_by_name:
proxies_by_name[dynamic_name] = make_new_proxy(model_type_name)
proxies_by_name[dynamic_name]["properties"]["name"] = dynamic_name
value = clean_value(run[run_key])
proxies_by_name[dynamic_name]["properties"][prop_name] = value
proxies[model_type_name] = list(proxies_by_name.values())
def add_key_to_proxy(new_proxy, prop_name, run, run_key, prop):
if prop_name in ["name", "_exportPrefix"]:
return
for domain in prop.get("domains", []):
if domain.get("type") == "ProxyBuilder":
return
exportSuffix = prop["_exportSuffix"]
if exportSuffix == run_key:
value = run[run_key]
new_proxy["properties"][prop_name] = clean_value(value)
@click.command()
@click.option(
"-r",
"--run-file",
required=True,
help="A flat map of keys to value from a previous parflow run.",
)
@click.option(
"-m",
"--model-file",
required=True,
help="A pysimput model whose modeltypes will extract values from the run into proxies.",
)
@click.option(
"-o",
"--output",
default="pf_settings.yaml",
help="location to write the output to.",
)
def cli(run_file, model_file, output):
with open(run_file) as run_file_handle:
run = flatten_dict(yaml.safe_load(run_file_handle))
with open(model_file) as model_file_handle:
model_types = yaml.safe_load(model_file_handle)
proxies = {}
for (model_type_name, model_type) in model_types.items():
prefix = model_type.get("_exportPrefix")
static_model = not prefix
if static_model:
# Make one proxy for each modeltype, and fill it with run's values
new_proxy = make_new_proxy(model_type_name)
for (prop_name, prop) in model_type.items():
for run_key in run.keys():
add_key_to_proxy(new_proxy, prop_name, run, run_key, prop)
proxies[model_type_name] = [new_proxy]
else:
add_proxies_for_dynamic_model(
proxies, prefix, run, model_type_name, model_type
)
flat_proxies = [proxy for model in proxies.values() for proxy in model]
pf_settings = {"save": json.dumps({"model": model_types, "proxies": flat_proxies})}
with open(output, "w") as output_handle:
yaml.dump(pf_settings, output_handle)
if __name__ == "__main__":
cli() | 0.448909 | 0.214147 |
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.list_cert_response import ListCertResponse
from cohesity_management_sdk.models.certificate_details import CertificateDetails
from cohesity_management_sdk.models.ssl_certificate_config import SslCertificateConfig
from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException
class CertificatesController(BaseController):
"""A Controller to access Endpoints in the cohesity_management_sdk API."""
    def __init__(self, config=None, client=None, call_back=None):
        # `client` and `call_back` are forwarded to BaseController's HTTP
        # machinery; `config` supplies the base URI and auth for each request.
        super(CertificatesController, self).__init__(client, call_back)
        self.logger = logging.getLogger(__name__)
        self.config = config
    def get_certificate_list(self):
        """Does a GET request to /public/certificates/global.

        Returns all certificates and their details generated from this
        cluster.

        Returns:
            ListCertResponse: Response from the API. List Host Certificate
                Response.

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('get_certificate_list called.')
            # Prepare query URL
            self.logger.info('Preparing query URL for get_certificate_list.')
            _url_path = '/public/certificates/global'
            _query_builder = self.config.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers
            self.logger.info('Preparing headers for get_certificate_list.')
            _headers = {'accept': 'application/json'}
            # Prepare and execute request (auth headers attached before send)
            self.logger.info(
                'Preparing and executing request for get_certificate_list.')
            _request = self.http_client.get(_query_url, headers=_headers)
            AuthManager.apply(_request, self.config)
            _context = self.execute_request(_request,
                                            name='get_certificate_list')
            # Endpoint and global error handling using HTTP status codes.
            # NOTE(review): status_code == 0 appears to be this SDK's sentinel
            # for a transport-level error envelope -- TODO confirm.
            self.logger.info('Validating response for get_certificate_list.')
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Deserialize the raw body into the SDK model type
            return APIHelper.json_deserialize(_context.response.raw_body,
                                              ListCertResponse.from_dictionary)
        except Exception as e:
            # Log with traceback, then re-raise unchanged for the caller.
            self.logger.error(e, exc_info=True)
            raise
    def create_deploy_host_certificate(self, body=None):
        """Does a POST request to /public/certificates/global.

        Generates and deploys a certificate, returning the global certificate
        for a single or multiple hosts.

        Args:
            body (DeployCertParameters, optional): Request to generate and
                deploy a new certificate.

        Returns:
            CertificateDetails: Response from the API. Host Certificate
                Download Response.

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('create_deploy_host_certificate called.')
            # Prepare query URL
            self.logger.info(
                'Preparing query URL for create_deploy_host_certificate.')
            _url_path = '/public/certificates/global'
            _query_builder = self.config.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers (JSON request and response)
            self.logger.info(
                'Preparing headers for create_deploy_host_certificate.')
            _headers = {
                'accept': 'application/json',
                'content-type': 'application/json; charset=utf-8'
            }
            # Prepare and execute request; `body` is serialized to JSON
            self.logger.info(
                'Preparing and executing request for create_deploy_host_certificate.'
            )
            _request = self.http_client.post(
                _query_url,
                headers=_headers,
                parameters=APIHelper.json_serialize(body))
            AuthManager.apply(_request, self.config)
            _context = self.execute_request(
                _request, name='create_deploy_host_certificate')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info(
                'Validating response for create_deploy_host_certificate.')
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Deserialize the raw body into the SDK model type
            return APIHelper.json_deserialize(
                _context.response.raw_body, CertificateDetails.from_dictionary)
        except Exception as e:
            # Log with traceback, then re-raise unchanged for the caller.
            self.logger.error(e, exc_info=True)
            raise
    def delete_web_server_certificate(self):
        """Does a DELETE request to /public/certificates/webServer.

        Deletes the web server certificate; returns delete status upon
        completion.

        Returns:
            void: Response from the API. No Content

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('delete_web_server_certificate called.')
            # Prepare query URL
            self.logger.info(
                'Preparing query URL for delete_web_server_certificate.')
            _url_path = '/public/certificates/webServer'
            _query_builder = self.config.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare and execute request (no body and no accept header needed)
            self.logger.info(
                'Preparing and executing request for delete_web_server_certificate.'
            )
            _request = self.http_client.delete(_query_url)
            AuthManager.apply(_request, self.config)
            _context = self.execute_request(
                _request, name='delete_web_server_certificate')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info(
                'Validating response for delete_web_server_certificate.')
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
            # No return value: the endpoint responds with "No Content".
        except Exception as e:
            # Log with traceback, then re-raise unchanged for the caller.
            self.logger.error(e, exc_info=True)
            raise
def get_web_server_certificate(self):
    """Does a GET request to /public/certificates/webServer.

    Returns the Server Certificate configured on the cluster.

    Returns:
        SslCertificateConfig: Response from the API. Success

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API; carries the HTTP status code, an error message
            and the HTTP body that was received in the request.
    """
    try:
        self.logger.info('get_web_server_certificate called.')
        # Assemble the endpoint URL from the configured base URI.
        self.logger.info(
            'Preparing query URL for get_web_server_certificate.')
        endpoint = APIHelper.clean_url(
            self.config.get_base_uri() + '/public/certificates/webServer')
        # Ask the server for a JSON response body.
        self.logger.info(
            'Preparing headers for get_web_server_certificate.')
        headers = {'accept': 'application/json'}
        # Authenticate and issue the GET request.
        self.logger.info(
            'Preparing and executing request for get_web_server_certificate.'
        )
        request = self.http_client.get(endpoint, headers=headers)
        AuthManager.apply(request, self.config)
        context = self.execute_request(
            request, name='get_web_server_certificate')
        # Endpoint and global error handling using HTTP status codes.
        self.logger.info(
            'Validating response for get_web_server_certificate.')
        if context.response.status_code == 0:
            raise RequestErrorErrorException('Error', context)
        self.validate_response(context)
        # Deserialize the JSON body into the typed response model.
        return APIHelper.json_deserialize(
            context.response.raw_body, SslCertificateConfig.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
def update_web_server_certificate(self, body=None):
    """Does a PUT request to /public/certificates/webServer.

    Returns the updated Web Server Certificate on the cluster.

    Args:
        body (SslCertificateConfig, optional): New certificate
            configuration to apply.

    Returns:
        SslCertificateConfig: Response from the API. Success

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API; carries the HTTP status code, an error message
            and the HTTP body that was received in the request.
    """
    try:
        self.logger.info('update_web_server_certificate called.')
        # Assemble the endpoint URL from the configured base URI.
        self.logger.info(
            'Preparing query URL for update_web_server_certificate.')
        endpoint = APIHelper.clean_url(
            self.config.get_base_uri() + '/public/certificates/webServer')
        # JSON payload in, JSON response out.
        self.logger.info(
            'Preparing headers for update_web_server_certificate.')
        headers = {
            'accept': 'application/json',
            'content-type': 'application/json; charset=utf-8',
        }
        # Serialize the payload, authenticate and issue the PUT request.
        self.logger.info(
            'Preparing and executing request for update_web_server_certificate.'
        )
        request = self.http_client.put(
            endpoint,
            headers=headers,
            parameters=APIHelper.json_serialize(body))
        AuthManager.apply(request, self.config)
        context = self.execute_request(
            request, name='update_web_server_certificate')
        # Endpoint and global error handling using HTTP status codes.
        self.logger.info(
            'Validating response for update_web_server_certificate.')
        if context.response.status_code == 0:
            raise RequestErrorErrorException('Error', context)
        self.validate_response(context)
        # Deserialize the JSON body into the typed response model.
        return APIHelper.json_deserialize(
            context.response.raw_body, SslCertificateConfig.from_dictionary)
    except Exception as e:
        self.logger.error(e, exc_info=True)
        raise
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.list_cert_response import ListCertResponse
from cohesity_management_sdk.models.certificate_details import CertificateDetails
from cohesity_management_sdk.models.ssl_certificate_config import SslCertificateConfig
from cohesity_management_sdk.exceptions.request_error_error_exception import RequestErrorErrorException
class CertificatesController(BaseController):
    """A Controller to access Endpoints in the cohesity_management_sdk API.

    All five certificate endpoints share an identical request pipeline
    (build URL, build headers, execute with auth, validate status,
    deserialize JSON), so the pipeline lives in the private helper
    ``_call_endpoint`` and each public method only supplies its HTTP verb,
    URL path, endpoint name and response model.
    """

    def __init__(self, config=None, client=None, call_back=None):
        super(CertificatesController, self).__init__(client, call_back)
        self.logger = logging.getLogger(__name__)
        self.config = config

    def _call_endpoint(self, verb, url_path, name,
                       body=None, has_body=False, deserializer=None):
        """Run the shared request pipeline for one endpoint.

        Args:
            verb (str): ``http_client`` method name
                ('get', 'post', 'put' or 'delete').
            url_path (str): Endpoint path appended to the configured
                base URI.
            name (str): Endpoint name, used for logging and request
                tracking.
            body (object, optional): Payload, JSON-serialized when
                ``has_body`` is True.
            has_body (bool): True for write endpoints that send a JSON
                body (adds the content-type header and the payload).
            deserializer (callable, optional): ``from_dictionary`` factory
                for the response model; when None nothing is returned.

        Raises:
            APIException: When an error occurs while fetching the data
                from the remote API.
        """
        try:
            self.logger.info('{} called.'.format(name))
            # Prepare query URL
            self.logger.info('Preparing query URL for {}.'.format(name))
            _query_url = APIHelper.clean_url(
                self.config.get_base_uri() + url_path)
            # Prepare headers. DELETE endpoints (no body, no typed
            # response) send none, exactly as before the refactor.
            _kwargs = {}
            if has_body or deserializer is not None:
                self.logger.info('Preparing headers for {}.'.format(name))
                _headers = {'accept': 'application/json'}
                if has_body:
                    _headers['content-type'] = 'application/json; charset=utf-8'
                    _kwargs['parameters'] = APIHelper.json_serialize(body)
                _kwargs['headers'] = _headers
            # Prepare and execute request
            self.logger.info(
                'Preparing and executing request for {}.'.format(name))
            _request = getattr(self.http_client, verb)(_query_url, **_kwargs)
            AuthManager.apply(_request, self.config)
            _context = self.execute_request(_request, name=name)
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for {}.'.format(name))
            if _context.response.status_code == 0:
                raise RequestErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Return appropriate type (void endpoints return None).
            if deserializer is not None:
                return APIHelper.json_deserialize(
                    _context.response.raw_body, deserializer)
        except Exception as e:
            self.logger.error(e, exc_info=True)
            raise

    def get_certificate_list(self):
        """Does a GET request to /public/certificates/global.

        Returns all certificates and their details generated from this
        cluster.

        Returns:
            ListCertResponse: Response from the API. List Host Certificate
                Response.

        Raises:
            APIException: When an error occurs while fetching the data
                from the remote API.
        """
        return self._call_endpoint(
            'get', '/public/certificates/global', 'get_certificate_list',
            deserializer=ListCertResponse.from_dictionary)

    def create_deploy_host_certificate(self, body=None):
        """Does a POST request to /public/certificates/global.

        Returns the global certificate for a single or multiple hosts.

        Args:
            body (DeployCertParameters, optional): Request to generate and
                deploy a new certificate.

        Returns:
            CertificateDetails: Response from the API. Host Certificate
                Download Response.

        Raises:
            APIException: When an error occurs while fetching the data
                from the remote API.
        """
        return self._call_endpoint(
            'post', '/public/certificates/global',
            'create_deploy_host_certificate',
            body=body, has_body=True,
            deserializer=CertificateDetails.from_dictionary)

    def delete_web_server_certificate(self):
        """Does a DELETE request to /public/certificates/webServer.

        Returns delete status upon completion.

        Returns:
            void: Response from the API. No Content

        Raises:
            APIException: When an error occurs while fetching the data
                from the remote API.
        """
        self._call_endpoint(
            'delete', '/public/certificates/webServer',
            'delete_web_server_certificate')

    def get_web_server_certificate(self):
        """Does a GET request to /public/certificates/webServer.

        Returns the Server Certificate configured on the cluster.

        Returns:
            SslCertificateConfig: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data
                from the remote API.
        """
        return self._call_endpoint(
            'get', '/public/certificates/webServer',
            'get_web_server_certificate',
            deserializer=SslCertificateConfig.from_dictionary)

    def update_web_server_certificate(self, body=None):
        """Does a PUT request to /public/certificates/webServer.

        Returns the updated Web Server Certificate on the cluster.

        Args:
            body (SslCertificateConfig, optional): New certificate
                configuration to apply.

        Returns:
            SslCertificateConfig: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data
                from the remote API.
        """
        return self._call_endpoint(
            'put', '/public/certificates/webServer',
            'update_web_server_certificate',
            body=body, has_body=True,
            deserializer=SslCertificateConfig.from_dictionary)
from mimetypes import MimeTypes
from pathlib import Path
from typing import Iterator, Optional
from flask import Response, send_from_directory, stream_with_context
from werkzeug.utils import secure_filename
from restapi.config import DATA_PATH
from restapi.exceptions import NotFound
from restapi.services.uploader import Uploader
from restapi.utilities.logs import log
DEFAULT_CHUNK_SIZE = 1048576 # 1 MB
class Downloader:
    """Static helpers that serve files from an upload folder over HTTP.

    ``send_file_content`` serves a file inline (small/displayable files);
    ``send_file_streamed`` streams it as an attachment in chunks.
    """

    @staticmethod
    def guess_mime_type(path: Path) -> Optional[str]:
        """Return the MIME type guessed from *path*'s suffix, or None."""
        # guess_type expects a str as argument because
        # it is intended to be used with urls and not with paths
        mime_type = MimeTypes().guess_type(str(path))
        # guess_type returns a (type, encoding) pair; only the type is used
        return mime_type[0]

    @staticmethod
    def _resolve_file(filename: str, subfolder: Path) -> Path:
        """Validate the folder, sanitize the name and return the file path.

        Shared by both send_* methods (previously duplicated in each).
        Raises NotFound when the sanitized name is not a file in subfolder.
        """
        Uploader.validate_upload_folder(subfolder)
        filepath = subfolder.joinpath(secure_filename(filename))
        if not filepath.is_file():
            raise NotFound("The requested file does not exist")
        return filepath

    # This is good for small files, in particular with displayable files
    # like images, videos or PDF files
    # It is also good for media files by sending Range header
    @staticmethod
    def send_file_content(
        filename: str,
        subfolder: Path,
        mime: Optional[str] = None,
    ) -> Response:
        """Serve a file inline; the MIME type is guessed when not given."""
        filepath = Downloader._resolve_file(filename, subfolder)
        if mime is None:
            mime = Downloader.guess_mime_type(filepath)
        log.info("Sending file content from {}", filepath)
        # This function is mainly used for displayable files like images and video
        # so that DO NOT SET as_attachment=True that would force the download
        return send_from_directory(subfolder, filepath.name, mimetype=mime)

    @staticmethod
    def read_in_chunks(
        path: Path, chunk_size: int = DEFAULT_CHUNK_SIZE
    ) -> Iterator[bytes]:
        """
        Lazy function (generator) to read a file piece by piece.
        """
        with open(path, "rb") as file_handle:
            # the walrus loop stops at EOF, when read() returns b""
            while data := file_handle.read(chunk_size):
                yield data

    # this is good for large files
    @staticmethod
    def send_file_streamed(
        filename: str,
        subfolder: Path,
        mime: Optional[str] = None,
        out_filename: Optional[str] = None,
    ) -> Response:
        """Stream a file as an attachment, chunked via read_in_chunks."""
        filepath = Downloader._resolve_file(filename, subfolder)
        if mime is None:
            mime = Downloader.guess_mime_type(filepath)
        log.info("Providing streamed content from {} (mime={})", filepath, mime)
        response = Response(
            stream_with_context(Downloader.read_in_chunks(filepath)),
            mimetype=mime,
        )
        if not out_filename:
            out_filename = filepath.name
        # Fix: quote the filename — unquoted values produce a malformed
        # header for names containing spaces or special characters (RFC 6266)
        response.headers["Content-Disposition"] = (
            f'attachment; filename="{out_filename}"'
        )
        response.headers["Content-Length"] = filepath.stat().st_size
        return response
from pathlib import Path
from typing import Iterator, Optional
from flask import Response, send_from_directory, stream_with_context
from werkzeug.utils import secure_filename
from restapi.config import DATA_PATH
from restapi.exceptions import NotFound
from restapi.services.uploader import Uploader
from restapi.utilities.logs import log
DEFAULT_CHUNK_SIZE = 1048576 # 1 MB
class Downloader:
    """Static helpers that serve files from an upload folder over HTTP."""

    @staticmethod
    def guess_mime_type(path: Path) -> Optional[str]:
        """Return the MIME type guessed from the path's suffix, or None."""
        # guess_type expects a str as argument because
        # it is intended to be used with urls and not with paths
        mime_type = MimeTypes().guess_type(str(path))
        # guess_type returns a (type, encoding) pair; only the type is used
        return mime_type[0]

    # This is good for small files, in particular with displayable files
    # like images, videos or PDF files
    # It is also good for media files by sending Range header
    @staticmethod
    def send_file_content(
        filename: str,
        subfolder: Path,
        mime: Optional[str] = None,
    ) -> Response:
        """Serve a file inline (not as an attachment).

        Raises NotFound when the sanitized name is not a file in subfolder.
        """
        Uploader.validate_upload_folder(subfolder)
        # sanitize the client-supplied name before touching the filesystem
        filename = secure_filename(filename)
        filepath = subfolder.joinpath(filename)
        if not filepath.is_file():
            raise NotFound("The requested file does not exist")
        if mime is None:
            mime = Downloader.guess_mime_type(filepath)
        log.info("Sending file content from {}", filepath)
        # This function is mainly used for displayable files like images and video
        # so that DO NOT SET as_attachment=True that would force the download
        return send_from_directory(subfolder, filename, mimetype=mime)

    @staticmethod
    def read_in_chunks(
        path: Path, chunk_size: int = DEFAULT_CHUNK_SIZE
    ) -> Iterator[bytes]:
        """
        Lazy function (generator) to read a file piece by piece.
        """
        with open(path, "rb") as file_handle:
            # the walrus loop stops at EOF, when read() returns b""
            while data := file_handle.read(chunk_size):
                yield data

    # this is good for large files
    @staticmethod
    def send_file_streamed(
        filename: str,
        subfolder: Path,
        mime: Optional[str] = None,
        out_filename: Optional[str] = None,
    ) -> Response:
        """Stream a file as an attachment, chunked via read_in_chunks.

        Raises NotFound when the sanitized name is not a file in subfolder.
        """
        Uploader.validate_upload_folder(subfolder)
        # sanitize the client-supplied name before touching the filesystem
        filename = secure_filename(filename)
        filepath = subfolder.joinpath(filename)
        if not filepath.is_file():
            raise NotFound("The requested file does not exist")
        if mime is None:
            mime = Downloader.guess_mime_type(filepath)
        log.info("Providing streamed content from {} (mime={})", filepath, mime)
        response = Response(
            stream_with_context(Downloader.read_in_chunks(filepath)),
            mimetype=mime,
        )
        if not out_filename:
            out_filename = filepath.name
        # NOTE(review): the filename value is unquoted; names containing
        # spaces or special characters yield a malformed header — consider
        # quoting per RFC 6266.
        response.headers["Content-Disposition"] = f"attachment; filename={out_filename}"
        response.headers["Content-Length"] = filepath.stat().st_size
        return response
import copy
import pickle
import pandas as pd
import numpy as np
import os, sys
from ..Model import models
from ..Input import readInput as readIn
from ..Data import curate as curate
from ..Data import descriptors as descr
from ..Data import postprocess as pproc
from ..Validation import modelability as modi
from ..Validation import appDom as appDom
class Model:
    '''
    Class which contains information and methods relating to a QSAR model.

    Attributes:
        paramList: (list) One modelParams dict per completed run; index 0 is
            the training run, later entries are test runs.
        modelParams: (dict) Working state for the run currently in progress.
    '''

    # Constructor
    def __init__(self, inputParams=None):
        '''
        Constructor.
        INPUT
            inputParams: (dict) Input parameters controlling the run.
        OUTPUT
        '''
        # Bug fix: the previous mutable default argument ({}) was shared
        # across every instance created with defaults; use a None sentinel.
        if inputParams is None:
            inputParams = {}
        # List to store model parameters for each run
        self.paramList = []
        # Set up model parameters
        self.modelParams = {'inputParams': inputParams}

    # Shallow copy
    def __copy__(self):
        '''
        Return a shallow copy of this model.

        Bug fix: the previous implementation returned ``model(self.name)``,
        which always failed — neither a lowercase ``model`` class nor a
        ``name`` attribute exists.
        '''
        dup = type(self)()
        dup.paramList = copy.copy(self.paramList)
        dup.modelParams = copy.copy(self.modelParams)
        return dup

    # Deep copy
    def __deepcopy__(self, memo):
        '''
        Return a deep copy of this model (same NameError fix as __copy__).
        '''
        dup = type(self)()
        # register before recursing so self-references resolve correctly
        memo[id(self)] = dup
        dup.paramList = copy.deepcopy(self.paramList, memo)
        dup.modelParams = copy.deepcopy(self.modelParams, memo)
        return dup

    # Load DescDF
    def load_Descriptors(self, modelParams):
        '''
        Load descriptor data frame from file.
        INPUT
            modelParams: (dict) Model parameters (unused; the path is read
                from self.modelParams — kept for interface compatibility).
        OUTPUT
        '''
        # Load data
        fileName = self.modelParams['inputParams']['read_csv_desc']
        self.modelParams['DescDF'] = readIn.read_CSV(fileName)

    # Load inDF
    def load_Input(self, modelParams, quiet=False):
        '''
        Load input (activity,structure) data frame from file for training.
        If testing, only the structures are loaded.
        INPUT
            modelParams: (dict) Model parameters (unused; kept for
                interface compatibility).
            quiet: (bool) Suppress progress printing.
        OUTPUT
        '''
        # Print start message
        if (not quiet):
            print("========================================")
            print("Read Input File")
        # Load data; keep a pristine copy in inDF and mutate workingDF
        fileName = self.modelParams['inputParams']['csvfilename']
        self.modelParams['inDF'] = readIn.read_CSV(fileName)
        self.modelParams['workingDF'] = self.modelParams['inDF'].copy()
        # Print number of compounds
        if (not quiet):
            print("\tNumber of Compounds: " + str((self.modelParams['inDF'].shape)[0]))
        # Print end
        if (not quiet):
            print("========================================")
            print("")

    # Data curation
    def data_curation(self, quiet=False):
        '''
        Perform data curation operations on workingDF.
        INPUT
            quiet: (bool) Suppress progress printing.
        OUTPUT
        '''
        # Print start message
        if (not quiet):
            print("========================================")
            print("Data Curation")
        # Variables
        strcolname = self.modelParams['inputParams']["strcolname"]
        filter_atnum = self.modelParams['inputParams']["filter_atnum"]
        # Remove empty rows
        if (self.modelParams['inputParams']["rm_empty_rows"].lower() == 'true'):
            self.modelParams['workingDF'] = curate.removeEmptyRows(self.modelParams['workingDF'])
            if (not quiet):
                print("\tNumber of Empty Rows Removed: " + str((self.modelParams['inDF'].shape)[0] - (self.modelParams['workingDF'].shape)[0]))
        # Remove duplicates
        if (self.modelParams['inputParams']["rm_duplicates"].lower() == 'true'):
            startNum = (self.modelParams['workingDF'].shape)[0]
            self.modelParams['workingDF'] = curate.removeDuplicateStructures(self.modelParams['workingDF'], colName=strcolname)
            endNum = (self.modelParams['workingDF'].shape)[0]
            if (not quiet):
                print("\tNumber of Duplicates Removed: " + str(startNum - endNum))
        # Remove invalid SMILES
        if (self.modelParams['inputParams']["rm_invalid"].lower() == 'true'):
            startNum = (self.modelParams['workingDF'].shape)[0]
            self.modelParams['workingDF'] = curate.removeInvalidSmiles(self.modelParams['workingDF'], colName=strcolname)
            endNum = (self.modelParams['workingDF'].shape)[0]
            if (not quiet):
                print("\tNumber of Invalid SMILES Removed: " + str(startNum - endNum))
        # Remove salts
        if (self.modelParams['inputParams']["rm_salts"].lower() == 'true'):
            startNum = (self.modelParams['workingDF'].shape)[0]
            self.modelParams['workingDF'] = curate.removeSalts(self.modelParams['workingDF'], colName=strcolname)
            endNum = (self.modelParams['workingDF'].shape)[0]
            if (not quiet):
                print("\tNumber of Salts Removed: " + str(startNum - endNum))
        # Filter elements (always applied, unlike the optional steps above)
        startNum = (self.modelParams['workingDF'].shape)[0]
        self.modelParams['workingDF'] = curate.filterElements(self.modelParams['workingDF'],
                                                              keepEle=filter_atnum,
                                                              colName=strcolname)
        endNum = (self.modelParams['workingDF'].shape)[0]
        if (not quiet):
            print("\tNumber of Compounds Removed by Element Filtering: " + str(startNum - endNum))
        # Print end
        if (not quiet):
            print("========================================")
            print("")

    # Calculate structures
    def calculate_structures(self):
        '''
        Calculate structures from structural information in workingDF.
        INPUT
        OUTPUT
        '''
        # Variables
        strcolname = self.modelParams['inputParams']["strcolname"]
        # Generate 3d structures (only when 3d descriptors were requested)
        if (self.modelParams['inputParams']["calc_3d"].lower() == 'true'):
            print('Calculating 3d Coordinates...')
            self.modelParams['workingDF'] = curate.smi2sdf_par(self.modelParams['workingDF'], colName=strcolname)

    # Calculate descriptors
    def calculate_descriptors(self):
        '''
        Calculate descriptors from structural information in inDF.
        INPUT
        OUTPUT
        '''
        # Variables
        strcolname = self.modelParams['inputParams']["strcolname"]
        # Set dimensionality
        if (self.modelParams['inputParams']["calc_3d"].lower() == 'true'):
            print('Calculating 3d Descriptors...')
            coord = 3
            colName = 'SDF'
        else:
            print('Calculating 2d Descriptors...')
            coord = 2
            colName = strcolname
        # Calculate descriptors
        self.modelParams['DescDF'] = descr.calc_mordred(self.modelParams['workingDF'], colName=colName, coord=coord)
        # Clean descriptors for training (empty paramList == training run)
        if (len(self.paramList) == 0):
            print('Cleaning Descriptors...')
            self.modelParams['DescDF'] = descr.cleanDescriptors(self.modelParams['DescDF'], descStart=coord)
        # Remove structure columns to prepare for modeling
        print('Removing structure columns...')
        if (coord == 3):
            self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(labels=[strcolname, 'SDF'], axis=1)
        else:
            self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(labels=[strcolname], axis=1)

    # Curate descriptors
    def descriptor_curation(self):
        '''
        Curate features: drop low-variance and correlated descriptors and
        normalize (training), or match/normalize against the training run
        (testing).
        INPUT
        OUTPUT
        '''
        # Imports
        import sklearn.preprocessing as skp
        # Only curate if training
        if (len(self.paramList) == 0):
            # Remove features with low standard deviation
            print('Removing features with low standard deviation...')
            for std in self.modelParams['inputParams']["low_std"]:
                self.modelParams['DescDF'] = descr.removeSTD(self.modelParams['DescDF'], thresh=std)
            # Remove correlated descriptors
            print('Removing correlated descriptors...')
            for corr in self.modelParams['inputParams']["corr_desc"]:
                self.modelParams['DescDF'] = descr.removeCorrelated(self.modelParams['DescDF'], thresh=corr)
            # Normalize descriptors (column-wise; norms saved for test runs)
            print('Normalizing descriptors...')
            normDesc, self.modelParams['norms'] = skp.normalize(self.modelParams['DescDF'].iloc[:, 1:], axis=0, return_norm=True)
            for index, colName in enumerate(self.modelParams['DescDF'].columns):
                # Skip activity column
                if (index == 0):
                    continue
                self.modelParams['DescDF'][colName] = normDesc[:, index - 1]
            # debug output retained from the original implementation
            # (prints the last normalized column)
            print('training!')
            print(self.modelParams['DescDF'][colName])
        # Match descriptors for testing
        if (len(self.paramList) >= 1):
            print('Matching Descriptors for Training...')
            # Determine columns to remove (not present in the training set)
            train_colNames = (self.paramList[0]['DescDF'].columns.values)[1:]
            test_colNames = (self.modelParams['DescDF'].columns.values)[1:]
            rmCols = [colName for colName in test_colNames if colName not in train_colNames]
            # Remove columns
            self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(labels=rmCols, axis=1)
            # Normalize descriptors according to training norms
            print('Normalizing descriptors...')
            for index, colName in enumerate(self.modelParams['DescDF'].columns):
                # Skip activity column
                if (index == 0):
                    continue
                self.modelParams['DescDF'][colName] = self.modelParams['DescDF'][colName] / self.paramList[0]['norms'][index - 1]
            # debug output retained from the original implementation
            print('testing!')
            print(self.modelParams['DescDF'][colName])

    # Modelability
    def calculate_MODI(self):
        '''
        Calculate modelability and possibly remove activity cliffs.
        INPUT
        OUTPUT
        '''
        # Calculate MODI
        if (self.modelParams['inputParams']["model_type"] == "classification"):
            self.modelParams['MODIVal'], self.modelParams['cliffIdx'] = modi.cMODI(self.modelParams['DescDF'])
            print('Classification MODI: ' + str(self.modelParams['MODIVal']))
        else:
            self.modelParams['MODIVal'], self.modelParams['cliffIdx'] = modi.rMODI_Spectral(self.modelParams['DescDF'])
            print('Regression MODI (Spectral): ' + str(self.modelParams['MODIVal']))
        # Remove cliffs
        if (self.modelParams['inputParams']["rm_modi"].lower() == "true"):
            # Save information about full descriptors
            self.modelParams['DescDF_FullDesc'] = self.modelParams['DescDF'].copy()
            self.modelParams['MODIVal_FullDesc'] = self.modelParams['MODIVal']
            self.modelParams['cliffIdx_FullDesc'] = copy.deepcopy(self.modelParams['cliffIdx'])
            # Remove cliffs
            print('Removing ' + str(len(self.modelParams['cliffIdx'])) + ' compounds for MODI...')
            self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(self.modelParams['DescDF'].index[self.modelParams['cliffIdx']])
            # Compute new MODI
            # NOTE(review): the post-removal MODI is always recomputed with
            # cMODI, even for regression models — confirm this is intended.
            self.modelParams['MODIVal'], self.modelParams['cliffIdx'] = modi.cMODI(self.modelParams['DescDF'])
            print('New MODI: ' + str(self.modelParams['MODIVal']))

    # Fit model
    def fit_model(self):
        '''
        Split the descriptor data, fit the configured model and optionally
        post-process regression predictions.
        INPUT
        OUTPUT
        '''
        # Imports
        import sklearn.model_selection as skm
        # Split data (fixed seed for reproducibility)
        self.modelParams['trainDF'], self.modelParams['testDF'] = skm.train_test_split(
            self.modelParams['DescDF'],
            test_size=self.modelParams['inputParams']["test_split"],
            random_state=42)
        print("Total number of compounds: " + str((self.modelParams['DescDF'].shape)[0]))
        # Fit model
        if (self.modelParams['inputParams']["model_type"] == "regression"):
            print('Regression...')
            # Scikit learn random forest | regression
            if (self.modelParams['inputParams']["model"] == "random_forest"):
                print("--Random Forest--")
                (self.modelParams['Fit_Pred_Train'], self.modelParams['Fit_Train'],
                 self.modelParams['Fit_Pred_Test'], self.modelParams['Fit_Test'],
                 self.modelParams['model_Fit']) = models.model_rf_reg(self.modelParams['trainDF'], self.modelParams['testDF'])
            # Scikit learn neural network | regression
            elif (self.modelParams['inputParams']["model"] == "neural_network"):
                print("--Neural Network--")
                (self.modelParams['Fit_Pred_Train'], self.modelParams['Fit_Train'],
                 self.modelParams['Fit_Pred_Test'], self.modelParams['Fit_Test'],
                 self.modelParams['model_Fit']) = models.model_nn_reg(self.modelParams['trainDF'], self.modelParams['testDF'])
            # KNN Read across | regression
            elif (self.modelParams['inputParams']["model"] == "knn_ra"):
                print("--KNN Read Across--")
                # KNN read-across produces no training-set predictions
                self.modelParams['Fit_Pred_Train'] = []
                (self.modelParams['Fit_Train'], self.modelParams['Fit_Pred_Test'],
                 self.modelParams['Fit_Test'],
                 self.modelParams['model_Fit']) = models.model_knnra_reg(self.modelParams['trainDF'], self.modelParams['testDF'], knn=2)
        else:
            print('Classification...')
            # Scikit learn random forest | classification
            if (self.modelParams['inputParams']["model"] == "random_forest"):
                print("--Random Forest--")
                (self.modelParams['Fit_Pred_Train'], self.modelParams['Fit_Train'],
                 self.modelParams['Fit_Pred_Test'], self.modelParams['Fit_Test'],
                 self.modelParams['model_Fit']) = models.model_rf_class(self.modelParams['trainDF'], self.modelParams['testDF'])
        # Post processing (PCA-based shift of regression predictions)
        if (self.modelParams['inputParams']["postproc"].lower() == "true"):
            fitParams = pproc.pca_shift_init(self.modelParams['Fit_Train'], self.modelParams['Fit_Pred_Train'], plot=False)
            self.modelParams['Fit_Pred_Train_Shift'], r_value_train = pproc.pca_shift_calc(self.modelParams['Fit_Train'], self.modelParams['Fit_Pred_Train'], fitParams, plot=True)
            # Only apply if test set is not empty
            if (len(self.modelParams['Fit_Test']) != 0):
                self.modelParams['Fit_Pred_Test_Shift'], r_value_test = pproc.pca_shift_calc(self.modelParams['Fit_Test'], self.modelParams['Fit_Pred_Test'], fitParams, plot=True)
            else:
                self.modelParams['Fit_Pred_Test_Shift'] = []
            # Plot final regression
            models.plotPrediction(self.modelParams['Fit_Train'], self.modelParams['Fit_Pred_Train_Shift'], self.modelParams['Fit_Test'], self.modelParams['Fit_Pred_Test_Shift'])
        else:
            self.modelParams['Fit_Pred_Train_Shift'] = []
            self.modelParams['Fit_Pred_Test_Shift'] = []

    # Save model
    def save_model(self, outFileName='model.pickle'):
        '''
        Save model.
        INPUT
            outFileName: (str) Name of output file.
        OUTPUT
        '''
        # Save model class
        with open(outFileName, 'wb') as outFile:
            pickle.dump(self, outFile)

    # Train model
    def train_model(self):
        '''
        Train model by performing all data and descriptor curation.
        INPUT
        OUTPUT
        '''
        # Variables
        loaded_descr = False
        # Read input file (precomputed descriptors take precedence)
        if (len(self.modelParams['inputParams']["read_csv_desc"]) > 0):
            print('Loading descriptors...')
            self.load_Descriptors(self.modelParams)
            loaded_descr = True
        else:
            print("Reading input file...")
            self.load_Input(self.modelParams)
        # Work on data if not provided with descriptor file
        if (loaded_descr == False):
            # Data curation
            self.data_curation()
            # Calculate structures
            self.calculate_structures()
            # Calculate descriptors
            self.calculate_descriptors()
            # Curate descriptors
            self.descriptor_curation()
            # Save descriptors
            if (self.modelParams['inputParams']["save_csv"].strip() != ""):
                self.modelParams['DescDF'].to_csv(self.modelParams['inputParams']["save_csv"], index=False)
        # Descriptor curation when loading descriptor files
        if ((loaded_descr == True) and (self.modelParams['inputParams']["curate_desc"].lower() == "true")):
            self.descriptor_curation()
        # Calculate MODI
        self.calculate_MODI()
        # Fit model
        self.fit_model()
        # Store results and reset the working state for the next run
        self.paramList.append(self.modelParams)
        # (renamed from `save_model`, which shadowed the method name)
        save_path = self.modelParams['inputParams']["save_model"]
        self.modelParams = {}
        # Save model
        if (save_path.strip() != ""):
            self.save_model(outFileName=save_path)

    # Test model
    def test_model(self, inputParameters=None):
        '''
        Test model on set of data.
        INPUT
            inputParameters: (dict) Input parameters for the test run.
        OUTPUT
        '''
        # Initialize model parameters
        self.modelParams = {'inputParams': inputParameters}
        # Load file
        self.load_Input(self.modelParams)
        # Data curation
        self.data_curation()
        # Calculate structures
        self.calculate_structures()
        # Calculate descriptors
        self.calculate_descriptors()
        # Curate descriptors
        self.descriptor_curation()
        # Save descriptors
        if (self.modelParams['inputParams']["save_csv"].strip() != ""):
            self.modelParams['DescDF'].to_csv(self.modelParams['inputParams']["save_csv"], index=False)
        # Test model with the fit stored by the training run
        modelFit = self.paramList[0]['model_Fit']
        Y_Pred, X_Test = models.model_test(self.modelParams['DescDF'], modelFit)
        # Save results
        saveDF = pd.DataFrame()
        saveDF['predict'] = Y_Pred
        saveDF['SMILES'] = self.modelParams['workingDF']['SMILES'].values
        saveDF.to_csv('prediction.csv', index=False)
if (__name__ == '__main__'):
pass | models/classes/pyQSAR/Src/Model/c_model.py | import copy
import pickle
import pandas as pd
import numpy as np
import os, sys
from ..Model import models
from ..Input import readInput as readIn
from ..Data import curate as curate
from ..Data import descriptors as descr
from ..Data import postprocess as pproc
from ..Validation import modelability as modi
from ..Validation import appDom as appDom
class Model:
    '''
    Class which contains information and methods relating to a QSAR model.
    '''
    # Constructor
    def __init__(self, inputParams=None):
        '''
        Initialize an empty model.
        INPUT
            inputParams: (dict) Input parameters for the first run.
        OUTPUT
        '''
        # BUG FIX: original used a mutable default argument (inputParams={})
        # which is shared across all calls; create a fresh dict instead.
        if inputParams is None:
            inputParams = {}
        # List to store model parameters for each run
        self.paramList = []
        # Set up model parameters
        self.modelParams = {'inputParams': inputParams}

    # Shallow copy
    def __copy__(self):
        '''
        Return a shallow copy sharing attribute objects with self.
        '''
        # BUG FIX: original returned model(self.name) -- 'model' is undefined
        # (class is 'Model') and no 'name' attribute exists, so copy.copy()
        # always raised. Standard shallow-copy protocol instead.
        dup = type(self).__new__(type(self))
        dup.__dict__.update(self.__dict__)
        return dup

    # Deep copy
    def __deepcopy__(self, memo):
        '''
        Return a fully independent copy of this model.
        '''
        # BUG FIX: same undefined-name defect as __copy__. Register in memo
        # before recursing so reference cycles are handled.
        dup = type(self).__new__(type(self))
        memo[id(self)] = dup
        for key, value in self.__dict__.items():
            setattr(dup, key, copy.deepcopy(value, memo))
        return dup
# Load DescDF
def load_Descriptors(self, modelParams):
    '''
    Load descriptor data frame from file.
    INPUT
        modelParams: (dict) Model parameters.
    OUTPUT
    '''
    # NOTE(review): reads the path from self.modelParams; the modelParams
    # argument itself is unused -- confirm before removing it.
    desc_path = self.modelParams['inputParams']['read_csv_desc']
    self.modelParams['DescDF'] = readIn.read_CSV(desc_path)
# Load inDF
def load_Input(self, modelParams, quiet=False):
    '''
    Load input (activity,structure) data frame from file for training. If testing, only the structures are loaded.
    INPUT
        modelParams: (dict) Model parameters.
        quiet: (bool) Suppress progress output when True.
    OUTPUT
    '''
    # Print start message
    if not quiet:
        print("========================================")
        print("Read Input File")
    # Read the CSV and keep a working copy separate from the raw input
    csv_path = self.modelParams['inputParams']['csvfilename']
    self.modelParams['inDF'] = readIn.read_CSV(csv_path)
    self.modelParams['workingDF'] = self.modelParams['inDF'].copy()
    # Report compound count and close the banner
    if not quiet:
        print("\tNumber of Compounds: " + str((self.modelParams['inDF'].shape)[0]))
        print("========================================")
        print("")
# Data curation
def data_curation(self, quiet=False):
    '''
    Perform data curation operations on workingDF.
    INPUT
        quiet: (bool) Suppress progress output when True.
    OUTPUT
    '''
    # Print start message
    if not quiet:
        print("========================================")
        print("Data Curation")
    # Frequently used input parameters
    struct_col = self.modelParams['inputParams']["strcolname"]
    keep_elements = self.modelParams['inputParams']["filter_atnum"]
    # Remove empty rows
    if self.modelParams['inputParams']["rm_empty_rows"].lower() == 'true':
        self.modelParams['workingDF'] = curate.removeEmptyRows(self.modelParams['workingDF'])
        if not quiet:
            print("\tNumber of Empty Rows Removed: " + str((self.modelParams['inDF'].shape)[0]-(self.modelParams['workingDF'].shape)[0]))
    # Remove duplicates
    if self.modelParams['inputParams']["rm_duplicates"].lower() == 'true':
        n_before = (self.modelParams['workingDF'].shape)[0]
        self.modelParams['workingDF'] = curate.removeDuplicateStructures(self.modelParams['workingDF'], colName=struct_col)
        n_after = (self.modelParams['workingDF'].shape)[0]
        if not quiet:
            print("\tNumber of Duplicates Removed: " + str(n_before - n_after))
    # Remove invalid SMILES
    if self.modelParams['inputParams']["rm_invalid"].lower() == 'true':
        n_before = (self.modelParams['workingDF'].shape)[0]
        self.modelParams['workingDF'] = curate.removeInvalidSmiles(self.modelParams['workingDF'], colName=struct_col)
        n_after = (self.modelParams['workingDF'].shape)[0]
        if not quiet:
            print("\tNumber of Invalid SMILES Removed: " + str(n_before - n_after))
    # Remove salts
    if self.modelParams['inputParams']["rm_salts"].lower() == 'true':
        n_before = (self.modelParams['workingDF'].shape)[0]
        self.modelParams['workingDF'] = curate.removeSalts(self.modelParams['workingDF'], colName=struct_col)
        n_after = (self.modelParams['workingDF'].shape)[0]
        if not quiet:
            print("\tNumber of Salts Removed: " + str(n_before - n_after))
    # Filter elements (always performed, independent of the flags above)
    n_before = (self.modelParams['workingDF'].shape)[0]
    self.modelParams['workingDF'] = curate.filterElements(self.modelParams['workingDF'],
                                                          keepEle=keep_elements,
                                                          colName=struct_col)
    n_after = (self.modelParams['workingDF'].shape)[0]
    # Report and close the banner
    if not quiet:
        print("\tNumber of Compounds Removed by Element Filtering: " + str(n_before - n_after))
        print("========================================")
        print("")
# Calculate structures
def calculate_structures(self):
    '''
    Calculate structures from structural information in workingDF.
    INPUT
    OUTPUT
    '''
    structure_col = self.modelParams['inputParams']["strcolname"]
    # 3D coordinates are generated only when explicitly requested
    if self.modelParams['inputParams']["calc_3d"].lower() == 'true':
        print('Calculating 3d Coordinates...')
        self.modelParams['workingDF'] = curate.smi2sdf_par(self.modelParams['workingDF'], colName=structure_col)
# Calculate descriptors
def calculate_descriptors(self):
    '''
    Calculate descriptors from structural information in inDF.
    INPUT
    OUTPUT
    '''
    structure_col = self.modelParams['inputParams']["strcolname"]
    # Choose 2d vs 3d descriptor generation; 3d reads from the 'SDF' column
    use_3d = self.modelParams['inputParams']["calc_3d"].lower() == 'true'
    if use_3d:
        print('Calculating 3d Descriptors...')
        coord = 3
        colName = 'SDF'
    else:
        print('Calculating 2d Descriptors...')
        coord = 2
        colName = structure_col
    # Calculate descriptors
    self.modelParams['DescDF'] = descr.calc_mordred(self.modelParams['workingDF'], colName=colName, coord=coord)
    # Clean descriptors only for training (paramList is empty before first run)
    if len(self.paramList) == 0:
        print('Cleaning Descriptors...')
        self.modelParams['DescDF'] = descr.cleanDescriptors(self.modelParams['DescDF'], descStart=coord)
    # Remove structure columns to prepare for modeling
    print('Removing structure columns...')
    drop_labels = [structure_col, 'SDF'] if coord == 3 else [structure_col]
    self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(labels=drop_labels, axis=1)
# Curate descriptors
def descriptor_curation(self):
'''
Curate features.
Training (paramList empty): drop low-variance and correlated descriptors,
then column-normalize, storing the norms for later test-time reuse.
Testing (paramList non-empty): drop columns unseen in training and divide
by the stored training norms.
Column 0 is treated as the activity column and is never touched.
INPUT
OUTPUT
'''
# Imports
import sklearn.preprocessing as skp
# Only curate if training
if (len(self.paramList) == 0):
# Remove features with low standard deviation
print('Removing features with low standard deviation...')
for std in self.modelParams['inputParams']["low_std"]:
self.modelParams['DescDF'] = descr.removeSTD(self.modelParams['DescDF'],thresh=std)
# Remove correlated descriptors
print('Removing correlated descriptors...')
for corr in self.modelParams['inputParams']["corr_desc"]:
self.modelParams['DescDF'] = descr.removeCorrelated(self.modelParams['DescDF'],thresh=corr)
# Normalize descriptors (norms kept so test data can be scaled identically)
print('Normalizing descriptors...')
normDesc,self.modelParams['norms'] = skp.normalize(self.modelParams['DescDF'].iloc[:,1:],axis=0,return_norm=True)
for index,colName in enumerate(self.modelParams['DescDF'].columns):
# Skip activity column
if (index == 0):
continue
# normDesc excludes the activity column, hence the index-1 offset
self.modelParams['DescDF'][colName] = normDesc[:,index-1]
# NOTE(review): the two prints below dump every normalized column and look
# like leftover debug output -- candidates for removal.
print('training!')
print(self.modelParams['DescDF'][colName])
# Match descriptors for testing
if (len(self.paramList) >= 1):
print('Matching Descriptors for Training...')
# Determine columns to remove
train_colNames = (self.paramList[0]['DescDF'].columns.values)[1:]
test_colNames = (self.modelParams['DescDF'].columns.values)[1:]
rmCols = [colName for colName in test_colNames if colName not in train_colNames]
# Remove columns
self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(labels=rmCols,axis=1)
# Normalize descriptors according to training norms
print('Normalizing descriptors...')
for index,colName in enumerate(self.modelParams['DescDF'].columns):
# Skip activity column
if (index == 0):
continue
self.modelParams['DescDF'][colName] = self.modelParams['DescDF'][colName]/self.paramList[0]['norms'][index-1]
# NOTE(review): leftover debug output, see above.
print('testing!')
print(self.modelParams['DescDF'][colName])
# Calculate training set applicability domain
# Modelability
def calculate_MODI(self):
    '''
    Calculate modelability and possibly remove activity cliffs.
    Uses cMODI for classification models and spectral rMODI for regression
    models; optionally drops cliff compounds and recomputes.
    INPUT
    OUTPUT
    '''
    classification = self.modelParams['inputParams']["model_type"] == "classification"
    # Calculate MODI with the metric matching the model type
    if classification:
        self.modelParams['MODIVal'], self.modelParams['cliffIdx'] = modi.cMODI(self.modelParams['DescDF'])
        print('Classification MODI: ' + str(self.modelParams['MODIVal']))
    else:
        self.modelParams['MODIVal'], self.modelParams['cliffIdx'] = modi.rMODI_Spectral(self.modelParams['DescDF'])
        print('Regression MODI (Spectral): ' + str(self.modelParams['MODIVal']))
    # Remove cliffs
    if self.modelParams['inputParams']["rm_modi"].lower() == "true":
        # Save information about full descriptors before cliff removal
        self.modelParams['DescDF_FullDesc'] = self.modelParams['DescDF'].copy()
        self.modelParams['MODIVal_FullDesc'] = self.modelParams['MODIVal']
        self.modelParams['cliffIdx_FullDesc'] = copy.deepcopy(self.modelParams['cliffIdx'])
        # Remove cliffs
        print('Removing ' + str(len(self.modelParams['cliffIdx'])) + ' compounds for MODI...')
        self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(self.modelParams['DescDF'].index[self.modelParams['cliffIdx']])
        # BUG FIX: the recomputation previously always used cMODI, even for
        # regression models; recompute with the metric matching model_type.
        if classification:
            self.modelParams['MODIVal'], self.modelParams['cliffIdx'] = modi.cMODI(self.modelParams['DescDF'])
        else:
            self.modelParams['MODIVal'], self.modelParams['cliffIdx'] = modi.rMODI_Spectral(self.modelParams['DescDF'])
        print('New MODI: ' + str(self.modelParams['MODIVal']))
# Fit model
def fit_model(self):
'''
Fit model.
Splits DescDF into train/test (fixed random_state=42), dispatches to the
estimator selected by inputParams["model_type"]/"model", and optionally
applies the PCA-shift post-processing to the predictions.
INPUT
OUTPUT
'''
# Imports
import sklearn.model_selection as skm
import sklearn.decomposition as skd
# Try PCA (disabled experiment kept for reference)
'''
ncomp = 20
pca = skd.PCA(n_components=ncomp)
pca.fit(np.transpose(self.modelParams['DescDF'].iloc[:,1:].values))
self.modelParams['DescDF'].iloc[:,1:ncomp+1] = np.transpose(pca.components_)
self.modelParams['DescDF'] = self.modelParams['DescDF'].drop(labels=self.modelParams['DescDF'].columns.values[ncomp+1:],axis=1)
'''
# Perform clustering
#modi.show_hierarchical_clustering(self.modelParams['DescDF'])
# Split data
self.modelParams['trainDF'],self.modelParams['testDF'] = skm.train_test_split(self.modelParams['DescDF'],
test_size=self.modelParams['inputParams']["test_split"],
random_state=42)
print("Total number of compounds: " + str((self.modelParams['DescDF'].shape)[0]))
# Fit model
if (self.modelParams['inputParams']["model_type"] == "regression"):
print('Regression...')
# Scikit learn random forest | regression
if (self.modelParams['inputParams']["model"] == "random_forest"):
print("--Random Forest--")
self.modelParams['Fit_Pred_Train'],self.modelParams['Fit_Train'],self.modelParams['Fit_Pred_Test'],self.modelParams['Fit_Test'],self.modelParams['model_Fit'] = models.model_rf_reg(self.modelParams['trainDF'],self.modelParams['testDF'])
# Scikit learn neural network | regression
elif (self.modelParams['inputParams']["model"] == "neural_network"):
print("--Neural Network--")
self.modelParams['Fit_Pred_Train'],self.modelParams['Fit_Train'],self.modelParams['Fit_Pred_Test'],self.modelParams['Fit_Test'],self.modelParams['model_Fit'] = models.model_nn_reg(self.modelParams['trainDF'],self.modelParams['testDF'])
# KNN Read across | regression
elif (self.modelParams['inputParams']["model"] == "knn_ra"):
print("--KNN Read Across--")
# Set training prediction to empty list
self.modelParams['Fit_Pred_Train'] = []
self.modelParams['Fit_Train'],self.modelParams['Fit_Pred_Test'],self.modelParams['Fit_Test'],self.modelParams['model_Fit'] = models.model_knnra_reg(self.modelParams['trainDF'],self.modelParams['testDF'],knn=2)
else:
print('Classification...')
# Scikit learn random forest | classification
# NOTE(review): with model_type classification and any other "model" value,
# the Fit_* keys are never set -- later code reading them would KeyError.
if (self.modelParams['inputParams']["model"] == "random_forest"):
print("--Random Forest--")
self.modelParams['Fit_Pred_Train'],self.modelParams['Fit_Train'],self.modelParams['Fit_Pred_Test'],self.modelParams['Fit_Test'],self.modelParams['model_Fit'] = models.model_rf_class(self.modelParams['trainDF'],self.modelParams['testDF'])
# Post processing
# NOTE(review): for knn_ra, Fit_Pred_Train is [] -- confirm that
# pproc.pca_shift_init handles an empty training prediction.
if (self.modelParams['inputParams']["postproc"].lower() == "true"):
fitParams = pproc.pca_shift_init(self.modelParams['Fit_Train'],self.modelParams['Fit_Pred_Train'],plot=False)
self.modelParams['Fit_Pred_Train_Shift'],r_value_train = pproc.pca_shift_calc(self.modelParams['Fit_Train'],self.modelParams['Fit_Pred_Train'],fitParams,plot=True)
# Only apply if test set is not empty
if (len(self.modelParams['Fit_Test']) != 0):
self.modelParams['Fit_Pred_Test_Shift'],r_value_test = pproc.pca_shift_calc(self.modelParams['Fit_Test'],self.modelParams['Fit_Pred_Test'],fitParams,plot=True)
else:
self.modelParams['Fit_Pred_Test_Shift'] = []
# Plot final regression
models.plotPrediction(self.modelParams['Fit_Train'],self.modelParams['Fit_Pred_Train_Shift'],self.modelParams['Fit_Test'],self.modelParams['Fit_Pred_Test_Shift'])
else:
self.modelParams['Fit_Pred_Train_Shift'] = []
self.modelParams['Fit_Pred_Test_Shift'] = []
# Save model
def save_model(self, outFileName='model.pickle'):
    '''
    Serialize this whole model object to disk via pickle.
    INPUT
        outFileName: (str) Name of output file.
    OUTPUT
    '''
    # Persist the entire object graph (parameters, fitted estimators, ...)
    with open(outFileName, 'wb') as handle:
        pickle.dump(self, handle)
# Train model
def train_model(self):
'''
Train model by performing all data and descriptor curation.
Pipeline: load input (or precomputed descriptors), curate data and
descriptors, compute MODI, fit the model, archive this run's parameters
in self.paramList, and optionally pickle the model.
INPUT
OUTPUT
'''
# Variables
loaded_descr = False
# Read input file: a non-empty 'read_csv_desc' path means descriptors were
# precomputed, so the raw-structure pipeline below is skipped.
if (len(self.modelParams['inputParams']["read_csv_desc"]) > 0):
print('Loading descriptors...')
self.load_Descriptors(self.modelParams)
loaded_descr = True
else:
print("Reading input file...")
self.load_Input(self.modelParams)
# Work on data if not provided with descriptor file
if (loaded_descr == False):
# Data curation
self.data_curation()
# Calculate structures
self.calculate_structures()
# Calculate descriptors
self.calculate_descriptors()
# Curate descriptors
self.descriptor_curation()
# Save descriptors
if (self.modelParams['inputParams']["save_csv"].strip() != ""):
self.modelParams['DescDF'].to_csv(self.modelParams['inputParams']["save_csv"],index=False)
# Descriptor curation when loading descriptor files
if ((loaded_descr == True) and (self.modelParams['inputParams']["curate_desc"].lower() == "true")):
self.descriptor_curation()
# Calculate MODI
self.calculate_MODI()
# Fit model
self.fit_model()
# Write csv of results
#outDF = pd.DataFrame(self.modelParams['Fit_Pred_Train'])
#result = pd.concat([df1, df4], axis=1, join_axes=[df1.index])
# Store results: the completed run's parameter dict is archived, then the
# working dict is reset for the next run.
self.paramList.append(self.modelParams)
# NOTE(review): local name 'save_model' shadows the save_model() method
# inside this function body; consider renaming (e.g. save_path).
save_model = self.modelParams['inputParams']["save_model"]
self.modelParams = {}
# Save model
if (save_model.strip() != ""):
self.save_model(outFileName=save_model)
# Test model
def test_model(self,inputParameters=None):
'''
Test model on set of data.
Runs the same curation pipeline as training, then predicts with the first
stored fit (self.paramList[0]['model_Fit']) and writes 'prediction.csv'
containing predicted values and SMILES.
INPUT
inputParameters: (dict) Input parameters for this prediction run.
OUTPUT
'''
# Initialize model parameters
self.modelParams = {'inputParams':inputParameters}
# Load file
self.load_Input(self.modelParams)
# Data curation
self.data_curation()
# Calculate structures
self.calculate_structures()
# Calculate descriptors
self.calculate_descriptors()
# Curate descriptors
self.descriptor_curation()
# Save descriptors
if (self.modelParams['inputParams']["save_csv"].strip() != ""):
self.modelParams['DescDF'].to_csv(self.modelParams['inputParams']["save_csv"],index=False)
# Test model
# NOTE(review): assumes train_model() was run (or a trained model was
# loaded) first so that paramList[0] exists -- otherwise IndexError.
modelFit = self.paramList[0]['model_Fit']
Y_Pred,X_Test = models.model_test(self.modelParams['DescDF'],modelFit)
# Save results
saveDF = pd.DataFrame()
saveDF['predict'] = Y_Pred
saveDF['SMILES'] = self.modelParams['workingDF']['SMILES'].values
saveDF.to_csv('prediction.csv',index=False)
if (__name__ == '__main__'):
pass | 0.383641 | 0.261405 |
"""Setup module."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_requires():
    """Read requirements.txt and return its non-empty requirement tokens."""
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the original leaked the handle from open()).
    with open("requirements.txt", "r") as req_file:
        requirements = req_file.read()
    return [token for token in requirements.split() if token != ""]
def read_description():
    """Read README.md and CHANGELOG.md."""
    # Fall back to a short built-in description when README.md is unreadable
    try:
        with open("README.md") as readme:
            return "\n" + readme.read()
    except Exception:
        return '''
Pyrandwalk is a tool for simulating random walks,
calculate the probability of given state sequences and etc.
Random walk is a representation of discrete-time,
discrete-value Markov chain model using in stochastic processes..'''
# Package metadata; executed at import time, as is conventional for setup.py.
setup(
name='pyrandwalk',
packages=['pyrandwalk'],
version='1.1',
description='Python Library for Random Walks',
long_description=read_description(),
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/sadrasabouri/pyrandwalk',
download_url='https://github.com/sadrasabouri/pyrandwalk/tarball/v1.1',
keywords="random-walk markov-chain stochastic-processes",
project_urls={
'Source': 'https://github.com/sadrasabouri/pyrandwalk',
},
install_requires=get_requires(),
python_requires='>=3.5',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
# NOTE(review): 'Intended Audience :: Developers' is duplicated (already
# listed above) -- harmless, but could be removed.
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Manufacturing',
'Intended Audience :: Science/Research',
'Topic :: Education',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
],
license='MIT',
) | setup.py | """Setup module."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_requires():
    """Read requirements.txt and return its non-empty requirement tokens."""
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (the original leaked the handle from open()).
    with open("requirements.txt", "r") as req_file:
        requirements = req_file.read()
    return [token for token in requirements.split() if token != ""]
def read_description():
    """Read README.md and CHANGELOG.md."""
    # Fall back to a short built-in description when README.md is unreadable
    try:
        with open("README.md") as readme:
            return "\n" + readme.read()
    except Exception:
        return '''
Pyrandwalk is a tool for simulating random walks,
calculate the probability of given state sequences and etc.
Random walk is a representation of discrete-time,
discrete-value Markov chain model using in stochastic processes..'''
# Package metadata; executed at import time, as is conventional for setup.py.
setup(
name='pyrandwalk',
packages=['pyrandwalk'],
version='1.1',
description='Python Library for Random Walks',
long_description=read_description(),
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/sadrasabouri/pyrandwalk',
download_url='https://github.com/sadrasabouri/pyrandwalk/tarball/v1.1',
keywords="random-walk markov-chain stochastic-processes",
project_urls={
'Source': 'https://github.com/sadrasabouri/pyrandwalk',
},
install_requires=get_requires(),
python_requires='>=3.5',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
# NOTE(review): 'Intended Audience :: Developers' is duplicated (already
# listed above) -- harmless, but could be removed.
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Manufacturing',
'Intended Audience :: Science/Research',
'Topic :: Education',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
],
license='MIT',
) | 0.686055 | 0.307306 |
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
def test_gumbel_shape_errpr():
"""
Invalid shapes.
"""
# loc has 2 rows, scale has 3 -> shapes are not broadcastable
with pytest.raises(ValueError):
msd.Gumbel([[2.], [1.]], [[2.], [3.], [4.]], dtype=dtype.float32)
# NOTE(review): name typo 'errpr' (error); kept to avoid changing the test id.
def test_type():
"""Non-float dtype must be rejected."""
with pytest.raises(TypeError):
msd.Gumbel(0., 1., dtype=dtype.int32)
def test_name():
"""Non-string name must be rejected."""
with pytest.raises(TypeError):
msd.Gumbel(0., 1., name=1.0)
def test_seed():
"""Non-integer seed must be rejected."""
with pytest.raises(TypeError):
msd.Gumbel(0., 1., seed='seed')
def test_scale():
"""Scale must be strictly positive."""
with pytest.raises(ValueError):
msd.Gumbel(0., 0.)
with pytest.raises(ValueError):
msd.Gumbel(0., -1.)
def test_arguments():
"""
args passing during initialization.
"""
l = msd.Gumbel([3.0], [4.0], dtype=dtype.float32)
assert isinstance(l, msd.Distribution)
class GumbelProb(nn.Cell):
"""
Gumbel distribution: initialize with loc/scale.
"""
def __init__(self):
super(GumbelProb, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0, dtype=dtype.float32)
def construct(self, value):
# Exercise all probability-related APIs on the same input
prob = self.gumbel.prob(value)
log_prob = self.gumbel.log_prob(value)
cdf = self.gumbel.cdf(value)
log_cdf = self.gumbel.log_cdf(value)
sf = self.gumbel.survival_function(value)
log_sf = self.gumbel.log_survival(value)
return prob + log_prob + cdf + log_cdf + sf + log_sf
def test_gumbel_prob():
"""
Test probability functions: passing value through construct.
"""
net = GumbelProb()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
class KL(nn.Cell):
"""
Test kl_loss.
"""
def __init__(self):
super(KL, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0)
def construct(self, mu, s):
# KL divergence and cross entropy against another Gumbel(mu, s)
kl = self.gumbel.kl_loss('Gumbel', mu, s)
cross_entropy = self.gumbel.cross_entropy('Gumbel', mu, s)
return kl + cross_entropy
def test_kl_cross_entropy():
"""
Test kl_loss and cross_entropy.
"""
net = KL()
loc_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
scale_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
ans = net(loc_b, scale_b)
assert isinstance(ans, Tensor)
class GumbelBasics(nn.Cell):
"""
Test class: basic loc/scale function.
"""
def __init__(self):
super(GumbelBasics, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0, dtype=dtype.float32)
def construct(self):
mean = self.gumbel.mean()
sd = self.gumbel.sd()
mode = self.gumbel.mode()
entropy = self.gumbel.entropy()
return mean + sd + mode + entropy
def test_bascis():
"""
Test mean/sd/mode/entropy functionality of Gumbel.
"""
# NOTE(review): name typo 'bascis' (basics); kept to avoid changing the test id.
net = GumbelBasics()
ans = net()
assert isinstance(ans, Tensor)
class GumbelConstruct(nn.Cell):
"""
Gumbel distribution: going through construct.
"""
def __init__(self):
super(GumbelConstruct, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0)
def construct(self, value):
# Call both through the generic dispatcher and the direct method
prob = self.gumbel('prob', value)
prob1 = self.gumbel.prob(value)
return prob + prob1
def test_gumbel_construct():
"""
Test probability function going through construct.
"""
net = GumbelConstruct()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
def test_gumbel_shape_errpr():
"""
Invalid shapes.
"""
with pytest.raises(ValueError):
msd.Gumbel([[2.], [1.]], [[2.], [3.], [4.]], dtype=dtype.float32)
def test_type():
with pytest.raises(TypeError):
msd.Gumbel(0., 1., dtype=dtype.int32)
def test_name():
with pytest.raises(TypeError):
msd.Gumbel(0., 1., name=1.0)
def test_seed():
with pytest.raises(TypeError):
msd.Gumbel(0., 1., seed='seed')
def test_scale():
with pytest.raises(ValueError):
msd.Gumbel(0., 0.)
with pytest.raises(ValueError):
msd.Gumbel(0., -1.)
def test_arguments():
"""
args passing during initialization.
"""
l = msd.Gumbel([3.0], [4.0], dtype=dtype.float32)
assert isinstance(l, msd.Distribution)
class GumbelProb(nn.Cell):
"""
Gumbel distribution: initialize with loc/scale.
"""
def __init__(self):
super(GumbelProb, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0, dtype=dtype.float32)
def construct(self, value):
prob = self.gumbel.prob(value)
log_prob = self.gumbel.log_prob(value)
cdf = self.gumbel.cdf(value)
log_cdf = self.gumbel.log_cdf(value)
sf = self.gumbel.survival_function(value)
log_sf = self.gumbel.log_survival(value)
return prob + log_prob + cdf + log_cdf + sf + log_sf
def test_gumbel_prob():
"""
Test probability functions: passing value through construct.
"""
net = GumbelProb()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor)
class KL(nn.Cell):
"""
Test kl_loss.
"""
def __init__(self):
super(KL, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0)
def construct(self, mu, s):
kl = self.gumbel.kl_loss('Gumbel', mu, s)
cross_entropy = self.gumbel.cross_entropy('Gumbel', mu, s)
return kl + cross_entropy
def test_kl_cross_entropy():
"""
Test kl_loss and cross_entropy.
"""
net = KL()
loc_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
scale_b = Tensor(np.array([1.0]).astype(np.float32), dtype=dtype.float32)
ans = net(loc_b, scale_b)
assert isinstance(ans, Tensor)
class GumbelBasics(nn.Cell):
"""
Test class: basic loc/scale function.
"""
def __init__(self):
super(GumbelBasics, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0, dtype=dtype.float32)
def construct(self):
mean = self.gumbel.mean()
sd = self.gumbel.sd()
mode = self.gumbel.mode()
entropy = self.gumbel.entropy()
return mean + sd + mode + entropy
def test_bascis():
"""
Test mean/sd/mode/entropy functionality of Gumbel.
"""
net = GumbelBasics()
ans = net()
assert isinstance(ans, Tensor)
class GumbelConstruct(nn.Cell):
"""
Gumbel distribution: going through construct.
"""
def __init__(self):
super(GumbelConstruct, self).__init__()
self.gumbel = msd.Gumbel(3.0, 4.0)
def construct(self, value):
prob = self.gumbel('prob', value)
prob1 = self.gumbel.prob(value)
return prob + prob1
def test_gumbel_construct():
"""
Test probability function going through construct.
"""
net = GumbelConstruct()
value = Tensor([0.5, 1.0], dtype=dtype.float32)
ans = net(value)
assert isinstance(ans, Tensor) | 0.733833 | 0.667703 |
import os
import platform
import tempfile
import time
from pathlib import Path
from test.conftest import TEST_REF, conan_create_and_upload
from typing import List
from conan_app_launcher.core.conan import (ConanApi, ConanCleanup,
_create_key_value_pair_list)
from conan_app_launcher.core.conan_worker import (ConanWorker,
ConanWorkerElement)
from conans import __version__
from conans.model.ref import ConanFileReference
def test_conan_profile_name_alias_builder():
    """ Test, that the build_conan_profile_name_alias returns human readable strings. """
    cases = [
        # empty settings -> default name
        ({}, "No Settings"),
        # partial settings
        ({'os': 'Windows', 'arch': 'x86_64'}, "Windows_x64"),
        # full windows profile
        ({'os': 'Windows', 'os_build': 'Windows', 'arch': 'x86_64', 'arch_build': 'x86_64',
          'compiler': 'Visual Studio', 'compiler.version': '16', 'compiler.toolset': 'v142',
          'build_type': 'Release'}, "Windows_x64_vs16_v142_release"),
        # linux profile
        ({'os': 'Linux', 'arch': 'x86_64', 'compiler': 'gcc',
          'compiler.version': '7.4', 'build_type': 'Debug'}, "Linux_x64_gcc7.4_debug"),
    ]
    for settings, expected in cases:
        assert ConanApi.build_conan_profile_name_alias(settings) == expected
def test_conan_short_path_root():
""" Test, that short path root can be read. """
# NOTE(review): the env var is not restored if an assert fails; consider
# pytest's monkeypatch.setenv (or try/finally) for cleanup.
new_short_home = Path(tempfile.gettempdir()) / "._myconan_short"
os.environ["CONAN_USER_HOME_SHORT"] = str(new_short_home)
conan = ConanApi()
if platform.system() == "Windows":
assert conan.get_short_path_root() == new_short_home
else:
assert not conan.get_short_path_root().exists()
os.environ.pop("CONAN_USER_HOME_SHORT")
def test_empty_cleanup_cache(base_fixture):
"""
Test, if a clean cache returns no dirs. Actual functionality is tested with gui.
It is assumed, that the cash is clean, like it would be on the CI.
"""
# Point both conan homes at throwaway temp dirs so the real cache is untouched
os.environ["CONAN_USER_HOME"] = str(Path(tempfile.gettempdir()) / "._myconan_home")
os.environ["CONAN_USER_HOME_SHORT"] = str(Path(tempfile.gettempdir()) / "._myconan_short")
paths = ConanCleanup(ConanApi()).get_cleanup_cache_paths()
assert not paths
os.environ.pop("CONAN_USER_HOME")
os.environ.pop("CONAN_USER_HOME_SHORT")
def test_conan_find_remote_pkg(base_fixture):
"""
Test, if search_package_in_remotes finds a package for the current system and the specified options.
The function must find exactly one pacakge, which uses the spec. options and corresponds to the
default settings.
"""
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
default_settings = dict(conan.client_cache.default_profile.settings)
pkgs = conan.get_matching_package_in_remotes(ConanFileReference.loads(TEST_REF), {"shared": "True"})
assert len(pkgs) > 0
pkg = pkgs[0]
# the requested option must be a subset of the found package's options
assert {"shared": "True"}.items() <= pkg["options"].items()
for setting in default_settings:
if setting in pkg["settings"].keys():
assert default_settings[setting] in pkg["settings"][setting]
def test_conan_not_find_remote_pkg_wrong_opts(base_fixture):
"""
Test, if a wrong Option return causes an error.
Empty list must be returned and the error be logged.
"""
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
pkg = conan.get_matching_package_in_remotes(ConanFileReference.loads(TEST_REF), {"BogusOption": "True"})
assert not pkg
def test_conan_find_local_pkg(base_fixture):
"""
Test, if get_package installs the package and returns the path and check it again.
The bin dir in the package must exist (indicating it was correctly downloaded)
"""
os.system(f"conan install {TEST_REF} -u")
conan = ConanApi()
pkgs = conan.find_best_matching_packages(ConanFileReference.loads(TEST_REF))
assert len(pkgs) == 1
def test_get_path_or_install(base_fixture):
"""
Test, if get_package installs the package and returns the path and check it again.
The bin dir in the package must exist (indicating it was correctly downloaded)
"""
dir_to_check = "bin"
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
# Gets package path / installs the package
# NOTE(review): local name 'id' shadows the builtin id() (here and below).
id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF))
assert (package_folder / dir_to_check).is_dir()
# check again for already installed package
id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF))
assert (package_folder / dir_to_check).is_dir()
def test_get_path_or_install_manual_options(capsys):
"""
Test, if a package with options can install.
The actual installaton must not return an error and non given options be merged with default options.
"""
# This package has an option "shared" and is fairly small.
os.system(f"conan remove {TEST_REF} -f")
conan = ConanApi()
id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF), {"shared": "True"})
if platform.system() == "Windows":
assert (package_folder / "bin" / "python.exe").is_file()
elif platform.system() == "Linux":
assert (package_folder / "bin" / "python").is_file()
def test_install_with_any_settings(mocker, capfd):
"""
Test, if a package with <setting>=Any flags can install
The actual installaton must not return an error.
"""
# mock the remote response
os.system(f"conan remove {TEST_REF} -f")
# Create the "any" package
conan = ConanApi()
assert conan.install_package(
ConanFileReference.loads(TEST_REF),
{'id': '325c44fdb228c32b3de52146f3e3ff8d94dddb60', 'options': {}, 'settings': {
'arch_build': 'any', 'os_build': 'Linux', "build_type": "ANY"}, 'requires': [], 'outdated': False},)
captured = capfd.readouterr()
assert "ERROR" not in captured.err
assert "Cannot install package" not in captured.err
def test_compiler_no_settings(base_fixture, capfd):
    """
    Test, if a package with no settings at all can install
    The actual installation must not return an error.
    """
    conanfile = str(base_fixture.testdata_path / "conan" / "conanfile_no_settings.py")
    ref = "example/1.0.0@local/no_sets"
    # Create and upload the settings-less package, then drop the local copy so
    # get_path_or_auto_install has to resolve it from the remote.
    conan_create_and_upload(conanfile, ref)
    os.system(f"conan remove {ref} -f")
    conan = ConanApi()
    id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(ref))
    assert (package_folder / "bin").is_dir()
    captured = capfd.readouterr()
    assert "ERROR" not in captured.err
    assert "Can't find a matching package" not in captured.err
    # cleanup
    os.system(f"conan remove {ref} -f")
def test_resolve_default_options(base_fixture):
    """
    Test, if different kind of types of default options can be converted to a dict
    Dict is expected.
    """
    conan = ConanApi()
    # str, tuple and list inputs must all normalize to a non-empty dict.
    str_val = "option=value"
    ret = conan._resolve_default_options(str_val)
    assert ret.items()
    tup_val = ("option=value", "options2=value2")
    ret = conan._resolve_default_options(tup_val)
    assert ret.items()
    list_val = ["option=value", "options2=value2"]
    ret = conan._resolve_default_options(list_val)
    assert ret.items()
def test_create_key_value_list(base_fixture):
    """
    Test, that key value pairs can be extracted as strings. No arrays or other types supported.
    The return value must be a list of strings in the format ["key1=value1", "key2=value2"]
    "Any" values are ignored. (case insensitive)
    """
    inp = {"Key1": "Value1"}
    res = _create_key_value_pair_list(inp)
    assert res == ["Key1=Value1"]
    inp = {"Key1": "Value1", "Key2": "Value2"}
    res = _create_key_value_pair_list(inp)
    assert res == ["Key1=Value1", "Key2=Value2"]
    # entries with the value "Any" are filtered out entirely
    inp = {"Key1": "Value1", "Key2": "Any"}
    res = _create_key_value_pair_list(inp)
    assert res == ["Key1=Value1"]
def test_search_for_all_packages(base_fixture):
    """ Test, that an existing ref will be found in the remotes. """
    conan = ConanApi()
    res = conan.search_recipe_alternatives_in_remotes(ConanFileReference.loads(TEST_REF))
    ref = ConanFileReference.loads(TEST_REF)  # need to convert @_/_
    assert str(ref) in str(res)
def test_conan_worker(base_fixture, mocker):
    """
    Test, if conan worker works on the queue.
    It is expected, that the queue size decreases over time.
    """
    conan_refs: List[ConanWorkerElement] = [{"ref_pkg_id": "m4/1.4.19@_/_", "options": {},
                                             "settings": {}, "update": False, "auto_install": True},
                                            {"ref_pkg_id": "zlib/1.2.11@conan/stable", "options": {"shared": "True"},
                                             "settings": {}, "update": False, "auto_install": True}
                                            ]
    # Mock the actual installation so only the queue handling is under test.
    mock_func = mocker.patch('conan_app_launcher.core.ConanApi.get_path_or_auto_install')
    import conan_app_launcher.app as app
    conan_worker = ConanWorker(ConanApi(), app.active_settings)
    conan_worker.update_all_info(conan_refs, None)
    time.sleep(3)
    conan_worker.finish_working()
    mock_func.assert_called()
    # After finish_working the worker must have drained the install queue.
    assert conan_worker._conan_install_queue.qsize() == 0
import os
import platform
import tempfile
import time
from pathlib import Path
from test.conftest import TEST_REF, conan_create_and_upload
from typing import List
from conan_app_launcher.core.conan import (ConanApi, ConanCleanup,
_create_key_value_pair_list)
from conan_app_launcher.core.conan_worker import (ConanWorker,
ConanWorkerElement)
from conans import __version__
from conans.model.ref import ConanFileReference
def test_conan_profile_name_alias_builder():
    """ Test, that the build_conan_profile_name_alias returns human readable strings. """
    # check empty - should return a default name
    profile_name = ConanApi.build_conan_profile_name_alias({})
    assert profile_name == "No Settings"
    # check a partial settings set
    settings = {'os': 'Windows', 'arch': 'x86_64'}
    profile_name = ConanApi.build_conan_profile_name_alias(settings)
    assert profile_name == "Windows_x64"
    # check windows with full compiler information
    settings = {'os': 'Windows', 'os_build': 'Windows', 'arch': 'x86_64', 'arch_build': 'x86_64',
                'compiler': 'Visual Studio', 'compiler.version': '16', 'compiler.toolset': 'v142', 'build_type': 'Release'}
    profile_name = ConanApi.build_conan_profile_name_alias(settings)
    assert profile_name == "Windows_x64_vs16_v142_release"
    # check linux
    settings = {'os': 'Linux', 'arch': 'x86_64', 'compiler': 'gcc',
                'compiler.version': '7.4', 'build_type': 'Debug'}
    profile_name = ConanApi.build_conan_profile_name_alias(settings)
    assert profile_name == "Linux_x64_gcc7.4_debug"
def test_conan_short_path_root():
    """ Test, that short path root can be read. """
    new_short_home = Path(tempfile.gettempdir()) / "._myconan_short"
    # Redirect the short home via env var; it only has an effect on Windows.
    os.environ["CONAN_USER_HOME_SHORT"] = str(new_short_home)
    conan = ConanApi()
    if platform.system() == "Windows":
        assert conan.get_short_path_root() == new_short_home
    else:
        # Short paths are a Windows-only concept - the path must not exist elsewhere.
        assert not conan.get_short_path_root().exists()
    os.environ.pop("CONAN_USER_HOME_SHORT")
def test_empty_cleanup_cache(base_fixture):
    """
    Test, if a clean cache returns no dirs. Actual functionality is tested with gui.
    It is assumed, that the cache is clean, like it would be on the CI.
    """
    # Point both conan homes at fresh temp locations so the cache is guaranteed empty.
    os.environ["CONAN_USER_HOME"] = str(Path(tempfile.gettempdir()) / "._myconan_home")
    os.environ["CONAN_USER_HOME_SHORT"] = str(Path(tempfile.gettempdir()) / "._myconan_short")
    paths = ConanCleanup(ConanApi()).get_cleanup_cache_paths()
    assert not paths
    os.environ.pop("CONAN_USER_HOME")
    os.environ.pop("CONAN_USER_HOME_SHORT")
def test_conan_find_remote_pkg(base_fixture):
    """
    Test, if search_package_in_remotes finds a package for the current system and the specified options.
    The function must find exactly one package, which uses the spec. options and corresponds to the
    default settings.
    """
    os.system(f"conan remove {TEST_REF} -f")
    conan = ConanApi()
    default_settings = dict(conan.client_cache.default_profile.settings)
    pkgs = conan.get_matching_package_in_remotes(ConanFileReference.loads(TEST_REF), {"shared": "True"})
    assert len(pkgs) > 0
    pkg = pkgs[0]
    # the requested options must be a subset of the found package's options
    assert {"shared": "True"}.items() <= pkg["options"].items()
    for setting in default_settings:
        if setting in pkg["settings"].keys():
            assert default_settings[setting] in pkg["settings"][setting]
def test_conan_not_find_remote_pkg_wrong_opts(base_fixture):
    """
    Test, if a wrong Option return causes an error.
    Empty list must be returned and the error be logged.
    """
    os.system(f"conan remove {TEST_REF} -f")
    conan = ConanApi()
    # A bogus option can never match any package configuration.
    pkg = conan.get_matching_package_in_remotes(ConanFileReference.loads(TEST_REF), {"BogusOption": "True"})
    assert not pkg
def test_conan_find_local_pkg(base_fixture):
    """
    Test, if get_package installs the package and returns the path and check it again.
    The bin dir in the package must exist (indicating it was correctly downloaded)
    """
    # Ensure the package exists in the local cache before searching for it.
    os.system(f"conan install {TEST_REF} -u")
    conan = ConanApi()
    pkgs = conan.find_best_matching_packages(ConanFileReference.loads(TEST_REF))
    assert len(pkgs) == 1
def test_get_path_or_install(base_fixture):
    """
    Test, if get_package installs the package and returns the path and check it again.
    The bin dir in the package must exist (indicating it was correctly downloaded)
    """
    dir_to_check = "bin"
    # Remove the package first, so the install path of the function is actually exercised.
    os.system(f"conan remove {TEST_REF} -f")
    conan = ConanApi()
    # Gets package path / installs the package
    id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF))
    assert (package_folder / dir_to_check).is_dir()
    # check again for already installed package
    id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF))
    assert (package_folder / dir_to_check).is_dir()
def test_get_path_or_install_manual_options(capsys):
    """
    Test, if a package with options can install.
    The actual installation must not return an error and non given options be merged with default options.
    """
    # This package has an option "shared" and is fairly small.
    os.system(f"conan remove {TEST_REF} -f")
    conan = ConanApi()
    id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(TEST_REF), {"shared": "True"})
    # The test reference ships a python executable; its name is platform dependent.
    if platform.system() == "Windows":
        assert (package_folder / "bin" / "python.exe").is_file()
    elif platform.system() == "Linux":
        assert (package_folder / "bin" / "python").is_file()
def test_install_with_any_settings(mocker, capfd):
    """
    Test, if a package with <setting>=Any flags can install
    The actual installation must not return an error.
    """
    # mock the remote response
    os.system(f"conan remove {TEST_REF} -f")
    # Create the "any" package
    conan = ConanApi()
    # install_package must succeed although arch_build/build_type carry "any"-style values.
    assert conan.install_package(
        ConanFileReference.loads(TEST_REF),
        {'id': '325c44fdb228c32b3de52146f3e3ff8d94dddb60', 'options': {}, 'settings': {
            'arch_build': 'any', 'os_build': 'Linux', "build_type": "ANY"}, 'requires': [], 'outdated': False},)
    captured = capfd.readouterr()
    assert "ERROR" not in captured.err
    assert "Cannot install package" not in captured.err
def test_compiler_no_settings(base_fixture, capfd):
    """
    Test, if a package with no settings at all can install
    The actual installation must not return an error.
    """
    conanfile = str(base_fixture.testdata_path / "conan" / "conanfile_no_settings.py")
    ref = "example/1.0.0@local/no_sets"
    # Create and upload the settings-less package, then drop the local copy so
    # get_path_or_auto_install has to resolve it from the remote.
    conan_create_and_upload(conanfile, ref)
    os.system(f"conan remove {ref} -f")
    conan = ConanApi()
    id, package_folder = conan.get_path_or_auto_install(ConanFileReference.loads(ref))
    assert (package_folder / "bin").is_dir()
    captured = capfd.readouterr()
    assert "ERROR" not in captured.err
    assert "Can't find a matching package" not in captured.err
    # cleanup
    os.system(f"conan remove {ref} -f")
def test_resolve_default_options(base_fixture):
    """
    Test, if different kind of types of default options can be converted to a dict
    Dict is expected.
    """
    conan = ConanApi()
    # str, tuple and list inputs must all normalize to a non-empty dict.
    str_val = "option=value"
    ret = conan._resolve_default_options(str_val)
    assert ret.items()
    tup_val = ("option=value", "options2=value2")
    ret = conan._resolve_default_options(tup_val)
    assert ret.items()
    list_val = ["option=value", "options2=value2"]
    ret = conan._resolve_default_options(list_val)
    assert ret.items()
def test_create_key_value_list(base_fixture):
    """
    Test, that key value pairs can be extracted as strings. No arrays or other types supported.
    The return value must be a list of strings in the format ["key1=value1", "key2=value2"]
    "Any" values are ignored. (case insensitive)
    """
    inp = {"Key1": "Value1"}
    res = _create_key_value_pair_list(inp)
    assert res == ["Key1=Value1"]
    inp = {"Key1": "Value1", "Key2": "Value2"}
    res = _create_key_value_pair_list(inp)
    assert res == ["Key1=Value1", "Key2=Value2"]
    # entries with the value "Any" are filtered out entirely
    inp = {"Key1": "Value1", "Key2": "Any"}
    res = _create_key_value_pair_list(inp)
    assert res == ["Key1=Value1"]
def test_search_for_all_packages(base_fixture):
    """ Test, that an existing ref will be found in the remotes. """
    conan = ConanApi()
    res = conan.search_recipe_alternatives_in_remotes(ConanFileReference.loads(TEST_REF))
    ref = ConanFileReference.loads(TEST_REF)  # need to convert @_/_
    assert str(ref) in str(res)
def test_conan_worker(base_fixture, mocker):
    """
    Test, if conan worker works on the queue.
    It is expected, that the queue size decreases over time.
    """
    conan_refs: List[ConanWorkerElement] = [{"ref_pkg_id": "m4/1.4.19@_/_", "options": {},
                                             "settings": {}, "update": False, "auto_install": True},
                                            {"ref_pkg_id": "zlib/1.2.11@conan/stable", "options": {"shared": "True"},
                                             "settings": {}, "update": False, "auto_install": True}
                                            ]
    # Mock the actual installation so only the queue handling is under test.
    mock_func = mocker.patch('conan_app_launcher.core.ConanApi.get_path_or_auto_install')
    import conan_app_launcher.app as app
    conan_worker = ConanWorker(ConanApi(), app.active_settings)
    conan_worker.update_all_info(conan_refs, None)
    time.sleep(3)
    conan_worker.finish_working()
    mock_func.assert_called()
    # After finish_working the worker must have drained the install queue.
    assert conan_worker._conan_install_queue.qsize() == 0
import komand
import json
from .schema import SendInput, SendOutput
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
class Send(komand.Action):
    """Komand action that sends an email through the connection's SMTP client."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='send',
            description='Send an email',
            input=SendInput(),
            output=SendOutput())

    def run(self, params={}):
        """Run action"""
        client = self.connection.get()
        msg = MIMEMultipart()
        emails = []
        msg['Subject'] = params.get('subject')
        msg['From'] = params['email_from']
        msg['To'] = params['email_to']
        html = params['html']
        emails.append(params['email_to'])
        cc_emails = []
        bcc_emails = []
        if params.get('cc'):
            # CC recipients appear in the header AND in the envelope recipient list.
            msg['CC'] = ', '.join(params['cc'])
            cc_emails = params['cc']
            emails = emails + cc_emails
        if params.get('bcc'):
            # BCC recipients are only added to the envelope, never to a header.
            bcc_emails = params['bcc']
            emails = emails + bcc_emails
        msg.attach(MIMEText(params.get('message'), 'plain' if not html else 'html'))
        # Check if attachment exists. If it does, attach it!
        if len(params.get("attachment", {"content": "", "filename": ""}).get("content")) > 0:
            self.logger.info("Found attachment! Attaching...")
            attachment_base64 = params.get("attachment").get("content")
            attachment_filename = params.get("attachment").get("filename")
            # Prepare the attachment. Parts of this code below pulled out of encoders.encode_base64.
            # Since we already have base64, don't bother calling that func since it does too much.
            part = MIMEBase('application', 'octet-stream')
            part.set_payload(attachment_base64)
            part['Content-Transfer-Encoding'] = 'base64'
            part.add_header('Content-Disposition', "attachment; filename= %s" % attachment_filename)
            msg.attach(part)
        client.sendmail(
            params['email_from'],
            emails,
            msg.as_string(),
        )
        client.quit()
        return {'result': 'ok'}

    def test(self, params={}):
        """Test action"""
        # Only verify that an SMTP client can be obtained from the connection.
        client = self.connection.get()
        return {'result': 'ok'}
import komand
import json
from .schema import SendInput, SendOutput
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
class Send(komand.Action):
    """Komand action that sends an email through the connection's SMTP client."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='send',
            description='Send an email',
            input=SendInput(),
            output=SendOutput())

    def run(self, params={}):
        """Run action"""
        client = self.connection.get()
        msg = MIMEMultipart()
        emails = []
        msg['Subject'] = params.get('subject')
        msg['From'] = params['email_from']
        msg['To'] = params['email_to']
        html = params['html']
        emails.append(params['email_to'])
        cc_emails = []
        bcc_emails = []
        if params.get('cc'):
            # CC recipients appear in the header AND in the envelope recipient list.
            msg['CC'] = ', '.join(params['cc'])
            cc_emails = params['cc']
            emails = emails + cc_emails
        if params.get('bcc'):
            # BCC recipients are only added to the envelope, never to a header.
            bcc_emails = params['bcc']
            emails = emails + bcc_emails
        msg.attach(MIMEText(params.get('message'), 'plain' if not html else 'html'))
        # Check if attachment exists. If it does, attach it!
        if len(params.get("attachment", {"content": "", "filename": ""}).get("content")) > 0:
            self.logger.info("Found attachment! Attaching...")
            attachment_base64 = params.get("attachment").get("content")
            attachment_filename = params.get("attachment").get("filename")
            # Prepare the attachment. Parts of this code below pulled out of encoders.encode_base64.
            # Since we already have base64, don't bother calling that func since it does too much.
            part = MIMEBase('application', 'octet-stream')
            part.set_payload(attachment_base64)
            part['Content-Transfer-Encoding'] = 'base64'
            part.add_header('Content-Disposition', "attachment; filename= %s" % attachment_filename)
            msg.attach(part)
        client.sendmail(
            params['email_from'],
            emails,
            msg.as_string(),
        )
        client.quit()
        return {'result': 'ok'}

    def test(self, params={}):
        """Test action"""
        # Only verify that an SMTP client can be obtained from the connection.
        client = self.connection.get()
        return {'result': 'ok'}
from webob import exc
from oslo_log import log as logging
from senlin.api.openstack.v1 import util
from senlin.common import consts
from senlin.common.i18n import _
from senlin.common import serializers
from senlin.common import utils
from senlin.common import wsgi
from senlin.rpc import client as rpc_client
LOG = logging.getLogger(__name__)
class ActionData(object):
    '''All required data fields for an action.'''

    PARAMS = (consts.ACTION_NAME, consts.ACTION_TARGET, consts.ACTION_ACTION)

    def __init__(self, data):
        self.data = data

    def name(self):
        # Mandatory field - reject the request body if it is missing.
        try:
            return self.data[consts.ACTION_NAME]
        except KeyError:
            raise exc.HTTPBadRequest(_("No action name specified"))

    def target(self):
        try:
            return self.data[consts.ACTION_TARGET]
        except KeyError:
            raise exc.HTTPBadRequest(_("No target specified"))

    def action(self):
        try:
            return self.data[consts.ACTION_ACTION]
        except KeyError:
            raise exc.HTTPBadRequest(_("No action specified"))

    def params(self):
        # Everything that is not one of the reserved keys is a free-form parameter.
        return dict((k, v) for k, v in self.data.items()
                    if k not in self.PARAMS)
class ActionController(object):
    '''WSGI controller for Actions in Senlin v1 API.'''

    # Define request scope (must match what is in policy.json)
    REQUEST_SCOPE = 'actions'

    def __init__(self, options):
        self.options = options
        self.rpc_client = rpc_client.EngineClient()

    def default(self, req, **args):
        # Any route that is not explicitly mapped results in a 404.
        raise exc.HTTPNotFound()

    @util.policy_enforce
    def index(self, req):
        '''List actions, honoring only whitelisted query parameters/filters.'''
        # Query parameters not listed here are silently dropped by
        # util.get_allowed_params below.
        filter_whitelist = {
            'name': 'mixed',
            'target': 'mixed',
            'action': 'mixed',
            'created_time': 'single',
            'updated_time': 'single',
            'deleted_time': 'single',
        }
        param_whitelist = {
            'limit': 'single',
            'marker': 'single',
            'sort_dir': 'single',
            'sort_keys': 'multi',
            'show_deleted': 'single',
        }
        params = util.get_allowed_params(req.params, param_whitelist)
        filters = util.get_allowed_params(req.params, filter_whitelist)
        # Validate typed query parameters before handing them to the engine.
        key = consts.PARAM_LIMIT
        if key in params:
            params[key] = utils.parse_int_param(key, params[key])
        key = consts.PARAM_SHOW_DELETED
        if key in params:
            params[key] = utils.parse_bool_param(key, params[key])
        if not filters:
            filters = None
        actions = self.rpc_client.action_list(req.context,
                                              filters=filters,
                                              **params)
        return {'actions': actions}

    @util.policy_enforce
    def create(self, req, body):
        '''Create an action from the validated request body.'''
        data = ActionData(body)
        result = self.rpc_client.action_create(req.context,
                                               data.name(),
                                               data.target(),
                                               data.action(),
                                               data.params())
        return result

    @util.policy_enforce
    def get(self, req, action_id):
        '''Return a single action or 404 if the engine finds nothing.'''
        action = self.rpc_client.action_get(req.context, action_id)
        if not action:
            raise exc.HTTPNotFound()
        return action
def create_resource(options):
    '''Actions factory method.'''
    # Wire the controller to the standard JSON (de)serializers.
    return wsgi.Resource(ActionController(options),
                         wsgi.JSONRequestDeserializer(),
                         serializers.JSONResponseSerializer())
from webob import exc
from oslo_log import log as logging
from senlin.api.openstack.v1 import util
from senlin.common import consts
from senlin.common.i18n import _
from senlin.common import serializers
from senlin.common import utils
from senlin.common import wsgi
from senlin.rpc import client as rpc_client
LOG = logging.getLogger(__name__)
class ActionData(object):
    '''All required data fields for an action.'''

    # Keys consumed directly by the accessors below and therefore
    # excluded from params().
    PARAMS = (consts.ACTION_NAME, consts.ACTION_TARGET, consts.ACTION_ACTION)

    def __init__(self, data):
        self.data = data

    def name(self):
        # Mandatory field - reject the request body if it is missing.
        if consts.ACTION_NAME not in self.data:
            raise exc.HTTPBadRequest(_("No action name specified"))
        return self.data[consts.ACTION_NAME]

    def target(self):
        if consts.ACTION_TARGET not in self.data:
            raise exc.HTTPBadRequest(_("No target specified"))
        return self.data[consts.ACTION_TARGET]

    def action(self):
        if consts.ACTION_ACTION not in self.data:
            raise exc.HTTPBadRequest(_("No action specified"))
        return self.data[consts.ACTION_ACTION]

    def params(self):
        # Everything that is not one of the reserved keys is a free-form parameter.
        data = self.data.items()
        return dict((k, v) for k, v in data if k not in self.PARAMS)
class ActionController(object):
    '''WSGI controller for Actions in Senlin v1 API.'''

    # Define request scope (must match what is in policy.json)
    REQUEST_SCOPE = 'actions'

    def __init__(self, options):
        self.options = options
        self.rpc_client = rpc_client.EngineClient()

    def default(self, req, **args):
        # Any route that is not explicitly mapped results in a 404.
        raise exc.HTTPNotFound()

    @util.policy_enforce
    def index(self, req):
        '''List actions, honoring only whitelisted query parameters/filters.'''
        # Query parameters not listed here are silently dropped by
        # util.get_allowed_params below.
        filter_whitelist = {
            'name': 'mixed',
            'target': 'mixed',
            'action': 'mixed',
            'created_time': 'single',
            'updated_time': 'single',
            'deleted_time': 'single',
        }
        param_whitelist = {
            'limit': 'single',
            'marker': 'single',
            'sort_dir': 'single',
            'sort_keys': 'multi',
            'show_deleted': 'single',
        }
        params = util.get_allowed_params(req.params, param_whitelist)
        filters = util.get_allowed_params(req.params, filter_whitelist)
        # Validate typed query parameters before handing them to the engine.
        key = consts.PARAM_LIMIT
        if key in params:
            params[key] = utils.parse_int_param(key, params[key])
        key = consts.PARAM_SHOW_DELETED
        if key in params:
            params[key] = utils.parse_bool_param(key, params[key])
        if not filters:
            filters = None
        actions = self.rpc_client.action_list(req.context,
                                              filters=filters,
                                              **params)
        return {'actions': actions}

    @util.policy_enforce
    def create(self, req, body):
        '''Create an action from the validated request body.'''
        data = ActionData(body)
        result = self.rpc_client.action_create(req.context,
                                               data.name(),
                                               data.target(),
                                               data.action(),
                                               data.params())
        return result

    @util.policy_enforce
    def get(self, req, action_id):
        '''Return a single action or 404 if the engine finds nothing.'''
        action = self.rpc_client.action_get(req.context, action_id)
        if not action:
            raise exc.HTTPNotFound()
        return action
def create_resource(options):
    '''Actions factory method.'''
    # Wire the controller to the standard JSON (de)serializers.
    return wsgi.Resource(ActionController(options),
                         wsgi.JSONRequestDeserializer(),
                         serializers.JSONResponseSerializer())
import atexit
import os
import signal
import sys
class Daemon(object):
    """A basic daemon class. Credits to <NAME>, the developers of daemonize and
    python-daemon, Python Cookbook 3rd Ed. by <NAME> and <NAME>, and more.
    """

    def __init__(self, pidfile, stdin=os.devnull, stdout=os.devnull, stderr=os.devnull):
        # Standard streams default to the null device so a detached daemon
        # never blocks on a missing terminal.
        self.pidfile = pidfile
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr

    def start(self, *args, **kwargs):
        """Start the daemon.

        All arguments are forwarded to run(). Raises RuntimeError when a
        pidfile already exists, since the daemon may already be running.
        """
        # If a pidfile exists, the daemon could be running.
        if os.path.isfile(self.pidfile):
            raise RuntimeError('Already running.')
        # Daemonize the process and call the run method.
        self._daemonize()
        self.run(*args, **kwargs)

    def run(self):
        """Override this method when subclassing Daemon. It will be called
        after the process has been daemonized by start() or restart().
        """
        raise NotImplementedError

    def _daemonize(self):
        """Follow the standard UNIX double-fork procedure. Refer to <NAME>
        Stevens' "Advanced Programming in the UNIX Environment" for details.
        """
        # First fork to detach from the parent.
        try:
            pid = os.fork()
            if pid > 0:
                raise SystemExit(0)
        except OSError as e:
            raise RuntimeError('First fork failed: [{0.errno!s}] {0.strerror}'.format(e))
        # Ensure the daemon doesn't keep any directory in use and that
        # operating system calls provide their own permission masks.
        # The umask value of 022 is more secure than the standard 0.
        os.chdir('/')
        # 0o22 is the Python-3-compatible spelling of the octal literal 022.
        os.umask(0o22)
        os.setsid()
        # Second fork to relinquish session leadership.
        try:
            pid = os.fork()
            if pid > 0:
                raise SystemExit(0)
        except OSError as e:
            raise RuntimeError('Second fork failed: [{0.errno!s}] {0.strerror}'.format(e))
        # Flush I/O buffers and establish new file descriptors for the standard streams.
        sys.stdout.flush()
        sys.stderr.flush()
        # open() replaces the Python-2-only file() builtin.
        stdin = open(self.stdin, 'r')
        stdout = open(self.stdout, 'a+')
        stderr = open(self.stderr, 'a+')
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())
        # Register the pidfile for removal upon exit.
        atexit.register(os.remove, self.pidfile)
        # Create the pidfile and write the daemon's PID.
        with open(self.pidfile, 'w') as pidfile:
            pidfile.write(str(os.getpid()))
import atexit
import os
import signal
import sys
class Daemon(object):
    """A basic daemon class. Credits to <NAME>, the developers of daemonize and
    python-daemon, Python Cookbook 3rd Ed. by <NAME> and <NAME>, and more.
    """

    def __init__(self, pidfile, stdin=os.devnull, stdout=os.devnull, stderr=os.devnull):
        # Standard streams default to the null device so a detached daemon
        # never blocks on a missing terminal.
        self.pidfile = pidfile
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr

    def start(self, *args, **kwargs):
        """Start the daemon.

        All arguments are forwarded to run(). Raises RuntimeError when a
        pidfile already exists, since the daemon may already be running.
        """
        # If a pidfile exists, the daemon could be running.
        if os.path.isfile(self.pidfile):
            raise RuntimeError('Already running.')
        # Daemonize the process and call the run method.
        self._daemonize()
        self.run(*args, **kwargs)

    def run(self):
        """Override this method when subclassing Daemon. It will be called
        after the process has been daemonized by start() or restart().
        """
        raise NotImplementedError

    def _daemonize(self):
        """Follow the standard UNIX double-fork procedure. Refer to <NAME>
        Stevens' "Advanced Programming in the UNIX Environment" for details.
        """
        # First fork to detach from the parent.
        try:
            pid = os.fork()
            if pid > 0:
                raise SystemExit(0)
        except OSError as e:
            raise RuntimeError('First fork failed: [{0.errno!s}] {0.strerror}'.format(e))
        # Ensure the daemon doesn't keep any directory in use and that
        # operating system calls provide their own permission masks.
        # The umask value of 022 is more secure than the standard 0.
        os.chdir('/')
        # 0o22 is the Python-3-compatible spelling of the octal literal 022.
        os.umask(0o22)
        os.setsid()
        # Second fork to relinquish session leadership.
        try:
            pid = os.fork()
            if pid > 0:
                raise SystemExit(0)
        except OSError as e:
            raise RuntimeError('Second fork failed: [{0.errno!s}] {0.strerror}'.format(e))
        # Flush I/O buffers and establish new file descriptors for the standard streams.
        sys.stdout.flush()
        sys.stderr.flush()
        # open() replaces the Python-2-only file() builtin.
        stdin = open(self.stdin, 'r')
        stdout = open(self.stdout, 'a+')
        stderr = open(self.stderr, 'a+')
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())
        # Register the pidfile for removal upon exit.
        atexit.register(os.remove, self.pidfile)
        # Create the pidfile and write the daemon's PID.
        with open(self.pidfile, 'w') as pidfile:
            pidfile.write(str(os.getpid()))
from pyriemann.utils.distance import distance
from similarity import similarity_matrix
import sys
sys.path.append('/home/tevo/Documents/UFABC/Spikes')
sys.path.append('/home/tevo/Documents/UFABC/SingleUnit Spike Learning/src/models/')
import os
os.chdir('/home/tevo/Documents/UFABC/Spikes')
from spikeHelper.loadSpike import Rat
import pandas as pd
import numpy as np
from numpy.linalg import eig, norm
ds = ['riemann', 'euclid', 'logdet', 'kullback', 'kullback_sym']
import pickle
from scipy.stats import pearsonr
from scipy.spatial.distance import directed_hausdorff

##TODO group trials by similarity and compute tgen matrices
# group MATRICES by similarity and compute
# k-means trial matrices
## Measure distance between each single-trial generalization matrix and each one of the others
# Bonus: get 2-trial and 5-trial mean matrices

# Per-rat trial selection cutoff passed as 'trialMax' below.
iti_best = {7: 400, 8: 550, 9: 300, 10: 400}
n_trials_for_mean_sim = 20
all_res = pd.DataFrame()
templates = pd.DataFrame()
for rat_number in [7, 8, 9, 10]:
    r = Rat(rat_number, sigma=None, binSize=120)
    #({'minDuration':1300,'maxDuration':1700},zmax=3)
    r.selecTrials({'minDuration': 1300, 'maxDuration': 1700, 'trialMax': iti_best[rat_number]})
    r.selecTimes(0, 1300)
    # Mean similarity templates over the first and last n trials.
    early_sim = similarity_matrix(r.cubicNeuronTimeTrial()[:, :, :n_trials_for_mean_sim],
                                  n_splits=100, method='pearson').mean(axis=2)
    late_sim = similarity_matrix(r.cubicNeuronTimeTrial()[:, :, -n_trials_for_mean_sim:],
                                 n_splits=100, method='pearson').mean(axis=2)
    templates = templates.append(pd.DataFrame({'early': [early_sim], 'late': [late_sim], 'rat': rat_number}))
    for trial in np.unique(r.trial):
        one_trial_activity = r.X[r.trial == trial, :].transpose()
        one_trial_gen = np.nan_to_num(pd.DataFrame(one_trial_activity).corr().values)
        # Correlate each single-trial matrix with the early/late templates.
        one_trial_res = {#'to early':norm(one_trial_gen - early_sim),
                         #'to late':norm(one_trial_gen - late_sim),
                         'to early': pearsonr(one_trial_gen.ravel(), early_sim.ravel())[0],
                         'to late': pearsonr(one_trial_gen.ravel(), late_sim.ravel())[0],
                         'trial': trial, 'rat_number': rat_number,
                         'matrix': [one_trial_gen]}
        all_res = all_res.append(pd.DataFrame(one_trial_res))

# Use context managers so the dump files are flushed and closed deterministically
# (the original passed unclosed open() handles straight into pickle.dump).
with open('similarity_templates_cp_corr_smoothNO.pickle', 'wb') as f:
    pickle.dump(templates, f)
with open('similarity_results_cp_corr_smoothNO.pickle', 'wb') as f:
    pickle.dump(all_res, f)

# s = all_res.drop(['rat_number','matrix'],axis=1).set_index('trial')
# (s['to early']-s['to late']).plot()
# plt.fill_betweenx([-1,1],s.index[n_trials_for_mean_sim],s.index[0],color='g',alpha=.5)
# plt.fill_betweenx([-1,1],s.index[-1],s.index[-n_trials_for_mean_sim],color='r',alpha=.5)
# plt.show()
from pyriemann.utils.distance import distance
from similarity import similarity_matrix
import sys
sys.path.append('/home/tevo/Documents/UFABC/Spikes')
sys.path.append('/home/tevo/Documents/UFABC/SingleUnit Spike Learning/src/models/')
import os
os.chdir('/home/tevo/Documents/UFABC/Spikes')
from spikeHelper.loadSpike import Rat
import pandas as pd
import numpy as np
from numpy.linalg import eig, norm
ds = ['riemann', 'euclid', 'logdet', 'kullback', 'kullback_sym']
import pickle
from scipy.stats import pearsonr
from scipy.spatial.distance import directed_hausdorff

##TODO group trials by similarity and compute tgen matrices
# group MATRICES by similarity and compute
# k-means trial matrices
## Measure distance between each single-trial generalization matrix and each one of the others
# Bonus: get 2-trial and 5-trial mean matrices

# Per-rat trial selection cutoff passed as 'trialMax' below.
iti_best = {7: 400, 8: 550, 9: 300, 10: 400}
n_trials_for_mean_sim = 20
all_res = pd.DataFrame()
templates = pd.DataFrame()
for rat_number in [7, 8, 9, 10]:
    r = Rat(rat_number, sigma=None, binSize=120)
    #({'minDuration':1300,'maxDuration':1700},zmax=3)
    r.selecTrials({'minDuration': 1300, 'maxDuration': 1700, 'trialMax': iti_best[rat_number]})
    r.selecTimes(0, 1300)
    # Mean similarity templates over the first and last n trials.
    early_sim = similarity_matrix(r.cubicNeuronTimeTrial()[:, :, :n_trials_for_mean_sim],
                                  n_splits=100, method='pearson').mean(axis=2)
    late_sim = similarity_matrix(r.cubicNeuronTimeTrial()[:, :, -n_trials_for_mean_sim:],
                                 n_splits=100, method='pearson').mean(axis=2)
    templates = templates.append(pd.DataFrame({'early': [early_sim], 'late': [late_sim], 'rat': rat_number}))
    for trial in np.unique(r.trial):
        one_trial_activity = r.X[r.trial == trial, :].transpose()
        one_trial_gen = np.nan_to_num(pd.DataFrame(one_trial_activity).corr().values)
        # Correlate each single-trial matrix with the early/late templates.
        one_trial_res = {#'to early':norm(one_trial_gen - early_sim),
                         #'to late':norm(one_trial_gen - late_sim),
                         'to early': pearsonr(one_trial_gen.ravel(), early_sim.ravel())[0],
                         'to late': pearsonr(one_trial_gen.ravel(), late_sim.ravel())[0],
                         'trial': trial, 'rat_number': rat_number,
                         'matrix': [one_trial_gen]}
        all_res = all_res.append(pd.DataFrame(one_trial_res))

# Use context managers so the dump files are flushed and closed deterministically
# (the original passed unclosed open() handles straight into pickle.dump).
with open('similarity_templates_cp_corr_smoothNO.pickle', 'wb') as f:
    pickle.dump(templates, f)
with open('similarity_results_cp_corr_smoothNO.pickle', 'wb') as f:
    pickle.dump(all_res, f)

# s = all_res.drop(['rat_number','matrix'],axis=1).set_index('trial')
# (s['to early']-s['to late']).plot()
# plt.fill_betweenx([-1,1],s.index[n_trials_for_mean_sim],s.index[0],color='g',alpha=.5)
# plt.fill_betweenx([-1,1],s.index[-1],s.index[-n_trials_for_mean_sim],color='r',alpha=.5)
# plt.show()
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
from operator import add
class DoorKeyObstEnv(MiniGridEnv):
    """
    Environment with a door and key, sparse reward, with 0 or n obstacles.

    The agent starts left of a locked door, must pick up the key, open the
    door and reach the goal, while stepping into a ball obstacle ends the
    episode with reward -1.
    """

    def __init__(self, size=7, n_obstacles=1, key_pos=(1, 1)):
        # Reduce obstacles if there are too many for the grid size
        if n_obstacles <= size / 2 + 1:
            self.n_obstacles = int(n_obstacles)
        else:
            self.n_obstacles = int(size / 2)
        self._key_default_pos = np.array(key_pos)
        super().__init__(
            grid_size=size,
            max_steps=5 * size * size
        )
        # Only 5 actions permitted: left, right, forward, pickup, toggle
        # (drop is aliased to toggle in step()).
        self.action_space = spaces.Discrete(self.actions.drop + 1)
        self.reward_range = (-1, 1)

    def _gen_grid(self, width, height):
        # Create an empty grid
        self.grid = Grid(width, height)
        # Generate the surrounding walls
        self.grid.wall_rect(0, 0, width, height)
        # Place a goal in the bottom-right corner
        self.put_obj(Goal(), width - 2, height - 2)
        # Create a vertical splitting wall. Integer division replaces the
        # original math.floor(width / 2): 'math' was never imported explicitly
        # here and only leaked in through the wildcard import above.
        splitIdx = width // 2
        self.grid.vert_wall(splitIdx, 0)
        # Place a door in the wall
        doorIdx = 1
        self.put_obj(Door('yellow', is_locked=True), splitIdx, doorIdx)
        # Place a yellow key on the left side
        self.put_obj(Key('yellow'), *self._key_default_pos)
        # Place the agent at a random position and orientation
        # on the left side of the splitting wall
        self.place_agent(size=(splitIdx, height))
        # Place obstacles.
        # NOTE(review): 'top' is computed but never passed to place_obj, so
        # obstacles are sampled from the LEFT half (same side as the agent)
        # despite the original "right side" comment - confirm the intended side.
        self.obstacles = []
        top = (splitIdx + 1, 1)
        for i_obst in range(self.n_obstacles):
            self.obstacles.append(Ball())
            self.place_obj(self.obstacles[i_obst], size=(splitIdx, height), max_tries=100)
        self.mission = "use the key to open the door and then get to the goal, avoid obstacles"

    def step(self, action):
        # Invalid action: fall back to action 0
        if action >= self.action_space.n:
            action = 0
        # drop is not used, it is mapped to toggle instead
        if action == self.actions.drop:
            action = self.actions.toggle
        # Check if there is a ball in front of the agent
        front_cell = self.grid.get(*self.front_pos)
        not_clear = front_cell and front_cell.type == 'ball'
        # If the agent tried to walk over an obstacle: penalize and end the episode.
        if action == self.actions.forward and not_clear:
            reward = -1
            done = True
            obs = self.gen_obs()
            info = {}
            return obs, reward, done, info
        # Update the agent's position/direction
        obs, reward, done, info = MiniGridEnv.step(self, action)
        # Update obstacle positions: each obstacle is re-placed within a 3x3
        # window around its previous position.
        for i_obst in range(len(self.obstacles)):
            old_pos = self.obstacles[i_obst].cur_pos
            top = tuple(map(add, old_pos, (-1, -1)))
            top = (max(-1, top[0]), max(-1, top[1]))
            try:
                self.place_obj(self.obstacles[i_obst], top=top, size=(3, 3), max_tries=100)
                self.grid.set(*old_pos, None)
            except Exception:
                # place_obj raises when it cannot find a free cell; the
                # obstacle simply stays put in that case. Narrowed from a
                # bare except so KeyboardInterrupt/SystemExit still propagate.
                pass
        # generate observation after obstacle positions are updated
        obs = self.gen_obs()
        return obs, reward, done, info
# register classes of stochastic environments with obstacles
class DoorKeyObstEnv6x6(DoorKeyObstEnv):
    """6x6 stochastic variant: one moving obstacle."""
    def __init__(self):
        super().__init__(size=6, n_obstacles=1)
class DoorKeyObstEnv8x8(DoorKeyObstEnv):
    """8x8 stochastic variant: one moving obstacle."""
    def __init__(self):
        super().__init__(size=8, n_obstacles=1)
class DoorKeyObstEnv17x17(DoorKeyObstEnv):
    """17x17 stochastic variant: three moving obstacles."""
    def __init__(self):
        super().__init__(size=17, n_obstacles=3)
# register classes of deterministic environments without obstacles
class DoorKeyNoObstEnv6x6(DoorKeyObstEnv):
    """6x6 deterministic variant: no obstacles."""
    def __init__(self):
        super().__init__(size=6, n_obstacles=0)
class DoorKeyNoObstEnv7x7(DoorKeyObstEnv):
    """7x7 deterministic variant: no obstacles."""
    def __init__(self):
        super().__init__(size=7, n_obstacles=0)
class DoorKeyNoObstEnv8x8(DoorKeyObstEnv):
    """8x8 deterministic variant: no obstacles."""
    def __init__(self):
        super().__init__(size=8, n_obstacles=0)
class DoorKeyNoObstEnv17x17(DoorKeyObstEnv):
    """17x17 deterministic variant: no obstacles."""
    def __init__(self):
        super().__init__(size=17, n_obstacles=0)
# register stochastic environments with obstacles
register(
    id='MiniGrid-DoorKeyObst-6x6-v0',
    entry_point='gym_minigrid.envs:DoorKeyObstEnv6x6'
)
# NOTE: the 7x7 variant registers the base class directly -- its defaults
# are size=7, n_obstacles=1, so no dedicated subclass is needed.
register(
    id='MiniGrid-DoorKeyObst-7x7-v0',
    entry_point='gym_minigrid.envs:DoorKeyObstEnv'
)
register(
    id='MiniGrid-DoorKeyObst-8x8-v0',
    entry_point='gym_minigrid.envs:DoorKeyObstEnv8x8'
)
register(
    id='MiniGrid-DoorKeyObst-17x17-v0',
    entry_point='gym_minigrid.envs:DoorKeyObstEnv17x17'
)
# register deterministic environments without obstacles
register(
    id='MiniGrid-DoorKeyNoObst-6x6-v0',
    entry_point='gym_minigrid.envs:DoorKeyNoObstEnv6x6'
)
register(
    id='MiniGrid-DoorKeyNoObst-7x7-v0',
    entry_point='gym_minigrid.envs:DoorKeyNoObstEnv7x7'
)
register(
    id='MiniGrid-DoorKeyNoObst-8x8-v0',
    entry_point='gym_minigrid.envs:DoorKeyNoObstEnv8x8'
)
register(
    id='MiniGrid-DoorKeyNoObst-17x17-v0',
    entry_point='gym_minigrid.envs:DoorKeyNoObstEnv17x17'
)
from gym_minigrid.register import register
from operator import add
class DoorKeyObstEnv(MiniGridEnv):
"""
Environment with a door and key, sparse reward, with 0 or n obstacles
"""
def __init__(self, size=7, n_obstacles=1, key_pos=(1, 1)):
# Reduce obstacles if there are too many
if n_obstacles <= size / 2 + 1:
self.n_obstacles = int(n_obstacles)
else:
self.n_obstacles = int(size / 2)
self._key_default_pos = np.array(key_pos)
super().__init__(
grid_size=size,
max_steps=5 * size * size
)
# Only 5 actions permitted: left, right, forward, pickup, tooggle
self.action_space = spaces.Discrete(self.actions.drop + 1)
self.reward_range = (-1, 1)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal in the bottom-right corner
self.put_obj(Goal(), width - 2, height - 2)
# Create a vertical splitting wall
splitIdx = math.floor(width / 2)
self.grid.vert_wall(splitIdx, 0)
# Place a door in the wall
doorIdx = 1
self.put_obj(Door('yellow', is_locked=True), splitIdx, doorIdx)
# Place a yellow key on the left side
self.put_obj(Key('yellow'), *self._key_default_pos)
# Place the agent at a random position and orientation
# on the left side of the splitting wall
self.place_agent(size=(splitIdx, height))
# Place obstacles
# on the right side of the splitting wall
self.obstacles = []
top = (splitIdx + 1, 1)
for i_obst in range(self.n_obstacles):
self.obstacles.append(Ball())
self.place_obj(self.obstacles[i_obst], size=(splitIdx, height), max_tries=100)
self.mission = "use the key to open the door and then get to the goal, avoid obstacles"
def step(self, action):
# Invalid action
if action >= self.action_space.n:
action = 0
# drop is not used, it is mapped to toggle instead
# map drop action to toggle
if action == self.actions.drop:
action = self.actions.toggle
# Check if there is a ball in front of the agent
front_cell = self.grid.get(*self.front_pos)
not_clear = front_cell and front_cell.type == 'ball'
# If the agent tried to walk over an obstacle
if action == self.actions.forward and not_clear:
reward = -1
done = True
obs = self.gen_obs()
info = {}
return obs, reward, done, info
# Update the agent's position/direction
obs, reward, done, info = MiniGridEnv.step(self, action)
# Update obstacle positions
for i_obst in range(len(self.obstacles)):
old_pos = self.obstacles[i_obst].cur_pos
top = tuple(map(add, old_pos, (-1, -1)))
top = (max(-1, top[0]), max(-1, top[1]))
try:
self.place_obj(self.obstacles[i_obst], top=top, size=(3, 3), max_tries=100)
self.grid.set(*old_pos, None)
except:
pass
# generate observation after obstacle positions are updated
obs = self.gen_obs()
return obs, reward, done, info
# register classes of stochastic environments with obstacles
class DoorKeyObstEnv6x6(DoorKeyObstEnv):
def __init__(self):
super().__init__(size=6, n_obstacles=1)
class DoorKeyObstEnv8x8(DoorKeyObstEnv):
def __init__(self):
super().__init__(size=8, n_obstacles=1)
class DoorKeyObstEnv17x17(DoorKeyObstEnv):
def __init__(self):
super().__init__(size=17, n_obstacles=3)
# register classes of deterministic environments without obstacles
class DoorKeyNoObstEnv6x6(DoorKeyObstEnv):
def __init__(self):
super().__init__(size=6, n_obstacles=0)
class DoorKeyNoObstEnv7x7(DoorKeyObstEnv):
def __init__(self):
super().__init__(size=7, n_obstacles=0)
class DoorKeyNoObstEnv8x8(DoorKeyObstEnv):
def __init__(self):
super().__init__(size=8, n_obstacles=0)
class DoorKeyNoObstEnv17x17(DoorKeyObstEnv):
def __init__(self):
super().__init__(size=17, n_obstacles=0)
# register stochastic environments with obstacles
register(
id='MiniGrid-DoorKeyObst-6x6-v0',
entry_point='gym_minigrid.envs:DoorKeyObstEnv6x6'
)
register(
id='MiniGrid-DoorKeyObst-7x7-v0',
entry_point='gym_minigrid.envs:DoorKeyObstEnv'
)
register(
id='MiniGrid-DoorKeyObst-8x8-v0',
entry_point='gym_minigrid.envs:DoorKeyObstEnv8x8'
)
register(
id='MiniGrid-DoorKeyObst-17x17-v0',
entry_point='gym_minigrid.envs:DoorKeyObstEnv17x17'
)
# register deterministic environments without obstacles
register(
id='MiniGrid-DoorKeyNoObst-6x6-v0',
entry_point='gym_minigrid.envs:DoorKeyNoObstEnv6x6'
)
register(
id='MiniGrid-DoorKeyNoObst-7x7-v0',
entry_point='gym_minigrid.envs:DoorKeyNoObstEnv7x7'
)
register(
id='MiniGrid-DoorKeyNoObst-8x8-v0',
entry_point='gym_minigrid.envs:DoorKeyNoObstEnv8x8'
)
register(
id='MiniGrid-DoorKeyNoObst-17x17-v0',
entry_point='gym_minigrid.envs:DoorKeyNoObstEnv17x17'
) | 0.81928 | 0.387343 |
from os import close
import pygame
import time
import random
pygame.init()
width,height=800,600#screen size in pixels
disp=pygame.display.set_mode((width,height))
pygame.display.set_caption("SNEK")
green,red,black,white,grey=(0,204,153),(255,8,0),(0,0,0),(255,255,255),(128,128,128)
font_style=pygame.font.SysFont(None,30)
cell=20  # grid cell size; snake and food snap to this grid
level_no=1
pygame.mixer.init()
food= pygame.mixer.Sound('apple_bite.mp3')
death = pygame.mixer.Sound('oof.mp3')
# Load past scores and compute the numeric high score.
# BUG FIX: scores are stored one per line as text; max() over the raw
# strings compared lexicographically (so "9" beat "10"). Convert to int
# first and skip blank lines (the save path writes a leading '\n').
with open('scores.txt','r') as fh:
    scores=[int(s) for s in fh.read().split('\n') if s.strip()]
hs=max(scores) if scores else 0
def get_food_position(width, height, body):
    """Return a random (x, y) food position snapped to the module-level
    ``cell`` grid, re-sampling until it does not overlap the snake body."""
    while True:
        candidate_x = round(random.randrange(0, width - cell) / cell) * cell
        candidate_y = round(random.randrange(0, height - cell) / cell) * cell
        if [candidate_x, candidate_y] not in body:
            return candidate_x, candidate_y
def gameloop():
    """Run one snake session; returns only when the program exits.

    Uses the module-level globals: disp, width, height, cell, colors,
    font_style, level_no, hs, and the food/death sounds.
    """
    end=0
    x,y,x1,y1=width/2,height/2,0,0#x,y->head pos;x1,y1->change in pos
    snake_speed=10
    body,blen=[],1  # body: list of [x, y] cells; blen: target length
    clk=pygame.time.Clock()
    food_x, food_y= get_food_position(width,height, body)
    while not end:
        # --- input handling: arrow keys set the per-frame velocity ---
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                end=1
            if event.type==pygame.KEYDOWN:
                if event.key==pygame.K_LEFT:
                    x1,y1=-cell,0
                elif event.key==pygame.K_UP:
                    x1,y1=-0,-cell
                elif event.key==pygame.K_RIGHT:
                    x1,y1=cell,0
                elif event.key==pygame.K_DOWN:
                    x1,y1=0,cell
        x+=x1;y+=y1
        if x>width or x<0 or y>height or y<0:#screen boundary condition
            pygame.mixer.Sound.play(death)
            break
        # --- draw frame: food first, then the snake ---
        disp.fill(black)
        pygame.draw.rect(disp,red,[food_x,food_y,cell,cell])
        head=[]
        head.append(x);head.append(y)
        body.append(head)#append new head to body
        for block in body[:blen-1]:
            if block==head:#snake head touches body
                end=1
        if len(body)>blen:#snake movement display: drop the tail cell
            del body[0]
        for block in body:
            pygame.draw.rect(disp,green,[block[0],block[1],cell,cell])
        # --- HUD ---
        score=font_style.render("Score: "+str(blen-1),True,white)
        snk_sp=font_style.render("Snake Speed: "+str(snake_speed),True,white)
        lvl=font_style.render("Current Level:"+str(level_no),True,white)
        disp.blit(score,[25,0])
        disp.blit(snk_sp,[25,20])
        disp.blit(lvl,[625,0])
        pygame.display.update()
        if food_x==x and food_y==y:#contact with food
            food_x, food_y= get_food_position(width,height, body)
            blen+=1#body length increases
            pygame.mixer.Sound.play(food)
            if snake_speed<60:
                snake_speed+=0.5
            clk.tick(snake_speed)#fps
        clk.tick(snake_speed)
    # --- game-over screen and score persistence ---
    disp.fill(black)
    m=font_style.render("Game Over",True,red)
    disp.blit(m,[(width/2)-40,height/2])
    f_score=font_style.render("Score: "+str(blen-1),True,white)
    h_score=font_style.render("High Score: "+str(hs),True,white)
    disp.blit(f_score,[(width/2)-30,(height/2)+27])
    disp.blit(h_score,[(width/2)-45,(height/2)+54])
    # Append this run's score for future high-score computation.
    fh=open('scores.txt','a')
    fh.write('\n'+str(blen-1))
    fh.close()
    pygame.display.update()
    time.sleep(2)
    pygame.quit()
    quit()
gameloop()
import pygame
import time
import random
pygame.init()
width,height=800,600#screen
disp=pygame.display.set_mode((width,height))
pygame.display.set_caption("SNEK")
green,red,black,white,grey=(0,204,153),(255,8,0),(0,0,0),(255,255,255),(128,128,128)
font_style=pygame.font.SysFont(None,30)
cell=20
level_no=1
pygame.mixer.init()
food= pygame.mixer.Sound('apple_bite.mp3')
death = pygame.mixer.Sound('oof.mp3')
fh=open('scores.txt','r')
scores=fh.read().split('\n')
hs=max(scores)
fh.close()
def get_food_position(width, height, body):
while True:
food_x=round(random.randrange(0,width-cell)/cell)*cell
food_y=round(random.randrange(0,height-cell)/cell)*cell
if [food_x, food_y] not in body:
return food_x, food_y
def gameloop():
end=0
x,y,x1,y1=width/2,height/2,0,0#x,y->head pos;x1,y1->change in pos
snake_speed=10
body,blen=[],1
clk=pygame.time.Clock()
food_x, food_y= get_food_position(width,height, body)
while not end:
for event in pygame.event.get():
if event.type==pygame.QUIT:
end=1
if event.type==pygame.KEYDOWN:
if event.key==pygame.K_LEFT:
x1,y1=-cell,0
elif event.key==pygame.K_UP:
x1,y1=-0,-cell
elif event.key==pygame.K_RIGHT:
x1,y1=cell,0
elif event.key==pygame.K_DOWN:
x1,y1=0,cell
x+=x1;y+=y1
if x>width or x<0 or y>height or y<0:#screen boundary condition
pygame.mixer.Sound.play(death)
break
disp.fill(black)
pygame.draw.rect(disp,red,[food_x,food_y,cell,cell])
head=[]
head.append(x);head.append(y)
body.append(head)#append new head to body
for block in body[:blen-1]:
if block==head:#snake head touches body
end=1
if len(body)>blen:#snake movement display
del body[0]
for block in body:
pygame.draw.rect(disp,green,[block[0],block[1],cell,cell])
score=font_style.render("Score: "+str(blen-1),True,white)
snk_sp=font_style.render("Snake Speed: "+str(snake_speed),True,white)
lvl=font_style.render("Current Level:"+str(level_no),True,white)
disp.blit(score,[25,0])
disp.blit(snk_sp,[25,20])
disp.blit(lvl,[625,0])
pygame.display.update()
if food_x==x and food_y==y:#contact with food
food_x, food_y= get_food_position(width,height, body)
blen+=1#body length increases
pygame.mixer.Sound.play(food)
if snake_speed<60:
snake_speed+=0.5
clk.tick(snake_speed)#fps
clk.tick(snake_speed)
disp.fill(black)
m=font_style.render("Game Over",True,red)
disp.blit(m,[(width/2)-40,height/2])
f_score=font_style.render("Score: "+str(blen-1),True,white)
h_score=font_style.render("High Score: "+str(hs),True,white)
disp.blit(f_score,[(width/2)-30,(height/2)+27])
disp.blit(h_score,[(width/2)-45,(height/2)+54])
fh=open('scores.txt','a')
fh.write('\n'+str(blen-1))
fh.close()
pygame.display.update()
time.sleep(2)
pygame.quit()
quit()
gameloop() | 0.099105 | 0.104112 |
import config
class Attacker():
    """An attacker profile.

    Holds the attacker's name, per-software expertise levels, known
    softwares, and selection probability; the attack/CVE lists start empty
    and are filled in later by populateAttackerRewards().
    """

    def __init__(self, name, expertise, softwares, probability):
        self.name = name
        self.prob = probability
        self.expertise = expertise
        self.softwares = softwares
        # Populated later from the CVE data.
        self.attacks = []
        self.cve_list = []
def getAttackers():
    '''
    Parse attackers.cfg and return a list of Attacker objects.

    File format: first line is the attacker count; then, per attacker, four
    lines: name, comma-separated software list, comma-separated skill
    levels, and the attack probability. Parsing stops early on a blank name.
    '''
    attackers = []
    # BUG FIX: use a context manager so the config file is always closed
    # (the original leaked the handle). xrange -> range keeps the function
    # working under both Python 2 and Python 3.
    with open('{0}/attackers.cfg'.format(config.CONFIG_FILE_PATH), 'r') as f:
        num = int(f.readline().strip())
        for i in range(num):
            name = f.readline().strip()
            if name == '':
                break
            tech = f.readline().strip().split(',')
            skill = [float(x) for x in f.readline().strip().split(',')]
            prob = float(f.readline().strip())
            attackers.append(Attacker(name, skill, tech, prob))
    return attackers
def cveSoftwareInConfig(softwaresAffectedByCVE, attackerSoftwares):
    '''
    Return the index of the first attacker software that appears as a
    substring of any of the CVE's affected-software strings, or -1 if
    none match.
    '''
    # xrange -> enumerate: works under both Python 2 and 3 and avoids
    # manual indexing.
    for i, software in enumerate(attackerSoftwares):
        for affected in softwaresAffectedByCVE:
            if software in affected:
                return i
    return -1
def populateAttackerRewards(attackers, cve_list):
    '''
    For each attacker, record the CVEs it can exploit.

    An attacker can use a CVE when one of its known softwares is affected
    (per cveSoftwareInConfig) and its expertise for that software exceeds
    the CVE's exploit score; the CVE's rewards and name are appended to
    the attacker's attacks / cve_list.
    '''
    for attacker in attackers:
        for cve in cve_list:
            tech_index = cveSoftwareInConfig(cve.systems, attacker.softwares)
            if tech_index > -1 and attacker.expertise[tech_index] > cve.exploit_score:
                attacker.attacks.append(cve.rewards)
                attacker.cve_list.append(cve.name)
def printInGameFormat(attackers):
    # Emit the attackers in the game solver's expected text format.
    # NOTE: this module is Python 2 (print statements, xrange elsewhere).
    # Per attacker: probability, number of actions (attacks + NO-OP),
    # then one header line of '|'-joined CVE names ending in NO-OP,
    # followed by one line per reward column with '(0,0)' for the NO-OP.
    for attacker in attackers:
        print attacker.prob
        # One extra attack action is the NO-OP
        print str(len(attacker.attacks)+1)
        for i in range(4):
            if i == 0:
                # Header line with the attack names.
                temps = ""
                for cve in attacker.cve_list:
                    temps += cve + '|'
                print temps+'NO-OP'
            # i-th reward component of every attack, NO-OP contributes (0,0).
            temps = ""
            for attack in attacker.attacks:
                temps += str(attack[i])
            print temps+'(0,0)'
def getAllAttacksUsed(attackers):
    '''
    Return the distinct CVE names used by any attacker, in first-seen order.
    '''
    unique_attack_list = []
    for attacker in attackers:
        for cve_name in attacker.cve_list:
            if cve_name not in unique_attack_list:
                unique_attack_list.append(cve_name)
    return unique_attack_list
class Attacker():
def __init__(self, name, expertise, softwares, probability):
self.name = name
self.expertise = expertise
self.softwares = softwares
self.attacks = []
self.cve_list = []
self.prob = probability
def getAttackers():
attackers = []
f = open('{0}/attackers.cfg'.format(config.CONFIG_FILE_PATH), 'r')
num = int(f.readline().strip())
for i in xrange(num):
name = f.readline().strip()
if name == '':
break
tech = f.readline().strip().split(',')
skill = [float(x) for x in f.readline().strip().split(',')]
prob = float(f.readline().strip())
attackers.append(Attacker(name, skill, tech, prob))
return attackers
def cveSoftwareInConfig(softwaresAffectedByCVE, attackerSoftwares):
'''
If atleast one of the technologies in a configuration is among the affected software
'''
#print config
#print 'in'
#print softwares
#print '---'
for i in xrange(len(attackerSoftwares)):
for s in softwaresAffectedByCVE:
if attackerSoftwares[i] in s:
return i
return -1
def populateAttackerRewards(attackers, cve_list):
for attacker in attackers:
for cve in cve_list:
tech_index = cveSoftwareInConfig(cve.systems, attacker.softwares)
if tech_index > -1 and attacker.expertise[tech_index] > cve.exploit_score:
attacker.attacks.append(cve.rewards)
attacker.cve_list.append(cve.name)
def printInGameFormat(attackers):
for attacker in attackers:
print attacker.prob
# One extra attack action is the NO-OP
print str(len(attacker.attacks)+1)
for i in range(4):
if i == 0:
temps = ""
for cve in attacker.cve_list:
temps += cve + '|'
print temps+'NO-OP'
temps = ""
for attack in attacker.attacks:
temps += str(attack[i])
print temps+'(0,0)'
def getAllAttacksUsed(attackers):
unique_attack_list = []
for attacker in attackers:
for i in range(len(attacker.cve_list)):
if not attacker.cve_list[i] in unique_attack_list:
unique_attack_list.append(attacker.cve_list[i])
return unique_attack_list | 0.131996 | 0.124585 |
import numpy as np
try:
from sklearn.base import BaseEstimator, RegressorMixin, MultiOutputMixin
from sklearn.utils import check_X_y
from sklearn.utils.validation import (check_is_fitted, check_array,
FLOAT_DTYPES)
except ImportError:
raise ImportError(
"Install scikit-learn (e.g. pip install scikit-learn) to use this "
"extension.")
from .gmm import GMM
class GaussianMixtureRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
    """scikit-learn compatible Gaussian mixture regression.

    A joint Gaussian mixture model is fitted over the concatenated
    input/output features; predictions condition the mixture on the inputs.

    Parameters
    ----------
    n_components : int
        Number of MVNs that compose the GMM.

    priors : array, shape (n_components,), optional
        Weights of the components.

    means : array, shape (n_components, n_features), optional
        Means of the components.

    covariances : array, shape (n_components, n_features, n_features), optional
        Covariances of the components.

    verbose : int, optional (default: 0)
        Verbosity level.

    random_state : int or RandomState, optional (default: global random state)
        If an integer is given, it fixes the seed. Defaults to the global
        numpy random number generator.

    R_diff : float, optional (default: 1e-4)
        Minimum allowed difference of responsibilities between successive
        EM iterations.

    n_iter : int, optional (default: 500)
        Maximum number of iterations.

    init_params : str, optional (default: 'random')
        Parameter initialization strategy, 'random' (cheap) or 'kmeans++'
        (more expensive, often better). Has no effect when means and
        covariances are given in the constructor.

    Attributes
    ----------
    gmm_ : GMM
        Underlying GMM object.

    indices_ : array, shape (n_features,)
        Column indices of the inputs within the joint feature matrix.
    """
    def __init__(self, n_components, priors=None, means=None, covariances=None,
                 verbose=0, random_state=None, R_diff=1e-4, n_iter=500,
                 init_params="random"):
        # Per scikit-learn convention, __init__ only stores the parameters.
        self.n_components = n_components
        self.priors = priors
        self.means = means
        self.covariances = covariances
        self.verbose = verbose
        self.random_state = random_state
        self.R_diff = R_diff
        self.n_iter = n_iter
        self.init_params = init_params

    def fit(self, X, y):
        """Fit a joint GMM on (X, y); remembers which columns are inputs."""
        self.gmm_ = GMM(
            self.n_components, priors=self.priors, means=self.means,
            covariances=self.covariances, verbose=self.verbose,
            random_state=self.random_state)

        X, y = check_X_y(X, y, estimator=self.gmm_, dtype=FLOAT_DTYPES,
                         multi_output=True)
        # A 1-d target becomes a single output column.
        if y.ndim == 1:
            y = y[:, np.newaxis]

        self.indices_ = np.arange(X.shape[1])

        joint_samples = np.concatenate((X, y), axis=1)
        self.gmm_.from_samples(
            joint_samples, R_diff=self.R_diff, n_iter=self.n_iter,
            init_params=self.init_params)
        return self

    def predict(self, X):
        """Predict outputs by conditioning the fitted GMM on X."""
        check_is_fitted(self, ["gmm_", "indices_"])
        X = check_array(X, estimator=self.gmm_, dtype=FLOAT_DTYPES)

        return self.gmm_.predict(self.indices_, X)
try:
from sklearn.base import BaseEstimator, RegressorMixin, MultiOutputMixin
from sklearn.utils import check_X_y
from sklearn.utils.validation import (check_is_fitted, check_array,
FLOAT_DTYPES)
except ImportError:
raise ImportError(
"Install scikit-learn (e.g. pip install scikit-learn) to use this "
"extension.")
from .gmm import GMM
class GaussianMixtureRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""Gaussian mixture regression compatible to scikit-learn.
Parameters
----------
n_components : int
Number of MVNs that compose the GMM.
priors : array, shape (n_components,), optional
Weights of the components.
means : array, shape (n_components, n_features), optional
Means of the components.
covariances : array, shape (n_components, n_features, n_features), optional
Covariances of the components.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState, optional (default: global random state)
If an integer is given, it fixes the seed. Defaults to the global numpy
random number generator.
R_diff : float, optional (default: 1e-4)
Minimum allowed difference of responsibilities between successive
EM iterations.
n_iter : int, optional (default: 500)
Maximum number of iterations.
init_params : str, optional (default: 'random')
Parameter initialization strategy. If means and covariances are
given in the constructor, this parameter will have no effect.
'random' will sample initial means randomly from the dataset
and set covariances to identity matrices. This is the
computationally cheap solution.
'kmeans++' will use k-means++ initialization for means and
initialize covariances to diagonal matrices with variances
set based on the average distances of samples in each dimensions.
This is computationally more expensive but often gives much
better results.
Attributes
----------
gmm_ : GMM
Underlying GMM object
indices_ : array, shape (n_features,)
Indices of inputs
"""
def __init__(self, n_components, priors=None, means=None, covariances=None,
verbose=0, random_state=None, R_diff=1e-4, n_iter=500,
init_params="random"):
self.n_components = n_components
self.priors = priors
self.means = means
self.covariances = covariances
self.verbose = verbose
self.random_state = random_state
self.R_diff = R_diff
self.n_iter = n_iter
self.init_params = init_params
def fit(self, X, y):
self.gmm_ = GMM(
self.n_components, priors=self.priors, means=self.means,
covariances=self.covariances, verbose=self.verbose,
random_state=self.random_state)
X, y = check_X_y(X, y, estimator=self.gmm_, dtype=FLOAT_DTYPES,
multi_output=True)
if y.ndim == 1:
y = np.expand_dims(y, 1)
self.indices_ = np.arange(X.shape[1])
self.gmm_.from_samples(
np.hstack((X, y)), R_diff=self.R_diff, n_iter=self.n_iter,
init_params=self.init_params)
return self
def predict(self, X):
check_is_fitted(self, ["gmm_", "indices_"])
X = check_array(X, estimator=self.gmm_, dtype=FLOAT_DTYPES)
return self.gmm_.predict(self.indices_, X) | 0.916643 | 0.62134 |
import os
import subprocess
from subprocess import STDOUT
from sys import platform
def setupLilypondClean(path_to_lily):
    '''
    Prepend path_to_lily to the PATH environment variable, using the
    platform's path separator.
    '''
    os.environ['PATH'] = os.path.pathsep.join(
        [path_to_lily, os.environ['PATH']])
def setup_lilypond(path_to_lilypond_folder="default"):
    '''
    Optional helper method which works out the platform and calls the relevant setup method
    * param path_to_lilypond_folder: the path where lilypond.exe or the lilypond runner tool in mac is located. Not needed if
    setup is default, or if using linux
    * :return: None
    '''
    if platform.startswith("linux"):
        setup_lilypond_linux()
    elif platform == "win32":
        setup_lilypond_windows(path_to_lilypond_folder)
    elif platform == "darwin":
        setup_lilypond_osx(path_to_lilypond_folder)
    else:
        # Matches the original dict-dispatch behaviour for unknown platforms.
        raise KeyError(platform)
def setup_lilypond_windows(path="default"):
    '''
    Optional helper method which does the environment setup for lilypond in
    windows. As this is module level, you can run it before you set up the
    LilypondRenderer instance.
    * parameter: path is the folder containing "lilypond.exe", usually
    ProgramFiles/Lilypond/usr/bin. Leave at default to use that path.
    * returns: None
    '''
    default = "C:/Program Files (x86)/LilyPond/usr/bin"
    target = default if path == "default" else path
    # Windows PATH entries are ';'-separated.
    entries = os.environ['PATH'].split(";")
    entries.append(target)
    os.environ['PATH'] = ";".join(entries)
def setup_lilypond_linux():
    '''
    Optional helper method; on linux we only point the user at their
    package manager rather than touching the environment.
    * return: None
    '''
    message = "Sorry, not currently providing a setup method for linux systems. If you're using apt-get, run \"sudo apt-get install lilypond\". or on yum \"sudo yum install lilypond\""
    print(message)
def setup_lilypond_osx(path="default"):
    '''
    Optional helper method which sets up the environment on osx.
    * parameter: path is the folder containing the lilypond binary; leave at
    default to use the standard LilyPond.app location.
    * return: None
    '''
    default = "/Applications/LilyPond.app/Contents/Resources/bin"
    target = default if path == "default" else path
    # POSIX PATH entries are ':'-separated.
    entries = os.environ['PATH'].split(":")
    entries.append(target)
    os.environ['PATH'] = ":".join(entries)
import subprocess
from subprocess import STDOUT
from sys import platform
def setupLilypondClean(path_to_lily):
path = os.environ['PATH']
new_path = path_to_lily + os.path.pathsep + path
os.environ['PATH'] = new_path
def setup_lilypond(path_to_lilypond_folder="default"):
'''
Optional helper method which works out the platform and calls the relevant setup method
* param path_to_lilypond_folder: the path where lilypond.exe or the lilypond runner tool in mac is located. Not needed if
setup is default, or if using linux
* :return: None
'''
options = {"win32": setup_lilypond_windows, "darwin": setup_lilypond_osx}
if platform.startswith("linux"):
setup_lilypond_linux()
else:
options[platform](path_to_lilypond_folder)
def setup_lilypond_windows(path="default"):
'''
Optional helper method which does the environment setup for lilypond in windows. If you've ran this method, you do not need and should not provide
a lyscript when you instantiate this class. As this method is static, you can run this method before you set up the LilypondRenderer
instance.
* parameter: path_to_lilypond is the path to the folder which contains the file "lilypond.exe". Usually ProgramFiles/Lilypond/usr/bin.
Leave at default to set to this path.
* returns: None
'''
default = "C:/Program Files (x86)/LilyPond/usr/bin"
path_variable = os.environ['PATH'].split(";")
if path == "default":
path_variable.append(default)
else:
path_variable.append(path)
os.environ['PATH'] = ";".join(path_variable)
def setup_lilypond_linux():
'''
Optional helper method which downloads and installs lilypond from apt-get.
* return: None
'''
print("Sorry, not currently providing a setup method for linux systems. If you're using apt-get, run \"sudo apt-get install lilypond\". or on yum \"sudo yum install lilypond\"")
def setup_lilypond_osx(path="default"):
'''
Optional helper method which sets up the environment on osx.
* parameter: path is the path to the file you are using as an lyscript. Please refer to the lilypond.org documentation for what this should contain
* return: None
'''
default = "/Applications/LilyPond.app/Contents/Resources/bin"
path_variable = os.environ['PATH'].split(":")
if path == "default":
path_variable.append(default)
else:
path_variable.append(path)
os.environ['PATH'] = ":".join(path_variable) | 0.227469 | 0.242015 |
from argparse import ArgumentParser
import os
import numpy as np
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from PIL import Image
def load_image(filename):
    """Load *filename* from disk and return it as an RGB PIL image.

    Returns None if the file cannot be read or decoded, after printing the
    offending path, so a long evaluation run is not interrupted.
    """
    try:
        with open(filename, "rb") as f:
            image = Image.open(f)
            return image.convert("RGB")
    except OSError as e:
        # BUG FIX: Pillow raises OSError (IOError / UnidentifiedImageError)
        # for unreadable files -- UserWarning is never raised, so the old
        # handler was dead code; it also blocked on input(), stalling
        # unattended runs.
        print(filename)
        print("Something wrong happens while loading image: {} {}".format(filename, str(e)))
        return None
# Example Model definition
class Model(object):
    """Wraps an animecv image-folder encoder and scores image pairs."""
    def __init__(self, dirname):
        # Imported here so the module can be read without animecv installed.
        import animecv
        self.encoder = animecv.general.create_OML_ImageFolder_Encoder(dirname)
        self.encoder.to("cuda")  # requires a CUDA-capable GPU
    # img1, img2: PIL image
    def score(self, img1, img2):
        """Return the dot product of the two images' embedding vectors.

        Higher means more similar. NOTE(review): the dot product is only a
        cosine similarity if the encoder L2-normalizes its embeddings --
        confirm against the animecv encoder.
        """
        vecs = self.encoder.encode([img1, img2]).detach().cpu().numpy()
        score = np.dot(vecs[0], vecs[1])
        return score
if __name__=="__main__":
    parser = ArgumentParser()
    parser.add_argument("--test-pairs", help="CSV file which lists test image pairs.")
    parser.add_argument("--test-dataset-dir", help="Directory of test images.")
    parser.add_argument("--target-fnr", type=float, default=0.139, help="Reference FNR used to compute FPR.")
    args = parser.parse_args()
    model = Model("0206_seresnet152")
    df = pd.read_csv(args.test_pairs)
    # Keep only pairs not flagged as invalid in the CSV.
    df = df[df["invalid"]==0]
    true_labels = df["label"].values
    ROOT_DIR = args.test_dataset_dir  # NOTE(review): unused; paths below use args.test_dataset_dir directly
    scores = []
    # Score every pair; `label` is unpacked but unused here (true_labels
    # above already holds the ground truth in the same order).
    for pathA, pathB, label in tqdm(df[["pathA", "pathB", "label"]].values):
        img1 = load_image(os.path.join(args.test_dataset_dir, pathA))
        img2 = load_image(os.path.join(args.test_dataset_dir, pathB))
        score = model.score(img1, img2)
        scores.append(score)
    # ROC analysis: EER is where FPR == 1 - TPR, found by root-finding on
    # the interpolated ROC curve; also report FPR and threshold at the
    # reference FNR given on the command line.
    fpr, tpr, threshold = roc_curve(true_labels, scores)
    eer = 1. - brentq(lambda x: 1. - x - interp1d(tpr, fpr)(x), 0., 1.)
    fnr = 1. - tpr
    print("False Positive Rate: ", interp1d(fnr, fpr)(args.target_fnr))
    print("Threshold: ", interp1d(fnr, threshold)(args.target_fnr))
    print("Equal Error Rate: ", eer)
import os
import numpy as np
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from PIL import Image
def load_image(filename):
try:
with open(filename, "rb") as f:
image = Image.open(f)
return image.convert("RGB")
except UserWarning as e:
print(filename)
input("Something wrong happens while loading image: {} {}".format(filename, str(e)))
# Example Model definition
class Model(object):
def __init__(self, dirname):
import animecv
self.encoder = animecv.general.create_OML_ImageFolder_Encoder(dirname)
self.encoder.to("cuda")
# img1, img2: PIL image
def score(self, img1, img2):
vecs = self.encoder.encode([img1, img2]).detach().cpu().numpy()
score = np.dot(vecs[0], vecs[1])
return score
if __name__=="__main__":
parser = ArgumentParser()
parser.add_argument("--test-pairs", help="CSV file which lists test image pairs.")
parser.add_argument("--test-dataset-dir", help="Directory of test images.")
parser.add_argument("--target-fnr", type=float, default=0.139, help="Reference FNR used to compute FPR.")
args = parser.parse_args()
model = Model("0206_seresnet152")
df = pd.read_csv(args.test_pairs)
df = df[df["invalid"]==0]
true_labels = df["label"].values
ROOT_DIR = args.test_dataset_dir
scores = []
for pathA, pathB, label in tqdm(df[["pathA", "pathB", "label"]].values):
img1 = load_image(os.path.join(args.test_dataset_dir, pathA))
img2 = load_image(os.path.join(args.test_dataset_dir, pathB))
score = model.score(img1, img2)
scores.append(score)
fpr, tpr, threshold = roc_curve(true_labels, scores)
eer = 1. - brentq(lambda x: 1. - x - interp1d(tpr, fpr)(x), 0., 1.)
fnr = 1. - tpr
print("False Positive Rate: ", interp1d(fnr, fpr)(args.target_fnr))
print("Threshold: ", interp1d(fnr, threshold)(args.target_fnr))
print("Equal Error Rate: ", eer) | 0.609873 | 0.217358 |
from maya import cmds
from maya import mel
import re
window = 'cube_unwrap_window'
def select_by_normal(object, vector):
    """Return indices of faces on *object* whose normal roughly points
    along *vector* (dot product greater than 0.5)."""
    face_normals = cmds.polyInfo(object, faceNormals=True)
    matching = []
    for face_index in range(cmds.polyEvaluate(object, f=True)):
        # Each polyInfo line ends with the three normal components.
        components = face_normals[face_index].rsplit(' ', 3)[-3:]
        normal = [float(c) for c in components]
        dotProduct = sum(p * q for p, q in zip(normal, vector))
        if dotProduct + .5 > 1:
            matching.append(face_index)
    return matching
def select_components(object, indices, type='f'):
    """Add the given components of *object* to the current selection.

    *type* must be 'f' (face), 'e' (edge) or 'vtx' (vertex); any other
    value is silently ignored.
    """
    if type not in ('f', 'e', 'vtx'):
        return
    for index in indices:
        cmds.select('{}.{}[{}]'.format(object, type, index), add=True)
def get_indices(selection):
    """Extract integer component indices from Maya component strings.

    Handles both single components like 'obj.f[3]' and ranges like
    'obj.f[2:5]' (inclusive on both ends); strings without a bracketed
    index are skipped.
    """
    pattern = re.compile(r'\[(\d+)(?::(\d+))?\]')
    indices = []
    for name in selection:
        match = pattern.search(name)
        if not match:
            continue
        start = int(match.group(1))
        if match.group(2) is None:
            indices.append(start)
        else:
            indices.extend(range(start, int(match.group(2)) + 1))
    return indices
def unwrap(axis=0):
    """Cube-unwrap the UVs of every selected mesh.

    Reads the "front" axis from the UI radio buttons, cuts UV seams along
    the side and back edges of each (roughly cubic) selected object, then
    runs Unfold3D and deletes construction history.

    NOTE(review): the `axis` parameter is immediately overwritten from the
    'axis_radio' controls, so passing a value has no effect.
    """
    # Unit vectors for +X, +Y, +Z, -X, -Y, -Z.
    directions = ((1, 0, 0), (0, 1, 0), (0, 0, 1), (-1, 0, 0), (0, -1, 0), (0, 0, -1))
    axis = cmds.radioButtonGrp('axis_radio', query=True, select=True) - 1
    if axis == -1:
        # Nothing selected on the +X/+Y/+Z row; read the -X/-Y/-Z row instead.
        axis = cmds.radioButtonGrp('axis_radio_2', query=True, select=True) + 2
    frontDirection = directions[axis]
    for object in cmds.ls(selection=True):
        backDirection = tuple([-i for i in frontDirection])
        # The four directions left over are the "sides" of the cube.
        sideDirections = list(directions)
        sideDirections.remove(frontDirection)
        sideDirections.remove(backDirection)
        sideEdges = []
        for d in sideDirections:
            cmds.select(clear=True)
            select_components(object, select_by_normal(object, d), type='f')
            edges = cmds.polyListComponentConversion(
                cmds.ls(selection=True),
                fromFace=True,
                toEdge=True,
                border=True)
            sideEdges.extend(get_indices(edges))
        # Border edges shared by two side-face groups are the seams to cut.
        cutEdges = [s for s in sideEdges if sideEdges.count(s) > 1]
        backEdges = []
        for d in (backDirection, sideDirections[0]):
            cmds.select(clear=True)
            select_components(object, select_by_normal(object, d), type='f')
            edges = cmds.polyListComponentConversion(
                cmds.ls(selection=True),
                fromFace=True,
                toEdge=True,
                border=True)
            backEdges.append(get_indices(edges))
        # Back borders not shared with the first side also become seams.
        backEdges = set(backEdges[0]) - set(backEdges[1])
        cutEdges.extend(backEdges)
        # Sew all UV edges first so only the explicit cuts below remain open.
        cmds.select(object + '.e[:]', replace=True)
        cmds.polyMapSewMove()
        cmds.select(clear=True)
        select_components(object, cutEdges, type='e')
        cmds.polyMapCut()
        # Unfold the full UV set, then clean up history.
        cmds.select(object + '.map[:]', replace=True)
        mel.eval('u3dUnfold -iterations 1 -pack 1 -borderintersection 1 -triangleflip 1 -mapsize 1024 -roomspace 2')
        cmds.delete(object, constructionHistory=True)
        cmds.select(object, replace=True)
def delete_window():
    """Delete the plug-in's window UI."""
    cmds.deleteUI(window)
def ui():
    """Build and show the 'Unwrap Cubes' window.

    Two radio rows pick the front axis (+X/+Y/+Z and -X/-Y/-Z, sharing a
    single collection), plus Unwrap / Apply / Close buttons.  The button
    commands assume this module is importable as `cube_unwrap`.
    """
    # Rebuild the window from scratch and drop any saved preferences.
    if cmds.window(window, exists=True):
        delete_window()
    if cmds.windowPref(window, exists=True):
        cmds.windowPref(window, remove=True)
    cmds.window(window, title='Unwrap Cubes', widthHeight=(550, 120), sizeable=False)
    form = cmds.formLayout()
    frame = cmds.frameLayout(borderVisible=True, labelVisible=False, width=530, height=70)
    cmds.formLayout(form, edit=True, attachForm=[(frame, 'left', 10), (frame, 'top', 10)])
    settings_form = cmds.formLayout()
    axis_radio = cmds.radioButtonGrp('axis_radio', label='Axis:', labelArray3=['X', 'Y', 'Z'], numberOfRadioButtons=3)
    axis_radio_2 = cmds.radioButtonGrp('axis_radio_2', numberOfRadioButtons=3, shareCollection=axis_radio, label='', labelArray3=['-X', '-Y', '-Z'] )
    # select=0 leaves all buttons unselected initially.
    cmds.radioButtonGrp(axis_radio, edit=True, select=0)
    cmds.formLayout(settings_form, edit=True, attachForm=[(axis_radio, 'left', -50), (axis_radio, 'top', 10)])
    cmds.formLayout(settings_form, edit=True, attachForm=[(axis_radio_2, 'left', -50), (axis_radio_2, 'top', 40)])
    cmds.setParent(form)
    buttons = []
    buttons.append(cmds.button(label='Unwrap', width=170, command='cube_unwrap.unwrap(); cube_unwrap.delete_window();'))
    buttons.append(cmds.button(label='Apply', width=170, command='cube_unwrap.unwrap()'))
    buttons.append(cmds.button(label='Close', width=170, command='cube_unwrap.delete_window()'))
    cmds.formLayout(form, edit=True, attachForm=[(buttons[0], 'left', 10), (buttons[0], 'top', 90)])
    cmds.formLayout(form, edit=True, attachForm=[(buttons[1], 'left', 190), (buttons[1], 'top', 90)])
    cmds.formLayout(form, edit=True, attachForm=[(buttons[2], 'left', 370), (buttons[2], 'top', 90)])
cmds.showWindow(window) | maya/scripts/py/cube_unwrap.py |
from maya import cmds
from maya import mel
import re
window = 'cube_unwrap_window'
def select_by_normal(object, vector):
normals = cmds.polyInfo(object, faceNormals=True)
faces = []
for i in range(cmds.polyEvaluate(object, f=True)):
normal = [float(normals[i].rsplit(' ', 3)[j]) for j in range(-3, 0)]
dotProduct = sum(p * q for p, q in zip(normal, vector))
if dotProduct + .5 > 1:
faces.append(i)
return faces
def select_components(object, indices, type='f'):
components = ('f', 'e', 'vtx')
if type in components:
for i in indices:
cmds.select('{}.{}[{}]'.format(object, type, i), add=True)
def get_indices(selection):
indices = []
for s in selection:
result = re.search(r'\[(\d+)(?::(\d+))?\]', s)
if result and result.group(2):
indices.extend(range(int(result.group(1)), int(result.group(2)) + 1))
elif result:
indices.append(int(result.group(1)))
return indices
def unwrap(axis=0):
directions = ((1, 0, 0), (0, 1, 0), (0, 0, 1), (-1, 0, 0), (0, -1, 0), (0, 0, -1))
axis = cmds.radioButtonGrp('axis_radio', query=True, select=True) - 1
if axis == -1:
axis = cmds.radioButtonGrp('axis_radio_2', query=True, select=True) + 2
frontDirection = directions[axis]
for object in cmds.ls(selection=True):
backDirection = tuple([-i for i in frontDirection])
sideDirections = list(directions)
sideDirections.remove(frontDirection)
sideDirections.remove(backDirection)
sideEdges = []
for d in sideDirections:
cmds.select(clear=True)
select_components(object, select_by_normal(object, d), type='f')
edges = cmds.polyListComponentConversion(
cmds.ls(selection=True),
fromFace=True,
toEdge=True,
border=True)
sideEdges.extend(get_indices(edges))
cutEdges = [s for s in sideEdges if sideEdges.count(s) > 1]
backEdges = []
for d in (backDirection, sideDirections[0]):
cmds.select(clear=True)
select_components(object, select_by_normal(object, d), type='f')
edges = cmds.polyListComponentConversion(
cmds.ls(selection=True),
fromFace=True,
toEdge=True,
border=True)
backEdges.append(get_indices(edges))
backEdges = set(backEdges[0]) - set(backEdges[1])
cutEdges.extend(backEdges)
cmds.select(object + '.e[:]', replace=True)
cmds.polyMapSewMove()
cmds.select(clear=True)
select_components(object, cutEdges, type='e')
cmds.polyMapCut()
cmds.select(object + '.map[:]', replace=True)
mel.eval('u3dUnfold -iterations 1 -pack 1 -borderintersection 1 -triangleflip 1 -mapsize 1024 -roomspace 2')
cmds.delete(object, constructionHistory=True)
cmds.select(object, replace=True)
def delete_window():
cmds.deleteUI(window)
def ui():
if cmds.window(window, exists=True):
delete_window()
if cmds.windowPref(window, exists=True):
cmds.windowPref(window, remove=True)
cmds.window(window, title='Unwrap Cubes', widthHeight=(550, 120), sizeable=False)
form = cmds.formLayout()
frame = cmds.frameLayout(borderVisible=True, labelVisible=False, width=530, height=70)
cmds.formLayout(form, edit=True, attachForm=[(frame, 'left', 10), (frame, 'top', 10)])
settings_form = cmds.formLayout()
axis_radio = cmds.radioButtonGrp('axis_radio', label='Axis:', labelArray3=['X', 'Y', 'Z'], numberOfRadioButtons=3)
axis_radio_2 = cmds.radioButtonGrp('axis_radio_2', numberOfRadioButtons=3, shareCollection=axis_radio, label='', labelArray3=['-X', '-Y', '-Z'] )
cmds.radioButtonGrp(axis_radio, edit=True, select=0)
cmds.formLayout(settings_form, edit=True, attachForm=[(axis_radio, 'left', -50), (axis_radio, 'top', 10)])
cmds.formLayout(settings_form, edit=True, attachForm=[(axis_radio_2, 'left', -50), (axis_radio_2, 'top', 40)])
cmds.setParent(form)
buttons = []
buttons.append(cmds.button(label='Unwrap', width=170, command='cube_unwrap.unwrap(); cube_unwrap.delete_window();'))
buttons.append(cmds.button(label='Apply', width=170, command='cube_unwrap.unwrap()'))
buttons.append(cmds.button(label='Close', width=170, command='cube_unwrap.delete_window()'))
cmds.formLayout(form, edit=True, attachForm=[(buttons[0], 'left', 10), (buttons[0], 'top', 90)])
cmds.formLayout(form, edit=True, attachForm=[(buttons[1], 'left', 190), (buttons[1], 'top', 90)])
cmds.formLayout(form, edit=True, attachForm=[(buttons[2], 'left', 370), (buttons[2], 'top', 90)])
cmds.showWindow(window) | 0.300335 | 0.227041 |
from itertools import product
from math import log
from typing import List, Tuple
import numpy as np
from scipy.special import logsumexp
from data import FeatVec
from hmm.state import State, NULL_OBSERVATION
TP_EPS = 1e-15
def _compute_adjacency(states: List[State]) -> Tuple[dict, dict]:
    """Build incoming and outgoing adjacency maps from each state's neighbours.

    Returns ``(e_in, e_out)`` where ``e_out[s]`` lists ``(neighbour,
    transition_prob)`` edges leaving ``s`` and ``e_in[s]`` lists the
    edges arriving at ``s``.
    """
    e_in = {state: [] for state in states}
    e_out = {state: [] for state in states}
    for source in states:
        for target, prob in zip(source.neigh, source.trans):
            e_out[source].append((target, prob))
            e_in[target].append((source, prob))
    return e_in, e_out
def _compute_forward_backward(e_in, e_out, states, observations):
    """Run the forward-backward algorithm in log space.

    Parameters
    ----------
    e_in, e_out : dict
        Adjacency maps produced by ``_compute_adjacency``.
    states : list
        HMM states; each state's ``rank`` is assumed to be its index in
        this list, with non-emitting entry/exit states at both ends.
    observations : array-like
        Observation sequence (the caller appends a NULL observation last).

    Returns
    -------
    F, B : np.ndarray
        (n_states, n_observations) log forward / backward matrices.
    """
    n, m = len(states), len(observations)
    F, B = np.full((n, m), -np.inf), np.full((n, m), -np.inf)
    # Initialisation: the first emitting state emits the first observation;
    # the final (exit) state carries log-probability 0 at the last step.
    F[1, 0] = states[1].emitting_logprobability(observations[0])
    B[n - 1, -1] = 0.
    for j, i in product(range(1, m), range(1, n)):
        # Log-sum over predecessors; TP_EPS guards log(0) transitions.
        summands = [F[k.rank, j - 1] + log(tp + TP_EPS) for k, tp in e_in[states[i]]]
        F[i, j] = logsumexp(summands) + states[i].emitting_logprobability(observations[j])
    for j, i in product(range(m - 2, -1, -1), range(n - 2, 0, -1)):
        # Backward pass: successors contribute their emission at j + 1.
        summands = [B[k.rank, j + 1] + log(tp + TP_EPS) + k.emitting_logprobability(observations[j + 1])
                    for k, tp in e_out[states[i]]]
        B[i, j] = logsumexp(summands)
    return F, B
def _compute_gamma(F, B):
gamma = F + B
denominator = logsumexp(gamma, axis=0)
denominator[denominator == -np.inf] = 0.
return gamma - denominator
def _compute_ksi(F, B, e_out, states, observations):
    """Return log pairwise-transition posteriors.

    ``ksi[i, j, t]`` is the log posterior of occupying state ``i`` at time
    ``t`` and state ``j`` at ``t + 1``, normalised per time step; entries
    for edges that do not exist remain -inf.
    """
    n, m = F.shape
    ksi = np.full((n, n, m), -np.inf)
    for i, t in product(range(n), range(m - 1)):
        for j, tp in e_out[states[i]]:
            # forward mass into i + transition i->j + backward mass out of j
            # + emission of observation t+1 by j.
            ksi[i, j.rank, t] = F[i, t] + B[j.rank, t + 1] + log(tp + TP_EPS) \
                + j.emitting_logprobability(observations[t + 1])
    denominator = logsumexp(ksi, axis=(0, 1))
    # All--inf time slices would yield NaN after subtraction; skip them.
    denominator[denominator == -np.inf] = 0.
    return ksi - denominator
def baum_welch(states: List[State], observations: List[FeatVec]):
    """One E-step of Baum-Welch: return ``(gamma, ksi)`` posteriors.

    ``states`` must start and end with non-emitting entry/exit states.
    ``gamma`` has shape (n_states, T) and ``ksi`` (n_states, n_states, T),
    both converted back to probability space with non-finite entries
    mapped to 0 by ``nan_to_num``.
    """
    assert (not states[0].is_emitting) and (not states[-1].is_emitting)
    e_in, e_out = _compute_adjacency(states)
    # Append the NULL observation so the exit state has a time slot.
    observations = np.vstack([observations, NULL_OBSERVATION])
    F, B = _compute_forward_backward(e_in, e_out, states, observations)
    # Drop the artificial final time step added above.
    gamma = _compute_gamma(F, B)[:, :-1]
    ksi = _compute_ksi(F, B, e_out, states, observations)[:, :, :-1]
    gamma, ksi = np.nan_to_num(np.exp(gamma)), np.nan_to_num(np.exp(ksi))
return gamma, ksi | hmm/baum_welch.py | from itertools import product
from math import log
from typing import List, Tuple
import numpy as np
from scipy.special import logsumexp
from data import FeatVec
from hmm.state import State, NULL_OBSERVATION
TP_EPS = 1e-15
def _compute_adjacency(states: List[State]) -> Tuple[dict, dict]:
e_in, e_out = {s: [] for s in states}, {s: [] for s in states}
for s in states:
for n, tp in zip(s.neigh, s.trans):
e_out[s].append((n, tp))
e_in[n].append((s, tp))
return e_in, e_out
def _compute_forward_backward(e_in, e_out, states, observations):
n, m = len(states), len(observations)
F, B = np.full((n, m), -np.inf), np.full((n, m), -np.inf)
F[1, 0] = states[1].emitting_logprobability(observations[0])
B[n - 1, -1] = 0.
for j, i in product(range(1, m), range(1, n)):
summands = [F[k.rank, j - 1] + log(tp + TP_EPS) for k, tp in e_in[states[i]]]
F[i, j] = logsumexp(summands) + states[i].emitting_logprobability(observations[j])
for j, i in product(range(m - 2, -1, -1), range(n - 2, 0, -1)):
summands = [B[k.rank, j + 1] + log(tp + TP_EPS) + k.emitting_logprobability(observations[j + 1])
for k, tp in e_out[states[i]]]
B[i, j] = logsumexp(summands)
return F, B
def _compute_gamma(F, B):
gamma = F + B
denominator = logsumexp(gamma, axis=0)
denominator[denominator == -np.inf] = 0.
return gamma - denominator
def _compute_ksi(F, B, e_out, states, observations):
n, m = F.shape
ksi = np.full((n, n, m), -np.inf)
for i, t in product(range(n), range(m - 1)):
for j, tp in e_out[states[i]]:
ksi[i, j.rank, t] = F[i, t] + B[j.rank, t + 1] + log(tp + TP_EPS) \
+ j.emitting_logprobability(observations[t + 1])
denominator = logsumexp(ksi, axis=(0, 1))
denominator[denominator == -np.inf] = 0.
return ksi - denominator
def baum_welch(states: List[State], observations: List[FeatVec]):
assert (not states[0].is_emitting) and (not states[-1].is_emitting)
e_in, e_out = _compute_adjacency(states)
observations = np.vstack([observations, NULL_OBSERVATION])
F, B = _compute_forward_backward(e_in, e_out, states, observations)
gamma = _compute_gamma(F, B)[:, :-1]
ksi = _compute_ksi(F, B, e_out, states, observations)[:, :, :-1]
gamma, ksi = np.nan_to_num(np.exp(gamma)), np.nan_to_num(np.exp(ksi))
return gamma, ksi | 0.597373 | 0.561996 |
import speech_recognition as sr
import os
from pocketsphinx import LiveSpeech, get_model_path
import socket
# Command-line speech-to-text utility: uses Google's online recogniser when
# the machine has internet access, otherwise falls back to offline CMU
# PocketSphinx with a local acoustic/language model.
print("###welcome to the 🗣️ speech-recognition-command-line-utility🗣️ ###")
print("#Initiating speech recognition ...")
print("\nNow checking the internet connectivity of your device ...🤖️🤖️")
REMOTE_SERVER = "www.google.com"
isOnline = False
try:
    # Connectivity probe: resolve the host and open a TCP connection to
    # port 80 with a 2-second timeout.
    host = socket.gethostbyname(REMOTE_SERVER)
    s = socket.create_connection((host, 80), 2)
    s.close()
    print("Voilaa !! device found online.🤗️🤗️🤗️🤗️")
    print("here we are going to use 📢️ google-speech-recognition📢️ ,hence please make sure that internet speed is good enough to listen and change.")
    isOnline = True
except:
    # NOTE(review): a bare except also swallows KeyboardInterrupt here;
    # catching socket.error/OSError would be safer.
    print("Your Device is offline !!😑️😑️")
    print("here we are going to use CMU-Sphinx,its accuracy is not so good so please try to say a single word and wait for the response.")
#corpus to gather the sentences.
corpus = []
if isOnline == True :
    #device found online
    #using google speech recognition here
    print("\nenter [e] to say something and [s]to stop in your choice\n")
    while True :
        r = sr.Recognizer()
        # Prompt until the user enters 's' (stop) or 'e' (speak).
        while True :
            ch = input("\nYour Choice :")
            if ch.lower() == "s" or ch.lower() == "e" :
                break
            else :
                print("please enter the correct choice !")
        if ch.lower() == "s" :
            break
        # Record one utterance from the default microphone and send it to
        # the Google Web Speech API.
        with sr.Microphone() as source:
            print("\nSay")
            audio = r.listen(source)
            try:
                told = r.recognize_google(audio)
                print("you said : " + told)
                corpus.append(str(told))
            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
else :
    #device found offline
    #using pocketsphinx here
    speech = LiveSpeech(
        verbose=False,
        sampling_rate=16000,
        buffer_size=2048,
        no_search=False,
        full_utt=False,
        #added my model here.
        #you can also add your model here.
        #enter name of your model
        hmm='en-in',
        #enter name of lm file
        lm='en-in.lm.bin',
        #enter name of dict file
        dic='cmudict-en-in.dict'
    )
    print("\n Note: enter ctrl+c to stop listening.\n\n start saying something :")
    # LiveSpeech yields one recognised phrase per utterance until interrupted.
    for phrase in speech:
        print(phrase)
        corpus.append(str(phrase))
# Optionally replay everything captured in this session, numbered.
print("\ndo you wish to print out the sentences you have spoken yet ?? ")
print("if YES -> enter[Y] below")
printAll = str(input("your choice : ")).upper()
if printAll == "Y" :
    i = 1
    for sen in corpus :
        print(i,"-> ",sen)
i = i + 1 | script.py |
import speech_recognition as sr
import os
from pocketsphinx import LiveSpeech, get_model_path
import socket
print("###welcome to the 🗣️ speech-recognition-command-line-utility🗣️ ###")
print("#Initiating speech recognition ...")
print("\nNow checking the internet connectivity of your device ...🤖️🤖️")
REMOTE_SERVER = "www.google.com"
isOnline = False
try:
host = socket.gethostbyname(REMOTE_SERVER)
s = socket.create_connection((host, 80), 2)
s.close()
print("Voilaa !! device found online.🤗️🤗️🤗️🤗️")
print("here we are going to use 📢️ google-speech-recognition📢️ ,hence please make sure that internet speed is good enough to listen and change.")
isOnline = True
except:
print("Your Device is offline !!😑️😑️")
print("here we are going to use CMU-Sphinx,its accuracy is not so good so please try to say a single word and wait for the response.")
#corpus to gather the sentences.
corpus = []
if isOnline == True :
#device found online
#using google speech recognition here
print("\nenter [e] to say something and [s]to stop in your choice\n")
while True :
r = sr.Recognizer()
while True :
ch = input("\nYour Choice :")
if ch.lower() == "s" or ch.lower() == "e" :
break
else :
print("please enter the correct choice !")
if ch.lower() == "s" :
break
with sr.Microphone() as source:
print("\nSay")
audio = r.listen(source)
try:
told = r.recognize_google(audio)
print("you said : " + told)
corpus.append(str(told))
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
else :
#device found offline
#using pocketsphinx here
speech = LiveSpeech(
verbose=False,
sampling_rate=16000,
buffer_size=2048,
no_search=False,
full_utt=False,
#added my model here.
#you can also add your model here.
#enter name of your model
hmm='en-in',
#enter name of lm file
lm='en-in.lm.bin',
#enter name of dict file
dic='cmudict-en-in.dict'
)
print("\n Note: enter ctrl+c to stop listening.\n\n start saying something :")
for phrase in speech:
print(phrase)
corpus.append(str(phrase))
print("\ndo you wish to print out the sentences you have spoken yet ?? ")
print("if YES -> enter[Y] below")
printAll = str(input("your choice : ")).upper()
if printAll == "Y" :
i = 1
for sen in corpus :
print(i,"-> ",sen)
i = i + 1 | 0.136551 | 0.125762 |
from __future__ import print_function
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem
class AngularVelocity321(Component):
    """
    Notes
    ------
    Evaluates the body frame angular velocity from 321 Euler angles and their derivatives
    Units are in radians and radians/s
    Params
    ------
    Yaw : float
        Yaw angle (3-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
    Pitch : float
        Pitch angle (2-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
    Roll : float
        Roll angle (1-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
    Yaw rate : float
        Yaw rate of pod body frame. Default value is .01 rad/s
    Pitch rate : float
        Pitch rate of pod body frame. Default value is .01 rad/s
    Roll rate : float
        Roll rate of pod body frame. Default value is 0.0 rad/s
    Returns
    -------
    Angular velocity : float
        Returns the body frame angular velocity of the pod in rad/s
    """
    def __init__(self):
        super(AngularVelocity321, self).__init__()
        # Euler angles (3-2-1 sequence) and their time derivatives.
        # NOTE(review): rate params are declared with units 'rad' although
        # they are rates — presumably 'rad/s'; confirm before changing.
        self.add_param('psi', val = 0.0, units = 'rad', desc = 'Pod yaw angle')
        self.add_param('theta', val = 0.0, units = 'rad', desc = 'Pod pitch angle')
        self.add_param('phi', val = 0.0, units = 'rad', desc = 'Pod roll angle')
        self.add_param('psi_dot', val = 0.0, units = 'rad', desc = 'Pod yaw rate')
        self.add_param('theta_dot', val = 0.0, units = 'rad', desc = 'Pod pitch rate')
        self.add_param('phi_dot', val = 0.0, units = 'rad', desc = 'Pod roll rate')
        self.add_output('omega_b', val = np.matrix('0.0; 0.0; 0.0'), units = 'rad/s', desc = 'Angular velocity vector')
    def solve_nonlinear(self, p, u, r):
        """
        Notes
        ------
        omega_b = [[-s(theta), 0, 1], [s(phi)*c(theta), c(phi), 0], [c(phi)*c(theta), -s(phi), 0]] * [[phi_dot], [theta_dot], [psi_dot]]
        Params
        ------
        Yaw : float
            Yaw angle (3-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
        Pitch : float
            Pitch angle (2-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
        Roll : float
            Roll angle (1-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
        Yaw rate : float
            Yaw rate of pod body frame. Default value is .01 rad/s
        Pitch rate : float
            Pitch rate of pod body frame. Default value is .01 rad/s
        Roll rate : float
            Roll rate of pod body frame. Default value is 0.0 rad/s
        Returns
        -------
        Angular velocity : float
            Returns the body frame angular velocity of the pod in rad/s
        """
        psi = p['psi']
        theta = p['theta']
        phi = p['phi']
        psi_dot = p['psi_dot']
        theta_dot = p['theta_dot']
        phi_dot = p['phi_dot']
        # Kinematic mapping from 3-2-1 Euler-angle rates to body rates.
        B = np.matrix([[-np.sin(theta), 0.0, 1.0], [np.sin(phi)*np.cos(theta), np.cos(phi), 0.0], [np.cos(phi)*np.cos(theta), -np.sin(phi), 0]])
        u['omega_b'] = B * np.matrix([[phi_dot], [theta_dot], [psi_dot]])
if __name__ == '__main__':
    # Minimal driver: wire independent variables into the component and
    # evaluate the body-frame angular velocity once.
    top = Problem()
    root = top.root = Group()
    params = (
        ('psi', 0.0, {'units' : 'rad'}),
        ('theta', 0.0, {'units' : 'rad'}),
        ('phi', 0.0, {'units' : 'rad'}),
        # NOTE(review): rates keep units 'rad' to match the component's
        # param declarations, though 'rad/s' would be physically correct.
        ('psi_dot', 0.1, {'units' : 'rad'}),
        ('theta_dot', 0.1, {'units' : 'rad'}),
        ('phi_dot', 0.0, {'units' : 'rad'})
    )
    # Bug fix: the promotes lists previously repeated 'psi_dot' and never
    # promoted 'phi_dot', leaving the roll rate unconnected.
    promoted = ['psi', 'theta', 'phi', 'psi_dot', 'theta_dot', 'phi_dot']
    root.add('input_vars', IndepVarComp(params), promotes = promoted)
    root.add('p', AngularVelocity321(), promotes = promoted + ['omega_b'])
    top.setup()
    top.run()
    print('Body frame angular velocity vector = ')
    print(top['omega_b'])
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem
class AngularVelocity321(Component):
"""
Notes
------
Evaluates the body frame angular velocity from 321 Euler angles and their derivatives
Units are in radians and radians/s
Params
------
Yaw : float
Yaw angle (3-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Pitch : float
Pitch angle (2-axis rotation) of body fram with respect to the inertial NED frame. Default value is 0.0 rad
Roll : float
Roll angle (1-axis rotation) of body fram with respect to the inertial NED frame. Default value is 0.0 rad
Yaw rate : float
Yaw rate of pod body frame. Default value is .01 rad/s
Pitch rate : float
Pitch rate of pod body frame. Default value is .01 rad/s
Roll rate : float
Roll rate of pod body frame. Default value is 0.0 rad/s
Returns
-------
Angular velocity : float
Returns the body fame angular velocity of the pod in rad/s
"""
def __init__(self):
super(AngularVelocity321, self).__init__()
self.add_param('psi', val = 0.0, units = 'rad', desc = 'Pod yaw angle')
self.add_param('theta', val = 0.0, units = 'rad', desc = 'Pod pitch angle')
self.add_param('phi', val = 0.0, units = 'rad', desc = 'Pod roll angle')
self.add_param('psi_dot', val = 0.0, units = 'rad', desc = 'Pod yaw rate')
self.add_param('theta_dot', val = 0.0, units = 'rad', desc = 'Pod pitch rate')
self.add_param('phi_dot', val = 0.0, units = 'rad', desc = 'Pod roll rate')
self.add_output('omega_b', val = np.matrix('0.0; 0.0; 0.0'), units = 'rad/s', desc = 'Angular velocity vector')
def solve_nonlinear(self, p, u, r):
"""
Notes
------
omega = [[s(psi)*s(theta), c(psi), 0], [c(psi)*s(theta), -s(psi), 0], [c(theta), 0,1]] * [[phi], [theta], [psi]]
Params
------
Yaw : float
Yaw angle (3-axis rotation) of body frame with respect to the inertial NED frame. Default value is 0.0 rad
Pitch : float
Pitch angle (2-axis rotation) of body fram with respect to the inertial NED frame. Default value is 0.0 rad
Roll : float
Roll angle (1-axis rotation) of body fram with respect to the inertial NED frame. Default value is 0.0 rad
Yaw rate : float
Yaw rate of pod body frame. Default value is .01 rad/s
Pitch rate : float
Pitch rate of pod body frame. Default value is .01 rad/s
Roll rate : float
Roll rate of pod body frame. Default value is 0.0 rad/s
Returns
-------
Angular velocity : float
Returns the body fame angular velocity of the pod in rad/s
"""
psi = p['psi']
theta = p['theta']
phi = p['phi']
psi_dot = p['psi_dot']
theta_dot = p['theta_dot']
phi_dot = p['phi_dot']
B = np.matrix([[-np.sin(theta), 0.0, 1.0], [np.sin(phi)*np.cos(theta), np.cos(phi), 0.0], [np.cos(phi)*np.cos(theta), -np.sin(phi), 0]])
u['omega_b'] = B * np.matrix([[phi_dot], [theta_dot], [psi_dot]])
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
params = (
('psi', 0.0, {'units' : 'rad'}),
('theta', 0.0, {'units' : 'rad'}),
('phi', 0.0, {'units' : 'rad'}),
('psi_dot', 0.1, {'units' : 'rad'}),
('theta_dot', 0.1, {'units' : 'rad'}),
('phi_dot', 0.0, {'units' : 'rad'})
)
root.add('input_vars', IndepVarComp(params), promotes = ['psi', 'theta', 'phi', 'psi_dot', 'theta_dot', 'psi_dot'])
root.add('p', AngularVelocity321(), promotes = ['psi', 'theta', 'phi', 'psi_dot', 'theta_dot', 'psi_dot', 'omega_b'])
top.setup()
top.run()
print('Bod frame angular velocity vector = ')
print(top['omega_b']) | 0.881085 | 0.785638 |
from typing import Dict
from apysc._animation.animation_skew_y_interface import AnimationSkewYInterface
from apysc._type.attr_linking_interface import AttrLinkingInterface
from apysc._type.int import Int
from apysc._type.revert_interface import RevertInterface
class SkewYInterface(
        AnimationSkewYInterface, RevertInterface, AttrLinkingInterface):
    """Mix-in that adds a revertible, attribute-linked ``skew_y`` property.

    Maintains the current skew-y value as an apysc ``Int`` and appends the
    matching JavaScript skew expressions whenever the value changes.
    """

    # Current skew-y value; created lazily on first access.
    _skew_y: Int

    def _initialize_skew_y_if_not_initialized(self) -> None:
        """
        Initialize the _skew_y attribute if it hasn't been initialized yet.
        """
        if hasattr(self, '_skew_y'):
            return
        self._skew_y = Int(0)
        self._append_skew_y_attr_linking_setting()

    def _append_skew_y_attr_linking_setting(self) -> None:
        """
        Append a skew-y attribute linking setting.
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_=self._append_skew_y_attr_linking_setting,
                locals_=locals(),
                module_name=__name__, class_=SkewYInterface):
            self._append_applying_new_attr_val_exp(
                new_attr=self._skew_y, attr_name='skew_y')
            self._append_attr_to_linking_stack(
                attr=self._skew_y, attr_name='skew_y')

    @property
    def skew_y(self) -> Int:
        """
        Get a current skew y value of the instance.

        Returns
        -------
        skew_y : Int
            Current skew y value of the instance.

        References
        ----------
        - GraphicsBase skew_x and skew_y interfaces document
        - https://simon-ritchie.github.io/apysc/graphics_base_skew.html
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_='skew_y', locals_=locals(),
                module_name=__name__, class_=SkewYInterface):
            from apysc._type import value_util
            self._initialize_skew_y_if_not_initialized()
            # Return a copy so callers cannot mutate the internal value.
            return value_util.get_copy(value=self._skew_y)

    @skew_y.setter
    def skew_y(self, value: Int) -> None:
        """
        Update a skew y value of this instance.

        Parameters
        ----------
        value : Int
            Skew y value to set.

        References
        ----------
        - GraphicsBase skew_x and skew_y interfaces document
        - https://simon-ritchie.github.io/apysc/graphics_base_skew.html
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_='skew_y', locals_=locals(),
                module_name=__name__, class_=SkewYInterface):
            from apysc._validation import number_validation
            self._initialize_skew_y_if_not_initialized()
            number_validation.validate_integer(integer=value)
            before_value: ap.Int = self._skew_y
            self._skew_y = value
            self._append_skew_y_update_expression(before_value=before_value)
            self._append_skew_y_attr_linking_setting()

    def _append_skew_y_update_expression(
            self, *, before_value: Int) -> None:
        """
        Append the skew y updating expression.

        Parameters
        ----------
        before_value : Int
            Before updating value.
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_=self._append_skew_y_update_expression,
                locals_=locals(),
                module_name=__name__, class_=SkewYInterface):
            from apysc._type import value_util
            before_value_str: str = value_util.get_value_str_for_expression(
                value=before_value)
            after_value_str: str = value_util.get_value_str_for_expression(
                value=self._skew_y)
            # Undo the previous skew, apply the new one, then sync the
            # JavaScript-side variable with the new value.
            expression: str = (
                f'{self.variable_name}.skew(0, -{before_value_str});'
                f'\n{self.variable_name}.skew(0, {after_value_str});'
                f'\n{before_value_str} = {after_value_str};'
            )
            ap.append_js_expression(expression=expression)

    # Snapshot-name -> saved skew-y value, used by _make_snapshot/_revert.
    _skew_y_snapshot: Dict[str, int]

    def _make_snapshot(self, *, snapshot_name: str) -> None:
        """
        Make a value's snapshot.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        self._initialize_skew_y_if_not_initialized()
        self._set_single_snapshot_val_to_dict(
            dict_name='_skew_y_snapshot',
            value=int(self._skew_y._value), snapshot_name=snapshot_name)

    def _revert(self, *, snapshot_name: str) -> None:
        """
        Revert a value if snapshot exists.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        if not self._snapshot_exists(snapshot_name=snapshot_name):
            return
self._skew_y._value = self._skew_y_snapshot[snapshot_name] | apysc/_display/skew_y_interface.py | from typing import Dict
from apysc._animation.animation_skew_y_interface import AnimationSkewYInterface
from apysc._type.attr_linking_interface import AttrLinkingInterface
from apysc._type.int import Int
from apysc._type.revert_interface import RevertInterface
class SkewYInterface(
AnimationSkewYInterface, RevertInterface, AttrLinkingInterface):
_skew_y: Int
def _initialize_skew_y_if_not_initialized(self) -> None:
"""
Initialize the _skew_y attribute if it hasn't been initialized yet.
"""
if hasattr(self, '_skew_y'):
return
self._skew_y = Int(0)
self._append_skew_y_attr_linking_setting()
def _append_skew_y_attr_linking_setting(self) -> None:
"""
Append a skew-y attribute linking setting.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_skew_y_attr_linking_setting,
locals_=locals(),
module_name=__name__, class_=SkewYInterface):
self._append_applying_new_attr_val_exp(
new_attr=self._skew_y, attr_name='skew_y')
self._append_attr_to_linking_stack(
attr=self._skew_y, attr_name='skew_y')
@property
def skew_y(self) -> Int:
"""
Get a current skew y value of the instance.
Returns
-------
skew_y : Int
Current skew y value of the instance.
References
----------
- GraphicsBase skew_x and skew_y interfaces document
- https://simon-ritchie.github.io/apysc/graphics_base_skew.html
"""
import apysc as ap
with ap.DebugInfo(
callable_='skew_y', locals_=locals(),
module_name=__name__, class_=SkewYInterface):
from apysc._type import value_util
self._initialize_skew_y_if_not_initialized()
return value_util.get_copy(value=self._skew_y)
@skew_y.setter
def skew_y(self, value: Int) -> None:
"""
Update a skew y value of this instance.
Parameters
----------
value : Int
Skew y value to set.
References
----------
- GraphicsBase skew_x and skew_y interfaces document
- https://simon-ritchie.github.io/apysc/graphics_base_skew.html
"""
import apysc as ap
with ap.DebugInfo(
callable_='skew_y', locals_=locals(),
module_name=__name__, class_=SkewYInterface):
from apysc._validation import number_validation
self._initialize_skew_y_if_not_initialized()
number_validation.validate_integer(integer=value)
before_value: ap.Int = self._skew_y
self._skew_y = value
self._append_skew_y_update_expression(before_value=before_value)
self._append_skew_y_attr_linking_setting()
def _append_skew_y_update_expression(
self, *, before_value: Int) -> None:
"""
Append the skew y updating expression.
Parameters
----------
before_value : ap.Int
Before updating value.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_skew_y_update_expression,
locals_=locals(),
module_name=__name__, class_=SkewYInterface):
from apysc._type import value_util
before_value_str: str = value_util.get_value_str_for_expression(
value=before_value)
after_value_str: str = value_util.get_value_str_for_expression(
value=self._skew_y)
expression: str = (
f'{self.variable_name}.skew(0, -{before_value_str});'
f'\n{self.variable_name}.skew(0, {after_value_str});'
f'\n{before_value_str} = {after_value_str};'
)
ap.append_js_expression(expression=expression)
_skew_y_snapshot: Dict[str, int]
def _make_snapshot(self, *, snapshot_name: str) -> None:
"""
Make a value's snapshot.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
self._initialize_skew_y_if_not_initialized()
self._set_single_snapshot_val_to_dict(
dict_name='_skew_y_snapshot',
value=int(self._skew_y._value), snapshot_name=snapshot_name)
def _revert(self, *, snapshot_name: str) -> None:
"""
Revert a value if snapshot exists.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not self._snapshot_exists(snapshot_name=snapshot_name):
return
self._skew_y._value = self._skew_y_snapshot[snapshot_name] | 0.872293 | 0.115811 |
from django.db import models
from accounts.models import User
# Create your models here.
#
class parts(models.Model):
    """Catalogue entry for a biological part (table ``bio_parts``).

    NOTE(review): ``__unicode__`` is the Python 2 Django convention; under
    Python 3 a ``__str__`` method would be required instead.
    """
    part_id = models.IntegerField(primary_key=True)
    ok = models.BooleanField(default=True)
    part_name = models.CharField(max_length=255)
    short_desc = models.CharField(max_length=255,null=True)
    description = models.TextField(null=True)
    part_type = models.CharField(max_length=20,null=True)
    author = models.CharField(max_length=200,null=True)
    status = models.CharField(max_length=20,null=True)
    dominant = models.BooleanField(default=True)
    discontinued = models.IntegerField(null=True)
    part_status = models.CharField(max_length=40,null=True)
    sample_status = models.CharField(max_length=40,null=True)
    p_status_cache = models.CharField(max_length=1000,null=True)
    s_status_cache = models.CharField(max_length=1000,null=True)
    in_stock = models.BooleanField(default=True)
    results = models.CharField(max_length=20, null=True)
    favorite = models.IntegerField(null=True)
    specified_u_list = models.TextField(null=True)
    deep_u_list = models.TextField(null=True)
    deep_count = models.IntegerField(null=True)
    ps_string = models.TextField(null=True)
    scars = models.CharField(max_length=20,null=True)
    barcode = models.CharField(max_length=50,null=True)
    notes = models.TextField(null=True)
    source = models.TextField(null=True)
    nickname = models.CharField(max_length=50,null=True)
    premium = models.IntegerField(null=True)
    categories = models.CharField(max_length=500,null=True)
    sequence = models.TextField(null=True)
    sequence_length = models.IntegerField(null=True)
    part_url = models.CharField(max_length=255, null=True)
    score = models.FloatField(null=True)
    def __unicode__(self):
        return self.part_name
    class Meta:
        db_table = 'bio_parts'
class part_parameters(models.Model):
part = models.ForeignKey(parts)
name = models.CharField(max_length=256)
value = models.CharField(max_length=256)
class Meta:
db_table = 'bio_part_parameters'
class part_twins(models.Model):
part_1 = models.ForeignKey(parts)
part_2 = models.ForeignKey(parts, related_name='FK_PART_TWIN2', db_column='part_2_id')
class Meta:
db_table = 'bio_part_twins'
class features(models.Model):
feature_id = models.IntegerField(primary_key=True)
title = models.CharField(max_length=128, null=True)
feature_type = models.CharField(max_length=128, null=True)
direction = models.CharField(max_length=256, null=True)
startpos = models.IntegerField(null=True)
endpos = models.IntegerField(null=True)
class Meta:
db_table = 'bio_features'
class part_features(models.Model):
part = models.ForeignKey(parts)
feature = models.ForeignKey(features)
class Meta:
db_table = 'bio_part_features'
class tracks(models.Model):
track = models.CharField(max_length=64)
def __unicode__(self):
return self.track
class Meta:
db_table = 'bio_tracks'
class functions(models.Model):
function = models.CharField(max_length=255, null=True)
def __unicode__(self):
return self.function
class Meta:
db_table = 'bio_functions'
class track_functions(models.Model):
track = models.ForeignKey(tracks)
function = models.ForeignKey(functions)
def __unicode__(self):
return '%s %s' % (self.track, self.function)
class Meta:
db_table = 'bio_track_function'
class teams(models.Model):
team_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=64)
track = models.ForeignKey(tracks)
function = models.ForeignKey(functions)
year = models.CharField(max_length=16)
def __unicode__(self):
return self.name
class Meta:
db_table = 'bio_team'
class project(models.Model):
project_name = models.CharField(max_length=64)
creator = models.ForeignKey(User)
create_time = models.DateTimeField(auto_now_add=True)
function = models.ForeignKey(functions, null=True)
track = models.ForeignKey(tracks, null=True)
is_deleted = models.BooleanField(default=False)
def __unicode__(self):
return self.project_name
class Meta:
db_table = 'bio_project'
class team_parts(models.Model):
team = models.ForeignKey(teams)
part = models.ForeignKey(parts)
def __unicode__(self):
return self.team_name
class Meta:
db_table = 'bio_team_parts'
class user_project(models.Model):
user = models.ForeignKey(User)
project = models.ForeignKey(project)
def __unicode__(self):
return self.user
class Meta:
db_table = 'bio_user_project'
class chain(models.Model):
sequence = models.CharField(max_length=255,null=True)
project = models.ForeignKey(project)
name = models.CharField(max_length=64, null=False)
isModified = models.BooleanField(default=True)
image_file_path = models.CharField(max_length=255, null=True)
def __unicode__(self):
return self.sequence
class Meta:
db_table = 'bio_chain'
class paper(models.Model):
paper_id = models.CharField(max_length=128, primary_key=True)
paper_name = models.CharField(max_length=255, null=True)
paper_file_location = models.CharField(max_length=256, null=True)
paper_url = models.CharField(max_length=255, null=True)
def __unicode__(self):
return self.paper_name
class Meta:
db_table = 'bio_paper'
class part_papers(models.Model):
part = models.ForeignKey(parts)
paper = models.ForeignKey(paper)
def __unicode__(self):
return self.part.part_name + ' ' + self.paper.paper_name
class Meta:
db_table = 'bio_part_papers' | design/models.py | from django.db import models
from accounts.models import User
# Create your models here.
#
class parts(models.Model):
part_id = models.IntegerField(primary_key=True)
ok = models.BooleanField(default=True)
part_name = models.CharField(max_length=255)
short_desc = models.CharField(max_length=255,null=True)
description = models.TextField(null=True)
part_type = models.CharField(max_length=20,null=True)
author = models.CharField(max_length=200,null=True)
status = models.CharField(max_length=20,null=True)
dominant = models.BooleanField(default=True)
discontinued = models.IntegerField(null=True)
part_status = models.CharField(max_length=40,null=True)
sample_status = models.CharField(max_length=40,null=True)
p_status_cache = models.CharField(max_length=1000,null=True)
s_status_cache = models.CharField(max_length=1000,null=True)
in_stock = models.BooleanField(default=True)
results = models.CharField(max_length=20, null=True)
favorite = models.IntegerField(null=True)
specified_u_list = models.TextField(null=True)
deep_u_list = models.TextField(null=True)
deep_count = models.IntegerField(null=True)
ps_string = models.TextField(null=True)
scars = models.CharField(max_length=20,null=True)
barcode = models.CharField(max_length=50,null=True)
notes = models.TextField(null=True)
source = models.TextField(null=True)
nickname = models.CharField(max_length=50,null=True)
premium = models.IntegerField(null=True)
categories = models.CharField(max_length=500,null=True)
sequence = models.TextField(null=True)
sequence_length = models.IntegerField(null=True)
part_url = models.CharField(max_length=255, null=True)
score = models.FloatField(null=True)
def __unicode__(self):
return self.part_name
class Meta:
db_table = 'bio_parts'
class part_parameters(models.Model):
part = models.ForeignKey(parts)
name = models.CharField(max_length=256)
value = models.CharField(max_length=256)
class Meta:
db_table = 'bio_part_parameters'
class part_twins(models.Model):
part_1 = models.ForeignKey(parts)
part_2 = models.ForeignKey(parts, related_name='FK_PART_TWIN2', db_column='part_2_id')
class Meta:
db_table = 'bio_part_twins'
class features(models.Model):
feature_id = models.IntegerField(primary_key=True)
title = models.CharField(max_length=128, null=True)
feature_type = models.CharField(max_length=128, null=True)
direction = models.CharField(max_length=256, null=True)
startpos = models.IntegerField(null=True)
endpos = models.IntegerField(null=True)
class Meta:
db_table = 'bio_features'
class part_features(models.Model):
part = models.ForeignKey(parts)
feature = models.ForeignKey(features)
class Meta:
db_table = 'bio_part_features'
class tracks(models.Model):
track = models.CharField(max_length=64)
def __unicode__(self):
return self.track
class Meta:
db_table = 'bio_tracks'
class functions(models.Model):
function = models.CharField(max_length=255, null=True)
def __unicode__(self):
return self.function
class Meta:
db_table = 'bio_functions'
class track_functions(models.Model):
track = models.ForeignKey(tracks)
function = models.ForeignKey(functions)
def __unicode__(self):
return '%s %s' % (self.track, self.function)
class Meta:
db_table = 'bio_track_function'
class teams(models.Model):
team_id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=64)
track = models.ForeignKey(tracks)
function = models.ForeignKey(functions)
year = models.CharField(max_length=16)
def __unicode__(self):
return self.name
class Meta:
db_table = 'bio_team'
class project(models.Model):
project_name = models.CharField(max_length=64)
creator = models.ForeignKey(User)
create_time = models.DateTimeField(auto_now_add=True)
function = models.ForeignKey(functions, null=True)
track = models.ForeignKey(tracks, null=True)
is_deleted = models.BooleanField(default=False)
def __unicode__(self):
return self.project_name
class Meta:
db_table = 'bio_project'
class team_parts(models.Model):
team = models.ForeignKey(teams)
part = models.ForeignKey(parts)
def __unicode__(self):
return self.team_name
class Meta:
db_table = 'bio_team_parts'
class user_project(models.Model):
user = models.ForeignKey(User)
project = models.ForeignKey(project)
def __unicode__(self):
return self.user
class Meta:
db_table = 'bio_user_project'
class chain(models.Model):
sequence = models.CharField(max_length=255,null=True)
project = models.ForeignKey(project)
name = models.CharField(max_length=64, null=False)
isModified = models.BooleanField(default=True)
image_file_path = models.CharField(max_length=255, null=True)
def __unicode__(self):
return self.sequence
class Meta:
db_table = 'bio_chain'
class paper(models.Model):
paper_id = models.CharField(max_length=128, primary_key=True)
paper_name = models.CharField(max_length=255, null=True)
paper_file_location = models.CharField(max_length=256, null=True)
paper_url = models.CharField(max_length=255, null=True)
def __unicode__(self):
return self.paper_name
class Meta:
db_table = 'bio_paper'
class part_papers(models.Model):
part = models.ForeignKey(parts)
paper = models.ForeignKey(paper)
def __unicode__(self):
return self.part.part_name + ' ' + self.paper.paper_name
class Meta:
db_table = 'bio_part_papers' | 0.662032 | 0.149749 |
import yarom as Y
from yarom.utils import slice_dict
import six
class Worm(Y.DataObject):
datatypeProperties = [{'name': 'scientific_name', 'multiple': False}]
objectProperties = ['neuron_network', 'muscle']
def defined_augment(self):
if len(self.scientific_name.values) < 1:
return False
return True
def identifier_augment(self):
return self.make_identifier_from_properties('scientific_name')
class Evidence(Y.DataObject):
_ = ['title', 'asserts']
def _ident_data(self):
return [self.title.values]
def defined_augment(self):
for p in self._ident_data():
if len(p) < 1:
return False
return True
def identifier_augment(self):
return self.make_identifier_from_properties('title')
class Cell(Y.DataObject):
"""
A biological cell.
All cells with the same name are considered to be the same object.
Parameters
-----------
name : string
The name of the cell
lineageName : string
The lineageName of the cell
Example::
>>> c = Cell(name="ADAL")
>>> c.lineageName() # Returns ["AB plapaaaapp"]
Attributes
----------
name : DatatypeProperty
The 'adult' name of the cell typically used by biologists when
discussing C. elegans
lineageName : DatatypeProperty
The lineageName of the cell
description : DatatypeProperty
A description of the cell
divisionVolume : DatatypeProperty
When called with no argument, return the volume of the cell at division
during development.
When called with an argument, set the volume of the cell at division
Example::
>>> v = Quantity("600","(um)^3")
>>> c = Cell(lineageName="AB plapaaaap")
>>> c.divisionVolume(v)
"""
datatypeProperties = ['lineageName',
{'name': 'name', 'multiple': False},
'divisionVolume',
'description']
def __init__(self, name=False, **kwargs):
if name:
kwargs['name'] = name
super(Cell, self).__init__(**kwargs)
def _ident_data(self):
return [self.name.values]
def defined_augment(self):
for p in self._ident_data():
if len(p) < 1:
return False
return True
def identifier_augment(self):
return self.make_identifier_direct(str(self.name.values[0]))
class Neuron(Cell):
"""
A neuron.
See what neurons express some neuropeptide
Example::
# Grabs the representation of the neuronal network
>>> net = P.Worm().get_neuron_network()
# Grab a specific neuron
>>> aval = net.aneuron('AVAL')
>>> aval.type()
set([u'interneuron'])
# show how many connections go out of AVAL
>>> aval.connection.count('pre')
77
>>> aval.name()
u'AVAL'
# list all known receptors
>>> sorted(aval.receptors())
[u'GGR-3',
u'GLR-1',
u'GLR-2',
u'GLR-4',
u'GLR-5',
u'NMR-1',
u'NMR-2',
u'UNC-8']
# show how many chemical synapses go in and out of AVAL
>>> aval.Syn_degree()
90
Parameters
----------
name : string
The name of the neuron.
Attributes
----------
type : DatatypeProperty
The neuron type (i.e., sensory, interneuron, motor)
receptor : DatatypeProperty
The receptor types associated with this neuron
innexin : DatatypeProperty
Innexin types associated with this neuron
neurotransmitter : DatatypeProperty
Neurotransmitters associated with this neuron
neuropeptide : DatatypeProperty
Name of the gene corresponding to the neuropeptide produced by this
neuron
neighbor : Property
Get neurons connected to this neuron if called with no arguments, or
with arguments, state that neuronName is a neighbor of this Neuron
connection : Property
Get a set of Connection objects describing chemical synapses or gap
junctions between this neuron and others
"""
datatypeProperties = [
"type",
"receptor",
"innexin",
"neurotransmitter",
"neuropeptide"]
objectProperties = [
"neighbor",
"connection"
]
def __init__(self, *args, **kwargs):
super(Neuron, self).__init__(*args, **kwargs)
self.set_property_values(slice_dict(kwargs, self.datatypeProperties))
self.set_property_values(slice_dict(kwargs, self.objectProperties))
class SynapseType:
Chemical = "send"
GapJunction = "gapJunction"
class Connection(Y.DataObject):
"""Connection between neurons
Parameters
----------
pre_cell : string or Neuron, optional
The pre-synaptic cell
post_cell : string or Neuron, optional
The post-synaptic cell
number : int, optional
The weight of the connection
syntype : {'gapJunction', 'send'}, optional
The kind of synaptic connection. 'gapJunction' indicates
a gap junction and 'send' a chemical synapse
synclass : string, optional
The kind of Neurotransmitter (if any) sent between `pre_cell` and
`post_cell`
"""
datatypeProperties = ['syntype',
'synclass',
'number']
objectProperties = ['pre_cell', 'post_cell']
def __init__(self, **kwargs):
super(Connection, self).__init__(**kwargs)
pre_cell = kwargs.get('pre_cell', None)
post_cell = kwargs('post_cell', None)
number = kwargs('number', None)
syntype = kwargs('syntype', None)
synclass = kwargs('synclass', None)
if isinstance(pre_cell, Y.Neuron):
self.pre_cell(pre_cell)
elif pre_cell is not None:
self.pre_cell(Y.Neuron(name=pre_cell, conf=self.conf))
if (isinstance(post_cell, Y.Neuron)):
self.post_cell(post_cell)
elif post_cell is not None:
self.post_cell(Y.Neuron(name=post_cell, conf=self.conf))
if isinstance(number, int):
self.number(int(number))
elif number is not None:
raise Exception(
"Connection number must be an int, given %s" %
number)
if isinstance(syntype, six.string_types):
syntype = syntype.lower()
if syntype in ('send', SynapseType.Chemical):
self.syntype(SynapseType.Chemical)
elif syntype in ('gapjunction', SynapseType.GapJunction):
self.syntype(SynapseType.GapJunction)
if isinstance(synclass, six.string_types):
self.synclass(synclass)
class Muscle(Cell):
"""A single muscle cell.
See what neurons innervate a muscle:
Example::
>>> mdr21 = P.Muscle('MDR21')
>>> innervates_mdr21 = mdr21.innervatedBy()
>>> len(innervates_mdr21)
4
Attributes
----------
neurons : ObjectProperty
Neurons synapsing with this muscle
receptors : DatatypeProperty
Get a list of receptors for this muscle if called with no arguments,
or state that this muscle has the given receptor type if called with
an argument
"""
objectProperties = ['innervatedBy']
datatypeProperties = ['receptor']
def __init__(self, name=False, **kwargs):
super(Muscle, self).__init__(name=name, **kwargs)
class Network(Y.DataObject):
"""A network of neurons
Attributes
-----------
neuron
Representation of neurons in the network
synapse
Representation of synapses in the network
"""
objectProperties = ['synapse', 'neuron']
def __init__(self, **kwargs):
super(Network, self).__init__(**kwargs) | examples/c_elegans.py | import yarom as Y
from yarom.utils import slice_dict
import six
class Worm(Y.DataObject):
datatypeProperties = [{'name': 'scientific_name', 'multiple': False}]
objectProperties = ['neuron_network', 'muscle']
def defined_augment(self):
if len(self.scientific_name.values) < 1:
return False
return True
def identifier_augment(self):
return self.make_identifier_from_properties('scientific_name')
class Evidence(Y.DataObject):
_ = ['title', 'asserts']
def _ident_data(self):
return [self.title.values]
def defined_augment(self):
for p in self._ident_data():
if len(p) < 1:
return False
return True
def identifier_augment(self):
return self.make_identifier_from_properties('title')
class Cell(Y.DataObject):
"""
A biological cell.
All cells with the same name are considered to be the same object.
Parameters
-----------
name : string
The name of the cell
lineageName : string
The lineageName of the cell
Example::
>>> c = Cell(name="ADAL")
>>> c.lineageName() # Returns ["AB plapaaaapp"]
Attributes
----------
name : DatatypeProperty
The 'adult' name of the cell typically used by biologists when
discussing C. elegans
lineageName : DatatypeProperty
The lineageName of the cell
description : DatatypeProperty
A description of the cell
divisionVolume : DatatypeProperty
When called with no argument, return the volume of the cell at division
during development.
When called with an argument, set the volume of the cell at division
Example::
>>> v = Quantity("600","(um)^3")
>>> c = Cell(lineageName="AB plapaaaap")
>>> c.divisionVolume(v)
"""
datatypeProperties = ['lineageName',
{'name': 'name', 'multiple': False},
'divisionVolume',
'description']
def __init__(self, name=False, **kwargs):
if name:
kwargs['name'] = name
super(Cell, self).__init__(**kwargs)
def _ident_data(self):
return [self.name.values]
def defined_augment(self):
for p in self._ident_data():
if len(p) < 1:
return False
return True
def identifier_augment(self):
return self.make_identifier_direct(str(self.name.values[0]))
class Neuron(Cell):
"""
A neuron.
See what neurons express some neuropeptide
Example::
# Grabs the representation of the neuronal network
>>> net = P.Worm().get_neuron_network()
# Grab a specific neuron
>>> aval = net.aneuron('AVAL')
>>> aval.type()
set([u'interneuron'])
# show how many connections go out of AVAL
>>> aval.connection.count('pre')
77
>>> aval.name()
u'AVAL'
# list all known receptors
>>> sorted(aval.receptors())
[u'GGR-3',
u'GLR-1',
u'GLR-2',
u'GLR-4',
u'GLR-5',
u'NMR-1',
u'NMR-2',
u'UNC-8']
# show how many chemical synapses go in and out of AVAL
>>> aval.Syn_degree()
90
Parameters
----------
name : string
The name of the neuron.
Attributes
----------
type : DatatypeProperty
The neuron type (i.e., sensory, interneuron, motor)
receptor : DatatypeProperty
The receptor types associated with this neuron
innexin : DatatypeProperty
Innexin types associated with this neuron
neurotransmitter : DatatypeProperty
Neurotransmitters associated with this neuron
neuropeptide : DatatypeProperty
Name of the gene corresponding to the neuropeptide produced by this
neuron
neighbor : Property
Get neurons connected to this neuron if called with no arguments, or
with arguments, state that neuronName is a neighbor of this Neuron
connection : Property
Get a set of Connection objects describing chemical synapses or gap
junctions between this neuron and others
"""
datatypeProperties = [
"type",
"receptor",
"innexin",
"neurotransmitter",
"neuropeptide"]
objectProperties = [
"neighbor",
"connection"
]
def __init__(self, *args, **kwargs):
super(Neuron, self).__init__(*args, **kwargs)
self.set_property_values(slice_dict(kwargs, self.datatypeProperties))
self.set_property_values(slice_dict(kwargs, self.objectProperties))
class SynapseType:
Chemical = "send"
GapJunction = "gapJunction"
class Connection(Y.DataObject):
"""Connection between neurons
Parameters
----------
pre_cell : string or Neuron, optional
The pre-synaptic cell
post_cell : string or Neuron, optional
The post-synaptic cell
number : int, optional
The weight of the connection
syntype : {'gapJunction', 'send'}, optional
The kind of synaptic connection. 'gapJunction' indicates
a gap junction and 'send' a chemical synapse
synclass : string, optional
The kind of Neurotransmitter (if any) sent between `pre_cell` and
`post_cell`
"""
datatypeProperties = ['syntype',
'synclass',
'number']
objectProperties = ['pre_cell', 'post_cell']
def __init__(self, **kwargs):
super(Connection, self).__init__(**kwargs)
pre_cell = kwargs.get('pre_cell', None)
post_cell = kwargs('post_cell', None)
number = kwargs('number', None)
syntype = kwargs('syntype', None)
synclass = kwargs('synclass', None)
if isinstance(pre_cell, Y.Neuron):
self.pre_cell(pre_cell)
elif pre_cell is not None:
self.pre_cell(Y.Neuron(name=pre_cell, conf=self.conf))
if (isinstance(post_cell, Y.Neuron)):
self.post_cell(post_cell)
elif post_cell is not None:
self.post_cell(Y.Neuron(name=post_cell, conf=self.conf))
if isinstance(number, int):
self.number(int(number))
elif number is not None:
raise Exception(
"Connection number must be an int, given %s" %
number)
if isinstance(syntype, six.string_types):
syntype = syntype.lower()
if syntype in ('send', SynapseType.Chemical):
self.syntype(SynapseType.Chemical)
elif syntype in ('gapjunction', SynapseType.GapJunction):
self.syntype(SynapseType.GapJunction)
if isinstance(synclass, six.string_types):
self.synclass(synclass)
class Muscle(Cell):
"""A single muscle cell.
See what neurons innervate a muscle:
Example::
>>> mdr21 = P.Muscle('MDR21')
>>> innervates_mdr21 = mdr21.innervatedBy()
>>> len(innervates_mdr21)
4
Attributes
----------
neurons : ObjectProperty
Neurons synapsing with this muscle
receptors : DatatypeProperty
Get a list of receptors for this muscle if called with no arguments,
or state that this muscle has the given receptor type if called with
an argument
"""
objectProperties = ['innervatedBy']
datatypeProperties = ['receptor']
def __init__(self, name=False, **kwargs):
super(Muscle, self).__init__(name=name, **kwargs)
class Network(Y.DataObject):
"""A network of neurons
Attributes
-----------
neuron
Representation of neurons in the network
synapse
Representation of synapses in the network
"""
objectProperties = ['synapse', 'neuron']
def __init__(self, **kwargs):
super(Network, self).__init__(**kwargs) | 0.805594 | 0.521654 |
import sys, time, re, socket, psycopg2
from db_con import conn, cur, ipam_ip_indx_rst, dcim_iface_indx_rst, dcim_device_indx_rst
#Time stamps for DB updates.
date = time.strftime("%Y-%m-%d")
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
def ipam_mgmt_ip():
#Create "mgmt" network interface for existing devices.
cur.execute("SELECT id,name FROM dcim_device;")
for db_fetch in cur.fetchall():
try:
ipv4_list = socket.gethostbyname(db_fetch[1])
#print str(db_fetch[1]) + "<<<" + str(ipv4_list)
ipv6_list = socket.getaddrinfo(db_fetch[1], None, socket.AF_INET6)
#print str(db_fetch[1]) + "<<<" + str(ipv6_list[0][4][0])
#Ignore Loopbacks
if re.findall(r'127.0.[0-1].1', ipv4_list):
continue
cur.execute("INSERT INTO dcim_interface(name, form_factor, mgmt_only, description, device_id) VALUES (%s, %s, %s, %s, %s) ON CONFLICT DO NOTHING",
("mgmt", "0", "t", db_fetch[1], db_fetch[0]))
dcim_iface_indx_rst()
conn.commit()
cur.execute("SELECT id FROM dcim_interface where name='mgmt' AND device_id=%s;" %(db_fetch[0]))
iface_id = cur.fetchall()
cur.execute("SELECT description FROM ipam_ipaddress where description='%s';" %(db_fetch[1]))
mgmt_ip_desc = cur.fetchall()
if len(mgmt_ip_desc) == 0:
#print "Unique Entry"
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "4", ipv4_list, db_fetch[1], iface_id[0], "2", "1"))
print "Adding:" + str(db_fetch[1]) + " " + str(ipv4_list)
ipam_ip_indx_rst()
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "6", ipv6_list[0][4][0], db_fetch[1], iface_id[0], "2", "1"))
print "Adding:" + str(db_fetch[1]) + " " + str(ipv6_list[0][4][0])
ipam_ip_indx_rst()
conn.commit()
elif str(mgmt_ip_desc[0][0]) == str(db_fetch[1]):
print "Hostanme:" + str(db_fetch[1]) + " " + "already has a management IP" + " >> " + str(ipv4_list) + " >> " + str(ipv6_list[0][4][0])
continue
else:
print "None found. A new DB record is added" + str(uniq_mgmt_ip[0][0])
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "4", ipv4_list, db_fetch[1], int_id[0], "2", "1"))
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "6", ipv6_list[0][4][0], db_fetch[1], int_id[0], "2", "1"))
ipam_ip_indx_rst()
conn.commit()
except socket.gaierror:
continue
cur.execute("SELECT id,family,description FROM ipam_ipaddress WHERE family=4;")
for prim_ip_set in cur.fetchall():
#print str(prim_ip_set[0]) + str(str(prim_ip_set[1]))
cur.execute("UPDATE dcim_device SET primary_ip4_id = %s WHERE name=%s", (prim_ip_set[0], prim_ip_set[2]))
conn.commit()
cur.execute("SELECT id,family,description FROM ipam_ipaddress WHERE family=6;")
for prim_ip_set in cur.fetchall():
#print str(prim_ip_set[0]) + str(str(prim_ip_set[1]))
cur.execute("UPDATE dcim_device SET primary_ip6_id = %s WHERE name=%s", (prim_ip_set[0], prim_ip_set[2]))
conn.commit()
ipam_mgmt_ip()
dcim_device_indx_rst() | ipam_mgmt.py | import sys, time, re, socket, psycopg2
from db_con import conn, cur, ipam_ip_indx_rst, dcim_iface_indx_rst, dcim_device_indx_rst
#Time stamps for DB updates.
date = time.strftime("%Y-%m-%d")
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
def ipam_mgmt_ip():
#Create "mgmt" network interface for existing devices.
cur.execute("SELECT id,name FROM dcim_device;")
for db_fetch in cur.fetchall():
try:
ipv4_list = socket.gethostbyname(db_fetch[1])
#print str(db_fetch[1]) + "<<<" + str(ipv4_list)
ipv6_list = socket.getaddrinfo(db_fetch[1], None, socket.AF_INET6)
#print str(db_fetch[1]) + "<<<" + str(ipv6_list[0][4][0])
#Ignore Loopbacks
if re.findall(r'127.0.[0-1].1', ipv4_list):
continue
cur.execute("INSERT INTO dcim_interface(name, form_factor, mgmt_only, description, device_id) VALUES (%s, %s, %s, %s, %s) ON CONFLICT DO NOTHING",
("mgmt", "0", "t", db_fetch[1], db_fetch[0]))
dcim_iface_indx_rst()
conn.commit()
cur.execute("SELECT id FROM dcim_interface where name='mgmt' AND device_id=%s;" %(db_fetch[0]))
iface_id = cur.fetchall()
cur.execute("SELECT description FROM ipam_ipaddress where description='%s';" %(db_fetch[1]))
mgmt_ip_desc = cur.fetchall()
if len(mgmt_ip_desc) == 0:
#print "Unique Entry"
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "4", ipv4_list, db_fetch[1], iface_id[0], "2", "1"))
print "Adding:" + str(db_fetch[1]) + " " + str(ipv4_list)
ipam_ip_indx_rst()
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "6", ipv6_list[0][4][0], db_fetch[1], iface_id[0], "2", "1"))
print "Adding:" + str(db_fetch[1]) + " " + str(ipv6_list[0][4][0])
ipam_ip_indx_rst()
conn.commit()
elif str(mgmt_ip_desc[0][0]) == str(db_fetch[1]):
print "Hostanme:" + str(db_fetch[1]) + " " + "already has a management IP" + " >> " + str(ipv4_list) + " >> " + str(ipv6_list[0][4][0])
continue
else:
print "None found. A new DB record is added" + str(uniq_mgmt_ip[0][0])
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "4", ipv4_list, db_fetch[1], int_id[0], "2", "1"))
cur.execute("INSERT INTO ipam_ipaddress(created, last_updated, family, address, description, interface_id, tenant_id, status) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (nat_inside_id) DO NOTHING",
(date, time_stamp, "6", ipv6_list[0][4][0], db_fetch[1], int_id[0], "2", "1"))
ipam_ip_indx_rst()
conn.commit()
except socket.gaierror:
continue
cur.execute("SELECT id,family,description FROM ipam_ipaddress WHERE family=4;")
for prim_ip_set in cur.fetchall():
#print str(prim_ip_set[0]) + str(str(prim_ip_set[1]))
cur.execute("UPDATE dcim_device SET primary_ip4_id = %s WHERE name=%s", (prim_ip_set[0], prim_ip_set[2]))
conn.commit()
cur.execute("SELECT id,family,description FROM ipam_ipaddress WHERE family=6;")
for prim_ip_set in cur.fetchall():
#print str(prim_ip_set[0]) + str(str(prim_ip_set[1]))
cur.execute("UPDATE dcim_device SET primary_ip6_id = %s WHERE name=%s", (prim_ip_set[0], prim_ip_set[2]))
conn.commit()
ipam_mgmt_ip()
dcim_device_indx_rst() | 0.028734 | 0.057865 |
import datetime
from app import app
from auth import auth
from models import User, Note
from flask import request, redirect, url_for, render_template, flash
from flask_turboduck.utils import get_object_or_404, object_list
# Note List
@app.route('/note/', methods=['GET','POST'])
@app.route('/notes/', methods=['GET','POST'])
def note_list():
user = auth.get_logged_in_user()
notes = Note.select().where(Note.user == user).order_by(Note.created.desc())
return object_list('note_list.html', notes, 'notes')
# Note View
@app.route('/note/<int:noteid>', methods=['GET','POST'])
def note_view(noteid):
user = auth.get_logged_in_user() # Logged In User
note = get_object_or_404(Note, Note.id==noteid, Note.user==user)
return render_template('note_view.html', note=note)
# Note Add
@app.route('/note/add/', methods=['GET','POST'])
@auth.login_required
def note_add():
if request.method == 'POST' and request.form['message']:
user = auth.get_logged_in_user()
message = Note.create(user=user, message=request.form['message'], title=request.form['title'],)
message.save()
flash('You submited data!')
return redirect(url_for('note_list'))
return render_template('note_add.html')
# Note Edit
@app.route('/note/<noteid>/edit', methods=['GET','POST'])
@auth.login_required
def note_edit(noteid):
user = auth.get_logged_in_user() # Logged In User
note = get_object_or_404(Note, Note.user==user, Note.id==noteid)
if request.method == 'POST' and request.form['message']:
note.message = request.form['message']
note.title = request.form['title']
note.save()
flash('Thanks! You updated the data!')
return redirect(url_for('note_list'))
return render_template('note_edit.html', note=note)
# Private Area
@app.route('/private/')
@auth.login_required
def private_timeline():
user = auth.get_logged_in_user()
return 'PRIVATE!'
# User List
@app.route('/users/')
def user_list():
users = User.select().order_by(User.username)
return object_list('user_list.html', users, 'user_list')
# User View
@app.route('/users/<username>/')
def user_detail(username):
user = get_object_or_404(User, User.username==username)
return user
# Login Page
@app.route('/login/')
def login():
return redirect('/accounts/login/') | flask_netpad/views.py |
import datetime
from app import app
from auth import auth
from models import User, Note
from flask import request, redirect, url_for, render_template, flash
from flask_turboduck.utils import get_object_or_404, object_list
# Note List
@app.route('/note/', methods=['GET','POST'])
@app.route('/notes/', methods=['GET','POST'])
def note_list():
user = auth.get_logged_in_user()
notes = Note.select().where(Note.user == user).order_by(Note.created.desc())
return object_list('note_list.html', notes, 'notes')
# Note View
@app.route('/note/<int:noteid>', methods=['GET','POST'])
def note_view(noteid):
user = auth.get_logged_in_user() # Logged In User
note = get_object_or_404(Note, Note.id==noteid, Note.user==user)
return render_template('note_view.html', note=note)
# Note Add
@app.route('/note/add/', methods=['GET','POST'])
@auth.login_required
def note_add():
if request.method == 'POST' and request.form['message']:
user = auth.get_logged_in_user()
message = Note.create(user=user, message=request.form['message'], title=request.form['title'],)
message.save()
flash('You submited data!')
return redirect(url_for('note_list'))
return render_template('note_add.html')
# Note Edit
@app.route('/note/<noteid>/edit', methods=['GET','POST'])
@auth.login_required
def note_edit(noteid):
    """Edit an existing note owned by the logged-in user."""
    current_user = auth.get_logged_in_user()  # Logged In User
    note = get_object_or_404(Note, Note.user == current_user, Note.id == noteid)
    # Only persist when this is a POST carrying a non-empty message.
    is_update = request.method == 'POST' and request.form['message']
    if not is_update:
        return render_template('note_edit.html', note=note)
    note.message = request.form['message']
    note.title = request.form['title']
    note.save()
    flash('Thanks! You updated the data!')
    return redirect(url_for('note_list'))
# Private Area
@app.route('/private/')
@auth.login_required
def private_timeline():
    """Login-protected page; currently returns placeholder content only."""
    _current_user = auth.get_logged_in_user()  # call kept — may have session side effects
    return 'PRIVATE!'
# User List
@app.route('/users/')
def user_list():
    """Render the list of all users, ordered by username."""
    everyone = User.select().order_by(User.username)
    return object_list('user_list.html', everyone, 'user_list')
# User View
@app.route('/users/<username>/')
def user_detail(username):
    """Render a single user's page; 404 when the username is unknown."""
    found = get_object_or_404(User, User.username == username)
    return found
# Login Page
@app.route('/login/')
def login():
    """Delegate to the auth blueprint's login page."""
    return redirect('/accounts/login/')
import os
import sys
from copy import deepcopy
import numpy as np
import pandas as pd
import parse_spe_reaction_info as psri
import parse_pattern as pp
def prepare_pathway_name(
        data_dir, top_n=5, flag="", end_s_idx=None, species_path=False,
        path_reg=None, no_path_reg=None, spe_idx=None, spe_production_oriented=False, n_threshold=0,
        same_path_list=False):
    """
    Prepare <prefix>pathway_name_candidate[_<flag>].csv under <data_dir>/output.

    Reads <prefix>pathway_stat.csv (columns: pathway, frequency), filters the
    pathways by regex and species criteria, and writes up to top_n candidate
    pathway names.  Returns the number of names kept.  Raises ValueError when
    no pathway survives the filters.  When same_path_list is True and a
    candidate file already exists, it is reused instead of regenerated.
    """
    # read from pathway_stat.csv
    prefix = ""
    if species_path is True:
        prefix = "species_"
    if flag == "":
        f_n_pn = os.path.join(data_dir, "output",
                              prefix + "pathway_name_candidate.csv")
    else:
        f_n_pn = os.path.join(data_dir, "output",
                              prefix + "pathway_name_candidate_" + str(flag) + ".csv")
    # Reuse a previously generated candidate list when requested.
    if same_path_list is True and os.path.isfile(f_n_pn):
        # NOTE(review): np.loadtxt returns a 0-d array for a one-line file,
        # so len() would raise TypeError — confirm candidate files always
        # hold more than one path.
        path_list = np.loadtxt(f_n_pn, dtype=str, delimiter=',')
        if (len(path_list) == 0):
            raise ValueError("NO VALID path found!!!")
        return len(path_list)
    # Start fresh: drop any stale candidate file (ignore "not found").
    try:
        os.remove(f_n_pn)
    except OSError:
        pass
    f_n_ps = os.path.join(data_dir, "output", prefix + "pathway_stat.csv")
    path_list = []
    d_f = pd.read_csv(f_n_ps, names=['pathway', 'frequency'])
    # mask1: pathway matches path_reg (or any non-empty pathway when None).
    if path_reg is not None:
        mask1 = d_f['pathway'].str.contains(path_reg)
    else:
        mask1 = d_f['pathway'].str.len() > 0
    # mask2: net production of species spe_idx along the path >= n_threshold.
    if spe_idx is None:
        mask2 = d_f['pathway'].str.len() > 0
    else:
        net_reactant = psri.parse_reaction_net_reactant(data_dir)
        net_product = psri.parse_reaction_net_product(data_dir)
        s_p_r_c = psri.parse_species_pair_reaction(data_dir)
        mask2 = d_f.apply(lambda x: pp.parse_net_species_along_path_using_reaction(
            pathname=x['pathway'], net_r=net_reactant, net_p=net_product, spe_idx=spe_idx, s_p_r_c=s_p_r_c) >= n_threshold, axis=1)
    # read
    if end_s_idx is None or end_s_idx == []:
        # No terminal-species constraint.
        mask3 = d_f['pathway'].str.len() > 0
        path_list.extend(d_f[mask1 & mask2 & mask3]['pathway'])
        if spe_production_oriented is False or spe_idx is None:
            # save
            # NOTE(review): the file is written before the empty check, so an
            # empty candidate file may exist when ValueError is raised.
            np.savetxt(f_n_pn, path_list[0:top_n], fmt="%s")
            if (len(path_list) == 0):
                raise ValueError("NO VALID path found!!!")
            return len(path_list[0:top_n])
        elif spe_idx is not None:
            # Expand each pathway into its species-production sub-paths,
            # de-duplicating via the r_list keys.
            path_list2 = []
            path_set = set()
            for _, val1 in enumerate(path_list):
                p_list, r_list = pp.get_spe_production_sub_path(
                    val1, net_reactant, net_product, spe_idx, s_p_r_c, n_threshold)
                for idx2, val2 in enumerate(r_list):
                    if val2 not in path_set:
                        path_set.add(val2)
                        path_list2.append(p_list[idx2])
            # one more filter of path, have to contain path_reg
            path_list3 = []
            for path in path_list2:
                if pp.path_contain_regex(path, path_reg=path_reg):
                    path_list3.append(path)
            # one more filter of path, don't contain no_path_reg
            path_list4 = []
            if no_path_reg is None:
                path_list4 = path_list3
            else:
                for path in path_list3:
                    if not pp.path_contain_regex(path, path_reg=no_path_reg):
                        path_list4.append(path)
            np.savetxt(f_n_pn, path_list4[0:top_n], fmt="%s")
            # NOTE(review): this checks path_list, not path_list4 — a fully
            # filtered-out result returns 0 instead of raising; confirm intent.
            if (len(path_list) == 0):
                raise ValueError("NO VALID path found!!!")
            return len(path_list4[0:top_n])
    else:
        # Keep only pathways ending at each requested terminal species,
        # taking up to top_n per terminal species.
        for s_i in end_s_idx:
            mask3 = d_f['pathway'].str.endswith("S" + str(s_i))
            path_list.extend(d_f[mask1 & mask2 & mask3]['pathway'][0:top_n])
        # save
        np.savetxt(f_n_pn, path_list, fmt="%s")
        if (len(path_list) == 0):
            raise ValueError("NO VALID path found!!!")
        return len(path_list)
def prepare_pathway_name_for_passage_time(data_dir, flag="", init_s_idx=None):
    """
    Write species_pathway_name_candidate[_<flag>].csv under <data_dir>/output,
    containing one synthetic pathway "S<idx>R100S100" per initial species
    index.  Returns the number of pathway names written.
    """
    # Candidate file name; the flag, when given, suffixes the base name.
    suffix = "" if flag == "" else "_" + str(flag)
    f_n_pn = os.path.join(
        data_dir, "output", "species_pathway_name_candidate" + suffix + ".csv")
    # Remove any stale candidate file; ignore "file not found".
    try:
        os.remove(f_n_pn)
    except OSError:
        pass
    # Default to species 62 when no initial species indices are supplied.
    indices = [62] if init_s_idx is None else deepcopy(init_s_idx)
    path_list = ["S" + str(s_i) + "R100S100" for s_i in indices]
    np.savetxt(f_n_pn, path_list, fmt="%s")
    return len(path_list)
def prepare_pathway_time(
        data_dir, top_n=5, num=1, flag="", begin_t=0.0, end_t=1.0,
        species_path=False, fixed_t0_or_tf=None):
    """
    Write <prefix>pathway_time_candidate[_<flag>].csv under <data_dir>/output:
    one row of `num` evenly spaced times per pathway (top_n rows).

    fixed_t0_or_tf selects which endpoint to drop from the num+1 grid:
    None or "t0" keeps the tail (drops begin_t), otherwise keeps the head
    (drops end_t).
    """
    prefix = "species_" if species_path is True else ""
    suffix = "" if flag == "" else "_" + str(flag)
    f_n_pt = os.path.join(
        data_dir, "output", prefix + "pathway_time_candidate" + suffix + ".csv")
    # Remove any stale candidate file; ignore "file not found".
    try:
        os.remove(f_n_pt)
    except OSError:
        pass
    # Every row is the same num+1 point grid from begin_t to end_t.
    t_mat = np.tile(np.linspace(begin_t, end_t, num + 1), (top_n, 1))
    if fixed_t0_or_tf is None or fixed_t0_or_tf == "t0":
        np.savetxt(f_n_pt, t_mat[:, 1:], delimiter=',', fmt='%.15f')
    else:
        np.savetxt(f_n_pt, t_mat[:, :-1], delimiter=',', fmt='%.15f')
if __name__ == '__main__':
    # print("hello")
    # SOHR_DATA sits four directory levels above this script.
    DATA_DIR = os.path.abspath(os.path.join(os.path.realpath(
        sys.argv[0]), os.pardir, os.pardir, os.pardir, os.pardir, "SOHR_DATA"))
    # print(DATA_DIR)
    # prepare_pathway_name(DATA_DIR, top_n=5, flag="",
    #                      end_s_idx=[62, 59])
    # prepare_pathway_name(DATA_DIR, top_n=10, flag="",
    #                      end_s_idx=None, species_path=False, path_reg='^S62R(736|738)', no_path_reg=None)
    prepare_pathway_name(
        DATA_DIR, top_n=5, flag="", end_s_idx=None, species_path=False,
        path_reg=None, no_path_reg=None, spe_idx=10, spe_production_oriented=True)

# Fix: stray " | prepare_path_name_time.py" extraction junk that had been fused
# onto the line above was removed; the "import os" fused onto the same line
# (it opens the following section) is restored here.
import os
import sys
from copy import deepcopy
import numpy as np
import pandas as pd
import parse_spe_reaction_info as psri
import parse_pattern as pp
def prepare_pathway_name(
        data_dir, top_n=5, flag="", end_s_idx=None, species_path=False,
        path_reg=None, no_path_reg=None, spe_idx=None, spe_production_oriented=False, n_threshold=0,
        same_path_list=False):
    """
    Prepare <prefix>pathway_name_candidate[_<flag>].csv under <data_dir>/output.

    Reads <prefix>pathway_stat.csv (columns: pathway, frequency), filters the
    pathways by regex and species criteria, and writes up to top_n candidate
    pathway names.  Returns the number of names kept.  Raises ValueError when
    no pathway survives the filters.  When same_path_list is True and a
    candidate file already exists, it is reused instead of regenerated.
    """
    # read from pathway_stat.csv
    prefix = ""
    if species_path is True:
        prefix = "species_"
    if flag == "":
        f_n_pn = os.path.join(data_dir, "output",
                              prefix + "pathway_name_candidate.csv")
    else:
        f_n_pn = os.path.join(data_dir, "output",
                              prefix + "pathway_name_candidate_" + str(flag) + ".csv")
    # Reuse a previously generated candidate list when requested.
    if same_path_list is True and os.path.isfile(f_n_pn):
        # NOTE(review): np.loadtxt returns a 0-d array for a one-line file,
        # so len() would raise TypeError — confirm candidate files always
        # hold more than one path.
        path_list = np.loadtxt(f_n_pn, dtype=str, delimiter=',')
        if (len(path_list) == 0):
            raise ValueError("NO VALID path found!!!")
        return len(path_list)
    # Start fresh: drop any stale candidate file (ignore "not found").
    try:
        os.remove(f_n_pn)
    except OSError:
        pass
    f_n_ps = os.path.join(data_dir, "output", prefix + "pathway_stat.csv")
    path_list = []
    d_f = pd.read_csv(f_n_ps, names=['pathway', 'frequency'])
    # mask1: pathway matches path_reg (or any non-empty pathway when None).
    if path_reg is not None:
        mask1 = d_f['pathway'].str.contains(path_reg)
    else:
        mask1 = d_f['pathway'].str.len() > 0
    # mask2: net production of species spe_idx along the path >= n_threshold.
    if spe_idx is None:
        mask2 = d_f['pathway'].str.len() > 0
    else:
        net_reactant = psri.parse_reaction_net_reactant(data_dir)
        net_product = psri.parse_reaction_net_product(data_dir)
        s_p_r_c = psri.parse_species_pair_reaction(data_dir)
        mask2 = d_f.apply(lambda x: pp.parse_net_species_along_path_using_reaction(
            pathname=x['pathway'], net_r=net_reactant, net_p=net_product, spe_idx=spe_idx, s_p_r_c=s_p_r_c) >= n_threshold, axis=1)
    # read
    if end_s_idx is None or end_s_idx == []:
        # No terminal-species constraint.
        mask3 = d_f['pathway'].str.len() > 0
        path_list.extend(d_f[mask1 & mask2 & mask3]['pathway'])
        if spe_production_oriented is False or spe_idx is None:
            # save
            # NOTE(review): the file is written before the empty check, so an
            # empty candidate file may exist when ValueError is raised.
            np.savetxt(f_n_pn, path_list[0:top_n], fmt="%s")
            if (len(path_list) == 0):
                raise ValueError("NO VALID path found!!!")
            return len(path_list[0:top_n])
        elif spe_idx is not None:
            # Expand each pathway into its species-production sub-paths,
            # de-duplicating via the r_list keys.
            path_list2 = []
            path_set = set()
            for _, val1 in enumerate(path_list):
                p_list, r_list = pp.get_spe_production_sub_path(
                    val1, net_reactant, net_product, spe_idx, s_p_r_c, n_threshold)
                for idx2, val2 in enumerate(r_list):
                    if val2 not in path_set:
                        path_set.add(val2)
                        path_list2.append(p_list[idx2])
            # one more filter of path, have to contain path_reg
            path_list3 = []
            for path in path_list2:
                if pp.path_contain_regex(path, path_reg=path_reg):
                    path_list3.append(path)
            # one more filter of path, don't contain no_path_reg
            path_list4 = []
            if no_path_reg is None:
                path_list4 = path_list3
            else:
                for path in path_list3:
                    if not pp.path_contain_regex(path, path_reg=no_path_reg):
                        path_list4.append(path)
            np.savetxt(f_n_pn, path_list4[0:top_n], fmt="%s")
            # NOTE(review): this checks path_list, not path_list4 — a fully
            # filtered-out result returns 0 instead of raising; confirm intent.
            if (len(path_list) == 0):
                raise ValueError("NO VALID path found!!!")
            return len(path_list4[0:top_n])
    else:
        # Keep only pathways ending at each requested terminal species,
        # taking up to top_n per terminal species.
        for s_i in end_s_idx:
            mask3 = d_f['pathway'].str.endswith("S" + str(s_i))
            path_list.extend(d_f[mask1 & mask2 & mask3]['pathway'][0:top_n])
        # save
        np.savetxt(f_n_pn, path_list, fmt="%s")
        if (len(path_list) == 0):
            raise ValueError("NO VALID path found!!!")
        return len(path_list)
def prepare_pathway_name_for_passage_time(data_dir, flag="", init_s_idx=None):
    """
    Write species_pathway_name_candidate[_<flag>].csv under <data_dir>/output,
    containing one synthetic pathway "S<idx>R100S100" per initial species
    index.  Returns the number of pathway names written.
    """
    # Candidate file name; the flag, when given, suffixes the base name.
    name = "species_pathway_name_candidate"
    if flag != "":
        name += "_" + str(flag)
    f_n_pn = os.path.join(data_dir, "output", name + ".csv")
    # Remove any stale candidate file; ignore "file not found".
    try:
        os.remove(f_n_pn)
    except OSError:
        pass
    # Default to species 62 when no initial species indices are supplied.
    if init_s_idx is None:
        chosen = [62]
    else:
        chosen = deepcopy(init_s_idx)
    path_list = ["S" + str(s_i) + "R100S100" for s_i in chosen]
    np.savetxt(f_n_pn, path_list, fmt="%s")
    return len(path_list)
def prepare_pathway_time(
        data_dir, top_n=5, num=1, flag="", begin_t=0.0, end_t=1.0,
        species_path=False, fixed_t0_or_tf=None):
    """
    Write <prefix>pathway_time_candidate[_<flag>].csv under <data_dir>/output:
    one row of `num` evenly spaced times per pathway (top_n rows).

    fixed_t0_or_tf selects which endpoint to drop from the num+1 grid:
    None or "t0" keeps the tail (drops begin_t), otherwise keeps the head
    (drops end_t).
    """
    prefix = "species_" if species_path is True else ""
    name = prefix + "pathway_time_candidate"
    if flag != "":
        name += "_" + str(flag)
    f_n_pt = os.path.join(data_dir, "output", name + ".csv")
    # Remove any stale candidate file; ignore "file not found".
    try:
        os.remove(f_n_pt)
    except OSError:
        pass
    # Every row is the same num+1 point grid from begin_t to end_t.
    grid = np.linspace(begin_t, end_t, num + 1)
    t_mat = np.tile(grid, (top_n, 1))
    kept = t_mat[:, 1:] if (fixed_t0_or_tf is None or fixed_t0_or_tf == "t0") else t_mat[:, :-1]
    np.savetxt(f_n_pt, kept, delimiter=',', fmt='%.15f')
if __name__ == '__main__':
    # print("hello")
    # SOHR_DATA sits four directory levels above this script.
    DATA_DIR = os.path.abspath(os.path.join(os.path.realpath(
        sys.argv[0]), os.pardir, os.pardir, os.pardir, os.pardir, "SOHR_DATA"))
    # print(DATA_DIR)
    # prepare_pathway_name(DATA_DIR, top_n=5, flag="",
    #                      end_s_idx=[62, 59])
    # prepare_pathway_name(DATA_DIR, top_n=10, flag="",
    #                      end_s_idx=None, species_path=False, path_reg='^S62R(736|738)', no_path_reg=None)
    # Default run: production-oriented paths for species 10.
    prepare_pathway_name(
        DATA_DIR, top_n=5, flag="", end_s_idx=None, species_path=False,
        path_reg=None, no_path_reg=None, spe_idx=10, spe_production_oriented=True)
# Import
import sqlite3
from logging import getLogger
from os import walk, mkdir
from os.path import join, isdir
from shutil import rmtree
from params import (LOGGER_NAME, INSPECT_COLLECT_DIR, SQLITE_DB_DIR)
from utils import json_deserialize
# Basic info
__version__ = "0.0.0-Beta"
__all__ = []
__author__ = "yyg"
# Add logger
logger = getLogger(LOGGER_NAME)
# Exceptions
# main code
class Assembler(object):
    """
    Generate formated inspect outcome
    - step1: reverse-serialize
    - step2: re-range data
    - step3: generate tables
        - collect table cols
        - table(disk_info)   => disk_***
        - table(network_info) => netwk_**
    tables = [u"basic_info", u"disk_info", u"netwk_info"]
    table struct:
        - disk
        id|hostname/ip|disk_num|disk_1         | disk_2        |
        1 |10.10.10.10|2       |/data1=100G+10%|/data2=200G+20%|
    """

    def __init__(self):
        self.db = "laserjet.db"  # SQLite database file name
        self.conn = None         # sqlite3 connection, opened by create_db()
        self.data = list()       # one deserialized dict per JSON report
        self.tables = {
            # "xxx" : [[cols],sql_create_table, [data], [sql_insert_rows]]
            "basic_info": [[], None, [], []],
            "disk_info": [[], None, [], []],
            "netwk_info": [[], None, [], []]
        }

    # steps
    def start(self):
        """Run the whole pipeline: db -> deserialize -> tables -> rows."""
        self.create_db()
        self.deserialize()
        self.create_tables()
        self.insert_rows()

    def create_db(self):
        """Recreate SQLITE_DB_DIR from scratch and open the database."""
        if not isdir(SQLITE_DB_DIR):
            mkdir(SQLITE_DB_DIR)
        else:
            rmtree(SQLITE_DB_DIR)  # clean up existing laserjet.db
            mkdir(SQLITE_DB_DIR)
        self.conn = sqlite3.connect(join(SQLITE_DB_DIR, self.db))

    def deserialize(self):
        """Load every JSON report, split columns per table, build CREATE sql."""
        total_cols = set()
        logger.info("Start deserialize")
        for file in Assembler.__jfiles():
            with open(file) as j_content:
                j_content_dict = json_deserialize(j_content)
                self.data.append(j_content_dict)
                total_cols = total_cols | set(j_content_dict.keys())
        tmp = self.__filter_cols(total_cols, "disk_")
        # Bug fix: list.append() returns None, so the original
        # `self.tables["disk_info"][0] = tmp[1].append("hostname")` stored
        # None instead of the column list.  Append first, assign after.
        tmp[1].append("hostname")
        self.tables["disk_info"][0] = tmp[1]
        self.tables["disk_info"][1] = Assembler.sql_crt_tb("disk_info", tmp[1])
        tmp = self.__filter_cols(tmp[0], "netwk_")
        self.tables["netwk_info"][0] = tmp[1]
        self.tables["netwk_info"][1] = Assembler.sql_crt_tb("netwk_info", tmp[1])
        self.tables["basic_info"][0] = tmp[0]
        self.tables["basic_info"][1] = Assembler.sql_crt_tb("basic_info", tmp[0])
        logger.info("Table disk_info contains columns: %s" % self.tables["disk_info"][0])
        logger.info("Table disk_info use sql: %s" % self.tables["disk_info"][1])
        logger.info("Table netwk_info contains columns: %s" % self.tables["netwk_info"][0])
        logger.info("Table netwk_info use sql: %s" % self.tables["netwk_info"][1])
        logger.info("Table basic_info contains columns: %s" % self.tables["basic_info"][0])
        logger.info("Table basic_info use sql: %s" % self.tables["basic_info"][1])

    def create_tables(self):
        """Execute each CREATE TABLE statement."""
        for tb in self.tables.values():
            # excute each sql to create corresponding tables
            self.conn.execute(tb[1])

    def categorize_data(self):
        """Split every host dict into per-table row dicts."""
        for element in self.data:
            disk_info = dict()
            netwk_info = dict()
            basic_info = dict()
            for k, v in element.items():  # items() works on both Py2 and Py3
                if k.startswith("disk_") or k == "hostname":
                    disk_info[k] = v
                elif k.startswith("netwk_") or k == "hostname":
                    # NOTE(review): the `or k == "hostname"` above is
                    # unreachable — the first branch already captures
                    # hostname, so netwk rows carry no hostname (matching
                    # the netwk table, which has no hostname column).
                    netwk_info[k] = v
                else:
                    basic_info[k] = v
            self.tables["disk_info"][2].append(disk_info)
            self.tables["netwk_info"][2].append(netwk_info)
            self.tables["basic_info"][2].append(basic_info)

    def insert_rows(self):
        """Insert all categorized rows, then commit and close."""
        self.categorize_data()
        for k, v in self.tables.items():  # items() works on both Py2 and Py3
            # k = "disk_info"
            # v = [[cols],sql_create_table, [{data},{data}], [sql_insert_rows]]
            for data in v[2]:
                self.conn.execute(Assembler.sql_insert_rows(k, data))
        self.conn.commit()
        self.conn.close()

    # private methods
    @staticmethod
    def sql_insert_rows(tb, data):
        """Build an INSERT statement for one row dict."""
        cols = []
        values = []
        for k, v in data.items():
            cols.append(k)
            values.append(v)
        cols = ",".join(cols)
        values = ",".join(map(Assembler.addquotation, values))
        # NOTE(review): values are spliced into the SQL literally; fine for
        # trusted inspect output, but not injection-safe — sqlite3 "?"
        # placeholders would be the safe alternative.
        sql = "INSERT INTO {0} ({1}) VALUES ({2});".format(tb, cols, values)
        logger.info("SQL = %s" % sql)
        return sql

    @staticmethod
    def addquotation(a):
        """Wrap a value in single quotes for use as a SQL literal."""
        return "'" + str(a) + "'"

    @staticmethod
    def sql_crt_tb(tb, cols):
        """
        :param tb: str
        :param cols: list
        :return: sql: str

        Bug fix: the original mutated *cols* in place (appending the type
        suffix to the caller's stored column list); build the column spec
        without side effects instead.
        """
        col_style = " VARCHAR(20)"
        columns = ",".join(col + col_style for col in cols)
        return "CREATE TABLE {0} ( {1} );".format(tb, columns)

    @staticmethod
    def __jfiles():
        """
        : () => ["/**/.../**.json", "/**/.../**.json", ...]
        """
        # next(walk(...)) works on Py2 and Py3; ".next()" was Py2-only.
        return [join(INSPECT_COLLECT_DIR, file) for file in next(walk(INSPECT_COLLECT_DIR))[2] if
                file.endswith(".json")]

    @staticmethod
    def __filter_cols(data, label):
        """
        : (list, str) => [[rest],[filtered]]
        """
        return [[i for i in data if not i.startswith(label)], [i for i in data if i.startswith(label)]]
if __name__ == "__main__":
    # Module is used as a library; no CLI entry point.
    pass

# Fix: stray " | lib/core/assemble.py" extraction junk fused onto the line
# above was removed; the "# Import" header that opens the following section
# is kept below.
# Import
import sqlite3
from logging import getLogger
from os import walk, mkdir
from os.path import join, isdir
from shutil import rmtree
from params import (LOGGER_NAME, INSPECT_COLLECT_DIR, SQLITE_DB_DIR)
from utils import json_deserialize
# Basic info
__version__ = "0.0.0-Beta"
__all__ = []
__author__ = "yyg"
# Add logger
logger = getLogger(LOGGER_NAME)
# Exceptions
# main code
class Assembler(object):
    """
    Generate formated inspect outcome
    - step1: reverse-serialize
    - step2: re-range data
    - step3: generate tables
        - collect table cols
        - table(disk_info)   => disk_***
        - table(network_info) => netwk_**
    tables = [u"basic_info", u"disk_info", u"netwk_info"]
    table struct:
        - disk
        id|hostname/ip|disk_num|disk_1         | disk_2        |
        1 |10.10.10.10|2       |/data1=100G+10%|/data2=200G+20%|
    """

    def __init__(self):
        self.db = "laserjet.db"  # SQLite database file name
        self.conn = None         # sqlite3 connection, opened by create_db()
        self.data = list()       # one deserialized dict per JSON report
        self.tables = {
            # "xxx" : [[cols],sql_create_table, [data], [sql_insert_rows]]
            "basic_info": [[], None, [], []],
            "disk_info": [[], None, [], []],
            "netwk_info": [[], None, [], []]
        }

    # steps
    def start(self):
        """Run the whole pipeline: db -> deserialize -> tables -> rows."""
        self.create_db()
        self.deserialize()
        self.create_tables()
        self.insert_rows()

    def create_db(self):
        """Recreate SQLITE_DB_DIR from scratch and open the database."""
        if not isdir(SQLITE_DB_DIR):
            mkdir(SQLITE_DB_DIR)
        else:
            rmtree(SQLITE_DB_DIR)  # clean up existing laserjet.db
            mkdir(SQLITE_DB_DIR)
        self.conn = sqlite3.connect(join(SQLITE_DB_DIR, self.db))

    def deserialize(self):
        """Load every JSON report, split columns per table, build CREATE sql."""
        total_cols = set()
        logger.info("Start deserialize")
        for file in Assembler.__jfiles():
            with open(file) as j_content:
                j_content_dict = json_deserialize(j_content)
                self.data.append(j_content_dict)
                total_cols = total_cols | set(j_content_dict.keys())
        tmp = self.__filter_cols(total_cols, "disk_")
        # Bug fix: list.append() returns None, so the original
        # `self.tables["disk_info"][0] = tmp[1].append("hostname")` stored
        # None instead of the column list.  Append first, assign after.
        tmp[1].append("hostname")
        self.tables["disk_info"][0] = tmp[1]
        self.tables["disk_info"][1] = Assembler.sql_crt_tb("disk_info", tmp[1])
        tmp = self.__filter_cols(tmp[0], "netwk_")
        self.tables["netwk_info"][0] = tmp[1]
        self.tables["netwk_info"][1] = Assembler.sql_crt_tb("netwk_info", tmp[1])
        self.tables["basic_info"][0] = tmp[0]
        self.tables["basic_info"][1] = Assembler.sql_crt_tb("basic_info", tmp[0])
        logger.info("Table disk_info contains columns: %s" % self.tables["disk_info"][0])
        logger.info("Table disk_info use sql: %s" % self.tables["disk_info"][1])
        logger.info("Table netwk_info contains columns: %s" % self.tables["netwk_info"][0])
        logger.info("Table netwk_info use sql: %s" % self.tables["netwk_info"][1])
        logger.info("Table basic_info contains columns: %s" % self.tables["basic_info"][0])
        logger.info("Table basic_info use sql: %s" % self.tables["basic_info"][1])

    def create_tables(self):
        """Execute each CREATE TABLE statement."""
        for tb in self.tables.values():
            # excute each sql to create corresponding tables
            self.conn.execute(tb[1])

    def categorize_data(self):
        """Split every host dict into per-table row dicts."""
        for element in self.data:
            disk_info = dict()
            netwk_info = dict()
            basic_info = dict()
            for k, v in element.items():  # items() works on both Py2 and Py3
                if k.startswith("disk_") or k == "hostname":
                    disk_info[k] = v
                elif k.startswith("netwk_") or k == "hostname":
                    # NOTE(review): the `or k == "hostname"` above is
                    # unreachable — the first branch already captures
                    # hostname, so netwk rows carry no hostname (matching
                    # the netwk table, which has no hostname column).
                    netwk_info[k] = v
                else:
                    basic_info[k] = v
            self.tables["disk_info"][2].append(disk_info)
            self.tables["netwk_info"][2].append(netwk_info)
            self.tables["basic_info"][2].append(basic_info)

    def insert_rows(self):
        """Insert all categorized rows, then commit and close."""
        self.categorize_data()
        for k, v in self.tables.items():  # items() works on both Py2 and Py3
            # k = "disk_info"
            # v = [[cols],sql_create_table, [{data},{data}], [sql_insert_rows]]
            for data in v[2]:
                self.conn.execute(Assembler.sql_insert_rows(k, data))
        self.conn.commit()
        self.conn.close()

    # private methods
    @staticmethod
    def sql_insert_rows(tb, data):
        """Build an INSERT statement for one row dict."""
        cols = []
        values = []
        for k, v in data.items():
            cols.append(k)
            values.append(v)
        cols = ",".join(cols)
        values = ",".join(map(Assembler.addquotation, values))
        # NOTE(review): values are spliced into the SQL literally; fine for
        # trusted inspect output, but not injection-safe — sqlite3 "?"
        # placeholders would be the safe alternative.
        sql = "INSERT INTO {0} ({1}) VALUES ({2});".format(tb, cols, values)
        logger.info("SQL = %s" % sql)
        return sql

    @staticmethod
    def addquotation(a):
        """Wrap a value in single quotes for use as a SQL literal."""
        return "'" + str(a) + "'"

    @staticmethod
    def sql_crt_tb(tb, cols):
        """
        :param tb: str
        :param cols: list
        :return: sql: str

        Bug fix: the original mutated *cols* in place (appending the type
        suffix to the caller's stored column list); build the column spec
        without side effects instead.
        """
        col_style = " VARCHAR(20)"
        columns = ",".join(col + col_style for col in cols)
        return "CREATE TABLE {0} ( {1} );".format(tb, columns)

    @staticmethod
    def __jfiles():
        """
        : () => ["/**/.../**.json", "/**/.../**.json", ...]
        """
        # next(walk(...)) works on Py2 and Py3; ".next()" was Py2-only.
        return [join(INSPECT_COLLECT_DIR, file) for file in next(walk(INSPECT_COLLECT_DIR))[2] if
                file.endswith(".json")]

    @staticmethod
    def __filter_cols(data, label):
        """
        : (list, str) => [[rest],[filtered]]
        """
        return [[i for i in data if not i.startswith(label)], [i for i in data if i.startswith(label)]]
if __name__ == "__main__":
    # Module is used as a library; no CLI entry point.
    pass
import requests
from .log import LOGGER
from requests.auth import HTTPBasicAuth as Auth
class HttpApi(object):
    """
    User and Password are data from Flussonic Server
    (see to edit_auth, view_auth).
    HTTP Basic auth.

    Usage pattern: each public method/property stores the endpoint path in
    self.api and a log label in self.message, then evaluates the _connect
    property, which performs the HTTP GET.
    """

    def __init__(self, user, password, url):
        self.auth = Auth(user, password)
        self.message = None  # log label describing the pending request
        self.url = 'http://{}/flussonic/api/'.format(url)
        self.api = None  # endpoint path appended to self.url

    @property
    def _connect(self):
        """GET self.url + self.api; return parsed JSON, False for HTTP 424
        (dead stream), or None on connection/HTTP/JSON errors (logged)."""
        try:
            r = requests.get(''.join((self.url, self.api)), auth=self.auth)
        except requests.RequestException as e:
            # requests.Timeout subclasses RequestException, so this single
            # clause covers both exceptions the original listed.
            LOGGER.error('Error request {}: {}'.format(self.message, e))
            return None
        try:
            # TODO for stream_health
            if r.status_code == 424:
                # stream is dead
                return False
            r.raise_for_status()
        except requests.HTTPError as e:
            LOGGER.error('Error request {}: {}'.format(self.message, e))
            return None
        try:
            response = r.json()
        except ValueError as e:
            LOGGER.error('Error request {}: {}'.format(self.message, e))
            return None
        else:
            return response

    def simple_method(self, api, message):
        """
        Simple basic method for API.
        If need to create something quickly.
        """
        self.api = api
        self.message = message
        return self._connect

    def dvr_status(self, year, month, day, stream_name):
        """Recording map for one day of one stream."""
        self.api = 'dvr_status/{}/{}/{}/{}'.format(year, month, day, stream_name)
        self.message = 'Recording map over the past day {}/{}/{}'.format(year, month, day)
        return self._connect

    def media_info(self, stream_name):
        """Stream metadata for one stream."""
        self.api = 'media_info/{}'.format(stream_name)
        self.message = 'Stream information'
        return self._connect

    @property
    def server(self):
        """Server information as JSON."""
        self.api = 'server'
        self.message = 'Server info in JSON format.'
        return self._connect

    @property
    def sessions(self):
        """All open sessions."""
        self.api = 'sessions'
        self.message = 'Number of open sessions'
        return self._connect

    def sessions_stream(self, stream_name):
        """Open sessions for one stream."""
        self.api = 'sessions?name={}'.format(stream_name)
        self.message = 'List of open sessions for a specific stream'
        return self._connect

    def stream_health(self, stream_name):
        """Stream quality/health status."""
        self.api = 'stream_health/{}'.format(stream_name)
        self.message = 'Stream quality'
        return self._connect

    @property
    def streams(self):
        """All streams with their clients and state."""
        self.api = 'streams'
        self.message = 'List of streams, clients and state of this streams'
        # Fix: stray " | api/http.py" extraction junk fused onto the line
        # below was removed.
        return self._connect

# Restored: "import requests" had been fused onto the junk line above; it
# opens the following section and is required there.
import requests
from .log import LOGGER
from requests.auth import HTTPBasicAuth as Auth
class HttpApi(object):
    """
    User and Password are data from Flussonic Server
    (see to edit_auth, view_auth).
    HTTP Basic auth.

    Usage pattern: each public method/property stores the endpoint path in
    self.api and a log label in self.message, then evaluates the _connect
    property, which performs the HTTP GET.  Instances are therefore stateful
    and not safe to share across threads.
    """

    def __init__(self, user, password, url):
        self.auth = Auth(user, password)
        self.message = None  # log label describing the pending request
        self.url = 'http://{}/flussonic/api/'.format(url)
        self.api = None  # endpoint path appended to self.url

    @property
    def _connect(self):
        # GET self.url + self.api; returns parsed JSON on success, False for
        # HTTP 424 (dead stream), or None on connection/HTTP/JSON errors.
        try:
            r = requests.get(''.join((self.url, self.api)), auth=self.auth)
        except (requests.RequestException, requests.Timeout) as e:
            # NOTE(review): Timeout subclasses RequestException, so listing
            # both is redundant but harmless.
            LOGGER.error('Error request {}: {}'.format(self.message, e))
            return None
        try:
            # TODO for stream_health
            if r.status_code == 424:
                # stream is dead
                return False
            r.raise_for_status()
        except requests.HTTPError as e:
            LOGGER.error('Error request {}: {}'.format(self.message, e))
            return None
        try:
            response = r.json()
        except ValueError as e:
            # body was not valid JSON
            LOGGER.error('Error request {}: {}'.format(self.message, e))
            return None
        else:
            return response

    def simple_method(self, api, message):
        """
        Simple basic method for API.
        If need to create something quickly.
        """
        self.api = api
        self.message = message
        return self._connect

    def dvr_status(self, year, month, day, stream_name):
        # Recording map for one day of one stream.
        self.api = 'dvr_status/{}/{}/{}/{}'.format(year, month, day, stream_name)
        self.message = 'Recording map over the past day {}/{}/{}'.format(year, month, day)
        return self._connect

    def media_info(self, stream_name):
        # Stream metadata for one stream.
        self.api = 'media_info/{}'.format(stream_name)
        self.message = 'Stream information'
        return self._connect

    @property
    def server(self):
        # Server information as JSON.
        self.api = 'server'
        self.message = 'Server info in JSON format.'
        return self._connect

    @property
    def sessions(self):
        # All open sessions.
        self.api = 'sessions'
        self.message = 'Number of open sessions'
        return self._connect

    def sessions_stream(self, stream_name):
        # Open sessions for one stream.
        self.api = 'sessions?name={}'.format(stream_name)
        self.message = 'List of open sessions for a specific stream'
        return self._connect

    def stream_health(self, stream_name):
        # Stream quality/health status.
        self.api = 'stream_health/{}'.format(stream_name)
        self.message = 'Stream quality'
        return self._connect

    @property
    def streams(self):
        # All streams with their clients and state.
        self.api = 'streams'
        self.message = 'List of streams, clients and state of this streams'
        return self._connect