text stringlengths 957 885k |
|---|
<filename>architect/examples/multi_agent_manipulation/mam_plotting.py
import jax.numpy as jnp
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from celluloid import Camera
def make_box_patches(
    box_state, alpha: float, box_side_length: float, ax, hatch: bool = False
):
    """Add patches visualizing the box to the given axes.

    args:
        box_state: (x, y, theta, vx, vy, thetadot)
        alpha: float transparency
        box_side_length: float side length of box
        ax: matplotlib axes
        hatch: if True, hatch the box patch
    returns:
        a list of properly transformed and colored patches for the box
    """
    added_patches = []
    center = box_state[:2]
    rotation = box_state[2]
    color = plt.get_cmap("Blues")(0.1 + alpha)
    half_side = box_side_length / 2.0

    # The rectangle is drawn with its corner at the origin, rotated about its
    # own center, then translated so `center` is the box center of mass.
    square_xform = (
        transforms.Affine2D()
        .rotate_around(half_side, half_side, theta=rotation)
        .translate(*(center - half_side))
        + ax.transData
    )
    square = patches.Rectangle(
        (0, 0),
        box_side_length,
        box_side_length,
        linewidth=2,
        transform=square_xform,
        edgecolor=color,
        fill=False,
        hatch=("/" if hatch else None),
    )
    ax.add_patch(square)
    added_patches.append(square)

    # Heading arrow: drawn pointing +y at the origin, rotated to the box
    # heading (the -pi/2 offset maps "up" onto theta = 0) and moved to center.
    arrow_xform = (
        transforms.Affine2D()
        .rotate_around(0.0, 0.0, theta=rotation - jnp.pi / 2.0)
        .translate(*center)
        + ax.transData
    )
    heading = patches.Arrow(
        0,
        0,
        0,
        box_side_length / 8,
        width=box_side_length / 20,
        linewidth=2,
        transform=arrow_xform,
        edgecolor=color,
        facecolor=color,
        fill=True,
    )
    ax.add_patch(heading)
    added_patches.append(heading)

    return added_patches
def make_turtle_patches(turtle_state, alpha: float, radius: float, ax):
    """Add patches visualizing the turtlebot to the given axes.

    args:
        turtle_state: (x, z, theta, vx, vz, thetadot)
        alpha: float transparency
        radius: float radius of turtlebot
        ax: matplotlib axes
    """
    center = turtle_state[:2]
    heading = turtle_state[2]
    color = plt.get_cmap("Oranges")(0.1 + alpha)

    # Body: a circle drawn at the origin and translated to the turtle center.
    body_xform = transforms.Affine2D().translate(*center) + ax.transData
    body = patches.Circle(
        (0, 0),
        radius,
        linewidth=2,
        transform=body_xform,
        edgecolor=color,
        fill=False,
    )
    ax.add_patch(body)

    # Heading arrow: drawn pointing +y at the origin, rotated to the turtle
    # heading (the -pi/2 offset maps "up" onto theta = 0) and moved to center.
    arrow_xform = (
        transforms.Affine2D()
        .rotate_around(0.0, 0.0, theta=heading - jnp.pi / 2.0)
        .translate(*center)
        + ax.transData
    )
    arrow = patches.Arrow(
        0,
        0,
        0,
        0.8 * radius,
        width=radius / 2,
        linewidth=2,
        transform=arrow_xform,
        edgecolor=color,
        facecolor=color,
        fill=True,
    )
    ax.add_patch(arrow)
def plot_turtle_trajectory(turtle_states, radius: float, n_steps_to_show: int, ax):
    """Plot a trajectory of the turtlebot on the given axes.

    args:
        turtle_states: (N, 6) array of states
        radius: float radius of turtlebot
        n_steps_to_show: plot a continuous line for the trajectory along with
            `n_steps_to_show` circles for the turtlebot at different points
            in time
        ax: the matplotlib axis to plot upon
    """
    # Plot the center-of-mass trajectory
    ax.plot(
        turtle_states[:, 0],
        turtle_states[:, 1],
        label="Turtlebot",
        color=plt.get_cmap("Oranges")(1.0),
    )
    # Draw the snapshots. End the linspace at n_steps - 1 so every generated
    # index is in range; the previous endpoint of n_steps produced an
    # out-of-bounds final index and relied on JAX silently clamping it.
    n_steps = turtle_states.shape[0]
    i_to_show = jnp.linspace(0, n_steps - 1, n_steps_to_show, dtype=int)
    alphas = jnp.linspace(0.3, 1.0, n_steps)
    for i in i_to_show:
        make_turtle_patches(turtle_states[i], alphas[i].item(), radius, ax)
def plot_box_trajectory(box_states, box_size: float, n_steps_to_show: int, ax):
    """Plot a trajectory of the box on the given axes.

    args:
        box_states: (N, 6) array of states
        box_size: float side length of the box
        n_steps_to_show: plot a continuous line for the trajectory along with
            `n_steps_to_show` box outlines at different points in time
        ax: the matplotlib axis to plot upon
    """
    # Plot the center-of-mass trajectory
    ax.plot(
        box_states[:, 0],
        box_states[:, 1],
        label="Box",
        color=plt.get_cmap("Blues")(1.0),
    )
    # Draw the snapshots. End the linspace at n_steps - 1 so every generated
    # index is in range; the previous endpoint of n_steps produced an
    # out-of-bounds final index and relied on JAX silently clamping it.
    n_steps = box_states.shape[0]
    i_to_show = jnp.linspace(0, n_steps - 1, n_steps_to_show, dtype=int)
    alphas = jnp.linspace(0.3, 1.0, n_steps)
    for i in i_to_show:
        make_box_patches(box_states[i], alphas[i].item(), box_size, ax)
def make_pushing_animation(
    box_states,
    turtle_states,
    desired_box_pose,
    box_size: float,
    radius: float,
    n_steps_to_show: int,
    ms_per_frame: int,
    save_filename: str,
):
    """Make an animation of the pushing action and save it.

    args:
        box_states: (N, 6) array of box states
        turtle_states: (N, n_turtles, 6) array of turtlebot states
        desired_box_pose: (3,) array of (x, y, theta) desired box pose
        box_size: float side length of the box
        radius: float turtlebot radius
        n_steps_to_show: number of frames in the animation
        ms_per_frame: milliseconds per frame
        save_filename: filename where the animation should be saved.
    """
    # Make a figure for the animation
    fig, ax = plt.subplots(1, 1, figsize=(8, 8))
    camera = Camera(fig)

    # For each frame, plot the turtlebots and box.
    n_steps = box_states.shape[0]
    n_turtles = turtle_states.shape[1]
    # End the linspace at n_steps - 1 so every generated index is in range;
    # the previous endpoint of n_steps produced an out-of-bounds final index
    # and relied on JAX silently clamping it.
    i_to_show = jnp.linspace(0, n_steps - 1, n_steps_to_show, dtype=int)
    for i in i_to_show:
        # Plot box center-of-mass trajectory up to this frame
        ax.plot(
            box_states[:i, 0],
            box_states[:i, 1],
            color=plt.get_cmap("Blues")(1.0),
        )
        # Plot box patch
        make_box_patches(box_states[i], 1.0, box_size, ax)
        # Plot desired box pose on the same axes as everything else
        # (previously used plt.gca(), which only coincidentally matched ax)
        make_box_patches(desired_box_pose, 1.0, box_size, ax, hatch=True)
        # Empty fill purely to get a hatched legend entry; only labelled on
        # the first frame so the legend is not duplicated.
        label = "Desired box pose" if i == i_to_show[0] else None
        ax.fill_between(
            [],
            [],
            [],
            edgecolor=plt.get_cmap("Blues")(1.0),
            hatch="xx",
            label=label,
            facecolor="none",
        )
        ax.legend()
        for j in range(n_turtles):
            # Plot turtle center-of-mass trajectory up to this frame
            ax.plot(
                turtle_states[:i, j, 0],
                turtle_states[:i, j, 1],
                color=plt.get_cmap("Oranges")(1.0),
            )
            # Plot turtle patch
            make_turtle_patches(turtle_states[i, j], 1.0, radius, ax)
        # Prettify
        plt.xlabel("x")
        plt.ylabel("y")
        plt.xlim([-0.75, 1.0])
        plt.ylim([-0.75, 1.0])
        plt.gca().set_aspect("equal")
        # Take a snapshot
        camera.snap()
    # Save the animation
    animation = camera.animate(interval=ms_per_frame)
    animation.save(save_filename)
|
# ------------- Arithmetic operators -----------------------
# Python supports many arithmetic operators:
# + - * / ** (power) // (floor division) % (remainder)
print(1 + 1)  # 2
print(4 - 1)  # 3
print(3 * 2)  # 6
# In Python 3, dividing two integers with / always yields a float
print(6 / 2)  # 3.0
print(9 / 2)  # 4.5
print(10 / 3)  # 3.3333333333333335
print(3 ** 3)  # 27
print(81 ** (1 / 2))  # 9.0
# Strings support + and * in a limited way.
# +: only between two strings; concatenates them
print('hello' + 'world')
# print('18' + 1) # numbers and strings cannot be added together in Python
# *: between a string and a number; repeats the string
print('hello' * 2)
# ------------- Assignment operators -----------------------
x = 1
x += 1
print(x)
x -= 1
print(x)
x *= 2
print(x)
x /= 2
print(x)
x **= 4
print(x)
x //= 5
print(x)
x %= 6
print(x)
a = b = c = d = 'hello'
print(a)
print(b)
print(c)
# Unpacking raises an error when the number of variables and values differ
e, f = 3, 5  # unpacking
print(e)
print(f)
g = 'hello', 'world'
print(type(g))  # <class 'tuple'>
# * captures a variable-length remainder when unpacking
h, *i, j = 1, 2, 3, 4, 5, 6
print(h, i, j)  # 1 [2, 3, 4, 5] 6
# ------------- Comparison operators -----------------------
# > < >= <= != ==
# Between strings: compared character by character by code point (ASCII)
print('a' > 'b')  # False
print('abc' > 'b')  # False
# Between a number and a string: == is False, != is True, anything else raises
print('a' == 5)
print('a' != 5)
# print('a' > 90)
# ------------- Logical operators -----------------------
print('------逻辑---------')
# and: if any operand is falsy the result is falsy; True only if all are True
print(2 > 1 and 5 > 3 and 10 > 2)  # True
print(3 > 5 and 5 < 4 and 6 > 1)  # False
# or: if any operand is truthy the result is truthy; False only if all are False
print(3 > 9 or 4 < 7 or 10 < 3)  # True
print(3 > 5 or 4 < 2 or 8 < 7)  # False
# not: True ==> False, False ==> True
print(not (5 > 2))  # False
# and / or / not
# short-circuit `and`: the right side only runs when the left side is truthy
4 > 3 and print('hhh')
4 < 3 and print('lll')
# short-circuit `or`: the right side only runs when the left side is falsy
4 > 3 or print('hhh')
4 < 3 or print('lll')
# Is the result of `and` always a boolean? No.
# `and` returns the first falsy operand, or the last operand if all are truthy
# short-circuit: evaluation stops at the first falsy value
print(3 and 5 and 0 and 'hello')  # 0
print(3 and 5 and 1 and 'hello')  # hello
# `or` returns the first truthy operand, or the last operand if all are falsy
# short-circuit: evaluation stops at the first truthy value
print(0 or [] or 'lisi' or 5)  # lisi
print(0 or [] or {} or ())  # ()
# ------------- Bitwise operators -----------------------
# bitwise: & and | or ^ xor << left shift >> right shift ~ invert
k = 23
l = 15
print(k & l)  # 7
print(k | l)  # 31
print(k ^ l)  # 24
print(5 << 3)  # a << n ==> a * 2**n
print(16 >> 2)  # a >> n ==> a // 2**n
color = 0xF0384E
red = hex(color >> 16)
green = hex(color >> 8 & 0xFF)
blue = hex(color & 0xFF)
print(bin(color), red, green, blue)
# Logical operator precedence: not > and > or
print(True or False and True)  # True
print(False or not False)  # True
print(True or True and False)  # True
# ------------------------------------------------------
# + : concatenates strings / tuples / lists
print('hello' + 'world')
print(('good', 'yes') + ('hi', 'ok'))
print([1, 2, 3] + [4, 5, 6])
# - : only for sets; computes the set difference
print({1, 2, 3} - {3})
# * : repeats strings / tuples / lists; not supported for dicts and sets
print('hello' * 3)
print([1, 3, 4] * 3)
print((1, 3, 4) * 3)
# dict keys and set elements are unique
# `in` membership operator: strings, tuples, lists, dicts
print('zhangsan' in {'name': 'zhangsan', 'age': 18, 'height': '180cm'})  # False (tests keys, not values)
print('name' in {'name': 'zhangsan', 'age': 18, 'height': '180cm'})  # True
nums = [19, 82, 39, 12]
# Iterating with indices:
# enumerate is generally used with ordered sequences such as lists and tuples
for i, e in enumerate(nums):
    print('第%d个数据是%d' % (i, e))
# enumerate over a dict yields (index, key) pairs, not (key, value)
ll = {'name': 'zhangsan', 'age': 18, 'height': '180cm'}
for i,e in enumerate(ll):
    print('%s的值是%s' % (i,e))
|
<filename>cms_test2/migrations/0012_auto_20180412_1206.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-04-12 12:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.11, 2018-04-12).

    Adds the SeasonInfo / SeasonLangPack models, links albums to a season,
    and normalizes M2M / is_default fields across the *LangPack models.
    Do not edit by hand; create a new migration for further changes.
    """

    dependencies = [
        ('cms_test2', '0011_auto_20180412_1136'),
    ]

    operations = [
        # SeasonInfo is a bare join point: language-specific content lives in
        # SeasonLangPack rows that point back at it.
        migrations.CreateModel(
            name='SeasonInfo',
            fields=[
                ('season_info_id', models.AutoField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='SeasonLangPack',
            fields=[
                ('season_langpack_id', models.AutoField(primary_key=True, serialize=False)),
                ('is_default', models.PositiveIntegerField(choices=[(0, 'not default language'), (1, 'is default language')], default=0)),
                ('title', models.CharField(blank=True, max_length=100, null=True)),
                ('description', models.CharField(blank=True, max_length=1000, null=True)),
                ('icon', models.CharField(blank=True, max_length=300, null=True)),
                ('album_pic', models.CharField(blank=True, max_length=300, null=True)),
                ('actors', models.ManyToManyField(to='cms_test2.Actor')),
                ('directors', models.ManyToManyField(to='cms_test2.Director')),
                # SET_NULL keeps the lang pack row if its language/season is deleted
                ('language', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.Language')),
                ('season_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.SeasonInfo')),
            ],
        ),
        migrations.AlterField(
            model_name='album',
            name='genres',
            field=models.ManyToManyField(to='cms_test2.Genre'),
        ),
        migrations.AlterField(
            model_name='episodelangpack',
            name='actors',
            field=models.ManyToManyField(to='cms_test2.Actor'),
        ),
        migrations.AlterField(
            model_name='episodelangpack',
            name='directors',
            field=models.ManyToManyField(to='cms_test2.Director'),
        ),
        migrations.AlterField(
            model_name='episodelangpack',
            name='is_default',
            field=models.PositiveIntegerField(choices=[(0, 'not default language'), (1, 'is default language')], default=0),
        ),
        migrations.AlterField(
            model_name='episodelangpack',
            name='thumbnails',
            field=models.ManyToManyField(to='cms_test2.Thumbnail'),
        ),
        migrations.AlterField(
            model_name='movielangpack',
            name='actors',
            field=models.ManyToManyField(to='cms_test2.Actor'),
        ),
        migrations.AlterField(
            model_name='movielangpack',
            name='directors',
            field=models.ManyToManyField(to='cms_test2.Director'),
        ),
        migrations.AlterField(
            model_name='movielangpack',
            name='is_default',
            field=models.PositiveIntegerField(choices=[(0, 'not default language'), (1, 'is default language')], default=0),
        ),
        migrations.AlterField(
            model_name='movielangpack',
            name='thumbnails',
            field=models.ManyToManyField(to='cms_test2.Thumbnail'),
        ),
        migrations.AlterField(
            model_name='musiclangpack',
            name='composers',
            field=models.ManyToManyField(to='cms_test2.Composer'),
        ),
        migrations.AlterField(
            model_name='musiclangpack',
            name='is_default',
            field=models.PositiveIntegerField(choices=[(0, 'not default language'), (1, 'is default language')], default=0),
        ),
        migrations.AlterField(
            model_name='musiclangpack',
            name='lyricists',
            field=models.ManyToManyField(to='cms_test2.Lyricist'),
        ),
        migrations.AlterField(
            model_name='musiclangpack',
            name='recordlabels',
            field=models.ManyToManyField(to='cms_test2.RecordLabel'),
        ),
        migrations.AlterField(
            model_name='musiclangpack',
            name='thumbnails',
            field=models.ManyToManyField(to='cms_test2.Thumbnail'),
        ),
        migrations.AlterField(
            model_name='shortvideolangpack',
            name='is_default',
            field=models.PositiveIntegerField(choices=[(0, 'not default language'), (1, 'is default language')], default=0),
        ),
        migrations.AlterField(
            model_name='shortvideolangpack',
            name='thumbnails',
            field=models.ManyToManyField(to='cms_test2.Thumbnail'),
        ),
        migrations.AlterField(
            model_name='video',
            name='categories',
            field=models.ManyToManyField(to='cms_test2.Category'),
        ),
        migrations.AlterField(
            model_name='video',
            name='genres',
            field=models.ManyToManyField(to='cms_test2.Genre'),
        ),
        # Each album gets at most one season (OneToOne, nullable)
        migrations.AddField(
            model_name='album',
            name='season_info',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cms_test2.SeasonInfo'),
        ),
    ]
|
from django.forms import BaseForm
from django.forms.forms import BoundField
from django.forms.widgets import TextInput, CheckboxInput, CheckboxSelectMultiple, RadioSelect
from django.template import Context
from django.template.loader import get_template
from django import template
from django.conf import settings
# Asset URL configuration. Every value can be overridden individually in
# Django settings; the defaults chain off BOOTSTRAP_BASE_URL.
BOOTSTRAP_BASE_URL = getattr(settings, 'BOOTSTRAP_BASE_URL',
    'http://twitter.github.com/bootstrap/assets/'
)
BOOTSTRAP_JS_BASE_URL = getattr(settings, 'BOOTSTRAP_JS_BASE_URL',
    BOOTSTRAP_BASE_URL + 'js/'
)
# If set, BOOTSTRAP_JS_URL is a single bundled JS file used instead of the
# per-plugin files under BOOTSTRAP_JS_BASE_URL (see bootstrap_javascript_url).
BOOTSTRAP_JS_URL = getattr(settings, 'BOOTSTRAP_JS_URL',
    None
)
BOOTSTRAP_CSS_BASE_URL = getattr(settings, 'BOOTSTRAP_CSS_BASE_URL',
    BOOTSTRAP_BASE_URL + 'css/'
)
BOOTSTRAP_CSS_URL = getattr(settings, 'BOOTSTRAP_CSS_URL',
    BOOTSTRAP_CSS_BASE_URL + 'bootstrap.css'
)

# Template tag/filter registry for this module
register = template.Library()
@register.simple_tag
def bootstrap_stylesheet_url():
    """
    URL to the Bootstrap stylesheet (CSS), as configured by
    BOOTSTRAP_CSS_URL / BOOTSTRAP_CSS_BASE_URL in settings.
    """
    return BOOTSTRAP_CSS_URL
@register.simple_tag
def bootstrap_stylesheet_tag():
    """
    HTML <link> tag that pulls in the configured Bootstrap stylesheet.
    """
    href = bootstrap_stylesheet_url()
    return u'<link rel="stylesheet" href="%s">' % href
@register.simple_tag
def bootstrap_javascript_url(name):
    """
    URL to a Bootstrap javascript file.

    A global override (BOOTSTRAP_JS_URL, e.g. a single bundled file) takes
    precedence; otherwise the URL is built from the base URL and the
    plugin name.
    """
    override = BOOTSTRAP_JS_URL
    if override:
        return override
    return ''.join([BOOTSTRAP_JS_BASE_URL, 'bootstrap-', name, '.js'])
@register.simple_tag
def bootstrap_javascript_tag(name):
    """
    HTML <script> tag that pulls in the named bootstrap_toolkit javascript file.
    """
    src = bootstrap_javascript_url(name)
    return u'<script src="%s"></script>' % src
@register.filter
def as_bootstrap(form_or_field, layout='vertical,false'):
    """
    Render a field or a form according to Bootstrap guidelines.

    ``layout`` is a comma-separated string: the first item is the layout name
    and the optional second item is ``"float"`` to enable float styling.
    """
    # Use str.split directly instead of the module-level `split` template
    # filter (same behavior, no dependency on a helper defined further down).
    params = layout.split(",")
    layout = str(params[0]).lower()
    try:
        # Renamed from `float`, which shadowed the builtin.
        use_float = str(params[1]).lower() == "float"
    except IndexError:
        use_float = False
    if isinstance(form_or_field, BaseForm):
        return get_template("bootstrap_toolkit/form.html").render(
            Context({
                'form': form_or_field,
                'layout': layout,
                'float': use_float,
            })
        )
    elif isinstance(form_or_field, BoundField):
        return get_template("bootstrap_toolkit/field.html").render(
            Context({
                'field': form_or_field,
                'layout': layout,
                'float': use_float,
            })
        )
    else:
        # Not a form or field: fall back to Django's invalid-variable string
        return settings.TEMPLATE_STRING_IF_INVALID
@register.filter
def is_disabled(field):
    """
    Return True if the field is disabled, readonly or not marked as editable,
    False otherwise.

    ``widget.attrs`` is a plain dict, so the lookups must use ``dict.get``;
    the original ``getattr(attrs, ...)`` calls could never find the
    'readonly'/'disabled' keys and always returned the default False.
    """
    if not getattr(field.field, 'editable', True):
        return True
    if field.field.widget.attrs.get('readonly', False):
        return True
    if field.field.widget.attrs.get('disabled', False):
        return True
    return False
@register.filter
def is_enabled(field):
    """
    Logical negation of is_disabled: True when the field is editable and
    carries neither a readonly nor a disabled attribute.
    """
    if is_disabled(field):
        return False
    return True
@register.filter
def bootstrap_input_type(field):
    """
    Return the bootstrap input type to use for the field's widget.

    A widget may declare its own ``bootstrap_input_type``; otherwise the type
    is derived from the widget class, falling back to ``'default'``.
    """
    try:
        widget = field.field.widget
    except AttributeError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and masked unrelated bugs.
        raise ValueError("Expected a Field, got a %s" % type(field))
    input_type = getattr(widget, 'bootstrap_input_type', None)
    if input_type:
        # `unicode`: this module targets Python 2 (u'' literals throughout)
        return unicode(input_type)
    if isinstance(widget, TextInput):
        return u'text'
    if isinstance(widget, CheckboxInput):
        return u'checkbox'
    if isinstance(widget, CheckboxSelectMultiple):
        return u'multicheckbox'
    if isinstance(widget, RadioSelect):
        return u'radioset'
    return u'default'
@register.simple_tag
def active_url(request, url, output=u'active'):
    """
    Output `output` when `url` matches the request's current path, else ''.
    Useful for highlighting the active item in a nav bar.
    """
    return output if url == request.path else ''
@register.filter
def pagination(page, range=5):
    """
    Generate Bootstrap pagination links from a page object.

    `range` is the number of page links shown on each side of the current
    page, clipped to the valid page numbers. (Name kept for template
    compatibility even though it shadows the builtin.)
    """
    total = page.paginator.num_pages
    current = page.number
    context = Context({
        'page': page,
        'num_pages': total,
        'current_page': current,
        'range_min': max(current - range, 1),
        'range_max': min(current + range, total),
    })
    return get_template("bootstrap_toolkit/pagination.html").render(context)
@register.filter
def split(str, splitter):
    """
    Split a string on `splitter` and return the resulting list.

    NOTE(review): the parameter name `str` shadows the builtin; kept as-is
    because template filters are called positionally and renaming is not
    worth the churn.
    """
    return str.split(splitter)
|
<gh_stars>0
# FileName: Lesson 16
# Insurance Company Program
# Author: <NAME>
# Date: October 26, 2021
# Constants
HOME_POLICY = 400  # base premium for a home-only policy
AUTO_POLICY = 700  # base premium for an auto-only policy
BOTH_POLICY = 1000  # base premium for combined home + auto
RENEWAL_DIS = .10  # 10% for renewed policies
EXTRA_LIABILITY = 75  # flat charge for extra liability coverage
EXTRA_PERSON = 90  # flat charge for extra person coverage
CONTENT_INSURANCE = 110  # flat charge for content insurance
TAX_RATE = .15  # HST applied to the subtotal
INTEREST_RATE = 0.054  # annual interest rate for monthly-payment financing
PROCESSING_FEE = 47.95  # one-time processing fee added to the financed total
MONTH = 8  # 8 month payments
TERMS_DIS = 0.02  # 2% discount if the bill is paid in 10 days or less

# Imports
import datetime
import random
def As_Dollars(Number):
    """Format a numeric amount as a dollar string, e.g. 1234.5 -> '$1,234.50'."""
    return f"${Number:,.2f}"
Extra_Cost = 0  # running total of optional coverage charges; incremented below
Policy_Date = "2021-07-25"
Policy_Date = datetime.datetime.strptime(Policy_Date, "%Y-%m-%d").date()

# Customer details (hard-coded for this lesson exercise)
First_Name = "Michael"
Last_Name = "Wadden"
Street_Address = "44 Kenai Cresent"
City = "St.John's"
Province = "NL"
Postal_Code = "A1A5A5"
Home_Phone = "7097262539"
Cell_Phone = "7097432738"
Work_Phone = "7093641444"

# New policy or renewal (renewals get a discount)
while True:
    Policy = input("New Policy or Renewal? (N)ew or (R)enewal: ").upper()
    if Policy == "R":
        Policy_Message = "(10% reduction for policy renewal)"
        Discount = RENEWAL_DIS
        break
    elif Policy == "N":
        Policy_Message = ""
        Discount = 0
        break
    else:
        print("Invalid Input: Please Enter (N) for New or (R) for Renewal: ")

# Policy type determines the base premium
while True:
    Policy_Type = input("Select a Policy: (H)ome, (A)uto, or (B)oth: ").upper()
    if Policy_Type == "H":
        Base_Policy = HOME_POLICY
        break
    elif Policy_Type == "A":
        Base_Policy = AUTO_POLICY
        break
    elif Policy_Type == "B":
        Base_Policy = BOTH_POLICY
        break
    else:
        print("Invalid Input: Please Enter (H) for Home or (A) for Auto or (B) for Both: ")
print()
print("Input (Y) for Yes and (N) for No: On the Follow Extra Options:")
print()

# Optional add-ons: each Y adds a flat charge to Extra_Cost
while True:
    Liability = input("Extra Liability: ").upper()
    if Liability == "Y":
        Extra_Cost += EXTRA_LIABILITY
        break
    elif Liability == "N":
        break
    else:
        print("Invalid Input: Please Enter (Y) for Yes or (N) for No: ")
while True:
    Extra_Person_Coverage = input("Extra Person Coverage: ").upper()
    if Extra_Person_Coverage == "Y":
        Extra_Cost += EXTRA_PERSON
        break
    elif Extra_Person_Coverage == "N":
        break
    else:
        print("Invalid Input: Please Enter (Y) for Yes or (N) for No: ")
while True:
    Content = input("Content Insurance: ").upper()
    if Content == "Y":
        Extra_Cost += CONTENT_INSURANCE
        break
    elif Content == "N":
        break
    else:
        print("Invalid Input: Please Enter (Y) for Yes or (N) for No: ")

# Processing
Base_Policy = Base_Policy * (1-Discount)  # apply renewal discount (if any) to the base premium
Sub_Total = Base_Policy + Extra_Cost
Hst = Sub_Total * TAX_RATE
Policy_Total = Sub_Total + Hst
Term_Discount = Sub_Total * TERMS_DIS  # early-payment discount (pre-tax subtotal)
Interest = Policy_Total * INTEREST_RATE * (MONTH/12)  # simple interest over the payment term
Final_Total = Policy_Total + Interest + PROCESSING_FEE

# Date Processing
Ten_Days = Policy_Date + datetime.timedelta(days = 10)  # early-payment discount deadline
Forty_Five_Days = Policy_Date + datetime.timedelta(days = 45)  # pay-in-full deadline
First_Payment = Policy_Date + datetime.timedelta(days = 30)
Policy_date_Str = str(Policy_Date)
Random_Number = str(random.randint(100, 999))
Monthly_Payment = Final_Total / MONTH

# Policy Number: initials + policy year + random 3-digit suffix
Policy_Number = f"{First_Name[0]}{Last_Name[0]}-{Policy_date_Str[0:4]}-{Random_Number}"

# Output
print()
print(F"{'ONE STOP INSURANCE':30}{Policy_Date.strftime('%d-%b-%y'):>11}")
print(F"{'CUSTOMER POLICY SUMMARY':30}{Policy_Number:>11}")
print("-" * 41)
print(F"Client: {First_Name[0]}.{Last_Name}")
print(F"{' ' * 8}{Street_Address}")
print(F"{' ' * 8}{City}, {Province} {Postal_Code}")
print()
print(F"{'Policy base cost:':30}{As_Dollars(Base_Policy):>11}")
print(F" {Policy_Message}")
print(F"{'Extra cost:':30}{As_Dollars(Extra_Cost):>11}")
print(F"{'Subtotal:':30}{As_Dollars(Sub_Total):>11}")
print(F"{'HST:':30}{As_Dollars(Hst):>11}")
print(F"{' '* 30}{'-' * 9:>11}")
# NOTE(review): labelled "Policy total" but prints Final_Total (which already
# includes financing interest and the processing fee) — confirm intended.
print(F"{'Policy total':30}{As_Dollars(Final_Total):>11}")
print()
print("For Monthly payment customers:")
print(F" {'Monthly payment:':27}{As_Dollars(Monthly_Payment):>11}")
print(F" {'First payment date:':27}{First_Payment.strftime('%d-%b-%y'):>11}")
print()
print("For payment in full:")
print(F" {'Discount date:':27}{Ten_Days.strftime('%d-%b-%y'):>11}")
print(F" {'Discount amount:':27}{As_Dollars(Term_Discount):>11}")
print(F" {'Full payment date:':27}{Forty_Five_Days.strftime('%d-%b-%y'):>11}")
print("-" * 41)
print(" " * 4,'"ONE STOP - Insuring the world!"') |
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. _qubit_ops:
Qubit quantum operations
========================
.. currentmodule:: pennylane.ops.qubit
**Module name:** :mod:`pennylane.ops.qubit`
This section contains the available built-in discrete-variable
quantum operations supported by PennyLane, as well as their conventions.
Gates
-----
.. autosummary::
Hadamard
PauliX
PauliY
PauliZ
CNOT
CZ
SWAP
RX
RY
RZ
PhaseShift
Rot
QubitUnitary
State preparation
-----------------
.. autosummary::
BasisState
QubitStateVector
Code details
~~~~~~~~~~~~
"""
from pennylane.operation import Operation
class Hadamard(Operation):
    r"""Hadamard(wires)
    The Hadamard operator

    .. math:: H = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1\\ 1 & -1\end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 0
    num_wires = 1
    par_domain = None  # parameter-free gate, so no parameter domain


class PauliX(Operation):
    r"""PauliX(wires)
    The Pauli X operator

    .. math:: \sigma_x = \begin{bmatrix} 0 & 1 \\ 1 & 0\end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 0
    num_wires = 1
    par_domain = None  # parameter-free gate, so no parameter domain


class PauliY(Operation):
    r"""PauliY(wires)
    The Pauli Y operator

    .. math:: \sigma_y = \begin{bmatrix} 0 & -i \\ i & 0\end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 0
    num_wires = 1
    par_domain = None  # parameter-free gate, so no parameter domain


class PauliZ(Operation):
    r"""PauliZ(wires)
    The Pauli Z operator

    .. math:: \sigma_z = \begin{bmatrix} 1 & 0 \\ 0 & -1\end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 0
    num_wires = 1
    par_domain = None  # parameter-free gate, so no parameter domain
class CNOT(Operation):
    r"""CNOT(wires)
    The controlled-NOT operator

    .. math:: CNOT = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & 0 & 1\\
            0 & 0 & 1 & 0
        \end{bmatrix}.

    .. note:: The first wire provided corresponds to the **control qubit**.

    **Details:**

    * Number of wires: 2
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wires the operation acts on
    """
    num_params = 0
    num_wires = 2
    par_domain = None  # parameter-free gate, so no parameter domain


class CZ(Operation):
    r"""CZ(wires)
    The controlled-Z operator

    .. math:: CZ = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & 1 & 0\\
            0 & 0 & 0 & -1
        \end{bmatrix}.

    .. note:: The first wire provided corresponds to the **control qubit**.

    **Details:**

    * Number of wires: 2
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wires the operation acts on
    """
    num_params = 0
    num_wires = 2
    par_domain = None  # parameter-free gate, so no parameter domain
class SWAP(Operation):
    r"""SWAP(wires)
    The swap operator

    .. math:: SWAP = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 0 & 1 & 0\\
            0 & 1 & 0 & 0\\
            0 & 0 & 0 & 1
        \end{bmatrix}.

    .. note:: The SWAP matrix is symmetric in its two wires, so the order in
        which the wires are provided does not matter. (The previous note about
        a "control qubit" was copied from CNOT/CZ and did not apply here.)

    **Details:**

    * Number of wires: 2
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wires the operation acts on
    """
    num_params = 0
    num_wires = 2
    par_domain = None  # parameter-free gate, so no parameter domain
class RX(Operation):
    r"""RX(phi, wires)
    The single qubit X rotation

    .. math:: R_x(\phi) = e^{-i\phi\sigma_x/2} = \begin{bmatrix}
            \cos(\phi/2) & -i\sin(\phi/2) \\
            -i\sin(\phi/2) & \cos(\phi/2)
        \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}R_x(\phi) = \frac{1}{2}\left[R_x(\phi+\pi/2)+R_x(\phi-\pi/2)\right]`

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = 'R'  # real-valued rotation angle
    grad_method = 'A'  # analytic gradient, via the recipe above


class RY(Operation):
    r"""RY(phi, wires)
    The single qubit Y rotation

    .. math:: R_y(\phi) = e^{-i\phi\sigma_y/2} = \begin{bmatrix}
            \cos(\phi/2) & -\sin(\phi/2) \\
            \sin(\phi/2) & \cos(\phi/2)
        \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}R_y(\phi) = \frac{1}{2}\left[R_y(\phi+\pi/2)+R_y(\phi-\pi/2)\right]`

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = 'R'  # real-valued rotation angle
    grad_method = 'A'  # analytic gradient, via the recipe above


class RZ(Operation):
    r"""RZ(phi, wires)
    The single qubit Z rotation

    .. math:: R_z(\phi) = e^{-i\phi\sigma_z/2} = \begin{bmatrix}
            e^{-i\phi/2} & 0 \\
            0 & e^{i\phi/2}
        \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}R_z(\phi) = \frac{1}{2}\left[R_z(\phi+\pi/2)+R_z(\phi-\pi/2)\right]`

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = 'R'  # real-valued rotation angle
    grad_method = 'A'  # analytic gradient, via the recipe above


class PhaseShift(Operation):
    r"""PhaseShift(phi, wires)
    Arbitrary single qubit local phase shift

    .. math:: R_\phi(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
            1 & 0 \\
            0 & e^{i\phi}
        \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 1
    * Gradient recipe: :math:`\frac{d}{d\phi}R_\phi(\phi) = \frac{1}{2}\left[R_\phi(\phi+\pi/2)+R_\phi(\phi-\pi/2)\right]`

    Args:
        phi (float): rotation angle :math:`\phi`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 1
    num_wires = 1
    par_domain = 'R'  # real-valued rotation angle
    grad_method = 'A'  # analytic gradient, via the recipe above
class Rot(Operation):
    r"""Rot(phi, theta, omega, wires)
    Arbitrary single qubit rotation

    .. math:: R(\phi,\theta,\omega) = RZ(\omega)RY(\theta)RZ(\phi)= \begin{bmatrix}
            e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2) \\
            e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
        \end{bmatrix}.

    **Details:**

    * Number of wires: 1
    * Number of parameters: 3
    * Gradient recipe: :math:`\frac{d}{d\phi}R(\phi) = \frac{1}{2}\left[R(\phi+\pi/2)+R(\phi-\pi/2)\right]`.
      This gradient recipe applies for each angle argument :math:`\{\phi, \theta, \omega\}`.

    (The docstring previously claimed 1 parameter, contradicting
    ``num_params = 3`` and the three angle arguments.)

    Args:
        phi (float): rotation angle :math:`\phi`
        theta (float): rotation angle :math:`\theta`
        omega (float): rotation angle :math:`\omega`
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_params = 3
    num_wires = 1
    par_domain = 'R'  # real-valued rotation angles
    grad_method = 'A'  # analytic gradient, via the recipe above
#=============================================================================
# Arbitrary operations
#=============================================================================
class QubitUnitary(Operation):
    r"""QubitUnitary(U, wires)
    Apply an arbitrary unitary matrix

    **Details:**

    * Number of wires: None (applied to the entire system)
    * Number of parameters: 1
    * Gradient recipe: None (uses finite difference)

    Args:
        U (array[complex]): square unitary matrix
        wires (Sequence[int] or int): the wire(s) the operation acts on
    """
    num_params = 1
    num_wires = 0  # 0 encodes "no fixed wire count" per the docstring above
    par_domain = 'A'  # array-valued parameter (the unitary matrix)
    grad_method = 'F'  # finite-difference gradient
#=============================================================================
# State preparation
#=============================================================================
class BasisState(Operation):
    r"""BasisState(n, wires)
    Prepares a single computational basis state.

    **Details:**

    * Number of wires: None (applied to the entire system)
    * Number of parameters: 1
    * Gradient recipe: None (integer parameters not supported)

    Args:
        n (array): prepares the basis state :math:`\ket{n}`, where ``n`` is an
            array of integers from the set :math:`\{0, 1\}`, i.e.,
            if ``n = np.array([0, 1, 0])``, prepares the state :math:`|010\rangle`.
        wires (Sequence[int] or int): the wire(s) the operation acts on
    """
    num_params = 1
    num_wires = 0  # 0 encodes "no fixed wire count" per the docstring above
    par_domain = 'A'  # array-valued parameter (the basis-state bit array)
    grad_method = None  # not differentiable: integer parameters
class QubitStateVector(Operation):
    r"""QubitStateVector(state, wires)
    Prepare subsystems using the given ket vector in the Fock basis.

    **Details:**

    * Number of wires: None (applied to the entire system)
    * Number of parameters: 1
    * Gradient recipe: None (uses finite difference)

    Args:
        state (array[complex]): a state vector of size 2**len(wires)
        wires (Sequence[int] or int): the wire(s) the operation acts on
    """
    num_params = 1
    num_wires = 0  # 0 encodes "no fixed wire count" per the docstring above
    par_domain = 'A'  # array-valued parameter (the state vector)
    grad_method = 'F'  # finite-difference gradient
# All operations exported by this module (gates first, then state
# preparations); __all__ is derived from it so `import *` exposes exactly
# these classes.
all_ops = [
    Hadamard,
    PauliX,
    PauliY,
    PauliZ,
    CNOT,
    CZ,
    SWAP,
    RX,
    RY,
    RZ,
    PhaseShift,
    Rot,
    BasisState,
    QubitStateVector,
    QubitUnitary
]

__all__ = [cls.__name__ for cls in all_ops]
|
<filename>pretraining/train_vocab/train_vocab.py<gh_stars>1-10
"""
Author: bugface https://github.com/bugface
The script is based on Google's SentencePiece to train a vocab from local corpous
see more details at https://github.com/google/sentencepiece
note: training with a large corpus may take TB-level of RAM; sentencepiece can limit input number of sentences, we did not use
in this project.
"""
import sentencepiece as spm
from pathlib import Path
import argparse
def read_text(fn):
    """Read the whole file at `fn` and return its contents with surrounding whitespace stripped."""
    # Explicit UTF-8 so vocab/corpus files read identically on every platform
    # (the default encoding is locale-dependent).
    with open(fn, "r", encoding="utf-8") as f:
        return f.read().strip()
def write_text(text, fn):
    """Write `text` to the file at `fn`, overwriting any existing content."""
    # Explicit UTF-8 to match read_text and avoid locale-dependent output.
    with open(fn, "w", encoding="utf-8") as f:
        f.write(text)
def main(args):
    """Train a SentencePiece BPE vocab and convert it to a BERT-style vocab.txt.

    args:
        args: argparse.Namespace with input, prefix, output, bert_header,
            vocab_size, threads, lower_case (see the argument parser below).
    """
    mn = args.prefix
    output = args.output  # was `args.outpu`, which raised AttributeError
    data = args.input
    bert_head = args.bert_header
    pref = f"{output}/{mn}"
    vsz = args.vocab_size

    p = Path(f"{pref}")
    p.mkdir(parents=True, exist_ok=True)

    # nmt_nfkc_cf additionally case-folds (lower-cases) all input text.
    if args.lower_case:
        rule = 'nmt_nfkc_cf'
    else:
        rule = 'nmt_nfkc'

    # NOTE: every option needs a trailing space before the next literal; the
    # original was missing spaces after --self_test_sample_size through
    # --hard_vocab_limit, fusing them into one invalid flag.
    spm.SentencePieceTrainer.Train(
        f'--input={data} '
        '--input_format=text '
        f'--model_prefix={pref}/{mn} '
        f'--vocab_size={vsz} '
        f'--normalization_rule_name={rule} '
        '--character_coverage=0.9999 '
        '--model_type=bpe '
        '--train_extremely_large_corpus=true '
        '--self_test_sample_size=100 '
        '--max_sentencepiece_length=128 '
        '--max_sentence_length=33536 '
        '--hard_vocab_limit=false '
        f'--num_threads={args.threads}'
    )

    # Convert the SentencePiece vocab to BERT wordpiece conventions: drop the
    # special tokens (the BERT header supplies its own), strip the leading
    # "▁" word-start marker, and prefix continuation pieces with "##".
    bert_header = read_text(bert_head).strip().split("\n")
    exclude = {'[UNK]', "[CLS]", "[SEP]", "[PAD]", "[MASK]", "<unk>", "<s>", "</s>", "<pad>", "<cls>", "<sep>"}
    nv = [each.split("\t")[0] for each in read_text(pref + f"/{mn}.vocab").strip().split("\n")]
    nv = [each for each in nv if each not in exclude]
    nnv = [each.replace("▁", "") if each.startswith("▁") else "##" + each for each in nv]
    bert_vocab = bert_header + nnv

    # Write the final BERT-compatible vocab, one token per line.
    with open(p / "vocab.txt", "w") as f:
        f.write("\n".join(bert_vocab))
if __name__ == '__main__':
    # Command-line interface; `main` consumes this namespace directly.
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, required=True,
                        help="input text file for training vocab")
    parser.add_argument("--prefix", type=str, required=True,
                        help="the prefix of the output file - vocab name")
    parser.add_argument("--output", type=str, default="./GatorTron/vocabs",
                        help="output path for trained vocab files")
    parser.add_argument("--bert_header", type=str, default="./bert_vocab_head.txt",
                        help="the standard bert vocab special tags - like [CLS] [SEP] [PAD] [unused1-99]")
    parser.add_argument("--vocab_size", default=32000, type=int,
                        help="targeted vocab size")
    parser.add_argument("--threads", default=32, type=int,
                        help="number of threads used for training")
    parser.add_argument("--lower_case", action='store_true',
                        help="set training to use lower case for all text")
    global_args = parser.parse_args()
main(global_args) |
import pass_pipeline as ppipe
import passes as p
def diagnostic_passlist():
    """Build the PassList of diagnostic passes (definite initialization,
    unreachable-code diagnosis, dataflow diagnostics, etc.)."""
    passes = [
        p.CapturePromotion,
        p.AllocBoxToStack,
        p.InOutDeshadowing,
        p.NoReturnFolding,
        p.DefiniteInitialization,
        p.PredictableMemoryOptimizations,
        p.DiagnosticConstantPropagation,
        p.DiagnoseUnreachable,
        p.EmitDFDiagnostics,
        p.SplitNonCondBrCriticalEdges,
    ]
    return ppipe.PassList(passes)
def simplifycfg_silcombine_passlist():
    """CFG simplification sandwiching a SILCombine run."""
    return ppipe.PassList([p.SimplifyCFG, p.SILCombine, p.SimplifyCFG])
def highlevel_loopopt_passlist():
    """High-level loop optimizations: scalar cleanup first, then loop
    rotation and the array-specific transformations."""
    # Lower aggregates into scalars so SROA/Mem2Reg can take them apart.
    scalar_cleanup = [
        p.LowerAggregateInstrs,
        p.SILCombine,
        p.SROA,
        p.Mem2Reg,
        p.DCE,
        p.SILCombine,
        simplifycfg_silcombine_passlist(),
    ]
    # Rotate loops, then run redundancy elimination and array optimizations.
    loop_opts = [
        p.LoopRotate,
        p.DCE,
        p.CSE,
        p.SILCombine,
        p.SimplifyCFG,
        p.ABCOpt,
        p.DCE,
        p.COWArrayOpts,
        p.DCE,
        p.SwiftArrayOpts,
    ]
    return ppipe.PassList(scalar_cleanup + loop_opts)
def lowlevel_loopopt_passlist():
    """Late loop optimizations: LICM followed by scalar cleanup."""
    passes = [p.LICM, p.DCE, p.CSE, p.SILCombine, p.SimplifyCFG]
    return ppipe.PassList(passes)
def inliner_for_optlevel(optlevel):
    """Return the inliner pass for the given optimization level.

    args:
        optlevel: one of 'high', 'mid' or 'low'.

    raises:
        RuntimeError: if optlevel is not a known level; the offending value
            is included in the message (the original message omitted it,
            making misconfigurations hard to diagnose).
    """
    if optlevel == 'high':
        return p.EarlyInliner
    if optlevel == 'mid':
        return p.PerfInliner
    if optlevel == 'low':
        return p.LateInliner
    raise RuntimeError('Unknown opt level: %r' % (optlevel,))
def ssapass_passlist(optlevel):
    """The main SSA optimization sequence; `optlevel` selects which inliner
    (early/perf/late) runs in the devirtualize-and-inline stage."""
    scalar_opts = [
        simplifycfg_silcombine_passlist(),
        p.AllocBoxToStack,
        p.CopyForwarding,
        p.LowerAggregateInstrs,
        p.SILCombine,
        p.SROA,
        p.Mem2Reg,
        p.PerformanceConstantPropagation,
        p.DCE,
        p.CSE,
        p.SILCombine,
        simplifycfg_silcombine_passlist(),
    ]
    memory_and_arc = [
        p.GlobalLoadStoreOpts,
        p.CodeMotion,  # Need to add proper argument here
        p.GlobalARCOpts,
    ]
    devirt_and_inline = [
        p.SpeculativeDevirtualizer,
        p.SILLinker,
        inliner_for_optlevel(optlevel),
        p.SimplifyCFG,
        p.CodeMotion,
        p.GlobalARCOpts,
    ]
    return ppipe.PassList(scalar_opts + memory_and_arc + devirt_and_inline)
def lower_passlist():
    """Whole-module lowering passes (run to a fixed point by the 'Lower'
    pipeline in normal_passpipelines)."""
    passes = [
        p.DeadFunctionElimination,
        p.DeadObjectElimination,
        p.GlobalOpt,
        p.CapturePropagation,
        p.ClosureSpecializer,
        p.SpeculativeDevirtualizer,
        p.FunctionSignatureOpts,
    ]
    return ppipe.PassList(passes)
def normal_passpipelines():
    """Construct the standard sequence of optimization pipelines."""
    def _pipeline(name, action, pass_arg):
        # Each pipeline gets exactly one addPass() call, mirroring how the
        # pipelines were originally assembled.
        pipeline = ppipe.PassPipeline(name, action)
        pipeline.addPass(pass_arg)
        return pipeline

    return [
        _pipeline('HighLevel', {'name': 'run_n_times', 'count': 2},
                  ssapass_passlist('high')),
        _pipeline('EarlyLoopOpt', {'name': 'run_n_times', 'count': 1},
                  highlevel_loopopt_passlist()),
        _pipeline('MidLevelOpt', {'name': 'run_n_times', 'count': 2},
                  ssapass_passlist('mid')),
        _pipeline('Lower', {'name': 'run_to_fixed_point'},
                  lower_passlist()),
        _pipeline('LowLevel', {'name': 'run_n_times', 'count': 1},
                  ssapass_passlist('low')),
        _pipeline('LateLoopOpt', {'name': 'run_n_times', 'count': 1},
                  [lowlevel_loopopt_passlist(), p.DeadFunctionElimination]),
    ]
|
<reponame>heurezjusz/Athena
"""
Dataset - set (list) of configs given to algorithm as an input.
"datasets" is a dictionary from algorithm shortcut to list of available
datasets.
Do not forget to update help message after changing!
"""
datasets = {
"sender": [[(0.3, 0.75)],
[(0.02, 1.0), (0.04, 1.0), (0.06, 1.0), (0.08, 1.0), (0.1, 1.0),
(0.12, 1.0), (0.14, 1.0), (0.16, 1.0), (0.18, 1.0), (0.2, 1.0),
(0.22, 1.0), (0.24, 1.0), (0.26, 1.0), (0.28, 1.0), (0.3, 1.0),
(0.325, 1.0), (0.35, 1.0), (0.375, 1.0), (0.4, 1.0),
(0.45, 1.0), (0.5, 1.0), (0.55, 1.0), (0.6, 1.0), (0.7, 1.0),
(0.8, 1.0), (0.9, 1.0)],
[(0.02, 0.75), (0.04, 0.75), (0.06, 0.75), (0.08, 0.75),
(0.1, 0.75), (0.12, 0.75), (0.14, 0.75), (0.16, 0.75),
(0.18, 0.75), (0.2, 0.75), (0.22, 0.75), (0.24, 0.75),
(0.26, 0.75), (0.28, 0.75), (0.3, 0.75), (0.325, 0.75),
(0.35, 0.75), (0.375, 0.75), (0.4, 0.75), (0.45, 0.75),
(0.5, 0.75), (0.55, 0.75), (0.6, 0.75), (0.7, 0.75),
(0.8, 0.75), (0.9, 0.75)],
[(0.02, 0.5), (0.04, 0.5), (0.06, 0.5), (0.08, 0.5), (0.1, 0.5),
(0.12, 0.5), (0.14, 0.5), (0.16, 0.5), (0.18, 0.5), (0.2, 0.5),
(0.22, 0.5), (0.24, 0.5), (0.26, 0.5), (0.28, 0.5), (0.3, 0.5),
(0.325, 0.5), (0.35, 0.5), (0.375, 0.5), (0.4, 0.5),
(0.45, 0.5), (0.5, 0.5), (0.55, 0.5), (0.6, 0.5), (0.7, 0.5),
(0.8, 0.5), (0.9, 0.5)],
[(a / 99., 1.) for a in xrange(100)],
[(a / 99., 0.75) for a in xrange(100)]],
"sender2": [[(0.3, 0.75)],
[(0.02, 1.0), (0.04, 1.0), (0.06, 1.0), (0.08, 1.0),
(0.1, 1.0), (0.12, 1.0), (0.14, 1.0), (0.16, 1.0),
(0.18, 1.0), (0.2, 1.0), (0.22, 1.0), (0.24, 1.0),
(0.26, 1.0), (0.28, 1.0), (0.3, 1.0), (0.325, 1.0),
(0.35, 1.0), (0.375, 1.0), (0.4, 1.0), (0.45, 1.0),
(0.5, 1.0), (0.55, 1.0), (0.6, 1.0), (0.7, 1.0), (0.8, 1.0),
(0.9, 1.0)],
[(0.02, 0.75), (0.04, 0.75), (0.06, 0.75), (0.08, 0.75),
(0.1, 0.75), (0.12, 0.75), (0.14, 0.75), (0.16, 0.75),
(0.18, 0.75), (0.2, 0.75), (0.22, 0.75), (0.24, 0.75),
(0.26, 0.75), (0.28, 0.75), (0.3, 0.75), (0.325, 0.75),
(0.35, 0.75), (0.375, 0.75), (0.4, 0.75), (0.45, 0.75),
(0.5, 0.75), (0.55, 0.75), (0.6, 0.75), (0.7, 0.75),
(0.8, 0.75), (0.9, 0.75)],
[(0.02, 0.5), (0.04, 0.5), (0.06, 0.5), (0.08, 0.5),
(0.1, 0.5), (0.12, 0.5), (0.14, 0.5), (0.16, 0.5),
(0.18, 0.5), (0.2, 0.5), (0.22, 0.5), (0.24, 0.5),
(0.26, 0.5), (0.28, 0.5), (0.3, 0.5), (0.325, 0.5),
(0.35, 0.5), (0.375, 0.5), (0.4, 0.5), (0.45, 0.5),
(0.5, 0.5), (0.55, 0.5), (0.6, 0.5), (0.7, 0.5), (0.8, 0.5),
(0.9, 0.5)],
[(x / 100., 1.) for x in xrange(1, 21)],
[(a / 99., 1.) for a in xrange(100)],
[(a / 99., 0.75) for a in xrange(100)]],
"rat": [[0.5],
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
[0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5],
[0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2,
0.22, 0.24, 0.26, 0.28, 0.3, 0.325, 0.35, 0.375, 0.4, 0.45,
0.5, 0.55, 0.6, 0.7, 0.8, 0.9],
[x / 50. for x in xrange(1, 30)] +
[x / 40. for x in xrange(24, 40)],
[x / 100. for x in xrange(1, 21)]],
"rat2": [[0.5],
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
[0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5],
[0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2,
0.22, 0.24, 0.26, 0.28, 0.3, 0.325, 0.35, 0.375, 0.4, 0.45,
0.5, 0.55, 0.6, 0.7, 0.8, 0.9],
[x / 50. for x in xrange(1, 30)] +
[x / 40. for x in xrange(24, 40)]],
"filters": [[(0.3, 1, (5, 75, 75))],
[(x / 10., 1, (5, 75, 75)) for x in xrange(1, 10)],
[(x / 20., 1, (5, 75, 75)) for x in xrange(1, 10)]],
"derest": [[0.5],
[(x / 10.) for x in xrange(1, 10)],
[(x / 30.) for x in xrange(1, 20)]]
}
|
<filename>ResoFit/data/IPTS_20784/ipts_20784_AgI.py
from ResoFit.calibration import Calibration
from ResoFit.fitresonance import FitResonance
from ResoFit.experiment import Experiment
import matplotlib.pyplot as plt
import numpy as np
import pprint
from ResoFit._utilities import get_foil_density_gcm3
from ResoFit._utilities import Layer
import lmfit
# Global parameters
# Energy window and step used for the resonance simulation.
energy_min = 3
energy_max = 200
energy_step = 0.01
database = 'ENDF_VIII'
# Input sample name or names as str, case sensitive
# The sample: a stack of Ag and I layers, 0.0635 mm each.
layers = Layer()
layers.add_layer(layer='Ag', thickness_mm=0.0635)
layers.add_layer(layer='I', thickness_mm=0.0635)
folder = 'data/IPTS_20784/reso_data_20784'
# data_file2 = 'spheres_background_1.csv'
spectra_file = 'Ta_lead_10mm__0__040_Spectra.txt'
# data_file = 'AgI.csv'
data_file = 'AgI_pellets_all.csv'
image_start = None  # Can be omitted or =None
image_end = None  # Can be omitted or =None
# norm_to_file = 'blank_region.csv'
norm_to_file = 'blank_pellets_all.csv'
# Baseline (background) removal: polynomial of degree 3.
baseline = True
baseline_deg = 3
each_step = False
norm_factor = 1
# Flight path and timing offset; commented values are earlier calibrations.
source_to_detector_m = 16.5  # 16#16.445359069030175#16.447496101100739
offset_us = 0  # 0#2.7120797253959119#2.7355447625559037
# x_type = 'lambda'
# x_type = 'energy'
x_type = 'number'
# x_type = 'time'
# y_type = 'transmission'
y_type = 'attenuation'
# Calibrate the peak positions
# Pair the measured spectra with the simulated resonance signature for the
# Ag/I layer stack defined above.
calibration = Calibration(data_file=data_file,
                          spectra_file=spectra_file,
                          layer=layers,
                          energy_min=energy_min,
                          energy_max=energy_max,
                          energy_step=energy_step,
                          folder=folder,
                          exp_source_to_detector_m=source_to_detector_m,
                          exp_offset_us=offset_us,
                          database=database,
                          baseline=baseline,
                          baseline_deg=baseline_deg,
                          x_type=x_type,
                          y_type=y_type
                          )
# Normalize against the blank (open-beam) measurement, then crop the image range.
calibration.experiment.norm_to(file=norm_to_file, norm_factor=norm_factor)
calibration.experiment.slice(start=image_start, end=image_end)
# Fit flight path and timing offset ('all') so measured peaks align with theory.
calibrate_result = calibration.calibrate(source_to_detector_m=source_to_detector_m,
                                         offset_us=offset_us,
                                         vary='all',
                                         each_step=each_step)
# Locate peaks in both the experiment and the simulated map
# (thres_* are detection thresholds, min_dist_* minimum peak separations).
calibration.index_peak(thres_exp=0.12, min_dist_exp=20, min_dist_map=15, thres_map=0.12, rel_tol=0.01)
# calibration.analyze_peak(report=False, fit_model='Lorentzian') # ['Gaussian', 'Lorentzian']
# calibration.export(y_type='attenuation',
#                    # y_type='transmission',
#                    x_type='energy',)
calibration.plot(y_type=y_type,
                 x_type=x_type,
                 # t_unit='ns',
                 # before=True,
                 # interp=True,
                 mixed=True,
                 table=True,
                 peak_exp='all',
                 peak_height=True,
                 index_level='ele',
                 # peak_id='all',
                 logx=False,
                 )
# plt.xlim(left=0, right=400)
plt.show()
# calibration = Calibration(data_file=data_file,
# spectra_file=spectra_file,
# layer=layers,
# energy_min=energy_min,
# energy_max=energy_max,
# energy_step=energy_step,
# folder=folder,
# baseline=baseline)
#
# calibration.experiment.norm_to(norm_to_file, norm_factor=norm_factor)
# calibration.experiment.slice(start=image_start, end=image_end)
# calibrate_result = calibration.calibrate(source_to_detector_m=source_to_detector_m,
# offset_us=offset_us,
# vary='all',
# # vary='source_to_detector',
# each_step=each_step)
# calibration.index_peak(thres_exp=0.05, min_dist_exp=2, min_dist_map=5, thres_map=0.05)
# # calibration.analyze_peak()
# calibration.experiment.plot()
# calibration.plot(y_type='attenuation',
# # y_type='transmission',
# x_type='energy',
# # t_unit='ms',
# # before=True,
# # interp=True,
# # mixed=True,
# # peak_exp='all',
# table=False,
# # peak_exp='indexed',
# peak_height=False,
# index_level='ele',
# peak_id='indexed',
# logx=False,
# )
# plt.xlim(left=1, right=100)
# plt.show()
#
# calibration.export(y_type='attenuation',
# # y_type='transmission',
# x_type='energy',
# # t_unit='ms',
# # before=True,
# # interp=True,
# # mixed=True,
# # peak_exp='all',
# # peak_exp='indexed',
# index_level='ele',
# peak_id='indexed',
# )
# # Fit the peak height
# fit = FitResonance(spectra_file=spectra_file,
# data_file=data_file,
# folder=folder,
# repeat=repeat,
# energy_min=energy_min,
# energy_max=energy_max,
# energy_step=energy_step,
# calibrated_offset_us=calibration.calibrated_offset_us,
# calibrated_source_to_detector_m=calibration.calibrated_source_to_detector_m,
# norm_to_file=norm_to_file,
# slice_start=image_start,
# slice_end=image_end,
# baseline=baseline)
# fit_result = fit.fit(layer, vary=fit_vary, each_step=each_step)
# fit.molar_conc()
# fit.index_peak(thres=0.15, min_dist=25)
# # fit.fit_iso(layer=layer_2)
# fit.plot(peak_id='all', interp=False)
# # fit.export('Exp_Gd_150_um.csv')
|
<filename>test/pytorch_backend/pytorch_tensor.py<gh_stars>1000+
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from crypten import CrypTensor, register_cryptensor
@register_cryptensor("ptt")
class PyTorchTensor(CrypTensor):
"""
CrypTensor class that uses plaintext PyTorch tensors as underlying backend.
This class should be used for testing purposes.
"""
def __init__(self, tensor, device=None, *args, **kwargs):
# take required_grad from kwargs, input tensor, or set to False:
default = tensor.requires_grad if torch.is_tensor(tensor) else False
requires_grad = kwargs.pop("requires_grad", default)
# call CrypTensor constructor:
super().__init__(requires_grad=requires_grad)
if device is None:
device = torch.device("cpu")
if not torch.is_tensor(tensor):
tensor = torch.tensor(tensor, device=device)
else:
tensor = tensor.detach().to(device=device)
tensor.requires_grad = False
self._tensor = tensor
def get_plain_text(self):
return self._tensor
def shallow_copy(self):
result = PyTorchTensor([])
result._tensor = self._tensor
return result
def clone(self):
result = PyTorchTensor([])
result._tensor = self._tensor.clone()
return result
def copy_(self, other):
"""Copies value of other PyTorchTensor into this PyTorchTensor."""
assert isinstance(other, PyTorchTensor), "other must be PyTorchTensor"
self._tensor = other._tensor
def add(self, tensor):
result = self.clone()
tensor = tensor._tensor if hasattr(tensor, "_tensor") else tensor
result._tensor = result._tensor + tensor
return result
def neg(self):
result = self.clone()
result._tensor.neg_()
return result
def mul(self, tensor):
result = self.clone()
tensor = tensor._tensor if hasattr(tensor, "_tensor") else tensor
result._tensor = result._tensor * tensor
return result
def div(self, tensor):
result = self.clone()
tensor = tensor._tensor if hasattr(tensor, "_tensor") else tensor
result._tensor = result._tensor / tensor
return result
def matmul(self, tensor):
result = self.clone()
tensor = tensor._tensor if hasattr(tensor, "_tensor") else tensor
result._tensor = result._tensor @ tensor
return result
def conv1d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = kernel._tensor if hasattr(kernel, "_tensor") else kernel
result._tensor = torch.nn.functional.conv1d(
result._tensor, kernel, *args, **kwargs
)
return result
def conv2d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = kernel._tensor if hasattr(kernel, "_tensor") else kernel
result._tensor = torch.nn.functional.conv2d(
result._tensor, kernel, *args, **kwargs
)
return result
def conv_transpose1d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = kernel._tensor if hasattr(kernel, "_tensor") else kernel
result._tensor = torch.nn.functional.conv_transpose1d(
result._tensor, kernel, *args, **kwargs
)
return result
def conv_transpose2d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = kernel._tensor if hasattr(kernel, "_tensor") else kernel
result._tensor = torch.nn.functional.conv_transpose2d(
result._tensor, kernel, *args, **kwargs
)
return result
def avg_pool2d(self, kernel_size, stride=None, padding=0):
result = self.clone()
result._tensor = torch.nn.functional.avg_pool2d(
result._tensor, kernel_size, stride=stride, padding=padding
)
return result
@property
def dtype(self):
return self._tensor.dtype
def _ltz(self):
"""Returns 1 for elements that are < 0 and 0 otherwise"""
result = self.clone()
result._tensor = result._tensor.lt(0).to(self.dtype)
return result
@staticmethod
def rand(*sizes, device=None):
"""
Returns a tensor with elements uniformly sampled in [0, 1). The uniform
random samples are generated by generating random bits using fixed-point
encoding and converting the result to an ArithmeticSharedTensor.
"""
if device is None:
device = torch.device("cpu")
return PyTorchTensor(torch.rand(*sizes, device=device))
|
"""
Plot the performance on the test set as in Figures 6, 12, 18.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import config
import utils
import matplotlib
from matplotlib import pyplot
import numpy as np
from matplotlib.ticker import FixedLocator, NullFormatter
# LaTeX preamble shared by the interactive text renderer and the pgf export
# backend, so on-screen and exported figures typeset identically.
preamble = (
    r'\usepackage{amsmath}'
    r'\usepackage{amssymb}'
    r'\newcommand{\vekt}[1]{\mbox{$\boldsymbol{#1}$}}'
)
matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'serif',
    'text.usetex': True,
    'text.latex.preamble': preamble,
    'pgf.preamble': preamble,
    'pgf.rcfonts': False,
    'font.size': 8
})
#https://timodenk.com/blog/exporting-matplotlib-plots-to-latex/
#https://timodenk.com/blog/exporting-matplotlib-plots-to-latex/
## In case we don't want to plot all L for visibility
L_max = [20,20,20]
dataset = 'test'
df = utils.load_error_table(dataset)
fig, axes = pyplot.subplots(1,len(config.components))
fig.suptitle('')
legends = []
cmap = pyplot.get_cmap('tab10')
keys = [F'eps_pod{m.lower()}_sq' for m in list(utils.models.keys()) + ['']]
for idx_ax, component in enumerate(config.components):
    L = config.num_basis[component]
    ls = [l for l in range(L + 1)]
    df_filtered = df.loc[(df['component'] == component)]
    markers = ['1', '2', '3', '4']
    for i, key in enumerate(keys):
        # RMSE over the test set for each basis size l.
        # (Removed the unused `mean` of per-sample root errors — it was
        # computed on every iteration and never read.)
        mean_sq = np.array([df_filtered.loc[(df_filtered['l'] == l)][key].mean() for l in ls])
        rmse = np.sqrt(mean_sq)
        axes[idx_ax].plot(ls, rmse, color=cmap(i), marker=markers[i], markersize=6, linewidth=1, markevery=5)
    axes[idx_ax].set_yscale('log')
    axes[idx_ax].set_ylim([None, 1])
    axes[idx_ax].set_xlim([0, L_max[idx_ax]])
    #axes[idx_ax].set_xlabel(r'$L_{}$'.format(component))
    axes[idx_ax].set_xlabel(r'$L$')
    axes[idx_ax].grid(which='major', axis='y', linestyle=':')
    axes[idx_ax].grid(which='major', axis='x', linestyle=':')
    axes[idx_ax].title.set_text(r'${}$'.format([r'\vekt{u}', r'p', r'T'][idx_ax]))
    # all 10 integer powers between min and max
    # NOTE(review): `rmse` here is the curve of the LAST model plotted, so the
    # y-limits derive from that curve only — presumably intentional, confirm.
    displayed_data = rmse[:L_max[idx_ax]]
    exp_min = np.floor(np.log10(np.min(displayed_data)))
    exp_max = 0  # np.log10(np.max(rmse))
    axes[idx_ax].set_ylim([10**exp_min, 10**exp_max])
    # MAJOR
    exps_major = np.arange(exp_min, exp_max + 1)
    axes[idx_ax].yaxis.set_major_locator(FixedLocator(10**exps_major))
    # MINOR
    axes[idx_ax].yaxis.set_minor_formatter(NullFormatter())
    axes[idx_ax].yaxis.set_ticks_position('both')
    axes[idx_ax].tick_params(axis='y', direction="in", which='both')
# Build the shared legend from the first subplot's line handles; label order
# matches the order the curves were plotted in (`keys` above).
labels = [r'${\varepsilon}' + r'_\text{{{}}}'.format(key) + r'(\mathbb{P}_{te})$' for key in [r'POD-RBF', 'POD-GPR', 'POD-ANN', 'POD'] ]
lines = axes[0].get_lines()
fig.legend(lines, labels, ncol = 4, loc="lower center")
# Fixed figure geometry (inches) for inclusion in the paper.
fig.subplots_adjust(bottom=0.35, top=0.90, left=0.10, right=0.95, wspace=0.35, hspace=0.20)
fig.set_size_inches(w=6.3, h=2.3)
pyplot.show()
|
<filename>will/sockets.py
#!/usr/bin/env python
#
# Courtesy of https://blog.miguelgrinberg.com/post/easy-websockets-with-flask-and-gevent
#
from flask_socketio import SocketIO, emit, join_room, leave_room, \
close_room, rooms, disconnect
from flask import Flask, render_template, session, request
import settings
import logging
import os
import json
import redis
import urlparse
from pprint import pformat
logger = logging.getLogger(__name__)
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on installed packages.
async_mode = None
# Wrap the existing Flask app with SocketIO support.
from will.webapp import app
socketio = SocketIO(app, async_mode=async_mode)
# Handle to the single background redis-listener task; started lazily on the
# first client connect (see test_connect below).
thread = None
def get_socketio_app():
    """Return the module-level SocketIO wrapper around the Flask app."""
    # This is the key to starting the socketio app.
    # It runs as a wrapper around Flask. See webapp.bootstrap_flask().
    return socketio
def background_thread():
    """Listen on the redis 'updates' pubsub channel and forward every JSON
    payload to socket.io clients on the /max namespace.

    Runs forever; started once via socketio.start_background_task().
    (Cleanup: removed the unused `count` counter, the unused `as e` binding,
    the unreachable log line after the infinite loop, and switched the
    deprecated logger.warn() to logger.warning().)
    """
    REDIS_CHANNELS = ['updates']
    url = urlparse.urlparse(os.environ.get('REDISCLOUD_URL'))
    r = redis.Redis(host=url.hostname, port=url.port, password=url.password)
    pubsub = r.pubsub()
    pubsub.subscribe(REDIS_CHANNELS)
    logger.info("Starting the redis pubsub listener ...")
    while True:
        message = pubsub.get_message()  # SDG!
        if message:
            logger.info(u'pubsub saw this message: {}'.format(pformat(message)))
            try:
                data = json.loads(message.get('data'))
                socketio.emit('my_picture', data, namespace='/max', broadcast=True)
                logger.info(u'Sent that new pic for clients: {}'.format(pformat(data)))
            except Exception:
                # Subscribe confirmations and other non-JSON payloads land here;
                # best-effort forwarding, so just log and keep listening.
                logger.warning("That didn't appear to be JSON so I didn't forward it")
        # Yield to the socketio event loop between polls.
        socketio.sleep(0.01)
@app.route('/sox')
def index():
    """Serve the demo page for the socket.io endpoints."""
    page = render_template('sockets.html', async_mode=socketio.async_mode)
    return page
@socketio.on('my_event', namespace='/max')
def test_message(message):
    """Echo the payload back to the sender with an updated receive count."""
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response', {'data': message['data'], 'count': count})
@socketio.on('my_broadcast_event', namespace='/max')
def test_broadcast_message(message):
    """Echo the payload to every client in the /max namespace."""
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response',
         {'data': message['data'], 'count': count},
         broadcast=True)
@socketio.on('join', namespace='/max')
def join(message):
    """Add the client to the requested room and report current membership."""
    join_room(message['room'])
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response',
         {'data': 'In rooms: ' + ', '.join(rooms()), 'count': count})
@socketio.on('leave', namespace='/max')
def leave(message):
    """Remove the client from a room and report current membership."""
    leave_room(message['room'])
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response',
         {'data': 'In rooms: ' + ', '.join(rooms()), 'count': count})
@socketio.on('close_room', namespace='/max')
def close(message):
    """Notify the room that it is closing, then close it."""
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response',
         {'data': 'Room ' + message['room'] + ' is closing.', 'count': count},
         room=message['room'])
    close_room(message['room'])
@socketio.on('my_room_event', namespace='/max')
def send_room_message(message):
    """Echo the payload to everyone in the requested room."""
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response',
         {'data': message['data'], 'count': count},
         room=message['room'])
@socketio.on('disconnect_request', namespace='/max')
def disconnect_request():
    """Acknowledge the request, then drop the connection."""
    count = session.get('receive_count', 0) + 1
    session['receive_count'] = count
    emit('my_response', {'data': 'Disconnected!', 'count': count})
    disconnect()
@socketio.on('my_ping', namespace='/max')
def ping_pong():
    """Latency probe: answer every my_ping with a my_pong."""
    emit('my_pong')
@socketio.on('connect', namespace='/max')
def test_connect():
    """On connect: lazily start the single redis listener, then ack the client."""
    global thread
    # Only one background listener for the whole process, started on the
    # first connection ever seen.
    if thread is None:
        thread = socketio.start_background_task(target=background_thread)
    emit('my_response', {'data': 'Connected', 'count': 0})
@socketio.on('disconnect', namespace='/max')
def test_disconnect():
    """Log which client went away (socket.io session id from flask.request)."""
    print('Client disconnected', request.sid)
if __name__ == '__main__':
    # Development entry point; production obtains the app via get_socketio_app().
    socketio.run(app, debug=True)
<filename>timemap/models.py
import uuid
import datetime
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
from preferences.models import Preferences
from epl.custommodels import IntegerRangeField, FloatRangeField
from util.file_validator import FileValidator
from timemap.constants import BRANCH_NAME_LEN, BRANCH_DESCRIPTION_LEN, STORY_TITLE_LEN, \
STORY_DESCRIPTION_LEN, STORY_TEXT_LEN, MAP_BASE_FOLDER_LEN, \
MAP_TITLE_LEN, MAP_AUTHOR_LEN, UPLOAD_EXTENSIONS, \
UPLOAD_MIME_TYPES, BASE_URL_LEN, KEY_LEN
from util.email import emailer, email_template
class Branch(models.Model):
    """A physical location stories can be attached to: a branch building,
    a street car, or a book mobile."""
    class Meta:
        verbose_name_plural = "Branches"
    # Single-character type codes stored in `btype`.
    BRANCH = "B"
    STREET_CAR = "S"
    BOOK_MOBILE = "M"
    BRANCH_TYPE_CHOICES = (
        (BRANCH, 'branch'),
        (STREET_CAR, 'street'),
        (BOOK_MOBILE, 'mobile'),
    )
    name = models.CharField(db_index=True, max_length=BRANCH_NAME_LEN)
    description = models.TextField(max_length=BRANCH_DESCRIPTION_LEN)
    # Years of operation; a blank end_year means the location is still active.
    start_year = IntegerRangeField(db_index=True, min_value=1900, max_value=3000)
    end_year = IntegerRangeField(db_index=True, min_value=1900, max_value=3000, blank=True, null=True)
    floor_plan = models.FileField(upload_to="floor_plans")
    latitude_help = "Latitude range : -90:90"
    latitude = FloatRangeField(min_value=-90, max_value=90, help_text=latitude_help)
    longitude_help = "Longitude range : -180:180"
    longitude = FloatRangeField(min_value=-180, max_value=180, help_text=longitude_help)
    btype = models.CharField(db_index=True,
                             max_length=1,
                             choices=BRANCH_TYPE_CHOICES,
                             default=BRANCH)
    def clean(self):
        # Chronology check; enforced on save via the pre_save signal at the
        # bottom of this file (validate_model).
        if self.end_year and self.start_year > self.end_year:
            raise ValidationError("End year must occur after start year")
    def __unicode__(self):
        return self.name
def media_upload_to(instance, filename):
    """Build the storage path for a Story's media file.

    Files go under a folder named after the story's content type (e.g.
    ``image/``) with a random UUID filename, keeping the original extension.
    """
    extension = filename.rsplit('.', 1)[-1]
    unique_name = "%s.%s" % (uuid.uuid4(), extension)
    folder = instance.CONTENT_TYPE_DICT[instance.content_type]
    return folder + "/" + unique_name
class Story(models.Model):
    """A user-contributed story (text, link, or uploaded media) pinned to a
    year — and optionally a month/day and a Branch."""
    # Single-character content-type codes stored in `content_type`.
    TEXT = "T"
    LINK = "L"
    IMAGE = "I"
    PDF = "P"
    AUDIO = "A"
    VIDEO = "V"
    CONTENT_TYPE_CHOICES = (
        (TEXT, 'text'),
        (LINK, 'link'),
        (IMAGE, 'image'),
        (PDF, 'pdf'),
        (AUDIO, 'audio'),
        (VIDEO, 'video'),
    )
    # code -> name mapping; also used by media_upload_to() to pick the folder.
    CONTENT_TYPE_DICT = dict(CONTENT_TYPE_CHOICES)
    class Meta:
        verbose_name_plural = "Stories"
    title = models.CharField(db_index=True, max_length=STORY_TITLE_LEN)
    description = models.TextField(db_index=True, max_length=STORY_DESCRIPTION_LEN)
    story_text = models.TextField(max_length=STORY_TEXT_LEN, blank=True)
    content_type = models.CharField(db_index=True,
                                    max_length=1,
                                    choices=CONTENT_TYPE_CHOICES,
                                    default=TEXT)
    link_url = models.URLField(blank=True, error_messages={'invalid': "Please input a valid URL (for example: http://www.example.com)."})
    # Uploaded media is validated against the allowed extension/MIME lists.
    media_file = models.FileField(upload_to=media_upload_to,
                                  blank=True,
                                  validators=[FileValidator(allowed_extensions=UPLOAD_EXTENSIONS,
                                                            allowed_mimetypes=UPLOAD_MIME_TYPES)])
    # The story's date; month and day are optional (year-only stories allowed).
    year = IntegerRangeField(db_index=True, min_value=1900, max_value=3000)
    month = IntegerRangeField(min_value=1, max_value=12, blank=True, null=True)
    day = IntegerRangeField(min_value=1, max_value=31, blank=True, null=True)
    branch = models.ForeignKey('Branch', blank=True, null=True)
    keywords = TaggableManager(verbose_name="keywords",
                               help_text=("A comma-separated list of keywords"),
                               blank=True)
    user = models.ForeignKey(User)
    anonymous = models.BooleanField(default=False)
    public_approved = models.BooleanField(default=False)
    def clean(self):
        # Reject impossible dates (e.g. Feb 30); missing month/day default to 1.
        try:
            day = self.day if self.day else 1
            month = self.month if self.month else 1
            date = "%s/%s/%s" % (day, month, self.year)
            datetime.datetime.strptime(date, "%d/%m/%Y")
        except ValueError:
            #TODO: Should make the resulting error clearer
            raise ValidationError("Please enter a valid date.")
    def __unicode__(self):
        return self.title
class Map(models.Model):
    """A published map covering a span of years.

    NOTE(review): `base_folder` presumably names the folder holding the map's
    assets — confirm against the view/template code.
    """
    class Meta:
        verbose_name_plural = "Maps"
    base_folder = models.CharField(max_length=MAP_BASE_FOLDER_LEN)
    title = models.CharField(max_length=MAP_TITLE_LEN)
    author = models.CharField(max_length=MAP_AUTHOR_LEN)
    published = IntegerRangeField(min_value=1900, max_value=3000)
    # The year range the map depicts (distinct from its publication year).
    start_year = IntegerRangeField(min_value=1900, max_value=3000)
    end_year = IntegerRangeField(min_value=1900, max_value=3000)
    def clean(self):
        # Enforced on save via the pre_save signal at the bottom of this file.
        if self.start_year > self.end_year:
            raise ValidationError("End year must occur after start year.")
    def __unicode__(self):
        return self.title
class FeaturedStory(models.Model):
    """A Story selected to be highlighted as featured."""
    class Meta:
        verbose_name_plural = "Featured Stories"
    story = models.ForeignKey('Story')
    def __unicode__(self):
        return self.story.title
class TimemapPreferences(Preferences):
    """Site-wide timemap preferences: timeline date bounds, base URL and
    third-party API keys (managed by django-preferences)."""
    class Meta:
        verbose_name_plural = "Timemap Preferences"
        # NOTE(review): django-preferences registers preference models under
        # preferences.models; typically __module__ is set on the outer class
        # rather than on Meta — confirm this placement is intentional.
        __module__ = 'preferences.models'
    # Where the timeline initially focuses, and its navigable range.
    timeline_init_date = models.DateField(default=datetime.date(2013, 1, 1))
    timeline_start_date = models.DateField(default=datetime.date(1900, 1, 1))
    timeline_end_date = models.DateField(default=datetime.date(2014, 1, 1))
    base_url = models.CharField(max_length=BASE_URL_LEN, default="http://serve.ctrlshiftcreate.com/")
    facebook_key = models.CharField(max_length=KEY_LEN, default='150662938425048')
    google_key = models.CharField(max_length=KEY_LEN, default='<KEY>')
# Signal setup
from django.dispatch.dispatcher import receiver
from django.db.models.signals import pre_save, pre_delete
@receiver(pre_save)
def validate_model(sender, **kwargs):
    """
    Force a clean() call when the constraint-carrying models are saved, so
    invalid instances cannot be persisted outside of form validation.
    """
    validated_models = [Branch, Story, Map]
    # Skip fixture loading ('raw' saves) — those bypass validation on purpose.
    if sender in validated_models and 'raw' in kwargs and not kwargs['raw']:
        kwargs['instance'].full_clean()
@receiver(pre_delete)
def story_delete(sender, instance, **kwargs):
    """
    Remove the media file from storage when its Story row is deleted.
    """
    if sender not in [Story]:
        return
    if instance.media_file:
        # save=False: the row is going away, no need to re-save the model.
        instance.media_file.delete(False)
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import freqz
fs = 1000  # sampling frequency (Hz)
fc = 400  # filter cutoff frequency (Hz)
Ts = 1 / fs
Ns = 17  # number of nonzero taps in the FIR filter impulse response
f1 = 10  # first test tone: 10 Hz (original comment incorrectly said 100 Hz)
f2 = 470  # second test tone: 470 Hz (original comment incorrectly said 2 kHz)
t = np.arange(0, 1, Ts)
x1 = np.sin(2 * np.pi * f1 * t)
x2 = np.sin(2 * np.pi * f2 * t)
# Test input: sum of the two tones; f2 lies above fc and should be filtered out.
x = x1 + x2
# Single-sided amplitude spectrum of the input.
X = np.fft.fft(x)
X = X[:int(len(X)/2)]
f = np.linspace(0, fs/2, len(X))
A = np.sqrt(np.imag(X)**2 + np.real(X)**2)
A = A/len(A)
# Figure 1: the two tones, their sum, and the sum's amplitude spectrum.
plt.figure(1)
plt.subplot(311)
plt.plot(t, x1, label="x1: 10Hz")
plt.plot(t, x2, label="x2: 470Hz")
plt.xlabel("t")
plt.ylabel("A")
plt.legend()
plt.subplot(312)
plt.plot(t, x, label="x1 + x2")
plt.xlabel("t")
plt.ylabel("A")
plt.legend()
plt.subplot(313)
plt.xlabel("f")
plt.ylabel("|A|")
plt.plot(f, A)
# Design a LP FIR filter
# Windowed-sinc design: ideal low-pass impulse response sin(wc*k)/(pi*k),
# centered at (Ns-1)/2, filled in three parts (left half, center, right half).
wc = 2 * np.pi * fc / fs
h = np.zeros(Ns)
n = np.arange(0, Ns)
nk = n[0:int(Ns/2)]
h[0:int(Ns/2)] = np.sin(wc * (nk - (Ns - 1) / 2)) / (np.pi * (nk - (Ns - 1) / 2))
# Center tap: limit of sin(wc*k)/(pi*k) as k -> 0 is wc/pi = 2*fc/fs = 4/5.
h[int(Ns/2)] = 4 / 5
nk = n[int(Ns/2) + 1:Ns]
h[int(Ns/2) + 1:Ns] = np.sin(wc * (nk - (Ns - 1) / 2)) / (np.pi * (nk - (Ns - 1) / 2))
# Triangular (Bartlett) window to damp the truncated sinc's side lobes.
tri_win = 1 - np.abs((2*n-Ns+1)/(Ns-1))
h_win = np.multiply(h, tri_win)
# Figure 2: raw taps with the window overlaid (top), windowed taps (bottom).
plt.figure(2)
plt.subplot(211)
plt.stem(n, h, label="h[n]")
plt.plot(n, tri_win, label="Háromszög ablak")
plt.xlabel("n")
plt.ylabel("h[n]")
plt.legend()
plt.subplot(212)
plt.xlabel("n")
plt.ylabel("h[n]")
plt.stem(n, h_win)
plt.figure(3)
# Filtering (figure 3: un-windowed filter, time domain + spectrum).
plt.subplot(211)
plt.title("Ablakozás nélkül")
# mode='same' keeps the output aligned with (and the same length as) t.
y1 = np.convolve(x, h, mode='same')
plt.plot(t, y1)
plt.xlabel("t")
plt.ylabel("A")
plt.subplot(212)
# Spectrum of the filtered signal: the 470 Hz tone should be attenuated.
X = np.fft.fft(y1)
X = X[:int(len(X)/2)]
f = np.linspace(0, fs/2, len(X))
A = np.sqrt(np.imag(X)**2 + np.real(X)**2)
A = A/len(A)
plt.xlabel("f")
plt.ylabel("|A|")
plt.plot(f, A)
# Figure 4: the *windowed* filter, time domain + spectrum.
plt.figure(4)
plt.subplot(211)
plt.title("Ablakozással")
y2 = np.convolve(x, h_win, mode='same')
# BUG FIX: this subplot must show y2 (the windowed-filter output); the
# original plotted y1, silently duplicating figure 3's time-domain trace.
plt.plot(t, y2)
plt.xlabel("t")
plt.ylabel("A")  # time-domain amplitude; was "|A|", copy-pasted from a spectrum plot
plt.subplot(212)
X = np.fft.fft(y2)
X = X[:int(len(X)/2)]
f = np.linspace(0, fs/2, len(X))
A = np.sqrt(np.imag(X)**2 + np.real(X)**2)
A = A/len(A)
plt.xlabel("f")
plt.ylabel("|A|")
plt.plot(f, A)
# check the filter response
# Figure 5: frequency responses of both designs over 0..fs/2 (8000 points).
w1, h1 = freqz(h, worN=8000)
f = np.linspace(0, fs / 2, 8000)
w2, h2 = freqz(h_win, worN=8000)
plt.figure(5)
plt.plot(f, np.abs(h1), label='Ablakozás nélkül')
plt.plot(f, np.abs(h2), label='Ablakozással')
plt.xlabel("f")
plt.ylabel("|A|")
plt.legend()
plt.show()
import torch
import torchvision.models as models
import torch.optim as optim
import argparse
import matplotlib.pylab as plt
from network.deeplabv3.deeplabv3 import *
from build_data import *
from module_list import *
parser = argparse.ArgumentParser(description='Supervised Segmentation with Partial Labels')
# NOTE(review): several options (--mode, --port, --apply_aug, the thresholds)
# are never read in this supervised script — presumably kept for CLI parity
# with the semi-supervised variant; confirm before removing.
parser.add_argument('--mode', default=None, type=str)
parser.add_argument('--port', default=None, type=int)
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--lr', default=2.5e-3, type=float)
parser.add_argument('--weight_decay', default=5e-4, type=float)
parser.add_argument('--apply_aug', default='cutout', type=str, help='apply semi-supervised method: cutout cutmix classmix')
parser.add_argument('--weak_threshold', default=0.7, type=float)
parser.add_argument('--strong_threshold', default=0.97, type=float)
parser.add_argument('--output_dim', default=256, type=int, help='output dimension from representation head')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--partial', default='p0', type=str, help='p0, p1, p5, p25')
parser.add_argument('--dataset', default='cityscapes', type=str, help='pascal, cityscapes')
args = parser.parse_args()
# Seed all RNGs for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
data_loader = BuildDataLoader(args.dataset, 0)
train_l_loader, test_loader = data_loader.build(supervised=True, partial=args.partial, partial_seed=args.seed)
# Load Semantic Network
device = torch.device("cuda:{:d}".format(args.gpu) if torch.cuda.is_available() else "cpu")
model = DeepLabv3Plus(models.resnet101(pretrained=True), num_classes=data_loader.num_segments, output_dim=args.output_dim).to(device)
total_epoch = 200
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=0.9, nesterov=True)
scheduler = PolyLR(optimizer, total_epoch, power=0.9)
train_epoch = len(train_l_loader)
test_epoch = len(test_loader)
# Per-epoch metrics, columns per the print below:
# [train loss, train mIoU, train acc, test loss, test mIoU, test acc]
avg_cost = np.zeros((total_epoch, 6))
iteration = 0
for index in range(total_epoch):
    # (Removed the unused `cost = np.zeros(3)` allocated every epoch.)
    train_l_dataset = iter(train_l_loader)
    model.train()
    l_conf_mat = ConfMatrix(data_loader.num_segments)
    for i in range(train_epoch):
        # Use the next() builtin instead of the Python-2-style `.next()`
        # method, which is not defined on Python 3 iterators.
        train_l_data, train_l_label = next(train_l_dataset)
        train_l_data, train_l_label = train_l_data.to(device), train_l_label.to(device)
        optimizer.zero_grad()
        # generate labelled and unlabelled data loss
        pred_l, rep_l = model(train_l_data)
        pred_l_large = F.interpolate(pred_l, size=train_l_label.shape[1:], mode='bilinear', align_corners=True)
        # supervised-learning loss
        sup_loss = compute_supervised_loss(pred_l_large, train_l_label)
        loss = sup_loss
        loss.backward()
        optimizer.step()
        l_conf_mat.update(pred_l_large.argmax(1).flatten(), train_l_label.flatten())
        avg_cost[index, 0] += sup_loss.item() / train_epoch
        iteration += 1
    avg_cost[index, 1:3] = l_conf_mat.get_metrics()
    with torch.no_grad():
        model.eval()
        test_dataset = iter(test_loader)
        conf_mat = ConfMatrix(data_loader.num_segments)
        for i in range(test_epoch):
            test_data, test_label = next(test_dataset)
            test_data, test_label = test_data.to(device), test_label.to(device)
            pred, _ = model(test_data)
            pred = F.interpolate(pred, size=test_label.shape[1:], mode='bilinear', align_corners=True)
            loss = compute_supervised_loss(pred, test_label)
            # compute metrics by confusion matrix
            conf_mat.update(pred.argmax(1).flatten(), test_label.flatten())
            # Accumulate only the loss column; the original `avg_cost[index, 3:] +=`
            # also polluted columns 4-5 before they were overwritten below.
            avg_cost[index, 3] += loss.item() / test_epoch
        avg_cost[index, 4:6] = conf_mat.get_metrics()
    scheduler.step()
    print('EPOCH: {:04d} ITER: {:04d} | TRAIN [Loss | mIoU | Acc.]: {:.4f} {:.4f} {:.4f} || Test [Loss | mIoU | Acc.]: {:.4f} {:.4f} {:.4f}'
          .format(index, iteration, avg_cost[index][0], avg_cost[index][1], avg_cost[index][2],
                  avg_cost[index][3], avg_cost[index][4], avg_cost[index][5]))
    print('Top: mIoU {:.4f} IoU {:.4f}'.format(avg_cost[:, 4].max(), avg_cost[:, 5].max()))
    # Checkpoint whenever this epoch ties or beats the best test mIoU so far.
    if avg_cost[index][4] >= avg_cost[:, 4].max():
        torch.save(model.state_dict(), 'model_weights/{}_{}_sup_{}.pth'.format(args.dataset, args.partial, args.seed))
    np.save('logging/{}_{}_sup_{}.npy'.format(args.dataset, args.partial, args.seed), avg_cost)
|
import math
from typing import Dict, List, Mapping, Optional, Tuple, Union
import pandas
class Ancestry:
    """Holds the possible ancestry candidates as well as the confidence score for each.

    Parameters
    ----------
    initial_background: pandas.Series
        The genotype which reached the maximum relative frequency within the population. Serves as the first
        nested genotype to compare all others against.
    timepoints: pandas.DataFrame
        A copy of the genotype timeseries table. Currently only used in the `self.get_sum_of_backgrounds` method.
        May be worth removing later to reduce the number of dependent parameters.
    cautious: bool = True
        Indicates whether to favor the oldest genotype within at least 2 points of the maximum genotype.
        Basically controls the likelihood that these scripts will assign a genotype to an older lineage
        rather than nesting the genotype under a newer lineage.
    """

    def __init__(self, initial_background: pandas.Series, timepoints: pandas.DataFrame, cautious: bool = True):
        self.cautious = cautious
        self.initial_background_label: str = initial_background.name
        # The minimum score to consider a genotype as a possible ancestor.
        # (A cautious-dependent threshold was considered previously; 1 is used unconditionally.)
        self.minimum_score = 1
        # The number of points separating a genotype from the maximum-scored genotype
        # by which an older genotype will still be considered a viable newest ancestor.
        self.score_window = 2
        # Make a copy to prevent unintended modifications to the source table.
        self.timepoints = timepoints.copy()
        # Keep track of the candidate ancestors and their scores for each genotype.
        self.confidence: Dict[str, List[Tuple[str, float]]] = dict()
        # Label of the root of the whole lineage tree.
        self.ancestral_genotype = 'genotype-0'
        # Maps each genotype label to the genotypes nested under it.
        self.nests: Dict[str, List[str]] = dict()
        self.add_genotype_to_background(initial_background.name, self.ancestral_genotype, 1)

    def add_genotype_to_background(self, unnested_label: str, nested_label: str, priority: Union[int, float]) -> None:
        """Record `nested_label` as nested under `unnested_label` with the given priority score."""
        self.nests.setdefault(unnested_label, list()).append(nested_label)
        self.confidence.setdefault(unnested_label, list()).append((nested_label, priority))

    def get(self, label: str) -> List[str]:
        """Return the genotypes nested under `label` (raises KeyError for unknown labels)."""
        return self.nests[label]

    def is_a_member(self, label: str) -> bool:
        """Whether `label` has been registered in the nests."""
        return label in self.nests

    def get_sum_of_backgrounds(self) -> pandas.Series:
        """Sum the timeseries frequencies of all genotypes that qualify as backgrounds."""
        background_labels = [k for k in self.nests if self.is_a_background(k)]
        background_frequencies = self.timepoints.loc[background_labels]
        return background_frequencies.sum()

    def is_a_background(self, element: str) -> bool:
        """A genotype counts as a background when exactly one genotype is nested
        under it, or exactly two of which one is the root 'genotype-0'."""
        background = self.get(element)
        return len(background) == 1 or (len(background) == 2 and 'genotype-0' in background)

    def get_highest_priority_legacy(self, label: str) -> Tuple[Optional[str], float]:
        """Legacy behaviour: return the single highest-scoring candidate, or (None, nan)."""
        candidates = self.confidence.get(label, [])
        if candidates:
            # Explicitly tell max() to compare only the priority score.
            # This prevents it from using the genotype name to sort the elements,
            # so ties are broken by whichever candidate was added first.
            candidate, score = max(candidates, key=lambda s: s[1])
        else:
            # `candidates` was an empty sequence.
            candidate = None
            score = math.nan
        # Scores below 1 are not trusted (note: nan < 1 is False, so the
        # empty-candidates result passes through unchanged).
        if score < 1:
            candidate = None
        return candidate, score

    def get_highest_priority(self, label: str) -> Tuple[Optional[str], float]:
        """Return the genotype label representing the newest ancestor for the genotype indicated by `label`.

        Candidates are scanned in insertion order; the first one whose score
        exceeds `minimum_score` and lies within `score_window` of the maximum
        score wins, so the maximum-scored genotype is returned when no other
        candidate falls inside the window. Returns (None, nan) when there are
        no candidates at all or none qualifies. (Previously an empty candidate
        list crashed with ValueError because max() was called unguarded.)
        """
        candidates = self.confidence.get(label, [])
        if not candidates:
            return None, math.nan
        _, maximum_score = max(candidates, key=lambda s: s[1])
        for candidate, score in candidates:
            # Make sure the score is within `score_window` of the maximum.
            if score > self.minimum_score and abs(maximum_score - score) <= self.score_window:
                return candidate, score
        return None, math.nan

    def as_ancestry_table(self) -> pandas.Series:
        """Return a Series mapping each Identity to its resolved Parent label."""
        table = list()
        for identity in self.nests:
            parent, score = self.get_highest_priority(identity)
            # Fall back to the root when no ancestor qualifies or the genotype
            # would otherwise be its own parent.
            if parent == identity or parent is None:
                parent = self.ancestral_genotype
            table.append({'Parent': parent, 'Identity': identity})
        table = pandas.DataFrame(table)[['Parent', 'Identity']]  # Reorder columns
        return table.set_index('Identity')['Parent']

    def as_dict(self) -> Mapping[str, str]:
        """Return the Identity -> Parent mapping as a plain dict."""
        return self.as_ancestry_table().to_dict()

    def priority_table(self) -> pandas.DataFrame:
        """Return a DataFrame with one (parent, identity, score) row per genotype."""
        data = list()
        for identity in self.nests:
            parent, score = self.get_highest_priority(identity)
            if parent == identity or parent is None:
                parent = self.ancestral_genotype
            data.append({'parent': parent, 'identity': identity, 'score': score})
        return pandas.DataFrame(data)

    def to_table(self) -> pandas.DataFrame:
        """Return a long-format DataFrame of every (identity, candidate, score) triple."""
        data = list()
        for identity, candidates in self.confidence.items():
            for candidate, score in candidates:
                data.append({'identity': identity, 'candidate': candidate, 'score': score})
        return pandas.DataFrame(data)
|
<reponame>42cc/dashr-gw
import logging
import socket
from datetime import timedelta
from mock import patch
from ripple_api.models import Transaction as RippleTransaction
from django.db.utils import OperationalError
from django.test import TestCase
from apps.core import models, tasks, utils
from gateway import celery_app
class CeleryTransactionBaseTaskTest(TestCase):
    """Checks that CeleryTransactionBaseTask.on_failure marks the related
    transaction (deposit or withdrawal) as FAILED."""

    def setUp(self):
        # Silence log output while the tests run.
        logging.disable(logging.CRITICAL)
        # Ensure the singleton Ripple wallet credentials record exists.
        models.RippleWalletCredentials.get_solo()

    @patch('apps.core.models.DashWallet.get_new_address')
    def test_task_on_failure_with_deposit(self, patched_get_new_address):
        # Patched so no real Dash wallet is contacted when the deposit is
        # created; a fixed address is returned instead.
        patched_get_new_address.return_value = (
            'XekiLaxnqpFb2m4NQAEcsKutZcZgcyfo6W'
        )
        transaction = models.DepositTransaction.objects.create(
            ripple_address='rp2PaYDxVwDvaZVLEQv7bHhoFQEyX1mEx7',
            dash_to_transfer=1,
        )
        task = tasks.CeleryTransactionBaseTask()
        # Only the args tuple (carrying the transaction id) matters here;
        # the other on_failure arguments are unused by this handler.
        task.on_failure(None, None, (transaction.id,), None, None)
        transaction.refresh_from_db()
        self.assertEqual(transaction.state, transaction.FAILED)

    def test_task_on_failure_with_withdrawal(self):
        transaction = models.WithdrawalTransaction.objects.create(
            dash_address='yBVKPLuULvioorP8d1Zu8hpeYE7HzVUtB9',
            dash_to_transfer=1,
        )
        task = tasks.CeleryTransactionBaseTask()
        task.on_failure(None, None, (transaction.id,), None, None)
        transaction.refresh_from_db()
        # A failed task must leave the withdrawal in the FAILED state too.
        self.assertEqual(transaction.state, transaction.FAILED)
class MonitorDashToRippleTransactionTaskTest(TestCase):
    """Tests for the monitor_dash_to_ripple_transaction task: state changes,
    follow-up task launching, overdue handling and retry conditions.

    Note: stacked @patch decorators inject mocks bottom-up, so the innermost
    decorator's mock is the first positional argument after self.
    """

    @patch('apps.core.models.DashWallet.get_new_address')
    def setUp(self, patched_get_new_address):
        logging.disable(logging.CRITICAL)
        # Execute Celery tasks synchronously so .apply() runs in-process.
        celery_app.conf.update(CELERY_ALWAYS_EAGER=True)
        models.RippleWalletCredentials.get_solo()
        patched_get_new_address.return_value = (
            'XekiLaxnqpFb2m4NQAEcsKutZcZgcyfo6W'
        )
        self.transaction = models.DepositTransaction.objects.create(
            ripple_address='rp2PaYDxVwDvaZVLEQv7bHhoFQEyX1mEx7',
            dash_to_transfer=1,
        )

    @patch('apps.core.tasks.monitor_transaction_confirmations_number.delay')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_marks_transaction_as_unconfirmed_if_balance_positive(
        self,
        patched_get_address_balance,
        patched_monitor_confirmations_number_task_delay,
    ):
        # A positive (unconfirmed) balance should move the deposit to UNCONFIRMED.
        patched_get_address_balance.return_value = 1
        tasks.monitor_dash_to_ripple_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.UNCONFIRMED)

    @patch('apps.core.tasks.monitor_transaction_confirmations_number.delay')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_launches_monitoring_confirmations_number_if_balance_positive(
        self,
        patched_get_address_balance,
        patched_monitor_confirmations_number_task_delay,
    ):
        patched_get_address_balance.return_value = 1
        tasks.monitor_dash_to_ripple_transaction.apply((self.transaction.id,))
        # The follow-up confirmations-monitoring task must be scheduled once.
        patched_monitor_confirmations_number_task_delay.assert_called_once()

    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_marks_transaction_as_overdue_if_time_exceeded(
        self,
        patched_get_address_balance,
    ):
        patched_get_address_balance.return_value = 0
        gateway_settings = models.GatewaySettings.get_solo()
        # Age the transaction one minute past the configured expiration window.
        self.transaction.timestamp = (
            self.transaction.timestamp -
            timedelta(
                minutes=gateway_settings.transaction_expiration_minutes + 1,
            )
        )
        self.transaction.save()
        tasks.monitor_dash_to_ripple_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.OVERDUE)

    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_not_marks_transaction_as_overdue_if_time_not_exceeded(
        self,
        patched_get_address_balance,
    ):
        # Fresh transaction with zero balance: must not become OVERDUE yet.
        patched_get_address_balance.return_value = 0
        tasks.monitor_dash_to_ripple_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertNotEqual(self.transaction.state, self.transaction.OVERDUE)

    @patch('apps.core.tasks.monitor_dash_to_ripple_transaction.retry')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_retries_if_balance_is_not_positive(
        self,
        patched_get_address_balance,
        patched_retry,
    ):
        patched_get_address_balance.return_value = 0
        tasks.monitor_dash_to_ripple_transaction.apply((self.transaction.id,))
        patched_retry.assert_called_once()

    @patch('apps.core.models.DashWallet')
    @patch('apps.core.tasks.monitor_dash_to_ripple_transaction.retry')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_retries_if_cannot_connect_to_db(
        self,
        patched_get_address_balance,
        patched_retry,
        patched_model,
    ):
        patched_get_address_balance.return_value = 0
        # Simulate a database outage when the task looks up the wallet.
        patched_model.objects.get.side_effect = OperationalError
        tasks.monitor_dash_to_ripple_transaction.apply((self.transaction.id,))
        patched_retry.assert_called_once()

    @patch('apps.core.tasks.monitor_dash_to_ripple_transaction.retry')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_retries_if_cannot_connect_to_dash_server(
        self,
        patched_get_address_balance,
        patched_retry,
    ):
        # Simulate a network failure talking to the Dash server.
        patched_get_address_balance.side_effect = socket.error
        tasks.monitor_dash_to_ripple_transaction.apply((self.transaction.id,))
        patched_retry.assert_called_once()
class MonitorTransactionConfirmationsNumberTaskTest(TestCase):
    """Tests for the monitor_transaction_confirmations_number task: the
    deposit is confirmed (and the Ripple payout launched) once the confirmed
    balance is positive, otherwise the task retries."""

    @patch('apps.core.models.DashWallet.get_new_address')
    def setUp(self, patched_get_new_address):
        logging.disable(logging.CRITICAL)
        # Execute Celery tasks synchronously so .apply() runs in-process.
        celery_app.conf.update(CELERY_ALWAYS_EAGER=True)
        models.RippleWalletCredentials.get_solo()
        patched_get_new_address.return_value = (
            'XekiLaxnqpFb2m4NQAEcsKutZcZgcyfo6W'
        )
        self.transaction = models.DepositTransaction.objects.create(
            ripple_address='rp2PaYDxVwDvaZVLEQv7bHhoFQEyX1mEx7',
            dash_to_transfer=1,
        )

    @patch('apps.core.tasks.send_ripple_transaction.delay')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_marks_transaction_as_confirmed_if_confirmed_balance_positive(
        self,
        patched_get_address_balance,
        patched_send_ripple_transaction_task_delay,
    ):
        patched_get_address_balance.return_value = 1
        tasks.monitor_transaction_confirmations_number.apply(
            (self.transaction.id,),
        )
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.CONFIRMED)

    @patch('apps.core.tasks.send_ripple_transaction.delay')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_launches_send_ripple_transaction_if_confirmed_balance_positive(
        self,
        patched_get_address_balance,
        patched_send_ripple_transaction_task_delay,
    ):
        patched_get_address_balance.return_value = 1
        tasks.monitor_transaction_confirmations_number.apply(
            (self.transaction.id,),
        )
        # The Ripple payout task must be scheduled exactly once.
        patched_send_ripple_transaction_task_delay.assert_called_once()

    @patch('apps.core.tasks.monitor_transaction_confirmations_number.retry')
    @patch('apps.core.models.DashWallet.get_address_balance')
    def test_retries_if_confirmed_balance_is_not_positive(
        self,
        patched_get_address_balance,
        patched_retry,
    ):
        self.transaction.dash_to_transfer = 1
        self.transaction.save()
        # Zero confirmed balance: the task should schedule another attempt.
        patched_get_address_balance.return_value = 0
        tasks.monitor_transaction_confirmations_number.apply(
            (self.transaction.id,),
        )
        patched_retry.assert_called_once()
class SendRippleTransactionTaskTest(TestCase):
    """Tests for the send_ripple_transaction task: sign/submit success marks
    the deposit PROCESSED, failures in either step mark it FAILED, and a
    missing trust line causes a retry."""

    @patch('apps.core.models.DashWallet.get_new_address')
    def setUp(self, patched_get_new_address):
        logging.disable(logging.CRITICAL)
        models.RippleWalletCredentials.objects.create(
            address='rp2PaYDxVwDvaZVLEQv7bHhoFQEyX1mEx7',
        )
        # Execute Celery tasks synchronously so .apply() runs in-process.
        celery_app.conf.update(CELERY_ALWAYS_EAGER=True)
        patched_get_new_address.return_value = (
            'XekiLaxnqpFb2m4NQAEcsKutZcZgcyfo6W'
        )
        self.transaction = models.DepositTransaction.objects.create(
            ripple_address='rp2PaYDxVwDvaZVLEQv7bHhoFQEyX1mEx7',
            dash_to_transfer=1,
        )

    @staticmethod
    def set_last_ripple_transaction_status(status):
        # Helper used as a patch side effect: stamp the given status onto the
        # most recently created RippleTransaction, emulating sign/submit results.
        last_ripple_transaction = tasks.RippleTransaction.objects.last()
        last_ripple_transaction.status = status
        last_ripple_transaction.save()

    @patch('apps.core.tasks.is_trust_set')
    @patch('apps.core.tasks.get_ripple_balance')
    @patch('apps.core.tasks.submit_task')
    @patch('apps.core.tasks.sign_task')
    def test_sends_ripple_tokens_and_marks_transaction_as_processed(
        self,
        patched_sign_task,
        patched_submit_task,
        patched_get_ripple_balance,
        patched_is_trust_set,
    ):
        patched_get_ripple_balance.return_value = 0
        patched_is_trust_set.return_value = True
        # Signing succeeds -> PENDING; submission succeeds -> SUBMITTED.
        patched_sign_task.side_effect = (
            lambda *args: self.set_last_ripple_transaction_status(
                tasks.RippleTransaction.PENDING,
            )
        )
        patched_submit_task.side_effect = (
            lambda *args: self.set_last_ripple_transaction_status(
                tasks.RippleTransaction.SUBMITTED,
            )
        )
        tasks.send_ripple_transaction.apply((self.transaction.id,))
        patched_sign_task.assert_called_once()
        patched_submit_task.assert_called_once()
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.PROCESSED)

    @patch('apps.core.tasks.send_ripple_transaction.retry')
    @patch('apps.core.tasks.is_trust_set')
    @patch('apps.core.tasks.get_ripple_balance')
    def test_retries_if_trust_is_not_set(
        self,
        patched_get_ripple_balance,
        patched_is_trust_set,
        patched_retry,
    ):
        patched_get_ripple_balance.return_value = 0
        # No trust line yet: the task should schedule another attempt.
        patched_is_trust_set.return_value = False
        tasks.send_ripple_transaction.apply((self.transaction.id,))
        patched_retry.assert_called_once()

    @patch('apps.core.tasks.is_trust_set')
    @patch('apps.core.tasks.get_ripple_balance')
    @patch('apps.core.tasks.sign_task')
    def test_marks_transaction_as_failed_if_cannot_sign(
        self,
        patched_sign_task,
        patched_get_ripple_balance,
        patched_is_trust_set,
    ):
        patched_get_ripple_balance.return_value = 0
        patched_is_trust_set.return_value = True
        # Signing fails -> FAILURE status on the Ripple transaction.
        patched_sign_task.side_effect = (
            lambda *args: self.set_last_ripple_transaction_status(
                tasks.RippleTransaction.FAILURE,
            )
        )
        tasks.send_ripple_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.FAILED)

    @patch('apps.core.tasks.is_trust_set')
    @patch('apps.core.tasks.get_ripple_balance')
    @patch('apps.core.tasks.submit_task')
    @patch('apps.core.tasks.sign_task')
    def test_marks_transaction_as_failed_if_cannot_submit(
        self,
        patched_sign_task,
        patched_submit_task,
        patched_get_ripple_balance,
        patched_is_trust_set,
    ):
        patched_get_ripple_balance.return_value = 0
        patched_is_trust_set.return_value = True
        # Signing succeeds but submission fails.
        patched_sign_task.side_effect = (
            lambda *args: self.set_last_ripple_transaction_status(
                tasks.RippleTransaction.PENDING,
            )
        )
        patched_submit_task.side_effect = (
            lambda *args: self.set_last_ripple_transaction_status(
                tasks.RippleTransaction.FAILURE,
            )
        )
        tasks.send_ripple_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.FAILED)
class MonitorRippleToDashTransactionTaskTest(TestCase):
    """Tests for the monitor_ripple_to_dash_transaction task: a matching
    incoming Ripple transaction confirms the withdrawal and triggers the Dash
    payout; otherwise the task goes overdue or retries."""

    def setUp(self):
        logging.disable(logging.CRITICAL)
        # Execute Celery tasks synchronously so .apply() runs in-process.
        celery_app.conf.update(CELERY_ALWAYS_EAGER=True)
        self.ripple_credentials = models.RippleWalletCredentials.get_solo()
        self.transaction = models.WithdrawalTransaction.objects.create(
            dash_address='yBVKPLuULvioorP8d1Zu8hpeYE7HzVUtB9',
            dash_to_transfer=1,
        )

    def create_ripple_transaction(self):
        # Fabricate an incoming (RECEIVED) Ripple transaction of value 1 tagged
        # with this withdrawal's destination tag.
        return RippleTransaction.objects.create(
            destination_tag=self.transaction.destination_tag,
            issuer=self.ripple_credentials.address,
            currency='DSH',
            status=RippleTransaction.RECEIVED,
            hash='some_hash',
            value='1',
        )

    @patch('apps.core.tasks.send_dash_transaction.delay')
    def test_modifies_transaction_if_ripple_transaction_exists(
        self,
        patched_send_dash_transaction_task_delay,
    ):
        self.create_ripple_transaction()
        tasks.monitor_ripple_to_dash_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.CONFIRMED)

    @patch('apps.core.tasks.send_dash_transaction.delay')
    def test_checks_amount_of_all_transactions_with_destination_tag(
        self,
        patched_send_dash_transaction_task_delay,
    ):
        # Two value-1 Ripple transactions together should cover a
        # dash_to_transfer of 2 and confirm the withdrawal.
        self.transaction.dash_to_transfer = 2
        self.transaction.save()
        self.create_ripple_transaction()
        self.create_ripple_transaction()
        tasks.monitor_ripple_to_dash_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.CONFIRMED)

    @patch('apps.core.tasks.send_dash_transaction.delay')
    def test_launches_send_dash_transaction_if_balance_positive(
        self,
        patched_send_dash_transaction_task_delay,
    ):
        self.create_ripple_transaction()
        tasks.monitor_ripple_to_dash_transaction.apply(
            (self.transaction.id,),
        )
        # The Dash payout task must be scheduled exactly once.
        patched_send_dash_transaction_task_delay.assert_called_once()

    def test_marks_transaction_as_overdue_if_time_exceeded(self):
        gateway_settings = models.GatewaySettings.get_solo()
        # Age the transaction one minute past the configured expiration window.
        self.transaction.timestamp = (
            self.transaction.timestamp -
            timedelta(
                minutes=gateway_settings.transaction_expiration_minutes + 1,
            )
        )
        self.transaction.save()
        tasks.monitor_ripple_to_dash_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.OVERDUE)

    def test_not_marks_transaction_as_overdue_if_time_not_exceeded(
        self,
    ):
        tasks.monitor_ripple_to_dash_transaction.apply((self.transaction.id,))
        self.transaction.refresh_from_db()
        self.assertNotEqual(self.transaction.state, self.transaction.OVERDUE)

    @patch('apps.core.tasks.monitor_ripple_to_dash_transaction.retry')
    def test_retries_if_no_ripple_transaction_is_found(
        self,
        patched_retry,
    ):
        # No incoming Ripple transaction exists: the task should retry.
        tasks.monitor_ripple_to_dash_transaction.apply((self.transaction.id,))
        patched_retry.assert_called_once()
class SendDashTransactionTaskTest(TestCase):
    """Tests for the send_dash_transaction task: the Dash payout is sent for
    the fee-adjusted amount and the withdrawal ends up PROCESSED with the
    outgoing transaction hash recorded."""

    def setUp(self):
        logging.disable(logging.CRITICAL)
        # Execute Celery tasks synchronously so .apply() runs in-process.
        celery_app.conf.update(CELERY_ALWAYS_EAGER=True)
        self.transaction = models.WithdrawalTransaction.objects.create(
            dash_address='yBVKPLuULvioorP8d1Zu8hpeYE7HzVUtB9',
            dash_to_transfer=1,
        )

    @patch('apps.core.tasks.wallet.DashWallet.send_to_address')
    def test_sends_dash_and_marks_transaction_as_processed(
        self,
        patched_send_to_address,
    ):
        patched_send_to_address.return_value = 'hash'
        tasks.send_dash_transaction.apply((self.transaction.id,))
        # The amount actually sent is the stored amount run through the
        # gateway's received-amount calculation for withdrawals.
        patched_send_to_address.assert_called_with(
            self.transaction.dash_address,
            utils.get_received_amount(
                self.transaction.dash_to_transfer,
                'withdrawal',
            ),
        )
        self.transaction.refresh_from_db()
        self.assertEqual(self.transaction.state, self.transaction.PROCESSED)
        # The returned Dash transaction hash must be persisted on the model.
        self.assertEqual(
            self.transaction.outgoing_dash_transaction_hash,
            patched_send_to_address.return_value,
        )
|
<reponame>pzaffino/SlicerLungDensitySegmentation
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
from slicer.util import setSliceViewerLayers
import numpy as np
import SimpleITK as sitk
import sitkUtils
import scipy.ndimage
#
# LungCTGMMSegmentation
#
class LungCTGMMSegmentation(ScriptedLoadableModule):
    """Module descriptor: registers title, category, contributors and help text
    with Slicer.

    Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        """Fill in the module metadata shown in Slicer's module panel."""
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "Lung CT GMM Segmentation" # TODO make this more human readable by adding spaces
        self.parent.categories = ["Segmentation"]
        self.parent.dependencies = []
        self.parent.contributors = ["<NAME> (Magna Graecia University of Catanzaro, Italy)", "<NAME> (Magna Graecia University of Catanzaro, Italy)"] # replace with "Firstname Lastname (Organization)"
        self.parent.helpText = '''
This module labels lung tissues on basis of intensities.
The full validation workflow is described in ''' + f'<p> <a href="{"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7919807/"}">this article</a>.</p>'
        self.parent.helpText += f'<p>For more information see the <a href="{"https://github.com/pzaffino/SlicerDensityLungSegmentation"}">online documentation</a>.</p>'
        self.parent.acknowledgementText = """ """ # replace with organization, grant and thanks.
#
# LungCTGMMSegmentationWidget
#
class LungCTGMMSegmentationWidget(ScriptedLoadableModuleWidget):
    """GUI for the module: a CT input selector, two output segmentation
    selectors and an Apply button wired to the logic class.

    Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def setup(self):
        """Build the widget layout and connect signals."""
        ScriptedLoadableModuleWidget.setup(self)
        # Instantiate and connect widgets ...
        #
        # Parameters Area
        #
        parametersCollapsibleButton = ctk.ctkCollapsibleButton()
        parametersCollapsibleButton.text = "Parameters"
        self.layout.addWidget(parametersCollapsibleButton)
        # Layout within the dummy collapsible button
        parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
        #
        # GT CT volume selector
        #
        self.CTSelector = slicer.qMRMLNodeComboBox()
        self.CTSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
        self.CTSelector.selectNodeUponCreation = True
        self.CTSelector.addEnabled = False
        self.CTSelector.removeEnabled = False
        self.CTSelector.noneEnabled = False
        self.CTSelector.showHidden = False
        self.CTSelector.showChildNodeTypes = False
        self.CTSelector.setMRMLScene(slicer.mrmlScene)
        self.CTSelector.setToolTip( "Select the CT" )
        parametersFormLayout.addRow("CT volume: ", self.CTSelector)
        #
        # output volume selector
        #
        self.outputSelector = slicer.qMRMLNodeComboBox()
        self.outputSelector.nodeTypes = ["vtkMRMLSegmentationNode"]
        self.outputSelector.selectNodeUponCreation = True
        self.outputSelector.addEnabled = True
        self.outputSelector.removeEnabled = True
        self.outputSelector.noneEnabled = True
        self.outputSelector.showHidden = False
        self.outputSelector.showChildNodeTypes = False
        self.outputSelector.setMRMLScene(slicer.mrmlScene)
        self.outputSelector.baseName = "Lung density segmentation"
        self.outputSelector.setToolTip("Select or create a segmentation for lung tissue classification")
        parametersFormLayout.addRow("Output segmentation: ", self.outputSelector)
        #
        # Averaged output volume selector
        #
        self.averagedOutputSelector = slicer.qMRMLNodeComboBox()
        self.averagedOutputSelector.nodeTypes = ["vtkMRMLSegmentationNode"]
        self.averagedOutputSelector.selectNodeUponCreation = True
        self.averagedOutputSelector.addEnabled = True
        self.averagedOutputSelector.removeEnabled = True
        self.averagedOutputSelector.noneEnabled = True
        self.averagedOutputSelector.showHidden = False
        self.averagedOutputSelector.showChildNodeTypes = False
        self.averagedOutputSelector.setMRMLScene(slicer.mrmlScene)
        self.averagedOutputSelector.baseName = "Averaged lung density segmentation"
        self.averagedOutputSelector.setToolTip("Select or create a segmentation for averaged lung tissue classification")
        parametersFormLayout.addRow("Averaged output segmentation: ", self.averagedOutputSelector)
        #
        # Apply Button
        #
        self.applyButton = qt.QPushButton("Apply (it can take some minutes)")
        self.applyButton.toolTip = "Run the algorithm."
        self.applyButton.enabled = False
        parametersFormLayout.addRow(self.applyButton)
        # connections
        self.applyButton.connect('clicked(bool)', self.onApplyButton)
        self.CTSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
        self.outputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
        self.averagedOutputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
        # Add vertical spacer
        self.layout.addStretch(1)
        # Refresh Apply button state
        self.onSelect()
        # Create logic object
        self.logic = LungCTGMMSegmentationLogic()
    def onSelect(self):
        """Enable Apply only when the CT input and both outputs are selected."""
        self.applyButton.enabled = self.CTSelector.currentNode() and self.outputSelector.currentNode() and self.averagedOutputSelector.currentNode()
    def onApplyButton(self):
        """Forward the three selected nodes to the logic's run() method."""
        self.logic.run(self.CTSelector.currentNode(), self.outputSelector.currentNode(), self.averagedOutputSelector.currentNode())
#
# LungCTGMMSegmentationLogic
#
class LungCTGMMSegmentationLogic(ScriptedLoadableModuleLogic):
    """This class should implement all the actual
    computation done by your module. The interface
    should be such that other python code can import
    this class and make use of the functionality without
    requiring an instance of the Widget.
    Uses ScriptedLoadableModuleLogic base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def lungSegmentationErrorBox(self):
        """Show a modal Qt error dialog reporting a lung-segmentation failure."""
        errorMBox = qt.QMessageBox()
        errorMBox.setIcon(qt.QMessageBox().Critical)
        errorMBox.setWindowTitle("Error")
        errorMBox.setText("Error in lung segmentation")
        errorMBox.exec()
    def extract_only_lungs_islands(self, thr_img):
        """
        Extract only lung islands from patient's binary image.

        thr_img: binary (0/1) 3D array. Returns a uint8 mask of the connected
        components that are large enough and whose bounding box does not touch
        the image border (border-touching components are treated as background).
        """
        # Create final mask
        final_mask = np.zeros_like(thr_img, dtype=np.uint8)
        # Compute islands
        label_im, nb_labels = scipy.ndimage.label(thr_img)
        # sizes[i] is the voxel count of component i (sizes[0] is background, always 0 here).
        sizes = scipy.ndimage.sum(thr_img, label_im, range(nb_labels + 1))
        # Investigate each island. scipy.ndimage.label numbers components
        # 1..nb_labels (0 is background); the previous loop `range(nb_labels)`
        # skipped the last component entirely, so a lung could be dropped.
        for i in range(1, nb_labels + 1):
            # discard small islands
            if sizes[i] < 5.0e5:
                continue
            # Check if island is background (bbox overlapping with image border)
            img_coords = np.zeros_like(thr_img, dtype=np.uint8)
            img_coords[label_im == i] = 1
            coords = self.bbox(img_coords, margin=0)
            if (coords[2] != 0 and coords[4] != 0 and
                    coords[3] != thr_img.shape[1] - 1 and coords[5] != thr_img.shape[2] - 1):  # non background, set as lung
                final_mask[img_coords == 1] = 1
        return final_mask
    def bbox(self, img, margin=20):
        """
        Compute the bounding box of a binary mask and add a margin (only in the
        axial plane). Returns [i_min, i_max, j_min, j_max, k_min, k_max].
        Raises (after showing an error box) when the box looks invalid.
        """
        coords = [0, img.shape[0], 0, img.shape[1], 0, img.shape[2]]
        # i (no margin along the first axis)
        for i in range(img.shape[0]):
            if 1 in img[i, :, :]:
                coords[0] = i
                break
        for i in range(img.shape[0] - 1, -1, -1):
            if 1 in img[i, :, :]:
                coords[1] = i
                break
        # j
        for j in range(img.shape[1]):
            if 1 in img[:, j, :]:
                coords[2] = j - margin
                break
        for j in range(img.shape[1] - 1, -1, -1):
            if 1 in img[:, j, :]:
                coords[3] = j + margin
                break
        # k
        for k in range(img.shape[2]):
            if 1 in img[:, :, k]:
                coords[4] = k - margin
                break
        for k in range(img.shape[2] - 1, -1, -1):
            if 1 in img[:, :, k]:
                coords[5] = k + margin
                break
        # Error in finding bbox.
        # NOTE(review): with the `or` between the two clauses this only fires
        # when BOTH the lower and upper bounds look wrong — presumably intended
        # as a loose sanity check; confirm before tightening.
        if not ((coords[0] >= 0 and coords[2] >= 0 and coords[4] >= 0) or
                (coords[1] <= img.shape[0] - 1 and coords[3] <= img.shape[1] - 1 and coords[5] <= img.shape[2] - 1)):
            self.lungSegmentationErrorBox()
            raise Exception("Error in lung segmentation")
        return coords
    def binary_closing_sitk(self, img_np, radius_list):
        """
        Morphological closing via SimpleITK, applied once per radius in
        radius_list. SimpleITK is much faster and less compute-intensive
        than skimage here. Returns a uint8 array.
        """
        img_sitk = sitk.GetImageFromArray(img_np)
        for radius in radius_list:
            img_sitk = sitk.BinaryMorphologicalClosing(img_sitk, [radius, radius, radius])
        return sitk.GetArrayFromImage(img_sitk).astype(np.uint8)
    def threshold_image(self, ct, intensity_thr=-155):
        """
        Threshold-based segmentation: voxels below intensity_thr become 1
        (air/lung candidates), then a binary opening removes small speckle.
        """
        thr_img = np.zeros_like(ct, dtype=np.uint8)
        thr_img[ct >= intensity_thr] = 1
        # Invert: we want the voxels BELOW the threshold.
        thr_img = 1 - thr_img
        thr_img = scipy.ndimage.binary_opening(thr_img, iterations=3)
        return thr_img
    def close_lungs_mask(self, lungs_mask):
        """
        Close holes in the lungs binary mask. Raises (after showing an error
        box) if the closed mask is implausibly small.
        """
        # Crop to the bounding box to speed up the morphological filters.
        coords = self.bbox(lungs_mask)
        bb_lungs_mask = lungs_mask[coords[0]:coords[1], coords[2]:coords[3], coords[4]:coords[5]]
        # Binary closing
        closed_bb_lung_mask = self.binary_closing_sitk(bb_lungs_mask, [30, 20])
        # Error in lung segmentation
        if not closed_bb_lung_mask.sum() > 1000:
            self.lungSegmentationErrorBox()
            raise Exception("Error in lung segmentation")
        # Undo bounding box
        closed_lung_mask = np.zeros_like(lungs_mask, dtype=np.uint8)
        closed_lung_mask[coords[0]:coords[1], coords[2]:coords[3], coords[4]:coords[5]] = closed_bb_lung_mask
        return closed_lung_mask
    def run(self, CTVolume, outputSegmentation, averagedOutputSegmentation):
        """
        Run intensity labeling: segment the lungs, classify lung voxels with a
        pretrained 5-component GMM, and push both the raw and median-filtered
        label maps to the given segmentation nodes.
        """
        # Import the required libraries (installed on demand on first run).
        try:
            import joblib
        except ModuleNotFoundError:
            slicer.util.pip_install("joblib")
            import joblib
        try:
            import sklearn
        except ModuleNotFoundError:
            slicer.util.pip_install("scikit-learn")
            import sklearn
        # Get sitk/numpy images from Slicer; clamp very low HU values.
        CT_sitk = sitk.Cast(sitkUtils.PullVolumeFromSlicer(CTVolume.GetName()), sitk.sitkFloat32)
        CT_np = sitk.GetArrayFromImage(CT_sitk)
        CT_np[CT_np < -1000] = -1000
        # Compute lung mask and blank out everything outside it.
        thr_CT = self.threshold_image(CT_np, -155)
        lungs_mask = self.extract_only_lungs_islands(thr_CT)
        closed_lungs_mask = self.close_lungs_mask(lungs_mask)
        CT_np[closed_lungs_mask == 0] = -1000
        CT_flatten = CT_np.flatten()
        # Remove background so the GMM only sees lung intensities.
        indexes_to_remove = np.argwhere(closed_lungs_mask.flatten() == 0)
        lungs = np.delete(CT_flatten, indexes_to_remove)
        # Run GMM (pretrained parameters shipped in the module Resources).
        gmm_model_fn = __file__.replace("LungCTGMMSegmentation.py", "Resources%sGMM_parameters_COVID-19.joblib" % (os.sep))
        gmm = joblib.load(gmm_model_fn)
        gmm_labels = gmm.predict(lungs.reshape(-1, 1)).reshape(lungs.shape)
        # Relabel so class ids 1..5 are ordered by increasing component mean.
        sorted_label = np.zeros_like(lungs, dtype=np.uint8)
        sorted_gmm_means = np.argsort([i[0] for i in gmm.means_])
        for rank, component in enumerate(sorted_gmm_means):
            sorted_label[gmm_labels == component] = rank + 1
        # Restore background voxels: scatter the lung labels back to their
        # original flat positions (vectorized; replaces a per-voxel Python loop
        # over millions of voxels with a single fancy-indexed assignment).
        indexes_to_leave = np.argwhere(closed_lungs_mask.flatten() == 1).ravel()
        final_label = np.zeros_like(CT_flatten, dtype=np.uint8)
        final_label[indexes_to_leave] = sorted_label
        # Reshape array labels. From 1D to 3D.
        final_label = final_label.reshape(CT_np.shape)
        final_label_sitk = sitk.GetImageFromArray(final_label)
        final_label_sitk.CopyInformation(CT_sitk)
        # Averaged (median-filtered) label.
        filtered_label = np.rint(scipy.ndimage.median_filter(final_label, 4)).astype(np.uint8)
        filtered_label_sitk = sitk.GetImageFromArray(filtered_label)
        filtered_label_sitk.CopyInformation(CT_sitk)
        # Create labelmap nodes in the Slicer scene.
        final_label_slicer = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
        final_label_slicer = sitkUtils.PushVolumeToSlicer(final_label_sitk, final_label_slicer)
        filtered_label_slicer = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
        filtered_label_slicer = sitkUtils.PushVolumeToSlicer(filtered_label_sitk, filtered_label_slicer)
        # Convert labelmaps to segmentations and name the five tissue classes.
        slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(final_label_slicer, outputSegmentation)
        outputSegmentation.CreateClosedSurfaceRepresentation()
        slicer.mrmlScene.RemoveNode(final_label_slicer)
        outputSegmentation.GetSegmentation().GetNthSegment(0).SetName("Air")
        outputSegmentation.GetSegmentation().GetNthSegment(1).SetName("Healthy lungs")
        outputSegmentation.GetSegmentation().GetNthSegment(2).SetName("Ground glass opacity")
        outputSegmentation.GetSegmentation().GetNthSegment(3).SetName("Consolidation")
        outputSegmentation.GetSegmentation().GetNthSegment(4).SetName("Other denser tissues")
        slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(filtered_label_slicer, averagedOutputSegmentation)
        averagedOutputSegmentation.CreateClosedSurfaceRepresentation()
        slicer.mrmlScene.RemoveNode(filtered_label_slicer)
        averagedOutputSegmentation.GetSegmentation().GetNthSegment(0).SetName("Air")
        averagedOutputSegmentation.GetSegmentation().GetNthSegment(1).SetName("Healthy lungs")
        averagedOutputSegmentation.GetSegmentation().GetNthSegment(2).SetName("Ground glass opacity")
        averagedOutputSegmentation.GetSegmentation().GetNthSegment(3).SetName("Consolidation")
        averagedOutputSegmentation.GetSegmentation().GetNthSegment(4).SetName("Other denser tissues")
|
<reponame>phatollie/MQTT
# -*- coding: utf-8 -*-
###############################################
# Authored by <NAME> in the year 2021 #
###############################################
"""
Description: MQTT client script to help reduce the massive options to connect to a MQTT broker and publishing TOPICS. The optional console verbose logging has been put in place just to give immediate feedback that communications are occurring. Because MQTT is so lightweight, once you understand how to communicate and publish, the verbose console logging is not really needed. A better option is to log to $SYSLOG or a client-specific file. This script represents the PUBLISH methods separated from the SUBSCRIBE method to keep things simple. Most hardware/software vendors who utilize MQTT provide what specific topics/messages are required for their sensors. Keep in mind network diversity/robustness may require you to tweak settings related to MQTT QOS and time.sleep to ensure topics/messages reach their destination. However, if the script is too overzealous you might break the pipe and drop packets. Broker redundancy can help when challenged on larger/bigger networks.
Design: Bare bones script to establish a quick broker connect. A better pythonic approach would be to separate out GLOBALS / LOGGING into a config module. Note that the order below for connect / publish allows for the MQTT broker to reply timely so callbacks work. Publishing is really fast and the callbacks themselves might be slow so don't worry about console logging timeliness. PUBLISH is more on the interactive side as opposed to SUBSCRIBE which waits and loops for new messages. MQTT_pub.py should be run after MQTT_Sub.py is running. Both scripts require a BROKER to be up and running.
"""
import paho.mqtt.client as paho
import time
import logging
# Log format: severity, originating module, timestamp, then the message body.
LOG_FORMAT = '%(levelname)s: %(module)s: %(asctime)s - %(message)s'
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
# (Removed a stray `logging.getLogger()` call whose return value was discarded.)

# MQTT broker connection settings.
MQTT_BROKER_HOST = 'localhost'
MQTT_BROKER_PORT = 1883
MQTT_KEEP_ALIVE_INTERVAL = 60  # seconds between keep-alive pings
def on_connect(client, userdata, flags, rc):
    """
    `on_connect` called when the broker responds to a connection request
    :param `client` current Client instance that is calling the callback.
    :param `userdata` user data of any type and can be set when creating a new client
                      instance or with user_data_set(userdata).
    :param `flags` dict that contains response flags from the broker.
    :param `rc` return code determines success/failure
        0: Connection successful
        1: Connection refused - incorrect protocol version
        2: Connection refused - invalid client identifier
        3: Connection refused - server unavailable
        4: Connection refused - bad username or password
        5: Connection refused - not authorised
        6-255: Currently unused.
    """
    # Only the message prefix depends on the result code; report once.
    prefix = "[SUCCESS] Connected OK to" if rc == 0 else "[FAILURE] Bad connection to"
    print(f"{prefix} MQTT Broker {MQTT_BROKER_HOST}:{MQTT_BROKER_PORT} replied with RESULT CODE = {rc}")
    logger.debug("Broker info: %s:%s", MQTT_BROKER_HOST, MQTT_BROKER_PORT)
def on_publish(client, userdata, mid):
    """
    `on_publish` called when a message that was to be sent using the publish() call has completed transmission to the broker
    :param `client` is the current client.
    :param `userdata` is exactly that user specific data.
    :param `mid` mid variable used for comparing the mid variable returned for message tracking.
    """
    message = '[SUCCESS] Sent PUBLISH topic/message MID variable returned = {}'.format(mid)
    print(message)
# Create the client and register the connection callback *before* connecting,
# so the broker's CONNACK reply is reported by on_connect.
client = paho.Client()
client.on_connect = on_connect
client.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT, MQTT_KEEP_ALIVE_INTERVAL)
print(f'Establishing a connection to the MQTT Broker...{MQTT_BROKER_HOST}:{MQTT_BROKER_PORT}')
time.sleep(2)
# Start the network loop in a background thread so callbacks get dispatched.
client.loop_start()
client.on_publish = on_publish
time.sleep(2)
logger.info("Publishing our test topics to: %s:%s", MQTT_BROKER_HOST, MQTT_BROKER_PORT)
# Publish three test topics; the one-second sleeps give the broker time to
# acknowledge each message before the next is sent (tune for slower networks).
client.publish("westside/led1", 'DOWN')
time.sleep(1)
client.publish("westside/led2", 'UP')
time.sleep(1)
client.publish("westside/led3", 'DOWN')
time.sleep(1)
# Stop the background loop and close the connection cleanly.
client.loop_stop()
client.disconnect() # If you opt to run in -i comment this line so you can manually publish and stay connected.
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import timedelta
from dynamic_preferences.registries import global_preferences_registry
from danceschool.core.models import DanceRole, DanceType, DanceTypeLevel, ClassDescription, PricingTier, Location, StaffMember, Instructor, Event, Series, EventStaffMember, EventOccurrence
from danceschool.core.constants import getConstant
class DefaultSchoolTestCase(TestCase):
    '''
    This class just sets up standard data for the school, and it can be
    inherited from, since many test classes in different apps may want to
    use this same test data.
    '''

    @classmethod
    def setUpTestData(cls):
        # Ensure that necessary constants are initialized and that all
        # needed categories are created within the database
        gp = global_preferences_registry.manager()
        gp.load_from_db()

        # Create Lead and Follow roles
        DanceRole.objects.create(name='Lead',order=1)
        DanceRole.objects.create(name='Follow',order=2)
        cls.defaultDanceRoles = DanceRole.objects.filter(name__in=['Lead','Follow'])

        cls.defaultDanceType = DanceType.objects.create(name='Lindy Hop',order=1)
        # NOTE(review): direct many-to-many assignment; Django >= 2.0 requires
        # `.roles.set(...)` -- confirm against the project's Django version.
        cls.defaultDanceType.roles = cls.defaultDanceRoles
        cls.defaultDanceType.save()

        # Create two levels for tests that involve different levels
        cls.levelOne = DanceTypeLevel.objects.create(name='Level 1', order=1, danceType=cls.defaultDanceType)
        cls.levelTwo = DanceTypeLevel.objects.create(name='Level 2', order=2, danceType=cls.defaultDanceType)

        # Create two ClassDescriptions for classes, one in each level
        cls.levelOneClassDescription = ClassDescription.objects.create(
            title='Test Level One Class',
            description='This is a test description',
            danceTypeLevel=cls.levelOne,
            slug='test-level-one',
        )
        cls.levelTwoClassDescription = ClassDescription.objects.create(
            title='Test Level Two Class',
            description='This is a test description',
            danceTypeLevel=cls.levelTwo,
            slug='test-level-two',
        )

        # Create a default PricingTier and a default Location
        cls.defaultPricing = PricingTier.objects.create(
            name='Default Pricing',
            onlinePrice=50,
            doorPrice=60,
            dropinPrice=10,
        )
        cls.defaultLocation = Location.objects.create(
            name='Default Location',
            status=Location.StatusChoices.active,
            address='This is a street address',
            city='Boston',
            state='MA',
            zip='02114',
            directions='These are directions to the default location.',
            defaultCapacity=50,
        )

        # Create a superuser and a non-staff user
        cls.superuser = User.objects.create_superuser(
            'admin',
            '<EMAIL>',
            'pass',
            first_name='Frankie',
            last_name='Manning',
        )
        cls.nonStaffUser = User.objects.create_user(
            'regularuser',
            '<EMAIL>',
            'pass',
            is_staff=False,
            first_name='New',
            last_name='Student',
        )

        # Make the superuser an Instructor
        cls.defaultInstructor = StaffMember.objects.create(
            firstName='Frankie',
            lastName='Manning',
            userAccount=cls.superuser,
            publicEmail='<EMAIL>',
            privateEmail='<EMAIL>',
            bio='This is <NAME>.',
        )
        Instructor.objects.create(
            staffMember=cls.defaultInstructor,
            status=Instructor.InstructorStatus.roster,
        )

    def create_series(self,**kwargs):
        """
        This method just creates a new series with the loaded class
        description that can be modified or used for various tests.

        Keyword arguments (all optional): occurrences, startTime,
        classDescription, pricingTier, location, status, instructors.
        Returns the saved Series instance.
        """

        # Create one or more occurrences.  By default, the series
        # starts tomorrow, is a Level One Lindy Hop class with default
        # pricing and location, is enabled for registration, and is taught
        # by <NAME>.
        occurrences = max(kwargs.get('occurrences',1),1)  # at least one occurrence
        startTime = kwargs.get('startTime', timezone.now() + timedelta(days=1))
        classDescription = kwargs.get('classDescription', self.levelOneClassDescription)
        pricingTier = kwargs.get('pricingTier', self.defaultPricing)
        location = kwargs.get('location', self.defaultLocation)
        status = kwargs.get('status', Event.RegStatus.enabled)
        instructors = kwargs.get('instructors', [self.defaultInstructor,])

        s = Series(
            classDescription=classDescription,
            pricingTier=pricingTier,
            location=location,
            status=status,
        )
        s.save()

        # Add an occurrence at the start Time
        # and if requested to set more than one occurrence, then
        # each additional occurrence is the day after the last one.
        for k in range(1,occurrences + 1):
            EventOccurrence.objects.create(
                event=s,
                startTime=startTime + timedelta(days=k - 1),
                endTime=startTime + timedelta(days=k - 1,hours=1)
            )

        # Add instructors (<NAME> by default)
        for i in instructors:
            seriesteacher = EventStaffMember.objects.create(
                event=s,
                category=getConstant('general__eventStaffCategoryInstructor'),
                staffMember=i,
            )
            # NOTE(review): direct assignment to a related manager -- confirm
            # this works on the project's Django version (vs. `.set(...)`).
            seriesteacher.occurrences = s.eventoccurrence_set.all()
            seriesteacher.save()

        # Must save after adding event occurrences to ensure that
        # registration status is updated properly.
        s.save()
        return s

    def create_instructor(self,**kwargs):
        '''
        This method creates a new instructor (other than the default)
        for testing things like substitute teaching.

        Keyword arguments (all optional): status, firstName, lastName,
        publicEmail, privateEmail, bio, userAccount.
        Returns the created StaffMember instance.
        '''
        status = kwargs.get('status', Instructor.InstructorStatus.roster)
        firstName = kwargs.get('firstName','Norma')
        lastName = kwargs.get('lastName','Miller')
        publicEmail = kwargs.get('publicEmail','<EMAIL>')
        privateEmail = kwargs.get('privateEmail', '<EMAIL>')
        bio = kwargs.get('bio', 'This is <NAME>.')
        userAccount = kwargs.get('userAccount', None)

        staffMember = StaffMember.objects.create(
            firstName=firstName,
            lastName=lastName,
            userAccount=userAccount,
            publicEmail=publicEmail,
            privateEmail=privateEmail,
            bio=bio,
        )
        Instructor.objects.create(
            staffMember=staffMember,
            status=status,
        )
        return staffMember
|
<filename>geotrek/signage/forms.py
from django import forms
from django.conf import settings
from django.contrib.gis.forms.fields import GeometryField
from django.db.models import Max
from django.forms.models import inlineformset_factory
from django.utils.translation import gettext_lazy as _
from leaflet.forms.widgets import LeafletWidget
from crispy_forms.layout import Div, Fieldset, Layout
from crispy_forms.helper import FormHelper
from geotrek.common.forms import CommonForm
from geotrek.core.fields import TopologyField
from geotrek.core.widgets import PointTopologyWidget
from geotrek.infrastructure.forms import BaseInfrastructureForm
from geotrek.signage.models import Signage, Blade, Line
class LineForm(forms.ModelForm):
    """Inline form for a single blade line; applies Bootstrap sizing classes to its widgets."""

    # CSS class applied to each editable widget of the form.
    _WIDGET_CSS_CLASSES = {
        'number': 'input-mini',
        'text': 'input-xlarge',
        'distance': 'input-mini',
        'pictogram_name': 'input-mini',
        'time': 'input-mini',
    }

    def __init__(self, *args, **kwargs):
        super(LineForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.layout = Layout('id', 'number', 'text', 'distance', 'pictogram_name', 'time')
        for field_name, css_class in self._WIDGET_CSS_CLASSES.items():
            self.fields[field_name].widget.attrs['class'] = css_class

    def save(self, *args, **kwargs):
        return super(LineForm, self).save(*args, **kwargs)

    class Meta:
        fields = ('id', 'blade', 'number', 'text', 'distance', 'pictogram_name', 'time')
LineFormset = inlineformset_factory(Blade, Line, form=LineForm, extra=1)
class BaseBladeForm(CommonForm):
    # Geometry is carried by a topology linked to the parent signage.
    topology = TopologyField(label="")

    geomfields = ['topology']

    fieldslayout = [
        Div(
            'number',
            'direction',
            'type',
            'condition',
            'color',
            Fieldset(_('Lines')),
        )
    ]

    def __init__(self, *args, **kwargs):
        """Resolve the parent signage and pre-fill the next available blade number."""
        super(BaseBladeForm, self).__init__(*args, **kwargs)
        self.helper.form_tag = False
        if not self.instance.pk:
            # New blade: the parent signage arrives via the initial data, and
            # the form posts back to the same URL with ?signage=<pk>.
            self.signage = kwargs.get('initial', {}).get('signage')
            self.helper.form_action += '?signage=%s' % self.signage.pk
        else:
            self.signage = self.instance.signage
        # Highest existing blade number on this signage (None when there is none).
        value_max = self.signage.blade_set.all().aggregate(max=Max('number'))['max']
        if settings.BLADE_CODE_TYPE == int:
            # Numeric codes: suggest max + 1 ("1" for the first blade).
            if not value_max:
                self.fields['number'].initial = "1"
            elif value_max.isdigit():
                self.fields['number'].initial = str(int(value_max) + 1)
        elif settings.BLADE_CODE_TYPE is str:
            # Letter codes: suggest the next letter ("A" for the first blade).
            # NOTE(review): no suggestion is made once "Z" is reached -- confirm intended.
            if not value_max:
                self.fields['number'].initial = "A"
            elif len(value_max) == 1 and "A" <= value_max[0] < "Z":
                self.fields['number'].initial = chr(ord(value_max[0]) + 1)

    def save(self, *args, **kwargs):
        # Attach the blade to the signage's topology and parent before saving.
        # NOTE(review): deliberately calls the *parent* of CommonForm's save
        # (skipping CommonForm.save) -- confirm this is intentional.
        self.instance.set_topology(self.signage)
        self.instance.signage = self.signage
        return super(CommonForm, self).save(*args, **kwargs)

    def clean_number(self):
        # Blade numbers must be unique per signage (excluding this blade on edit).
        blades = self.signage.blade_set.all()
        if self.instance.pk:
            blades = blades.exclude(number=self.instance.number)
        already_used = ', '.join([str(number) for number in blades.values_list('number', flat=True)])
        if blades.filter(number=self.cleaned_data['number']).exists():
            raise forms.ValidationError(_("Number already exists, numbers already used : %s" % already_used))
        return self.cleaned_data['number']

    class Meta:
        model = Blade
        fields = ['id', 'number', 'direction', 'type', 'condition', 'color']
if settings.TREKKING_TOPOLOGY_ENABLED:
    # The topology-enabled form previously re-declared every attribute of
    # BaseBladeForm (topology field, fieldslayout, save, clean_number, Meta);
    # it now inherits them and only adds the topology-field initialization.
    class BladeForm(BaseBladeForm):
        """Blade form bound to a topology: the geometry comes from the parent signage."""

        def __init__(self, *args, **kwargs):
            super(BladeForm, self).__init__(*args, **kwargs)
            # The blade inherits its position from the parent signage; the
            # topology stays editable so it can be adjusted on the map.
            self.fields['topology'].initial = self.signage
            self.fields['topology'].widget.modifiable = True
            self.fields['topology'].label = '%s%s %s' % (
                self.instance.signage_display,
                _("On %s") % _(self.signage.kind.lower()),
                '<a href="%s">%s</a>' % (self.signage.get_detail_url(), str(self.signage))
            )
else:
    class BladeForm(BaseBladeForm):
        """Blade form without topologies: shows the signage geometry read-only."""

        geomfields = ['topology']
        # Plain geometry field replaces the TopologyField declared on the base.
        topology = GeometryField(label="")

        def __init__(self, *args, **kwargs):
            super(BladeForm, self).__init__(*args, **kwargs)
            self.fields['topology'].initial = self.signage.geom
            self.fields['topology'].widget = LeafletWidget(attrs={'geom_type': 'POINT'})
            self.fields['topology'].widget.modifiable = False
            self.fields['topology'].label = '%s%s %s' % (
                self.instance.signage_display,
                _("On %s") % _(self.signage.kind.lower()),
                '<a href="%s">%s</a>' % (self.signage.get_detail_url(), str(self.signage))
            )
            self.helper.form_tag = False
if settings.TREKKING_TOPOLOGY_ENABLED:
    class BaseSignageForm(BaseInfrastructureForm):
        # Signage is placed through a topology on the path network.
        geomfields = ['topology']

        def __init__(self, *args, **kwargs):
            super(BaseSignageForm, self).__init__(*args, **kwargs)
            if not settings.SIGNAGE_LINE_ENABLED and settings.TREKKING_TOPOLOGY_ENABLED:
                # Swap in a point-only topology widget, preserving the
                # modifiable flag of the widget it replaces.
                modifiable = self.fields['topology'].widget.modifiable
                self.fields['topology'].widget = PointTopologyWidget()
                self.fields['topology'].widget.modifiable = modifiable
            self.helper.form_tag = False
else:
    class BaseSignageForm(BaseInfrastructureForm):
        # Without topologies, the raw geometry is edited directly.
        geomfields = ['geom']
class SignageForm(BaseSignageForm):
    """Concrete signage form: base infrastructure fields plus signage-specific ones."""

    fieldslayout = [
        Div(
            'structure',
            'name',
            'description',
            'type',
            'condition',
            'implantation_year',
            'published',
            'code',
            'printed_elevation',
            'manager',
            'sealing',
        )
    ]

    class Meta(BaseInfrastructureForm.Meta):
        model = Signage
        fields = BaseInfrastructureForm.Meta.fields + ['code', 'printed_elevation', 'manager', 'sealing']
|
<reponame>kasper190/Simple-TMS-server
from datetime import datetime
import os
from osgeo import (
gdal,
osr,
)
from PIL import Image
from shutil import rmtree
import sqlite3
import subprocess
import sys
from time import (
gmtime,
strftime
)
# Source directory for GeoTIFF inputs and destination root for generated tiles.
input_path = 'TIF_FILES/'
output_path = 'static/img/maps/'
class TifInfo(object):
    """Extracts georeferencing info (bounds, center, creation date) from a GeoTIFF."""

    def __init__(self, filename):
        # get the existing coordinate system
        self.ds = gdal.Open(input_path + filename)
        old_cs = osr.SpatialReference()
        old_cs.ImportFromWkt(self.ds.GetProjectionRef())

        # create the new coordinate system
        wgs84_wkt = """
        GEOGCS["WGS 84",
            DATUM["WGS_1984",
                SPHEROID["WGS 84",6378137,298.257223563,
                    AUTHORITY["EPSG","7030"]],
                AUTHORITY["EPSG","6326"]],
            PRIMEM["Greenwich",0,
                AUTHORITY["EPSG","8901"]],
            UNIT["degree",0.01745329251994328,
                AUTHORITY["EPSG","9122"]],
            AUTHORITY["EPSG","4326"]]"""
        new_cs = osr.SpatialReference()
        new_cs.ImportFromWkt(wgs84_wkt)

        # create a transform object to convert between coordinate systems
        self.transform = osr.CoordinateTransformation(old_cs, new_cs)

        # get the point to transform, pixel (0,0) in this case
        width = self.ds.RasterXSize
        height = self.ds.RasterYSize
        gt = self.ds.GetGeoTransform()
        # Dataset bounds in the source CRS, derived from the geotransform.
        self.minx = gt[0]
        self.miny = gt[3] + width * gt[4] + height * gt[5]
        self.maxx = gt[0] + width * gt[1] + height * gt[2]
        self.maxy = gt[3]

    def get_lat_long(self):
        # Two transformed corner points concatenated into one 6-tuple:
        # (x0, y0, z0, x1, y1, z1).
        # NOTE(review): the x/y (lon/lat) ordering of TransformPoint results
        # differs between GDAL 2 and GDAL 3 -- confirm against the GDAL version used.
        latlong = self.transform.TransformPoint(self.minx, self.miny)
        latlong += self.transform.TransformPoint(self.maxx, self.maxy)
        return latlong

    def get_center(self):
        # Midpoint of the two transformed corners (indices 0/3 and 1/4).
        latlong = self.get_lat_long()
        centerx = (latlong[0] + latlong[3]) / 2
        centery = (latlong[1] + latlong[4]) / 2
        return (centerx, centery)

    def get_created(self):
        # Reformat the TIFF creation timestamp; returns None when the tag is absent.
        created = self.ds.GetMetadataItem("TIFFTAG_DATETIME")
        if created:
            created = datetime.strptime(created, "%Y:%m:%d %H:%M:%S").strftime("%Y-%m-%d %H:%M:%S")
        return created
class TifToDb(object):
    """Persists GeoTIFF metadata records in the local SQLite database."""

    def __init__(self):
        self.con = sqlite3.connect('tms.sqlite3')

    def __del__(self):
        # Best-effort close when the helper is garbage-collected.
        if self.con:
            self.con.close()

    def db_record_save(self, filename):
        """Insert a metadata row for *filename*, or update it if the mapname exists."""
        self.mapname = os.path.splitext(filename)[0]
        self.extension = os.path.splitext(filename)[1]
        self.created = TifInfo(filename).get_created()
        latlong = TifInfo(filename).get_lat_long()
        # Corner coordinates from the concatenated (x0, y0, z0, x1, y1, z1) tuple.
        self.minx = latlong[0]
        self.miny = latlong[1]
        self.maxx = latlong[3]
        self.maxy = latlong[4]
        center = TifInfo(filename).get_center()
        self.centerx = center[0]
        self.centery = center[1]
        with self.con:
            cur = self.con.cursor()
            publish = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            try:
                cur.execute('''
                    INSERT INTO tiffmaps_overlay(mapname, extension, created, publish,
                    minx, miny, maxx, maxy, centerx, centery)
                    VALUES (:mapname, :extension, :created, :publish, :minx, :miny, :maxx, :maxy,
                    :centerx, :centery)''',
                            {
                                'mapname': self.mapname,
                                'extension': self.extension,
                                'created': self.created,
                                'publish': publish,
                                'minx': self.minx,
                                'miny': self.miny,
                                'maxx': self.maxx,
                                'maxy': self.maxy,
                                'centerx': self.centerx,
                                'centery': self.centery
                            }
                            )
            except sqlite3.IntegrityError:
                # mapname already present (unique constraint): refresh the row instead.
                updated = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                cur.execute('''
                    UPDATE tiffmaps_overlay SET extension=:extension, created=:created, updated=:updated,
                    minx=:minx, miny=:miny, maxx=:maxx, maxy=:maxy, centerx=:centerx,
                    centery=:centery WHERE mapname=:mapname''',
                            {
                                'mapname': self.mapname,
                                'extension': self.extension,
                                'created': self.created,
                                'updated': updated,
                                'minx': self.minx,
                                'miny': self.miny,
                                'maxx': self.maxx,
                                'maxy': self.maxy,
                                'centerx': self.centerx,
                                'centery': self.centery
                            }
                            )
        return True

    def db_record_remove(self, filename):
        """Delete the metadata row for *filename*; returns False when no row exists."""
        self.mapname = os.path.splitext(filename)[0]
        with self.con:
            cur = self.con.cursor()
            cur.execute('''SELECT * FROM tiffmaps_overlay WHERE mapname = ? ''', (self.mapname,))
            if not cur.fetchone():
                print('\x1b[1;31;38m' + 'The ' + filename + ' file does not exist in the database.' + '\x1b[0m')
                return False
            cur.execute('''DELETE FROM tiffmaps_overlay WHERE mapname = ? ''', (self.mapname,))
        return True
class TifToJPG(object):
    """Generates a small JPEG preview of a GeoTIFF image."""

    def img_save(self, input_path, output_path, filename):
        """Write a 560px-wide JPEG preview of *filename* under output_path/<mapname>/.

        Returns True on success, False when the file is missing, is not a
        TIFF, or the preview cannot be written.
        """
        mapname = os.path.splitext(filename)[0]
        try:
            img = Image.open(input_path + filename)
            # Equality, not identity: the original `is not 'TIFF'` compared
            # object identity and only worked by accident of string interning.
            if img.format != 'TIFF':
                print('\x1b[1;31;38m' + 'The image is not in TIFF format.' + '\x1b[0m')
                return False
        except IOError:
            print('\x1b[1;31;38m' + 'The file cannot be found, or the image cannot be opened and identified.'
                  + '\x1b[0m')
            return False
        try:
            basewidth = 560
            print('The ' + mapname + ' file write operation in progress. Please wait.')
            # Preserve the aspect ratio for the fixed preview width.
            wpercent = (basewidth / float(img.size[0]))
            hsize = int((float(img.size[1]) * float(wpercent)))
            img = img.resize((basewidth, hsize), Image.ANTIALIAS)
            img.save(output_path + mapname + '/' + mapname + '.jpg', optimize=True)
            img.close()
        except Exception:
            # The original used `finally` here, so it reported success (and
            # returned True) even immediately after reporting this failure.
            print('\x1b[1;31;38m' + 'Preview of the ' + filename + ' file cannot be saved.' + '\x1b[0m')
            return False
        print('\x1b[1;32;38m' + 'Preview of the ' + filename + ' file has been saved.' + '\x1b[0m')
        return True
class TifToTiles(object):
    """Builds TMS tile pyramids from GeoTIFFs, keeping DB records and previews in sync."""

    def img_save(self, input_path, output_path, filename):
        """Tile one GeoTIFF with gdal2tiles, then record it in the DB and save a preview."""
        if not os.path.exists(input_path + filename):
            print('\x1b[1;31;38m' + 'The ' + filename + ' file does not exist in the ' +
                  input_path + ' directory.' + '\x1b[0m')
            return False
        mapname = os.path.splitext(filename)[0]
        print('\x1b[1;34;38m' + 'Processing the ' + filename + ' file in progress. Please wait.' + '\x1b[0m')
        cmd_gdal = "python3 gdal2tiles.py -p mercator -z 0-19 -w none %s%s %s" % (
            input_path, filename, output_path + mapname
        )
        p1 = subprocess.Popen(cmd_gdal, shell=True, stderr=subprocess.PIPE)
        # Stream gdal2tiles progress (written to stderr) through to our stdout.
        while True:
            out = p1.stderr.read(1)
            if out == b'' and p1.poll() is not None:
                break
            if out:
                # The pipe yields bytes: the original compared them to str ''
                # and tested the pipe object itself, so nothing was ever
                # echoed (and a bytes write to sys.stdout would have raised).
                sys.stdout.buffer.write(out)
                sys.stdout.flush()
        # Parenthesized: the original relied on `%` binding tighter than `+`,
        # which happened to produce the same string.
        cmd_sh = "./filename.sh %s" % (output_path + mapname)
        p2 = subprocess.Popen(cmd_sh, shell=True, stdout=subprocess.PIPE)
        output, err = p2.communicate()
        print(output)
        if err:
            print(err)
            print('\x1b[1;31;38m' + 'The ' + filename + ' file cannot be saved.' + '\x1b[0m')
            return False
        else:
            TifToDb().db_record_save(filename)
            TifToJPG().img_save(input_path, output_path, filename)
            print('\x1b[1;32;38m' + 'The ' + filename + ' file has been saved.' + '\x1b[0m')
            return True

    def img_all_save(self, input_path, output_path):
        """Tile every .tif/.tiff file found in input_path."""
        filenames = [x for x in os.listdir(input_path) if x.endswith(".tif") or x.endswith(".tiff")]
        for filename in filenames:
            self.img_save(input_path, output_path, filename)
        if filenames:
            print('\x1b[1;32;42m' + 'All files have been saved.' + '\x1b[0m')
        else:
            print('\x1b[1;31;38m' + 'No files available in directory.' + '\x1b[0m')
        return True

    def img_remove(self, input_path, output_path, filename):
        """Remove the source file, its tile directory, and its DB record."""
        mapname = os.path.splitext(filename)[0]
        TifToDb().db_record_remove(filename)
        try:
            os.remove(input_path + filename)
        except OSError:
            print('\x1b[1;31;38m' + 'The ' + filename + ' file does not exist in the ' + input_path + ' directory.' +
                  '\x1b[0m')
        try:
            rmtree(output_path + mapname, ignore_errors=False)
        except OSError:
            print('\x1b[1;31;38m' + 'The ' + output_path + mapname + ' directory does not exist.' + '\x1b[0m')
            return False
        print('\x1b[1;32;38m' + 'The ' + filename + ' file has been removed.' + '\x1b[0m')
        return True

    def img_all_remove(self, input_path, output_path):
        """Remove every .tif/.tiff file in input_path and its generated artifacts."""
        filenames = [x for x in os.listdir(input_path) if x.endswith(".tif") or x.endswith(".tiff")]
        for filename in filenames:
            self.img_remove(input_path, output_path, filename)
        print('\x1b[1;32;38m' + 'All files have been removed.' + '\x1b[0m')
        return True
if __name__ == '__main__':
    # Accept at most two positional arguments: a command plus an optional filename.
    cli_args = sys.argv[1:]
    param_1 = cli_args[0] if len(cli_args) in (1, 2) else None
    param_2 = cli_args[1] if len(cli_args) == 2 else None
    # Dispatch on the command; commands taking a filename require param_2.
    if param_1 == 'save' and param_2 is not None:
        TifToTiles().img_save(input_path, output_path, param_2)
    elif param_1 == 'saveall' and param_2 is None:
        TifToTiles().img_all_save(input_path, output_path)
    elif param_1 == 'remove' and param_2 is not None:
        TifToTiles().img_remove(input_path, output_path, param_2)
    elif param_1 == 'removeall' and param_2 is None:
        TifToTiles().img_all_remove(input_path, output_path)
elif param_1 == 'manual' and param_2 is None:
print("""
\nGenerate tiles and create a record in the database of the mapname.tif file:
\x1b[1;34;38mpython3 geotiff.py save mapname.tif\x1b[0m
Generate tiles and create a records in the database of the all files from TIF_FILES/ directory:
\x1b[1;34;38mpython3 geotiff.py saveall\x1b[0m
Remove tiles and record in the database of the mapname.tif file:
\x1b[1;34;38mpython3 geotiff.py remove mapname.tif\x1b[0m
Remove tiles and records of all files from TIF_FILES/ directory:
\x1b[1;34;38mpython3 geotiff.py removeall\x1b[0m
Display the manual:
\x1b[1;34;38mpython3 geotiff.py manual\x1b[0m\n
""")
else:
print('\x1b[1;33;38m' + 'Wrong arguments. Type "python3 geotiff.py manual" to display the manual.' + '\x1b[0m')
|
<filename>cnt4713-computer-networking-projects/script.py
# <NAME>
# Dr. Bou-Harb
# 2017F - CNT 4713: Computer Networking Projects
# Final Project
import pyshark
import requests
import math
import pprint
import itertools, sys
import time
import json
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import string
import matplotlib.cm as cm
def pcap_analyzer(filename):
    """Analyze a pcap capture for DDoS characteristics.

    Tallies source IPs, transport protocols and per-bucket packet counts,
    geolocates the top 100 sources via ipinfo.io, prints a summary of the
    most likely attack origin, and plots the sources on a world map.

    args:
        filename: path to a pcap file readable by pyshark.
    """
    spinner = itertools.cycle(['-', '/', '|', '\\'])
    notification_point = 50000  # progress is reported every this many packets
    pp = pprint.PrettyPrinter(indent=4)
    pcap = pyshark.FileCapture(filename)
    ips = {}
    isps = {}
    ddos_type = {}
    rate = {}
    cities = {}
    print('Analyzing packets:')
    i = 0
    j = 1
    for pkt in pcap:
        # Console spinner for feedback on long captures.
        sys.stdout.write(next(spinner))
        sys.stdout.flush()
        sys.stdout.write('\b')
        # Renamed from `time` to avoid shadowing the imported time module.
        ts = float(pkt.sniff_timestamp)
        # NOTE(review): despite the name, this buckets by sniff-timestamp
        # seconds mod 1000, not milliseconds -- confirm the intended rate unit.
        ms = str(math.floor(ts % 1000))
        protocol = pkt.transport_layer
        src_addr = pkt.ip.src
        dst_addr = pkt.ip.dst
        if src_addr not in ips:
            ips[src_addr] = 0
        if protocol not in ddos_type:
            ddos_type[protocol] = 0
        if ms not in rate:
            rate[ms] = 0
        ips[src_addr] += 1
        ddos_type[protocol] += 1
        rate[ms] += 1
        i += 1
        if i == notification_point:
            print(str(i * j) + ' packets analyzed...')
            i = 0
            j += 1
    # j was incremented once per *completed* batch, so (j - 1) batches finished;
    # the original printed i + j * notification_point, overcounting by one batch.
    print('Finally ' + str(i + (j - 1) * notification_point) + ' packets analyzed...')

    # prep ips for processing: sort sources by descending packet count
    ips_lst = [(ips[k], k) for k in ips]
    ips_lst = sorted(ips_lst, reverse=True)
    x = []
    y = []
    z = []
    # Geolocate the top 100 sources and aggregate by ISP and city.
    for i in range(0, 100):
        ip = str(ips_lst[i][1])
        r = requests.get('http://ipinfo.io/' + ip)
        body = json.loads(r.content)
        # pp.pprint(body)
        # print(body)
        # print(ips_lst[i][0], ips_lst[i][1], body['country_name'])
        if 'loc' in body:
            location = body['loc']
        else :
            location = '0,0'
        location = location.split(',')
        x.append(float(location[1]))
        y.append(float(location[0]))
        z.append(ips_lst[i][0])
        if 'org' in body:
            isp = body['org']
        else:
            isp = 'hidden'
        if 'country' in body:
            country = body['country']
        else:
            country = 'hidden'
        if 'city' in body:
            city = body['city']
        else:
            city = 'hidden'
        location = city + ', ' + country
        if isp not in isps:
            isps[isp] = 0
        if location not in cities:
            cities[location] = 0
        # Weight each ISP/city by the packet count of the source.
        isps[isp] += 1 * ips_lst[i][0]
        cities[location] += 1 * ips_lst[i][0]
        # map.plot(x1, y1, 'ro', markersize=c/10., alpha=0.4)

    # prep rate of attack for display
    times = len(rate)
    rate_lst = [rate[k] for k in rate]
    average = np.mean(rate_lst)
    ddos_type_lst = [(ddos_type[k], k) for k in ddos_type]
    ddos_type_lst = sorted(ddos_type_lst, reverse=True)
    isps_lst = [(isps[k], k) for k in isps]
    isps_lst = sorted(isps_lst, reverse=True)
    cities_lst = [(cities[k], k) for k in cities]
    cities_lst = sorted(cities_lst, reverse=True)
    # print(average, 'pkts / s')
    # print(ddos_type)
    # print(isps)
    # print(cities)

    # Top-ranked entries are the most likely attack attributes.
    suspect_location = cities_lst[0][1]
    suspect_isp = isps_lst[0][1]
    suspect_ddos = ddos_type_lst[0][1]
    suspect_ip = ips_lst[0][1]
    print('This attack is mostly likely coming from', suspect_location, \
          'and hosted by the ISP known as', suspect_isp, \
          'at a rate of', average, 'pkts / s',
          'with a DDoS type most likely of', suspect_ddos,
          'from the following ip address:', suspect_ip)

    # Plot the top sources on a world map, sized by relative packet volume.
    m = Basemap(projection='mill',lon_0=-50,lat_0=60,resolution='l')
    m.drawcoastlines()
    m.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
    m.drawmeridians(np.arange(m.lonmin,m.lonmax+30,60),labels=[0,0,0,1])
    m.drawmapboundary(fill_color='black') # fill to edge
    m.drawcountries()
    m.fillcontinents(color='white',lake_color='black',zorder=0)
    # (Removed an unused `norm = np.linalg.norm(z)` computation.)
    avgz = np.mean(z[25:])
    sizes = [ i / avgz for i in z]
    x1,y1=m(x,y)
    m.scatter(x1,y1,s=sizes,marker="o",cmap=cm.cool,alpha=0.7)
    title = suspect_ip + ', in ' + suspect_location
    plt.title('Attacks, suspected origin: ' + title)
    plt.show()
opts = {}  # Maps "-name" flags to the value that follows them on the command line.
argv = sys.argv
while argv:  # While there are arguments left to parse...
    # Guard against an empty-string argument (argv[0][0] raised IndexError)
    # and against a trailing flag with no following value (argv[1] raised).
    if argv[0].startswith('-') and len(argv) > 1:
        opts[argv[0]] = argv[1]  # Add key and value to the dictionary.
    argv = argv[1:]  # Reduce the argument list by copying it starting from index 1
if '-file' in opts:
    pcap_analyzer(opts['-file'])
else:
    print('No file supplied')
|
<gh_stars>0
import copy
import functools
import itertools
import logging
import posixpath
import urllib.parse
import xml.etree.ElementTree as etree
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
import markdown
import markdown.extensions
import markdown.postprocessors
import markdown.preprocessors
import markdown.treeprocessors
import mkdocs.utils
from mkdocs_literate_nav import exceptions
# Namespaced logger so messages appear under the MkDocs plugin log hierarchy.
log = logging.getLogger(f"mkdocs.plugins.{__name__}")
log.addFilter(mkdocs.utils.warning_filter)
# Reuse Markdown's unescape pass to turn stashed entities back into text.
_unescape = markdown.postprocessors.UnescapePostprocessor().run
# A nav is a list of items: plain strings or one-entry {title: content} dicts.
NavItem = Union[str, Dict[Optional[str], Union[str, Any]]]
Nav = List[NavItem]
# Stack of directory roots being processed, used to detect recursion.
RootStack = Tuple[str, ...]
class NavParser:
def __init__(
self,
get_nav_for_dir: Callable[[str], Optional[Tuple[str, str]]],
globber,
implicit_index: bool = False,
):
self.get_nav_for_dir = get_nav_for_dir
self.globber = globber
self.implicit_index = implicit_index
self.seen_items = set()
self._warn = functools.lru_cache()(log.warning)
def markdown_to_nav(self, roots: Tuple[str, ...] = (".",)) -> Nav:
root = roots[0]
ext = _MarkdownExtension()
dir_nav = self.get_nav_for_dir(root)
if dir_nav:
nav_file_name, md = dir_nav
markdown.markdown(md, extensions=[ext])
if ext.nav:
self.seen_items.add(posixpath.normpath(posixpath.join(root, nav_file_name)))
first_item = None
if ext.nav and self.implicit_index and root != ".":
first_item = self.globber.find_index(root)
if first_item:
first_item = Wildcard(root, "/" + first_item, fallback=False)
if not ext.nav:
log.debug(f"Navigation for {root!r} will be inferred.")
return self._resolve_wildcards([Wildcard(root, "*", fallback=False)], roots)
return self._resolve_wildcards(self._list_element_to_nav(ext.nav, root, first_item), roots)
def _list_element_to_nav(
self, section: etree.Element, root: str, first_item: Optional[str] = None
):
assert section.tag in _LIST_TAGS
result = []
if first_item is not None:
if isinstance(first_item, str):
self.seen_items.add(first_item)
result.append(first_item)
for item in section:
assert item.tag == "li"
out_title = item.text
out_item = None
children = _iter_children_without_tail(item)
try:
child = next(children)
if not out_title and child.tag == "a":
link = child.get("href")
out_item = self._resolve_string_item(root, link)
if type(out_item) != DirectoryWildcard:
out_item_is_url = urllib.parse.urlparse(out_item)
if not all([out_item_is_url.scheme, out_item_is_url.netloc]):
out_item = urllib.parse.unquote(out_item)
out_title = _unescape("".join(child.itertext()))
child = next(children)
if child.tag in _LIST_TAGS:
out_item = self._list_element_to_nav(child, root, out_item)
child = next(children)
except StopIteration:
error = ""
else:
error = f"Expected no more elements, but got {_to_short_string(child)}.\n"
if out_title is None:
error += "Did not find any title specified." + _EXAMPLES
elif out_item is None:
if "*" in out_title:
out_item = Wildcard(root, out_title)
out_title = None
else:
error += "Did not find any item/section content specified." + _EXAMPLES
if error:
raise LiterateNavParseError(error, item)
if type(out_item) in (str, list, DirectoryWildcard) and out_title is not None:
out_item = {out_title: out_item}
result.append(out_item)
return result
def _resolve_string_item(self, root: str, link: str) -> Union["Wildcard", str]:
parsed = urllib.parse.urlsplit(link)
if parsed.scheme or parsed.netloc:
return link
abs_link = posixpath.normpath(posixpath.join(root, link))
self.seen_items.add(abs_link)
if link.endswith("/") and self.globber.isdir(abs_link):
return DirectoryWildcard(root, link)
return abs_link
def _resolve_wildcards(self, nav, roots: RootStack = (".",)) -> Nav:
def can_recurse(new_root):
if new_root in roots:
rec = " -> ".join(repr(r) for r in reversed((new_root,) + roots))
self._warn(f"Disallowing recursion {rec}")
return False
return True
# Ensure depth-first processing, so separate loop for recursive calls first.
for entry in nav:
if isinstance(entry, dict) and len(entry) == 1:
[(key, val)] = entry.items()
if isinstance(entry, str):
entry = val
if isinstance(entry, str):
self.seen_items.add(entry)
resolved: Nav = []
for entry in nav:
if isinstance(entry, dict) and len(entry) == 1:
[(key, val)] = entry.items()
if isinstance(val, list):
entry[key] = self._resolve_wildcards(val, roots)
elif isinstance(val, DirectoryWildcard):
entry[key] = (
self.markdown_to_nav((val.value,) + roots)
if can_recurse(val.value)
else val.fallback
)
elif isinstance(val, Wildcard):
entry[key] = self._resolve_wildcards([val], roots) or val.fallback
if entry[key]:
resolved.append(entry)
continue
assert not isinstance(entry, DirectoryWildcard)
if not isinstance(entry, Wildcard):
resolved.append(entry)
continue
any_matches = False
for item in self.globber.glob(entry.value.rstrip("/")):
any_matches = True
if item in self.seen_items:
continue
if self.globber.isdir(item):
title = mkdocs.utils.dirname_to_title(posixpath.basename(item))
subitems = self.markdown_to_nav((item,) + roots)
if subitems:
resolved.append({title: subitems})
else:
if entry.value.endswith("/"):
continue
resolved.append({None: item})
self.seen_items.add(item)
if not any_matches and entry.fallback:
resolved.append(entry.fallback)
return resolved
def resolve_yaml_nav(self, nav: Nav) -> Nav:
    """Resolve wildcards in a nav structure from mkdocs.yml.

    Non-list navs are returned unchanged.
    """
    if isinstance(nav, list):
        preprocessed = [self._resolve_yaml_nav(item) for item in nav]
        return self._resolve_wildcards(preprocessed)
    return nav
def _resolve_yaml_nav(self, item: NavItem):
    """Convert one YAML nav item into its internal form.

    Strings containing "*" become Wildcard objects; one-entry dicts get their
    value resolved recursively; everything else passes through unchanged.
    """
    if isinstance(item, str) and "*" in item:
        return Wildcard("", item)
    if isinstance(item, dict) and len(item) == 1:
        [(key, val)] = item.items()
        if isinstance(val, list):
            # A titled section: resolve each child item.
            val = [self._resolve_yaml_nav(x) for x in val]
        elif isinstance(val, str) and "*" in val:
            val = Wildcard("", val)
        elif isinstance(val, str):
            # Plain link: normalize and possibly turn into a DirectoryWildcard.
            val = self._resolve_string_item("", val)
        return {key: val}
    return item
_NAME = "mkdocs_literate_nav"
class _MarkdownExtension(markdown.extensions.Extension):
    """Markdown extension that captures the document's nav list during conversion."""

    _treeprocessor: "_Treeprocessor"

    @property
    def nav(self) -> Optional[etree.Element]:
        """The captured nav list element, or None if no list was found yet."""
        try:
            return self._treeprocessor.nav
        except AttributeError:
            return None

    def extendMarkdown(self, md):
        # Treat the document as plain text: don't stash raw HTML or entities,
        # otherwise link targets inside the nav would be mangled.
        md.inlinePatterns.deregister("html", strict=False)
        md.inlinePatterns.deregister("entity", strict=False)
        md.preprocessors.register(_Preprocessor(md), _NAME, 25)
        self._treeprocessor = _Treeprocessor(md)
        md.treeprocessors.register(self._treeprocessor, _NAME, 19)
class _Preprocessor(markdown.preprocessors.Preprocessor):
    """Replaces a `<!--nav-->` marker line with an HTML-stash placeholder.

    The placeholder is remembered on `self.nav_placeholder` so the
    treeprocessor can locate the list that follows the marker.
    """

    def run(self, lines):
        # NOTE: implemented as a generator; Markdown iterates the returned lines.
        for line in lines:
            if line.strip() == "<!--nav-->":
                self.nav_placeholder = self.md.htmlStash.store("")
                line = self.nav_placeholder + "\n"
            yield line
class _Treeprocessor(markdown.treeprocessors.Treeprocessor):
    """Finds the nav list element in the parsed document and stores a copy."""

    nav: etree.Element  # set only when a matching list element is found

    def run(self, doc):
        try:
            nav_placeholder = self.md.preprocessors[_NAME].nav_placeholder
        except AttributeError:
            # Will look for the last list.
            items = reversed(doc)
        else:
            # Will look for the first list after the last <!--nav-->.
            items = itertools.dropwhile(lambda el: el.text != nav_placeholder, doc)
        for el in items:
            if el.tag in _LIST_TAGS:
                # Deep-copy so later tree mutations don't affect the captured nav.
                self.nav = copy.deepcopy(el)
                break
_LIST_TAGS = ("ul", "ol")
_EXAMPLES = """
Examples:
* [Item title](item_content.md)
* Section title
* [Sub content](sub/content.md)
* *.md
"""
class Wildcard:
    """A nav entry containing a glob pattern, to be expanded later.

    `value` is the normalized pattern; `fallback` is the original last path
    part (or None), used when the pattern matches nothing.
    """

    # Subclasses may set this to drop the trailing slash from the value.
    trim_slash = False

    def __init__(self, *path_parts: str, fallback: bool = True):
        joined = posixpath.join(*path_parts)
        normalized = posixpath.normpath(joined.lstrip("/"))
        keep_slash = path_parts[-1].endswith("/") and not self.trim_slash
        self.value = normalized + "/" if keep_slash else normalized
        self.fallback = path_parts[-1] if fallback else None

    def __str__(self):
        return f"{type(self).__name__}({self.value!r})"
class DirectoryWildcard(Wildcard):
    """Wildcard for a directory link: the trailing slash is dropped from the value."""

    trim_slash = True
def _iter_children_without_tail(element: etree.Element) -> Iterator[etree.Element]:
for child in element:
yield child
if child.tail:
raise LiterateNavParseError(
f"Expected no text after {_to_short_string(child)}, but got {child.tail!r}.",
element,
)
def _to_short_string(el: etree.Element) -> str:
el = copy.deepcopy(el)
for child in el:
if child:
del child[:]
child.text = "[...]"
el.tail = None
return etree.tostring(el, encoding="unicode")
class LiterateNavParseError(exceptions.LiterateNavError):
    """Raised for malformed literate-nav Markdown; includes the offending element."""

    def __init__(self, message, el):
        # Append a trimmed serialization of the problematic element for context.
        super().__init__(message + "\nThe problematic item:\n\n" + _to_short_string(el))
|
<reponame>iacobo/continual
"""
Functions for plotting results and descriptive analysis of data.
"""
#%%
import time
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from datetime import datetime
from collections import defaultdict
# Project directories: this module lives one level below the repository root.
ROOT_DIR = Path(__file__).parents[1]
RESULTS_DIR = ROOT_DIR / 'results'

# Maps the short metric keys used in result files to human-readable names.
METRIC_FULL_NAME = {
    'Top1_Acc': 'Accuracy',
    'BalAcc': 'Balanced Accuracy',
    'Loss': 'Loss'
}

# Family of each continual-learning strategy (used to group rows in tables).
STRATEGY_CATEGORY = {'Naive':'Baseline',
                     'Cumulative':'Baseline',
                     'EWC':'Regularization',
                     'OnlineEWC':'Regularization',
                     'SI':'Regularization',
                     'LwF':'Regularization',
                     'Replay':'Rehearsal',
                     'GEM':'Rehearsal',
                     'AGEM':'Rehearsal',
                     'GDumb':'Rehearsal'}

# Fixed colour per strategy so the same strategy looks alike across figures.
STRATEGY_COLOURS = {'Naive':'dodgerblue',
                    'Cumulative':'deepskyblue',
                    'EWC':'orange',
                    'OnlineEWC':'gold',
                    'SI':'tomato',
                    'LwF':'peru',
                    'Replay':'forestgreen',
                    'GEM':'limegreen',
                    'AGEM':'yellowgreen',
                    'GDumb':'palegreen'}
def get_timestamp():
    """
    Returns the current local time as a 'YYYY-MM-DD-HH-MM-SS' string.

    Used to name per-run output directories for figures.
    """
    # datetime.now() is equivalent to datetime.fromtimestamp(time.time())
    # but avoids the redundant round-trip through a Unix timestamp.
    return datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
###################################
# Plot figs (metrics over epoch)
###################################
def stack_results(results, metric, mode, type='experience'):
    """
    Stacks results for multiple experiments along same axis in df.

    Either stacks:
        - multiple experiences' metric for same model/strategy, or
        - multiple strategies' [avg/stream] metrics for same model

    args:
        results: list (one entry per repeat) of dicts mapping metric-key -> (steps, values)
        metric: short metric key, e.g. 'Top1_Acc' / 'BalAcc' / 'Loss'
        mode: 'train' or 'test' (which evaluation stream to keep)
        type: unused -- NOTE(review): dead parameter that also shadows the
              builtin `type`; kept for interface compatibility.

    returns:
        long-format DataFrame with columns ['Epoch', 'Task', <metric full name>]
    """
    results_dfs = []
    # Get metrics for each training "experience"'s test set
    n_repeats = len(results)
    for i in range(n_repeats):
        metric_dict = defaultdict(list)
        for k,v in results[i].items():
            if f'{metric}_Exp/eval_phase/{mode}_stream' in k:
                # 'Exp000' / 'Exp00N' suffixes become 'Task N' labels.
                new_k = k.split('/')[-1].replace('Exp00','Task ').replace('Exp0','Task ')
                metric_dict[new_k] = v[1]  # v is (steps, values); keep values only
        df = pd.DataFrame.from_dict(metric_dict)
        df.index.rename('Epoch', inplace=True)
        # Wide -> long: one row per (epoch, task) pair.
        stacked = df.stack().reset_index()
        stacked.rename(columns={'level_1': 'Task', 0: METRIC_FULL_NAME[metric]}, inplace=True)
        results_dfs.append(stacked)
    # Concatenate repeats so seaborn can aggregate confidence intervals.
    stacked = pd.concat(results_dfs, sort=False)
    return stacked
def stack_avg_results(results_strats, metric, mode):
    """
    Stack avg results for multiple strategies across epoch.

    args:
        results_strats: dict mapping strategy name -> list (per repeat) of
                        dicts mapping metric-key -> (steps, values)
        metric: short metric key, e.g. 'Top1_Acc' / 'BalAcc' / 'Loss'
        mode: 'train' or 'test'

    returns:
        long-format DataFrame with columns ['Epoch', 'Strategy', <metric full name>]
    """
    results_dfs = []
    # Get metrics for each training "experience"'s test set
    n_repeats = len(list(results_strats.values())[0])
    for i in range(n_repeats):
        metric_dict = defaultdict(list)
        # Get avg (stream) metrics for each strategy
        for strat, metrics in results_strats.items():
            for k, v in metrics[i].items():
                # if train stream in keys "BalancedAccuracy_On_Trained_Experiences"
                if f'{METRIC_FULL_NAME[metric].replace(" ","")}_On_Trained_Experiences/eval_phase/{mode}_stream' in k:
                    # JA: early stopping means uneven length arrays. Must subsample at n_tasks
                    metric_dict[strat] = v[1]
                    break
                elif f'{metric}_Stream/eval_phase/{mode}_stream' in k:
                    # Fallback: plain stream-averaged metric.
                    metric_dict[strat] = v[1]
        df = pd.DataFrame.from_dict(metric_dict)
        df.index.rename('Epoch', inplace=True)
        # Wide -> long: one row per (epoch, strategy) pair.
        stacked = df.stack().reset_index()
        stacked.rename(columns={'level_1': 'Strategy', 0: METRIC_FULL_NAME[metric]}, inplace=True)
        results_dfs.append(stacked)
    # Concatenate repeats so seaborn can aggregate confidence intervals.
    stacked = pd.concat(results_dfs, sort=False)
    return stacked
def plot_metric(method, model, results, mode, metric, ax=None):
    """
    Plots given metric from dict.
    Stacks multiple plots (i.e. different per-task metrics) over training time.

    `method`: strategy name (used as subplot title)
    `model`: model name (used as y-axis label)
    `mode`: ['train','test'] (which stream to plot)
    """
    ax = ax or plt.gca()
    stacked = stack_results(results, metric, mode)
    # Only plot task accuracies after examples have been encountered
    # JA: this len() etc will screw up when plotting CI's
    tasks = stacked['Task'].str.split(' ',expand=True)[1].astype(int)
    n_epochs_per_task = (stacked['Epoch'].max()+1) // stacked['Task'].nunique()
    # Keep rows where the task has already started training (task_idx * epochs <= epoch).
    stacked = stacked[tasks*n_epochs_per_task<=stacked['Epoch'].astype(int)]
    sns.lineplot(data=stacked, x='Epoch', y=METRIC_FULL_NAME[metric], hue='Task', ax=ax)
    ax.set_title(method, size=10)
    ax.set_ylabel(model)
    ax.set_xlabel('')
def plot_avg_metric(model, results, mode, metric, ax=None):
    """
    Plots given metric from dict.
    Stacks multiple plots (i.e. different strategies' metrics) over training time.

    `model`: model name (used as y-axis label)
    `mode`: ['train','test'] (which stream to plot)
    """
    ax = ax or plt.gca()
    stacked = stack_avg_results(results, metric, mode)
    sns.lineplot(data=stacked, x='Epoch', y=METRIC_FULL_NAME[metric], hue='Strategy', ax=ax, palette=STRATEGY_COLOURS)
    ax.set_title('Average performance over all tasks', size=10)
    ax.set_ylabel(model)
    ax.set_xlabel('')
def barplot_avg_metric(model, results, mode, metric, ax=None):
    """Bar plot of each strategy's final (last-epoch) average metric.

    `model` is accepted for signature parity with plot_avg_metric but unused.
    """
    ax = ax or plt.gca()
    stacked = stack_avg_results(results, metric, mode)
    # Keep only the rows from the last recorded epoch.
    final_epoch = stacked['Epoch'].max()
    final_rows = stacked[stacked['Epoch'] == final_epoch]
    sns.barplot(data=final_rows, x='Strategy', y=METRIC_FULL_NAME[metric], ax=ax, palette=STRATEGY_COLOURS)
    ax.set_title('Final average performance over all tasks', size=10)
    ax.set_xlabel('')
###################################
# Clean up plots
###################################
def clean_subplot(i, j, axes, metric):
    """Removes top and right spines, titles, legend. Fixes y limits."""
    ax = axes[i,j]
    ax.spines[['top', 'right']].set_visible(False)
    # Only the first row keeps subplot titles.
    if i>0:
        ax.set_title('')
    # Only the top-left subplot keeps its legend (clean_plot hoists it to the figure).
    if i>0 or j>0:
        try:
            ax.get_legend().remove()
        except AttributeError:
            pass
    if metric=='Loss':
        ylim = (0,4)
    elif metric=='BalAcc':
        ylim = (0.5,1)
        plt.setp(axes, ylim=ylim)
    else:
        ylim = (0.5,1)
        # NOTE(review): ylim is computed but only applied for 'BalAcc';
        # the 'Loss' (0,4) limits are never set -- confirm this is intended.
        #plt.setp(axes, ylim=ylim)
def clean_plot(fig, axes, metric):
    """Cleans all subplots. Removes duplicate legends, adds one figure-level legend."""
    for i in range(len(axes)):
        for j in range(len(axes[0])):
            clean_subplot(i,j,axes,metric)
    # Hoist the surviving top-left legend to a single figure-level legend.
    handles, labels = axes[0,0].get_legend_handles_labels()
    axes[0,0].get_legend().remove()
    fig.legend(handles, labels, loc='center right', title='Task')
def annotate_plot(fig, domain, outcome, metric):
    """Adds x/y labels and suptitles."""
    fig.supxlabel('Epoch')
    fig.supylabel(METRIC_FULL_NAME[metric], x=0)
    fig.suptitle(f'Continual Learning model comparison \n'
                 f'Outcome: {outcome} | Domain Increment: {domain}', y=1.1)
###################################
# Decorating functions for plotting everything
###################################
def plot_all_model_strats(data, domain, outcome, mode, metric, timestamp, savefig=True):
    """Pairplot of all models vs strategies.

    Draws (1) a grid of per-experience plots (model rows x strategy columns)
    and (2) per-model stream plots (average line + final bar chart), saving
    both under results/figs/<data>/<outcome>/<domain>/<timestamp>/<mode>/.
    """
    # Load results
    with open(RESULTS_DIR / f'results_{data}_{outcome}_{domain}.json', encoding='utf-8') as handle:
        res = json.load(handle)
    models = res.keys()
    strategies = next(iter(res.values())).keys()
    n_rows = len(models)
    n_cols = len(strategies)
    # Experience plots
    fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True, figsize=(2*20*4/n_cols,20*n_rows/n_cols), squeeze=False, dpi=250)
    for i, model in enumerate(models):
        for j, strategy in enumerate(strategies):
            plot_metric(strategy, model, res[model][strategy], mode, metric, axes[i,j])
    clean_plot(fig, axes, metric)
    annotate_plot(fig, domain, outcome, metric)
    if savefig:
        file_loc = RESULTS_DIR / 'figs' / data / outcome / domain / timestamp / mode
        file_loc.mkdir(parents=True, exist_ok=True)
        plt.savefig(file_loc / f'Exp_{metric}.png')
    # Stream plots
    fig, axes = plt.subplots(n_rows, 2, sharex=False, sharey=True, figsize=(20,20*n_rows/n_cols), squeeze=False, dpi=250)
    for i, model in enumerate(models):
        plot_avg_metric(model, res[model], mode, metric, axes[i,0])
        barplot_avg_metric(model, res[model], mode, metric, axes[i,1])
    clean_plot(fig, axes, metric)
    annotate_plot(fig, domain, outcome, metric)
    if savefig:
        file_loc = RESULTS_DIR / 'figs' / data / outcome / domain / timestamp / mode
        file_loc.mkdir(parents=True, exist_ok=True)
        plt.savefig(file_loc / f'Stream_{metric}.png')
def results_to_latex():
    """Returns results in LaTeX format for paper tables. (Not yet implemented.)"""
    raise NotImplementedError
def plot_all_figs(data, domain, outcome):
    """Plots all results figs for paper: every mode x metric combination."""
    # One timestamp for the whole batch, so all figures land in the same directory.
    timestamp = get_timestamp()
    modes = ('train', 'test')
    metrics = ('Loss', 'Top1_Acc', 'BalAcc')
    for mode in modes:
        for metric in metrics:
            plot_all_model_strats(data, domain, outcome, mode, metric, timestamp)
#####################
# DESCRIPTIVE PLOTS
#####################
def plot_demographics():
    """
    Plots demographic information of eICU dataset.

    NOTE(review): the data load is commented out, leaving an empty DataFrame;
    the column accesses below will fail until it is restored.
    """
    df = pd.DataFrame() #data_processing.load_eicu(drop_dupes=True)
    _, axes = plt.subplots(3,2, sharey=True, figsize=(18,18), squeeze=False)
    df['gender'].value_counts().plot.bar(ax=axes[0,0], rot=0, title='Gender')
    df['ethnicity'].value_counts().plot.bar(ax=axes[1,0], rot=0, title='Ethnicity')
    df['ethnicity_coarse'].value_counts().plot.bar(ax=axes[1,1], rot=0, title='Ethnicity (coarse)')
    df['age'].plot.hist(bins=20, label='age', ax=axes[0,1], title='Age')
    df['region'].value_counts().plot.bar(ax=axes[2,0], rot=0, title='Region (North America)')
    df['hospitaldischargestatus'].value_counts().plot.bar(ax=axes[2,1], rot=0, title='Outcome')
    plt.show()
    plt.close()
########################
# LATEX TABLES
########################
def ci_bound(std, count, ci=0.95):
    """Return Confidence Interval radius (half-width of the interval).

    NOTE(review): (1 + ci) is used as an approximate z-score (1.95 for
    ci=0.95, close to the exact 1.96) -- confirm this approximation is intended.
    """
    z_approx = 1 + ci
    return z_approx * std / np.sqrt(count)
def results_to_table(data, domain, outcome, mode, metric, verbose=False, n='max'):
    """Build a (Category, Strategy) x Model summary table for one domain.

    args:
        data/domain/outcome: select which results JSON to load
        mode: 'train' or 'test'
        metric: short metric key, e.g. 'BalAcc'
        verbose: if True, report 'mean (lo, hi)' instead of LaTeX 'mean±ci'
        n: 'max' for final-epoch results, or an int epoch index

    returns:
        pivoted DataFrame indexed by (Category, Strategy) with Model columns
    """
    # Load results
    with open(RESULTS_DIR / f'results_{data}_{outcome}_{domain}.json', encoding='utf-8') as handle:
        res = json.load(handle)
    models = [k for k in res.keys() if k in ['MLP', 'CNN', 'LSTM', 'Transformer']]
    dfs = []
    for model in models:
        df = stack_avg_results(res[model], metric, mode)
        df['Model'] = model
        dfs.append(df)
    df = pd.concat(dfs)
    # Get final performance val
    if n=='max':
        df = df[df['Epoch']==df['Epoch'].max()]
        domain_col = domain
    else:
        df = df[df['Epoch']==n]
        domain_col = f'{domain} ({n})'
    stats = df.groupby(['Model','Strategy'])[METRIC_FULL_NAME[metric]].agg(['mean', 'count', 'std'])
    stats['ci95'] = ci_bound(stats['std'], stats['count'])
    if verbose:
        # BUG FIX: lo/hi were swapped (lo was mean + ci95, hi was mean - ci95).
        stats['ci95_lo'] = stats['mean'] - stats['ci95']
        stats['ci95_hi'] = stats['mean'] + stats['ci95']
        stats[domain_col] = stats.apply(lambda x: f'{x["mean"]:.3f} ({x.ci95_lo:.3f}, {x.ci95_hi:.3f})', axis=1)
    else:
        # Raw f-string: '\p' in a plain string is an invalid escape sequence.
        stats[domain_col] = stats.apply(lambda x: rf'{100*x["mean"]:.1f}$_{{\pm{100*x.ci95:.1f}}}$', axis=1)
    stats = pd.DataFrame(stats[domain_col])
    stats.reset_index(inplace=True)
    stats['Category'] = stats['Strategy'].apply(lambda x: STRATEGY_CATEGORY[x])
    # Keyword arguments: positional DataFrame.pivot args were removed in pandas 2.0.
    stats = stats.pivot(index=['Category','Strategy'], columns='Model')
    return stats
def generate_table_results(data='mimic3',outcome='mortality_48h',mode='test',metric='BalAcc', latex=False):
    """
    Latex table of main results.

    Concatenates per-domain result tables side-by-side; domains whose results
    are missing are skipped (best effort). If `latex` is True, the best value
    per column among the CL strategies is bold-faced and a LaTeX string is
    returned; otherwise the DataFrame itself.
    """
    domains = ['age','ethnicity_coarse','ward','time_season']
    dfs = []
    for domain in domains:
        try:
            dfs.append(results_to_table(data, domain, outcome, mode, metric))
        except Exception:
            # Best effort: skip domains with missing/corrupt results.
            # (Previously a bare `except:`, which also swallowed KeyboardInterrupt.)
            pass
    df = pd.concat(dfs, axis=1)
    if latex:
        idx = pd.IndexSlice
        # Only highlight within the actual CL strategies, not the baselines.
        sub_idx = idx['Regularization':'Rehearsal',:]
        df = df.style.highlight_max(
            axis=0,
            props='bfseries: ;',
            subset=sub_idx,
        ).to_latex()
    return df
def generate_hp_table_super(outcome='mortality_48h'):
    """
    Combines all tables into a nice latex format.

    Wraps per-domain hyperparameter tables in a single LaTeX table
    environment, inserting a \\multicolumn heading per domain.
    """
    prefix = r"""
\begin{table}[h]
\centering
"""
    box_prefix = r"""
\begin{adjustbox}{max width=\columnwidth}
"""
    # `old` is the tabular header emitted by to_latex(); `repl` injects a
    # domain heading row right after it.
    old = r"""\begin{tabular}{lllllll}"""
    repl = r"""\begin{tabular}{lllllll}
\multicolumn{7}{c}{\textsc{Age}} \\
"""
    box_suffix = r"""
\end{adjustbox}
"""
    suffix = fr"""
\caption{{Tuned hyperparameters for main experiments (outcome of {outcome}).}}
\label{{tab:hyperparameters}}
\end{{table}}
"""
    latex = prefix + box_prefix + generate_hp_table(outcome=outcome, domain='age').to_latex().replace(old, repl) \
        + generate_hp_table(outcome=outcome, domain='ethnicity_coarse').to_latex().replace(old, repl.replace('Age','Ethnicity (broad)')) + box_suffix \
        + box_prefix + generate_hp_table(outcome=outcome, domain='time_season').to_latex().replace(old, repl.replace('Age','Time (season)')) \
        + generate_hp_table(outcome=outcome, domain='ward').to_latex().replace(old, repl.replace('Age','ICU Ward')) + box_suffix + suffix
    return latex
def generate_table_hospitals(outcome='ARF_4h',mode='test',metric='BalAcc', hospitals=(6,12,18,24,30,36), latex=False):
    """
    Latex table of main results across increasing numbers of eICU hospitals.

    `hospitals` is the sequence of hospital counts (epoch indices) to include;
    the default is a tuple rather than a list to avoid a mutable default argument.
    """
    dfs = [results_to_table('eicu', 'hospital', outcome, mode, metric, n=n) for n in hospitals]
    df = pd.concat(dfs, axis=1)
    if latex:
        idx = pd.IndexSlice
        # Only highlight within the actual CL strategies, not the baselines.
        sub_idx = idx['Regularization':'Rehearsal',:]
        df = df.style.highlight_max(
            axis=0,
            props='bfseries: ;',
            subset=sub_idx,
        ).to_latex()
    return df
def generate_hp_table(data='mimic3',outcome='mortality_48h',domain='age'):
    """Build a (Model, Strategy)-indexed table of tuned hyperparameters.

    Reads config_{model}_{strategy}.json files under config/<data>/<outcome>/<domain>;
    missing or unreadable configs are skipped (best effort).
    """
    models = ['MLP','CNN','LSTM','Transformer']
    strategies = ['EWC', 'OnlineEWC', 'LwF', 'SI', 'Replay','AGEM','GEM']
    dfs = []
    # Normalise strategy-specific hyperparameter names onto shared columns.
    col_rename_map = {'ewc_lambda':'lambda', 'alpha':'lambda', 'si_lambda':'lambda',
                      'memory_strength':'temperature', 'mem_size':'sample_size'}
    for model in models:
        for strategy in strategies:
            try:
                with open(ROOT_DIR / 'config' / data / outcome / domain / f'config_{model}_{strategy}.json', encoding='utf-8') as handle:
                    res = json.load(handle)['strategy']
                df = pd.DataFrame([res]).rename(columns=col_rename_map)
                df['Model'] = model
                df['Strategy'] = strategy
                dfs.append(df)
            except Exception:
                # Best effort: skip absent/malformed configs.
                # (Previously a bare `except:`, which also swallowed KeyboardInterrupt.)
                pass
    df = pd.concat(dfs)
    df = df.set_index(['Model','Strategy'])
    # np.nan: the np.NaN alias was removed in NumPy 2.0.
    df = df.replace(np.nan, '')
    df = df.drop('mode', axis=1)
    return df
# %%
|
<gh_stars>0
import uiautomator2 as u2
import time
from utils import *
from cv import *
from Automator import *
import matplotlib.pylab as plt
# plt.ion()
# fig, ax = plt.subplots(1)
# plt.show()
# Module-level automation driver: connects to the device and starts the session.
a = Automator()
a.start()
def login_auth(ac,pwd):
    """Log in with the given account/password; complete real-name verification if prompted."""
    need_auth = a.login(ac=ac,pwd=pwd)
    if need_auth:
        # Generate a random name and ID number for the verification dialog.
        auth_name,auth_id = random_name(), CreatIDnum()
        a.auth(auth_name =auth_name ,auth_id = auth_id)
def init_acc():  # original author's new-account setup routine; not suitable for farm accounts
    """Drive a fresh account through the tutorial until the home screen is reached."""
    while True:
        screen_shot = a.d.screenshot(format="opencv")
        state_flag = a.get_screen_state(screen_shot)
        if state_flag=='dark':
            # Screen dimmed: try guided-tutorial clicking.
            print('画面变暗,尝试进入引导模式点击')
            screen_shot = a.d.screenshot(format="opencv")
            a.jiaoxue(screen_shot)
        elif state_flag=='zhandou':
            # Speed-up button detected: enter battle mode.
            print('侦测到加速按钮, 进入战斗模式')
            a.zhandou()
        elif state_flag=='shouye':
            # Home screen reached: tutorial finished, exit the loop.
            print('恭喜完成所有教学内容, 跳出循环')
            a.d.click(1, 1)
            time.sleep(1)
            break
        else:
            # Unknown state: dismiss any of the common transition dialogs.
            template_paths = ['img/tiaoguo.jpg', 'img/ok.jpg','img/xiayibu.jpg', 'img/caidan.jpg', 'img/caidan_yuan.jpg',
                              'img/caidan_tiaoguo.jpg', 'img/dengji.jpg','img/tongyi.jpg','img/niudan_jiasu.jpg']
            a.guochang(screen_shot,template_paths)
def init_home():
    """Return to the home screen (gift icon visible), then open the bottom tab."""
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(1,1)
        time.sleep(0.5)  # make sure we get back to the home screen
    time.sleep(0.5)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(1,1)
        time.sleep(0.2)  # make sure we get back to the home screen
    a.d.click(100,505)
def shouqu():  # collect all gifts
    """Collect all pending gifts from the gift box, then return to the home screen."""
    while True:  # lock onto the home screen
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        time.sleep(0.3)
        a.d.click(1,1)
    a.guochang(screen_shot_, ['img/liwu.jpg'],suiji=0)
    while True:  # lock onto the collection history (gift box)
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/shouqulvli.jpg'):
            a.guochang(screen_shot_, ['img/quanbushouqu.jpg'],suiji=0)
            time.sleep(1)
            a.d.click(589,472)  # 2020-5-29 19:41 bug fixed
            break
    while True:  # lock back onto the home screen
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(1,1)  # the gift box is special: clicking (100,505) would be blocked
        time.sleep(0.3)
def shouqurenwu():  # collect mission rewards
    """Open the missions screen, collect all rewards, and return to the home screen."""
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/renwu.jpg'):
            a.guochang(screen_shot_, ['img/renwu.jpg'],suiji=0)
            break
        a.d.click(1,1)
        time.sleep(1)
    while True:  # lock onto "collect all"
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/quanbushouqu.jpg'):
            a.guochang(screen_shot_, ['img/quanbushouqu.jpg'],suiji=0)
            time.sleep(1)
            break
    while True:  # lock onto the close button
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/guanbi.jpg'):
            a.guochang(screen_shot_, ['img/guanbi.jpg'],suiji=0)
            time.sleep(1)
            break
    while True:  # lock back onto the home screen
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        time.sleep(0.5)
def niudan():  # gacha routine
    """Spin the gacha repeatedly until the gem-purchase prompt (out of currency) appears."""
    a.d.click(751,505)
    time.sleep(1)
    while True:
        time.sleep(1)
        # Buttons that advance the gacha flow, clicked whenever visible.
        active_list = ['img/sheding.jpg','img/ok.jpg','img/niudan_jiasu.jpg','img/zaicichouqu.jpg','img/shilian.jpg']
        screen_shot = a.d.screenshot(format="opencv")
        a.guochang(screen_shot,active_list, suiji=1)
        screen_shot_ = a.d.screenshot(format="opencv")
        state_flag = a.get_screen_state(screen_shot_)
        if state_flag == 'baoshigoumai':
            # Out of currency: close the purchase dialog and stop.
            print('没钱了, 关闭')
            a.d.click(373, 370)
            break
def goumaimana():
    """Buy mana: one initial purchase plus seven 10x purchases, then return home."""
    a.d.click(189,62)
    while True:  # lock onto the confirmation (cancel-2) dialog
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/quxiao2.jpg'):
            break
        a.d.click(189,62)
        time.sleep(0.5)
    a.d.click(596,471)  # position of the first purchase button
    while True:  # lock onto OK
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/ok.jpg'):
            a.guochang(screen_shot_, ['img/ok.jpg'],suiji=0)
            break
    for i in range(7):  # buy the remaining 7 times
        while True:  # lock onto the confirmation (cancel-2) dialog
            screen_shot_ = a.d.screenshot(format="opencv")
            if a.is_there_img(screen_shot_,'img/quxiao2.jpg'):
                break
        a.d.click(816,478)  # buy 10x
        while True:  # lock onto OK
            screen_shot_ = a.d.screenshot(format="opencv")
            if a.is_there_img(screen_shot_,'img/ok.jpg'):
                a.guochang(screen_shot_, ['img/ok.jpg'],suiji=0)
                break
    while True:  # lock onto the home screen
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(1,1)
        time.sleep(0.5)  # make sure we get back to the home screen
def write_log(account, pwd):  # "box" recognition: record which characters the account owns
    """Open the character list, detect owned characters by avatar templates,
    and append `account<TAB>pwd<TAB>names,` to jieguo.txt."""
    time.sleep(1)
    a.d.click(209, 519)
    time.sleep(1)
    a.d.click(659, 30)
    time.sleep(1)
    a.d.click(728, 142)
    time.sleep(1)
    a.d.click(588, 481)
    time.sleep(1)
    base_path = 'img/touxiang/'
    touxiang_path_list = []
    # NOTE(review): `os` is not imported in this file directly -- presumably
    # provided by one of the star imports (utils/cv/Automator); confirm.
    for touxiang_path in os.listdir(base_path):
        touxiang_path_list.append(base_path+touxiang_path)
    screen_shot = a.d.screenshot(format="opencv")
    exist_list = a.get_butt_stat(screen_shot, touxiang_path_list)
    print(exist_list)
    st = ''
    for i in exist_list:
        # Avatar filename (without extension) is the character name.
        st = st + str(os.path.basename(i).split('.')[0]) + ','
    with open('jieguo.txt', 'a') as f:
        f.write(account+'\t'+ pwd+'\t'+st+'\n')
def change_acc():  # switch account
    """Log out of the current account back to the title screen."""
    time.sleep(1)
    a.d.click(871, 513)
    a.d.click(871, 513)
    a.d.click(871, 513)
    time.sleep(1)
    find_click('main_page/go_back_title.png')
    time.sleep(1)
    find_click('img/ok.jpg')
    time.sleep(1)
def goumaitili():  # buy stamina; NOTE: coordinates assume this starts on the home screen
    """Buy stamina three times. Adjust coordinates if run from elsewhere."""
    for i in range(3):
        while True:
            screen_shot_ = a.d.screenshot(format="opencv")
            if a.is_there_img(screen_shot_,'img/liwu.jpg'):
                break
            a.d.click(100,505)
            time.sleep(1)  # home-screen lock: make sure we are back on home
        a.d.click(320,31)
        time.sleep(1)
        screen_shot = a.d.screenshot(format="opencv")
        a.guochang(screen_shot,['img/ok.jpg'], suiji=0)
        time.sleep(1)
        screen_shot = a.d.screenshot(format="opencv")
        a.guochang(screen_shot,['img/zhandou_ok.jpg'], suiji=1)
    a.d.click(100,505)  # click home once more to be safe
def find_click(name):
    """
    Poll the screen (up to 10 attempts) for template image `name` and click it.

    Returns True if the image was found and clicked, False otherwise.
    (Backward compatible: the function previously returned None either way,
    and existing callers ignore the return value.)
    """
    for _ in range(10):
        screen_shot_ = a.d.screenshot(format="opencv")
        flag = a.is_there_img(screen_shot_, name)
        if flag:
            x, y = flag
            a.d.click(x, y)
            time.sleep(0.5)
            return True
    # Previously printed "not found"+name with no separator, e.g. "not foundimg/ok.jpg".
    print("not found: " + name)
    return False
def hanghui():  # automatic guild donation
    """Open the guild screen, donate (max amount), and return to the home screen."""
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        time.sleep(1)  # home-screen lock: make sure we start from home
    time.sleep(1)
    a.d.click(693, 436)
    time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        state_flag = a.get_screen_state(screen_shot_)
        if state_flag == 'hanghui':
            find_click('img/juanzeng.jpg')
            time.sleep(1)
            find_click('img/max.jpg')
            time.sleep(1)
            find_click('img/hanghui_ok.jpg')
            time.sleep(1)
            break
        a.d.click(100, 505)
        time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        a.d.click(1,1)
        time.sleep(1)  # home-screen lock: make sure we end on home
def shuatuzuobiao(x, y, times=1):  # stage sweep: (x, y) is the stage icon, times = number of runs
    """Sweep the stage at (x, y) `times` times using sweep tickets."""
    a.d.click(x,y)
    time.sleep(0.5)
    while True:  # lock onto the plus button
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/jiahao.jpg'):
            break
        a.d.click(x,y)
        time.sleep(0.5)
    screen_shot = a.d.screenshot(format="opencv")
    for i in range(times-1):  # the counter starts at 1 run
        a.guochang(screen_shot,['img/jiahao.jpg'])
        time.sleep(0.2)
    time.sleep(0.3)
    # Fixed position of the sweep-ticket button; OpenCV matching would work too
    # but is too slow and cannot freely set the run count.
    a.d.click(758,330)
    time.sleep(0.3)
    # screen_shot = a.d.screenshot(format="opencv")
    # a.guochang(screen_shot,['img/shiyongsanzhang.jpg'])
    screen_shot = a.d.screenshot(format="opencv")
    a.guochang(screen_shot,['img/ok.jpg'])
    while True:
        a.d.click(1,1)
        time.sleep(0.3)
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/normal.jpg'):
            break
def shuatutiaozhan(x,y,direction = True,times=1):
    """Run the 'challenge' stage at map coordinates (x, y) once.

    `direction`: True swipes the map to the far right first, False to the far left.
    `times` is currently unused -- the stage is fought once per call.
    """
    if direction:
        a.d.drag(600, 270, 200, 270, 0.1)  # swipe to the far right
    else:
        a.d.drag(200, 270, 600, 270, 0.1)  # drag to the far left
    time.sleep(5)
    a.d.click(x, y)
    print("click"+str(x)+","+str(y))
    time.sleep(0.5)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/tiaozhan.jpg'):
            a.d.click(842, 464)
            break
        time.sleep(0.5)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/zhandoukaishi.png'):
            a.d.click(829, 449)
            break
        time.sleep(0.5)
    time.sleep(30)  # wait for the battle to play out
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        a.d.click(476, 372)  # dismiss level-up popup
        time.sleep(0.5)
        a.d.click(378, 378)  # dismiss limited-time-shop popup
        if a.is_there_img(screen_shot_, 'img/xiayibu.jpg'):
            a.d.click(839, 497)
            break
        time.sleep(0.5)
    time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/xiayibu.jpg'):
            a.d.click(839, 497)
            break
        if a.is_there_img(screen_shot_, 'img/expedition/return_expedition.png'):
            a.d.click(825, 491)
            break
        time.sleep(0.5)
    for i in range(3):  # close any trailing dialogs
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/guanbi.jpg'):
            a.d.click(839, 497)
            break
        time.sleep(0.5)
def shuatufaster(flag = 1):
    """Fast stage farming: enter adventure mode and sweep chapter-10 stages.

    `flag` == 1 sweeps the full left-side stage list; otherwise a single
    right-side stage is swept 30 times.
    """
    # Enter adventure mode
    a.d.click(480, 505)
    time.sleep(0.5)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        #print(screen_shot_)
        print("find zhuxian")
        a.d.click(480, 505)
        if a.is_there_img(screen_shot_,'img/zhuxianguangka.png'):
            break
    a.d.click(562, 253)
    time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        print("find normal")
        if a.is_there_img(screen_shot_,'img/normal.jpg'):
            break
    if flag == 1:
        a.d.drag(200, 270, 600, 270, 0.1)  # drag to the far left
        time.sleep(2)
        shuatuzuobiao(518,332,4)#10-5
        shuatuzuobiao(603,238,4)#10-4
        shuatuzuobiao(430,239,4)#10-3
        shuatuzuobiao(287,206,4)#10-2
        shuatuzuobiao(146,197,4)#10-1
        shuatuzuobiao(594,429,10)#10-7
        shuatuzuobiao(411,408,10)#10-6
        shuatuzuobiao(690,362,30)#10-8
    else:
        a.d.drag(600, 270, 200, 270, 0.1)  # swipe to the far right
        time.sleep(2)
        shuatuzuobiao(583, 259,30)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        time.sleep(1)  # make sure we get back to the home screen
def shuatu():  # stage-farming routine; NOTE: must be started from the home screen
    """Fight the currently-enabled challenge stages (5-11..5-13, 6-x, h1..h4).

    Most stage lists below are commented out; uncomment the chapters you want.
    The commented block at the top re-enters adventure mode if needed.
    """
    # Enter adventure mode
    # a.d.click(480, 505)
    # time.sleep(0.5)
    # while True:
    #     screen_shot_ = a.d.screenshot(format="opencv")
    #     #print(screen_shot_)
    #     print("find zhuxian")
    #     if a.is_there_img(screen_shot_,'img/zhuxianguangka.png'):
    #         break
    # a.d.click(562, 253)
    # time.sleep(1)
    # while True:
    #     screen_shot_ = a.d.screenshot(format="opencv")
    #     print("find normal")
    #     if a.is_there_img(screen_shot_,'img/normal.jpg'):
    #         break
    # shuatutiaozhan(374, 234,1) #1-4
    # shuatutiaozhan(481, 284,1) #1-5
    # shuatutiaozhan(547, 374)#1-6
    # shuatutiaozhan(606, 305)#1-7
    # shuatutiaozhan(641, 217)#1-8
    # shuatutiaozhan(735, 232)#1-9
    # shuatutiaozhan(842, 324)#1-10
    # shuatutiaozhan(842, 324)
    # shuatutiaozhan(132, 414,False) # 2-1
    # shuatutiaozhan(249, 414,False) # 2-2
    # shuatutiaozhan(387, 378,False) # 2-3
    # shuatutiaozhan(322, 266,False) # 2-4
    # shuatutiaozhan(228, 214,False) # 2-5
    # shuatutiaozhan(341, 170,False) # 2-6
    # shuatutiaozhan(443, 234,False) # 2-7
    # shuatutiaozhan(506, 320,False) # 2-8
    # shuatutiaozhan(606, 368,False) # 2-9
    # shuatutiaozhan(731, 372,False) # 2-10
    # shuatutiaozhan(835, 345,False) # 2-11
    # shuatutiaozhan(823, 241,False) # 2-12
    # shuatutiaozhan(128, 184) #3-1
    # shuatutiaozhan(195, 314) #3-2
    # shuatutiaozhan(293, 218) #3-3
    # shuatutiaozhan(420, 239) # 3-4
    # shuatutiaozhan(395, 334) # 3-5
    # shuatutiaozhan(478, 424) # 3-6
    # shuatutiaozhan(522, 299) # 3-7
    # shuatutiaozhan(635, 191) # 3-8
    # shuatutiaozhan(698, 262) # 3-9
    # shuatutiaozhan(685, 395) # 3-10
    # shuatutiaozhan(821, 353) # 3-11
    # shuatutiaozhan(821, 214) # 3-12
    #
    # for i in range(3):
    #     screen_shot_ = a.d.screenshot(format="opencv")
    #     flag = a.is_there_img(screen_shot_, 'img/tiaozhan.jpg')
    #     if flag:
    #         x,y = flag
    #         a.d.click(x, y)
    #         time.sleep(0.5)
    #         break
    # shuatutiaozhan(199, 243,False) # 4-1
    # shuatutiaozhan(295, 314,False) # 4-2
    # shuatutiaozhan(401, 262,False) # 4-3
    # shuatutiaozhan(510, 249,False) # 4-4
    # shuatutiaozhan(503, 370,False) # 4-5
    # shuatutiaozhan(631, 351,False) # 4-6
    # shuatutiaozhan(257, 224) # 4-7
    # shuatutiaozhan(360, 280) # 4-8
    # shuatutiaozhan(480, 228) # 4-9
    # shuatutiaozhan(608, 255) # 4-10
    # shuatutiaozhan(746, 249) # 4-11
    # shuatutiaozhan(773, 326) # 4-12
    # shuatutiaozhan(645, 418) # 4-13
    # # #
    # #
    # for i in range(5):
    #     screen_shot_ = a.d.screenshot(format="opencv")
    #     flag = a.is_there_img(screen_shot_, 'img/guanbi.jpg')
    #     if flag:
    #         x,y = flag
    #         a.d.click(x, y)
    #         time.sleep(0.5)
    #         break
    # shuatutiaozhan(134, 187,False) # 5-1
    # shuatutiaozhan(259, 182,False) # 5-2
    # shuatutiaozhan(357, 230,False) # 5-3P
    # shuatutiaozhan(501, 234,False) # 5-4
    # shuatutiaozhan(443, 320,False) # 5-5
    # shuatutiaozhan(353, 407,False) # 5-6
    # shuatutiaozhan(547, 422,False) # 5-7
    # shuatutiaozhan(197, 382) # 5-8
    # shuatutiaozhan(297, 305) # 5-9
    # shuatutiaozhan(426, 372) # 5-10
    shuatutiaozhan(489, 272) # 5-11
    shuatutiaozhan(600, 243) # 5-12
    shuatutiaozhan(737, 245) # 5-13
    for i in range(5):  # close any trailing dialogs
        screen_shot_ = a.d.screenshot(format="opencv")
        flag = a.is_there_img(screen_shot_, 'img/guanbi.jpg')
        if flag:
            x,y = flag
            a.d.click(x, y)
            time.sleep(0.5)
            break
    #
    shuatutiaozhan(203, 376, False) # 6-1
    shuatutiaozhan(301, 291, False) # 6-2
    shuatutiaozhan(401, 272, False) # 6-3
    shuatutiaozhan(389, 393, False) # 6-4
    shuatutiaozhan(522, 349, False) # 6-5
    shuatutiaozhan(637, 397, False) # 6-6
    shuatutiaozhan(645, 255, False) # 6-7
    shuatutiaozhan(771, 228, False) # 6-8
    shuatutiaozhan (247, 339) # h1-1
    shuatutiaozhan (462, 255) # h1-2
    shuatutiaozhan (700, 311) # h1-3
    shuatutiaozhan(293, 255) # h2-1
    shuatutiaozhan(464, 347) # h2-1
    shuatutiaozhan(718, 335) # h2-1
    shuatutiaozhan(255, 259) # h3-1
    shuatutiaozhan(480, 328) # h3-1
    shuatutiaozhan(733, 278) # h3-1
    shuatutiaozhan(257, 276) # h4-1
    shuatutiaozhan(497, 226) # h4-1
    shuatutiaozhan(785, 245) # h4-1
    # shuatuzuobiao(821,299,3)#10-17
    # shuatuzuobiao(703,328,3)#10-16
    # shuatuzuobiao(608,391,3)#10-15
    # shuatuzuobiao(485,373,3)#10-14
    # shuatuzuobiao(372,281,3)#10-13
    # shuatuzuobiao(320,421,3)#10-12
    # shuatuzuobiao(172,378,3)#10-11
    # shuatuzuobiao(251,235,3)#10-10
    # shuatuzuobiao(111,274,3)#10-9
    #
    # a.d.drag(200,270,600,270,0.1)  # drag to the far left
    # time.sleep(2)
    #
    # shuatuzuobiao(690,362,3)#10-8
    # shuatuzuobiao(594,429,3)#10-7
    # shuatuzuobiao(411,408,3)#10-6
    # shuatuzuobiao(518,332,3)#10-5
    # shuatuzuobiao(603,238,3)#10-4
    # shuatuzuobiao(430,239,3)#10-3
    # shuatuzuobiao(287,206,3)#10-2
    # shuatuzuobiao(146,197,3)#10-1
    # while True:
    #     screen_shot_ = a.d.screenshot(format="opencv")
    #     if a.is_there_img(screen_shot_,'img/liwu.jpg'):
    #         break
    #     a.d.click(100,505)
    #     time.sleep(1)  # make sure we get back to the home screen
def expedition():
    """Collect the experience and mana expedition stages, then back out."""
    while True:  # open the experience expedition
        screen_shot_ = a.d.screenshot(format="opencv")
        flag = a.is_there_img(screen_shot_, 'img/expedition/experience.png')
        if flag:
            x,y = flag
            a.d.click(x, y)
            time.sleep(0.5)
            break
    shuatutiaozhan(658, 149)
    shuatutiaozhan(658, 149)
    time.sleep(0.5)
    while True:  # open the mana expedition
        screen_shot_ = a.d.screenshot(format="opencv")
        flag = a.is_there_img(screen_shot_, 'img/expedition/mana.png')
        if flag:
            x, y = flag
            a.d.click(x, y)
            time.sleep(0.5)
            break
    shuatutiaozhan(658, 149)
    shuatutiaozhan(658, 149)
    a.d.click(38, 34)  # back button
    time.sleep(0.5)
def join_farm():
    """Open the guild screen (the search-and-join flow is currently disabled)."""
    a.d.click(96, 507)
    time.sleep(1)
    find_click('img/hanghui.png')
    time.sleep(1)
    # Disabled: search for the "zhfarm" guild and apply to join it.
    # find_click('img/hanghuisheding.png')
    # time.sleep(5)
    # while True:
    #     a.d.click(855.0, 80.0)
    #     time.sleep(1)
    #     screen_shot_ = a.d.screenshot(format="opencv")
    #     flag = a.is_there_img(screen_shot_, 'img/guild/guild_serch.png')
    #     if flag:
    #         time.sleep(1)
    #         break
    # time.sleep(1)
    # a.d.click(493, 180)
    # time.sleep(2)
    # a.d(text="请输入行会名").send_keys("zhfarm")
    # time.sleep(2)
    # a.d.click(493, 180)
    # a.d.click(562, 430)
    # find_click('img/zhfarm.png')
    # time.sleep(5)
    # a.d.click(835, 447)
    # time.sleep(1)
    # a.d.click(591, 376)
    # time.sleep(1)
    a.d.click(113, 499)
    time.sleep(1)
    return
def flatter():
    """Open the guild member list and 'like' a member, then return to the home screen."""
    a.d.click(96, 507)
    time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/hanghui.png'):
            a.d.click(687, 430)
            break
        time.sleep(0.5)
    time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/guild/member_info.png'):
            a.d.click(247, 355)
            break
        time.sleep(0.5)
    time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/guild/guild_zhfarm.png'):
            a.d.click(641, 91)
            break
        time.sleep(0.5)
    time.sleep(1)
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_, 'img/ok.jpg'):
            a.d.click(510, 232)
            a.d.click(583, 368)
            break
        time.sleep(0.5)
    time.sleep(1)
    a.d.click(823, 197)
    time.sleep(1)
    for i in range(3):  # back out to the home screen
        a.d.click(92, 495)
# def write_log():
# time.sleep(1)
def dixiacheng():  # dungeon (dixiacheng)
    """Run one dungeon pass.

    Enters the dungeon, selects floor 1, borrows a support unit, starts the
    battle with fast-forward enabled, then retreats back to the home screen.
    Relies on module-level globals: ``a`` (device/image helper) and
    ``find_click``.
    """
    a.d.click(480, 505)
    time.sleep(1)
    # keep clicking until the adventure ('maoxian') screen is reached
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        state_flag = a.get_screen_state(screen_shot_)
        if state_flag == 'maoxian':
            break
        a.d.click(480, 505)
        time.sleep(1)
    a.d.click(900, 138)
    time.sleep(1)
    # The section below was commented out while debugging; it must be active
    # in real use.
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        state_flag = a.get_screen_state(screen_shot_)
        if state_flag == 'yunhai':
            a.d.click(233, 311)
            time.sleep(1)
            # wait for the confirmation dialog ('ok') to appear
            while True:
                screen_shot_ = a.d.screenshot(format="opencv")
                if a.is_there_img(screen_shot_,'img/ok.jpg'):
                    break
                else:
                    a.d.click(233, 311)
                    time.sleep(1)
            a.guochang(screen_shot_, ['img/ok.jpg'],suiji=0)
            time.sleep(1)
            break
    # wait until the retreat ('chetui') button is visible
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/chetui.jpg'):
            break
        else:
            a.d.click(470, 434)
            time.sleep(1)
    time.sleep(1)
    a.d.click(667, 360)# floor 1
    time.sleep(1)
    a.d.click(833, 456)# challenge
    time.sleep(1)
    # wait for the support ('zhiyuan') selection screen
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/zhiyuan.jpg'):
            break
        a.d.click(100, 173)# first unit
        time.sleep(1)
    screen_shot = a.d.screenshot(format="opencv")
    a.guochang(screen_shot, ['img/zhiyuan.jpg'],suiji=0)
    # NOTE(review): this checks the OLDER screenshot (screen_shot_), not the
    # freshly captured screen_shot above — possibly intended to be
    # screen_shot; confirm before changing.
    if a.is_there_img(screen_shot_,'img/dengjixianzhi.jpg'):
        a.d.click(213, 208)# level too low: pick the second support unit
        time.sleep(1)
    else:
        a.d.click(100, 173)# pick the first support unit
        time.sleep(1)
    a.d.click(833, 470)# start the battle
    time.sleep(1)
    # confirm the battle-start dialog
    while True:
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/ok.jpg'):
            a.guochang(screen_shot_, ['img/ok.jpg'],suiji=0)
            break
    while True:# fast-forward during the battle
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/caidan.jpg'):
            a.guochang(screen_shot_, ['img/kuaijin.jpg'],suiji=0)
            a.guochang(screen_shot_, ['img/kuaijin_1.jpg'],suiji=0)
            break
    while True:# after the battle ends, navigate back
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/shanghaibaogao.jpg'):
            a.guochang(screen_shot_,['img/xiayibu.jpg','img/qianwangdixiacheng.jpg'], suiji=0)
            break
    a.d.click(1, 1)# dismiss the results animation
    time.sleep(1)
    find_click('img/white_ok.png')
    time.sleep(1)
    find_click('img/chetui.jpg')
    time.sleep(1)
    find_click('img/ok.jpg')
    time.sleep(1)
    while True:# wait until back on the home screen
        screen_shot_ = a.d.screenshot(format="opencv")
        if a.is_there_img(screen_shot_,'img/liwu.jpg'):
            break
        a.d.click(100,505)
        time.sleep(1)# make sure we are back on the home screen
#%%
# =============================================================================
# Main program: read "account,password" pairs from zhanghao.txt, then for each
# account log in, run the daily routines, and switch to the next account.
# (A large amount of commented-out experimental call sequences was removed.)
# =============================================================================
account_dic = {}

with open('zhanghao.txt', 'r') as f:
    for line in f:
        line = line.strip()
        if not line or ',' not in line:
            # Skip blank or malformed lines instead of crashing on unpack.
            continue
        account, password = line.split(',')[0:2]
        account_dic[account] = password.strip()

for account in account_dic:
    print(account, account_dic[account])
    login_auth(account, account_dic[account])
    init_home()       # make sure we start from the home screen
    shouqurenwu()     # collect task rewards
    shouqu()          # collect all gifts
    flatter()
    hanghui()         # guild donation
    change_acc()      # log out and switch to the next account
    time.sleep(3)     # give account switching time to settle
<reponame>MRCIEU/ewascatalog<filename>database/zenodo.py
# Upload a study's summary statistics file to Zenodo via the REST API and
# publish it as a dataset.
#
# Separate sandbox and real Zenodo accounts (and ACCESS_TOKENs) each need to
# be created. To adapt this script to the sandbox, switch the base URL to
# https://sandbox.zenodo.org and use a sandbox ACCESS_TOKEN.
#
# Usage: zenodo.py STUDYID FILE_DIR ACCESS_TOKEN
import sys, json, requests
import pandas as pd

studyid = sys.argv[1]
file_dir = sys.argv[2]
access_token = sys.argv[3]

data_dir = file_dir+'/ewas-sum-stats/to-add/'+studyid
zfile = data_dir+'/zenodo.csv'
try:
    zdata = pd.read_csv(zfile)
except FileNotFoundError:
    print("Can't find the file "+zfile)
    sys.exit()

print('Starting Zenodo upload process')

# ACCESS_TOKEN needs to be generated for each sandbox/real account.
ACCESS_TOKEN = access_token
# Base deposition endpoint; use sandbox.zenodo.org when testing.
DEPOSITIONS_URL = 'https://zenodo.org/api/deposit/depositions'

# Create an empty deposition to obtain a deposition id.
headers = {"Content-Type": "application/json"}
r = requests.post(DEPOSITIONS_URL, params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
deposition_id = r.json()['id']

# Upload the results file. A context manager closes the file handle
# (the original left it open).
data = {'name': 'results.csv'}
with open(data_dir+'/results.csv') as results_file:
    files = {'file': results_file}
    r = requests.post('%s/%s/files' % (DEPOSITIONS_URL, deposition_id), params={'access_token': ACCESS_TOKEN}, data=data, files=files)

# Specify and attach the metadata for the upload.
title = zdata.loc[0, 'title']
authors = zdata.loc[0, 'authors']
desc = zdata.loc[0, 'desc']
desc = desc + '\n\n' + 'Upload of this dataset was completed by The EWAS Catalog team. The data can be queried along with hundreds of other EWAS at ewascatalog.org. To upload your EWAS summary statistics and have a zenodo DOI generated for you go to ewascatalog.org/upload'
data = {'metadata':
        {'title': title,
         'upload_type': 'dataset',
         'description': desc,
         'creators': [{'name': authors}]}}
r = requests.put('%s/%s' % (DEPOSITIONS_URL, deposition_id), params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)

# Publish the deposition; Zenodo answers 202 Accepted on success.
r = requests.post('%s/%s/actions/publish' % (DEPOSITIONS_URL, deposition_id), params={'access_token': ACCESS_TOKEN})
status_code = r.status_code
if status_code != 202:
    # Fixed: the original message was missing the space after "was".
    raise ValueError("Status code was " + str(status_code) + " and it should be 202. Check zenodo")
else:
    print("Status code is 202. Happy days!")
|
<reponame>tbsd/hehmda
#!/usr/bin/env python3
"""
Documentation
See also https://www.python-boilerplate.com/flask
"""
import os
import json
import pymongo
import dns
import time
import hashlib
import cgi
from flask import Flask, jsonify, render_template, send_from_directory, request, make_response, redirect, url_for
from flask_cors import CORS, cross_origin
from pymongo import MongoClient
from bson import json_util
from datetime import datetime
from utils import validate_session, push_to_db, random_string, random_id
# Cookies
from http import cookies
def create_app(config=None):
    """Application factory: build the Flask app, connect to MongoDB and
    register all chat API routes.

    Fixes applied relative to the previous revision:
      * restored the ``password_hash`` identifiers that had been corrupted to
        ``password_<PASSWORD>`` placeholders in authorization/registration;
      * made the anti-<script> filter in ``send`` effective (it previously
        replaced 'script' with 'script', a no-op);
      * ``get_public_data`` no longer crashes with ``len(None)`` on unknown ids;
      * the add-contact routes return 401 instead of crashing when the caller
        is not authenticated; ``add_to_chat`` no longer returns ``None``;
      * fixed the duplicated words in the 401 status message.

    Args:
        config: optional dict merged into the Flask configuration.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)
    CORS(app, support_credentials=True, resources={r"/*": {"origins": "*"}}, send_wildcard=True)
    app.config['CORS_HEADERS'] = 'Content-Type'
    # See http://flask.pocoo.org/docs/latest/config/
    app.config.update(dict(DEBUG=True))
    app.config.update(config or {})

    # MongoDB client.
    # SECURITY NOTE: credentials are hard-coded in the connection string;
    # they belong in configuration / environment variables.
    client = pymongo.MongoClient("mongodb+srv://testing-repo:testing-repo@testing-repo-4xvfr.mongodb.net/admin?retryWrites=true&w=majority")
    # local
    # client = MongoClient('localhost', 27017)
    db = client['db']
    users = db['users']
    chats = db['chats']

    # 404 error handler
    @app.errorhandler(404)
    def not_found(error):
        return json_util.dumps({'code': 404, 'status_msg': 'Не найдено.'})

    # main page
    @app.route("/")
    @cross_origin()
    def hello_world():
        return render_template('index.html')

    # used for loading js to page
    @app.route('/js/<path:path>')
    @cross_origin()
    def get_js(path):
        return send_from_directory('js', path)

    # get the current user's contact list
    @app.route('/api/v1/users/contacts', methods=['POST'])
    @cross_origin()
    def get_contacts():
        user = validate_session(users, request)
        if user:
            info = users.find_one({'id': user['id']},
                    {'_id': 0, 'id': 1, 'contacts': 1})
            return json_util.dumps(info)
        return json_util.dumps({'code': 401, 'status_msg': 'Вы не авторизованы.'})

    # get all chats of the current user, in chat_list order
    @app.route('/api/v1/users/chats', methods=['POST'])
    @cross_origin()
    def get_all_chats():
        user = validate_session(users, request)
        if user:
            chats_id = user['chat_list']
            info = list(chats.find({'id': {'$in': chats_id}}, {'_id': 0}))
            # Reorder the query result to match the user's chat_list order.
            res = [chat for x in chats_id for chat in info if chat['id'] == x]
            return json_util.dumps(res)
        return json_util.dumps({'code': 401, 'status_msg': 'Вы не авторизованы.'})

    # adds contact to current user by given login
    @app.route('/api/v1/users/addcontactbylogin', methods=['POST'])
    @cross_origin()
    def add_contact_by_login():
        user = validate_session(users, request)
        if not user:
            # The original crashed here (user['contacts'] on None) when the
            # caller had no valid session.
            return json_util.dumps({'code': 401, 'status_msg': 'Вы не авторизованы.'})
        data = request.get_json(force=True)
        new_contact = users.find_one({'login': data['login']},
                {'_id': 0, 'id': 1, 'nickname': 1, 'login': 1})
        if new_contact not in user['contacts']:
            if new_contact:
                push_to_db(users, user['id'], 'contacts', new_contact)
                return json_util.dumps(new_contact)
            return json_util.dumps({'code': 404,
                    'status_msg': 'Такого пользователя не существует.'})
        return json_util.dumps({'code': 409, 'status_msg': 'Этот контакт уже есть в списке пользователя.'})

    # adds contact to current user by given id
    @app.route('/api/v1/users/addcontact', methods=['POST'])
    @cross_origin()
    def add_contact():
        user = validate_session(users, request)
        if not user:
            # Same unauthenticated-caller guard as add_contact_by_login.
            return json_util.dumps({'code': 401, 'status_msg': 'Вы не авторизованы.'})
        data = request.get_json(force=True)
        new_contact = users.find_one({'id': data['id']},
                {'_id': 0, 'id': 1, 'nickname': 1})
        if new_contact not in user['contacts']:
            if new_contact:
                push_to_db(users, user['id'], 'contacts', new_contact)
                return json_util.dumps(new_contact)
            return json_util.dumps({'code': 404,
                    'status_msg': 'Такого пользователя не существует.'})
        return json_util.dumps({'code': 409, 'status_msg': 'Этот контакт уже есть в списке пользователя.'})

    # add user to chat; an empty chat_id means "create a new chat first"
    @app.route('/api/v1/chats/addtochat', methods=['POST'])
    @cross_origin()
    def add_to_chat():
        user = validate_session(users, request)
        data = request.get_json(force=True)
        chat_id = data['chat_id']
        new_user_id = data['new_user_id']
        # if new chat created
        if (user and chat_id == ''):
            # Generate a chat id that is not taken yet.
            chat_id = random_string(30)
            chat = chats.find_one({'id': chat_id},
                    {'_id': 0, 'id': 1, 'users': 1})
            while chat:
                chat_id = random_string(30)
                chat = chats.find_one({'id': chat_id},
                        {'_id': 0, 'id': 1, 'users': 1})
            chats.insert_one({'id': chat_id,
                    'users': [user['id']],
                    'messages': []})
            push_to_db(users, user['id'], 'chat_list', chat_id)
            user['chat_list'].append(chat_id)
        # only if user is member of this chat
        if (user and chat_id in user['chat_list']):
            new_user = users.find_one({'id': new_user_id},
                    {'_id': 0, 'id': 1, 'chat_list': 1})
            if (new_user and chat_id not in new_user['chat_list']):
                push_to_db(chats, chat_id, 'users', new_user_id)
                push_to_db(users, new_user_id, 'chat_list', chat_id)
            return json_util.dumps(
                    chats.find_one({'id': chat_id},
                            {'_id': 0, 'id': 1, 'users': 1}))
        # The original fell through and returned None, making Flask raise.
        return json_util.dumps({'code': 401, 'status_msg': 'Вы не состоите в данном чате.'})

    # add message to chat
    @app.route('/api/v1/chats/send', methods=['POST'])
    @cross_origin()
    def send():
        user = validate_session(users, request)
        data = request.get_json(force=True)
        chat_id = data['chat_id']
        # only if user is member of this chat
        if (user and chat_id in user['chat_list']):
            message_id = random_string()
            # timestamp in milliseconds
            timestamp = int(time.time()) * 1000
            content = data['content']
            # Break up the word 'script' with an HTML entity so stored
            # messages cannot form an executable <script> tag.
            # (The previous replace('script', 'script') was a no-op.)
            content = content.replace('script', 'scr&#105;pt')
            message = {'id': message_id,
                    'author': user['id'],
                    'time': timestamp,
                    'content': content}
            push_to_db(chats, chat_id, 'messages', message, False)
            return json_util.dumps(message)
        return json_util.dumps({'code': 401, 'status_msg': 'Вы не состоите в данном чате.'})

    # get only messages newer than last_id
    @app.route('/api/v1/chats/getnewmessages', methods=['POST'])
    @cross_origin()
    def get_new_messages():
        user = validate_session(users, request)
        data = request.get_json(force=True)
        chat_id = data['chat_id']
        # only if user is member of this chat
        if (user and chat_id in user['chat_list']):
            last_id = data['last_id']
            chat = chats.find_one({'id': chat_id},
                    {'_id': 0, 'id': 1, 'messages': 1})
            messages = chat['messages']
            # Find the index of the last message the client already has.
            last_index = 0
            for last_index in range(len(messages)):
                if last_id == messages[last_index]['id']:
                    break
            # if there is such id, send only new messages
            # else send all messages
            if (last_index + 1 != len(messages)):
                chat['messages'] = messages[last_index + 1: len(messages)]
            else:
                if last_id == messages[-1]['id']:
                    chat['messages'] = []
            return json_util.dumps(chat)
        return json_util.dumps({'code': 401, 'status_msg': 'Вы не состоите в данном чате.'})

    # get members of the chat
    @app.route('/api/v1/chats/getusers', methods=['POST'])
    @cross_origin()
    def get_users():
        user = validate_session(users, request)
        data = request.get_json(force=True)
        chat_id = data['chat_id']
        # only if user is member of this chat
        if (user and chat_id in user['chat_list']):
            chat = chats.find_one({'id': chat_id},
                    {'_id': 0, 'id': 1, 'users': 1})
            return json_util.dumps(chat)
        return json_util.dumps({'code': 401, 'status_msg': 'Вы не состоите в данном чате.'})

    # Login and password authorization
    @app.route('/api/v1/users/authorization', methods=['POST'])
    @cross_origin()
    def authorization():
        # Read the login and password from the request body.
        data = request.get_json(force=True)
        login = data['login']
        password = data['password']
        # SECURITY NOTE: unsalted MD5 is a weak password hash; migrate to a
        # dedicated password hash (bcrypt/scrypt) when stored hashes can be
        # upgraded.
        password_hash = hashlib.md5(password.strip().encode('utf-8')).hexdigest()
        if users.find({"login": login, "password_hash": password_hash}).count() == 1:
            # Issue a fresh session token. (The previous revision also built
            # an unused make_response() and set a cookie on it that was never
            # returned; that dead code was removed.)
            token = random_string()
            user = users.find_one({"login": login, "password_hash": password_hash})
            users.find_one_and_update({'id': user['id']}, {'$set': {'session': token}})
            user = users.find_one({"login": login, "password_hash": password_hash})
            return json_util.dumps({'session': user['session']})
        else:
            return json_util.dumps({'code': 401, 'status_msg': 'Неверный логин или пароль.'})

    @app.route('/api/v1/users/registration', methods=['POST'])
    @cross_origin()
    def registration():
        # Read login, password and the password confirmation.
        data = request.get_json(force=True)
        new_login = data['new_login']
        new_password = data['new_password']
        new_repeat_password = data['new_repeat_password']
        new_nickname = data['new_nickname']
        # Reject duplicate logins, then compare the two passwords.
        if users.find({"login": new_login}).count() == 0:
            # Generate a user id that is not taken yet.
            new_id = random_id()
            while users.find_one({"id": new_id}):
                new_id = random_id()
            token = random_string()
            if new_password == new_repeat_password:
                password_hash = hashlib.md5(new_password.strip().encode('utf-8'))
                users.insert_one({"id": new_id, "login": new_login,
                        "password_hash": password_hash.hexdigest(),
                        "nickname": new_nickname,
                        "chat_list": [],
                        "contacts": [], "session": token})
                return json_util.dumps({'session': token})
            return json_util.dumps({'code': 400, 'status_msg': 'Пароли не совпадают.'})
        return json_util.dumps({'code': 400, 'status_msg': 'Такой логин уже занят.'})

    # get personal user information (requires a valid session)
    @app.route('/api/v1/users/personaldata', methods=['POST'])
    @cross_origin()
    def get_personal_data():
        user = validate_session(users, request)
        if user:
            info = users.find_one({'id': user['id']},
                    {'_id': 0, 'id': 1, 'login': 1,
                     'nickname': 1, 'chat_list': 1,
                     'contacts': 1, 'session': 1})
            return json_util.dumps(info)
        return json_util.dumps({'code': 401, 'status_msg': 'Вы не авторизованы.'})

    # get public user information
    @app.route('/api/v1/users/publicdata', methods=['POST'])
    @cross_origin()
    def get_public_data():
        data = request.get_json(force=True)
        other_id = data['id']
        info = users.find_one({'id': other_id},
                {'_id': 0, 'id': 1, 'nickname': 1})
        # find_one returns None for an unknown id; len(None) used to crash.
        if info:
            return json_util.dumps(info)
        return json_util.dumps({'code': 404,
                'status_msg': 'Пользователя с таким id не существует.'})

    return app
# Needed for cookies to work properly with the reactjs frontend.
# NOTE(fix): this hook used to be decorated with @app.after_request at module
# level, where no `app` exists yet (the app is only created inside
# create_app), so merely importing this module raised NameError. Register it
# explicitly on an application object instead, e.g. inside create_app():
#     app.after_request(middleware_for_response)
def middleware_for_response(response):
    """Attach the permissive CORS header and return the response unchanged."""
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
if __name__ == "__main__":
    # The port can be overridden via the PORT environment variable.
    port = int(os.environ.get("PORT", 8000))
    app = create_app()
    # Bind to all interfaces so the container/VM host can reach the server.
    app.run(host="0.0.0.0", port=port)
|
<reponame>GuyLewin/plaso<gh_stars>0
# -*- coding: utf-8 -*-
"""The storage media CLI tool."""
from __future__ import unicode_literals

import codecs
import getpass
import os
import sys

from dfdatetime import filetime as dfdatetime_filetime
from dfvfs.analyzer import analyzer as dfvfs_analyzer
from dfvfs.analyzer import fvde_analyzer_helper
from dfvfs.credentials import manager as credentials_manager
from dfvfs.helpers import source_scanner
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.volume import tsk_volume_system
from dfvfs.volume import vshadow_volume_system

from plaso.cli import logger
from plaso.cli import tools
from plaso.cli import views
from plaso.engine import configurations
from plaso.lib import errors
from plaso.lib import py2to3
from plaso.lib import timelib
try:
  # Disable experimental FVDE support.
  dfvfs_analyzer.Analyzer.DeregisterHelper(
      fvde_analyzer_helper.FVDEAnalyzerHelper())
except KeyError:
  # The helper was not registered, so there is nothing to disable.
  pass
class StorageMediaTool(tools.CLITool):
"""Class that implements a storage media CLI tool."""
_DEFAULT_BYTES_PER_SECTOR = 512
# TODO: remove this redirect.
_SOURCE_OPTION = 'source'
_BINARY_DATA_CREDENTIAL_TYPES = ['key_data']
_SUPPORTED_CREDENTIAL_TYPES = [
'key_data', 'password', '<PASSWORD>_password', 'startup_key']
# For context see: http://en.wikipedia.org/wiki/Byte
_UNITS_1000 = ['B', 'kB', 'MB', 'GB', 'TB', 'EB', 'ZB', 'YB']
_UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'EiB', 'ZiB', 'YiB']
  def __init__(self, input_reader=None, output_writer=None):
    """Initializes the CLI tool object.

    Args:
      input_reader (Optional[InputReader]): input reader, where None indicates
          that the stdin input reader should be used.
      output_writer (Optional[OutputWriter]): output writer, where None
          indicates that the stdout output writer should be used.
    """
    super(StorageMediaTool, self).__init__(
        input_reader=input_reader, output_writer=output_writer)
    # Artifact definition locations and filters used during extraction.
    self._custom_artifacts_path = None
    self._artifact_definitions_path = None
    self._artifact_filters = None
    # Credentials supplied on the command line as (type, data) tuples, and
    # the per-path-spec configurations built from them.
    self._credentials = []
    self._credential_configurations = []
    self._filter_file = None
    # Partition selection: preferred identifiers and/or a byte offset.
    self._partitions = None
    self._partition_offset = None
    # Volume Shadow Snapshot (VSS) processing settings.
    self._process_vss = False
    self._source_scanner = source_scanner.SourceScanner()
    self._source_path = None
    self._source_path_specs = []
    self._vss_only = False
    self._vss_stores = None
def _AddCredentialConfiguration(
self, path_spec, credential_type, credential_data):
"""Adds a credential configuration.
Args:
path_spec (dfvfs.PathSpec): path specification.
credential_type (str): credential type.
credential_data (bytes): credential data.
"""
credential_configuration = configurations.CredentialConfiguration(
credential_data=credential_data, credential_type=credential_type,
path_spec=path_spec)
self._credential_configurations.append(credential_configuration)
def _FormatHumanReadableSize(self, size):
"""Represents a number of bytes as a human readable string.
Args:
size (int): size in bytes.
Returns:
str: human readable string of the size.
"""
magnitude_1000 = 0
size_1000 = float(size)
while size_1000 >= 1000:
size_1000 /= 1000
magnitude_1000 += 1
magnitude_1024 = 0
size_1024 = float(size)
while size_1024 >= 1024:
size_1024 /= 1024
magnitude_1024 += 1
size_string_1000 = None
if magnitude_1000 > 0 and magnitude_1000 <= 7:
size_string_1000 = '{0:.1f}{1:s}'.format(
size_1000, self._UNITS_1000[magnitude_1000])
size_string_1024 = None
if magnitude_1024 > 0 and magnitude_1024 <= 7:
size_string_1024 = '{0:.1f}{1:s}'.format(
size_1024, self._UNITS_1024[magnitude_1024])
if not size_string_1000 or not size_string_1024:
return '{0:d} B'.format(size)
return '{0:s} / {1:s} ({2:d} B)'.format(
size_string_1024, size_string_1000, size)
def _GetNormalizedTSKVolumeIdentifiers(
self, volume_system, volume_identifiers):
"""Retrieves the normalized TSK volume identifiers.
Args:
volume_system (dfvfs.TSKVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Returns:
list[int]: normalized volume identifiers.
"""
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
'Volume missing for identifier: {0:s}.'.format(volume_identifier))
try:
volume_identifier = int(volume.identifier[1:], 10)
normalized_volume_identifiers.append(volume_identifier)
except ValueError:
pass
return normalized_volume_identifiers
def _GetNormalizedVShadowVolumeIdentifiers(
self, volume_system, volume_identifiers):
"""Retrieves the normalized VShadow volume identifiers.
Args:
volume_system (dfvfs.VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Returns:
list[int]: normalized volume identifiers.
"""
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
'Volume missing for identifier: {0:s}.'.format(volume_identifier))
try:
volume_identifier = int(volume.identifier[3:], 10)
normalized_volume_identifiers.append(volume_identifier)
except ValueError:
pass
return normalized_volume_identifiers
  # TODO: refactor this method that it become more clear what it is
  # supposed to do.
  def _GetTSKPartitionIdentifiers(
      self, scan_node, partition_offset=None, partitions=None):
    """Determines the TSK partition identifiers.

    This method first checks for the preferred partition number, then for
    the preferred partition offset and falls back to prompt the user if
    no usable preferences were specified.

    Args:
      scan_node (dfvfs.SourceScanNode): scan node.
      partition_offset (Optional[int]): preferred partition byte offset.
      partitions (Optional[list[str]]): preferred partition identifiers.

    Returns:
      list[str]: partition identifiers, or None if no partitions were found.

    Raises:
      RuntimeError: if the volume for a specific identifier cannot be
          retrieved.
      SourceScannerError: if the format of or within the source
          is not supported or the scan node is invalid.
    """
    if not scan_node or not scan_node.path_spec:
      raise errors.SourceScannerError('Invalid scan node.')

    volume_system = tsk_volume_system.TSKVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
      self._output_writer.Write('[WARNING] No partitions found.\n')
      return None

    # Partition numbers the preferences may refer to ('p1' -> 1, ...).
    normalized_volume_identifiers = self._GetNormalizedTSKVolumeIdentifiers(
        volume_system, volume_identifiers)

    if partitions:
      if partitions == ['all']:
        partitions = range(1, volume_system.number_of_volumes + 1)

      # Only honor the preference when every requested partition exists.
      if not set(partitions).difference(normalized_volume_identifiers):
        return [
            'p{0:d}'.format(partition_number)
            for partition_number in partitions]

    if partition_offset is not None:
      for volume in volume_system.volumes:
        # Only the first extent of each volume is considered when matching
        # the preferred byte offset.
        volume_extent = volume.extents[0]
        if volume_extent.offset == partition_offset:
          return [volume.identifier]

      self._output_writer.Write((
          '[WARNING] No such partition with offset: {0:d} '
          '(0x{0:08x}).\n').format(partition_offset))

    # A single partition needs no user interaction.
    if len(volume_identifiers) == 1:
      return volume_identifiers

    try:
      selected_volume_identifier = self._PromptUserForPartitionIdentifier(
          volume_system, volume_identifiers)
    except KeyboardInterrupt:
      raise errors.UserAbort('File system scan aborted.')

    if selected_volume_identifier == 'all':
      return volume_identifiers

    return [selected_volume_identifier]
  def _GetVSSStoreIdentifiers(self, scan_node, vss_stores=None):
    """Determines the VSS store identifiers.

    Args:
      scan_node (dfvfs.SourceScanNode): scan node.
      vss_stores (Optional[list[str]]): preferred VSS store identifiers.

    Returns:
      list[str]: VSS store identifiers.

    Raises:
      SourceScannerError: if the format of or within the source
          is not supported or the scan node is invalid.
    """
    if not scan_node or not scan_node.path_spec:
      raise errors.SourceScannerError('Invalid scan node.')

    volume_system = vshadow_volume_system.VShadowVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
      # No VSS stores on this volume; nothing to select.
      return []

    try:
      selected_store_identifiers = self._PromptUserForVSSStoreIdentifiers(
          volume_system, volume_identifiers, vss_stores=vss_stores)
    except KeyboardInterrupt:
      raise errors.UserAbort('File system scan aborted.')

    return selected_store_identifiers
def _ParseCredentialOptions(self, options):
"""Parses the credential options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
credentials = getattr(options, 'credentials', [])
if not isinstance(credentials, list):
raise errors.BadConfigOption('Unsupported credentials value.')
for credential_string in credentials:
credential_type, _, credential_data = credential_string.partition(':')
if not credential_type or not credential_data:
raise errors.BadConfigOption(
'Badly formatted credential: {0:s}.'.format(credential_string))
if credential_type not in self._SUPPORTED_CREDENTIAL_TYPES:
raise errors.BadConfigOption(
'Unsupported credential type for: {0:s}.'.format(
credential_string))
if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:
try:
credential_data = credential_data.decode('hex')
except TypeError:
raise errors.BadConfigOption(
'Unsupported credential data for: {0:s}.'.format(
credential_string))
self._credentials.append((credential_type, credential_data))
def _ParsePartitionsString(self, partitions):
"""Parses the user specified partitions string.
Args:
partitions (str): partitions. A range of partitions can be defined
as: "3..5". Multiple partitions can be defined as: "1,3,5" (a list
of comma separated values). Ranges and lists can also be combined
as: "1,3..5". The first partition is 1. All partitions can be
defined as: "all".
Returns:
list[int|str]: partition numbers or "all" to represent all available
partitions.
Raises:
BadConfigOption: if the partitions string is invalid.
"""
if not partitions:
return []
if partitions == 'all':
return ['all']
partition_numbers = []
for partition_range in partitions.split(','):
# Determine if the range is formatted as 1..3 otherwise it indicates
# a single partition number.
if '..' in partition_range:
first_partition, last_partition = partition_range.split('..')
try:
first_partition = int(first_partition, 10)
last_partition = int(last_partition, 10)
except ValueError:
raise errors.BadConfigOption(
'Invalid partition range: {0:s}.'.format(partition_range))
for partition_number in range(first_partition, last_partition + 1):
if partition_number not in partition_numbers:
partition_numbers.append(partition_number)
else:
if partition_range.startswith('p'):
partition_range = partition_range[1:]
try:
partition_number = int(partition_range, 10)
except ValueError:
raise errors.BadConfigOption(
'Invalid partition range: {0:s}.'.format(partition_range))
if partition_number not in partition_numbers:
partition_numbers.append(partition_number)
return sorted(partition_numbers)
  def _ParseSourcePathOption(self, options):
    """Parses the source path option.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION)
    if not self._source_path:
      raise errors.BadConfigOption('Missing source path.')

    # Normalize to an absolute path so later comparisons are stable.
    self._source_path = os.path.abspath(self._source_path)
  def _ParseStorageMediaOptions(self, options):
    """Parses the storage media options.

    Parses the storage media image, VSS processing, credential and source
    path options in turn.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    self._ParseStorageMediaImageOptions(options)
    self._ParseVSSProcessingOptions(options)
    self._ParseCredentialOptions(options)
    self._ParseSourcePathOption(options)
  def _ParseStorageMediaImageOptions(self, options):
    """Parses the storage media image options.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    partitions = getattr(options, 'partitions', None)
    self._partitions = self._ParsePartitionsString(partitions)

    image_offset_bytes = getattr(options, 'image_offset_bytes', None)

    # Partition selection and explicit image offsets are mutually exclusive.
    if self._partitions and image_offset_bytes is not None:
      raise errors.BadConfigOption((
          'Option "--image_offset_bytes" can not be used in combination '
          'with "--partitions" or "--partition".'))

    image_offset = getattr(options, 'image_offset', None)

    if self._partitions and image_offset is not None:
      raise errors.BadConfigOption((
          'Option "--image_offset" can not be used in combination with '
          '"--partitions" or "--partition".'))

    if (image_offset_bytes is not None and
        isinstance(image_offset_bytes, py2to3.STRING_TYPES)):
      try:
        image_offset_bytes = int(image_offset_bytes, 10)
      except ValueError:
        raise errors.BadConfigOption(
            'Invalid image offset bytes: {0:s}.'.format(image_offset_bytes))

    # A sector-based offset is only considered when no byte offset was given.
    if image_offset_bytes is None and image_offset is not None:
      bytes_per_sector = getattr(
          options, 'bytes_per_sector', self._DEFAULT_BYTES_PER_SECTOR)

      if isinstance(image_offset, py2to3.STRING_TYPES):
        try:
          image_offset = int(image_offset, 10)
        except ValueError:
          raise errors.BadConfigOption(
              'Invalid image offset: {0:s}.'.format(image_offset))

      if isinstance(bytes_per_sector, py2to3.STRING_TYPES):
        try:
          bytes_per_sector = int(bytes_per_sector, 10)
        except ValueError:
          raise errors.BadConfigOption(
              'Invalid bytes per sector: {0:s}.'.format(bytes_per_sector))

    # NOTE(review): if image_offset_bytes is 0 (falsy but not None) while
    # image_offset is set, bytes_per_sector is never assigned and the elif
    # below raises NameError — confirm whether 0 is a legal byte offset.
    if image_offset_bytes:
      self._partition_offset = image_offset_bytes
    elif image_offset:
      self._partition_offset = image_offset * bytes_per_sector
  def _ParseVSSProcessingOptions(self, options):
    """Parses the VSS processing options.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    vss_only = False
    vss_stores = None

    # 'no_vss' defaults to True here, so VSS processing stays disabled
    # unless the option object explicitly carries no_vss=False.
    self._process_vss = not getattr(options, 'no_vss', True)
    if self._process_vss:
      vss_only = getattr(options, 'vss_only', False)
      vss_stores = getattr(options, 'vss_stores', None)

    if vss_stores:
      vss_stores = self._ParseVSSStoresString(vss_stores)

    self._vss_only = vss_only
    self._vss_stores = vss_stores
def _ParseVSSStoresString(self, vss_stores):
"""Parses the user specified VSS stores string.
Args:
vss_stores (str): VSS stores. A range of stores can be defined
as: "3..5". Multiple stores can be defined as: "1,3,5" (a list
of comma separated values). Ranges and lists can also be
combined as: "1,3..5". The first store is 1. All stores can be
defined as: "all".
Returns:
list[str]: VSS stores.
Raises:
BadConfigOption: if the VSS stores option is invalid.
"""
if not vss_stores:
return []
if vss_stores == 'all':
return ['all']
store_numbers = []
for vss_store_range in vss_stores.split(','):
# Determine if the range is formatted as 1..3 otherwise it indicates
# a single store number.
if '..' in vss_store_range:
first_store, last_store = vss_store_range.split('..')
try:
first_store = int(first_store, 10)
last_store = int(last_store, 10)
except ValueError:
raise errors.BadConfigOption(
'Invalid VSS store range: {0:s}.'.format(vss_store_range))
for store_number in range(first_store, last_store + 1):
if store_number not in store_numbers:
store_numbers.append(store_number)
else:
if vss_store_range.startswith('vss'):
vss_store_range = vss_store_range[3:]
try:
store_number = int(vss_store_range, 10)
except ValueError:
raise errors.BadConfigOption(
'Invalid VSS store range: {0:s}.'.format(vss_store_range))
if store_number not in store_numbers:
store_numbers.append(store_number)
return sorted(store_numbers)
  def _PromptUserForEncryptedVolumeCredential(
      self, scan_context, locked_scan_node, credentials):
    """Prompts the user to provide a credential for an encrypted volume.

    Loops until the volume is unlocked or the user selects "skip".

    Args:
      scan_context (dfvfs.SourceScannerContext): source scanner context.
      locked_scan_node (dfvfs.SourceScanNode): locked scan node.
      credentials (dfvfs.Credentials): credentials supported by the locked
          scan node.

    Returns:
      bool: True if the volume was unlocked.
    """
    # TODO: print volume description.
    if locked_scan_node.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
      self._output_writer.Write('Found a BitLocker encrypted volume.\n')
    else:
      self._output_writer.Write('Found an encrypted volume.\n')
    # "skip" is offered as a pseudo credential to leave the volume locked.
    credentials_list = list(credentials.CREDENTIALS)
    credentials_list.append('skip')
    self._output_writer.Write('Supported credentials:\n')
    self._output_writer.Write('\n')
    for index, name in enumerate(credentials_list):
      self._output_writer.Write('  {0:d}. {1:s}\n'.format(index, name))
    self._output_writer.Write('\nNote that you can abort with Ctrl^C.\n\n')
    result = False
    while not result:
      self._output_writer.Write('Select a credential to unlock the volume: ')
      # TODO: add an input reader.
      input_line = self._input_reader.Read()
      input_line = input_line.strip()
      # The user can answer with either the credential name or its index.
      if input_line in credentials_list:
        credential_type = input_line
      else:
        try:
          credential_type = int(input_line, 10)
          credential_type = credentials_list[credential_type]
        except (IndexError, ValueError):
          self._output_writer.Write(
              'Unsupported credential: {0:s}\n'.format(input_line))
          continue
      if credential_type == 'skip':
        break
      getpass_string = 'Enter credential data: '
      if sys.platform.startswith('win') and sys.version_info[0] < 3:
        # For Python 2 on Windows getpass (win_getpass) requires an encoded
        # byte string. For Python 3 we need it to be a Unicode string.
        getpass_string = self._EncodeString(getpass_string)
      # getpass suppresses echo so the credential is not shown on screen.
      credential_data = getpass.getpass(getpass_string)
      self._output_writer.Write('\n')
      if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:
        # NOTE(review): str.decode('hex') is Python 2 only; on Python 3 str
        # has no decode() so this raises AttributeError, which is not
        # caught here — confirm binary credential types are py2-only.
        try:
          credential_data = credential_data.decode('hex')
        except TypeError:
          self._output_writer.Write('Unsupported credential data.\n')
          continue
      try:
        result = self._source_scanner.Unlock(
            scan_context, locked_scan_node.path_spec, credential_type,
            credential_data)
      except IOError as exception:
        logger.debug('Unable to unlock volume with error: {0!s}'.format(
            exception))
        result = False
      if not result:
        self._output_writer.Write('Unable to unlock volume.\n')
        self._output_writer.Write('\n')
    self._output_writer.Write('\n')
    if result:
      # Remember the working credential for later (re)scans of this volume.
      self._AddCredentialConfiguration(
          locked_scan_node.path_spec, credential_type, credential_data)
    return result
  def _PromptUserForPartitionIdentifier(
      self, volume_system, volume_identifiers):
    """Prompts the user to provide a partition identifier.

    Prints a table of the available partitions and loops until the user
    enters a supported identifier or "all".

    Args:
      volume_system (dfvfs.TSKVolumeSystem): volume system.
      volume_identifiers (list[str]): allowed volume identifiers.

    Returns:
      str: partition identifier or "all".

    Raises:
      SourceScannerError: if the source cannot be processed.
    """
    self._output_writer.Write('The following partitions were found:\n')
    table_view = views.CLITabularTableView(column_names=[
        'Identifier', 'Offset (in bytes)', 'Size (in bytes)'])
    for volume_identifier in sorted(volume_identifiers):
      volume = volume_system.GetVolumeByIdentifier(volume_identifier)
      if not volume:
        raise errors.SourceScannerError(
            'Volume missing for identifier: {0:s}.'.format(volume_identifier))
      volume_extent = volume.extents[0]
      volume_offset = '{0:d} (0x{0:08x})'.format(volume_extent.offset)
      volume_size = self._FormatHumanReadableSize(volume_extent.size)
      table_view.AddRow([volume.identifier, volume_offset, volume_size])
    self._output_writer.Write('\n')
    table_view.Write(self._output_writer)
    self._output_writer.Write('\n')
    while True:
      self._output_writer.Write(
          'Please specify the identifier of the partition that should be '
          'processed.\nAll partitions can be defined as: "all". Note that you '
          'can abort with Ctrl^C.\n')
      selected_volume_identifier = self._input_reader.Read()
      selected_volume_identifier = selected_volume_identifier.strip()
      # Allow the user to type a bare partition number e.g. "2" for "p2".
      if not selected_volume_identifier.startswith('p'):
        try:
          partition_number = int(selected_volume_identifier, 10)
          selected_volume_identifier = 'p{0:d}'.format(partition_number)
        except ValueError:
          pass
      if (selected_volume_identifier == 'all' or
          selected_volume_identifier in volume_identifiers):
        break
      self._output_writer.Write(
          '\n'
          'Unsupported partition identifier, please try again or abort '
          'with Ctrl^C.\n'
          '\n')
    self._output_writer.Write('\n')
    return selected_volume_identifier
  def _PromptUserForVSSCurrentVolume(self):
    """Prompts the user if the current volume with VSS should be processed.

    Returns:
      bool: True if the current volume with VSS should be processed.
    """
    while True:
      self._output_writer.Write(
          'Volume Shadow Snapshots (VSS) were selected also process current\n'
          'volume? [yes, no]\n')
      process_current_volume = self._input_reader.Read()
      process_current_volume = process_current_volume.strip()
      process_current_volume = process_current_volume.lower()
      # An empty answer is accepted and treated as "yes" on return below.
      if (not process_current_volume or
          process_current_volume in ('no', 'yes')):
        break
      self._output_writer.Write(
          '\n'
          'Unsupported option, please try again or abort with Ctrl^C.\n'
          '\n')
    self._output_writer.Write('\n')
    return not process_current_volume or process_current_volume == 'yes'
  def _PromptUserForVSSStoreIdentifiers(
      self, volume_system, volume_identifiers, vss_stores=None):
    """Prompts the user to provide the VSS store identifiers.

    This method first checks for the preferred VSS stores and falls back
    to prompt the user if no usable preferences were specified.

    Args:
      volume_system (dfvfs.VShadowVolumeSystem): volume system.
      volume_identifiers (list[str]): allowed volume identifiers.
      vss_stores (Optional[list[str]]): preferred VSS store identifiers.

    Returns:
      list[str]: selected VSS store identifiers.

    Raises:
      SourceScannerError: if the source cannot be processed.
    """
    normalized_volume_identifiers = self._GetNormalizedVShadowVolumeIdentifiers(
        volume_system, volume_identifiers)
    # TODO: refactor this to _GetVSSStoreIdentifiers.
    if vss_stores:
      if vss_stores == ['all']:
        # We need to set the stores to cover all vss stores.
        # NOTE(review): on Python 3 this is a range object rather than the
        # list the docstring promises — confirm callers only iterate it.
        vss_stores = range(1, volume_system.number_of_volumes + 1)
      # Use the preference only when every requested store is available.
      if not set(vss_stores).difference(normalized_volume_identifiers):
        return vss_stores
    print_header = True
    while True:
      # The snapshot table is printed only once, before the first prompt.
      if print_header:
        self._output_writer.Write(
            'The following Volume Shadow Snapshots (VSS) were found:\n')
        table_view = views.CLITabularTableView(column_names=[
            'Identifier', 'Creation Time'])
        for volume_identifier in volume_identifiers:
          volume = volume_system.GetVolumeByIdentifier(volume_identifier)
          if not volume:
            raise errors.SourceScannerError(
                'Volume missing for identifier: {0:s}.'.format(
                    volume_identifier))
          # Convert the FILETIME creation time attribute to an ISO string.
          vss_creation_time = volume.GetAttribute('creation_time')
          filetime = dfdatetime_filetime.Filetime(
              timestamp=vss_creation_time.value)
          vss_creation_time = filetime.GetPlasoTimestamp()
          vss_creation_time = timelib.Timestamp.CopyToIsoFormat(
              vss_creation_time)
          if volume.HasExternalData():
            vss_creation_time = (
                '{0:s}\tWARNING: data stored outside volume').format(
                    vss_creation_time)
          table_view.AddRow([volume.identifier, vss_creation_time])
        self._output_writer.Write('\n')
        table_view.Write(self._output_writer)
        self._output_writer.Write('\n')
        print_header = False
      self._output_writer.Write(
          'Please specify the identifier(s) of the VSS that should be '
          'processed:\nNote that a range of stores can be defined as: 3..5. '
          'Multiple stores can\nbe defined as: 1,3,5 (a list of comma '
          'separated values). Ranges and lists can\nalso be combined '
          'as: 1,3..5. The first store is 1. All stores can be defined\n'
          'as "all". If no stores are specified none will be processed. You\n'
          'can abort with Ctrl^C.\n')
      selected_vss_stores = self._input_reader.Read()
      selected_vss_stores = selected_vss_stores.strip()
      if not selected_vss_stores:
        return []
      try:
        selected_vss_stores = self._ParseVSSStoresString(selected_vss_stores)
      except errors.BadConfigOption:
        # An unparsable answer falls through to the retry message below.
        selected_vss_stores = []
      if selected_vss_stores == ['all']:
        # We need to set the stores to cover all vss stores.
        selected_vss_stores = range(1, volume_system.number_of_volumes + 1)
      if not set(selected_vss_stores).difference(normalized_volume_identifiers):
        break
      self._output_writer.Write(
          '\n'
          'Unsupported VSS identifier(s), please try again or abort with '
          'Ctrl^C.\n'
          '\n')
    self._output_writer.Write('\n')
    return selected_vss_stores
def _ScanVolume(self, scan_context, volume_scan_node):
"""Scans the volume scan node for volume and file systems.
Args:
scan_context (dfvfs.SourceScannerContext): source scanner context.
volume_scan_node (dfvfs.SourceScanNode): volume scan node.
Raises:
SourceScannerError: if the format of or within the source
is not supported or the the scan node is invalid.
"""
if not volume_scan_node or not volume_scan_node.path_spec:
raise errors.SourceScannerError('Invalid or missing volume scan node.')
selected_vss_stores = []
if not volume_scan_node.sub_nodes:
self._ScanVolumeScanNode(
scan_context, volume_scan_node, selected_vss_stores)
else:
# Some volumes contain other volume or file systems e.g. BitLocker ToGo
# has an encrypted and unencrypted volume.
for sub_scan_node in volume_scan_node.sub_nodes:
self._ScanVolumeScanNode(
scan_context, sub_scan_node, selected_vss_stores)
  def _ScanVolumeScanNode(
      self, scan_context, volume_scan_node, selected_vss_stores):
    """Scans an individual volume scan node for volume and file systems.

    Args:
      scan_context (dfvfs.SourceScannerContext): source scanner context.
      volume_scan_node (dfvfs.SourceScanNode): volume scan node.
      selected_vss_stores (list[str]): selected VSS store identifiers.

    Raises:
      SourceScannerError: if the format of or within the source
          is not supported or the scan node is invalid.
    """
    if not volume_scan_node or not volume_scan_node.path_spec:
      raise errors.SourceScannerError('Invalid or missing volume scan node.')
    # Walk down single-child chains to the first node where we need to
    # decide what to process.
    scan_node = volume_scan_node
    while len(scan_node.sub_nodes) == 1:
      # Make sure that we prompt the user about VSS selection: stop at the
      # VSS root node instead of descending into it.
      if scan_node.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW:
        location = getattr(scan_node.path_spec, 'location', None)
        if location == '/':
          break
      scan_node = scan_node.sub_nodes[0]
    # The source scanner found an encrypted volume and we need
    # a credential to unlock the volume.
    if scan_node.type_indicator in (
        dfvfs_definitions.ENCRYPTED_VOLUME_TYPE_INDICATORS):
      self._ScanVolumeScanNodeEncrypted(scan_context, scan_node)
    elif scan_node.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW:
      self._ScanVolumeScanNodeVSS(scan_node, selected_vss_stores)
    elif scan_node.type_indicator in (
        dfvfs_definitions.FILE_SYSTEM_TYPE_INDICATORS):
      # Add the current (non-VSS) volume unless --vss_only was requested
      # with selected stores and the user declines the prompt.
      if (not self._vss_only or not selected_vss_stores or
          self._PromptUserForVSSCurrentVolume()):
        self._source_path_specs.append(scan_node.path_spec)
  def _ScanVolumeScanNodeEncrypted(self, scan_context, volume_scan_node):
    """Scans an encrypted volume scan node for volume and file systems.

    Tries the command line credentials first and falls back to prompting
    the user; on success rescans the unlocked volume.

    Args:
      scan_context (dfvfs.SourceScannerContext): source scanner context.
      volume_scan_node (dfvfs.SourceScanNode): volume scan node.
    """
    # An already unlocked node needs no credentials.
    result = not scan_context.IsLockedScanNode(volume_scan_node.path_spec)
    if not result:
      credentials = credentials_manager.CredentialsManager.GetCredentials(
          volume_scan_node.path_spec)
      result = False
      # First try the credentials provided on the command line.
      for credential_type, credential_data in self._credentials:
        if credential_type not in credentials.CREDENTIALS:
          continue
        result = self._source_scanner.Unlock(
            scan_context, volume_scan_node.path_spec, credential_type,
            credential_data)
        if result:
          self._AddCredentialConfiguration(
              volume_scan_node.path_spec, credential_type, credential_data)
          break
      if self._credentials and not result:
        self._output_writer.Write(
            '[WARNING] Unable to unlock encrypted volume using the provided '
            'credentials.\n\n')
      if not result:
        # Fall back to interactively asking the user for a credential.
        result = self._PromptUserForEncryptedVolumeCredential(
            scan_context, volume_scan_node, credentials)
    if result:
      # Rescan the now unlocked volume for nested volume/file systems.
      self._source_scanner.Scan(
          scan_context, scan_path_spec=volume_scan_node.path_spec)
      self._ScanVolume(scan_context, volume_scan_node)
  def _ScanVolumeScanNodeVSS(self, volume_scan_node, selected_vss_stores):
    """Scans a VSS volume scan node for volume and file systems.

    Args:
      volume_scan_node (dfvfs.SourceScanNode): volume scan node.
      selected_vss_stores (list[str]): selected VSS store identifiers; the
          identifiers chosen here are appended to this list in place.

    Raises:
      SourceScannerError: if a VSS sub scan node cannot be retrieved.
    """
    if not self._process_vss:
      return
    # Do not scan inside individual VSS store scan nodes.
    location = getattr(volume_scan_node.path_spec, 'location', None)
    if location != '/':
      return
    vss_store_identifiers = self._GetVSSStoreIdentifiers(
        volume_scan_node, vss_stores=self._vss_stores)
    selected_vss_stores.extend(vss_store_identifiers)
    # Process VSS stores starting with the most recent one.
    vss_store_identifiers.reverse()
    for vss_store_identifier in vss_store_identifiers:
      location = '/vss{0:d}'.format(vss_store_identifier)
      sub_scan_node = volume_scan_node.GetSubNodeByLocation(location)
      if not sub_scan_node:
        # A missing store is logged and skipped rather than raised.
        logger.error(
            'Scan node missing for VSS store identifier: {0:d}.'.format(
                vss_store_identifier))
        continue
      # We "optimize" here for user experience, ideally we would scan for
      # a file system instead of hard coding a TSK child path specification.
      path_spec = path_spec_factory.Factory.NewPathSpec(
          dfvfs_definitions.TYPE_INDICATOR_TSK, location='/',
          parent=sub_scan_node.path_spec)
      self._source_path_specs.append(path_spec)
def AddCredentialOptions(self, argument_group):
"""Adds the credential options to the argument group.
The credential options are use to unlock encrypted volumes.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--credential', action='append', default=[], type=str,
dest='credentials', metavar='TYPE:DATA', help=(
'Define a credentials that can be used to unlock encrypted '
'volumes e.g. BitLocker. The credential is defined as type:data '
'e.g. "password:<PASSWORD>". Supported credential types are: '
'{0:s}. Binary key data is expected to be passed in BASE-16 '
'encoding (hexadecimal). WARNING credentials passed via command '
'line arguments can end up in logs, so use this option with '
'care.').format(', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))
def AddStorageMediaImageOptions(self, argument_group):
"""Adds the storage media image options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--partitions', '--partition', dest='partitions', action='store',
type=str, default=None, help=(
'Define partitions to be processed. A range of '
'partitions can be defined as: "3..5". Multiple partitions can '
'be defined as: "1,3,5" (a list of comma separated values). '
'Ranges and lists can also be combined as: "1,3..5". The first '
'partition is 1. All partitions can be specified with: "all".'))
argument_group.add_argument(
'--offset', dest='image_offset', action='store', default=None,
type=int, help=(
'The offset of the volume within the storage media image in '
'number of sectors. A sector is {0:d} bytes in size by default '
'this can be overwritten with the --sector_size option.').format(
self._DEFAULT_BYTES_PER_SECTOR))
argument_group.add_argument(
'--ob', '--offset_bytes', '--offset_bytes',
dest='image_offset_bytes', action='store', default=None, type=int,
help=(
'The offset of the volume within the storage media image in '
'number of bytes.'))
argument_group.add_argument(
'--sector_size', '--sector-size', dest='bytes_per_sector',
action='store', type=int, default=self._DEFAULT_BYTES_PER_SECTOR,
help=(
'The number of bytes per sector, which is {0:d} by '
'default.').format(self._DEFAULT_BYTES_PER_SECTOR))
def AddVSSProcessingOptions(self, argument_group):
"""Adds the VSS processing options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--no_vss', '--no-vss', dest='no_vss', action='store_true',
default=False, help=(
'Do not scan for Volume Shadow Snapshots (VSS). This means that '
'Volume Shadow Snapshots (VSS) are not processed.'))
argument_group.add_argument(
'--vss_only', '--vss-only', dest='vss_only', action='store_true',
default=False, help=(
'Do not process the current volume if Volume Shadow Snapshots '
'(VSS) have been selected.'))
argument_group.add_argument(
'--vss_stores', '--vss-stores', dest='vss_stores', action='store',
type=str, default=None, help=(
'Define Volume Shadow Snapshots (VSS) (or stores that need to be '
'processed. A range of stores can be defined as: "3..5". '
'Multiple stores can be defined as: "1,3,5" (a list of comma '
'separated values). Ranges and lists can also be combined as: '
'"1,3..5". The first store is 1. All stores can be defined as: '
'"all".'))
  def ScanSource(self, source_path):
    """Scans the source path for volume and file systems.

    This function sets the internal source path specification and source
    type values.

    Args:
      source_path (str): path to the source.

    Returns:
      dfvfs.SourceScannerContext: source scanner context.

    Raises:
      SourceScannerError: if the format of or within the source is
          not supported.
    """
    # Symbolic links are resolved here and not earlier to preserve the user
    # specified source path in storage and reporting.
    if os.path.islink(source_path):
      source_path = os.path.realpath(source_path)
    # Paths starting with \\.\ are Windows device paths that are not
    # visible to os.path.exists, hence they bypass the existence check.
    if (not source_path.startswith('\\\\.\\') and
        not os.path.exists(source_path)):
      raise errors.SourceScannerError(
          'No such device, file or directory: {0:s}.'.format(source_path))
    scan_context = source_scanner.SourceScannerContext()
    scan_context.OpenSourcePath(source_path)
    try:
      self._source_scanner.Scan(scan_context)
    except (dfvfs_errors.BackEndError, ValueError) as exception:
      raise errors.SourceScannerError(
          'Unable to scan source with error: {0!s}.'.format(exception))
    if scan_context.source_type not in (
        scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,
        scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE):
      # A directory or single file source needs no volume scanning.
      scan_node = scan_context.GetRootScanNode()
      self._source_path_specs.append(scan_node.path_spec)
      return scan_context
    # Get the first node where we need to decide what to process.
    scan_node = scan_context.GetRootScanNode()
    while len(scan_node.sub_nodes) == 1:
      scan_node = scan_node.sub_nodes[0]
    # The source scanner found a partition table and we need to determine
    # which partition needs to be processed.
    if scan_node.type_indicator != (
        dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
      partition_identifiers = None
    else:
      partition_identifiers = self._GetTSKPartitionIdentifiers(
          scan_node, partition_offset=self._partition_offset,
          partitions=self._partitions)
    if not partition_identifiers:
      self._ScanVolume(scan_context, scan_node)
    else:
      for partition_identifier in partition_identifiers:
        location = '/{0:s}'.format(partition_identifier)
        sub_scan_node = scan_node.GetSubNodeByLocation(location)
        self._ScanVolume(scan_context, sub_scan_node)
    if not self._source_path_specs:
      raise errors.SourceScannerError(
          'No supported file system found in source.')
    return scan_context
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module to manage the list of all known versions of Blender on the system'''
import xml.etree.ElementTree as xmlMod
import re, os
from usefullFunctions import XML
class VersionList:
	'''Registry of the Blender versions known to the system.

	Maps a user visible alias (str) to the path of a Blender executable
	(str) in self.list, and remembers in self.default which alias is the
	default one. The special alias 'Standard Blender' maps to the plain
	'blender' command found in PATH.
	'''
	def __init__(self, xml= None):
		'''Initialize the version list with default values or from an xml node.'''
		if xml is None:
			self.defaultInit()
		else:
			self.fromXml(xml)
	def defaultInit(self):
		'''Initialize the version list with only the standard blender command.'''
		self.list = {'Standard Blender':'blender'}
		self.default = 'Standard Blender'
	def fromXml(self, xml):
		'''Initialize the version list from a <versionsList> xml node.'''
		self.list = {}
		for version in xml.findall('version'):
			self.list[version.get('alias')] = version.get('path')
		self.default = xml.get('default')
	def toXml(self):
		'''Export the version list as an xml formatted string.'''
		xml = ' <versionsList default ="'+self.default+'" >\n'
		for k, v in self.list.items():
			xml += ' <version alias="'+k+'" path="'+XML.encode(v)+'" />\n'
		xml += ' </versionsList>\n'
		return xml
	def menu(self, log, preferences):
		'''Interactive menu to view and edit the version list.

		Returns True when the list was changed, False otherwise.
		'''
		change = False
		log.menuIn('Blender Version List')
		while True:
			# print log and Blender versions list
			log.print()
			self.print()
			print('''\n \033[4mMenu :\033[0m
	1- Add version
	2- Auto add version
	3- Rename version
	4- Remove version
	5- Change Default Version
	0- Save And Quit
''')
			#treat available actions
			choice= input('menu?').strip().lower()
			if choice in ['0', 'q', 'quit', 'cancel']:
				log.menuOut()# quit preferences menu
				return change
			elif choice == '1':
				change = (self.add(log) or change)
			elif choice == '2':
				change = (self.addAuto(log) or change)
			elif choice == '3':
				change = (self.rename(log, preferences) or change)
			elif choice == '4':
				change = (self.remove(log, preferences) or change)
			elif choice == '5':
				change = (self.chooseDefault(log) or change)
			else:
				log.error('Unknow request', False)
	def print(self, index = False, std = True, default = False):
		'''Display the version list; returns the (sorted) list of aliases.

		index: prefix each alias with a numeric index.
		std: include the 'Standard Blender' entry.
		default: when used with index, append a '[default]' pseudo entry.
		'''
		print('\n \033[4mBlender Version List :\033[0m\n')
		keys = list(self.list.keys())
		keys.sort(key = str.lower)
		if not std:
			# don't display Standard Blender version if std is False
			keys.remove('Standard Blender')
		if index:
			for i, k in enumerate(keys):
				print(str(i)+'- '+k+' :\n '+self.list[k]+'\n')
		else:
			for k in keys:
				print(k+' :\n '+self.list[k]+'\n')
		if default and index:
			# NOTE(review): 'i' is only bound by the enumerate loop above;
			# with an empty list this would raise NameError — confirm
			# callers never combine default/index with an empty list.
			print(str(i+1)+'- [default] \n')
			keys.append('[default]')
		if not index:
			print('\n\nDefault version : '+self.default)
		return keys
	def add(self, log):
		'''Interactively add a Blender version to the list.

		Asks for an executable path, runs it to discover its version and
		lets the user confirm or change the proposed alias. Returns True
		when a version was added.
		'''
		log.menuIn('Add A Version')
		while True:
			# print log
			log.print()
			# get new version path
			choice= input('\nPath of the new version?').strip()
			if choice == '':# quit
				log.menuOut()
				return False
			#remove quote mark and apostrophe in first and last character
			if choice[0] in ['\'', '"'] and choice[-1] == choice[0]:
				choice = choice[1:len(choice)-1]
			# check that the path is absolute: begin by '/'
			if choice[0] != '/':
				log.error('The path must be absolute (begin by «/»)!')
				continue
			# check path exist
			if not os.path.exists(choice):
				log.error('This path correspond to nothing!')
				continue
			# check path is a file
			if not os.path.isfile(choice):
				log.error('This path is not a file!')
				continue
			# check path is executable
			if not os.access(choice, os.X_OK):
				log.error('This file is not executable or you don\'t have the permission to do it!')
				continue
			# get blender version from blender path by running the
			# executable with a version getter script
			path = choice
			version = os.popen('"'+path+'" -b -P "'+os.path.realpath(__file__+'/..')+'/getter/getBlenderVersion.py" ').read()
			version = re.search(r'<\?xml(.|\n)*</root>',version).group(0)
			version = xmlMod.fromstring(version).find('version').get('version')
			alias = 'Blender ('+version+')'
			# recommand an unused alias
			if alias in self.list.keys():
				i = 0
				while alias+'('+str(i)+')' in self.list.keys():
					i+=1
				alias = alias+'('+str(i)+')'
			# get user alias confirmation
			log.menuIn('Choose An Alias')
			while True:
				# print log
				log.print()
				print('\n\n\033[4mRecommanded alias :\033[0m '+alias)
				# get alias
				choice= input('\nPress enter to use recommanded alias or type wanted alias :').strip()
				if choice == '':
					log.menuOut()
					break
				elif re.search(r'^([-a-zA-Z0-9]| |\(|\)|\.){1,}$', choice) is None:
					log.error('alias can only contain alphanumeric (unaccented) characters, spaces, parentheses points and -')
					continue
				elif choice in self.list.keys():
					log.error('Alias already use for another version!')
					continue
				elif len(choice) < 7:
					log.error('Too small alias name (7 characters minimal)!')
					continue
				else:
					alias = choice
					log.menuOut()
					break
			# add version
			self.list[alias] = path
			log.write('('+alias+' : '+path+') Blender version added to list')
			log.menuOut()
			return True
	def addAuto(self, log):
		'''Scan one directory for Blender installs and add them all.

		Each immediate subdirectory containing an executable named
		'blender' is added. Returns True when at least the scan ran.
		'''
		log.menuIn('Automatically Add Versions')
		while True:
			# print log
			log.print()
			print('\n\nAll Blender version directory must be directly in a same directory. Script will not recursivly search for blender version')
			# get new version path
			choice= input('\nPath of the main directory?').strip()
			if choice == '':# quit
				log.menuOut()
				return False
			# remove quote mark and apostrophe in first and last character
			if choice[0] in ['\'', '"'] and choice[-1] == choice[0]:
				choice = choice[1:len(choice)-1]
			# check that the path is absolute: begin by '/'
			if choice[0] != '/':
				log.error('The path must be absolute (begin by «/»)!')
				continue
			# check path exist
			if not os.path.exists(choice):
				log.error('This path correspond to nothing!')
				continue
			# check path is a directory
			if not os.path.isdir(choice):
				log.error('This path is not a directory!')
				continue
			path = choice
			if path[-1] != '/':
				path += '/'
			subdirectories = os.listdir(path)
			for sub in subdirectories:
				# check if there is a blender executable in this directory
				versionPath = path+sub+'/blender'
				if os.path.isdir(path+sub)\
						and os.path.exists(versionPath)\
						and os.path.isfile(versionPath)\
						and os.access(versionPath, os.X_OK):
					# get Blender version by running the executable
					version = os.popen('"'+versionPath+'" -b -P "'+os.path.realpath(__file__+'/..')+'/getter/getBlenderVersion.py" ').read()
					version = re.search(r'<\?xml(.|\n)*</root>',version).group(0)
					version = xmlMod.fromstring(version).find('version').get('version')
					# generate an unused alias
					alias = 'Blender ('+version+')'
					if alias in self.list.keys():
						i = 0
						while alias+'('+str(i)+')' in self.list.keys():
							i+=1
						alias = alias+'('+str(i)+')'
					# add to the list
					self.list[alias] = versionPath
					log.write('('+alias+' : '+versionPath+') Blender version added to list')
			log.menuOut()
			return True
	def rename(self, log, preferences):
		'''Interactive menu to rename a version of the list.

		Also updates the default alias and the presets that reference the
		old alias. Returns True when a rename happened.
		'''
		log.menuIn('Rename Version')
		# choose version
		oldAlias = self.choose(log)
		if oldAlias is None:
			return False
		while True:
			log.print()
			print('\n\n \033[4mRename version :\033[0m')
			print(oldAlias+'\n '+self.list[oldAlias])
			choice = input('\nNew name :').strip()
			if choice == '':
				log.menuOut()
				return False
			if choice in self.list.keys():
				log.error('This alias name is already use by another version.')
				continue
			self.list[choice] = self.list[oldAlias]
			self.list.pop(oldAlias)
			if self.default == oldAlias:
				self.default = choice
			preferences.presets.renameBlenderVersion( oldAlias, choice)
			log.write(oldAlias+' version rename in '+choice+'.')
			log.menuOut()
			return True
	def choose(self, log, std = False, default = False):
		'''Interactive menu to pick a version; returns its alias or None.

		std and default are forwarded to print() to control which entries
		are selectable.
		'''
		log.menuIn('Choose Version')
		while True:
			log.print()
			print('\n\n')
			keys = self.print(True, std, default)
			choice = input('\nIndex of the version that you want to use :').strip()
			if choice == '':
				log.menuOut()
				return None
			try:
				choice = int(choice)
			except ValueError:
				log.error('Unvalid version choice : must be an irteger or an empty string')
				continue
			if choice >= 0 and choice < len(keys):
				log.menuOut()
				return keys[choice]
			else:
				log.error('Unvalid version choice : bad index.')
				continue
	def remove(self, log, preferences):
		'''Interactively remove a version from the list.

		Warns when the version is the default one or used by presets.
		Returns True when a version was removed.
		'''
		log.menuIn('Remove Version')
		# choose version
		alias = self.choose(log)
		if alias is None:
			log.menuOut()
			return False
		log.print()
		print('\n\n \033[4mRemove version :\033[0m')
		print(alias+'\n '+self.list[alias])
		if self.default == alias:
			print('\n\033[31mthis is actually the default version. if you erase it, default version will be set to de blender standard command.\033[0m')
		versionUsed = preferences.presets.useBlenderVersion(alias)
		if versionUsed:
			print('\n\033[31mThis version is actually used by some preset. If you erase it, the preset will automatically be changed to use default version.\033[0m')
		choice = input('\nDo you realy want to erase this version (y)?').strip().lower()
		if choice in ['y', 'yes']:
			self.list.pop(alias)
			if self.default == alias:
				self.default = 'Standard Blender'
			if versionUsed:
				# redirect presets using the removed version to the default
				preferences.presets.eraseBlenderVersion(alias)
			log.write('Remove "'+alias+'" version.')
			log.menuOut()
			return True
		log.menuOut()
		return False
	def chooseDefault(self, log):
		'''Interactively choose the default version. Returns True on change.'''
		log.menuIn('Choose Default Version')
		# choose version
		alias = self.choose(log, True)
		if alias is None:
			log.menuOut()
			return False
		self.default = alias
		log.write('Default version set to "'+self.default+'" version.')
		log.menuOut()
		return True
	def getDefaultPath(self):
		'''Return the executable path of the default version.'''
		return self.getVersionPath(self.default)
	def getVersionPath(self, versionName):
		'''Return the executable path of a version, quoted when needed.

		The '[default]' pseudo alias resolves to the default version.
		'''
		if versionName == '[default]':
			versionName = self.default
		path = self.list[versionName]
		if path != 'blender':
			# quote real paths so they survive shell command construction
			path = '"'+path+'"'
		return path
|
<gh_stars>10-100
from __future__ import print_function
import argparse
import sys
import os
import time
import numpy as np
import mxnet as mx
from mxnet import ndarray as nd
import cv2
from rcnn.logger import logger
from rcnn.config import config, default, generate_config
#from rcnn.tools.test_rcnn import test_rcnn
#from rcnn.tools.test_rpn import test_rpn
from rcnn.processing.bbox_transform import nonlinear_pred, clip_boxes
from rcnn.processing.generate_anchor import generate_anchors_fpn, anchors_plane
from rcnn.processing.nms import gpu_nms_wrapper
from rcnn.processing.bbox_transform import bbox_overlaps
from rcnn.dataset import widerface
class SSHDetector:
  def __init__(self, prefix, epoch, ctx_id=0, test_mode=False):
    """Loads an SSH face detection model from an MXNet checkpoint.

    Args:
      prefix (str): checkpoint path prefix passed to mx.model.load_checkpoint.
      epoch (int): checkpoint epoch number to load.
      ctx_id (int): GPU device index used for inference and NMS.
      test_mode (bool): if True, bind a MutableModule with a large maximum
          input size (2400x2400) instead of a fixed-size (640x640) Module.
    """
    self.ctx_id = ctx_id
    self.ctx = mx.gpu(self.ctx_id)
    self.fpn_keys = []
    fpn_stride = []
    fpn_base_size = []
    # FPN feature strides, from coarsest (32) to finest (8).
    self._feat_stride_fpn = [32, 16, 8]
    for s in self._feat_stride_fpn:
      self.fpn_keys.append('stride%s'%s)
      fpn_stride.append(int(s))
      fpn_base_size.append(16)
    self._scales = np.array([32,16,8,4,2,1])
    self._ratios = np.array([1.0]*len(self._feat_stride_fpn))
    #self._anchors_fpn = dict(zip(self.fpn_keys, generate_anchors_fpn(base_size=fpn_base_size, scales=self._scales, ratios=self._ratios)))
    # NOTE(review): generate_anchors_fpn() is called without arguments, so
    # fpn_stride/fpn_base_size/self._scales/self._ratios computed above are
    # not passed to it — confirm the rcnn.processing defaults match.
    self._anchors_fpn = dict(zip(self.fpn_keys, generate_anchors_fpn()))
    # Number of anchors per spatial location, keyed by stride.
    self._num_anchors = dict(zip(self.fpn_keys, [anchors.shape[0] for anchors in self._anchors_fpn.values()]))
    self._rpn_pre_nms_top_n = 1000
    #self._rpn_post_nms_top_n = rpn_post_nms_top_n
    #self.score_threshold = 0.05
    self.nms_threshold = 0.3
    self._bbox_pred = nonlinear_pred
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    self.nms = gpu_nms_wrapper(self.nms_threshold, self.ctx_id)
    self.pixel_means = np.array([103.939, 116.779, 123.68]) #BGR
    # The hard coded means above are immediately overridden by the config.
    self.pixel_means = config.PIXEL_MEANS
    print('means', self.pixel_means)
    if not test_mode:
      image_size = (640, 640)
      self.model = mx.mod.Module(symbol=sym, context=self.ctx, label_names = None)
      self.model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))], for_training=False)
      self.model.set_params(arg_params, aux_params)
    else:
      from rcnn.core.module import MutableModule
      image_size = (2400, 2400)
      data_shape = [('data', (1,3,image_size[0], image_size[1]))]
      self.model = MutableModule(symbol=sym, data_names=['data'], label_names=None,
                                 context=self.ctx, max_data_shapes=data_shape)
      self.model.bind(data_shape, None, for_training=False)
      self.model.set_params(arg_params, aux_params)
def detect(self, img, threshold=0.05, scales=[1.0]):
    """Run the SSH detector over ``img`` at one or more pyramid scales.

    args:
        img: BGR image array (H, W, 3), e.g. as returned by cv2.imread
        threshold: final score threshold applied after NMS
        scales: list of resize factors; each is forwarded through the
            network and the per-scale proposals are merged.
            NOTE(review): mutable default list — never mutated here, so
            harmless, but a tuple would be safer.
    returns:
        det: (N, 5) float32 array of [x1, y1, x2, y2, score] mapped back
            to the ORIGINAL image coordinates (proposals are divided by
            the scale they were detected at).
    """
    proposals_list = []
    scores_list = []
    for im_scale in scales:
        # Resize once per pyramid scale; reuse the input at scale 1.0.
        if im_scale!=1.0:
            im = cv2.resize(img, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        else:
            im = img
        im = im.astype(np.float32)
        #self.model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))], for_training=False)
        im_info = [im.shape[0], im.shape[1], im_scale]
        # HWC BGR -> NCHW with per-channel mean subtraction (channel order
        # is reversed via 2 - i, so the tensor ends up RGB).
        im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]))
        for i in range(3):
            im_tensor[0, i, :, :] = im[:, :, 2 - i] - self.pixel_means[2 - i]
        data = nd.array(im_tensor)
        db = mx.io.DataBatch(data=(data,), provide_data=[('data', data.shape)])
        self.model.forward(db, is_train=False)
        net_out = self.model.get_outputs()
        pre_nms_topN = self._rpn_pre_nms_top_n
        #post_nms_topN = self._rpn_post_nms_top_n
        #min_size_dict = self._rpn_min_size_fpn
        # One (score, bbox-delta) output pair per FPN stride.
        for s in self._feat_stride_fpn:
            # In multi-scale testing, skip the coarsest stride at the last
            # (largest) scale — it would mostly duplicate earlier scales.
            if len(scales)>1 and s==32 and im_scale==scales[-1]:
                continue
            _key = 'stride%s'%s
            stride = int(s)
            # Output layout assumed: [score32, bbox32, score16, bbox16,
            # score8, bbox8] — presumes _feat_stride_fpn covers strides
            # 32/16/8; TODO confirm against __init__ (not visible here).
            idx = 0
            if s==16:
                idx=2
            elif s==8:
                idx=4
            print('getting', im_scale, stride, idx, len(net_out), data.shape, file=sys.stderr)
            scores = net_out[idx].asnumpy()
            #print(scores.shape)
            idx+=1
            #print('scores',stride, scores.shape, file=sys.stderr)
            # Keep only the second half of the channel axis — the
            # foreground (face) scores; the first A channels are background.
            scores = scores[:, self._num_anchors['stride%s'%s]:, :, :]
            bbox_deltas = net_out[idx].asnumpy()
            #if DEBUG:
            # print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
            # print 'scale: {}'.format(im_info[2])
            _height, _width = int(im_info[0] / stride), int(im_info[1] / stride)
            height, width = bbox_deltas.shape[2], bbox_deltas.shape[3]
            A = self._num_anchors['stride%s'%s]
            K = height * width
            # Tile the per-cell anchors over the feature map grid.
            anchors = anchors_plane(height, width, stride, self._anchors_fpn['stride%s'%s].astype(np.float32))
            #print((height, width), (_height, _width), anchors.shape, bbox_deltas.shape, scores.shape, file=sys.stderr)
            anchors = anchors.reshape((K * A, 4))
            #print('pre', bbox_deltas.shape, height, width)
            bbox_deltas = self._clip_pad(bbox_deltas, (height, width))
            #print('after', bbox_deltas.shape, height, width)
            # NCHW -> (K*A, 4) / (K*A, 1) so rows align with anchors.
            bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
            scores = self._clip_pad(scores, (height, width))
            scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
            #print(anchors.shape, bbox_deltas.shape, A, K, file=sys.stderr)
            # Decode deltas into absolute boxes and clip to the image.
            proposals = self._bbox_pred(anchors, bbox_deltas)
            #proposals = anchors
            proposals = clip_boxes(proposals, im_info[:2])
            #keep = self._filter_boxes(proposals, min_size_dict['stride%s'%s] * im_info[2])
            #proposals = proposals[keep, :]
            #scores = scores[keep]
            #print('333', proposals.shape)
            # Keep the pre_nms_topN highest-scoring proposals per stride.
            scores_ravel = scores.ravel()
            order = scores_ravel.argsort()[::-1]
            if pre_nms_topN > 0:
                order = order[:pre_nms_topN]
            proposals = proposals[order, :]
            scores = scores[order]
            # Map boxes back to original-image coordinates.
            proposals /= im_scale
            proposals_list.append(proposals)
            scores_list.append(scores)
    # Merge all scales/strides and sort globally by score.
    proposals = np.vstack(proposals_list)
    scores = np.vstack(scores_list)
    scores_ravel = scores.ravel()
    order = scores_ravel.argsort()[::-1]
    #if config.TEST.SCORE_THRESH>0.0:
    # _count = np.sum(scores_ravel>config.TEST.SCORE_THRESH)
    # order = order[:_count]
    #if pre_nms_topN > 0:
    # order = order[:pre_nms_topN]
    proposals = proposals[order, :]
    scores = scores[order]
    det = np.hstack((proposals, scores)).astype(np.float32)
    #if np.shape(det)[0] == 0:
    # print("Something wrong with the input image(resolution is too low?), generate fake proposals for it.")
    # proposals = np.array([[1.0, 1.0, 2.0, 2.0]]*post_nms_topN, dtype=np.float32)
    # scores = np.array([[0.9]]*post_nms_topN, dtype=np.float32)
    # det = np.array([[1.0, 1.0, 2.0, 2.0, 0.9]]*post_nms_topN, dtype=np.float32)
    # GPU NMS, then the final score cut.
    if self.nms_threshold<1.0:
        keep = self.nms(det)
        det = det[keep, :]
    if threshold>0.0:
        keep = np.where(det[:, 4] >= threshold)[0]
        det = det[keep, :]
    return det
@staticmethod
def _filter_boxes(boxes, min_size):
""" Remove all boxes with any side smaller than min_size """
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
@staticmethod
def _clip_pad(tensor, pad_shape):
"""
Clip boxes of the pad area.
:param tensor: [n, c, H, W]
:param pad_shape: [h, w]
:return: [n, c, h, w]
"""
H, W = tensor.shape[2:]
h, w = pad_shape
if h < H or w < W:
tensor = tensor[:, :, :h, :w].copy()
return tensor
def parse_args():
    """Build and parse the command line for the recall-evaluation script.

    The network/dataset pair is parsed first (parse_known_args) because
    generate_config() must run before the remaining defaults are valid.
    """
    ap = argparse.ArgumentParser(description='Test a Faster R-CNN network')
    # general
    ap.add_argument('--network', help='network name', default=default.network, type=str)
    ap.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
    known, _ = ap.parse_known_args()
    generate_config(known.network, known.dataset)
    ap.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
    ap.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
    ap.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
    # testing
    ap.add_argument('--prefix', help='model to test with', default=default.e2e_prefix, type=str)
    ap.add_argument('--epoch', help='model to test with', default=0, type=int)
    ap.add_argument('--gpu', help='GPU device to test with', default=7, type=int)
    ap.add_argument('--output', help='output folder', default=os.path.join(default.root_path, 'output'), type=str)
    ap.add_argument('--pyramid', help='enable pyramid test', action='store_true')
    # rcnn
    ap.add_argument('--vis', help='turn on visualization', action='store_true')
    ap.add_argument('--thresh', help='valid detection threshold', default=0.05, type=float)
    ap.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
    ap.add_argument('--has_rpn', help='generate proposals on the fly', action='store_true', default=True)
    ap.add_argument('--proposal', help='can be ss for selective search or rpn', default='rpn', type=str)
    return ap.parse_args()
# Process-wide singletons shared with get_boxes(): the SSHDetector
# instance and the parsed CLI arguments. Both are populated in
# test()/main() before get_boxes() is called.
detector = None
args = None
def _base_scale(im_shape, target_size, max_size):
    """Scale factor mapping the image's short side to target_size,
    capped so that the long side does not exceed max_size."""
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    return im_scale


def get_boxes(roi, pyramid):
    """Run the global detector on roi['image'] and return its detections.

    args:
        roi: roidb entry; only roi['image'] (a file path) is read here
        pyramid: if True, test at several scales derived from TEST_SCALES;
            otherwise use a single short-side-1200 scale
    returns:
        (N, 5) array of [x1, y1, x2, y2, score] from detector.detect()
    """
    im = cv2.imread(roi['image'])
    if not pyramid:
        # Single-scale testing: short side 1200, long side capped at 1600.
        scales = [_base_scale(im.shape, 1200, 1600)]
    else:
        # Multi-scale testing: derive each pyramid scale from the base
        # scale computed for a short side of 800 (long side capped 1200).
        TEST_SCALES = [500, 800, 1200, 1600]
        target_size = 800
        im_scale = _base_scale(im.shape, target_size, 1200)
        scales = [float(scale) / target_size * im_scale for scale in TEST_SCALES]
    boxes = detector.detect(im, threshold=args.thresh, scales=scales)
    return boxes
def test(args):
    """Evaluate proposal recall of an SSHDetector over a ground-truth roidb.

    For every image: run detection, compute IoU overlaps against the
    ground-truth boxes, accumulate per-image and running recall at
    config.TEST.IOU_THRESH, and write the detections to a WIDER-style
    per-image .txt file under args.output/<subdir>/.
    """
    print('test with', args)
    global detector
    output_folder = args.output
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    detector = SSHDetector(args.prefix, args.epoch, args.gpu, test_mode=True)
    # SECURITY NOTE(review): eval() on a CLI argument executes arbitrary
    # code; acceptable only because args.dataset is operator-supplied.
    imdb = eval(args.dataset)(args.image_set, args.root_path, args.dataset_path)
    roidb = imdb.gt_roidb()
    gt_overlaps = np.zeros(0)
    # overall = [total ground-truth boxes found, total ground-truth boxes]
    overall = [0.0, 0.0]
    gt_max = np.array( (0.0, 0.0) )
    num_pos = 0
    for i in xrange(len(roidb)):
        if i%10==0:
            print('processing', i, file=sys.stderr)
        roi = roidb[i]
        boxes = get_boxes(roi, args.pyramid)
        gt_boxes = roidb[i]['boxes'].copy()
        # Inclusive pixel-count areas of the ground-truth boxes.
        gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0] + 1) * (gt_boxes[:, 3] - gt_boxes[:, 1] + 1)
        num_pos += gt_boxes.shape[0]
        overlaps = bbox_overlaps(boxes.astype(np.float), gt_boxes.astype(np.float))
        #print(im_info, gt_boxes.shape, boxes.shape, overlaps.shape, file=sys.stderr)
        # Best detection IoU for each ground-truth box (0 if no detections).
        _gt_overlaps = np.zeros((gt_boxes.shape[0]))
        if boxes.shape[0]>0:
            _gt_overlaps = overlaps.max(axis=0)
            #print('max_overlaps', _gt_overlaps, file=sys.stderr)
            # Log every ground-truth box that no detection covered.
            for j in range(len(_gt_overlaps)):
                if _gt_overlaps[j]>config.TEST.IOU_THRESH:
                    continue
                print(j, 'failed', gt_boxes[j], 'max_overlap:', _gt_overlaps[j], file=sys.stderr)
        # append recorded IoU coverage level
        found = (_gt_overlaps > config.TEST.IOU_THRESH).sum()
        _recall = found / float(gt_boxes.shape[0])
        print('recall', _recall, gt_boxes.shape[0], boxes.shape[0], gt_areas, file=sys.stderr)
        overall[0]+=found
        overall[1]+=gt_boxes.shape[0]
        #gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
        #_recall = (gt_overlaps >= threshold).sum() / float(num_pos)
        # Running recall over all images processed so far.
        _recall = float(overall[0])/overall[1]
        print('recall_all', _recall, file=sys.stderr)
        # Write detections: output/<parent-dir>/<image>.txt with one
        # "x y w h score" line per box (WIDER evaluation format).
        _vec = roidb[i]['image'].split('/')
        out_dir = os.path.join(output_folder, _vec[-2])
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        out_file = os.path.join(out_dir, _vec[-1].replace('jpg', 'txt'))
        with open(out_file, 'w') as f:
            name = '/'.join(roidb[i]['image'].split('/')[-2:])
            f.write("%s\n"%(name))
            f.write("%d\n"%(boxes.shape[0]))
            for b in range(boxes.shape[0]):
                box = boxes[b]
                f.write("%d %d %d %d %g \n"%(box[0], box[1], box[2]-box[0], box[3]-box[1], box[4]))
def main():
    """CLI entry point: parse arguments into the module-level ``args``
    global (read by get_boxes) and run the recall evaluation."""
    global args
    args = parse_args()
    logger.info('Called with argument: %s' % args)
    test(args)
# Script entry point.
if __name__ == '__main__':
    main()
|
<filename>botogram/shared.py
# Copyright (c) 2015-2019 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import threading
import functools
import builtins
# Trivial subclass of the builtin dict. Unlike builtins.dict, instances
# of a subclass can carry arbitrary attributes — SharedMemory relies on
# this to attach a ``lock`` method to each component's memory object.
class dict(builtins.dict):
    pass
class LocalDriver:
    """Local (in-process) driver for the shared memory.

    Memories are plain per-component dicts; locks wrap threading.Lock
    and track an ``acquired`` flag so their state can be exported and
    re-imported when switching drivers or pickling.
    """

    def __init__(self):
        self._memories = {}
        self._locks = {}

    def __reduce__(self):
        # Pickle via the exported state; rebuild_local_driver re-imports it.
        return rebuild_local_driver, (self.export_data(),)

    def get(self, component):
        """Return (memory, is_new) for the given component key."""
        # Create the shared memory if it doesn't exist
        new = False
        if component not in self._memories:
            self._memories[component] = dict()
            new = True
        return self._memories[component], new

    def lock_acquire(self, lock_id):
        """Acquire the lock, creating it on first use (blocks if held)."""
        # Create a new lock if it doesn't exist yet
        if lock_id not in self._locks:
            self._locks[lock_id] = {"obj": threading.Lock(), "acquired": False}
        self._locks[lock_id]["obj"].acquire()
        self._locks[lock_id]["acquired"] = True

    def lock_release(self, lock_id):
        """Release the lock; releasing an unknown lock is a no-op."""
        if lock_id not in self._locks:
            return
        self._locks[lock_id]["acquired"] = False
        # BUG FIX: release the underlying threading.Lock — the old code
        # called .release() on the bookkeeping dict itself.
        self._locks[lock_id]["obj"].release()

    def lock_status(self, lock_id):
        """Return True if the lock exists and is currently acquired."""
        if lock_id not in self._locks:
            return False
        return self._locks[lock_id]["acquired"]

    def import_data(self, data):
        """Load previously exported storage and re-acquire exported locks."""
        self._memories = dict(data["storage"])
        # Rebuild the locks
        self._locks = {}
        for lock_id in data["locks"]:
            self.lock_acquire(lock_id)

    def export_data(self):
        """Export storage plus the ids of currently ACQUIRED locks.

        BUG FIX: iterate ``self._locks.items()`` (iterating the dict
        directly yields only keys, breaking the 2-tuple unpacking) and
        export the locks that ARE acquired — import_data re-acquires
        exactly the exported ids, so the old inverted predicate would
        have flipped every lock's state on a round-trip.
        """
        locks = [lock_id for lock_id, d in self._locks.items() if d["acquired"]]
        return {"storage": self._memories.copy(), "locks": locks}
class Lock:
    """Lock backed by the botogram's shared memory.

    All state lives in the parent SharedMemory's driver; this object is
    just a handle identified by ``lock_id`` and is usable as a context
    manager (``with lock: ...``).
    """

    def __init__(self, parent, lock_id):
        self._memory = parent
        self._id = lock_id

    @property
    def acquired(self):
        """Whether this lock is currently held."""
        return self._memory.driver.lock_status(self._id)

    def acquire(self):
        """Acquire the lock"""
        self._memory.driver.lock_acquire(self._id)

    def release(self):
        """Release the lock"""
        self._memory.driver.lock_release(self._id)

    # Entering the context manager simply acquires the lock.
    __enter__ = acquire

    def __exit__(self, *__):
        self.release()
class SharedMemory:
    """Implementation of the shared memory for one bot.

    Wraps a storage driver (LocalDriver by default) and hands out
    per-component memory objects, preparer hooks, and shared locks.
    """

    def __init__(self, driver=None):
        # The default driver is LocalDriver
        if driver is None:
            driver = LocalDriver()
        self.driver = driver
        # component name -> list of preparer hooks run on new memories
        self._preparers = {}

    def __reduce__(self):
        # Pickle only the driver; rebuild() recreates this wrapper.
        return rebuild, (self.driver,)

    def _key_of(self, *parts):
        """Get the key for a shared item"""
        # NOTE(review): parts must all be strings; join raises otherwise.
        return ":".join(parts)

    def register_preparers_list(self, component, inits):
        """Register a new list to pick preparers from"""
        # Ignore the request if a list was already registered
        if component in self._preparers:
            return
        self._preparers[component] = inits

    def of(self, bot, component, *other):
        """Get the shared memory of a specific component"""
        memory, is_new = self.driver.get(self._key_of(bot, component, *other))
        # Treat as a standard shared memory only if no other names are provided
        if not other:
            # Be sure to initialize the shared memory if it's needed
            if is_new:
                self.apply_preparers(component, memory)
            # Add the lock method to the object
            memory.lock = functools.partial(self.lock, bot, component)
        return memory

    def apply_preparers(self, component, memory):
        """Apply all the preparers of a component to a memory"""
        if component not in self._preparers:
            return
        for preparer in self._preparers[component]:
            preparer.call(memory)

    def switch_driver(self, driver=None):
        """Use another driver for this shared memory"""
        if driver is None:
            driver = LocalDriver()
        # Migrate the existing state into the new driver before swapping.
        driver.import_data(self.driver.export_data())
        self.driver = driver

    def lock(self, bot, component, name):
        """Get a shared lock"""
        return Lock(self, self._key_of(bot, component, name))
def rebuild(driver):
    # Unpickling helper for SharedMemory.__reduce__: recreate the wrapper
    # around the (already reconstructed) driver.
    return SharedMemory(driver)
def rebuild_local_driver(memories):
    """Unpickling helper for LocalDriver.__reduce__: recreate a driver
    from its exported state dict."""
    driver = LocalDriver()
    driver.import_data(memories)
    return driver
|
#(C) Copyright <NAME> 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
unit tests for image application. Assumes django server is up
and running on the specified host and port
'''
import unittest
import getopt, sys
import json
from random import randint
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
from tschartslib.clinic.clinic import CreateClinic, DeleteClinic
from tschartslib.station.station import CreateStation, DeleteStation
from tschartslib.patient.patient import CreatePatient, DeletePatient
class CreateImage(ServiceAPI):
    """POST tscharts/v1/image/ — create an image record.

    Payload fields are filled in via the set* methods; only the fields
    that were set are sent.
    """

    def __init__(self, host, port, token):
        super(CreateImage, self).__init__()
        self.setHttpMethod("POST")
        self.setHost(host)
        self.setPort(port)
        self.setToken(token)
        self._payload = {}
        self.setPayload(self._payload)
        self.setURL("tscharts/v1/image/")

    def _set(self, key, value):
        # Every setter follows the same pattern: update the payload dict
        # and push it back into the request.
        self._payload[key] = value
        self.setPayload(self._payload)

    def setClinic(self, clinic):
        self._set("clinic", clinic)

    def setStation(self, station):
        self._set("station", station)

    def setPatient(self, patient):
        self._set("patient", patient)

    def setData(self, data):
        self._set("data", data)

    def setType(self, imagetype):
        self._set("type", imagetype)
class GetImage(ServiceAPI):
    """GET tscharts/v1/image/ — fetch one image by id, or query by any
    combination of clinic/station/patient/type/newest/sort filters.

    Each setter stores its value and rebuilds the URL immediately.
    """

    def __init__(self, host, port, token):
        super(GetImage, self).__init__()
        self.setHttpMethod("GET")
        self.setHost(host)
        self.setPort(port)
        self.setToken(token)
        self._clinic = None
        self._station = None
        self._patient = None
        self._type = None
        self._id = None
        self._sort = None
        self._newest = None
        self.makeURL()

    def makeURL(self):
        """Rebuild the request URL from the currently-set filters.

        Replaces six copy-pasted '?'/'&' blocks with a single join; the
        parameter order (clinic, station, patient, type, newest, sort)
        matches the original so the produced URLs are identical.
        """
        if self._id is not None:
            base = "tscharts/v1/image/{}/".format(self._id)
        else:
            base = "tscharts/v1/image/"
        query = [("clinic", self._clinic),
                 ("station", self._station),
                 ("patient", self._patient),
                 ("type", self._type),
                 ("newest", self._newest),
                 ("sort", self._sort)]
        terms = ["{}={}".format(name, val) for name, val in query if val is not None]
        if terms:
            base += "?" + "&".join(terms)
        self.setURL(base)

    def setId(self, id):
        self._id = id
        self.makeURL()

    def setClinic(self, clinic):
        self._clinic = clinic
        self.makeURL()

    def setNewest(self, val):
        self._newest = val
        self.makeURL()

    def setStation(self, station):
        self._station = station
        self.makeURL()

    def setPatient(self, patient):
        self._patient = patient
        self.makeURL()

    def setType(self, imagetype):
        self._type = imagetype
        self.makeURL()

    def setSort(self, sort):
        self._sort = sort
        self.makeURL()
class DeleteImage(ServiceAPI):
    """DELETE tscharts/v1/image/<id>/ — remove a single image record.

    The target id is baked into the URL; no payload is sent.
    """
    def __init__(self, host, port, token, id):
        super(DeleteImage, self).__init__()
        self.setHttpMethod("DELETE")
        self.setHost(host)
        self.setPort(port)
        self.setToken(token)
        self.setURL("tscharts/v1/image/{}/".format(id))
class TestTSImage(unittest.TestCase):
    """Integration tests for the tscharts image API.

    Requires a live django server; connection globals (host, port,
    username, password) are set by main(). The original three test
    methods repeated the same 20-line patient payload and ~25 nearly
    identical request blocks; those are factored into private helpers
    below, preserving the exact request/assertion sequence.
    """

    # Sentinel distinguishing "argument not supplied" from an explicit
    # None/"" — the negative tests deliberately send those as values.
    _UNSET = object()

    def setUp(self):
        # Authenticate once per test; the token is shared module-wide.
        login = Login(host, port, username, password)
        ret = login.send(timeout=30)
        self.assertEqual(ret[0], 200)
        self.assertTrue("token" in ret[1])
        global token
        token = ret[1]["token"]

    def _patient_data(self, paternal_last="abcd1234"):
        """Return a complete patient-registration payload."""
        return {
            "paternal_last": paternal_last,
            "maternal_last": "yyyyyy",
            "first": "zzzzzzz",
            "middle": "",
            "suffix": "Jr.",
            "prefix": "",
            "dob": "04/01/1962",
            "gender": "Female",
            "street1": "1234 First Ave",
            "street2": "",
            "city": "Ensenada",
            "colonia": "",
            "state": u"Baja California",
            "phone1": "1-111-111-1111",
            "phone2": "",
            "email": "<EMAIL>",
            "emergencyfullname": "<NAME>",
            "emergencyphone": "1-222-222-2222",
            "emergencyemail": "<EMAIL>",
        }

    def _create_image(self, patient=_UNSET, clinic=_UNSET, station=_UNSET,
                      imagetype=_UNSET, data=_UNSET):
        """Send a CreateImage request setting exactly the supplied fields.

        Returns the (status, json) response tuple."""
        x = CreateImage(host, port, token)
        if patient is not TestTSImage._UNSET:
            x.setPatient(patient)
        if clinic is not TestTSImage._UNSET:
            x.setClinic(clinic)
        if station is not TestTSImage._UNSET:
            x.setStation(station)
        if imagetype is not TestTSImage._UNSET:
            x.setType(imagetype)
        if data is not TestTSImage._UNSET:
            x.setData(data)
        return x.send(timeout=30)

    def _query_images(self, clinic=None, station=None, patient=None,
                      imagetype=None, sort=None):
        """Send a GetImage query with the supplied (non-None) filters."""
        x = GetImage(host, port, token)
        if clinic is not None:
            x.setClinic(clinic)
        if station is not None:
            x.setStation(station)
        if patient is not None:
            x.setPatient(patient)
        if imagetype is not None:
            x.setType(imagetype)
        if sort is not None:
            x.setSort(sort)
        return x.send(timeout=30)

    def _get_image(self, imageid):
        """Fetch a single image record by id."""
        x = GetImage(host, port, token)
        x.setId(imageid)
        return x.send(timeout=30)

    def _check_image(self, imageid, clinicid, stationid, patientid, imagetype):
        """GET the image and verify every stored field round-tripped."""
        ret = self._get_image(imageid)
        self.assertEqual(ret[0], 200)
        self.assertTrue("clinic" in ret[1])
        self.assertEqual(int(ret[1]["clinic"]), clinicid)
        self.assertTrue("station" in ret[1])
        self.assertEqual(int(ret[1]["station"]), stationid)
        self.assertTrue("patient" in ret[1])
        self.assertEqual(int(ret[1]["patient"]), patientid)
        self.assertTrue("type" in ret[1])
        self.assertEqual(ret[1]["type"], imagetype)
        self.assertTrue("data" in ret[1])
        self.assertEqual(ret[1]["data"], "ABCDEFG")

    def _make_fixtures(self):
        """Create a clinic, a station, and a patient; return their ids."""
        x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)
        self.assertTrue("id" in ret[1])
        clinicid = int(ret[1]["id"])
        x = CreateStation(host, port, token, "ENT")
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)
        stationid = int(ret[1]["id"])
        x = CreatePatient(host, port, token, self._patient_data())
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)
        patientid = int(ret[1]["id"])
        return clinicid, stationid, patientid

    def _destroy_fixtures(self, clinicid, stationid, patientid):
        """Delete the patient, station, and clinic from _make_fixtures."""
        for x in (DeletePatient(host, port, token, patientid),
                  DeleteStation(host, port, token, stationid),
                  DeleteClinic(host, port, token, clinicid)):
            ret = x.send(timeout=30)
            self.assertEqual(ret[0], 200)

    def testCreateImage(self):
        clinicid, stationid, patientid = self._make_fixtures()

        # Round-trip every supported image type.
        for imageType in ["Xray", "Headshot", "Audiogram", "Surgery"]:
            ret = self._create_image(patient=patientid, clinic=clinicid,
                                     station=stationid, imagetype=imageType,
                                     data="ABCDEFG")  # doesn't matter if it is actual image data
            self.assertEqual(ret[0], 200)
            imageid = int(ret[1]["id"])
            self._check_image(imageid, clinicid, stationid, patientid, imageType)
            x = DeleteImage(host, port, token, imageid)
            ret = x.send(timeout=30)
            self.assertEqual(ret[0], 200)
            ret = self._get_image(imageid)
            self.assertEqual(ret[0], 404)  # not found

        # Non-existent foreign keys are rejected with 404.
        ret = self._create_image(patient=patientid, clinic=99999,
                                 station=stationid, imagetype="Headshot",
                                 data="ABCDEFG")
        self.assertEqual(ret[0], 404)
        ret = self._create_image(patient=patientid, clinic=clinicid,
                                 station=9999, imagetype="Headshot",
                                 data="ABCDEFG")
        self.assertEqual(ret[0], 404)
        ret = self._create_image(patient=9999, clinic=clinicid,
                                 station=stationid, imagetype="Headshot",
                                 data="ABCDEFG")
        self.assertEqual(ret[0], 404)

        # Malformed parameter values are rejected with 400.
        ret = self._create_image(patient=patientid, clinic="fffff",
                                 station=stationid, imagetype="Headshot",
                                 data="ABCDEFG")
        self.assertEqual(ret[0], 400)
        ret = self._create_image(patient=patientid, clinic=clinicid,
                                 station=None, imagetype="Headshot",
                                 data="ABCDEFG")
        self.assertEqual(ret[0], 400)
        ret = self._create_image(patient="", clinic=clinicid,
                                 station=stationid, imagetype="Headshot",
                                 data="ABCDEFG")
        self.assertEqual(ret[0], 400)

        # A missing patient is an error ...
        ret = self._create_image(clinic=clinicid, station=stationid,
                                 imagetype="Headshot", data="ABCDEFG")
        self.assertEqual(ret[0], 400)
        # ... but clinic and station are each optional.
        ret = self._create_image(patient=patientid, station=stationid,
                                 imagetype="Headshot", data="ABCDEFG")
        self.assertEqual(ret[0], 200)
        ret = self._create_image(patient=patientid, clinic=clinicid,
                                 imagetype="Headshot", data="ABCDEFG")
        self.assertEqual(ret[0], 200)

        # Unknown image type and missing data are rejected with 400.
        ret = self._create_image(patient=patientid, clinic=clinicid,
                                 station=stationid, imagetype="Bad Type",
                                 data="ABCDEFG")
        self.assertEqual(ret[0], 400)
        ret = self._create_image(patient=patientid, clinic=clinicid,
                                 station=stationid, imagetype="Headshot")
        self.assertEqual(ret[0], 400)

        self._destroy_fixtures(clinicid, stationid, patientid)

    def testDeleteImage(self):
        clinicid, stationid, patientid = self._make_fixtures()

        ret = self._create_image(patient=patientid, clinic=clinicid,
                                 station=stationid, imagetype="Headshot",
                                 data="ABCDEFG")  # doesn't matter if it is actual image data
        self.assertEqual(ret[0], 200)
        imageid = int(ret[1]["id"])
        self._check_image(imageid, clinicid, stationid, patientid, "Headshot")

        # First delete succeeds; the record is then gone.
        x = DeleteImage(host, port, token, imageid)
        ret = x.send(timeout=30)
        self.assertEqual(ret[0], 200)
        ret = self._get_image(imageid)
        self.assertEqual(ret[0], 404)  # not found

        # Deleting again, or with a bad/unknown id, fails.
        for badid, status in ((imageid, 404), ("", 400), (9999, 404), (None, 404)):
            x = DeleteImage(host, port, token, badid)
            ret = x.send(timeout=30)
            self.assertEqual(ret[0], status)

        self._destroy_fixtures(clinicid, stationid, patientid)

    def testGetAllImages(self):
        clinics = []
        stations = []
        patients = []
        images = []
        nclinics = 3
        nstations = 4
        npatients = 5
        nimages = 1

        # Build a full cross-product of clinics x stations x patients,
        # with nimages images for each combination.
        for i in range(1, nclinics + 1):
            x = CreateClinic(host, port, token, "Ensenada",
                             "{}/05/2016".format(i), "{}/06/2016".format(i))
            ret = x.send(timeout=30)
            self.assertEqual(ret[0], 200)
            self.assertTrue("id" in ret[1])
            clinics.append(int(ret[1]["id"]))
        for j in range(1, nstations + 1):
            x = CreateStation(host, port, token, "Dental{}".format(j))
            ret = x.send(timeout=30)
            self.assertEqual(ret[0], 200)
            stations.append(int(ret[1]["id"]))
        for k in range(1, npatients + 1):
            x = CreatePatient(host, port, token,
                              self._patient_data("abcd1234{}".format(k)))
            ret = x.send(timeout=30)
            self.assertEqual(ret[0], 200)
            patients.append(int(ret[1]["id"]))
        for i in clinics:
            for j in stations:
                for k in patients:
                    for l in range(0, nimages):
                        ret = self._create_image(patient=k, clinic=i,
                                                 station=j, imagetype="Headshot",
                                                 data="ABCDEFG{}".format(l))
                        self.assertEqual(ret[0], 200)
                        images.append(int(ret[1]["id"]))

        # Unknown ids in any position yield 404; a bad type yields 400.
        ret = self._query_images(clinic=9999, station=stations[0],
                                 patient=patients[0], imagetype="Headshot")
        self.assertEqual(ret[0], 404)
        ret = self._query_images(clinic=clinics[0], station=9999,
                                 patient=patients[0], imagetype="Headshot")
        self.assertEqual(ret[0], 404)
        ret = self._query_images(clinic=clinics[0], station=stations[0],
                                 patient=9999, imagetype="Headshot")
        self.assertEqual(ret[0], 404)
        ret = self._query_images(clinic=clinics[0], station=stations[0],
                                 patient=patients[0], imagetype="yadda")
        self.assertEqual(ret[0], 400)
        # sort must be the literal lowercase strings "true"/"false".
        for sortval, status in (("yadda", 400), ("False", 400), ("True", 400),
                                ("false", 200), ("true", 200)):
            ret = self._query_images(clinic=clinics[0], station=stations[0],
                                     patient=patients[0], sort=sortval)
            self.assertEqual(ret[0], status)

        sort = "true"
        for c in clinics:
            for s in stations:
                for p in patients:
                    # Alternate the sort direction between iterations.
                    sort = "false" if sort == "true" else "true"
                    # query by type
                    ret = self._query_images(patient=p, sort=sort,
                                             imagetype="Headshot")
                    self.assertEqual(ret[0], 200)
                    # query by clinic
                    ret = self._query_images(clinic=c, sort=sort)
                    self.assertEqual(ret[0], 200)
                    self.assertEqual(len(ret[1]), len(images) / nclinics)
                    # query by clinic and type
                    ret = self._query_images(clinic=c, sort=sort,
                                             imagetype="Headshot")
                    self.assertEqual(ret[0], 200)
                    self.assertEqual(len(ret[1]), len(images) / nclinics)
                    # query by station
                    ret = self._query_images(station=s, sort=sort)
                    self.assertEqual(ret[0], 200)
                    self.assertEqual(len(ret[1]), len(images) / nstations)
                    # query by station and type (no sort, as originally)
                    ret = self._query_images(station=s, imagetype="Headshot")
                    self.assertEqual(ret[0], 200)
                    # query by clinic and station
                    ret = self._query_images(clinic=c, station=s, sort=sort)
                    self.assertEqual(ret[0], 200)
                    self.assertEqual(len(ret[1]), len(images) / (nclinics * nstations))
                    # query by clinic, station and type
                    ret = self._query_images(clinic=c, station=s, sort=sort,
                                             imagetype="Headshot")
                    self.assertEqual(ret[0], 200)
                    # query by clinic and patient
                    ret = self._query_images(clinic=c, patient=p, sort=sort)
                    self.assertEqual(ret[0], 200)
                    self.assertEqual(len(ret[1]), len(images) / (nclinics * npatients))
                    # query by clinic, patient and type
                    ret = self._query_images(clinic=c, patient=p, sort=sort,
                                             imagetype="Headshot")
                    self.assertEqual(ret[0], 200)
                    # query by clinic, station, and patient
                    ret = self._query_images(clinic=c, station=s, patient=p,
                                             sort=sort)
                    self.assertEqual(ret[0], 200)
                    self.assertEqual(len(ret[1]),
                                     len(images) / (nclinics * nstations * npatients))
                    # query by clinic, station, patient and type
                    ret = self._query_images(clinic=c, station=s, patient=p,
                                             sort=sort, imagetype="Headshot")
                    self.assertEqual(ret[0], 200)

        # Tear everything down.
        for imageid in images:
            ret = DeleteImage(host, port, token, imageid).send(timeout=30)
            self.assertEqual(ret[0], 200)
        for patientid in patients:
            ret = DeletePatient(host, port, token, patientid).send(timeout=30)
            self.assertEqual(ret[0], 200)
        for stationid in stations:
            ret = DeleteStation(host, port, token, stationid).send(timeout=30)
            self.assertEqual(ret[0], 200)
        for clinicid in clinics:
            ret = DeleteClinic(host, port, token, clinicid).send(timeout=30)
            self.assertEqual(ret[0], 200)
def usage():
    """Print this test module's command-line usage."""
    msg = "image [-h host] [-p port] [-u username] [-w password]"
    print(msg)
def main():
    """Parse command-line options into the connection globals, then run
    the unittest suite (argv is truncated so unittest ignores our flags)."""
    global host, port, username, password
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:")
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(2)
    # Defaults: local server, no credentials.
    host = "127.0.0.1"
    port = 8000
    username = None
    password = None
    for opt, val in opts:
        if opt == "-h":
            host = val
        elif opt == "-p":
            port = int(val)
        elif opt == "-u":
            username = val
        elif opt == "-w":
            password = val
        else:
            assert False, "unhandled option"
    unittest.main(argv=[sys.argv[0]])
if __name__ == "__main__":
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ==================================================
# Honeywell HMC5883L Magnetometer
# Datasheet : http://www51.honeywell.com/aero/common/documents/myaerospacecatalog-documents/Defense_Brochures-documents/HMC5883L_3-Axis_Digital_Compass_IC.pdf
# ==================================================
#
# Adapted from: http://think-bowl.com/raspberry-pi/i2c-python-library-3-axis-digital-compass-HMC58835883l-with-the-raspberry-pi/ for my own i2c core library
# ==================================================
#
# Breakout board known as GY-271
# ==================================================
import math
from i2c_core import i2c_core
class HMC5883(object):
    """Driver for the Honeywell HMC5883L 3-axis digital magnetometer
    (commonly sold on the GY-271 breakout board), accessed over I2C."""

    # Register addresses from the HMC5883L datasheet.
    ConfigurationRegisterA = 0x00
    ConfigurationRegisterB = 0x01
    ModeRegister = 0x02
    AxisXDataRegisterMSB = 0x03
    AxisXDataRegisterLSB = 0x04
    AxisZDataRegisterMSB = 0x05
    AxisZDataRegisterLSB = 0x06
    AxisYDataRegisterMSB = 0x07
    AxisYDataRegisterLSB = 0x08
    StatusRegister = 0x09
    # The identification registers are datasheet registers 10..12, i.e.
    # 0x0A..0x0C; the original 0x10..0x12 mistook decimal numbers for hex.
    IdentificationRegisterA = 0x0A
    IdentificationRegisterB = 0x0B
    IdentificationRegisterC = 0x0C

    # Mode register settings.
    MeasurementContinuous = 0x00
    MeasurementSingleShot = 0x01
    MeasurementIdle = 0x03

    # Supported full-scale ranges: gauss -> (gain code, milligauss per LSB).
    _GAUSS_SETTINGS = {
        0.88: (0x00, 0.73),
        1.3: (0x01, 0.92),
        1.9: (0x02, 1.22),
        2.5: (0x03, 1.52),
        4.0: (0x04, 2.27),
        4.7: (0x05, 2.56),
        5.6: (0x06, 3.03),
        8.1: (0x07, 4.35),
    }

    def __init__(self, address=0x1e, busnum=-1, gauss=1.3, debug=False):
        """Open the I2C device and configure sampling, scale and mode.

        address: I2C address of the sensor (0x1e per datasheet).
        busnum: I2C bus number; -1 lets i2c_core pick (its convention).
        gauss: full-scale range; must be one of the keys of _GAUSS_SETTINGS.
        debug: enable verbose output in i2c_core and set_scale.
        """
        self.debug = debug
        self.i2c = i2c_core(address, busnum=busnum, debug=debug)
        # 8 samples averaged per output, 15 Hz data rate, normal measurement.
        self.i2c.write_8(self.ConfigurationRegisterA, 0b01110000)
        self.set_scale(gauss, debug=debug)
        self.set_continuous_mode()  # Continuous sampling
        # Default to zero declination so get_heading() works even if the
        # caller never calls set_declination() (the original raised
        # AttributeError in that case).
        self.set_declination(0)

    def set_scale(self, gauss, debug=False):
        """Program the sensor gain and remember the matching mG/LSB scale
        factor used by get_axes().

        Raises ValueError for an unsupported gauss value (the original
        silently left the object half-configured).
        """
        try:
            gain_code, self.scale = self._GAUSS_SETTINGS[gauss]
        except KeyError:
            raise ValueError("unsupported gauss range: %r" % (gauss,))
        # The gain code occupies bits 7..5 of Configuration Register B.
        self.scale_reg = gain_code << 5
        self.set_option(self.ConfigurationRegisterB, self.scale_reg)
        if debug:
            # The original concatenated floats onto a str (TypeError) and
            # referenced an undefined name `scale`; use %-formatting instead.
            print("HMC5883L set : gauss %s, scale %s" % (gauss, self.scale))

    def set_option(self, register, *function_set):
        """OR together the given bit masks and write them to *register*."""
        options = 0x00
        for function in function_set:
            options = options | function
        self.i2c.write_8(register, options)

    def get_axes(self):
        """Read the three axes and return (x, y, z) in milligauss.

        An axis is returned as None when the sensor reports -4096, its
        ADC overflow/underflow sentinel (per datasheet).
        """
        magno_x = self.i2c.read_word_2c(self.AxisXDataRegisterMSB)
        magno_y = self.i2c.read_word_2c(self.AxisYDataRegisterMSB)
        magno_z = self.i2c.read_word_2c(self.AxisZDataRegisterMSB)
        if magno_x == -4096:
            magno_x = None
        else:
            magno_x = round(magno_x * self.scale, 4)
        if magno_y == -4096:
            magno_y = None
        else:
            magno_y = round(magno_y * self.scale, 4)
        if magno_z == -4096:
            magno_z = None
        else:
            magno_z = round(magno_z * self.scale, 4)
        return (magno_x, magno_y, magno_z)

    def get_heading(self):
        """Return the compass heading as a (degrees, minutes) tuple.

        NOTE(review): if either horizontal axis overflowed (None from
        get_axes), math.atan2 raises TypeError — confirm whether callers
        want a retry or an exception here.
        """
        (scaled_x, scaled_y, scaled_z) = self.get_axes()
        heading_rad = math.atan2(scaled_y, scaled_x)
        heading_rad += self.declination
        # Correct for reversed heading
        if heading_rad < 0:
            heading_rad += 2 * math.pi
        # Check for wrap and compensate
        if heading_rad > 2 * math.pi:
            heading_rad -= 2 * math.pi
        # Convert to degrees from radians
        heading_deg = heading_rad * 180 / math.pi
        degrees = math.floor(heading_deg)
        minutes = round((heading_deg - degrees) * 60)
        return (degrees, minutes)

    def set_declination(self, degree, min=0):
        """Store the local magnetic declination applied by get_heading().

        degree/min: declination in degrees and arc minutes. The parameter
        keeps the name `min` for API compatibility although it shadows the
        builtin.
        """
        self.declinationDeg = degree
        self.declinationMin = min
        # Float division: the original `min / 60` truncated to 0 under
        # Python 2, silently discarding the minutes component.
        self.declination = (degree + min / 60.0) * (math.pi / 180)

    def __str__(self):
        """Human-readable dump of the axes, declination and heading."""
        ret_str = ""
        (x, y, z) = self.get_axes()
        ret_str += "Axis X: " + str(x) + "\n"
        ret_str += "Axis Y: " + str(y) + "\n"
        ret_str += "Axis Z: " + str(z) + "\n"
        ret_str += "Declination: " + self.get_declination_string() + "\n"
        ret_str += "Heading: " + self.get_heading_string() + "\n"
        return ret_str

    def get_declination_string(self):
        """Return the stored declination as 'D deg, M minutes'."""
        return str(self.declinationDeg) + " deg, " + str(self.declinationMin) + " minutes"

    def get_heading_string(self):
        """Return the current heading as 'D deg, M minutes'."""
        (degrees, minutes) = self.get_heading()
        return str(degrees) + " deg, " + str(minutes) + " minutes"

    def set_continuous_mode(self):
        """Put the sensor into continuous measurement mode."""
        self.set_option(self.ModeRegister, self.MeasurementContinuous)
if __name__ == "__main__":
    # Demo loop: constructor defaults are address=0x1e, gauss=1.3, debug=False.
    i2c_HMC5883l = HMC5883(gauss=1.3)
    i2c_HMC5883l.set_declination(2, 18)
    while True:
        # Parenthesized so it runs on both Python 2 and Python 3; the
        # original bare `print x` statement is a SyntaxError under Python 3.
        print(i2c_HMC5883l.get_heading())
|
<filename>tests/tasks/test_extract_relevance_period.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import datetime
from data_quality import exceptions
from data_quality.tasks.extract_relevance_period import RelevancePeriodExtractor
from .test_task import TestTask
class TestRelevancePeriodExtractor(TestTask):
    """Test the RelevancePeriodExtractor task"""

    def test_extract_dates(self):
        """Test the date extraction

        Each example string below should yield the dates in the matching
        row of `expected` (order-insensitive; results are sorted before
        comparison).
        """
        self.maxDiff = None
        examples = ['Transparency Data 1 to 30 April 2014',
                    'July 2011 return with descriptions',
                    'DH-May-2010-amnd4',
                    'April 2010 to December 2013',
                    '2010 October Return',
                    'MOD\'s spending over £25,000 for August2014',
                    'jncc-spend-over-25k-2012-01',
                    '12_03_15_data',
                    'Over_%C2%A325K_april_2014',
                    'Transparency_Sept2014_Final.csv',
                    'August - September 2015',
                    '20-12-2015/21-01-2016',
                    '17/07/2014 - 17/08/2014']
        # Month-only matches resolve to the last day of that month;
        # explicit ranges keep both endpoints.
        expected = [[datetime.datetime(2014,4,1), datetime.datetime(2014,4,30)],
                    [datetime.datetime(2011,7,31)],
                    [datetime.datetime(2010,5,31)],
                    [datetime.datetime(2010,4,30), datetime.datetime(2013,12,31)],
                    [datetime.datetime(2010,10,31)],
                    [datetime.datetime(2014,8,31)],
                    [datetime.datetime(2012,1,31)],
                    [datetime.datetime(2015,3,12)],
                    [datetime.datetime(2014,4,30)],
                    [datetime.datetime(2014,9,30)],
                    [datetime.datetime(2015,8,31), datetime.datetime(2015,9,30)],
                    [datetime.datetime(2015,12,20), datetime.datetime(2016,1,21)],
                    [datetime.datetime(2014,7,17), datetime.datetime(2014,8,17)]]
        self.config['timeliness']['timeliness_strategy'] = ['title', 'data']
        results = []
        extractor = RelevancePeriodExtractor(self.config)
        for line in examples:
            dates = extractor.extract_dates(line)
            results.append(dates)
        # Keep only the parsed datetime objects, sorted, for comparison.
        for index, result in enumerate(results):
            results[index] = sorted([extracted_date['date_obj']
                                     for extracted_date in result])
        self.assertSequenceEqual(results, expected)

    def test_resolve_period(self):
        """Test that a period is extracted and formated properly

        Each source offers both a 'title' and a 'data' URL; the extractor
        should resolve a single (start, end) period from them, or None
        when it cannot decide.
        """
        sources = [{
            'title': 'MOD spending over £500 on a GPC and spending over £25,000, April 2010 to December 2013/December 2012 MOD GPC spend',
            'data': 'https://www.gov.uk/government/uploads/GPC_transparency_data_travel_stationery_contracts_dec2012.csv'
        },
        {
            'title': 'Spend over £25,000 in Natural England/July 2011 return',
            'data': 'http://data.defra.gov.uk/ops/procurement/1107/ne-over-25k-1107.csv'
        },
        {
            'title': 'Spending over £25,000, April 2010 to December 2013/1 to 29 February 2012 GPC spend',
            'data': 'https://www.gov.uk/government/uploads/attachment_data/file/28883/GPCTRANSPARENCYDATA1FEBRUARYTO29FEBRUARY2012includingdescriptions.csv'
        }]
        expected = [(datetime.datetime(2012,12,1), datetime.datetime(2012,12,31)),
                    (datetime.datetime(2011,7,1), datetime.datetime(2011,7,31)),
                    # This will not be found because the title is uncertain and the file name doesn't have delimitators
                    None]
        self.config['timeliness']['timeliness_strategy'] = ['title', 'data']
        results = []
        extractor = RelevancePeriodExtractor(self.config)
        for source in sources:
            results.append(extractor.identify_period(source))
        self.assertSequenceEqual(results, expected)

    def test_run_raises_if_field_not_provided(self):
        """Test that RelevancePeriodExtractor raises if the field in timeliness_strategy
        doesn't exist in source_file
        """
        self.config['assess_timeliness'] = True
        self.config['timeliness']['timeliness_strategy'] = ['period_id']
        extractor = RelevancePeriodExtractor(self.config)
        # NOTE(review): assertRaisesRegexp is the Python 2 spelling, kept
        # for the __future__-based 2/3 compatibility this file targets; it
        # is deprecated (assertRaisesRegex) on Python 3.
        self.assertRaisesRegexp(ValueError, 'timeliness_strategy', extractor.run)

    def test_run_raises_if_insufficient_period(self):
        """Tests that RelevancePeriodExtractor raises if sources without `period_id`
        make up over 10% of total sources
        """
        self.config['assess_timeliness'] = True
        self.config['timeliness']['timeliness_strategy'] = ['title', 'data']
        extractor = RelevancePeriodExtractor(self.config)
        self.assertRaises(exceptions.UnableToAssessTimeliness, extractor.run)
|
import pickle
import time
from tkinter import Tk, filedialog
import ipywidgets as widgets
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.matlib
import pandas as pd
import traitlets
from IPython.display import display
import functools
from tail_extrap import multivariate
# Output area appended below the tabs (see Interactive.__init__) where
# widget callbacks can surface debug output.
debug_view = widgets.Output(layout={'border': '1px solid black'})
# Common CSS margin applied to each major section within a tab.
layout_section = {'margin': '12px 2px 12px 2px'}
class Interactive:
    """Top-level notebook UI for tail extrapolation.

    Assembles the data-config, univariate-fitting and contour tabs around a
    shared multivariate.Multivariate instance (`mv`) and wires up session
    save/load.
    """

    def __init__(self, mv):
        # mv: the multivariate.Multivariate model edited by all tabs.
        self.mv = mv
        self.uni_fit_button = widgets.Button() # In tab1
        self.save_button = SaveFileButton(
            description='Save session as...',
            file_type=[("pickle archive", ".pkl")],
        )
        # The fit button is shared between tab0 and tab1 so tab0 can reset
        # its label when the configuration changes.
        self.tab0 = Tab_config(self.mv, self.uni_fit_button)
        self.tab1 = Tab_univariate(self.mv, self.uni_fit_button)
        self.tab2 = Tab_contour(self.mv)
        # self.tab3 = Tab_export(self.mv)
        tab = widgets.Tab(children=[
            self.tab0.tab, self.tab1.tab, self.tab2.tab
        ])
        tab.set_title(0, 'Data config')
        tab.set_title(1, 'Univariate fitting')
        tab.set_title(2, 'Contour construction')
        # tab.set_title(3, 'Export result')
        self.save_button.on_click(self.save_session)
        self.tab0.update_button.on_click(
            functools.partial(self.tab0_update_clicked, mv=mv))
        self.tab0.confirm_box.ok_button.on_click(
            functools.partial(self.tab0_update_confirmed, mv=self.mv))
        display(
            widgets.VBox(children=[
                self.save_button,
                tab,
                debug_view
            ])
        )

    def save_session(self, change):
        """Pickle self.mv to the path chosen via the save button."""
        # local function fitting_func in multivariate._CondY.fit cannot be
        # pickled. As a workaround, mv.condY_cont_dists_bulk is removed and
        # refit when loaded next time (see from_archive).
        if hasattr(self.mv, 'condY_cont_dists_bulk'):
            delattr(self.mv, 'condY_cont_dists_bulk')
        with open(self.save_button.file_name + '.pkl', 'wb') as f:
            pickle.dump(self.mv, f)

    def tab0_update_clicked(self, change, mv):
        """Handle tab0's Update button: ask before clobbering fit results."""
        if hasattr(mv, 'x_dist'):
            # Fitting result exists, confirm cleanup
            self.tab0.confirm_box.show()
            self.tab0.update_button.disabled = True
        else:
            # No fitting result exists
            self.tab0_update_confirmed(change=None, mv=mv)

    def tab0_update_confirmed(self, change, mv):
        """Apply the new CondY_X grid and discard stale fitting results."""
        # Clean up fitting results
        mv.condY_x = str_to_condY(self.tab0.condY_text.value)
        for attr in ['x_dist', 'y_dist', 'condY_disc_dists']:
            if hasattr(mv, attr):
                delattr(mv, attr)
        mv.ct = {}
        # Update tab0 display
        self.tab0.confirm_box.hide()
        self.tab0.update_button.disabled = False
        self.tab0.refresh_plot(mv)
        button_visual(self.tab0.update_button)
        # Reset tab1 display
        self.tab1.uni_fit_button.description = 'Start Fitting'
        set_widget_visibility(self.tab1.hide_list, 'hidden')
        # Update tab2 display
        # TODO

    @classmethod
    def from_archive(cls, archive_path):
        """Alternate constructor: restore a session saved by save_session."""
        with open(archive_path, 'rb') as f:
            mv = pickle.load(f)
        # Re-fit mv.condY_cont_dists_bulk as it was removed when pickling
        mv.condY_cont_dists_bulk = mv._fit_condY_cont(mv.condY_para_bulk_df)
        return cls(mv)

    @classmethod
    def from_df(cls, df, col_x=0, col_y=1):
        """Alternate constructor: start a fresh session from a DataFrame."""
        mv = multivariate.Multivariate(df, col_x=col_x, col_y=col_y)
        # Initialize session setting
        mv.ss = {
            # 'condY_x': mv.condY_x,
            'x_dist': {
                'maxima_extract': 'Annual Maxima',
                'maxima_fit': 'Gumbel Chart',
                'bulk_fit': 'Empirical',
                'outlier_detect': 'None',
            },
            'y_dist': {
                'maxima_extract': 'Annual Maxima',
                'maxima_fit': 'Gumbel Chart',
                'bulk_fit': 'Empirical',
                'outlier_detect': 'None',
            },
            'condY_disc_dists': {
                'maxima_extract': 'Annual Maxima',
                'maxima_fit': 'Gumbel Chart',
                'bulk_fit': 'Empirical',
                'outlier_detect': 'None',
            },
        }
        # Initialize contour results
        mv.ct = {}
        return cls(mv)
class Tab_config:
    """Tab for configuring the x grid used to evaluate f(y|x) and
    previewing it against the raw scatter data."""

    def __init__(self, mv, uni_fit_button):
        # NOTE(review): uni_fit_button is accepted but unused here —
        # confirm whether it can be dropped from the signature.
        # Update button
        self.update_button = widgets.Button(
            description='Update',
            disabled=False,
            tooltip='Save settings and update figure',
        )
        self.confirm_box = ConfirmDialog(
            text='Update CondY_X will erase all the fitting results. Continue?'
        )
        self.update_section = widgets.VBox(
            children=[self.update_button, self.confirm_box.box],
        )
        self.confirm_box.hide()
        # OK is wired by Interactive; only Cancel is handled locally.
        self.confirm_box.cancel_button.on_click(self.cancel_clicked)
        # CondY_X section
        self.condY_label = widgets.Label(
            value='$x$ for evaluating $f(y|x)$: ',
        )
        self.condY_text = widgets.Text(
            value=condY_to_str(mv.condY_x),
            placeholder='start : interval : end',
            layout=widgets.Layout(width='40%'),
        )
        self.condY_section = widgets.HBox(
            children=[self.condY_label, self.condY_text],
            layout = layout_section,
        )
        # Diagnostic plot
        layout_disp = {'height': '400px'}
        layout_disp.update(layout_section)
        self.data_display = widgets.Output(layout=layout_disp)
        self.tab = widgets.VBox(children=[
            self.update_section, self.condY_section, self.data_display,
        ])
        self.refresh_plot(mv)

    def cancel_clicked(self, change):
        # Update cancelled: collapse the dialog, re-enable the button.
        self.confirm_box.hide()
        self.update_button.disabled = False

    def refresh_plot(self, mv):
        '''Re-generate the plot in data_display using mv'''
        self.data_display.clear_output(wait=True)
        with self.data_display:
            plt.figure(dpi=100)
            # Raw observations as a translucent grey scatter.
            plt.plot(
                mv.x_data, mv.y_data, 'o',
                markersize=3, alpha=0.2, color=[0.5, 0.5, 0.5])
            ylm = plt.ylim()
            # One dashed vertical line per condY_x value, spanning the
            # current y limits.
            plt.plot(
                np.vstack([mv.condY_x, mv.condY_x]),
                np.matlib.repmat(
                    np.array(ylm).reshape(2, 1), 1, len(mv.condY_x)),
                '--', color=[1, 0.5, 0.5])
            plt.ylim(ylm)
            plt.xlabel(mv.x_name)
            plt.ylabel(mv.y_name)
            plt.grid(True)
            plt.legend(['Raw data', 'CondY_X'], loc='upper left')
            plt.show()
class Tab_univariate:
    """Tab for fitting the univariate distributions (marginal X, marginal Y
    and the discrete conditional Y) and reviewing their diagnostic plots."""

    def __init__(self, mv, uni_fit_button):
        """Build the fitting controls.

        mv: multivariate.Multivariate model shared across tabs.
        uni_fit_button: the shared fit/update button owned by Interactive.
        """
        # Fitting section
        self.uni_fit_button = uni_fit_button
        self.progress_bar = widgets.IntProgress(
            min=0, max=5,
            layout=widgets.Layout(width='10%', visibility='hidden'))
        self.progress_label = widgets.Label(
            layout=widgets.Layout(visibility='hidden')
        )
        self.fit_section = widgets.HBox(
            children=[self.uni_fit_button, self.progress_bar, self.progress_label],
        )
        self.uni_fit_button.on_click(
            functools.partial(self.update, mv=mv))
        # Config section
        self.dist_dropdown = widgets.Dropdown(
            options=[
                ('Marginal X', 'x_dist'),
                ('Marginal Y', 'y_dist'),
                ('Conditional Y', 'condY_disc_dists')],
            # Fixed: the original passed `stylestyle=...`, which is not a
            # valid widget keyword.
            style={'description_width': 'initial'},
            layout=widgets.Layout(width='120px')
        )
        self.condY_slider = widgets.SelectionSlider(
            options=[None],
            description=' for $x$ = ',
            continuous_update=False,
            readout=True,
            style={'description_width': 'initial'},
            layout=widgets.Layout(width='40%', visibility='hidden')
        )
        self.condY_prev = widgets.Button(
            description='\u25C0',
            tooltip='Select conditional Y for the previous value of x',
            layout=widgets.Layout(width='50px', visibility='hidden')
        )
        self.condY_next = widgets.Button(
            description='\u25B6',
            # Fixed copy-paste: this button advances to the *next* value.
            tooltip='Select conditional Y for the next value of x',
            layout=widgets.Layout(width='50px', visibility='hidden'),
        )
        self.maxima_extract_label = widgets.Label(
            value='Maxima extraction',
            layout=widgets.Layout(width='25%'),
        )
        self.maxima_extract_dropdown = widgets.Dropdown(
            options=['Annual Maxima'],
            value=mv.ss[self.dist_dropdown.value]['maxima_extract'],
            layout=widgets.Layout(width='25%'),
        )
        self.maxima_fit_label = widgets.Label(
            value='Maxima fitting',
            layout=widgets.Layout(width='25%'),
        )
        self.maxima_fit_dropdown = widgets.Dropdown(
            options=['Gumbel Chart'],
            value=mv.ss[self.dist_dropdown.value]['maxima_fit'],
            layout=widgets.Layout(width='25%'),
        )
        self.bulk_fit_label = widgets.Label(
            value='Bulk fitting',
            layout=widgets.Layout(width='25%'),
        )
        self.bulk_fit_dropdown = widgets.Dropdown(
            options=['Empirical', 'Parametric'],
            value=mv.ss[self.dist_dropdown.value]['bulk_fit'],
            layout=widgets.Layout(width='25%'),
        )
        self.outlier_detect_label = widgets.Label(
            value='Outlier detection',
            layout=widgets.Layout(width='25%'),
        )
        self.outlier_detect_dropdown = widgets.Dropdown(
            options=['None', 'RANSAC Regression', 'Huber Regression'],
            value=mv.ss[self.dist_dropdown.value]['outlier_detect'],
            layout=widgets.Layout(width='25%'),
        )
        self.config_section = widgets.VBox(
            children=[
                widgets.HBox(
                    children=[
                        self.dist_dropdown, self.condY_slider,
                        self.condY_prev, self.condY_next
                    ],
                    layout={'margin': '2px 2px 10px 2px'}
                ),
                widgets.HBox(children=[
                    self.maxima_extract_label, self.maxima_fit_label,
                    self.bulk_fit_label, self.outlier_detect_label
                ]),
                widgets.HBox(children=[
                    self.maxima_extract_dropdown, self.maxima_fit_dropdown,
                    self.bulk_fit_dropdown, self.outlier_detect_dropdown
                ]),
            ],
            layout=layout_section,
        )
        self.update_condY_slider(mv)
        self.dist_dropdown.observe(
            functools.partial(self.refresh_plot, mv=mv), names='value')
        self.condY_slider.observe(
            functools.partial(self.refresh_plot, mv=mv), names='value')
        self.condY_prev.on_click(self.condY_slider_prev)
        self.condY_next.on_click(self.condY_slider_next)
        # Diagnostic plot
        layout_disp = {'height': '450px'}
        layout_disp.update(layout_section)
        self.data_display = widgets.Output(layout=layout_disp)
        self.tab = widgets.VBox(children=[
            self.fit_section,
            self.config_section,
            self.data_display,
        ])
        # Widgets hidden until a first fit exists.
        self.hide_list = [self.config_section, self.data_display,
                          self.condY_next, self.condY_prev, self.condY_slider]
        if not hasattr(mv, 'x_dist'):
            self.uni_fit_button.description = 'Start Fitting'
            set_widget_visibility(self.hide_list, 'hidden')
        else:
            self.uni_fit_button.description = 'Update'
            self.refresh_plot(change=None, mv=mv)

    def update_condY_slider(self, mv):
        '''Update the options for condY_slider (label -> index of condY_x)'''
        condY_slider_dict = {f'{condY_x:.1f}': idx
                             for idx, condY_x in enumerate(mv.condY_x)}
        self.condY_slider.options = condY_slider_dict

    def condY_slider_prev(self, change):
        '''Step the conditional-Y slider one position left, clamped at 0.'''
        self.condY_slider.value = max([0, self.condY_slider.value - 1])

    def condY_slider_next(self, change):
        '''Step the conditional-Y slider one position right, clamped at the end.'''
        self.condY_slider.value = min(
            [len(self.condY_slider.options) - 1, self.condY_slider.value + 1])

    def fit_all(self, mv):
        ''' Fit each univariate distribution, updating the progress bar '''
        self.data_display.clear_output()
        self.progress_bar.layout.visibility = 'visible'
        self.progress_label.layout.visibility = 'visible'
        self.progress_bar.value = 0
        self.progress_label.value = 'Fitting marginal X'
        mv._fit_marginalX(**mv.ss['x_dist'])
        self.progress_bar.value += 1
        self.progress_label.value = 'Fitting marginal Y'
        mv._fit_marginalY(**mv.ss['y_dist'])
        self.progress_bar.value += 1
        self.progress_label.value = 'Fitting discrete conditional Y'
        mv._fit_condY_disc(**mv.ss['condY_disc_dists'])
        self.progress_bar.value += 1
        self.progress_label.value = 'Fitting median of conditional Y'
        mv._get_condY_median()
        self.progress_bar.value += 1
        self.progress_label.value = 'Fitting continuous conditional Y using bulk'
        df = mv._get_condY_para_bulk()
        mv.condY_cont_dists_bulk = mv._fit_condY_cont(df)
        mv.condY_para_bulk_df = df  # Save df as condY_cont_dists_bulk will be removed
        self.progress_bar.value += 1
        self.update_condY_slider(mv)
        set_widget_visibility(self.hide_list, 'visible')
        self.progress_bar.layout.visibility = 'hidden'
        self.progress_label.layout.visibility = 'hidden'
        self.uni_fit_button.description = 'Update'
        self.refresh_plot(change=None, mv=mv)

    def fit_single(self, mv):
        '''Fit a specific univariate distribution defined by dist_dropdown'''
        self.data_display.clear_output()
        # Record current setting
        mv.ss[self.dist_dropdown.value]['maxima_extract'] = \
            self.maxima_extract_dropdown.value
        mv.ss[self.dist_dropdown.value]['maxima_fit'] = \
            self.maxima_fit_dropdown.value
        mv.ss[self.dist_dropdown.value]['bulk_fit'] = \
            self.bulk_fit_dropdown.value
        mv.ss[self.dist_dropdown.value]['outlier_detect'] = \
            self.outlier_detect_dropdown.value
        if self.dist_dropdown.value == 'x_dist':
            mv._fit_marginalX(**mv.ss[self.dist_dropdown.value])
        elif self.dist_dropdown.value == 'y_dist':
            mv._fit_marginalY(**mv.ss[self.dist_dropdown.value])
        else:
            mv._fit_condY_disc(**mv.ss[self.dist_dropdown.value])
        self.refresh_plot(change=None, mv=mv)

    def refresh_plot(self, change, mv):
        '''Sync the dropdowns with the saved config for the selected
        distribution and regenerate its diagnostic plot'''
        self.maxima_extract_dropdown.value = \
            mv.ss[self.dist_dropdown.value]['maxima_extract']
        self.maxima_fit_dropdown.value = \
            mv.ss[self.dist_dropdown.value]['maxima_fit']
        self.bulk_fit_dropdown.value = \
            mv.ss[self.dist_dropdown.value]['bulk_fit']
        self.outlier_detect_dropdown.value = \
            mv.ss[self.dist_dropdown.value]['outlier_detect']
        # Update condY_slider and data_display
        if self.dist_dropdown.value == 'condY_disc_dists':
            # The conditional-Y fits are a list indexed by the slider.
            self.condY_slider.layout.visibility = 'visible'
            self.condY_prev.layout.visibility = 'visible'
            self.condY_next.layout.visibility = 'visible'
            dist = getattr(mv, self.dist_dropdown.value)[self.condY_slider.value]
        else:
            self.condY_slider.layout.visibility = 'hidden'
            self.condY_prev.layout.visibility = 'hidden'
            self.condY_next.layout.visibility = 'hidden'
            dist = getattr(mv, self.dist_dropdown.value)
        self.data_display.clear_output(wait=True)
        with self.data_display:
            display(dist.diag_fig)

    def update(self, change, mv):
        '''Operation for the uni_fit_button: first click fits everything,
        later clicks refit only the selected distribution'''
        if self.uni_fit_button.description == 'Start Fitting':
            self.fit_all(mv)
        else:
            self.fit_single(mv)
        button_visual(self.uni_fit_button)
class Tab_contour:
    """Tab for constructing environmental contours at a chosen MRP and
    reviewing the candidate lower/upper contour distributions."""

    def __init__(self, mv):
        """Build the contour controls for the shared model `mv`."""
        # Keep a reference so event handlers that receive no `mv` argument
        # (e.g. update_mrp_from_exist) can still reach the model.
        self.mv = mv
        # Fitting status
        self.fit_button = widgets.Button(
            description='Start Fitting',
            tooltip='Fit contour for the current MRP'
        )
        self.progress_bar = widgets.IntProgress(
            min=0, max=5,
            layout=widgets.Layout(width='10%', visibility='hidden'))
        self.progress_label = widgets.Label(
            layout=widgets.Layout(visibility='hidden')
        )
        self.confirm_box = ConfirmDialog(
            text='MRP exists, overwrite?'
        )
        self.confirm_box.hide()
        self.fit_section = widgets.VBox(
            children=[
                widgets.HBox(children=[
                    self.fit_button, self.progress_bar, self.progress_label]),
                self.confirm_box.box
            ],
        )
        self.fit_button.on_click(functools.partial(self.fit_clicked, mv=mv))
        self.confirm_box.ok_button.on_click(functools.partial(self.fit_confirmed, mv=mv))
        self.confirm_box.cancel_button.on_click(self.cancel_clicked)
        # MRP selection
        self.mrp_from_new = widgets.Checkbox(
            description='Create new MRP of: ',
            value=True,
            indent=False,
            layout={'width': 'max-content'},
        )
        self.mrp_from_exist = widgets.Checkbox(
            # Fixed typo in the user-facing label ("Overwirte").
            description='Overwrite existing MRP of: ',
            value=False,
            indent=False,
            layout={'width': 'max-content'},
        )
        self.mrp_new = widgets.IntText(
            value=1,
            layout={'width': '100px'},
        )
        self.mrp_exist_select = widgets.Dropdown(
            options=list(mv.ct.keys()),
            layout={'width': '100px'},
        )
        self.mrp_section = widgets.HBox(
            children=[
                widgets.VBox(
                    children=[self.mrp_from_new, self.mrp_from_exist],
                ),
                widgets.VBox(children=[self.mrp_new, self.mrp_exist_select],
                )
            ],
            layout=layout_section,
        )
        if not self.mrp_exist_select.options:
            # No fitted MRPs yet, so "overwrite existing" is meaningless.
            self.mrp_from_exist.disabled = True
        self.mrp_from_new.observe(self.update_mrp_from_exist, names='value')
        self.mrp_from_exist.observe(self.update_mrp_from_new, names='value')
        self.mrp_exist_select.observe(self.update_diag, names='value')
        # Contour distribution selection
        self.contour_dropdown = widgets.Dropdown(
            options=['Lower contour', 'Upper contour'],
            layout=widgets.Layout(width='120px'),
        )
        self.select_button = widgets.Button(
            description='Select',
            tooltip='Use the current distribution for the contour',
            layout=widgets.Layout(margin='2px 10px 2px 10px', width='100px'),
        )
        self.dist_slider = widgets.SelectionSlider(
            description='using distribution: ',
            options=['None'],
            continuous_update=False,
            readout=True,
            layout=widgets.Layout(width='40%'),
            style={'description_width': 'initial'},
        )
        self.dist_prev = widgets.Button(
            description='\u25C0',
            # Fixed swapped tooltips: left arrow steps backwards.
            tooltip='Show results of the previous distribution',
            layout=widgets.Layout(width='50px'),
        )
        self.dist_next = widgets.Button(
            description='\u25B6',
            tooltip='Show results of the next distribution',
            layout=widgets.Layout(width='50px'),
        )
        self.dist_err = widgets.Label()
        self.dist_section = widgets.VBox(
            children=[
                widgets.HBox(children=[
                    self.contour_dropdown, self.dist_slider,
                    self.dist_prev, self.dist_next, self.select_button
                ]),
                widgets.HBox(children=[
                    self.dist_err
                ]),
            ],
            layout=layout_section,
        )
        # Do not bind mrp here: the original froze self.get_mrp() at
        # construction time, so these callbacks always showed a stale MRP.
        # update_diag now resolves the current MRP at call time.
        self.contour_dropdown.observe(
            functools.partial(self.update_diag, mv=mv), names='value')
        self.dist_slider.observe(
            functools.partial(self.update_diag, mv=mv), names='value')
        self.dist_prev.on_click(self.dist_slider_prev)
        self.dist_next.on_click(self.dist_slider_next)
        self.select_button.on_click(
            functools.partial(self.update_selection, mv=mv))
        # Diagnostic plots
        layout_plot = {'width': '33%', 'height': '300px'}
        self.repara_plot = widgets.Output(layout=layout_plot)
        self.para_plot = widgets.Output(layout=layout_plot)
        self.contour_plot = widgets.Output(layout=layout_plot)
        self.plot_section = widgets.HBox(
            children=[self.repara_plot, self.para_plot, self.contour_plot],
        )
        self.tab = widgets.VBox(children=[
            self.fit_section, self.mrp_section,
            self.dist_section, self.plot_section,
        ])
        self.hide_list = list(self.dist_section.children) + \
            list(self.plot_section.children)
        set_widget_visibility(self.hide_list, 'hidden')

    def get_mrp(self) -> int:
        """Return the MRP currently selected by the checkboxes."""
        if self.mrp_from_new.value:
            return self.mrp_new.value
        else:
            return self.mrp_exist_select.value

    def cancel_clicked(self, change):
        """Overwrite cancelled: collapse the dialog, re-enable fitting."""
        self.confirm_box.hide()
        self.fit_button.disabled = False

    def fit_clicked(self, change, mv):
        """Start a contour fit, confirming first when it would overwrite."""
        mrp = self.get_mrp()
        # Fixed: the original tested the Checkbox *object* (always truthy)
        # instead of its value.
        if self.mrp_from_new.value and mrp in self.mrp_exist_select.options:
            # New mrp is in the existing mrp list
            self.confirm_box.show()
            self.fit_button.disabled = True
        else:
            self.fit_confirmed(change=None, mv=mv)

    def fit_confirmed(self, change, mv):
        """Run the full contour-construction pipeline for the current MRP."""
        self.confirm_box.hide()
        self.fit_button.disabled = False
        set_widget_visibility(self.hide_list, 'hidden')
        self.progress_bar.layout.visibility = 'visible'
        self.progress_label.layout.visibility = 'visible'
        self.progress_bar.value = 0
        mrp = self.get_mrp()
        ct = {'mrp': mrp}  # Initialize contour result
        self.progress_label.value = 'Calculating marginal MRP value for X & Y'
        ct['x_mrp'] = mv.x_dist.predict(mrp=mrp)
        ct['y_mrp'] = mv.y_dist.predict(mrp=mrp)
        self.progress_bar.value += 1
        self.progress_label.value = 'Calculating jagged contour'
        ct['jagged'] = mv._get_jaggaed_contour(mrp)
        self.progress_bar.value += 1
        self.progress_label.value = 'Calculating lower contour with MLE fitting'
        ct['lower'], ct['df_lower'] = mv._smooth_contour_lower(ct)
        self.progress_bar.value += 1
        self.progress_label.value = 'Calculating upper contour with reparameterization'
        ct['upper'], ct['df_upper'], ct['condY_cont_dists_tail'] = \
            mv._smooth_contour_upper(ct, range_ratio=10)
        self.progress_bar.value += 1
        self.progress_label.value = 'Combining final contour'
        ct['final_x'], ct['final_y'] = mv._smooth_contour_combine(ct)
        self.progress_bar.value += 1
        mv.ct[mrp] = ct  # Record contour result
        # Update display
        self.mrp_exist_select.options = list(mv.ct.keys())
        self.mrp_from_exist.disabled = False
        self.progress_bar.layout.visibility = 'hidden'
        self.progress_label.layout.visibility = 'hidden'
        set_widget_visibility(self.hide_list, 'visible')
        self.update_diag(change=None, mv=mv, mrp=mrp)

    def update_diag(self, change, mv=None, mrp=None):
        """Refresh the diagnostic plots for the selected contour type.

        mv and mrp default to the stored model and the currently selected
        MRP, so this can also serve directly as a widget observer (the
        original crashed when called without them).
        """
        if mv is None:
            mv = self.mv
        if mrp is None:
            mrp = self.get_mrp()
        if mrp not in mv.ct:
            # Nothing fitted for this MRP yet; leave the plots untouched.
            return
        ct = mv.ct[mrp]
        if self.contour_dropdown.value == 'Lower contour':
            self.dist_slider.options = list(ct['df_lower'].index)
            # The reparameterization plot only applies to the upper contour.
            self.repara_plot.layout.width = '0%'
            self.update_lower_diag(mv, ct)
        else:
            self.dist_slider.options = list(ct['df_upper'].index)
            self.repara_plot.layout.width = '33%'
            self.update_upper_diag(mv, ct)

    def update_selection(self, change, mv):
        """Adopt the highlighted distribution for the contour and rebuild
        the combined final contour."""
        ct = mv.ct[self.get_mrp()]
        if self.contour_dropdown.value == 'Lower contour':
            ct['lower'] = ct['df_lower'].loc[self.dist_slider.value, 'y_bot']
        else:
            ct['upper'] = ct['df_upper'].loc[self.dist_slider.value, 'y_top']
        ct['final_x'], ct['final_y'] = mv._smooth_contour_combine(ct)

    def plot_validations(self, mv, ct):
        """Plot the shared validation background: raw data, jagged contour,
        marginal MRP cross-hairs and the conditional-Y median."""
        plt.plot(mv.x_data, mv.y_data, '.', color=[0.5, 0.5, 0.5], alpha=0.1, markersize=10)
        plt.plot(ct['jagged']['x'], ct['jagged']['y_bot'], 'b.-')
        plt.plot(ct['jagged']['x'], ct['jagged']['y_top'], 'b.-')
        plt.plot([ct['x_mrp'], ct['x_mrp']], [0, ct['y_mrp']], 'b--')
        plt.plot([0, ct['x_mrp']], [ct['y_mrp'], ct['y_mrp']], 'b--')
        plt.plot(mv.x_dist.sample_coor, mv.median_pred, 'b-.')
        plt.grid(True)
        plt.xlim([0, ct['x_mrp'] * 1.1])
        plt.ylim([
            0, 1.1 * max([ct['y_mrp'], ct['jagged']['y_top'].max()])
        ])
        plt.xlabel(mv.x_name)
        plt.ylabel(mv.y_name)

    def update_lower_diag(self, mv, ct):
        """Refresh the error label and plots for a lower-contour candidate."""
        self.dist_err.value = 'Error: ' \
            f"{ct['df_lower'].loc[self.dist_slider.value, 'err']:.2f} " \
            '(RMS error compared to the jagged lower contour)'
        self.para_plot.clear_output(wait=True)
        with self.para_plot:
            mv.condY_cont_dists_bulk[self.dist_slider.value].plot_diagnosis()
            plt.title('')
            plt.xlabel(mv.x_name)
            plt.show()
        self.contour_plot.clear_output(wait=True)
        with self.contour_plot:
            self.plot_validations(mv, ct)
            plt.plot(
                mv.x_dist.sample_coor,
                ct['df_lower'].loc[self.dist_slider.value, 'y_bot'],
                # Fixed: case-insensitive kwargs like `LineWidth` are
                # removed in modern matplotlib.
                'r-', linewidth=2)
            plt.show()

    def update_upper_diag(self, mv, ct):
        """Refresh the error label and plots for an upper-contour candidate."""
        self.dist_err.value = 'Error: ' \
            f"{ct['df_upper'].loc[self.dist_slider.value, 'err']:.2f} " \
            r'(25% RMS error compared to the jagged upper contour ' +\
            r'+ 75% absolute error compared to MRP of marginal y)'
        self.repara_plot.clear_output(wait=True)
        with self.repara_plot:
            mv.plot_repara_result(ct, self.dist_slider.value)
        self.para_plot.clear_output(wait=True)
        with self.para_plot:
            ct['condY_cont_dists_tail'][self.dist_slider.value].plot_diagnosis()
            plt.title('')
            plt.xlabel(mv.x_name)
            plt.show()
        self.contour_plot.clear_output(wait=True)
        with self.contour_plot:
            self.plot_validations(mv, ct)
            plt.plot(
                mv.x_dist.sample_coor,
                ct['df_upper'].loc[self.dist_slider.value, 'y_top'],
                'r-', linewidth=2)
            plt.show()

    def update_mrp_from_exist(self, change):
        """Keep the two MRP checkboxes mutually exclusive and refresh."""
        self.mrp_from_exist.value = not self.mrp_from_new.value
        # Works now that update_diag defaults mv/mrp (the original call
        # here raised TypeError for missing arguments).
        self.update_diag(change=None)

    def update_mrp_from_new(self, change):
        """Mirror of update_mrp_from_exist for the other checkbox."""
        self.mrp_from_new.value = not self.mrp_from_exist.value

    def dist_slider_prev(self, change):
        """Step the distribution slider one option left, clamped at 0."""
        idx = self.dist_slider.options.index(self.dist_slider.value)
        idx = max([0, idx - 1])
        self.dist_slider.value = self.dist_slider.options[idx]

    def dist_slider_next(self, change):
        """Step the distribution slider one option right, clamped at end."""
        idx = self.dist_slider.options.index(self.dist_slider.value)
        idx = min([len(self.dist_slider.options) - 1, idx + 1])
        self.dist_slider.value = self.dist_slider.options[idx]
class SaveFileButton(widgets.Button):
    """Button that pops a native "save as" dialog and records the result.

    The chosen path is exposed as the traitlet ``file_name``.
    Modified from https://codereview.stackexchange.com/questions/162920/file-selection-button-for-jupyter-notebook
    """

    def __init__(self, file_type=None, **kwargs):
        """file_type: list of (label, extension) pairs for the dialog filter."""
        super(SaveFileButton, self).__init__(**kwargs)
        self.file_type = file_type
        # Dynamically attach a Unicode traitlet holding the selected path.
        self.add_traits(file_name=traitlets.traitlets.Unicode())
        self.on_click(self.select_file)

    def select_file(self, b):
        """Open the save dialog and store the chosen path on ``file_name``."""
        root = Tk()
        root.withdraw()  # keep the empty Tk main window off screen
        # Raise the dialog above every other window.
        root.call('wm', 'attributes', '.', '-topmost', True)
        chosen = filedialog.asksaveasfilename(filetypes=self.file_type)
        b.file_name = chosen
class ConfirmDialog:
    """Boxed OK/Cancel prompt that can be shown or collapsed on demand.

    Callers wire their own handlers to ``ok_button`` / ``cancel_button``
    and embed ``self.box`` in their layout.
    """

    def __init__(self, text=None):
        self.text = widgets.Label(value=text)
        self.ok_button = widgets.Button(
            description='OK', layout={'width': '80px'})
        self.cancel_button = widgets.Button(
            description='Cancel', layout={'width': '80px'})
        button_row = widgets.HBox(children=[self.ok_button, self.cancel_button])
        box_layout = {
            'border': 'solid 1px',
            'padding': '5px 5px 5px 5px',
            'align_items': 'center',
            'width': '40%',
        }
        self.box = widgets.VBox(
            children=[self.text, button_row],
            layout=box_layout,
        )

    def show(self):
        """Make the dialog visible and restore its natural height."""
        self.box.layout.visibility = 'visible'
        self.box.layout.height = None

    def hide(self):
        """Collapse the dialog without removing it from the widget tree."""
        self.box.layout.visibility = 'hidden'
        self.box.layout.height = '0px'
def button_visual(button_widget):
    """Flash a green check mark on *button_widget* for one second as
    success feedback, then restore its normal look."""
    for color, icon, pause in (('lightgreen', 'check', 1), (None, '', 0)):
        button_widget.style.button_color = color
        button_widget.icon = icon
        if pause:
            time.sleep(pause)
def condY_to_str(condY_x: list) -> str:
    '''Render a condY_x grid as "start : interval : end" for display.

    The interval is taken from the first two entries; each number is
    formatted with one decimal place.
    '''
    start = condY_x[0]
    interval = condY_x[1] - condY_x[0]
    end = condY_x[-1]
    return ' : '.join(f'{value:.1f}' for value in (start, interval, end))
def str_to_condY(s: str) -> list:
    '''Parse a condY_x expression from text into a numeric grid.

    s has the format "start : interval : end", or "start : end" with an
    implied interval of 1. Returns the resulting np.arange grid.
    '''
    parts = [float(token) for token in s.split(':')]
    # Nudge "end" slightly past itself so np.arange includes the endpoint.
    if len(parts) == 2:
        start, end = parts
        return np.arange(start, end * 1.0001, 1)
    if len(parts) == 3:
        start, interval, end = parts
        return np.arange(start, end * 1.0001, interval)
    raise ValueError('Please check format of CondY_X')
def set_widget_visibility(widget_list, visibility):
    '''Apply the given CSS visibility value ("visible"/"hidden") to the
    layout of every widget in widget_list.'''
    for widget in widget_list:
        widget.layout.visibility = visibility
|
<reponame>OBITORASU/tomato-timer<filename>tests.py<gh_stars>0
import unittest
import requests
import json
import os
import shutil
from app import server
from app.helpers import Timer
def send_get_to_room_url(root_endpoint: str, room_name: str) -> requests.Response:
    '''Issue a GET to <root_endpoint>room/<room_name> and return the Response.'''
    target = '{}{}{}'.format(root_endpoint, 'room/', room_name)
    return requests.get(target)
def send_post_to_room_url(root_endpoint: str,
                          room_name: str,
                          json_repr: dict) -> requests.Response:
    '''Issue a POST with json_repr as the JSON body to
    <root_endpoint>room/<room_name> and return the Response.'''
    target = '{}{}{}'.format(root_endpoint, 'room/', room_name)
    return requests.post(target, json=json_repr)
class ApiRoutesCase(unittest.TestCase):
    """Integration tests for the /api/room/<name> endpoints.

    Expects the app server to be running locally on port 5000 with testing
    mode enabled; each test talks to it over HTTP.
    """

    def setUp(self):
        """Create the on-disk room DB directory and seed it with 'test_room'."""
        self.server = server
        self.root_endpoint = 'http://localhost:5000/api/'
        assert int(server.testing) == 1
        self.db_path = server.config['DB_PATH']
        try:
            os.mkdir(server.config['DB_PATH'])
        except FileExistsError:
            # A directory left over from a previous run is fine.
            pass
        t = Timer(
            duration=300,
            is_playing=False,
        )
        # with-block guarantees the file is closed even if dumping fails
        with open(self.db_path + '/test_room.json', 'w') as t_json:
            json.dump(t.json_repr(), t_json)

    def tearDown(self):
        """Remove the room DB directory and everything inside it."""
        shutil.rmtree(self.db_path)

    def test1_receiving_Json_from_existing_room(self):
        '''makes sure that the test_room is up and that JSON can be retrieved from it'''
        ep = self.root_endpoint
        db_path = self.db_path
        with open(db_path + '/test_room.json') as f:
            test_json = json.load(f)
        # Reference timer rebuilt from what setUp wrote to disk.
        t = Timer(
            duration=test_json['duration'],
            is_playing=test_json['is_playing'],
            start_time=test_json['start_time'],
            password=test_json['password']
        )
        r = send_get_to_room_url(ep, 'test_room')
        self.assertEqual(r.status_code, 200,
                         'Expected status code 200, got {} instead'.format(
                             r.status_code
                         ))
        self.assertEqual(r.headers['content-type'], 'application/json',
                         'Expected "application/json" in header, got {} instead'.format(
                             r.headers['content-type']
                         ))
        timer_args = r.json()
        timer_from_json = Timer(
            timer_args['duration'],
            timer_args['is_playing'],
            t.start_time,
        )
        self.assertEqual(timer_from_json.json_repr(), t.json_repr())

    def test2_creating_new_room(self):
        '''Ensures that new rooms can be created via POST request'''
        from app.helpers import hash_password
        ep = self.root_endpoint
        # NOTE(review): the password literals in this file were redacted in
        # the original source ('<PASSWORD>'); any non-empty value works here.
        pw = 'test-password'
        hashed_password = hash_password(pw)
        r = send_get_to_room_url(ep, 'new_room')
        self.assertEqual(type(r.json()), type(''),
                         'room already exists at this location, did you remember to kill the server?')
        t = Timer(
            duration=400,
            is_playing=False,
            start_time=0,
            password=pw
        )
        self.assertEqual(t.password, hashed_password,
                         "Timer object isn't hashing passwords properly")
        j = t.json_repr()
        # POST the plaintext password; the server hashes it on its side.
        j['password'] = pw
        r = send_post_to_room_url(ep,
                                  'new_room',
                                  j)
        self.assertEqual(r.status_code, 200,
                         'expected status code 200, got {} instead'.format(
                             r.status_code
                         ))
        self.assertEqual(r.headers['content-type'], 'application/json',
                         'Expected "application/json" in header, got {} instead'.format(
                             r.headers['content-type']
                         ))
        r_dict = r.json()
        self.assertEqual(r_dict["password"], t.password,
                         "Expected response JSON Password to be \n {}".format(t.password) +
                         "\n Was \n {} \n instead".format(r_dict["password"]))

    def test3_protecting_against_trolls(self):
        '''tests if passwords are successful in keeping bad POST requests from modifying things'''
        ep = self.root_endpoint
        r = send_get_to_room_url(ep, 'new_room')
        self.assertEqual(r.status_code, 200,
                         'expected status code 200, got {} instead'.format(
                             r.status_code
                         ))
        # Create the room with a known password.
        # NOTE(review): both literals below were redacted in the original
        # source; they are reconstructed so the second POST carries a
        # DIFFERENT (wrong) password than the room was created with.
        t = Timer(
            duration=400,
            is_playing=False,
            start_time=0,
            password='correct-password'
        )
        r = send_post_to_room_url(ep, 'new_room', t.json_repr())
        self.assertEqual(r.status_code, 200,
                         'expected status code 200, got {} instead'.format(
                             r.status_code
                         ))
        self.assertEqual(r.headers['content-type'], 'application/json',
                         'Expected "application/json" in header, got {} instead'.format(
                             r.headers['content-type']
                         ))
        # Attempt to modify the room using a wrong password.
        t = Timer(
            duration=100,
            is_playing=False,
            password='wrong-password'
        )
        r = send_post_to_room_url(ep, 'new_room', t.json_repr())
        self.assertEqual(r.status_code, 200,
                         'expected status code 200, got {} instead'.format(
                             r.status_code
                         ))
        self.assertIsInstance(
            r.json(), str, 'Allowing POSTs with bad password fields to edit the timer')
        r = send_get_to_room_url(ep, 'new_room')
        self.assertEqual(r.status_code, 200,
                         'expected status code 200, got {} instead'.format(
                             r.status_code
                         ))
        # Replaying the GET body verbatim must also be rejected: the stored
        # password is hashed server-side, so it won't validate as plaintext.
        r = send_post_to_room_url(ep, 'new_room', r.json())
        self.assertIsInstance(r.json(
        ), str, 'Not hashing the password server-side, can edit a timer just by doing a GET and submitting the same JSON back')

    def test4_updating_server_timer_with_client_info(self):
        # TODO: not yet implemented
        pass
if __name__ == '__main__':
    # Run all API test cases with per-test result lines.
    unittest.main(verbosity=2)
|
<gh_stars>0
from collections import OrderedDict
import logging
import pickle
import sys
import time

import cloudpickle as cp
import numpy as np
import pandas as pd
import pyprind
from joblib import Parallel, delayed

from py_entitymatching.blocker.blocker import Blocker
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, add_key_column
logger = logging.getLogger(__name__)
class BlackBoxBlocker(Blocker):
    """
    Blocks based on a black box function specified by the user.
    """

    def __init__(self, *args, **kwargs):
        # Chain to the direct parent.  (The previous code called
        # super(Blocker, self).__init__, which skipped Blocker.__init__
        # entirely and went straight to Blocker's own base class.)
        super(BlackBoxBlocker, self).__init__(*args, **kwargs)
        # User-supplied predicate applied to (ltuple, rtuple);
        # returning True means "drop this pair".
        self.black_box_function = None

    def set_black_box_function(self, function):
        """Sets black box function to be used for blocking.

        Args:
            function (function): the black box function to be used for
                blocking.
        """
        self.black_box_function = function

    def block_tables(self, ltable, rtable,
                     l_output_attrs=None, r_output_attrs=None,
                     l_output_prefix='ltable_', r_output_prefix='rtable_',
                     verbose=False, show_progress=True, n_jobs=1):
        """
        Blocks two tables based on a black box blocking function specified
        by the user.

        Finds tuple pairs from left and right tables that survive the black
        box function. A tuple pair survives the black box blocking function if
        the function returns False for that pair, otherwise the tuple pair is
        dropped.

        Args:
            ltable (DataFrame): The left input table.
            rtable (DataFrame): The right input table.
            l_output_attrs (list): A list of attribute names from the left
                table to be included in the output candidate set (defaults
                to None).
            r_output_attrs (list): A list of attribute names from the right
                table to be included in the output candidate set (defaults
                to None).
            l_output_prefix (string): The prefix to be used for the attribute
                names coming from the left table in the output candidate set
                (defaults to 'ltable\_').
            r_output_prefix (string): The prefix to be used for the attribute
                names coming from the right table in the output candidate set
                (defaults to 'rtable\_').
            verbose (boolean): A flag to indicate whether the debug
                information should be logged (defaults to False).
            show_progress (boolean): A flag to indicate whether progress should
                be displayed to the user (defaults to True).
            n_jobs (int): The number of parallel jobs to be used for computation
                (defaults to 1). If -1 all CPUs are used. If 0 or 1,
                no parallel computation is used at all, which is useful for
                debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
                used (where n_cpus are the total number of CPUs in the
                machine). Thus, for n_jobs = -2, all CPUs but one are used.
                If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
                computation is used (i.e., equivalent to the default).

        Returns:
            A candidate set of tuple pairs that survived blocking (DataFrame).

        Raises:
            AssertionError: If `ltable` is not of type pandas DataFrame.
            AssertionError: If `rtable` is not of type pandas DataFrame.
            AssertionError: If `l_output_attrs` is not of type of list.
            AssertionError: If `r_output_attrs` is not of type of list.
            AssertionError: If values in `l_output_attrs` is not of type
                string.
            AssertionError: If values in `r_output_attrs` is not of type
                string.
            AssertionError: If `l_output_prefix` is not of type string.
            AssertionError: If `r_output_prefix` is not of type string.
            AssertionError: If `verbose` is not of type boolean.
            AssertionError: If `show_progress` is not of type boolean.
            AssertionError: If `n_jobs` is not of type int.
            AssertionError: If `l_out_attrs` are not in the ltable.
            AssertionError: If `r_out_attrs` are not in the rtable.

        Examples:
            >>> def match_last_name(ltuple, rtuple):
                    # assume that there is a 'name' attribute in the input tables
                    # and each value in it has two words
                    l_last_name = ltuple['name'].split()[1]
                    r_last_name = rtuple['name'].split()[1]
                    if l_last_name != r_last_name:
                        return True
                    else:
                        return False
            >>> import py_entitymatching as em
            >>> bb = em.BlackBoxBlocker()
            >>> bb.set_black_box_function(match_last_name)
            >>> C = bb.block_tables(A, B, l_output_attrs=['name'], r_output_attrs=['name'])
        """
        # validate data types of standard input parameters
        self.validate_types_params_tables(ltable, rtable,
                                          l_output_attrs, r_output_attrs,
                                          l_output_prefix, r_output_prefix,
                                          verbose, n_jobs)

        # validate data type of show_progress
        self.validate_show_progress(show_progress)

        # validate black box function
        assert self.black_box_function is not None, 'Black box function is not set'

        # validate output attributes
        self.validate_output_attrs(ltable, rtable, l_output_attrs, r_output_attrs)

        # get and validate metadata
        log_info(logger, 'Required metadata: ltable key, rtable key', verbose)

        # # get metadata
        l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger, verbose)

        # # validate metadata
        cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger, verbose)
        cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger, verbose)

        # do blocking

        # # set index for convenience
        l_df = ltable.set_index(l_key, drop=False)
        r_df = rtable.set_index(r_key, drop=False)

        # # remove l_key from l_output_attrs and r_key from r_output_attrs;
        # # the keys are always emitted via the prefixed ID columns anyway
        l_output_attrs_1 = []
        if l_output_attrs:
            l_output_attrs_1 = [x for x in l_output_attrs if x != l_key]
        r_output_attrs_1 = []
        if r_output_attrs:
            r_output_attrs_1 = [x for x in r_output_attrs if x != r_key]

        # # determine the number of processes to launch in parallel
        n_procs = self.get_num_procs(n_jobs, len(l_df) * len(r_df))

        # # pickle the black-box function before passing it as an arg to
        # # _block_tables_split to be executed by each child process
        black_box_function_pkl = cp.dumps(self.black_box_function)

        if n_procs <= 1:
            # single process
            candset = _block_tables_split(l_df, r_df, l_key, r_key,
                                          l_output_attrs_1, r_output_attrs_1,
                                          l_output_prefix, r_output_prefix,
                                          black_box_function_pkl, show_progress)
        else:
            # multiprocessing
            m, n = self.get_split_params(n_procs, len(l_df), len(r_df))
            # np.array_split replaces pd.np.array_split -- the pandas.np
            # alias was removed in pandas 1.0
            l_splits = np.array_split(l_df, m)
            r_splits = np.array_split(r_df, n)
            c_splits = Parallel(n_jobs=m * n)(
                delayed(_block_tables_split)(l_splits[i], r_splits[j],
                                             l_key, r_key,
                                             l_output_attrs_1, r_output_attrs_1,
                                             l_output_prefix, r_output_prefix,
                                             black_box_function_pkl,
                                             # only the last chunk shows progress
                                             show_progress and i == len(l_splits) - 1 and j == len(r_splits) - 1)
                for i in range(len(l_splits)) for j in range(len(r_splits)))
            candset = pd.concat(c_splits, ignore_index=True)

        # # determine the attributes to retain in the output candidate set
        retain_cols = self.get_attrs_to_retain(l_key, r_key,
                                               l_output_attrs, r_output_attrs,
                                               l_output_prefix, r_output_prefix)
        if len(candset) > 0:
            candset = candset[retain_cols]
        else:
            candset = pd.DataFrame(columns=retain_cols)

        # update catalog
        key = get_name_for_key(candset.columns)
        candset = add_key_column(candset, key)
        cm.set_candset_properties(candset, key, l_output_prefix + l_key,
                                  r_output_prefix + r_key, ltable, rtable)

        # return candidate set
        return candset

    def block_candset(self, candset, verbose=True, show_progress=True, n_jobs=1):
        """
        Blocks an input candidate set of tuple pairs based on a black box
        blocking function specified by the user.

        Finds tuple pairs from an input candidate set of tuple pairs that
        survive the black box function. A tuple pair survives the black box
        blocking function if the function returns False for that pair,
        otherwise the tuple pair is dropped.

        Args:
            candset (DataFrame): The input candidate set of tuple pairs.
            verbose (boolean): A flag to indicate whether logging should be
                done (defaults to True).
            show_progress (boolean): A flag to indicate whether progress should
                be displayed to the user (defaults to True).
            n_jobs (int): The number of parallel jobs to be used for computation
                (defaults to 1). If -1 all CPUs are used. If 0 or 1,
                no parallel computation is used at all, which is useful for
                debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
                used (where n_cpus is the total number of CPUs in the
                machine). Thus, for n_jobs = -2, all CPUs but one are used.
                If (n_cpus + 1 + n_jobs) is less than 1, then no parallel
                computation is used (i.e., equivalent to the default).

        Returns:
            A candidate set of tuple pairs that survived blocking (DataFrame).

        Raises:
            AssertionError: If `candset` is not of type pandas DataFrame.
            AssertionError: If `verbose` is not of type boolean.
            AssertionError: If `n_jobs` is not of type int.
            AssertionError: If `show_progress` is not of type boolean.

        Examples:
            >>> def match_last_name(ltuple, rtuple):
                    # assume that there is a 'name' attribute in the input tables
                    # and each value in it has two words
                    l_last_name = ltuple['name'].split()[1]
                    r_last_name = rtuple['name'].split()[1]
                    if l_last_name != r_last_name:
                        return True
                    else:
                        return False
            >>> import py_entitymatching as em
            >>> bb = em.BlackBoxBlocker()
            >>> bb.set_black_box_function(match_last_name)
            >>> D = bb.block_candset(C) # C is an output from block_tables
        """
        # validate data types of standard input parameters
        self.validate_types_params_candset(candset, verbose, show_progress, n_jobs)

        # validate black box function
        assert self.black_box_function is not None, 'Black box function is not set'

        # get and validate metadata
        log_info(logger, 'Required metadata: cand.set key, fk ltable, fk rtable, '
                         'ltable, rtable, ltable key, rtable key', verbose)

        # # get metadata
        key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(candset, logger, verbose)

        # # validate metadata
        cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key,
                                          logger, verbose)

        # do blocking

        # # set index for convenience
        l_df = ltable.set_index(l_key, drop=False)
        r_df = rtable.set_index(r_key, drop=False)

        # # project candset to keep only the ID attributes
        c_df = candset[[key, fk_ltable, fk_rtable]]

        # # determine the number of processes to launch in parallel
        n_procs = self.get_num_procs(n_jobs, len(c_df))

        # # pickle the black-box function before passing it as an arg to
        # # _block_candset_split to be executed by each child process
        black_box_function_pkl = cp.dumps(self.black_box_function)

        valid = []
        if n_procs <= 1:
            # single process
            valid = _block_candset_split(c_df, l_df, r_df, l_key, r_key,
                                         fk_ltable, fk_rtable,
                                         black_box_function_pkl, show_progress)
        else:
            # multiprocessing
            # np.array_split replaces pd.np.array_split -- the pandas.np
            # alias was removed in pandas 1.0
            c_splits = np.array_split(c_df, n_procs)
            valid_splits = Parallel(n_jobs=n_procs)(
                delayed(_block_candset_split)(c_splits[i],
                                              l_df, r_df,
                                              l_key, r_key,
                                              fk_ltable, fk_rtable,
                                              black_box_function_pkl,
                                              show_progress and i == len(c_splits) - 1)
                for i in range(len(c_splits)))
            valid = sum(valid_splits, [])

        # construct output table
        if len(c_df) > 0:
            c_df = candset[valid]
        else:
            c_df = pd.DataFrame(columns=candset.columns)

        # update catalog
        cm.set_candset_properties(c_df, key, fk_ltable, fk_rtable, ltable, rtable)

        # return candidate set
        return c_df

    def block_tuples(self, ltuple, rtuple):
        """
        Blocks a tuple pair based on a black box blocking function specified
        by the user.

        Takes a tuple pair as input, applies the black box blocking function to
        it, and returns True (if the intention is to drop the pair) or False
        (if the intention is to keep the tuple pair).

        Args:
            ltuple (Series): input left tuple.
            rtuple (Series): input right tuple.

        Returns:
            A status indicating if the tuple pair should be dropped or kept,
            based on the black box blocking function (boolean).

        Examples:
            >>> def match_last_name(ltuple, rtuple):
                    # assume that there is a 'name' attribute in the input tables
                    # and each value in it has two words
                    l_last_name = ltuple['name'].split()[1]
                    r_last_name = rtuple['name'].split()[1]
                    if l_last_name != r_last_name:
                        return True
                    else:
                        return False
            >>> import py_entitymatching as em
            >>> bb = em.BlackBoxBlocker()
            >>> bb.set_black_box_function(match_last_name)
            >>> status = bb.block_tuples(A.loc[0], B.loc[0]) # A, B are input tables.
        """
        # validate black box function
        assert self.black_box_function is not None, 'Black box function is not set'
        return self.black_box_function(ltuple, rtuple)
def _block_tables_split(l_df, r_df, l_key, r_key,
l_output_attrs, r_output_attrs,
l_output_prefix, r_output_prefix,
black_box_function_pkl, show_progress):
# initialize progress bar
if show_progress:
bar = pyprind.ProgBar(len(l_df)*len(r_df))
# create look up dictionaries for faster processing
l_dict = {}
for k, r in l_df.iterrows():
l_dict[k] = r
r_dict = {}
for k, r in r_df.iterrows():
r_dict[k] = r
# get the position of the ID attribute in the tables
l_id_pos = list(l_df.columns).index(l_key)
r_id_pos = list(r_df.columns).index(r_key)
# create candset column names for the ID attributes of the tables
ltable_id = l_output_prefix + l_key
rtable_id = r_output_prefix + r_key
# list to keep the tuple pairs that survive blocking
valid = []
# unpickle the black box function
black_box_function = pickle.loads(black_box_function_pkl)
# iterate through the two tables
for l_t in l_df.itertuples(index=False):
# # get ltuple from the look up dictionary
ltuple = l_dict[l_t[l_id_pos]]
for r_t in r_df.itertuples(index=False):
# # update the progress bar
if show_progress:
bar.update()
# # get rtuple from the look up dictionary
rtuple = r_dict[r_t[r_id_pos]]
# # apply the black box function to the tuple pair
res = black_box_function(ltuple, rtuple)
if res != True:
# # this tuple pair survives blocking
# # an ordered dictionary to keep a surviving tuple pair
d = OrderedDict()
# # add ltable and rtable ids to an ordered dictionary
d[ltable_id] = ltuple[l_key]
d[rtable_id] = rtuple[r_key]
# # add l/r output attributes to the ordered dictionary
l_out = ltuple[l_output_attrs]
l_out.index = l_output_prefix + l_out.index
d.update(l_out)
r_out = rtuple[r_output_attrs]
r_out.index = r_output_prefix + r_out.index
d.update(r_out)
# # add the ordered dict to the list
valid.append(d)
# construct candidate set
candset = pd.DataFrame(valid)
return candset
def _block_candset_split(c_df, l_df, r_df, l_key, r_key, fk_ltable, fk_rtable,
black_box_function_pkl, show_progress):
# initialize the progress bar
if show_progress:
bar = pyprind.ProgBar(len(c_df))
# create lookup dictionaries for faster processing
l_dict = {}
r_dict = {}
# list to keep track of valid ids
valid = []
# find positions of the ID attributes of the two tables in the candset
l_id_pos = list(c_df.columns).index(fk_ltable)
r_id_pos = list(c_df.columns).index(fk_rtable)
# unpickle the black box function
black_box_function = pickle.loads(black_box_function_pkl)
# iterate candidate set
for row in c_df.itertuples(index=False):
# # update progress bar
if show_progress:
bar.update()
# # get ltuple, try dictionary first, then dataframe
row_lkey = row[l_id_pos]
if row_lkey not in l_dict:
l_dict[row_lkey] = l_df.ix[row_lkey]
ltuple = l_dict[row_lkey]
# # get rtuple, try dictionary first, then dataframe
row_rkey = row[r_id_pos]
if row_rkey not in r_dict:
r_dict[row_rkey] = r_df.ix[row_rkey]
rtuple = r_dict[row_rkey]
# # apply the black box function to the tuple pair
res = black_box_function(ltuple, rtuple)
if res != True:
valid.append(True)
else:
valid.append(False)
return valid
|
<gh_stars>0
import os
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
# Path to the Google Cloud service-account credentials file.
# NOTE(review): "/.json" looks like a scrubbed placeholder -- point this at
# a real key file before use.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/.json"
# Shared Natural Language API client, constructed once at import time.
# NOTE(review): presumably requires valid credentials to succeed -- verify.
client = language.LanguageServiceClient()
class Sentiment:
    """Holds a provider's sentiment-analysis result.

    Attributes:
        value: the (normalized) polarity score, or a sentinel on failure.
        errorMessage: provider error text; empty when the call succeeded.
    """

    def __init__(self, value, errorMessage):
        self.errorMessage = errorMessage
        self.value = value
class Entities:
    """Holds a provider's named-entity-recognition (NER) result.

    Attributes:
        values: list of entity dicts (keys 'entity' and 'type').
        errorMessage: provider error text; empty when the call succeeded.
    """

    def __init__(self, values, errorMessage):
        self.errorMessage = errorMessage
        self.values = values
class Classiciation:
    """Holds a provider's content-classification result.

    NOTE: the class name is a long-standing typo of "Classification"; it is
    kept so existing callers keep working, and a correctly spelled alias is
    defined below.

    Attributes:
        values: list of category dicts (keys 'category' and 'confidence').
        errorMessage: provider error text; empty when the call succeeded.
    """

    def __init__(self, values, errorMessage):
        self.values = values
        self.errorMessage = errorMessage


# Correctly spelled, backward-compatible alias -- prefer this in new code.
Classification = Classiciation
class Syntax:
    """Holds a provider's part-of-speech (syntax analysis) result.

    Attributes:
        values: list of token dicts ('token_text', 'token_begin_offset',
            'pos_tag').
        errorMessage: provider error text; empty when the call succeeded.
    """

    def __init__(self, values, errorMessage):
        self.errorMessage = errorMessage
        self.values = values
def analyzeSentiment(text):
    """
    Uses the NLP provider's SDK to perform a sentiment analysis operation.

    Arguments:
        text {String} -- Text to be analyzed.
    """
    doc = types.Document(
        content=text,
        type=enums.Document.Type.PLAIN_TEXT,
        language='en')
    try:
        response = client.analyze_sentiment(document=doc)
        score = response.document_sentiment.score
        return Sentiment(normalizeSentiment(score), "")
    except Exception as e:
        # provider failure: sentinel score plus the error text
        return Sentiment(-999, str(e))
def normalizeSentiment(sentiment):
    """
    Normalizes the provider's polarity score to match the format of our thesis.

    Maps a score s to (s + 1) * 0.5, sending -1 -> 0, 0 -> 0.5, 1 -> 1.

    Arguments:
        sentiment {Double} -- Polarity score
    Returns:
        Double -- Normalized polarity score
    """
    shifted = sentiment + 1
    return shifted * 0.5
def analyzeEntities(text):
    """
    Uses the NLP provider's SDK to perform an NER operation.

    Arguments:
        text {String} -- Text to be analyzed.
    """
    try:
        doc = types.Document(
            content=text,
            type=enums.Document.Type.PLAIN_TEXT,
            language='en')
        response = client.analyze_entities(document=doc)
        # one dict per recognized entity: its surface name and type label
        found = [
            {'entity': entity.name, 'type': enums.Entity.Type(entity.type).name}
            for entity in response.entities
        ]
        return Entities(normalizeEntities(found), "")
    except Exception as e:
        return Entities([], str(e.args))
# Maps the provider's entity-type labels to the ones used in our evaluation;
# unknown labels pass through unchanged.
_ENTITY_TYPE_MAP = {
    "PERSON": "Person",
    "LOCATION": "Location",
    "ORGANIZATION": "Organization",
    "EVENT": "Event",
    "CONSUMER_GOOD": "Product",
}


def normalizeEntities(formattedEntities):
    """
    Normalizes the provider's entity types to match the ones used in our evaluation.

    Arguments:
        formattedEntities {List} -- List of recognized named entities and their types.
    Returns:
        List -- A copy of the input list with modified entity types.
    """
    # Build real copies: the previous implementation aliased the input list
    # and mutated it in place, despite documenting that it returned a copy.
    normalized = []
    for entity in formattedEntities:
        entity = dict(entity)
        entity['type'] = _ENTITY_TYPE_MAP.get(entity['type'], entity['type'])
        normalized.append(entity)
    return normalized
def analyzeSyntax(text):
    """
    Uses the NLP provider's SDK to perform a Part-of-Speech tagging (Syntax Analysis) operation.

    Arguments:
        text {String} -- Text to be analyzed.
    """
    doc = types.Document(
        content=text,
        type=enums.Document.Type.PLAIN_TEXT,
        language='en')
    try:
        response = client.analyze_syntax(
            document=doc, encoding_type='UTF8')
        tokens = []
        for token in response.tokens:
            tag = u"{}".format(enums.PartOfSpeech.Tag(
                token.part_of_speech.tag).name)
            # rename tags to the variants expected downstream
            # (CONJ -> CCONJ, PRT -> PART)
            if tag == "CONJ":
                tag = "CCONJ"
            elif tag == "PRT":
                tag = "PART"
            tokens.append({
                "token_text": token.text.content,
                "token_begin_offset": token.text.begin_offset,
                "pos_tag": tag,
            })
        return Syntax(tokens, "")
    except Exception as e:
        return Syntax([], str(e.args))
def classifyContent(text):
    """
    Uses the NLP provider's SDK to perform a content classification operation.

    Arguments:
        text {String} -- Text to be analyzed.
    """
    doc = types.Document(
        content=text,
        type=enums.Document.Type.PLAIN_TEXT,
        language='en')
    try:
        response = client.classify_text(document=doc)
        # one dict per detected category with its confidence score
        categories = [
            {"category": category.name, "confidence": category.confidence}
            for category in response.categories
        ]
        return Classiciation(categories, "")
    except Exception as e:
        return Classiciation([], str(e.args))
# print('\n\n')
# print(analyzeSentiment(u"I hate this job.").value)
#print(analyzeEntities(u"My name is <NAME> and I am using a MacBook in California during World War II at Microsoft"))
#print(json.dumps(analyzeSyntax(u"Carly , confused about the situation , questions Nevel on how he won the contest .").values, sort_keys=True, indent=4))
#print(classifyContent(u'The 70-200mm f/2.8 is one of the most important lenses for many photographers and videographers, as they are typically of high optical quality and offer a very versatile focal length range coupled with a wide maximum aperture for a zoom. This excellent video review takes a look at the new Canon RF 70-200mm f/2.8L IS USM and what you can expect from it both in terms of performance and image quality.').values)
|
<filename>playlistcast/api/query.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Query"""
import os
from pathlib import Path
from typing import List
import graphene
from graphql_relay import from_global_id
from graphql.execution.base import ResolveInfo
from playlistcast import util, db, config, error
from playlistcast.protocol import m3u
from .model.resource_location import ResourceLocation, Directory, File
from .model.chromecast import ChromecastModel, CastStatus, CHROMECAST
from .model.playlist import PlaylistItem
class Query(graphene.ObjectType):
    """Root GraphQL query type: read-only entry points of the playlistcast API."""
    class Meta:
        """API Description"""
        description = 'Query'
    # Field declarations; each is resolved by the correspondingly named
    # resolve_<field> method below.
    resource_location_all = graphene.List(ResourceLocation)
    resource_location = graphene.Field(ResourceLocation, id=graphene.ID(required=True))
    list_directory = graphene.Field(
        Directory,
        name=graphene.String(required=True),
        subpath=graphene.String()
    )
    chromecast_device_all = graphene.List(ChromecastModel)
    playlist_items = graphene.Field(
        graphene.List(PlaylistItem),
        name=graphene.String(required=True),
        path=graphene.String(required=True)
    )
    def resolve_resource_location_all(self, info: ResolveInfo) -> List[ResourceLocation]:
        """Return every registered ResourceLocation."""
        return ResourceLocation.get_query(info).all()
    def resolve_resource_location(self, info: ResolveInfo, id: graphene.ID) -> ResourceLocation:  # pylint: disable=W0622
        """Return a single ResourceLocation looked up by relay global ID."""
        # decode the relay global ID into the underlying database ID
        id = from_global_id(id)[1]
        return ResourceLocation.get_node(info, id)
    def resolve_list_directory(self, info: ResolveInfo, name: graphene.String, subpath: graphene.String = '') -> Directory:
        """ Browse directories
        name - ResourceLocation -> name
        subpath - string path of current directory
        """
        # look up the resource root registered under `name`
        model = db.session.query(db.ResourceLocation).filter(db.ResourceLocation.name == name).first()
        if not model:
            raise error.ResourcePathError('Invalid path {}'.format(name))
        d = Directory()
        d.resource_name = name
        d.resource_path = '/resource/{}/{}'.format(name, subpath)
        d.subpath = subpath
        # resolve the browsed directory on disk and validate it exists
        path = os.path.join(model.location, subpath)
        if not os.path.exists(path):
            raise error.ResourcePathError('Path not exists {}'.format(path))
        if not os.path.isdir(path):
            raise error.ResourcePathError('Path is not directory {}'.format(path))
        files = list()
        # one File entry per directory member, sorted by name
        for fname in sorted(os.listdir(path)):
            p = Path(os.path.join(path, fname))
            stat = p.stat()
            f = File(name=fname, size=stat.st_size, is_dir=p.is_dir(), suffix=p.suffix)
            files.append(f)
        d.files = files
        return d
    def resolve_chromecast_device_all(self, info: ResolveInfo) -> List[ChromecastModel]:
        """List all chromecast models"""
        output = []
        for val in CHROMECAST.values():
            # update model: refresh each device's status snapshot before
            # returning it, skipping fields that are filled in manually
            cs = util.convert(val.device.status, CastStatus, ('media_controller', 'status', 'uuid'))
            cs.uuid = val.data.uuid
            val.data.status = cs
            output.append(val.data)
        return output
    def resolve_playlist_items(self, info: ResolveInfo, name: graphene.String, path: graphene.String) -> List[PlaylistItem]:
        """Get list of playlist items"""
        model = db.session.query(db.ResourceLocation).filter(db.ResourceLocation.name == name).first()
        if not model:
            raise error.ResourcePathError('Invalid path {}'.format(name))
        playlist = m3u.M3UPlaylist()
        m3udir = playlist.load(model.location, path)
        output = list()
        # rewrite each playlist entry as an absolute HTTP URL served by this host
        for p in playlist.items:
            urlpath = 'http://'+util.get_ip()+':'+str(config.PORT)+'/resource/'+name+'/'+str(m3udir)+'/'+p.path
            item = PlaylistItem(index=p.index, name=p.name, path=urlpath)
            output.append(item)
        return output
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v2ray.com/core/transport/internet/tls/config.proto
# NOTE(review): machine-generated protobuf bindings -- regenerate with protoc
# instead of editing this module by hand.
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database; all descriptors/messages below register into it.
_sym_db = _symbol_database.Default()

# File descriptor carrying the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='v2ray.com/core/transport/internet/tls/config.proto',
  package='v2ray.core.transport.internet.tls',
  syntax='proto3',
  serialized_options=b'\n%com.v2ray.core.transport.internet.tlsP\001Z\003tls\252\002!V2Ray.Core.Transport.Internet.Tls',
  serialized_pb=b'\n2v2ray.com/core/transport/internet/tls/config.proto\x12!v2ray.core.transport.internet.tls\"\xba\x01\n\x0b\x43\x65rtificate\x12\x13\n\x0b\x43\x65rtificate\x18\x01 \x01(\x0c\x12\x0b\n\x03Key\x18\x02 \x01(\x0c\x12\x43\n\x05usage\x18\x03 \x01(\x0e\x32\x34.v2ray.core.transport.internet.tls.Certificate.Usage\"D\n\x05Usage\x12\x10\n\x0c\x45NCIPHERMENT\x10\x00\x12\x14\n\x10\x41UTHORITY_VERIFY\x10\x01\x12\x13\n\x0f\x41UTHORITY_ISSUE\x10\x02\"\xf2\x01\n\x06\x43onfig\x12\x16\n\x0e\x61llow_insecure\x18\x01 \x01(\x08\x12\x1e\n\x16\x61llow_insecure_ciphers\x18\x05 \x01(\x08\x12\x43\n\x0b\x63\x65rtificate\x18\x02 \x03(\x0b\x32..v2ray.core.transport.internet.tls.Certificate\x12\x13\n\x0bserver_name\x18\x03 \x01(\t\x12\x15\n\rnext_protocol\x18\x04 \x03(\t\x12\"\n\x1a\x64isable_session_resumption\x18\x06 \x01(\x08\x12\x1b\n\x13\x64isable_system_root\x18\x07 \x01(\x08\x42R\n%com.v2ray.core.transport.internet.tlsP\x01Z\x03tls\xaa\x02!V2Ray.Core.Transport.Internet.Tlsb\x06proto3'
)

# Enum descriptor for Certificate.Usage (ENCIPHERMENT / AUTHORITY_VERIFY /
# AUTHORITY_ISSUE).
_CERTIFICATE_USAGE = _descriptor.EnumDescriptor(
  name='Usage',
  full_name='v2ray.core.transport.internet.tls.Certificate.Usage',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ENCIPHERMENT', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='AUTHORITY_VERIFY', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='AUTHORITY_ISSUE', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=208,
  serialized_end=276,
)
_sym_db.RegisterEnumDescriptor(_CERTIFICATE_USAGE)

# Message descriptor for the Certificate message (bytes Certificate, bytes
# Key, enum usage).
_CERTIFICATE = _descriptor.Descriptor(
  name='Certificate',
  full_name='v2ray.core.transport.internet.tls.Certificate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Certificate', full_name='v2ray.core.transport.internet.tls.Certificate.Certificate', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Key', full_name='v2ray.core.transport.internet.tls.Certificate.Key', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=b"",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='usage', full_name='v2ray.core.transport.internet.tls.Certificate.usage', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _CERTIFICATE_USAGE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=90,
  serialized_end=276,
)

# Message descriptor for the Config message (TLS settings plus repeated
# Certificate entries).
_CONFIG = _descriptor.Descriptor(
  name='Config',
  full_name='v2ray.core.transport.internet.tls.Config',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='allow_insecure', full_name='v2ray.core.transport.internet.tls.Config.allow_insecure', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='allow_insecure_ciphers', full_name='v2ray.core.transport.internet.tls.Config.allow_insecure_ciphers', index=1,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='certificate', full_name='v2ray.core.transport.internet.tls.Config.certificate', index=2,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='server_name', full_name='v2ray.core.transport.internet.tls.Config.server_name', index=3,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='next_protocol', full_name='v2ray.core.transport.internet.tls.Config.next_protocol', index=4,
      number=4, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='disable_session_resumption', full_name='v2ray.core.transport.internet.tls.Config.disable_session_resumption', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='disable_system_root', full_name='v2ray.core.transport.internet.tls.Config.disable_system_root', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=279,
  serialized_end=521,
)

# Wire up cross-references between descriptors, then register the file.
_CERTIFICATE.fields_by_name['usage'].enum_type = _CERTIFICATE_USAGE
_CERTIFICATE_USAGE.containing_type = _CERTIFICATE
_CONFIG.fields_by_name['certificate'].message_type = _CERTIFICATE
DESCRIPTOR.message_types_by_name['Certificate'] = _CERTIFICATE
DESCRIPTOR.message_types_by_name['Config'] = _CONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
Certificate = _reflection.GeneratedProtocolMessageType('Certificate', (_message.Message,), {
  'DESCRIPTOR' : _CERTIFICATE,
  '__module__' : 'v2ray.com.core.transport.internet.tls.config_pb2'
  # @@protoc_insertion_point(class_scope:v2ray.core.transport.internet.tls.Certificate)
  })
_sym_db.RegisterMessage(Certificate)

Config = _reflection.GeneratedProtocolMessageType('Config', (_message.Message,), {
  'DESCRIPTOR' : _CONFIG,
  '__module__' : 'v2ray.com.core.transport.internet.tls.config_pb2'
  # @@protoc_insertion_point(class_scope:v2ray.core.transport.internet.tls.Config)
  })
_sym_db.RegisterMessage(Config)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
# Modules by me
import carnatic_util
import mohanam
import markov_analyser
import sys
import optparse
from pydub import AudioSegment
_standard_length = 4
def GetOptions():
usage = "usage: %prog [options] [music score(s)]"
parser = optparse.OptionParser(usage)
parser.add_option("-q"
,"--qpm"
,action="store"
,type="int"
,default=80
,dest="qpm"
,help="Quarters per minute - an indicator of the beat (80 by default)")
parser.add_option("-n"
,"--notes"
,action="store"
,type="int"
,default=40
,dest="num_notes"
,help="number of notes to be generated (default 40)")
parser.add_option("-w"
,"--width"
,action="store"
,type="int"
,default=4
,dest="width"
,help="memory width (in notes) of the state. (default 4 - stores the last 4 notes played)")
parser.add_option("-t"
,"--octave"
,action="store"
,type="int"
,default=4
,dest="octave"
,help="The octave to generate music at (default 4)")
parser.add_option("-o"
,"--output_file"
,action="store"
,type="str"
,default="output.wav"
,dest="output_filename"
,help="wav output filename ('output.wav' by default)")
parser.add_option("-s"
,"--output_score"
,action="store"
,type="str"
,default="score_output.txt"
,dest="output_score_file"
,help="prints the output score ('score_output.txt' by default)")
parser.add_option("-p"
,"--pysnth_module"
,action="store"
,type="str"
,default="n"
,dest="pysynth_module"
,help="s uses the string pysynth module (very slow), p the piano with caching (default is the plain piano with no caching), (s and p require numpy/scipy) ")
(options, args) = parser.parse_args(args=None, values=None)
# Open the file to be read
if args is []:
print "Reading Music from stdin since no files were given"
file_handles = [sys.stdin]
else:
file_handles = [open(x, "r") for x in args]
return (file_handles, options.qpm, options.output_filename, options.octave, options.num_notes, options.width, options.output_score_file, options.pysynth_module)
def ImportPysynthModule(c):
try:
if c == 'p':
import pysynth_b as pysynth
elif c == 's':
import pysynth_s as pysynth
else:
import pysynth as pysynth
except ImportError, e:
print "Error Importing pysynth"
print e
make_wave = pysynth.make_wav
return make_wave
def main():
    """Read carnatic scores, train a Markov model, and synthesize a WAV.

    The generated melody and a constant base line are rendered to separate
    WAV files and then overlaid into 'final_output2.wav'.
    """
    (read_file_handles, qpm, output_filename, octave, num_notes, width, output_score_file, pysynth_module) = GetOptions()
    make_wav = ImportPysynthModule(pysynth_module)
    # collect the preprocessed note sequences from every input score
    carnatic_songs= []
    for f in read_file_handles:
        s = f.read()
        song = carnatic_util.CollectNotes(carnatic_util.PreProcessScore(s))
        carnatic_songs.append(song)
        f.close()
    markov_song_generator = markov_analyser.MarkovAnalyser(width)
    print "Reading Songs.."
    for song in carnatic_songs:
        markov_song_generator.AddSong(song)
    print "Analysing Songs.."
    markov_song_generator.MarkovAnalyse()
    print "Generating Song.."
    markov_song_generator.MarkovGenerate(num_notes)
    generated_song = markov_song_generator.GetGeneratedSong(output_score_file)
    generated_song = carnatic_util.ConvertLengthToTempo(generated_song)
    print "Converting to WAV.."
    english_notes = []
    base_note = mohanam.Base_Note()
    base_line = []
    total_length = 5
    for (note, length) in generated_song:
        # translate carnatic notation to english note names at the chosen octave
        english_note = mohanam.Translate(note, octave)
        english_notes.append((english_note, length))
        total_length+=1
        # the base line repeats the base note with the melody's note lengths
        base_line.append((base_note, length))
        #base_line.append((base_note, total_length))
    make_wav(english_notes, fn=output_filename, bpm = qpm)
    make_wav(base_line, fn="base_line.wav", bpm = qpm)
    sound1 = AudioSegment.from_wav(output_filename)
    sound2 = AudioSegment.from_wav("base_line.wav")
    # mix sound2 with sound1, starting at 5000ms into sound1)
    output = sound1.overlay(sound2)
    # save the result
    output.export("final_output2.wav", format="wav")
    return
# Script entry point: generate a song from the given score files.
if __name__ == '__main__':
    main()
|
<reponame>bruce1408/detectron2_modify
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import glob
import logging
import os
import pickle
import sys
from typing import Any, ClassVar, Dict, List
import torch
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.structures.boxes import BoxMode
from detectron2.structures.instances import Instances
from detectron2.utils.logger import setup_logger
from densepose import add_densepose_config
from densepose.utils.logger import verbosity_to_level
from densepose.vis.base import CompoundVisualizer
from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
from densepose.vis.densepose import (
DensePoseResultsContourVisualizer,
DensePoseResultsFineSegmentationVisualizer,
DensePoseResultsUVisualizer,
DensePoseResultsVVisualizer,
)
from densepose.vis.extractor import CompoundExtractor, create_extractor
DOC = """Apply Net - a tool to print / visualize DensePose results
"""
LOGGER_NAME = "apply_net"
logger = logging.getLogger(LOGGER_NAME)
_ACTION_REGISTRY: Dict[str, "Action"] = {}
class Action(object):
    """Base class for CLI actions; installs the arguments every action shares."""

    @classmethod
    def add_arguments(cls: type, parser: argparse.ArgumentParser):
        """Register the common -v/--verbosity counter on the given parser."""
        parser.add_argument("-v", "--verbosity", action="count",
                            help="Verbose mode. Multiple -v options increase the verbosity.")
def register_action(cls: type):
    """Class decorator: file ``cls`` into the global action registry under its COMMAND key."""
    _ACTION_REGISTRY[cls.COMMAND] = cls
    return cls
class InferenceAction(Action):
    """Shared base for actions that run a DensePose model over input images."""

    @classmethod
    def add_arguments(cls: type, parser: argparse.ArgumentParser):
        """Add config/model/input positionals and --opts on top of base arguments."""
        super(InferenceAction, cls).add_arguments(parser)
        parser.add_argument("cfg", metavar="<config>", help="Config file")
        parser.add_argument("model", metavar="<model>", help="Model file")
        parser.add_argument("input", metavar="<input>", help="Input data")
        parser.add_argument(
            "--opts",
            help="Modify config options using the command-line 'KEY VALUE' pairs",
            default=[],
            nargs=argparse.REMAINDER,
        )

    @classmethod
    def execute(cls: type, args: argparse.Namespace):
        """Load config and model, run the predictor on every input file, finalize."""
        logger.info(f"Loading config from {args.cfg}")
        opts = []
        cfg = cls.setup_config(args.cfg, args.model, args, opts)
        logger.info(f"Loading model from {args.model}")
        predictor = DefaultPredictor(cfg)
        logger.info(f"Loading data from {args.input}")
        file_list = cls._get_input_file_list(args.input)
        if len(file_list) == 0:
            logger.warning(f"No input images for {args.input}")
            return
        # subclasses define create_context / execute_on_outputs / postexecute
        context = cls.create_context(args)
        for file_name in file_list:
            img = read_image(file_name, format="BGR")  # predictor expects BGR image.
            with torch.no_grad():
                outputs = predictor(img)["instances"]
            cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs)
        cls.postexecute(context)

    @classmethod
    def setup_config(
        cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
    ):
        """Build and freeze a config from file, CLI --opts, and extra opts pairs."""
        cfg = get_cfg()
        add_densepose_config(cfg)
        cfg.merge_from_file(config_fpath)
        cfg.merge_from_list(args.opts)
        if opts:
            cfg.merge_from_list(opts)
        cfg.MODEL.WEIGHTS = model_fpath
        cfg.freeze()
        return cfg

    @classmethod
    def _get_input_file_list(cls: type, input_spec: str):
        """Resolve input_spec (directory, single file, or glob pattern) to a file list."""
        if os.path.isdir(input_spec):
            file_list = [
                os.path.join(input_spec, fname)
                for fname in os.listdir(input_spec)
                if os.path.isfile(os.path.join(input_spec, fname))
            ]
        elif os.path.isfile(input_spec):
            file_list = [input_spec]
        else:
            file_list = glob.glob(input_spec)
        return file_list
@register_action
class DumpAction(InferenceAction):
    """
    Dump action that outputs results to a pickle file
    """

    COMMAND: ClassVar[str] = "dump"

    @classmethod
    def add_parser(cls: type, subparsers: argparse._SubParsersAction):
        """Attach the 'dump' subcommand to the CLI."""
        parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.")
        cls.add_arguments(parser)
        parser.set_defaults(func=cls.execute)

    @classmethod
    def add_arguments(cls: type, parser: argparse.ArgumentParser):
        """Add the --output dump-file option on top of inherited arguments."""
        super(DumpAction, cls).add_arguments(parser)
        parser.add_argument(
            "--output",
            metavar="<dump_file>",
            default="results.pkl",
            help="File name to save dump to",
        )

    @classmethod
    def execute_on_outputs(
        cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
    ):
        """Convert one image's predictions to CPU tensors and buffer them in context."""
        image_fpath = entry["file_name"]
        logger.info(f"Processing {image_fpath}")
        result = {"file_name": image_fpath}
        if outputs.has("scores"):
            result["scores"] = outputs.get("scores").cpu()
        if outputs.has("pred_boxes"):
            result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu()
        if outputs.has("pred_densepose"):
            # DensePose results are expressed relative to XYWH boxes
            # NOTE(review): assumes pred_boxes is present whenever
            # pred_densepose is -- otherwise the key lookup below fails
            boxes_XYWH = BoxMode.convert(
                result["pred_boxes_XYXY"], BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
            )
            result["pred_densepose"] = outputs.get("pred_densepose").to_result(boxes_XYWH)
        context["results"].append(result)

    @classmethod
    def create_context(cls: type, args: argparse.Namespace):
        """Initialize the accumulation context for a dump run."""
        context = {"results": [], "out_fname": args.output}
        return context

    @classmethod
    def postexecute(cls: type, context: Dict[str, Any]):
        """Pickle all buffered results to the requested output file."""
        out_fname = context["out_fname"]
        out_dir = os.path.dirname(out_fname)
        if len(out_dir) > 0 and not os.path.exists(out_dir):
            os.makedirs(out_dir)
        with open(out_fname, "wb") as hFile:
            pickle.dump(context["results"], hFile)
            logger.info(f"Output saved to {out_fname}")
@register_action
class ShowAction(InferenceAction):
    """
    Show action that visualizes selected entries on an image
    """

    COMMAND: ClassVar[str] = "show"
    # mapping from CLI visualization names to visualizer classes
    VISUALIZERS: ClassVar[Dict[str, object]] = {
        "dp_contour": DensePoseResultsContourVisualizer,
        "dp_segm": DensePoseResultsFineSegmentationVisualizer,
        "dp_u": DensePoseResultsUVisualizer,
        "dp_v": DensePoseResultsVVisualizer,
        "bbox": ScoredBoundingBoxVisualizer,
    }

    @classmethod
    def add_parser(cls: type, subparsers: argparse._SubParsersAction):
        """Attach the 'show' subcommand to the CLI."""
        parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
        cls.add_arguments(parser)
        parser.set_defaults(func=cls.execute)

    @classmethod
    def add_arguments(cls: type, parser: argparse.ArgumentParser):
        """Add visualization list, score/NMS thresholds, and output file options."""
        super(ShowAction, cls).add_arguments(parser)
        parser.add_argument(
            "visualizations",
            metavar="<visualizations>",
            help="Comma separated list of visualizations, possible values: "
            "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))),
        )
        parser.add_argument(
            "--min_score",
            metavar="<score>",
            default=0.8,
            type=float,
            help="Minimum detection score to visualize",
        )
        parser.add_argument(
            "--nms_thresh", metavar="<threshold>", default=None, type=float, help="NMS threshold"
        )
        parser.add_argument(
            "--output",
            metavar="<image_file>",
            default="outputres.png",
            help="File name to save output to",
        )

    @classmethod
    def setup_config(
        cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
    ):
        """Inject score/NMS thresholds into opts before delegating to the base setup."""
        opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST")
        opts.append(str(args.min_score))
        if args.nms_thresh is not None:
            opts.append("MODEL.ROI_HEADS.NMS_THRESH_TEST")
            opts.append(str(args.nms_thresh))
        cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts)
        return cfg

    @classmethod
    def execute_on_outputs(
        cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
    ):
        """Visualize one image's predictions over a grayscale copy and save the file."""
        import cv2
        import numpy as np

        visualizer = context["visualizer"]
        extractor = context["extractor"]
        image_fpath = entry["file_name"]
        logger.info(f"Processing {image_fpath}")
        # draw on a 3-channel grayscale version of the input image
        image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY)
        image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
        data = extractor(outputs)
        image_vis = visualizer.visualize(image, data)
        # output files are numbered per processed entry (see _get_out_fname)
        entry_idx = context["entry_idx"] + 1
        out_fname = cls._get_out_fname(entry_idx, context["out_fname"])
        out_dir = os.path.dirname(out_fname)
        if len(out_dir) > 0 and not os.path.exists(out_dir):
            os.makedirs(out_dir)
        cv2.imwrite(out_fname, image_vis)
        logger.info(f"Output saved to {out_fname}")
        context["entry_idx"] += 1

    @classmethod
    def postexecute(cls: type, context: Dict[str, Any]):
        """No finalization needed; images are written as they are produced."""
        pass

    @classmethod
    def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
        """Insert a zero-padded entry index before the file extension."""
        base, ext = os.path.splitext(fname_base)
        return base + ".{0:04d}".format(entry_idx) + ext

    @classmethod
    def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
        """Build the compound visualizer/extractor pair for the requested views."""
        vis_specs = args.visualizations.split(",")
        visualizers = []
        extractors = []
        for vis_spec in vis_specs:
            vis = cls.VISUALIZERS[vis_spec]()
            visualizers.append(vis)
            extractor = create_extractor(vis)
            extractors.append(extractor)
        visualizer = CompoundVisualizer(visualizers)
        extractor = CompoundExtractor(extractors)
        context = {
            "extractor": extractor,
            "visualizer": visualizer,
            "out_fname": args.output,
            "entry_idx": 0,
        }
        return context
def create_argument_parser() -> argparse.ArgumentParser:
    """Build the top-level parser with one subcommand per registered action."""
    wide_help = lambda prog: argparse.HelpFormatter(prog, max_help_position=120)
    parser = argparse.ArgumentParser(description=DOC, formatter_class=wide_help)
    # with no subcommand given, fall back to printing the help text
    parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
    subparsers = parser.add_subparsers(title="Actions")
    for action in _ACTION_REGISTRY.values():
        action.add_parser(subparsers)
    return parser
def main():
    """CLI entry point: parse arguments, configure logging, dispatch to the action."""
    parser = create_argument_parser()
    args = parser.parse_args()
    # -v is a count action (see Action.add_arguments); with no -v it is None
    verbosity = args.verbosity if hasattr(args, "verbosity") else None
    global logger
    logger = setup_logger(name=LOGGER_NAME)
    # NOTE(review): verbosity may be None here -- assumes verbosity_to_level
    # accepts None; confirm against densepose.utils.logger
    logger.setLevel(verbosity_to_level(verbosity))
    args.func(args)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from copy import deepcopy
import cv2
from cv2 import VideoWriter, VideoWriter_fourcc
import sys
import math
r = 0.038*20
L = 0.354*20
# node class that each spot in the map will occupy
# cell location and goal_location are tuples representing index
# of current cell location and goal cell locations
# local path represents the path to get to this
# node from the optimal parent node
class Node:
    """One search-graph node occupying a (row, col, angle) region of the board."""

    def __init__(self, parent, cell_location, region, c2c, c2g, local_path, command):
        self.parent = parent                # optimal predecessor Node (None at the start)
        self.cell_location = cell_location  # exact [y, x, theta] pose
        self.region = region                # compressed [row, col, angle] board index
        self.c2c = c2c                      # cost-to-come from the start
        self.c2g = c2g                      # heuristic cost-to-go toward the goal
        self.h = c2c + c2g                  # total A*-style estimate
        self.local_path = local_path        # [xs, ys] arc leading here from the parent
        self.command = command              # wheel-speed action [UL, UR] used to arrive
# given 2 points of a line, retrun a lambda function which caluclates the
# y value of an x
def generate_line_eq(p1, p2):
    """Return f(x) giving the y value on the line through points p1 and p2.

    Raises ZeroDivisionError for a vertical line (p1[0] == p2[0]).
    """
    (x1, y1), (x2, y2) = (p1[0], p1[1]), (p2[0], p2[1])
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - slope * x1
    return lambda x: slope * x + intercept
# hardcoded obstacles defined by their vertices and origins
# we just see if the current x and y are within bounding lines
def check_obstacle(x, y):
    """Return True when (x, y) lies inside any of the five hardcoded obstacles."""
    # circle centered at (40, 40), radius 20
    if 20 <= y <= 60:
        half_chord = np.sqrt(20 ** 2 - (y - 40) ** 2)
        if 40 - half_chord <= x <= 40 + half_chord:
            return True
    # circle centered at (40, 160), radius 20
    if 140 <= y <= 180:
        half_chord = np.sqrt(20 ** 2 - (y - 160) ** 2)
        if 40 - half_chord <= x <= 40 + half_chord:
            return True
    # left square
    if 5 <= x <= 35 and 85 <= y <= 115:
        return True
    # middle rectangle
    if 75 <= x <= 125 and 85 <= y <= 115:
        return True
    # right rectangle
    if 145 <= x <= 175 and 40 <= y <= 80:
        return True
    return False
# iterate over the board, and if the cell is an obstacle, generate
# the a circle of points around it which are padding
def generate_margin(color_map, radius):
    """Paint a green clearance margin of ``radius`` cells around every obstacle.

    args: color_map - HxWx3 uint8 numpy image; [255, 0, 0] marks an obstacle
          radius    - padding distance in cells
    returns the same color_map, mutated in place, with margin cells set to
    [0, 255, 0] (obstacle cells themselves are left red)
    """
    height = len(color_map)
    width = len(color_map[0])
    for y in range(height):
        for x in range(width):
            # only obstacle cells ([255, 0, 0]) seed a margin circle
            if not (color_map[y][x][0] == 255 and color_map[y][x][1] == 0 and
                    color_map[y][x][2] == 0):
                continue
            # rasterize a filled circle of the given radius around (x, y)
            for x_i in range(x - radius, x + radius + 1):
                half_chord = np.sqrt(radius ** 2 - (x_i - x) ** 2)
                y_lower_limit = np.floor(y - half_chord)
                y_upper_limit = np.ceil(y + half_chord)
                for y_i in np.arange(y_lower_limit, y_upper_limit + 1):
                    if (x_i >= 0 and x_i < width) and (y_i >= 0 and y_i < height):
                        cell = color_map[int(y_i)][x_i]
                        # never repaint obstacle cells themselves
                        # (fixed: scalar channels were compared to one-element
                        # lists like [255], which only "worked" via numpy
                        # broadcasting; compare to plain scalars instead)
                        if not (cell[0] == 255 and cell[1] == 0 and cell[2] == 0):
                            color_map[int(y_i)][x_i] = [0, 255, 0]
    return color_map
# draw a circle (red in numpy, blue in opencv) to represent the acceptable goal zone
def expand_goal(color_map, goal_location, radius):
    """Mark the acceptable goal zone as a blue circle on the color map.

    goal_location is [y, x, ...]; pixels already colored red (obstacle) or
    green (margin) are left untouched.
    """
    y, x = goal_location[0], goal_location[1]
    height, width = len(color_map), len(color_map[0])
    for x_i in range(x - radius, x + radius + 1):
        half_chord = np.sqrt(radius ** 2 - (x_i - x) ** 2)
        for y_i in np.arange(np.floor(y - half_chord), np.ceil(y + half_chord) + 1):
            if not ((x_i >= 0 and x_i < width) and (y_i >= 0 and y_i < height)):
                continue
            cell = color_map[int(y_i)][int(x_i)]
            # skip obstacle (red channel) and margin (green channel) pixels
            if int(cell[0]) != 255 and int(cell[1]) != 255:
                color_map[int(y_i)][int(x_i)] = [0, 0, 255]
    return color_map
# read the board and depending on each nodes status
# write the proper color in a numpy array as BGR colors
def create_color_map(height, width, radius, goal_location):
    """Build the HxWx3 uint8 map: obstacles red, goal zone blue, margins green.

    args: height, width  - map dimensions in cells
          radius         - obstacle padding radius (robot radius + clearance)
          goal_location  - [y, x, ...]; only the first two entries are used here
    returns the finished color map
    """
    color_map = np.zeros(shape=[height, width, 3], dtype=np.uint8)
    for row in range(height):
        for col in range(width):
            if check_obstacle(col, row):
                # obstacle cells are stored as [255, 0, 0]
                color_map[row][col][0] = 255
                color_map[row][col][1] = 0
                color_map[row][col][2] = 0
            else:
                color_map[row][col][0] = 0
                color_map[row][col][1] = 0
                color_map[row][col][2] = 0
    # NOTE(review): goal circle radius is hardcoded to 2 here rather than
    # using the ``radius`` parameter -- presumably intentional; confirm
    color_map = expand_goal(color_map, goal_location, 2)
    color_map = generate_margin(color_map, radius)
    return color_map
# pass in color map coordinates and convert them to board
# coordinates which have been compressed/expanded coordinates
# by the neighborhood threshold
def compress_coordinates(x, y, theta, thresh):
    """Map exact (x, y, theta) coordinates onto discrete board indices.

    x and y are divided by the neighborhood threshold; theta is normalized to
    [0, 360) and bucketed into twelve 30-degree bins (indices 0..11).
    returns (compressed_x, compressed_y, compressed_angle)
    """
    compressed_x = int(np.floor(x / thresh))
    compressed_y = int(np.floor(y / thresh))
    theta = theta % 360
    # fixed: was `% 11`, which aliased the 330-359 degree bin onto bin 0 and
    # left board angle slot 11 (create_board allocates range(0, 12)) unused
    compressed_angle = int(np.floor(theta / 30)) % 12
    return compressed_x, compressed_y, compressed_angle
# will be used when iterating over closed nodes
# updates the previous color map given the current node to a specifies color
def update_color_map(curr_node, color_map, brg_color):
    """Recolor the map cell under curr_node with the given 3-channel color.

    The node's exact [y, x, ...] cell_location is floored to integer indices.
    """
    row = int(np.floor(curr_node.cell_location[0]))
    col = int(np.floor(curr_node.cell_location[1]))
    for channel in range(3):
        color_map[row][col][channel] = brg_color[channel]
    return color_map
# create the board
# returns a 3d array
# dimensions are height width and angle. Takes in a compressed version of
# the height width and angle which handles the region/node similarity
def create_board(width, height, thresh):
    """Allocate the compressed [row][col][angle] board of fresh Node objects.

    Dimensions are compressed by ``thresh`` (see compress_coordinates) so
    nearby poses share one Node/region; every cell gets twelve 30-degree
    heading bins, and every Node starts with infinite cost.
    """
    compressed_width, compressed_height, compressed_angle = compress_coordinates(x=width, y=height, theta=30, thresh=thresh)
    board = []
    for row_num in range(0, compressed_height):
        temp_row = []
        for col_num in range(0, compressed_width):
            temp_configuration = []
            # twelve 30-degree heading bins per cell
            for angle in range(0,12):
                c2c = np.Infinity
                c2g = np.Infinity
                # c2g = np.sqrt((row_num-goal_location[0])**2 + (col_num-goal_location[1]**2))
                new_node = Node(parent=None,
                                c2c=c2c,
                                c2g=c2g,
                                cell_location=[int(row_num*thresh), int(col_num*thresh), angle*30],
                                region=[row_num, col_num, angle],
                                local_path=[],
                                command = [0,0])
                temp_configuration.append(new_node)
            temp_row.append(temp_configuration)
        board.append(temp_row)
    return board
# generates a series of x and y values representing a curve given wheel velocities
# also returns the final theta orientation of the node, as well as the total cost of the path
# path validity is not checked here
def generate_curve(x, y, theta, UL, UR):
    """Integrate differential-drive kinematics for one second (20 steps of dt).

    args: x, y   - start position in map cells
          theta  - start heading in degrees
          UL, UR - left/right wheel angular velocities
    Uses the module-level wheel radius ``r`` and axle length ``L``.
    returns (x_points, y_points, end_theta_degrees, path_cost); path validity
    is not checked here.
    """
    t = 0
    dt = 0.05
    cost = 0
    # list of x and y values; becomes the node's local_path for plotting
    x_res = [x]
    y_res = [y]
    # fixed: degree<->radian conversion used 3.14 instead of pi
    theta = math.radians(theta)
    # generate the subpoints for the curve and append them to the lists
    while t < 1:
        t = t + dt
        dx = 0.5 * r * (UL + UR) * math.cos(theta) * dt
        dy = 0.5 * r * (UL + UR) * math.sin(theta) * dt
        x += dx
        y += dy
        theta += (r / L) * (UR - UL) * dt
        # step length (the heading cancels out of the magnitude)
        cost = cost + math.sqrt(dx ** 2 + dy ** 2)
        x_res.append(x)
        y_res.append(y)
    # return the points to be plotted, plus the end heading and path cost
    return x_res, y_res, math.degrees(theta), cost
# uses the predefined differential commands to generate the arc of the robots path
# for each point in the arc, check bounds and if in opstacle or margin, and disqualify arcs which contain invalid points
# if arc is valid then pull the node it ends on compare costs, and if cheaper, then update the cost and local path to the node
def gen_next_nodes(curr_node, color_map, board, goal_location, thresh, rpms):
    """Expand curr_node: simulate each wheel-speed action and relax reached nodes.

    args: curr_node     - node being expanded
          color_map     - HxWx3 map (red = obstacle, green = margin)
          board         - 3-D Node array from create_board
          goal_location - [y, x, theta] goal pose
          thresh        - region-compression threshold
          rpms          - unused here (actions are hardcoded below)
    returns the list of neighbor Nodes whose cost estimate improved
    """
    curr_y = curr_node.cell_location[0]
    curr_x = curr_node.cell_location[1]
    curr_angle = curr_node.cell_location[2]
    next_nodes = []
    # hardcoded [UL, UR] wheel-velocity action set
    actions=[[9, 7], [10,3], [7, 7], [3,10], [7, 9]]
    for action in actions:
        x_res, y_res, theta, cost = generate_curve(curr_x, curr_y, curr_angle, action[0], action[1])
        valid = True
        # bounds checking over every sample of the arc
        for x in x_res:
            if int(x) < 0 or int(x) > 399:
                valid = False
        for y in y_res:
            if int(y) < 0 or int(y) > 249:
                valid = False
        # obstacle (red) or margin (green) checking along the arc
        if valid:
            for i in range(len(x_res)):
                if int(color_map[int(y_res[i])][int(x_res[i])][0]) == 255 and\
                   int(color_map[int(y_res[i])][int(x_res[i])][1]) == 0 and\
                   int(color_map[int(y_res[i])][int(x_res[i])][2]) == 0:
                    valid = False
            for i in range(len(x_res)):
                if int(color_map[int(y_res[i])][int(x_res[i])][0]) == 0 and\
                   int(color_map[int(y_res[i])][int(x_res[i])][1]) == 255 and\
                   int(color_map[int(y_res[i])][int(x_res[i])][2]) == 0:
                    valid = False
        if valid:
            # use compressed coordinates to access a new node
            comp_x, comp_y, comp_angle = compress_coordinates(
                math.floor(x_res[-1]),
                math.floor(y_res[-1]),
                theta,
                thresh=thresh
            )
            c2c = curr_node.c2c + cost
            # straight-line distance from the arc's endpoint to the goal
            c2g = np.sqrt((x_res[-1]-goal_location[1])**2 + (y_res[-1]-goal_location[0])**2)
            h = c2g+c2c
            new_node = board[comp_y][comp_x][comp_angle]
            # relax: adopt this arc if it reaches the region more cheaply
            if h < new_node.h:
                new_node.parent = curr_node
                new_node.cell_location = [y_res[-1], x_res[-1], theta]
                new_node.c2c = c2c
                new_node.c2g = c2g
                new_node.h = h
                new_node.local_path = [x_res, y_res]
                new_node.command = action
                next_nodes.append(new_node)
    return next_nodes
# this is the backtracking function
# returns a list of nodes in order to find the solution
def get_solution_path(curr_node):
    """Backtrack parent links from the goal node and return the chain start-first."""
    chain = []
    while curr_node:
        chain.append(curr_node)
        curr_node = curr_node.parent
    chain.reverse()
    return chain
# get the command to get to each spot in the solution path
# the first element will always be 0 since we dont have to move to get there
def get_commands(solution_path):
    """Collect each node's wheel command along the path, dropping falsy entries."""
    return [cmd for cmd in (node.command for node in solution_path) if cmd]
# plot the chronologically ordered list of nodes in closed nodes, and then generate and plot the path for the solution
def animate(color_map, closed_nodes, solution_path):
    """Render the search to 'test.avi' (DIVX, 60 fps, 200x200).

    Explored arcs are drawn white, one frame per expanded node, then the
    backtracked solution is drawn in red on top. Frames are vertically
    flipped so the map origin appears at the bottom of the video.
    """
    # draw explored nodes
    out = cv2.VideoWriter('test.avi',cv2.VideoWriter_fourcc(*'DIVX'), 60, (200, 200))
    for node in closed_nodes[1:]:
        xs = node.local_path[0]
        ys = node.local_path[1]
        if len(xs) > 0:
            for i in range(1, len(xs)): # get the number of points in the local path
                cv2.line(color_map,np.array([xs[i], ys[i]], dtype=np.int32),np.array([xs[i-1], ys[i-1]], dtype=np.int32),[255, 255, 255],1)
            out.write(np.flipud(color_map))
    # draw the backtracked best path
    for node in solution_path[1:]:
        xs = node.local_path[0]
        ys = node.local_path[1]
        if len(xs) > 0:
            for i in range(1, len(xs)): # get the number of points in the local path
                cv2.line(color_map,np.array([xs[i], ys[i]], dtype=np.int32),np.array([xs[i-1], ys[i-1]], dtype=np.int32),[0, 0, 255],1)
            out.write(np.flipud(color_map))
    out.release()
# get the start and end locations bounded by board size.
# does not check for obstacles and margin
def get_inputs():
    """Prompt for the start and goal poses, bounded by the board size.

    Coordinates are entered in meters and converted to board cells
    (20 cells per meter, board is 200x200). Angles are normalised to
    [0, 360) degrees. Does not check for obstacles and margin.

    Returns:
        (start_location, goal_location), each a [y, x, theta] list.
    """
    def _read_cell(prompt):
        # Re-prompt until the converted cell index lands on the board.
        # (The original only retried once, so a second bad entry slipped through.)
        value = int(float(input(prompt)) * 20)
        while value not in range(0, 200):
            value = int(float(input(prompt)) * 20)
        return value

    start_x = _read_cell('What is your start x coordinate in meters [0, 10)')
    start_y = _read_cell('What is your start y coordinate in meters [0, 10)')
    # Fix: angles wrap at 360 degrees, not 365 (typo in the original).
    start_theta = float(input('What is your start theta in degrees')) % 360
    start_location = [start_y, start_x, start_theta]
    goal_x = _read_cell('What is your goal x coordinate in meters [0, 10)')
    goal_y = _read_cell('What is your goal y coordinate in meters [0, 10)')
    goal_theta = float(input('What is your goal theta in degrees')) % 360
    goal_location = [goal_y, goal_x, goal_theta]
    return start_location, goal_location
rpms = []  # unused: kept only because gen_next_nodes() is called with it below
# color map size
# board size will be based off of the color map and threshold
width = 200
height = 200
thresh = 1
# robot_radius = 0.177 m * 20 blocks/meter = 3.54 round up to 4
clearance = 4  # default clearance in cells; immediately overwritten by the prompt below
clearance = int(float(input("What is your clearance in meters: 0.2 is default"))*20)
while clearance not in range(0, height):
    clearance = int(float(input("What is your clearance in meters: 0.2 is default"))*20)
# starting parameters
start_location, goal_location = get_inputs()
print('Building Color Map')
# obstacles are inflated by robot radius (4 cells) + clearance
color_map = create_color_map(height = height, width = width, radius=4 + clearance, goal_location=goal_location)
print('Building Board')
board = create_board(width=width, height=height, thresh=thresh)
plt.figure(figsize=(10, 10))
plt.imshow(color_map, origin='lower')
# map the exact start/goal coordinates into discretised board regions
compressed_x_start, compressed_y_start, compressed_angle_start = compress_coordinates(
    start_location[1],
    start_location[0],
    start_location[2],
    thresh=thresh
)
compressed_x_goal, compressed_y_goal, compressed_angle_goal = compress_coordinates(
    goal_location[1],
    goal_location[0],
    goal_location[2],
    thresh=thresh
)
print(f'Starting in region x: {compressed_x_start}, y: {compressed_y_start}, theta: {compressed_angle_start}')
print(f'Goal in region x: {compressed_x_goal}, y: {compressed_y_goal}, theta: {compressed_angle_goal}')
start_node = board[compressed_y_start][compressed_x_start][compressed_angle_start]
goal_node = board[compressed_y_goal][compressed_x_goal][compressed_angle_goal]
start_node.c2c = 0
goal_region = goal_node.region
# A* search: open_nodes is kept sorted by total heuristic cost h = c2c + c2g
open_nodes = [start_node]
closed_nodes = []
found = False
solution_path = None
commands = None
print(f'Searching for goal region: {goal_region}')
while len(open_nodes) > 0:
    # NOTE(review): a full sort each iteration is O(n log n); heapq would be faster
    open_nodes.sort(key=lambda x: x.h)
    curr_node = open_nodes.pop(0)
    closed_nodes.append(curr_node)
    curr_x = curr_node.cell_location[1]
    curr_y = curr_node.cell_location[0]
    print(f"Current node has exact coordinates of x:{curr_x} y:{curr_y} Theta:{curr_node.cell_location[2]}")
    print(f"Current node is in region coordinates of {curr_node.region}")
    # print(f"Current node has coordinates of {curr_node.cell_location}")
    # goal test: the goal region is painted pure red ([0, 0, 255], BGR) on the color map
    if int(color_map[int(curr_y)][int(curr_x)][0]) == 0 and\
            int(color_map[int(curr_y)][int(curr_x)][1]) == 0 and\
            int(color_map[int(curr_y)][int(curr_x)][2]) == 255:
        # if curr_node.region[:2] == goal_node.region[:2]:
        print('Found Solution')
        found = True
        print('Animating Search Pattern')
        # back track and animate the search and solution
        solution_path = get_solution_path(curr_node)
        commands = get_commands(solution_path)
        animate(color_map, closed_nodes, solution_path)
        break
    else:
        next_possible_nodes = gen_next_nodes(
            curr_node=curr_node,
            board=board,
            goal_location=goal_location,
            color_map=color_map,
            thresh=thresh,
            rpms = rpms
        )
        # only enqueue nodes whose region is not already open or closed
        for node in next_possible_nodes:
            appendable = True
            for o_node in open_nodes:
                if o_node.region == node.region:
                    appendable = False
                    break
            if appendable:
                for c_node in closed_nodes:
                    if c_node.region == node.region:
                        appendable = False
                        break
            if appendable:
                open_nodes.append(node)
if not found:
    print('No Solution')
# plt.imsave('test.jpg', np.flipud(color_map))
import rospy
from geometry_msgs.msg import Twist
import math
def calc_vels(command):
    """Convert a wheel-speed command into Twist messages and publish them.

    *command* is a two-element sequence of wheel angular speeds — presumably
    [left, right] in rad/s; TODO confirm against gen_next_nodes().
    Publishes the same Twist 10 times at 10 Hz (i.e. drives for ~1 second).
    """
    rospy.init_node('a_star_turtle')
    cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=10)
    rate = rospy.Rate(10)  # 10 Hz publish rate
    # mean wheel angular speed for the straight-line component
    rads_per_s = ((command[0]) + (command[1]))/2
    # NOTE(review): r and L are module-level globals not defined in this file's
    # visible portion — presumably wheel radius and wheelbase; verify before use.
    # The /20 maps board cells (20 cells/meter) back to meters.
    d_lin = r*rads_per_s/20
    d_theta = (r/L)*(command[1]-command[0])
    for num in range(10):
        print(f"X_d: {d_lin}, Th_d: {d_theta}")
        move_cmd = Twist()
        move_cmd.linear.x = d_lin
        move_cmd.angular.z = d_theta
        cmd_vel.publish(move_cmd)
        rate.sleep()
# set all velocities to 0
def stop_bot():
    """Publish a single zero-velocity Twist on cmd_vel to halt the robot."""
    cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=10)
    move_cmd = Twist()
    move_cmd.linear.x = 0
    move_cmd.angular.z = 0
    cmd_vel.publish(move_cmd)
# this loop skips the first node which has no command,
# but does have the previous theta which we need
# scale down the angular velocities, to map from board coordinates to gazebo coordinates
# for node in solution_path:
# calc_vels(node.command)
# stop_bot()
|
<filename>analysis/str_parser.py
from ipaddress import ip_address
from typing import List
from scipy.spatial import distance
import analysis.p_types as p_types
from analysis.ip_base import IPv6_or_IPv4_obj
from analysis.itxyek_base import ITXYEK
POINT_SPLITTER = ":"
COORDINATE_SPLITTER = ","
class ITXYStrToArray:
    """Parses the client-supplied point string, e.g.
    "1520095100,25,690:1520095100, 30, 650:"
    (colon-separated points, each a comma-separated "t,x,y" triple).
    """

    def __init__(self, data_string: str):
        self.txy_string = data_string

    def points_as_list_of_strings(self) -> list:
        """Split on ':' and drop empty chunks (e.g. the trailing one)."""
        return [chunk for chunk in self.txy_string.split(POINT_SPLITTER) if chunk]

    @property
    def itxyek_lists(self) -> ITXYEK:
        lists = ITXYEK()
        for index, point_str in enumerate(self.points_as_list_of_strings()):
            t_str, x_str, y_str = point_str.split(',')
            lists.indices.append(index)
            lists.time.append(int(t_str))
            lists.x.append(int(x_str))
            # y-axis goes downwards in browsers unlike cartesian, so negate it
            lists.y.append(-int(y_str))
            lists.e.append(p_types.EntryOrExit())
            lists.k.append(p_types.KeyOrMouse())
        return lists
class DataExtractor:
    """Extracts user identity and pointer-trajectory data from a request.

    ``req`` is expected to expose ``.json`` and ``.remote_addr`` (a
    Flask/Werkzeug-style request object — TODO confirm against the caller).
    """

    def __init__(self, req):
        self.req = req
        self.json = req.json
        # Parsed i/t/x/y lists; entry/exit and key/mouse flags are placeholders
        # until itxyek_lists() back-fills them.
        self._itxyek_lists = ITXYStrToArray(data_string=self._mouse_txy_str()).itxyek_lists
        self.maximum_itxyek_index = self._itxyek_lists.indices[-1]

    def _mouse_txy_str(self) -> str:
        # raw "t,x,y:t,x,y:..." string sent by the client
        return self.json["mouse_txy"]

    def user_id(self) -> int:
        return int(self.json["userID"])

    def user_ip(self) -> IPv6_or_IPv4_obj:
        return ip_address(self.req.remote_addr)

    def _exit_indices_str(self) -> str:
        return self.json["mouse_exit_txy_indices"]

    def _mouse_exit_indices(self) -> List[int]:
        # exit points explicitly reported by the client (mouse left the page)
        return [int(s) for s in self._exit_indices_str().split(POINT_SPLITTER) if s]

    def _key_exit_indices(self) -> List[int]:
        # exit points deduced heuristically (ALT-TAB style inactivity gaps)
        return AltTabPoints().exit_indices(itxyek=self._itxyek_lists)

    def exit_indices(self) -> List[int]:
        """All exit indices (reported + deduced), sorted ascending."""
        indices_list = self._mouse_exit_indices() + self._key_exit_indices()
        indices_list.sort()
        return indices_list

    def entry_point_index_out_of_range(self, index) -> bool:
        return index > self.maximum_itxyek_index

    def _entry_indices_base(self, exit_indices) -> List[int]:
        entry_i_list = [0, ]  # first point in TXY, is always an entry point
        for exit_i in exit_indices:
            # the next point after an exit point, is always an entry point
            entry_i = exit_i + 1
            if self.entry_point_index_out_of_range(index=entry_i):
                break
            entry_i_list.append(entry_i)
        return entry_i_list

    def _mouse_entry_indices(self) -> List[int]:
        return self._entry_indices_base(exit_indices=self._mouse_exit_indices())

    def _key_entry_indices(self) -> List[int]:
        return self._entry_indices_base(exit_indices=self._key_exit_indices())

    def itxyek_lists(self) -> ITXYEK:
        """Return the point lists with entry/exit and key/mouse flags filled in.

        NOTE: mutates and returns the cached ``self._itxyek_lists`` in place;
        entry flags are applied last, so an index that is both exit and entry
        ends up marked as an entry.
        """
        full_itxyek_lists = self._itxyek_lists
        for mouse_exit_i in self._mouse_exit_indices():
            full_itxyek_lists.e[mouse_exit_i] = p_types.Exit()
            full_itxyek_lists.k[mouse_exit_i] = p_types.Mouse()
        for key_exit_i in self._key_exit_indices():
            full_itxyek_lists.e[key_exit_i] = p_types.Exit()
            full_itxyek_lists.k[key_exit_i] = p_types.Key()
        for mouse_entry_i in self._mouse_entry_indices():
            full_itxyek_lists.e[mouse_entry_i] = p_types.Entry()
            full_itxyek_lists.k[mouse_entry_i] = p_types.Mouse()
        for key_entry_i in self._key_entry_indices():
            full_itxyek_lists.e[key_entry_i] = p_types.Entry()
            full_itxyek_lists.k[key_entry_i] = p_types.Key()
        return full_itxyek_lists
class AltTabPoints:
    """
    When pressing ALT TAB in Tor, the ALT key isn't registered.
    It could be deduced from seeing the mouse stationary for a while,
    then suddenly appearing in a distant location.
    WARNING: prone to false positives.
    The same pattern is probably observed when:
    - using CTR SHIFT PRINTSCREEN.
    - a popup window appears
    - ALT TABs to a non browser window
    Thankfully, it has to coincide with respective critical point in the other browser
    to become a false positive.
    """
    MIN_INACTIVITY = 300  # min delta-t of entry/exit (in same browser)
    MAX_INACTIVITY = 30000
    MIN_S = 50

    @staticmethod
    def _inactivity_in_bounds(t2: int, t1: int) -> bool:
        gap = t2 - t1
        return AltTabPoints.MIN_INACTIVITY < gap < AltTabPoints.MAX_INACTIVITY

    @staticmethod
    def _distance_adequate(s: float) -> bool:
        """
        When switching tab with ALT TAB, usually the user will move his mouse,
        until he gets back to the original browser.
        Meaning there should be a distance between the point he stopped moving the mouse
        and the point he started moving it again.
        """
        return s > AltTabPoints.MIN_S

    def exit_indices(self, itxyek: ITXYEK) -> List[int]:
        """Indices of suspected ALT-TAB exit points (gap in time AND space)."""
        suspects = []
        for i, t1, x1, y1, *_ in itxyek.as_iterator():
            nxt = i + 1
            if nxt not in itxyek.indices:
                break
            t2 = itxyek.time[nxt]
            gap_distance = distance.euclidean([x1, y1], [itxyek.x[nxt], itxyek.y[nxt]])
            if self._inactivity_in_bounds(t2=t2, t1=t1) and self._distance_adequate(s=gap_distance):
                suspects.append(i)
        return suspects
|
#!/usr/bin/env python
"""
Common utility functions
"""
import os
import re
import sys
import gzip
import bz2
import numpy
def init_gene():
    """
    Initializing the gene structure

    Returns the (field_name, field_type) pairs describing a gene record;
    fields holding arbitrary Python objects use ``numpy.dtype`` as their spec.
    """
    obj = numpy.dtype  # placeholder type for object-valued fields
    field_spec = {
        'id': 'f8',
        'anno_id': obj,
        'confgenes_id': obj,
        'name': 'S25',
        'source': 'S25',
        'gene_info': obj,
        'alias': 'S15',
        'name2': obj,
        'strand': 'S2',
        'score': 'S15',
        'chr': 'S15',
        'chr_num': obj,
        'paralogs': obj,
        'start': 'f8',
        'stop': 'f8',
        'transcripts': obj,
        'transcript_type': obj,
        'transcript_info': obj,
        'transcript_status': obj,
        'transcript_valid': obj,
        'exons': obj,
        'exons_confirmed': obj,
        'cds_exons': obj,
        'utr5_exons': obj,
        'utr3_exons': obj,
        'tis': obj,
        'tis_conf': obj,
        'tis_info': obj,
        'cdsStop': obj,
        'cdsStop_conf': obj,
        'cdsStop_info': obj,
        'tss': obj,
        'tss_info': obj,
        'tss_conf': obj,
        'cleave': obj,
        'cleave_info': obj,
        'cleave_conf': obj,
        'polya': obj,
        'polya_info': obj,
        'polya_conf': obj,
        'is_alt': 'f8',
        'is_alt_spliced': 'f8',
        'is_valid': obj,
        'transcript_complete': obj,
        'is_complete': obj,
        'is_correctly_gff3_referenced': 'S5',
        'splicegraph': obj,
    }
    return list(field_spec.items())
def open_file(fname):
    """
    Open the file (supports .gz .bz2) and returns the handler

    @args fname: input file name for reading
    @type fname: str
    """
    try:
        if os.path.splitext(fname)[1] == ".gz":
            FH = gzip.open(fname, 'rb')
        elif os.path.splitext(fname)[1] == ".bz2":
            FH = bz2.BZ2File(fname, 'rb')
        else:
            # Fix: the 'U' (universal newlines) mode flag was deprecated and
            # removed in Python 3.11; plain 'r' already uses universal newlines.
            FH = open(fname, 'r')
    except Exception as error:
        # keep the original behaviour: abort the program with the error message
        sys.exit(error)
    return FH
def _assign_phases(ordered_cds):
    """Assign a GFF phase (0/1/2) to each CDS exon, in translation order.

    The first exon gets phase 0; each subsequent exon's phase is derived from
    the running remainder of coding bases modulo 3.
    """
    region = []
    diff = 0
    first = True
    for cdspos in ordered_cds:
        if first:
            phase = 0
            diff = (cdspos[1] - (cdspos[0] - 1)) % 3
            first = False
        else:
            xy = 0
            if diff == 0:
                phase = 0
            elif diff == 1:
                phase, xy = 2, 2
            else:  # diff == 2
                phase, xy = 1, 1
            diff = ((cdspos[1] - (cdspos[0] - 1)) - xy) % 3
        region.append((cdspos[0], cdspos[1], phase))
    return region


def add_CDS_phase(strand, cds):
    """
    Calculate CDS phase and add to the CDS exons

    @args strand: feature strand information
    @type strand: +/-
    @args cds: coding exon coordinates
    @type cds: numpy array [[int, int, int]]

    Returns the exon list with a phase appended to each (start, stop) pair.
    NOTE: for '-' strand the input list is reversed in place (matching the
    original behaviour) so phases are computed in translation order, then the
    result is flipped back to genomic order.
    """
    # The two strand branches previously duplicated the phase loop verbatim;
    # it now lives in _assign_phases().
    cds_region = []
    if strand == '+':
        cds_region = _assign_phases(cds)
    elif strand == '-':
        cds.reverse()
        cds_region = _assign_phases(cds)
        cds_region.reverse()
    return cds_region
def buildUTR(cc, ec, strand):
    """
    Build UTR regions from a given set of CDS and exon coordiantes of a gene

    @args cc: coding exon coordinates
    @type cc: numpy array [[int, int, int]]
    @args ec: exon coordinates
    @type ec: numpy array [[int, int]]
    @args strand: feature strand information
    @type strand: +/-

    Returns (utr5, utr3). Whole non-coding exons are appended as-is; the exon
    containing the CDS boundary is truncated at that boundary (as a tuple).
    """
    utr5 = []
    utr3 = []
    if strand == '+':
        # 5' UTR: exons strictly before the CDS start, plus the partial exon
        cds_s = cc[0][0]
        for ex in ec:
            if ex[0] <= cds_s and cds_s <= ex[1]:
                if ex[0] != cds_s:
                    utr5.append((ex[0], cds_s-1))
                break
            else:
                utr5.append(ex)
        # 3' UTR: walk exons from the end backwards until the CDS stop
        cds_e = cc[-1][1]
        for i in range(len(ec)):
            i += 1  # use 1-based offset so ec[-i] walks from the last exon
            if ec[-i][0] <= cds_e and cds_e <= ec[-i][1]:
                if ec[-i][1] != cds_e:
                    utr3.append((cds_e +1, ec[-i][1]))
                break
            else:
                utr3.append(ec[-i])
        utr3.reverse()  # restore genomic (ascending) order
    elif strand == '-':
        # mirrored logic: the 5' UTR lies at the high-coordinate end
        cds_s = cc[-1][1]
        for i in range(len(ec)):
            i += 1
            if ec[-i][0] <= cds_s and cds_s <= ec[-i][1]:
                if ec[-i][1] != cds_s:
                    utr5.append((cds_s+1, ec[-i][1]))
                break
            else:
                utr5.append(ec[-i])
        utr5.reverse()
        cds_e = cc[0][0]
        for ex in ec:
            if ex[0] <= cds_e and cds_e <= ex[1]:
                if ex[0] != cds_e:
                    utr3.append((ex[0], cds_e-1))
                break
            else:
                utr3.append(ex)
    return utr5, utr3
def make_Exon_cod(strand_p, five_p_utr, cds_cod, three_p_utr):
    """
    Create exon cordinates from UTR's and CDS region

    @args strand_p: feature strand information
    @type strand_p: +/-
    @args five_p_utr: five prime utr exon coordinates
    @type five_p_utr: numpy array [[int, int]]
    @args cds_cod: coding exon coordinates
    @type cds_cod: numpy array [[int, int, int]]
    @args three_p_utr: three prime utr exon coordinates
    @type three_p_utr: numpy array [[int, int]]

    A UTR segment that abuts (gap of 0 or 1) a CDS segment is merged with it
    into a single "junction" exon; the remaining segments are emitted in
    genomic order. The '+' and '-' branches mirror each other, with the roles
    of the 5' and 3' UTRs swapped.
    """
    exon_pos = []
    if strand_p == '+':
        # does the last 5' UTR segment touch the first CDS segment?
        utr5_start, utr5_end = 0, 0
        if five_p_utr != []:
            utr5_start, utr5_end = five_p_utr[-1][0], five_p_utr[-1][1]
        cds_5start, cds_5end = cds_cod[0][0], cds_cod[0][1]
        jun_exon = []
        if cds_5start-utr5_end == 0 or cds_5start-utr5_end == 1:
            jun_exon = [utr5_start, cds_5end]
        if len(cds_cod) == 1:
            # single-CDS gene: the one CDS may fuse with UTRs on both sides
            five_prime_flag = 0
            if jun_exon != []:
                five_p_utr = five_p_utr[:-1]
                five_prime_flag = 1
            for utr5 in five_p_utr:
                exon_pos.append(utr5)
            jun_exon = []
            utr3_start, utr3_end = 0, 0
            if three_p_utr != []:
                utr3_start = three_p_utr[0][0]
                utr3_end = three_p_utr[0][1]
            if utr3_start-cds_5end == 0 or utr3_start-cds_5end == 1:
                jun_exon = [cds_5start, utr3_end]
            three_prime_flag = 0
            if jun_exon != []:
                cds_cod = cds_cod[:-1]
                three_p_utr = three_p_utr[1:]
                three_prime_flag = 1
            # emit the merged exon covering whichever sides fused
            if five_prime_flag == 1 and three_prime_flag == 1:
                exon_pos.append([utr5_start, utr3_end])
            if five_prime_flag == 1 and three_prime_flag == 0:
                exon_pos.append([utr5_start, cds_5end])
                cds_cod = cds_cod[:-1]
            if five_prime_flag == 0 and three_prime_flag == 1:
                exon_pos.append([cds_5start, utr3_end])
            for cds in cds_cod:
                exon_pos.append(cds)
            for utr3 in three_p_utr:
                exon_pos.append(utr3)
        else:
            # multi-CDS gene: handle the 5' junction, then the 3' junction
            if jun_exon != []:
                five_p_utr = five_p_utr[:-1]
                cds_cod = cds_cod[1:]
            for utr5 in five_p_utr:
                exon_pos.append(utr5)
            # conditional-expression used purely for its side effect
            exon_pos.append(jun_exon) if jun_exon != [] else ''
            jun_exon = []
            utr3_start, utr3_end = 0, 0
            if three_p_utr != []:
                utr3_start = three_p_utr[0][0]
                utr3_end = three_p_utr[0][1]
            cds_3start = cds_cod[-1][0]
            cds_3end = cds_cod[-1][1]
            if utr3_start-cds_3end == 0 or utr3_start-cds_3end == 1:
                jun_exon = [cds_3start, utr3_end]
            if jun_exon != []:
                cds_cod = cds_cod[:-1]
                three_p_utr = three_p_utr[1:]
            for cds in cds_cod:
                exon_pos.append(cds)
            exon_pos.append(jun_exon) if jun_exon != [] else ''
            for utr3 in three_p_utr:
                exon_pos.append(utr3)
    elif strand_p == '-':
        # mirror of the '+' branch: 3' UTR sits at the low-coordinate end
        utr3_start, utr3_end = 0, 0
        if three_p_utr != []:
            utr3_start = three_p_utr[-1][0]
            utr3_end = three_p_utr[-1][1]
        cds_3start = cds_cod[0][0]
        cds_3end = cds_cod[0][1]
        jun_exon = []
        if cds_3start-utr3_end == 0 or cds_3start-utr3_end == 1:
            jun_exon = [utr3_start, cds_3end]
        if len(cds_cod) == 1:
            three_prime_flag = 0
            if jun_exon != []:
                three_p_utr = three_p_utr[:-1]
                three_prime_flag = 1
            for utr3 in three_p_utr:
                exon_pos.append(utr3)
            jun_exon = []
            (utr5_start, utr5_end) = (0, 0)
            if five_p_utr != []:
                utr5_start = five_p_utr[0][0]
                utr5_end = five_p_utr[0][1]
            if utr5_start-cds_3end == 0 or utr5_start-cds_3end == 1:
                jun_exon = [cds_3start, utr5_end]
            five_prime_flag = 0
            if jun_exon != []:
                cds_cod = cds_cod[:-1]
                five_p_utr = five_p_utr[1:]
                five_prime_flag = 1
            if three_prime_flag == 1 and five_prime_flag == 1:
                exon_pos.append([utr3_start, utr5_end])
            if three_prime_flag == 1 and five_prime_flag == 0:
                exon_pos.append([utr3_start, cds_3end])
                cds_cod = cds_cod[:-1]
            if three_prime_flag == 0 and five_prime_flag == 1:
                exon_pos.append([cds_3start, utr5_end])
            for cds in cds_cod:
                exon_pos.append(cds)
            for utr5 in five_p_utr:
                exon_pos.append(utr5)
        else:
            if jun_exon != []:
                three_p_utr = three_p_utr[:-1]
                cds_cod = cds_cod[1:]
            for utr3 in three_p_utr:
                exon_pos.append(utr3)
            if jun_exon != []:
                exon_pos.append(jun_exon)
            jun_exon = []
            (utr5_start, utr5_end) = (0, 0)
            if five_p_utr != []:
                utr5_start = five_p_utr[0][0]
                utr5_end = five_p_utr[0][1]
            cds_5start = cds_cod[-1][0]
            cds_5end = cds_cod[-1][1]
            if utr5_start-cds_5end == 0 or utr5_start-cds_5end == 1:
                jun_exon = [cds_5start, utr5_end]
            if jun_exon != []:
                cds_cod = cds_cod[:-1]
                five_p_utr = five_p_utr[1:]
            for cds in cds_cod:
                exon_pos.append(cds)
            if jun_exon != []:
                exon_pos.append(jun_exon)
            for utr5 in five_p_utr:
                exon_pos.append(utr5)
    return exon_pos
|
import asyncio
import json
import os
from crypt import Bcrypt
from datetime import datetime
import numpy as np
import pandas as pd
import cherrypy
def convert(o):
    """``json.dumps`` ``default`` hook: coerce numpy scalars to native types.

    Generalized from the exact np.int64/np.float64 checks to the numpy
    abstract bases, so all integer/float scalar widths serialize correctly
    (np.int64 is an np.integer, np.float64 an np.floating — backward
    compatible). Any other type falls through to None, which json encodes
    as null (matching the original behaviour).
    """
    if isinstance(o, np.integer):
        return int(o)
    if isinstance(o, np.floating):
        return float(o)
class HomePage(object):
    """Root handler: serves the single-page client."""
    @cherrypy.expose
    def index(self):
        # cherrypy streams the returned open file object as the response body
        return open('./static/client.html')
@cherrypy.expose
class APIv1(object):
    """JSON API backing the client page.

    Student records are read from an input Excel sheet; edits are persisted to
    an output Excel sheet. Spreadsheet column names and client-facing aliases
    are translated via the ``ntoa`` (name->alias) and ``aton`` (alias->name)
    maps built from config.
    """

    def __init__(self):
        # user config overrides the defaults key-by-key (shallow merge)
        with open("./config.json") as f, open("./default-config.json") as df:
            user_config = json.loads(f.read())
            default_config = json.loads(df.read())
            self.config = {**default_config, **user_config}
        self.inputFile = self.config['inputFile']
        self.outputFile = self.config['outputFile']
        self.fieldNames = [self.config['idField']
                           ] + [f['fieldName'] for f in self.config['fields']]
        self.fieldAliases = ['id'] + [
            f['fieldAlias'] for f in self.config['fields']
        ]
        if self.config['updateTime']:
            # NOTE(review): "updatedAt" gets no alias, so the zip() below
            # leaves it out of ntoa — confirm this is intended.
            self.fieldNames.append("updatedAt")
        self.ntoa = dict(zip(self.fieldNames, self.fieldAliases))
        self.aton = dict(zip(self.fieldAliases, self.fieldNames))
        self.ntoa[self.config['idField']] = 'id'
        self.aton['id'] = self.config['idField']
        self.inputDf = pd.read_excel(self.inputFile)
        if os.path.exists(self.outputFile):
            self.outputDf = pd.read_excel(self.outputFile)
        else:
            self.outputDf = pd.DataFrame(columns=self.fieldNames)
        self.allColumns = list(
            set(self.inputDf.columns) | set(self.outputDf.columns))
        # index both frames by the id field, as strings, so lookups match the
        # ids sent by the client
        idField = self.config['idField']
        self.inputDf.set_index(idField, inplace=True)
        self.outputDf.set_index(idField, inplace=True)
        self.inputDf.index = self.inputDf.index.astype(str)
        self.outputDf.index = self.outputDf.index.astype(str)

    @cherrypy.tools.accept(media='application/json')
    @cherrypy.tools.json_in()
    def POST(self):
        """Single JSON endpoint dispatching on the request's "action" field.

        Returns UTF-8 encoded JSON bytes; exceptions are wrapped into the
        {data, errors} envelope rather than propagated. An unknown action
        falls through and returns None.
        """
        data = cherrypy.request.json
        action = data['action']
        variables = data['variables']
        if action == "AUTH_STUDENT_QUERY":
            try:
                res = self.authStudentQuery(variables)
            except Exception as e:
                res = self.done(None, [{
                    "type": "Exception",
                    "message": str(e),
                }])
            return res
        if action == "UPDATE_STUDENT_MUTATION":
            try:
                res = self.updateStudentMutation(variables)
            except Exception as e:
                res = self.done(None, [{
                    "type": "Exception",
                    "message": str(e),
                }])
            return res

    def authStudentQuery(self, data):
        """Authenticate a student by id + password and return their record.

        Output-sheet values (edits) take precedence over input-sheet values.
        """
        pwField = self.config['passwordField']
        inputDfAlias = self.inputDf.rename(self.ntoa, axis=1)
        outputDfAlias = self.outputDf.rename(self.ntoa, axis=1)
        outputFields = self.ntoa.values()
        errors = []
        if data['id'] in inputDfAlias.index:
            student = inputDfAlias.loc[data['id']].copy()
            if data['id'] in outputDfAlias.index:  # If record exists, combine
                student = outputDfAlias.loc[data['id']].combine_first(student)
            student['id'] = data['id']
        else:
            errors.append({"type": "NotExistError", "message": "ID not exist"})
            return self.done(None, errors)
        if (self.checkPassword(data['pw'], student[pwField])):
            # TODO: Simplify Series -> json -> dict -> json procedure
            data = json.loads(student.reindex(outputFields).to_json())
            return self.done(data)
        else:
            errors.append({
                "type": "PasswordError",
                "message": "WrongPassword"
            })
            return self.done(None, errors)

    def updateStudentMutation(self, data):
        """Upsert the edited record (keyed by data['id']) into the output frame."""
        ser = pd.Series(data)
        # NOTE(review): relies on attribute access resolving the 'id' label
        ser.name = ser.id
        del ser['id']
        if self.config['updateTime']:
            ser['updatedAt'] = datetime.now().isoformat()
        if ser.name not in self.inputDf.index:
            return self.done(None, [{
                "type": "NotExistError",
                "message": "ID not exist",
            }])
        if ser.name in self.outputDf.index:
            self.outputDf.loc[ser.name] = ser.rename(self.aton)
        else:
            # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
            # pd.concat would be needed on newer pandas.
            self.outputDf = self.outputDf.append(ser.rename(self.aton))
        self.save_output()
        return self.done(data)

    def checkPassword(self, pwraw, pwenc):
        """Compare a raw password against the stored one per the configured
        scheme; returns None (falsy) for an unrecognised encryption setting.
        """
        encryption = self.config['encryption']
        if encryption == 'none':
            return pwraw == pwenc
        if encryption == 'bcrypt':
            return Bcrypt.check_password(pwraw, pwenc)

    def done(self, data, errors=None):
        """Encode a {data, errors} envelope as UTF-8 JSON bytes (numpy scalars
        handled by the module-level convert() hook)."""
        return json.dumps(
            {
                "data": data,
                "errors": errors,
            }, default=convert).encode("utf-8")

    def save_output(self):
        """Write the output sheet, optionally merged over the input rows."""
        if (self.config['mergeInput']):
            outputIndex = self.inputDf.index
            outputDf = self.outputDf.reindex(
                index=outputIndex, columns=self.allColumns)
            inputDf = self.inputDf.reindex(
                index=outputIndex, columns=self.allColumns)
            # edited values win over the original input values
            output = outputDf.combine_first(inputDf)
            output.to_excel(self.outputFile)
        else:
            self.outputDf.to_excel(self.outputFile)
if __name__ == '__main__':
    # cherrypy mount configuration:
    #   /       -> static root for the client page
    #   /apiv1  -> REST-style dispatch (POST etc.) returning JSON
    #   /static -> static assets directory
    conf = {
        '/': {
            'tools.staticdir.root': os.path.abspath(os.getcwd()),
        },
        '/apiv1': {
            'request.dispatch':
                cherrypy.dispatch.MethodDispatcher(),
            'tools.response_headers.on':
                True,
            'tools.response_headers.headers': [('Content-Type',
                                                'application/json')],
            'tools.encode.on':
                True,
        },
        '/static': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': './static',
        },
    }
    webapp = HomePage()
    # attribute name determines the mount point (/apiv1)
    webapp.apiv1 = APIv1()
    cherrypy.quickstart(webapp, '/', conf)
|
# Copyright 2016-2018 Hortonworks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import time
from fabric.context_managers import hide
from bman.kerberos_config_manager import KEYTABS_DEFAULT_DIR
from fabric.tasks import execute
from bman import constants
from fabric.api import task, sudo, env, put, local
from fabric.decorators import parallel
from fabric.operations import run
from bman.logger import get_logger
from fabric.contrib.files import exists as remote_exists
"""
Utilities used by other modules in bman.
"""
@task
def copy(source_file=None, remote_file=None):
    """Copies a file to remote machine if needed.

    The copy happens when the source is a wildcard path (no single hash to
    compare) or when should_copy() says the remote content differs.
    """
    # Fix: the original tested `or (source_file, remote_file)` — a tuple
    # literal, which is always truthy — so the hash check never ran and the
    # else branch was dead. The intended call is should_copy().
    if is_wildcard_path(source_file) or should_copy(source_file, remote_file):
        put(source_file, remote_file)
    else:
        get_logger().info('%s with the same hash already exists in destination. '
                          'skipping copy.', source_file)
    return True
def copy_hadoop_config_files(cluster):
    """ Copy the config to the right location."""
    conf_dir = cluster.get_hadoop_conf_dir()
    generated_dir = cluster.get_generated_hadoop_conf_tmp_dir()
    for generated_file in glob.glob(os.path.join(generated_dir, "*")):
        target = os.path.join(conf_dir, os.path.basename(generated_file))
        put(generated_file, target, use_sudo=True)
def copy_tez_config_files(cluster):
    """Copy the generated Tez config files to the remote Tez conf directory."""
    conf_dir = cluster.get_tez_conf_dir()
    config_files = glob.glob(os.path.join(cluster.get_generated_tez_conf_tmp_dir(), "*"))
    if config_files:
        # Fix: create the target directory once, not once per file — the
        # mkdir was loop-invariant. Guarded so an empty glob still creates
        # nothing, matching the original behaviour.
        sudo('mkdir -p {}'.format(conf_dir))
    for config_file in config_files:
        filename = os.path.basename(config_file)
        put(config_file, os.path.join(conf_dir, filename), use_sudo=True)
@task
def do_untar(tarball=None, target_folder=None, strip_level=0):
    """ untar the tarball to the right location."""
    sudo('mkdir -p {}'.format(target_folder))
    untar_cmd = 'tar -poxvzf {} -C {} --strip {}'.format(
        tarball, target_folder, strip_level)
    return sudo(untar_cmd).succeeded
@task
def start_stop_service(cluster, action, service_name, user=None):
    """ Starts or stops a service """
    get_logger().info('{} {} on {}'.format(action, service_name, env.host_string))
    hdfs_binary = '{}/bin/hdfs'.format(cluster.get_hadoop_install_dir())
    daemon_cmd = 'nohup {} --daemon {} {}'.format(hdfs_binary, action, service_name)
    return sudo(daemon_cmd, user=user).succeeded
def get_md5(source_file, local_file):
    """Returns MD5 of a file based on it is local or remote."""
    md5_cmd = get_command(source_file, local_file)
    if local_file:
        raw_output = local(md5_cmd, capture=True)
    else:
        raw_output = run(md5_cmd)
    return get_hash_string(raw_output, local_file)
def get_command(source_file, local_file):
    """ Gets the command to run based on OS. Linux vs. OS X"""
    if not local_file:
        # TODO: our remote machines are centos
        return 'md5sum ' + source_file
    uname = local('uname -s', capture=True)
    if uname.startswith('Darwin'):
        return 'md5 -q ' + source_file
    return 'md5sum ' + source_file
def get_hash_string(hash_string, local_file):
    """Parses the hash string based on which on we are running on."""
    if not local_file:
        # remote md5sum prints "<hash>  <filename>"
        return hash_string.split()[0].strip()
    uname = local('uname -s', capture=True)
    if uname.startswith('Darwin'):
        # `md5 -q` prints only the hash
        return hash_string.strip()
    return hash_string.split()[0]
def prompt_for_yes_no(msg):
    """returns a bool based on whether the user presses y/n."""
    accepted = ('y', 'n', 'yes', 'no')
    answer = None
    while answer not in accepted:
        answer = input('%s (y/n) ' % msg).lower()
    return answer in ('y', 'yes')
def get_tarball_destination(local_file):
    """
    Get the path for copying a tarball to a remote host.
    """
    tarball_name = os.path.basename(local_file)
    return os.path.join('/', 'tmp', tarball_name)
@task
def should_copy(source_file, remote_file):
    """Decides if we should copy a file or not by checking hash of the file.

    Returns True when the remote file is absent or its MD5 differs from the
    local file's; False when the remote content is already identical.
    """
    if remote_exists(remote_file):
        localmd5 = get_md5(source_file, True)
        remotemd5 = get_md5(remote_file, False)
        # Fix: the original returned `localmd5 == remotemd5`, i.e. "copy"
        # exactly when the content already matched (and skip when it
        # differed) — inverted. The caller's log line ("same hash already
        # exists ... skipping copy") shows the intended semantics.
        return localmd5 != remotemd5
    else:
        return True
@task
@parallel
def run_cmd(cmd_string=None, user=None):
    """
    Run the given command on a remote node.
    If user is not supplied then run as root.
    """
    result = sudo(cmd_string, user=user) if user else sudo(cmd_string)
    return result.succeeded
@task
def fast_copy(cluster, remote_file=None):
    """
    scp a file from one cluster node to the rest.
    This is useful when deploying from a geographically distant location
    since the uploads can take a while. With this we copy to a namenode
    and then use scp to copy from namenode to datanodes to get fast copy.
    We always run scp as the 'hdfs' user as password-less ssh between
    cluster hosts is guaranteed to work (we set it up during the deploy
    phase).
    The caller must later change permissions on the file on all hosts.
    """
    # every host except the one the file was already uploaded to (env.host)
    targets = set(cluster.get_all_hosts()).symmetric_difference({env.host})
    for i, host_name in enumerate(targets):
        # host-key checks disabled: cluster hosts are provisioned dynamically
        scp_cmd = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {} {}:{}'.format(
            remote_file, host_name, remote_file)
        get_logger().debug('Copying {} from {} to {} (node {} of {})'.format(
            remote_file, env.host, host_name, i+1, len(targets)))
        get_logger().debug('The copy command is {}'.format(scp_cmd))
        sudo(scp_cmd)
        # NOTE(review): disabled variant that ran scp as the HDFS user with
        # its sudo password; values were redacted (<PASSWORD>) upstream.
        # saved_password = <PASSWORD>.sudo_password
        # env.sudo_password = cluster.get_user_password(constants.HDFS_USER)
        # sudo(scp_cmd, user=constants.HDFS_USER)
        # env.sudo_password = <PASSWORD> # Restore the global fabric environment.
def put_to_all_nodes(cluster=None, source_file=None, remote_file=None):
    """
    Copy a file to all cluster nodes.

    Uploads once to the lexicographically-first host, then fans out with
    fast_copy. Returns True on success, False on failure.
    """
    source_node = sorted(list(cluster.get_all_hosts()))[0]
    get_logger().info("Copying the tarball {} to {}.".format(
        source_file, source_node))
    with hide('status', 'warnings', 'running', 'stdout', 'stderr',
              'user', 'commands'):
        if not execute(copy, hosts=source_node, source_file=source_file,
                       remote_file=remote_file):
            get_logger().error('copy failed.')
            return False
        if not execute(fast_copy, hosts=source_node, cluster=cluster, remote_file=remote_file):
            get_logger().error('fast copy failed.')
            return False
    # Fix: the original fell off the end and returned None on success, which
    # is falsy and indistinguishable from the failure returns for callers
    # that truth-test the result.
    return True
def run_dfs_command(cluster=None, cmd=None):
    """Run an HDFS command on the first NameNode host as the hdfs user.

    On kerberized clusters the command is wrapped in kinit/kdestroy using the
    hdfs headless keytab.
    """
    if cluster.is_kerberized():
        # Prepend a command to login as the hdfs superuser, and append a command
        # to destroy the credentials when done.
        hdfs_headless_principal = '{}@{}'.format(constants.HDFS_USER, cluster.get_realm())
        hdfs_headless_keytab = os.path.join(
            KEYTABS_DEFAULT_DIR, '{}.headless.keytab'.format(constants.HDFS_USER))
        cmd = 'kinit -kt {} {}'.format(hdfs_headless_keytab, hdfs_headless_principal) + \
            ' && ' + cmd + ' && ' + 'kdestroy'
    # Run the command on a NameNode host and as the 'hdfs' user.
    get_logger().debug("Running command '{}'".format(cmd))
    execute(run_cmd, hosts=cluster.get_hdfs_master_config().get_nn_hosts()[0:1],
            cmd_string=cmd, user=constants.HDFS_USER)
def is_true(input_string):
    """
    Return True if the input is a boolean True, or a string that
    matches 'true' or 'yes' (case-insensitive).
    Return False for all other string inputs.
    Raise TypeError otherwise.
    """
    # Fix: the docstring previously promised ValueError, but the code raises
    # TypeError (the correct choice for a wrong type). The docstring now
    # matches the behaviour; the raised type is unchanged so existing
    # except-clauses keep working.
    if isinstance(input_string, bool):
        return input_string  # Return as-is
    if isinstance(input_string, str):
        return input_string.lower() in ['true', 'yes']
    raise TypeError("Expected True/False. Got {}".format(input_string))
def is_wildcard_path(source_file):
    """Return True if *source_file* contains a shell glob wildcard."""
    return any(token in source_file for token in ('*', '?'))
def do_sleep(seconds):
    """Log, then block the current task for *seconds* seconds."""
    get_logger().info("sleeping for {} seconds".format(seconds))
    time.sleep(seconds)
if __name__ == '__main__':
pass
|
<reponame>HumanCellAtlas/ingest-common
#!/usr/bin/env python
"""
Class encapsulating implementation details on the Descriptor classes. Descriptors represent a portion of a metadata
schema.
"""
import re
IDENTIFIABLE_PROPERTIES = ["biomaterial_id", "process_id", "protocol_id", "file_name"]
class Descriptor():
    """ Parent class type. A Descriptor type encapsulate a small isolated amount of information about a portion of a
    metadata schema.
    """

    def get_dictionary_representation_of_descriptor(self):
        """ Returns a dict representing the Descriptor object.

        Abstract: every concrete subclass must override this.
        """
        raise NotImplementedError("Subclasses of Descriptor are required to override this method.")
class SchemaTypeDescriptor(Descriptor):
    """ Descriptor encapsulating "metadata" information about a single metadata schema file. """

    def __init__(self, metadata_schema_url):
        """ Parse *metadata_schema_url* into high_level_entity, domain_entity,
        module and version components.

        Raises Exception when the URL does not conform to the expected layout.
        """
        url_validation_regex = re.compile(
            r'^http[s]?://(?P<location>([^/]+/)*[^/]+)/' +
            r'(?P<high_level_entity>(type)|(module)|(core)|(system))/' +
            r'((?P<domain_entity>([^/]+/)*[^/]+)/)?' +
            r'(?P<version>(?P<version_number>(?P<major>\d+)(\.(?P<minor>\d+))?(\.(?P<rev>\d+))?)|(?P<latest>latest))/' +
            r'(?P<module>.*)$'
        )
        # Match once and reuse the result; the original re-ran the same match
        # five times (once for validation and once per extracted group).
        match = url_validation_regex.match(metadata_schema_url)
        if not match:
            raise Exception(
                f"ERROR: The metadata schema URL passed in for parsing {metadata_schema_url} does not conform to "
                f"expected format.")
        self.high_level_entity = match.group("high_level_entity")
        self.domain_entity = match.group("domain_entity")
        self.module = match.group("module")
        self.version = match.group("version")
        self.url = metadata_schema_url

    def get_module(self):
        """ Return the module (schema file name) component of the URL. """
        return self.module

    def get_dictionary_representation_of_descriptor(self):
        """ Returns a dictionary representation of the current schema descriptor object. """
        return self.__dict__
class SimplePropertyDescriptor(Descriptor):
    """Descriptor for a simple property of a metadata schema.

    A simple property is one with no children properties, which happens when
    the property is not associated with its own metadata schema.
    """

    def __init__(self, json_data):
        """Populate the descriptor from the top-level fields of *json_data*."""
        self.value_type = json_data.get("type")
        self.multivalue = self.value_type == "array"
        if self.multivalue:
            # Arrays describe their element type under the nested "items" key.
            self.value_type = json_data["items"]["type"]
        for attribute in ("format", "user_friendly", "description", "example", "guidelines"):
            setattr(self, attribute, json_data.get(attribute))
        # required, identifiable and external_reference live in the parent
        # metadata schema rather than in the property description itself, so
        # they start out False and are back-populated later.
        self.required = False
        self.identifiable = False
        self.external_reference = False

    def get_dictionary_representation_of_descriptor(self):
        """Return the attributes whose values are non-empty, always keeping
        booleans since both True and False are valid values for them."""
        return {key: value for (key, value) in self.__dict__.items() if value or isinstance(value, bool)}
class ComplexPropertyDescriptor(SimplePropertyDescriptor, Descriptor):
    """ A Descriptor encapsulating information about a complex property of a metadata schema. A complex property
    means that there exists an entire metadata schema to describe the property itself and usually contains children
    properties."""

    def __init__(self, json_data):
        """ Populate the descriptor from *json_data*, recursively building
        descriptors for any children properties.
        """
        super().__init__(json_data)
        # Populate metadata/information about the schema itself, derived from the URL.
        # Newer schema drafts use "$id" while older ones use "id".
        if "$id" in json_data:
            self.schema = SchemaTypeDescriptor(json_data["$id"])
        elif "id" in json_data:
            self.schema = SchemaTypeDescriptor(json_data["id"])
        else:
            self.schema = None
        # Names of children properties that the schema marks as required.
        self.required_properties = json_data.get("required")
        # Build a descriptor for each child property, keyed by property name.
        self.children_properties = {}
        for property_name, property_values in json_data.get("properties", {}).items():
            self.children_properties[property_name] = self._create_child_property_descriptor(
                property_name, property_values)

    def _create_child_property_descriptor(self, property_name, property_values):
        """ Build the descriptor for a single child property and back-populate
        its required/identifiable flags. """
        if "$schema" in property_values or "schema" in property_values:
            # The child is itself described by a full metadata schema.
            child_property_descriptor = ComplexPropertyDescriptor(property_values)
        elif "items" in property_values and (
                "$schema" in property_values["items"] or "schema" in property_values["items"]):
            # An array of schema-backed objects: describe the element schema and
            # flag the descriptor as multivalued.
            child_property_descriptor = ComplexPropertyDescriptor(property_values["items"])
            child_property_descriptor.multivalue = True
        else:
            child_property_descriptor = SimplePropertyDescriptor(property_values)
        # Make it required if the child property name is in the list of required properties.
        if self.required_properties and property_name in self.required_properties:
            child_property_descriptor.required = True
        # Make the property identifiable if the child property name is one of the listed hardcoded
        # identifiable properties.
        if property_name in IDENTIFIABLE_PROPERTIES:
            child_property_descriptor.identifiable = True
        return child_property_descriptor

    def get_schema_module_name(self):
        """ Returns the module name recorded in the schema descriptor.

        NOTE(review): raises AttributeError when no "$id"/"id" was present and
        self.schema is None — preserved as-is since callers may rely on it.
        """
        return self.schema.get_module()

    def get_dictionary_representation_of_descriptor(self):
        """ Returns a representation of the class as a dictionary with the following caveats:
        1) If the value of a key is None or empty but is NOT a boolean, then the attribute it omitted from the
        dictionary.
        2) If the value is of a SchemaTypeDescriptor type, convert it to a dictionary.
        3) Any child descriptors are flattened from being a list to simply added attributes where the key is the
        metadata schema name and the dictionary is the corresponding descriptor.
        """
        dictionary_representation = {}
        for (key, value) in self.__dict__.items():
            if key == "children_properties":
                # Flatten children into the top level, one entry per child name.
                for child_key, child_value in value.items():
                    self.add_key_value_to_dictionary_if_valid(child_key, child_value, dictionary_representation)
            else:
                self.add_key_value_to_dictionary_if_valid(key, value, dictionary_representation)
        return dictionary_representation

    @staticmethod
    def add_key_value_to_dictionary_if_valid(key, value, dictionary):
        """ Add *key* -> *value* to *dictionary*, skipping empty non-boolean
        values and converting nested Descriptors to dictionaries. """
        if not value and not isinstance(value, bool):
            return
        if issubclass(type(value), Descriptor):
            dictionary[key] = value.get_dictionary_representation_of_descriptor()
        else:
            dictionary[key] = value
|
<reponame>pulumi/pulumi-kubernetes-crds
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public API of this generated module: one entry per nested CRD output type
# emitted by crd2pulumi. Regenerate rather than editing this list by hand.
__all__ = [
    'IBMBlockCSISpec',
    'IBMBlockCSISpecController',
    'IBMBlockCSISpecControllerAffinity',
    'IBMBlockCSISpecControllerAffinityNodeAffinity',
    'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
    'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions',
    'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields',
    'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms',
    'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions',
    'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields',
    'IBMBlockCSISpecControllerAffinityPodAffinity',
    'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
    'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
    'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
    'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
    'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinity',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
    'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
    'IBMBlockCSISpecControllerTolerations',
    'IBMBlockCSISpecNode',
    'IBMBlockCSISpecNodeAffinity',
    'IBMBlockCSISpecNodeAffinityNodeAffinity',
    'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
    'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions',
    'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields',
    'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms',
    'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions',
    'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields',
    'IBMBlockCSISpecNodeAffinityPodAffinity',
    'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
    'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
    'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
    'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
    'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinity',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
    'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
    'IBMBlockCSISpecNodeTolerations',
    'IBMBlockCSISpecSidecars',
    'IBMBlockCSIStatus',
]
@pulumi.output_type
class IBMBlockCSISpec(dict):
    """
    IBMBlockCSISpec defines the desired state of IBMBlockCSI
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 controller: 'outputs.IBMBlockCSISpecController',
                 node: 'outputs.IBMBlockCSISpecNode',
                 image_pull_secrets: Optional[Sequence[str]] = None,
                 sidecars: Optional[Sequence['outputs.IBMBlockCSISpecSidecars']] = None):
        """
        IBMBlockCSISpec defines the desired state of IBMBlockCSI
        :param 'IBMBlockCSISpecControllerArgs' controller: IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
        :param 'IBMBlockCSISpecNodeArgs' node: IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
        """
        pulumi.set(__self__, "controller", controller)
        pulumi.set(__self__, "node", node)
        # Optional fields are only stored when explicitly supplied.
        if image_pull_secrets is not None:
            pulumi.set(__self__, "image_pull_secrets", image_pull_secrets)
        if sidecars is not None:
            pulumi.set(__self__, "sidecars", sidecars)

    @property
    @pulumi.getter
    def controller(self) -> 'outputs.IBMBlockCSISpecController':
        """
        IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
        """
        return pulumi.get(self, "controller")

    @property
    @pulumi.getter
    def node(self) -> 'outputs.IBMBlockCSISpecNode':
        """
        IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
        """
        return pulumi.get(self, "node")

    @property
    @pulumi.getter(name="imagePullSecrets")
    def image_pull_secrets(self) -> Optional[Sequence[str]]:
        """ Returns the stored ``image_pull_secrets`` value, if any. """
        return pulumi.get(self, "image_pull_secrets")

    @property
    @pulumi.getter
    def sidecars(self) -> Optional[Sequence['outputs.IBMBlockCSISpecSidecars']]:
        """ Returns the stored ``sidecars`` value, if any. """
        return pulumi.get(self, "sidecars")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecController(dict):
    """
    IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 repository: str,
                 tag: str,
                 affinity: Optional['outputs.IBMBlockCSISpecControllerAffinity'] = None,
                 image_pull_policy: Optional[str] = None,
                 tolerations: Optional[Sequence['outputs.IBMBlockCSISpecControllerTolerations']] = None):
        """
        IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
        :param 'IBMBlockCSISpecControllerAffinityArgs' affinity: Affinity is a group of affinity scheduling rules.
        :param str image_pull_policy: PullPolicy describes a policy for if/when to pull a container image
        """
        pulumi.set(__self__, "repository", repository)
        pulumi.set(__self__, "tag", tag)
        # Optional fields are only stored when explicitly supplied.
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if image_pull_policy is not None:
            pulumi.set(__self__, "image_pull_policy", image_pull_policy)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)

    @property
    @pulumi.getter
    def repository(self) -> str:
        """ Returns the stored ``repository`` value. """
        return pulumi.get(self, "repository")

    @property
    @pulumi.getter
    def tag(self) -> str:
        """ Returns the stored ``tag`` value. """
        return pulumi.get(self, "tag")

    @property
    @pulumi.getter
    def affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinity']:
        """
        Affinity is a group of affinity scheduling rules.
        """
        return pulumi.get(self, "affinity")

    @property
    @pulumi.getter(name="imagePullPolicy")
    def image_pull_policy(self) -> Optional[str]:
        """
        PullPolicy describes a policy for if/when to pull a container image
        """
        return pulumi.get(self, "image_pull_policy")

    @property
    @pulumi.getter
    def tolerations(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerTolerations']]:
        """ Returns the stored ``tolerations`` value, if any. """
        return pulumi.get(self, "tolerations")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinity(dict):
    """
    Affinity is a group of affinity scheduling rules.
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 node_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinity'] = None,
                 pod_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinity'] = None,
                 pod_anti_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinity'] = None):
        """
        Affinity is a group of affinity scheduling rules.
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityArgs' node_affinity: Describes node affinity scheduling rules for the pod.
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityArgs' pod_affinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityArgs' pod_anti_affinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        # All fields are optional; only store the ones explicitly supplied.
        if node_affinity is not None:
            pulumi.set(__self__, "node_affinity", node_affinity)
        if pod_affinity is not None:
            pulumi.set(__self__, "pod_affinity", pod_affinity)
        if pod_anti_affinity is not None:
            pulumi.set(__self__, "pod_anti_affinity", pod_anti_affinity)

    @property
    @pulumi.getter(name="nodeAffinity")
    def node_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinity']:
        """
        Describes node affinity scheduling rules for the pod.
        """
        return pulumi.get(self, "node_affinity")

    @property
    @pulumi.getter(name="podAffinity")
    def pod_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinity']:
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_affinity")

    @property
    @pulumi.getter(name="podAntiAffinity")
    def pod_anti_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinity']:
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_anti_affinity")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinity(dict):
    """
    Describes node affinity scheduling rules for the pod.
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution'] = None):
        """
        Describes node affinity scheduling rules for the pod.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs' required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        # All fields are optional; only store the ones explicitly supplied.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution']:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 preference: 'outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
                 weight: int):
        """
        An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs' preference: A node selector term, associated with the corresponding weight.
        :param int weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        # Both fields are required by the schema.
        pulumi.set(__self__, "preference", preference)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter
    def preference(self) -> 'outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference':
        """
        A node selector term, associated with the corresponding weight.
        """
        return pulumi.get(self, "preference")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference(dict):
    """
    A node selector term, associated with the corresponding weight.
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']] = None):
        """
        A node selector term, associated with the corresponding weight.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
        """
        # All fields are optional; only store the ones explicitly supplied.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']]:
        """
        A list of node selector requirements by node's labels.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']]:
        """
        A list of node selector requirements by node's fields.
        """
        return pulumi.get(self, "match_fields")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # "values" is optional; only store it when explicitly supplied.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # "values" is optional; only store it when explicitly supplied.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 node_selector_terms: Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']):
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs'] node_selector_terms: Required. A list of node selector terms. The terms are ORed.
        """
        # The only field is required by the schema.
        pulumi.set(__self__, "node_selector_terms", node_selector_terms)

    @property
    @pulumi.getter(name="nodeSelectorTerms")
    def node_selector_terms(self) -> Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']:
        """
        Required. A list of node selector terms. The terms are ORed.
        """
        return pulumi.get(self, "node_selector_terms")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms(dict):
    """
    A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
    """
    # NOTE: generated by crd2pulumi — regenerate instead of hand-editing.
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']] = None):
        """
        A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
        """
        # All fields are optional; only store the ones explicitly supplied.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']]:
        """
        A list of node selector requirements by node's labels.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']]:
        """
        A list of node selector requirements by node's fields.
        """
        return pulumi.get(self, "match_fields")

    def _translate_property(self, prop):
        # Map camelCase wire keys to the snake_case attribute names used above.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional: only stored when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional: only stored when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinity(dict):
    """
    Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
    """

    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).

        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        # Only store fields that were explicitly provided, so unset optional
        # fields stay absent from the underlying dict representation.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
    """

    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

        :param 'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs' pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
        :param int weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """
        Required. A pod affinity term, associated with the corresponding weight.
        """
        return pulumi.get(self, "pod_affinity_term")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
    """
    Required. A pod affinity term, associated with the corresponding weight.
    """

    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        Required. A pod affinity term, associated with the corresponding weight.

        :param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
        :param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields: only stored when explicitly provided.
        if label_selector is not None:
            pulumi.set(__self__, "label_selector", label_selector)
        if namespaces is not None:
            pulumi.set(__self__, "namespaces", namespaces)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
        """
        A label query over a set of resources, in this case pods.
        """
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
    """
    A label query over a set of resources, in this case pods.
    """

    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        A label query over a set of resources, in this case pods.

        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Only store fields that were explicitly provided, so unset optional
        # fields stay absent from the underlying dict representation.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional: only stored when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
    """

    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running

        :param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
        :param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields: only stored when explicitly provided.
        if label_selector is not None:
            pulumi.set(__self__, "label_selector", label_selector)
        if namespaces is not None:
            pulumi.set(__self__, "namespaces", namespaces)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
        """
        A label query over a set of resources, in this case pods.
        """
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
    """
    A label query over a set of resources, in this case pods.
    """

    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        A label query over a set of resources, in this case pods.

        :param Sequence['IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Only store fields that were explicitly provided, so unset optional
        # fields stay absent from the underlying dict representation.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """

    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.

        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional: only stored when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinity(dict):
    """
    Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
    """

    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).

        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        # Only store fields that were explicitly provided, so unset optional
        # fields stay absent from the underlying dict representation.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
    """

    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs' pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
        :param int weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """
        Required. A pod affinity term, associated with the corresponding weight.
        """
        return pulumi.get(self, "pod_affinity_term")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
    """
    Required. A pod affinity term, associated with the corresponding weight.
    """

    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        Required. A pod affinity term, associated with the corresponding weight.

        :param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
        :param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields: only stored when explicitly provided.
        if label_selector is not None:
            pulumi.set(__self__, "label_selector", label_selector)
        if namespaces is not None:
            pulumi.set(__self__, "namespaces", namespaces)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
        """
        A label query over a set of resources, in this case pods.
        """
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # Translate a camelCase wire-format key to its snake_case Python
        # property name; fall back to the key itself when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
    """
    A label query over a set of resources, in this case pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        A label query over a set of resources, in this case pods.
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Both fields are optional; only provided values are recorded so unset keys stay absent.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # "values" is optional; only record it when provided so the unset key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
        :param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
        :param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields are only recorded when provided, so unset keys stay absent from the output dict.
        if label_selector is not None:
            pulumi.set(__self__, "label_selector", label_selector)
        if namespaces is not None:
            pulumi.set(__self__, "namespaces", namespaces)
    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        """
        return pulumi.get(self, "topology_key")
    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
        """
        A label query over a set of resources, in this case pods.
        """
        return pulumi.get(self, "label_selector")
    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        return pulumi.get(self, "namespaces")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
    """
    A label query over a set of resources, in this case pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        A label query over a set of resources, in this case pods.
        :param Sequence['IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Both fields are optional; only provided values are recorded so unset keys stay absent.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # "values" is optional; only record it when provided so the unset key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerTolerations(dict):
    """
    The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
    """
    def __init__(__self__, *,
                 effect: Optional[str] = None,
                 key: Optional[str] = None,
                 operator: Optional[str] = None,
                 toleration_seconds: Optional[int] = None,
                 value: Optional[str] = None):
        """
        The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
        :param str effect: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
        :param str key: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
        :param str operator: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
        :param int toleration_seconds: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
        :param str value: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
        """
        # Every field is optional; only provided values are recorded so unset keys stay absent.
        if effect is not None:
            pulumi.set(__self__, "effect", effect)
        if key is not None:
            pulumi.set(__self__, "key", key)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if toleration_seconds is not None:
            pulumi.set(__self__, "toleration_seconds", toleration_seconds)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def effect(self) -> Optional[str]:
        """
        Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
        """
        return pulumi.get(self, "effect")
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """
        Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> Optional[str]:
        """
        Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter(name="tolerationSeconds")
    def toleration_seconds(self) -> Optional[int]:
        """
        TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
        """
        return pulumi.get(self, "toleration_seconds")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
        """
        return pulumi.get(self, "value")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNode(dict):
    """
    IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
    """
    def __init__(__self__, *,
                 repository: str,
                 tag: str,
                 affinity: Optional['outputs.IBMBlockCSISpecNodeAffinity'] = None,
                 image_pull_policy: Optional[str] = None,
                 tolerations: Optional[Sequence['outputs.IBMBlockCSISpecNodeTolerations']] = None):
        """
        IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
        :param 'IBMBlockCSISpecNodeAffinityArgs' affinity: Affinity is a group of affinity scheduling rules.
        :param str image_pull_policy: PullPolicy describes a policy for if/when to pull a container image
        """
        # NOTE(review): the schema provides no descriptions for `repository`, `tag`, or
        # `tolerations`; presumably they are the node image coordinates and pod
        # tolerations — confirm against the CRD before documenting further.
        pulumi.set(__self__, "repository", repository)
        pulumi.set(__self__, "tag", tag)
        # Optional fields are only recorded when provided, so unset keys stay absent from the output dict.
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if image_pull_policy is not None:
            pulumi.set(__self__, "image_pull_policy", image_pull_policy)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)
    @property
    @pulumi.getter
    def repository(self) -> str:
        return pulumi.get(self, "repository")
    @property
    @pulumi.getter
    def tag(self) -> str:
        return pulumi.get(self, "tag")
    @property
    @pulumi.getter
    def affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinity']:
        """
        Affinity is a group of affinity scheduling rules.
        """
        return pulumi.get(self, "affinity")
    @property
    @pulumi.getter(name="imagePullPolicy")
    def image_pull_policy(self) -> Optional[str]:
        """
        PullPolicy describes a policy for if/when to pull a container image
        """
        return pulumi.get(self, "image_pull_policy")
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeTolerations']]:
        return pulumi.get(self, "tolerations")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinity(dict):
    """
    Affinity is a group of affinity scheduling rules.
    """
    def __init__(__self__, *,
                 node_affinity: Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinity'] = None,
                 pod_affinity: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinity'] = None,
                 pod_anti_affinity: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinity'] = None):
        """
        Affinity is a group of affinity scheduling rules.
        :param 'IBMBlockCSISpecNodeAffinityNodeAffinityArgs' node_affinity: Describes node affinity scheduling rules for the pod.
        :param 'IBMBlockCSISpecNodeAffinityPodAffinityArgs' pod_affinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        :param 'IBMBlockCSISpecNodeAffinityPodAntiAffinityArgs' pod_anti_affinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        # Every field is optional; only provided values are recorded so unset keys stay absent.
        if node_affinity is not None:
            pulumi.set(__self__, "node_affinity", node_affinity)
        if pod_affinity is not None:
            pulumi.set(__self__, "pod_affinity", pod_affinity)
        if pod_anti_affinity is not None:
            pulumi.set(__self__, "pod_anti_affinity", pod_anti_affinity)
    @property
    @pulumi.getter(name="nodeAffinity")
    def node_affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinity']:
        """
        Describes node affinity scheduling rules for the pod.
        """
        return pulumi.get(self, "node_affinity")
    @property
    @pulumi.getter(name="podAffinity")
    def pod_affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinity']:
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_affinity")
    @property
    @pulumi.getter(name="podAntiAffinity")
    def pod_anti_affinity(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinity']:
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_anti_affinity")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinity(dict):
    """
    Describes node affinity scheduling rules for the pod.
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution'] = None):
        """
        Describes node affinity scheduling rules for the pod.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        :param 'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs' required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        # Both fields are optional; only provided values are recorded so unset keys stay absent.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution']:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
    """
    def __init__(__self__, *,
                 preference: 'outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
                 weight: int):
        """
        An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
        :param 'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs' preference: A node selector term, associated with the corresponding weight.
        :param int weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        # Both fields are required, so they are always recorded.
        pulumi.set(__self__, "preference", preference)
        pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter
    def preference(self) -> 'outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference':
        """
        A node selector term, associated with the corresponding weight.
        """
        return pulumi.get(self, "preference")
    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference(dict):
    """
    A node selector term, associated with the corresponding weight.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']] = None):
        """
        A node selector term, associated with the corresponding weight.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
        """
        # Both fields are optional; only provided values are recorded so unset keys stay absent.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']]:
        """
        A list of node selector requirements by node's labels.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']]:
        """
        A list of node selector requirements by node's fields.
        """
        return pulumi.get(self, "match_fields")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # "values" is optional; only record it when provided so the unset key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # "values" is optional; only record it when provided so the unset key stays absent.
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
    """
    def __init__(__self__, *,
                 node_selector_terms: Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']):
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs'] node_selector_terms: Required. A list of node selector terms. The terms are ORed.
        """
        # The single field is required, so it is always recorded.
        pulumi.set(__self__, "node_selector_terms", node_selector_terms)
    @property
    @pulumi.getter(name="nodeSelectorTerms")
    def node_selector_terms(self) -> Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']:
        """
        Required. A list of node selector terms. The terms are ORed.
        """
        return pulumi.get(self, "node_selector_terms")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case Python attribute name; fall back to the name unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms(dict):
    """
    A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']] = None):
        """
        A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
        :param Sequence['IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
        """
        # Optional fields are stored only when explicitly provided.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']]:
        """
        A list of node selector requirements by node's labels.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']]:
        """
        A list of node selector requirements by node's fields.
        """
        return pulumi.get(self, "match_fields")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional field: stored only when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional field: stored only when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinity(dict):
    """
    Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        # Optional fields are stored only when explicitly provided.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
    """
    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
        :param 'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs' pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
        :param int weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        # Both fields are required: always recorded.
        pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """
        Required. A pod affinity term, associated with the corresponding weight.
        """
        return pulumi.get(self, "pod_affinity_term")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
    """
    Required. A pod affinity term, associated with the corresponding weight.
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        Required. A pod affinity term, associated with the corresponding weight.
        :param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
        :param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields are stored only when explicitly provided.
        if label_selector is not None:
            pulumi.set(__self__, "label_selector", label_selector)
        if namespaces is not None:
            pulumi.set(__self__, "namespaces", namespaces)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
        """
        A label query over a set of resources, in this case pods.
        """
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
    """
    A label query over a set of resources, in this case pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        A label query over a set of resources, in this case pods.
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Optional fields are stored only when explicitly provided.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional field: stored only when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
    """
    Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
    """
    def __init__(__self__, *,
                 topology_key: str,
                 label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
                 namespaces: Optional[Sequence[str]] = None):
        """
        Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
        :param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        :param 'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
        :param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        pulumi.set(__self__, "topology_key", topology_key)
        # Optional fields are stored only when explicitly provided.
        if label_selector is not None:
            pulumi.set(__self__, "label_selector", label_selector)
        if namespaces is not None:
            pulumi.set(__self__, "namespaces", namespaces)

    @property
    @pulumi.getter(name="topologyKey")
    def topology_key(self) -> str:
        """
        This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
        """
        return pulumi.get(self, "topology_key")

    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
        """
        A label query over a set of resources, in this case pods.
        """
        return pulumi.get(self, "label_selector")

    @property
    @pulumi.getter
    def namespaces(self) -> Optional[Sequence[str]]:
        """
        namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
        """
        return pulumi.get(self, "namespaces")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
    """
    A label query over a set of resources, in this case pods.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        A label query over a set of resources, in this case pods.
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Optional fields are stored only when explicitly provided.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        # Optional field: stored only when explicitly provided.
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinity(dict):
    """
    Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        :param Sequence['IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        # Optional fields are stored only when explicitly provided.
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)

    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")

    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
        """
        If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
    """
    def __init__(__self__, *,
                 pod_affinity_term: 'outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
                 weight: int):
        """
        The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
        :param 'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermArgs' pod_affinity_term: Required. A pod affinity term, associated with the corresponding weight.
        :param int weight: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        # Both fields are required: always recorded.
        pulumi.set(__self__, "pod_affinity_term", pod_affinity_term)
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter(name="podAffinityTerm")
    def pod_affinity_term(self) -> 'outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm':
        """
        Required. A pod affinity term, associated with the corresponding weight.
        """
        return pulumi.get(self, "pod_affinity_term")

    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")

    def _translate_property(self, prop):
        # Map a camelCase wire-format name to its snake_case Python attribute
        # name, falling back to the original name when no mapping exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm(dict):
"""
Required. A pod affinity term, associated with the corresponding weight.
"""
def __init__(__self__, *,
topology_key: str,
label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector'] = None,
namespaces: Optional[Sequence[str]] = None):
"""
Required. A pod affinity term, associated with the corresponding weight.
:param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param 'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
:param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> str:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector']:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@property
@pulumi.getter
def namespaces(self) -> Optional[Sequence[str]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector(dict):
"""
A label query over a set of resources, in this case pods.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']] = None,
match_labels: Optional[Mapping[str, str]] = None):
"""
A label query over a set of resources, in this case pods.
:param Sequence['IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions']]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[Mapping[str, str]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions(dict):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
"""
def __init__(__self__, *,
key: str,
operator: str,
values: Optional[Sequence[str]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param str key: key is the label key that the selector applies to.
:param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def operator(self) -> str:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
"""
def __init__(__self__, *,
topology_key: str,
label_selector: Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector'] = None,
namespaces: Optional[Sequence[str]] = None):
"""
Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
:param str topology_key: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
:param 'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorArgs' label_selector: A label query over a set of resources, in this case pods.
:param Sequence[str] namespaces: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
pulumi.set(__self__, "topology_key", topology_key)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespaces is not None:
pulumi.set(__self__, "namespaces", namespaces)
@property
@pulumi.getter(name="topologyKey")
def topology_key(self) -> str:
"""
This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
"""
return pulumi.get(self, "topology_key")
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector']:
"""
A label query over a set of resources, in this case pods.
"""
return pulumi.get(self, "label_selector")
@property
@pulumi.getter
def namespaces(self) -> Optional[Sequence[str]]:
"""
namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
"""
return pulumi.get(self, "namespaces")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector(dict):
"""
A label query over a set of resources, in this case pods.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']] = None,
match_labels: Optional[Mapping[str, str]] = None):
"""
A label query over a set of resources, in this case pods.
:param Sequence['IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions']]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[Mapping[str, str]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions(dict):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
"""
def __init__(__self__, *,
key: str,
operator: str,
values: Optional[Sequence[str]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param str key: key is the label key that the selector applies to.
:param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def operator(self) -> str:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecNodeTolerations(dict):
"""
The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
"""
def __init__(__self__, *,
effect: Optional[str] = None,
key: Optional[str] = None,
operator: Optional[str] = None,
toleration_seconds: Optional[int] = None,
value: Optional[str] = None):
"""
The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
:param str effect: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
:param str key: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
:param str operator: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
:param int toleration_seconds: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
:param str value: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
if effect is not None:
pulumi.set(__self__, "effect", effect)
if key is not None:
pulumi.set(__self__, "key", key)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if toleration_seconds is not None:
pulumi.set(__self__, "toleration_seconds", toleration_seconds)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def effect(self) -> Optional[str]:
"""
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
"""
return pulumi.get(self, "effect")
@property
@pulumi.getter
def key(self) -> Optional[str]:
"""
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def operator(self) -> Optional[str]:
"""
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter(name="tolerationSeconds")
def toleration_seconds(self) -> Optional[int]:
"""
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
"""
return pulumi.get(self, "toleration_seconds")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecSidecars(dict):
def __init__(__self__, *,
name: str,
repository: str,
tag: str,
image_pull_policy: Optional[str] = None):
"""
:param str name: The name of the csi sidecar image
:param str repository: The repository of the csi sidecar image
:param str tag: The tag of the csi sidecar image
:param str image_pull_policy: The pullPolicy of the csi sidecar image
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "repository", repository)
pulumi.set(__self__, "tag", tag)
if image_pull_policy is not None:
pulumi.set(__self__, "image_pull_policy", image_pull_policy)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the csi sidecar image
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def repository(self) -> str:
"""
The repository of the csi sidecar image
"""
return pulumi.get(self, "repository")
@property
@pulumi.getter
def tag(self) -> str:
"""
The tag of the csi sidecar image
"""
return pulumi.get(self, "tag")
@property
@pulumi.getter(name="imagePullPolicy")
def image_pull_policy(self) -> Optional[str]:
"""
The pullPolicy of the csi sidecar image
"""
return pulumi.get(self, "image_pull_policy")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSIStatus(dict):
"""
IBMBlockCSIStatus defines the observed state of IBMBlockCSI
"""
def __init__(__self__, *,
controller_ready: bool,
node_ready: bool,
phase: str,
version: str):
"""
IBMBlockCSIStatus defines the observed state of IBMBlockCSI
:param str phase: Phase is the driver running phase
:param str version: Version is the current driver version
"""
pulumi.set(__self__, "controller_ready", controller_ready)
pulumi.set(__self__, "node_ready", node_ready)
pulumi.set(__self__, "phase", phase)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="controllerReady")
def controller_ready(self) -> bool:
return pulumi.get(self, "controller_ready")
@property
@pulumi.getter(name="nodeReady")
def node_ready(self) -> bool:
return pulumi.get(self, "node_ready")
@property
@pulumi.getter
def phase(self) -> str:
"""
Phase is the driver running phase
"""
return pulumi.get(self, "phase")
@property
@pulumi.getter
def version(self) -> str:
"""
Version is the current driver version
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
from CrossRatio import *
from CrossRadonTransform import *
from HoughTransform import *
from ShapeDescriptor import *
from MatchRaysPairs import *
from Plotter import *
#from ransac import *
from LineEstimation import *
from HorizonLine import *
from VanishPencilsTable import *
from Image import *
from Scanner import *
import time
## Close window and change progress in code
def press(event):
#print('press', event.key)
if event.key == 'enter':
plt.close()
# =============================================================================
# ============================= FLAG and Parameters ===========================
# =============================================================================
# Scan rays and Match rays
showScanRays = True # RED Rays
showMatchRays = True # Cyan Rays
# Grid
showPixelGrid = False # show pixel grid
# SCAN CONFIGURATION
nTraj = 11#15#491#301#401#201#191#101#7 #adiddas:(491, 18)
nProj = 1#18#27#9#180#32#9
nTrajTemplate = nTraj
nProjTemplate = nProj
nTrajTest = nTraj
nProjTest = nProj
# MATCH CARDINALITY
N_By_M_Match_Cardinality = False # N:M
N_By_One_Match_Cardinality = False # N:1
One_by_N_Match_Cardinality = False # 1:N
One_by_One_Match_Cardinality = True # 1:1
# VANISH POINTS FLAGS
CRVectorlenThreshold = 1
showAllVanishPoints = True
ignoreDistance = True
limitDistance = 1000
# LINE ESTIMATION
show_Least_Square_line = True
show_Hough_line = True
show_Wighted_Hough_line = True
show_RANSAC_line = True
# PENCILS CLUSTERS
show_VanishPoint_by_angle = False
show_discarted_vanishPoints = True
# =============================================================================
# ================================= MAIN ======================================
# =============================================================================
fig = plt.figure()
# =============================================================================
# ============================== LOAD IMAGES ==================================
# =============================================================================
filename = askopenfilename(filetypes=[("all files","*"),("Bitmap Files","*.bmp; *.dib"),
("JPEG", "*.jpg; *.jpe; *.jpeg; *.jfif"),
("PNG", "*.png"), ("TIFF", "*.tiff; *.tif")])
templateImage = Image(misc.imread(filename, mode = 'RGB'))
filename = askopenfilename(filetypes=[("all files","*"),("Bitmap Files","*.bmp; *.dib"),
("JPEG", "*.jpg; *.jpe; *.jpeg; *.jfif"),
("PNG", "*.png"), ("TIFF", "*.tiff; *.tif")])
testImage = Image(misc.imread(filename, mode = 'RGB'))
# =============================================================================
# ============================== SHOW ORIGINAL IMAGE ==========================
# =============================================================================
fig.canvas.set_window_title('Original Image')
fig.canvas.mpl_connect('key_press_event', press)
#plotterTemplateImg = Plotter(templateImage)
start_time = time.time()
#(templateSinograma, templateDescriptor) = crossRadonTransform2(templateImage, nTrajTemplate, nProjTemplate) # 2,1
templateScanner = Scanner(templateImage)
templateDescriptor = templateScanner.tomographic_scan(nTrajTemplate, nProjTemplate)
print("--- %s seconds ---" % (time.time() - start_time))
print("#### TEMPLATE STATISTICS ####")
print("n. template rays: ", len(templateDescriptor.rays))
for i in range(1, len(templateDescriptor.countCrossRatioVectorLengths)):
countLen = templateDescriptor.countCrossRatioVectorLengths[i]
if countLen > 0:
print("CrossRatio vector with size %d, have %d Rays" %(i, countLen))
print("---------------------------")
#plotterTestImg = Plotter(testImage)
start_time = time.time()
#(testSinograma, testDescriptor) = crossRadonTransform2(testImage, nTrajTest, nProjTest) #(67, 27)
testScanner = Scanner(testImage)
testDescriptor = testScanner.tomographic_scan(nTrajTest, nProjTest)
print("--- %s seconds ---" % (time.time() - start_time))
print("#### TEST STATISTICS ####")
print("n. test rays: ", len(testDescriptor.rays))
for i in range(1, len(testDescriptor.countCrossRatioVectorLengths)):
countLen = testDescriptor.countCrossRatioVectorLengths[i]
if countLen > 0:
print("CrossRatio vector with size %d, have %d Rays" %(i, countLen))
print("---------------------------")
templateGreenRays = []
testGreenRays = []
testZeroRays = []
bestRaysPairs_1_N = MatchRaysPairs()
bestRaysPairs_N_1 = MatchRaysPairs()
templateRedRays = []
testRedRays = []
# =============================================================================
# ================================ MATCHING ===================================
# =============================================================================
countMatch = 0
totalComp = 0
countMatchCrossRatioVectorLengths = 60*[0]
for templateRay in templateDescriptor.rays:
testBestMatchRay = None
minDistance = 10000
for testRay in testDescriptor.rays:
totalComp += 1
#print("templateRay.crossRatioVector = ", templateRay.crossRatioVector)
#print("testRay.crossRatioVector = ", testRay.crossRatioVector)
if templateRay.isMatch(testRay) and testRay.CRV_length() >= CRVectorlenThreshold:# testRay.numberOfEdgePoints >= 4:
if N_By_M_Match_Cardinality:
testRay.estimateVanishPoints(templateRay)
testGreenRays.append(testRay) # Não pode comentar!!!!
if showMatchRays:
templateGreenRays.append(templateRay)
idxLen = len(testRay.crossRatioVector)
countMatchCrossRatioVectorLengths[idxLen] += 1
if N_By_One_Match_Cardinality or One_by_One_Match_Cardinality: # N:1 OU 1:1
testRay.estimateVanishPoints(templateRay)
beforeLen = bestRaysPairs_N_1.length()
if bestRaysPairs_N_1.updatePair(testRay, templateRay):
afterLen = bestRaysPairs_N_1.length()
if afterLen > beforeLen:
idxLen = len(testRay.crossRatioVector)
if idxLen < len(countMatchCrossRatioVectorLengths):
countMatchCrossRatioVectorLengths[idxLen] += 1
if One_by_N_Match_Cardinality or One_by_One_Match_Cardinality: # 1:N OU 1:1
testRay.estimateVanishPoints(templateRay)
beforeLen = bestRaysPairs_1_N.length()
if bestRaysPairs_1_N.updatePair(templateRay, testRay):
afterLen = bestRaysPairs_1_N.length()
if afterLen > beforeLen:
idxLen = len(testRay.crossRatioVector)
if idxLen < len(countMatchCrossRatioVectorLengths):
countMatchCrossRatioVectorLengths[idxLen] += 1
# Adicionar nos arrays de raios para exibir depois!
else:
if showScanRays:
templateRedRays.append(templateRay)
testRedRays.append(testRay)
if One_by_N_Match_Cardinality:
testGreenRays = bestRaysPairs_1_N.getValues()
elif N_By_One_Match_Cardinality:
testGreenRays = bestRaysPairs_N_1.getValues()
elif One_by_One_Match_Cardinality:
bestPairsList = bestRaysPairs_1_N.intersection(bestRaysPairs_N_1)
testGreenRays = [testRay for (k, testRay) in bestPairsList]
templateGreenRays = [k for (k, testRay) in bestPairsList]
countMatchCrossRatioVectorLengths = 60*[0]
for testRay in testGreenRays:
idxLen = len(testRay.crossRatioVector)
if idxLen >= CRVectorlenThreshold and idxLen < len(countMatchCrossRatioVectorLengths):
countMatchCrossRatioVectorLengths[idxLen] += 1
# # ########## PLOT ##########
# ax = fig.add_subplot(1,2,1)
# ax.set_title('Template Image')
# plt.imshow(templateImage)
#### PLOT TEMPLATE RAYS
if showPixelGrid:
templateImg.plotPixelGrid()
if showScanRays:
for templateRay in templateRedRays:
templateImage.plotRay(templateRay)
if showMatchRays:
for templateRay in templateGreenRays:
templateImage.plotRay(templateRay, 'c', 'co')
# ax = fig.add_subplot(1,2,2)
# ax.set_title('Test Image')
# plt.imshow(testImage)
vanishPoints = []
vanishPColors = ["kx", "mx", "kx", "gx", "kx", "yx", "kx", "bx", "kx", "rx", "kx", "cx", "kx", "mx", "kx", "gx", "kx", "yx", "kx", "kx"]
validSizeCrossRatioLengths = []
print("#### MATCH STATISTICS ####")
print("Total comparation: ", totalComp)
print("Count Match: ", countMatch)
countRaysTotal = 0
countTestRaysTotal = 0
for size in range(1, len(countMatchCrossRatioVectorLengths)):
countRays = countMatchCrossRatioVectorLengths[size]
countTemplateRays = templateDescriptor.countCrossRatioVectorLengths[size]
countTestRays = testDescriptor.countCrossRatioVectorLengths[size]
if countRays > 0:
print("CrossRatio vector with size %d, have %d Rays -- Percentual match: %3.2f %%" %(size, countRays, 100*countRays/countTestRays))
countRaysTotal += countRays
countTestRaysTotal += countTestRays
if countRays <= min(countTemplateRays, countTestRays)*1 and countRays != 0:
validSizeCrossRatioLengths.append(size)
print("validSizeCrossRatioLengths: ", validSizeCrossRatioLengths)
if countTestRaysTotal != 0:
print("Percentual total matches: %3.2f %%" %(100*countRaysTotal/countTestRaysTotal))
print("---------------------------")
pencilsTable = VanishPencilsTable()
#### PLOT TEST RAYS
if showPixelGrid:
testImage.plotPixelGrid()
if showScanRays:
for testRay in testRedRays:
testImage.plotRay(testRay)
for testRay in testGreenRays:
if testRay.numberOfEdgePoints >= 4:
if showMatchRays:
testImage.plotRay(testRay, 'c', 'co')
if (len(testRay.crossRatioVector) in validSizeCrossRatioLengths) or showAllVanishPoints:
if showMatchRays:
testImage.plotRay(testRay, 'c', 'co')
vP1 = testRay.getVanishPoint()
if vP1:
distance = vP1.euclideanDistance(R2_Point(0,0))
if (distance <= limitDistance) or ignoreDistance:
vanishPoints.append(vP1)
(x1, y1) = vP1.toTuple()
crvLen = len(testRay.crossRatioVector)
if crvLen <= 19:
vpColor = vanishPColors[len(testRay.crossRatioVector)]
else:
vpColor = "kx"
pencilsTable.updatePencil(testRay.pencil_id, testRay)
if show_discarted_vanishPoints:
testImage.plotPoint(x1, y1, color=vpColor)
#testImage.plotCircle(x1, y1, 2+testRay.crossRatioVectorLength, 100*testRay.pencil_id/nProjTemplate)
if show_VanishPoint_by_angle:
vanishPoints = []
for (vPi, iDi) in pencilsTable.getVanishPoints():
vanishPoints.append(vPi)
(xi, yi) = vPi.toTuple()
wi = vPi.w
if wi <= 19:
vpColor = vanishPColors[wi]
else:
vpColor = "kx"
testImage.plotPoint(xi, yi, color=vpColor)
testImage.plotCircle(xi, yi, 2+wi, 100*iDi/nProjTemplate)
vanishPoints = []
for (vPi, iDi) in pencilsTable.getVirtualPoints():
vanishPoints.append(vPi)
(xi, yi) = vPi.toTuple()
wi = vPi.w
if wi <= 19:
vpColor = vanishPColors[wi]
else:
vpColor = "kx"
testImage.plotPoint(xi, yi, color=vpColor)
testImage.plotHexagon(xi, yi, 10*wi, 100*iDi/nProjTemplate)
#### PLOT ####
# vanishPoints_set = set(vanishPoints)#set([x for x in vanishPoints if vanishPoints.count(x) > 1])#
# vanishPoints = list(vanishPoints_set)
# print(vanishPoints)
# testImage.plotLinePoints(vanishPoints)
# =============================================================================
# ============================ HORIZON LINE ===================================
# =============================================================================
if show_Hough_line:
## TRADITIONAL HOUGH TRANSFORM
vanishPointsHoughSpace, houghLines = points_houghTransform(vanishPoints, weighted=True)
vphs = vanishPointsHoughSpace.tocoo()
try:
idxMaxVal = vphs.data.argmax()
maxVal = vphs.data[idxMaxVal]
print("hough space maxVal = ", maxVal)
#print("vanishPointsHoughSpace: ")
#print(vanishPointsHoughSpace)
for linePoints in houghLines.getValues():
if len(linePoints) >= maxVal:
testImage.plotLinePoints(linePoints, color='m')
except ValueError:
print("Hough space is empty, no hough line!")
if One_by_One_Match_Cardinality and show_Hough_line:
## WEIGHTED HOUGH TRANSFORM
vanishPointsHoughSpace, vanishHoughLines = vanishRays_houghTransform(bestPairsList, weighted = True) #points_houghTransform(vanishPoints, weighted = True)
vphs = vanishPointsHoughSpace.tocoo()
vanishRaysPairs = []
try:
idxMaxVal = vphs.data.argmax()
maxVal = vphs.data[idxMaxVal]
print("weighted hough space maxVal = ", maxVal)
print("vanishHoughLines size = ", len(vanishHoughLines.getValues()))
for raysTupleList in vanishHoughLines.getValues():
bestVanishPoints = []
testRays = [testRay for (templateRay, testRay) in raysTupleList]
scoreWeightPoints = 0
for testRay in testRays:
point = testRay.getVanishPoint()
scoreWeightPoints += point.w
bestVanishPoints.append(point)
if scoreWeightPoints >= maxVal:
vanishRaysPairs = raysTupleList
testImage.plotLinePoints(bestVanishPoints, color='g')
except ValueError:
print("Weighted Hough space is empty, no hough line!")
#horizon = HorizonLine(bestVanishPoints)
#print("horizon = ", horizon.getRepr())
"""
if One_by_One_Match_Cardinality:
## PLOT VANISH RAYS
for (templateVanishRay, testVanishRay) in vanishRaysPairs:
templateImage.plotRay(templateVanishRay, 'r--', 'ro')
testImage.plotVanishRay(testVanishRay, 'r--', 'ro')
"""
if show_Least_Square_line:
# LeastSquare
try:
(Xlstsq, Ylstsq, _, _) = leastSquares(vanishPoints, weighted=True)
P0 = R2_Point(Xlstsq[0], Ylstsq[0])
Pf = R2_Point(Xlstsq[1], Ylstsq[1])
testImage.plotLinePoints([P0,Pf], color="orange")
except numpy.linalg.linalg.LinAlgError:
print("Least Square method: not is possible!")
model = LinearLeastSquaresModel()
if show_RANSAC_line:
# RANSAC
try:
ransacReturn = ransac(vanishPoints,model, int(len(vanishPoints)*0.4), 1000, 7e3, int(len(vanishPoints)*0.2), debug=False,return_all=True, weighted=True)
if ransacReturn:
(XRansac, YRansac, a, b) = ransacReturn
P0 = R2_Point(XRansac[0], YRansac[0])
Pf = R2_Point(XRansac[1], YRansac[1])
testImage.plotLinePoints([P0, Pf], color="cyan")
except numpy.linalg.linalg.LinAlgError:
print("RANSAC method: not is possible!")
# ########## PLOT ##########
ax = fig.add_subplot(1,2,1)
ax.set_title('Template Image')
(cols, rows) = templateImage.getShape()
plt.imshow(templateImage.image, interpolation='none', origin='upper', extent=[0, rows, cols, 0])
templateImage.showPatches(fig, ax)
templateImage.show()
#testImage.plotCircle(0, 0, 10, 20)
#testImage.plotCircle(50, 50, 15, 100)
ax = fig.add_subplot(1,2,2)
ax.set_title('Test Image')
(cols, rows) = testImage.getShape()
plt.imshow(testImage.image, interpolation='none', origin='upper', extent=[0, rows, cols, 0])
testImage.showPatches(fig, ax)
testImage.show()
plt.show()
#plt.imshow(vanishPointsHoughSpace)
#plt.show()
|
<filename>timetomodel/tests/test_series_specs.py<gh_stars>0
from datetime import datetime, timedelta
import pytest
import pandas as pd
import numpy as np
import pytz
from timetomodel.speccing import ObjectSeriesSpecs, CSVFileSeriesSpecs
from timetomodel.transforming import Transformation
from timetomodel.tests.utils import MyMultiplicationTransformation
from timetomodel.exceptions import MissingData, NaNData, IncompatibleModelSpecs
def test_load_series_without_datetime_index():
    """A series indexed by anything other than a DatetimeIndex must be rejected."""
    with pytest.raises(Exception) as e_info:
        # default RangeIndex -> not a DatetimeIndex
        specs = ObjectSeriesSpecs(data=pd.Series([1, 2, 3]), name="mydata")
        specs.load_series(expected_frequency=timedelta(hours=1))
    assert "DatetimeIndex" in str(e_info.value)
def test_load_series():
    """Loading at the native 15-minute frequency returns the data unchanged."""
    start = datetime(2019, 1, 29, 15, 15)
    end = start + timedelta(minutes=30)
    specs = ObjectSeriesSpecs(
        name="mydata",
        data=pd.Series(data=[1, 2, 3], index=pd.date_range(start, end, freq="15T")),
    )
    series = specs.load_series(expected_frequency=timedelta(minutes=15))
    assert series.loc[end] == 3
def test_load_series_with_expected_time_window():
    """A check_time_window that exactly matches the data passes validation."""
    start = datetime(2019, 1, 29, 15, 15, tzinfo=pytz.utc)
    end = start + timedelta(minutes=30)
    specs = ObjectSeriesSpecs(
        name="mydata",
        data=pd.Series(data=[1, 2, 3], index=pd.date_range(start, end, freq="15T")),
    )
    series = specs.load_series(
        expected_frequency=timedelta(minutes=15),
        check_time_window=(start, end),
    )
    assert series.loc[end] == 3
def test_load_series_with_larger_expected_time_window():
    """A check_time_window wider than the data raises MissingData on both ends."""
    start = datetime(2019, 1, 29, 15, 15, tzinfo=pytz.utc)
    end = start + timedelta(minutes=30)
    specs = ObjectSeriesSpecs(
        name="mydata",
        data=pd.Series(data=[1, 2, 3], index=pd.date_range(start, end, freq="15T")),
    )
    with pytest.raises(MissingData) as e_info:
        specs.load_series(
            expected_frequency=timedelta(minutes=15),
            check_time_window=(start - timedelta(minutes=15), end + timedelta(minutes=15)),
        )
    message = str(e_info.value)
    assert "starts too late" in message
    assert "ends too early" in message
def test_load_series_with_frequency_resampling():
    """Resampling 15-minute data to hourly aggregates by the mean by default."""
    start = datetime(2019, 1, 29, 15, 15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        data=pd.Series(
            data=[1, 2, 3],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    series = specs.load_series(expected_frequency=timedelta(hours=1))
    assert len(series) == 1
    assert series[0] == 2  # mean of 1, 2, 3
def test_load_series_with_non_existing_custom_frequency_resampling():
    """An unknown resampling aggregation name raises IncompatibleModelSpecs."""
    start = datetime(2019, 1, 29, 15, 15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        resampling_config={"aggregation": "GGG"},
        data=pd.Series(
            data=[1, 2, 3],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    with pytest.raises(IncompatibleModelSpecs) as e_info:
        specs.load_series(expected_frequency=timedelta(hours=1))
    assert "Cannot find resampling aggregation GGG" in str(e_info.value)
def test_load_series_with_custom_frequency_resampling():
    """resampling_config can override the default aggregation (here: sum)."""
    start = datetime(2019, 1, 29, 15, 15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        resampling_config={"aggregation": "sum"},
        data=pd.Series(
            data=[1, 2, 3],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    series = specs.load_series(expected_frequency=timedelta(hours=1))
    assert len(series) == 1
    assert series[0] == 6  # sum of 1, 2, 3
def test_load_series_without_data():
    """A series that is entirely NaN raises NaNData."""
    start = datetime(2019, 1, 29, 15, 15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        data=pd.Series(
            data=[np.nan, np.nan, np.nan],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    with pytest.raises(NaNData) as e_info:
        specs.load_series(expected_frequency=timedelta(hours=1))
    assert "Nan values" in str(e_info.value)
def test_load_series_with_missing_data():
    """Even a single NaN among valid values raises NaNData."""
    start = datetime(2019, 1, 29, 15, 15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        data=pd.Series(
            data=[1, np.nan, 3],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    with pytest.raises(NaNData) as e_info:
        specs.load_series(expected_frequency=timedelta(hours=1))
    assert "Nan values" in str(e_info.value)
def test_load_series_with_transformation():
    """feature_transformation is applied only when transform_features=True."""
    start = datetime(2019, 1, 29, 15, 15)
    probe = start + timedelta(minutes=15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        feature_transformation=MyMultiplicationTransformation(factor=11),
        data=pd.Series(
            data=[1, 2, 3],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    # Without the flag, raw values come back.
    raw = specs.load_series(expected_frequency=timedelta(minutes=15))
    assert raw.loc[probe] == 2
    # With the flag, the multiplication transformation is applied.
    transformed = specs.load_series(
        expected_frequency=timedelta(minutes=15), transform_features=True
    )
    assert transformed.loc[probe] == 2 * 11
def test_load_series_from_csv_with_post_load_processing(tmpdir):
    """CSV loading applies post_load_processing before the (optional) feature transformation."""
    highscore_data = """Time,Name,Highscore,
2019-02-05T12:57:00,Mel,8,
2019-02-05T10:30:00,Jack,5,
2019-02-05T11:36:00,David,10,
2019-02-05T10:34:00,Peter,6,
2019-02-05T09:11:00,David,5,
2019-02-05T11:17:00,Ryan,9,
2019-02-05T12:27:00,Ryan,9,
"""
    csv_file = tmpdir.join("highscore.csv")
    csv_file.write(highscore_data)

    def to_hour(dt: datetime) -> datetime:
        # Truncate a timestamp to the start of its hour.
        return dt.replace(minute=0, second=0, microsecond=0)

    class BestHighscorePerHour(Transformation):
        # Keep only the best score recorded within each hour.
        def transform_dataframe(self, df):
            df["Time"] = pd.to_datetime(df["Time"], utc=True)
            df["Time"] = df["Time"].apply(to_hour)
            return (
                df.sort_values(by=["Highscore"], ascending=False)
                .drop_duplicates(subset=["Time"], keep="first")
                .sort_values(by=["Time"])
            )

    specs = CSVFileSeriesSpecs(
        file_path=csv_file.realpath(),
        time_column="Time",
        value_column="Highscore",
        post_load_processing=BestHighscorePerHour(),
        name="mydata",
        feature_transformation=MyMultiplicationTransformation(factor=100),
    )
    expected_by_hour = {9: 5, 10: 6, 11: 10, 12: 9}
    data = specs.load_series(expected_frequency=timedelta(hours=1))
    for hour, best in expected_by_hour.items():
        assert data[datetime(2019, 2, 5, hour)] == best
    data = specs.load_series(expected_frequency=timedelta(hours=1), transform_features=True)
    for hour, best in expected_by_hour.items():
        assert data[datetime(2019, 2, 5, hour)] == best * 100
def test_load_series_with_non_existing_interpolation():
    """An unknown interpolation method raises IncompatibleModelSpecs."""
    start = datetime(2019, 1, 29, 15, 15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        interpolation_config={"method": "GGG"},
        data=pd.Series(
            data=[1, np.nan, 3],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    with pytest.raises(IncompatibleModelSpecs) as e_info:
        specs.load_series(expected_frequency=timedelta(minutes=15))
    assert "Cannot call interpolate function with arguments {'method': 'GGG'}" in str(
        e_info.value
    )
def test_load_series_with_interpolation():
    """A NaN gap is filled using the configured interpolation method."""
    start = datetime(2019, 1, 29, 15, 15)
    specs = ObjectSeriesSpecs(
        name="mydata",
        interpolation_config={"method": "time"},
        data=pd.Series(
            data=[1, np.nan, 3],
            index=pd.date_range(start, start + timedelta(minutes=30), freq="15T"),
        ),
    )
    series = specs.load_series(expected_frequency=timedelta(minutes=15))
    assert len(series) == 3
    assert series[1] == 2  # interpolated in time between 1 and 3
|
<filename>sos_trades_core/tools/post_processing/spider_charts/instantiated_spider_chart.py
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
Class that define a spider chart display as post post processing
"""
import plotly.graph_objects as go
from sos_trades_core.tools.post_processing.post_processing_plotly_tooling import AbstractPostProcessingPlotlyTooling
class SpiderChartTrace:
    """Data holder for a single spider-chart trace.

    theta_values are the axis names and radius_values the values plotted on
    those axes; both lists must have the same length.
    """

    def __init__(self, trace_name='', theta_values=None, radius_values=None):
        """ Init of the class

        @param trace_name: name of the trace
        @type str
        @param theta_values: values of spider chart axis = names of the axes
        @type list
        @param radius_values: values of spider chart on radius with value as text
        @type list
        @raises TypeError: if theta_values or radius_values is not a list
        @raises ValueError: if the two lists differ in length
        """
        self.trace_name = trace_name

        # Use None sentinels instead of mutable list defaults so that all
        # instances built without arguments do not share (and mutate) the
        # same default list object.
        if theta_values is None:
            theta_values = []
        if radius_values is None:
            radius_values = []

        if not isinstance(theta_values, list):
            message = f'"theta_values" argument is intended to be a list not {type(theta_values)}'
            raise TypeError(message)
        self.theta_values = theta_values

        if not isinstance(radius_values, list):
            message = f'"radius_values" argument is intended to be a list not {type(radius_values)}'
            raise TypeError(message)
        self.radius_values = radius_values

        if len(self.theta_values) != len(self.radius_values):
            # Report both lengths (the original message printed the *type*
            # of theta_values where its length was intended).
            message = f'"theta_values" and "radius_values" must have same length ' \
                      f'{len(theta_values)} != {len(radius_values)}'
            raise ValueError(message)
class InstantiatedSpiderChart(AbstractPostProcessingPlotlyTooling):
    """ Class that defines a spider chart display as post processing
    """

    def __init__(self, chart_name=''):
        """ Init of the class

        @param chart_name: name of the chart
        @type str
        """
        super().__init__()
        # Traces added via add_trace, rendered in insertion order
        self.__traces = []
        # Chart name (used as the plotly figure title)
        self.chart_name = chart_name

    def add_trace(self, trace):
        """ Method to add a trace to the current spider chart

        @param trace: trace instance to add
        @type SpiderChartTrace
        @raises TypeError: if trace is not a SpiderChartTrace
        """
        if not isinstance(trace, SpiderChartTrace):
            message = f'"trace" argument is intended to be a SpiderChartTrace not {type(trace)}'
            raise TypeError(message)
        self.__traces.append(trace)

    def to_plotly(self, logger=None):
        """ Convert current instance into a plotly object

        @param logger: logging object to log messages
        @type Logging.logger
        @return plotly.graph_objects.Figure instance
        """
        fig = go.Figure()

        for trace in self.__traces:
            # Work on copies: the previous implementation appended the closing
            # point directly to the trace's own lists, so each call to
            # to_plotly() grew every trace by one duplicated point.
            radius_values = list(trace.radius_values)
            theta_values = list(trace.theta_values)

            # Repeat the first point at the end to close the polygon
            if radius_values:
                radius_values.append(radius_values[0])
                theta_values.append(theta_values[0])

            fig.add_trace(go.Scatterpolar(
                name=trace.trace_name,
                r=[rad['value'] for rad in radius_values],
                text=[rad['text'] for rad in radius_values],
                theta=theta_values,
                mode='lines'
            ))

        layout = {}
        layout.update(
            {'title': self.get_default_title_layout(self.chart_name)})
        layout.update({'width': 600})
        layout.update({'height': 450})
        layout.update({'autosize': False})
        layout.update({'font': self.get_default_font_layout()})
        fig.update_layout(layout)

        return fig

    def to_plotly_dict(self, logger=None):
        """ Convert current instance to a plotly object and then to a dictionary

        @param logger: logger instance
        @type Logging.logger
        @return dict with the plotly figure plus post-processing metadata
        """
        json = self.to_plotly(logger).to_dict()

        json[self.CSV_DATA] = self._plot_csv_data
        json[self.LOGO_NOTOFFICIAL] = self.logo_notofficial
        json[self.LOGO_OFFICIAL] = self.logo_official
        json[self.LOGO_WORK_IN_PROGRESS] = self.logo_work_in_progress

        return json
|
<gh_stars>0
'''
File name: NavMerge_test.py
Programmed by: <NAME>
Date: 2019-11-05
Unit tests for NavMerge.py.
'''
from numpy import array, allclose
from numpy.linalg import norm
from nav.NavMerge import *
from nav.utils.common_utils import unit_test
from nav.utils.constants import PASS, FAIL
def merge_accel_test_null():
    """merge_accel on all-zero inputs should yield a zero acceleration vector."""
    description = 'merge_accel_test_null - Test merge_accel with zeroed-out inputs'
    # inputs
    prev_position = array([0.0, 0.0, 0.0])
    accel_nc = array([0.0, 0.0, 0.0])
    accel_c = array([0.0, 0.0, 0.0])
    # expected output
    expected = array([0.0, 0.0, 0.0])
    # run and compare
    merged = merge_accel(prev_position, accel_nc, accel_c)
    passed = allclose(merged, expected, atol=0.001)
    return ((PASS if passed else FAIL), description)
def merge_accel_test_values():
    """merge_accel with non-zero inputs; the gravity term cancels at Earth radius."""
    description = 'merge_accel_test_values - Test merge_accel with non-zero inputs'
    # inputs
    prev_position = array([0.0, 0.0, 6371000.0])
    accel_nc = array([1.0, 1.0, 0.0])
    accel_c = array([1.0, 1.0, -G_E/6371000**2])
    # expected output
    expected = array([1.0, 1.0, 0.0])
    # run and compare
    merged = merge_accel(prev_position, accel_nc, accel_c)
    passed = allclose(merged, expected, atol=0.001)
    return ((PASS if passed else FAIL), description)
def merge_position_test_null():
    """merge_position on all-zero inputs should return the origin."""
    description = 'merge_position_test_null - Test merge_position with zeroed-out inputs'
    # inputs
    prev_position = array([0.0, 0.0, 0.0])
    prev_velocity = array([0.0, 0.0, 0.0])
    dt = 0.0
    accel_merged = array([0.0, 0.0, 0.0])
    gps = array([0.0, 0.0, 0.0])
    altitude = 0.0
    # expected output
    expected = array([0.0, 0.0, 0.0])
    # run and compare
    merged = merge_position(prev_position, prev_velocity, dt, accel_merged, gps, altitude)
    passed = allclose(merged, expected, atol=0.001)
    return ((PASS if passed else FAIL), description)
def merge_position_test_values():
    """merge_position blends dead-reckoning with GPS/altimeter measurements."""
    description = 'merge_position_test_values - Test merge_position with non-zero inputs'
    # inputs
    prev_position = array([1.0, 1.0, 1.0])
    prev_velocity = array([1.0, 1.0, 1.0])
    dt = 0.1
    accel_merged = array([5.0, 5.0, 5.0])
    gps = array([1.2, 1.2, 1.2])
    altitude = 1.1
    # expected output
    expected = array([1.1625, 1.1625, 1.1375])
    # run and compare
    merged = merge_position(prev_position, prev_velocity, dt, accel_merged, gps, altitude)
    passed = allclose(merged, expected, atol=0.001)
    return ((PASS if passed else FAIL), description)
def merge_velocity_test_null():
    """merge_velocity on all-zero inputs should return zero velocity."""
    description = 'merge_velocity_test_null - Test merge_velocity with zeroed-out inputs'
    # inputs
    prev_velocity = array([0.0, 0.0, 0.0])
    dt = 0.0
    accel_merged = array([0.0, 0.0, 0.0])
    # expected output
    expected = array([0.0, 0.0, 0.0])
    # run and compare
    merged = merge_velocity(prev_velocity, dt, accel_merged)
    passed = allclose(merged, expected, atol=0.001)
    return ((PASS if passed else FAIL), description)
def merge_velocity_test_values():
    """merge_velocity integrates acceleration over dt onto the previous velocity."""
    description = 'merge_velocity_test_null - Test merge_velocity with non-zero inputs'
    # inputs
    prev_velocity = array([1.0, 1.0, 1.0])
    dt = 0.1
    accel_merged = array([10.0, 10.0, 10.0])
    # expected output: v + a*dt = 1 + 10*0.1 = 2
    expected = array([2.0, 2.0, 2.0])
    # run and compare
    merged = merge_velocity(prev_velocity, dt, accel_merged)
    passed = allclose(merged, expected, atol=0.001)
    return ((PASS if passed else FAIL), description)
def merge_attitude_test():
    """merge_attitude (F2019) should pass the current attitude through unchanged."""
    description = 'merge_attitude_test - Test merge_attitude function with an input (F2019 version just returns the input)'
    # inputs (quaternions + rotation increment)
    prev_attitude = array([0.5, 0.5, 0.5, 0.5])
    current_attitude = array([1.0, 0.0, 0.0, 0.0])
    delta_theta = array([1.0, 1.0, 1.0])
    # expected output: identical to current_attitude
    expected = array([1.0, 0.0, 0.0, 0.0])
    # run and compare
    merged = merge_attitude(prev_attitude, current_attitude, delta_theta)
    passed = allclose(merged, expected, atol=0.001)
    return ((PASS if passed else FAIL), description)
def merge_main_test():
    '''
    Covered by the overall integration test found in NavMain_test.py;
    there is intentionally nothing to run here.
    '''
    pass
# Test Loop
def main():
    """Run every NavMerge unit test through the shared unit_test harness."""
    module_name = 'NavMerge.py'
    tests = [
        merge_accel_test_null,
        merge_accel_test_values,
        merge_position_test_null,
        merge_position_test_values,
        merge_velocity_test_null,
        merge_velocity_test_values,
        merge_attitude_test,
    ]
    unit_test(module_name, tests)


if __name__ == '__main__':
    main()
|
<filename>prune/stats.py<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
"""
Gets stats and plots stuff given a protocol
Usage:
stats.py <database.task.protocol> [--set=<set> --filter_unk --crop=<crop> --hist --verbose --save]
stats.py -h | --help
Common options:
<database.task.protocol> Experimental protocol (e.g. "Etape.SpeakerDiarization.TV")
"""
import os
from docopt import docopt
from allies.utils import print_stats
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from pyannote.database import get_protocol
sns.set_style("whitegrid", {'axes.grid': False})
np.set_printoptions(precision=2, suppress=True)
FIGURE_DIR = '.'
def plot_speech_duration(values, protocol_name, set, hist=True, crop=None, save=False):
    """Plot (or save) the per-speaker speech-duration distribution.

    NOTE(review): the `set` parameter shadows the builtin `set`; it names the
    protocol subset (e.g. "train"). Kept as-is for interface compatibility.

    :param values: per-speaker speech durations (seconds); sorted in place
    :param protocol_name: protocol name, used in the title and file name
    :param set: protocol subset name
    :param hist: if True draw a normed histogram, otherwise a scatter plot
    :param crop: optional fraction (0-1) of the biggest speakers to keep
    :param save: if True save the figure under FIGURE_DIR instead of showing it
    """
    # Keep only the `crop` fraction of speakers with the most speech.
    keep_n = len(values) if crop is None else int(len(values) * crop)
    values.sort()
    values = values[-keep_n:]
    mean = np.mean(values)
    std = np.std(values)
    print(f"mean: {mean:.2f}")
    print(f"std: {std:.2f}")
    print(f"mean+std: {mean + std:.2f}")
    plt.figure(figsize=(12, 10))
    title = (
        f"of the speech duration in {protocol_name}.{set} "
        f"of the {keep_n} biggest speakers"
    )
    if hist:
        sns.distplot(values, kde=False, norm_hist=True)
        plt.ylabel("density")
        plt.xlabel("speech duration (s)")
        plt.title("Normed histogram " + title)
    else:
        plt.title("Plot " + title)
        plt.ylabel("speech duration (s)")
        plt.xlabel("speaker #")
        plt.plot(values, ".")
        # Flat line at the mean with +/- std error bars across all speakers.
        plt.errorbar(np.arange(len(values)), [mean for _ in values],
                     [std for _ in values])
    plt.legend()
    fig_type = "hist" if hist else "plot"
    save_path = os.path.join(FIGURE_DIR,
                             f"speech_duration.{protocol_name}.{set}.{fig_type}.{keep_n}.png")
    if save:
        plt.savefig(save_path)
        print(f"succesfully saved {save_path}")
    else:
        plt.show()
def quartiles(array, **kwargs):
    """Return the five-number summary (min, Q1, median, Q3, max) of *array*."""
    probabilities = [i / 4 for i in range(5)]
    return np.quantile(array, probabilities, **kwargs)
def deciles(array, **kwargs):
    """Return the 11 decile values (0%, 10%, ..., 100%) of *array*."""
    probabilities = np.arange(0, 1.1, 0.1)
    return np.quantile(array, probabilities, **kwargs)
def main(args):
    """Print duration statistics for a protocol subset and plot them.

    :param args: docopt argument dict (see the module docstring for usage)
    """
    protocol_name = args['<database.task.protocol>']
    # Default to the training subset when --set is not given.
    set = args['--set'] if args['--set'] else "train"
    filter_unk = args['--filter_unk']
    crop = float(args['--crop']) if args['--crop'] else None
    hist = args['--hist']
    # NOTE(review): `verbose` is parsed but never used below.
    verbose = args['--verbose']
    save = args['--save']
    protocol = get_protocol(protocol_name)
    print(f"getting stats from {protocol_name}.{set}...")
    stats = protocol.stats(set)
    print_stats(stats)
    if filter_unk:
        # Drop pseudo-speakers carrying the '#unknown#' label.
        values = [value for label, value in stats['labels'].items() if
                  '#unknown#' not in label]
    else:
        values = list(stats['labels'].values())
    print(f"n_speaking_speakers: {np.array(values).nonzero()[0].shape[0]}")
    print("quartiles:")
    print(quartiles(values))
    print("deciles:")
    print(deciles(values))
    plot_speech_duration(values, protocol_name, set, hist, crop, save)
if __name__ == '__main__':
args = docopt(__doc__)
main(args)
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import json
from threathunter_common.util import json_dumps
from nebula.views.base import BaseHandler
from nebula.dao.user_dao import authenticated
from nebula.dao.user_dao import UserDao
from nebula.dao.group_dao import GroupDao
logger = logging.getLogger('nebula.api.user')
class UserListHandler(BaseHandler):
    """REST handler for listing and batch-creating users (/auth/users)."""

    REST_URL = '/auth/users'

    @authenticated
    def get(self):
        """
        list all users

        @API
        summary: list all users
        notes: get detail of users
        tags:
          - auth
        responses:
          '200':
            description: users
            schema:
              $ref: '#/definitions/User'
          default:
            description: Unexpected error
            schema:
              $ref: '#/definitions/Error'
        """
        self.set_header('content-type', 'application/json')
        try:
            user_list = UserDao().get_user_detail_list()
            # Visibility rules:
            #   the root group may view members of the root and manager groups,
            #   the manager group may view members of ordinary groups,
            #   ordinary groups may not view any group's members.
            manage_groups = GroupDao().get_manage_groups(self.group.id)
            result = [user for user in user_list if user[
                'group_id'] in manage_groups]
            self.finish(json_dumps(
                {'status': 200, 'msg': 'ok', 'values': result}))
        except Exception as e:
            logger.error(e)
            self.process_error(-1, '查询用户失败,请联系管理员')

    @authenticated
    def post(self):
        """
        add a list of users

        @API
        summary: add a list of users
        notes: add a list of users
        tags:
          - auth
        parameters:
          -
            name: users
            in: body
            required: true
            type: json
            description: the list of the users json
        produces:
          - application/json
        """
        self.set_header('content-type', 'application/json')
        body = self.request.body
        try:
            # Creation rules:
            #   members of the root group may create root / manager members,
            #   members of the manager group may create ordinary members,
            #   ordinary group members may not create users.
            group_dao = GroupDao()
            manage_groups = group_dao.get_manage_groups(self.group.id)
            user_dao = UserDao()
            creator = self.user.id
            for user in json.loads(body):
                group_id = user['group_id']
                if group_id in manage_groups:
                    user['creator'] = creator
                    result = user_dao.add_user_and_group(user)
                    if not result:
                        # NOTE(review): process_error does not abort the loop,
                        # so remaining users are still processed — confirm intended.
                        self.process_error(-1, '已存在相同名字用户')
                else:
                    self.process_error(-1, '权限不足,请联系管理员')
            self.finish(json_dumps({'status': 200, 'msg': 'ok', 'values': []}))
        except Exception as e:
            logger.error(e)
            self.process_error(-1, '新增用户失败,请联系管理员')
class UserQueryHandler(BaseHandler):
    """REST handler for reading and updating a single user (/auth/users/{id})."""

    REST_URL = '/auth/users/{id}'

    @authenticated
    def get(self, id):
        """
        get a specific user detail

        @API
        summary: get a specific user detail
        notes: get a specific user detail
        tags:
          - auth
        parameters:
          -
            name: id
            in: path
            required: true
            type: integer
            description: id of the user
        """
        self.set_header('content-type', 'application/json')
        try:
            user = UserDao().get_user_detail_by_id(id)
            # Visibility rules:
            #   the root group may view members of the root and manager groups,
            #   the manager group may view members of ordinary groups,
            #   ordinary groups may not view any group's members.
            manage_groups = GroupDao().get_manage_groups(self.group.id)
            if user['group_id'] not in manage_groups:
                # Out of the caller's scope: return an empty result, not an error.
                user = {}
            self.finish(json_dumps(
                {'status': 200, 'msg': 'ok', 'values': user}))
        except Exception as e:
            logger.error(e)
            self.process_error(-1, '查询用户失败,请联系管理员')

    @authenticated
    def post(self, id):
        """
        modify a specific user

        @API
        summary: modify a specific user
        notes: modify a specific user
        tags:
          - auth
        parameters:
          -
            name: id
            in: path
            required: true
            type: integer
            description: the id of the user
          -
            name: user
            in: body
            required: true
            type: json
            description: the body of the user
        """
        self.set_header('content-type', 'application/json')
        user = json.loads(self.request.body)
        try:
            # Modification rules:
            #   the root group may modify root / manager group members,
            #   the manager group may modify ordinary group members,
            #   ordinary group members may not modify anyone.
            manage_groups = GroupDao().get_manage_groups(self.group.id)
            user_dao = UserDao()
            old_user = user_dao.get_user_detail_by_id(id)
            old_group_id = old_user['group_id']
            new_group_id = user.get('group_id', None)
            if old_group_id in manage_groups:
                if new_group_id and new_group_id not in manage_groups:
                    # Moving the user into a group the caller does not manage
                    # would be a privilege escalation: refuse.
                    return self.process_error(-1, '权限不足,请联系管理员')
                result = user_dao.update_user(id, user)
                if result:
                    self.finish(json_dumps(
                        {'status': 200, 'msg': 'ok', 'values': []}))
                else:
                    self.process_error(-1, '已存在相同用户名用户')
            else:
                self.process_error(-1, '权限不足,请联系管理员')
        except Exception as e:
            logger.error(e)
            self.process_error(-1, '修改用户失败,请联系管理员')
|
<reponame>binary-signal/newsapi.org<filename>newsapi/client.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .objects import Source, Article
from .exceptions import *
import requests
import json
import logging
module_logger = logging.getLogger('news-api')
class Client:
    """Minimal client for the newsapi.org v2 REST API."""

    api = 'https://newsapi.org/v2/'

    def __init__(self, api_key):
        """
        :param api_key: newsapi.org API key
        :raises TypeError: if api_key is not a string
        """
        self.logger = logging.getLogger("news-api")

        if not isinstance(api_key, str):
            self.logger.error("Api key must be string type")
            raise TypeError("Api key must be string type")
        self.api_key = api_key

    def api_call(self, endpoint, payload=None):
        """ low level api call to newsapi.org

        :param endpoint: endpoint path ('top-headlines', 'everything', 'sources')
        :param payload: dict of query parameters; the API key is added automatically
        :return: decoded JSON response dict, or None when the HTTP request failed
        :raises BadRequest, UnauthorizedRequest, ApiRateLimit, ServerError,
            NewsApiError: mapped from non-200 status codes
        """
        url = self.api + endpoint
        # Copy instead of mutating: the previous version used a mutable default
        # argument ({}) and wrote the API key into the caller's dict.
        payload = dict(payload) if payload else {}
        payload['apiKey'] = self.api_key

        try:
            resp = requests.get(url, params=payload)
        except requests.exceptions.RequestException as e:
            # The previous `except requests.exceptions` named a module, which
            # raises TypeError at exception-matching time instead of catching.
            logging.error(e)
            print(e)
            return

        response = json.loads(resp.text)

        # on error: map the HTTP status code onto the API exception hierarchy
        if resp.status_code != 200:
            self.logger.error("{} {} {}".format(response['message'], response['status'], response['code'], ))

            if resp.status_code == 400:
                raise BadRequest(response['message'])
            elif resp.status_code == 401:
                raise UnauthorizedRequest(response['message'])
            elif resp.status_code == 429:
                raise ApiRateLimit(response['message'])
            elif resp.status_code == 500:
                raise ServerError(response['message'])
            else:
                # capture a generic error return code
                raise NewsApiError(response['message'])

        # on success
        return response

    def top_headlines(self, sources=None, country=None, category=None, q=None, pageSize=20, page=None):
        """Fetch breaking headlines.

        :param sources: comma-separated source identifiers
        :param country: 2-letter ISO 3166-1 country code
        :param category: category to get headlines for
        :param q: keywords or phrase to search for
        :param pageSize: number of results per page (default 20)
        :param page: page number
        :return: tuple (list of Article, total number of results)
        """
        response = self.api_call(endpoint='top-headlines', payload={'sources': sources,
                                                                    'country': country,
                                                                    'category': category,
                                                                    'q': q,
                                                                    'pageSize': pageSize,
                                                                    'page': page})
        return [Article(**s) for s in response['articles']], response['totalResults']

    def everything(self, q=None, sources=None, domains=None, from_=None, to=None,
                   language=None, sortBy=None, pageSize=None, page=None):
        """Search through every article newsapi.org has indexed.

        :param q: keywords or phrase to search for
        :param sources: comma-separated source identifiers
        :param domains: comma-separated domains to restrict the search to
        :param from_: oldest article date/time (ISO 8601)
        :param to: newest article date/time (ISO 8601)
        :param language: 2-letter ISO-639-1 language code
        :param sortBy: sort order ('relevancy', 'popularity', 'publishedAt')
        :param pageSize: number of results per page
        :param page: page number
        :return: list of Article
        """
        response = self.api_call(endpoint='everything', payload={'q': q,
                                                                 'sources': sources,
                                                                 'domains': domains,
                                                                 'from': from_,
                                                                 'to': to,
                                                                 'language': language,
                                                                 'sortBy': sortBy,
                                                                 'pageSize': pageSize,
                                                                 'page': page})
        return [Article(**s) for s in response['articles']]

    def sources(self, category=None, language=None, country=None):
        """
        Provides a list of the news sources and blogs available on News API.
        You will need this to programmatically locate the identifier for the
        source you want articles from when querying the /articles endpoint.

        :param category: (optional) the category you would like to get sources for
        :param language: (optional) 2-letter ISO-639-1 language code
        :param country: (optional) the 2-letter ISO 3166-1 code of the country
        :return: list of Source
        """
        data = self.api_call(endpoint='sources', payload={'category': category,
                                                          'language': language,
                                                          'country': country})
        return [Source(**s) for s in data['sources']]
|
import h5py
import numpy as np
import sys
import os
def join(path, key):
    """Join an HDF5 group path and a child key with exactly one '/' separator."""
    return path + key if path[-1] == '/' else path + '/' + key
#def build_mocap_models(rootdir, h5file):
# sgrp_root = 'mocap/models'
# sds_wb = join(sgrp_root, 'wb.vsk')
#
# print 'creating group: ' + sgrp_root
# #h5file.create_group(sgrp_root)
# print 'creating dataset: ' + sds_wb
# #h5file.create_dataset(sds_wb,,dtype='f')
def build_mocap_sessions(rootdir, h5file, session):
    # Mirror one mocap session directory (<rootdir>/mocap/sessions/<session>)
    # into the HDF5 group hierarchy mocap/sessions/<session>.
    # (Python 2 source: uses print statements.)
    sgrp_parent = 'mocap/sessions'
    path_parent = os.path.join(os.path.join(rootdir, 'mocap'), 'sessions')
    sgrp_root = join(sgrp_parent, session)
    path_root = os.path.join(path_parent, session)
    sgrp_raw = join(sgrp_root, 'raw')
    path_raw = os.path.join(path_root, 'raw')
    sds_raw_state = join(sgrp_raw, 'state')
    path_raw_state = os.path.join(path_raw, 'state.txt')
    # Note: limits on git suggest video should not be integrated directly
    # into the dataset. If the data is hosted through a different means
    # video can be embedded, but for now, video is excluded and maintained
    # as individual files
    #sds_raw_video = join(sgrp_raw, 'video')
    sds_raw_signals = join(sgrp_raw, 'signals')
    sgrp_interp = join(sgrp_root, 'interpolated')
    sds_interp_state = join(sgrp_interp, 'state')
    print 'creating group: ' + sgrp_raw
    h5file.create_group(sgrp_raw)
    print 'creating dataset: ' + sds_raw_state
    # Raw state file: whitespace-separated text, one sample per row.
    arr = np.loadtxt(path_raw_state)
    #print arr
    ds = h5file.create_dataset(sds_raw_state, data=arr, compression='gzip')
    # Column layout of each row: time plus a 7-DOF shell pose.
    ds.attrs['fields'] = 't, shell(7){pos(x,y,z),rot(qx,qy,qz,qw)}'
    #ds.attrs['sample rate'] = 1e-2
    #print ds.attrs['fields']
    # The signals dataset and interpolated branch are still stubbed out.
    print 'creating dataset: ' + sds_raw_signals
    #h5file.create_dataset(sds_raw_signals,,dtype='f')
    print 'creating group: ' + sgrp_interp
    #h5file.create_group(sgrp_interp)
    print 'creating dataset: ' + sds_interp_state
    #h5file.create_dataset(sds_interp_state,,dtype='f')
def build_mocap_branch(rootdir, h5file):
    """Create the mocap branch: sessions '01' through '10'."""
    #build_mocap_models(rootdir, h5file)
    for session_number in range(1, 11):
        build_mocap_sessions(rootdir, h5file, str(session_number).zfill(2))
def build_simulation_gazebo_branch(rootdir, h5file):
    # Build the simulation/gazebo branch, one sub-group per physics engine.
    # (Python 2 source: uses print statements.)
    sims = ['ode','dart']
    sgrp_root = 'simulation/gazebo'
    for sim in sims:
        sgrp_sim = join(sgrp_root, sim)
        #sds_10us = join(sgrp_sim, 'step=10us')
        sds_state = join(sgrp_sim, 'state')
        print 'creating group: ' + sgrp_sim
        #h5file.create_group(sgrp_sim)
        #print 'creating dataset: ' + sds_10us
        #h5file.create_dataset(sds_10us,,dtype='f')
        print 'creating dataset: ' + sds_state
        # NOTE(review): `path_state` is never defined anywhere in this file,
        # so this line raises NameError when the function runs — presumably it
        # should be built from rootdir/sim like the mocap paths; confirm & fix.
        arr = np.loadtxt(path_state)
        #print arr
        #ds = h5file.create_dataset(sds_state, data=arr, compression='gzip')
        #ds.attrs['step'] = 1e-5
        #ds.attrs['fields'] = 't,shell(13){pos(x,y,z),rot(qx,qy,qz,qw),lvel(dx,dy,dz),avel(omegax,omegay,omegaz)},joint(2){angle,vel}'
        #ds.attrs['sample rate'] = 1e-2
def build_simulation_simwise_branch(rootdir, h5file):
    # Build the simulation/simwise4d branch. Currently only prints the plan;
    # actual group/dataset creation is still stubbed out.
    sgrp_root = 'simulation/simwise4d'
    sds_1ms = join(sgrp_root, 'step=1ms')
    print 'creating group: ' + sgrp_root
    #h5file.create_group(sgrp_root)
    print 'creating dataset: ' + sds_1ms
    #h5file.create_dataset(sds_1ms,,dtype='f')
    #Note: should import the simulation file here as well
def build_simulation_branch(rootdir, h5file):
    """Create every simulation sub-branch of the HDF5 file."""
    for build in (build_simulation_gazebo_branch, build_simulation_simwise_branch):
        build(rootdir, h5file)
#def build_models_branch(f):
# sgrp_root = 'models'
# #sds_shell = join(sgrp_root, '')
#
# print 'creating group: ' + sgrp_root
# #h5file.create_group(sgrp_root)
# #print 'creating dataset: ' + sds_shell
# #h5file.create_dataset(sds_shell,,dtype='f')
# root dir containing the filesystem hierarchy that maps the hdf5 structure
# Usage: python <this script> <rootdir> <output.h5>
rootdir = sys.argv[1]
h5fpath = sys.argv[2]
# Open the output HDF5 file for (over)writing.
h5file = h5py.File(h5fpath, 'w')
#h5file = []
build_mocap_branch(rootdir, h5file)
build_simulation_branch(rootdir, h5file)
#build_models_branch(rootdir, h5file)
#print h5file.keys()
# print out all sessions
#for session in f[gsessions]:
#    print session
h5file.close()
|
import logging
from argparse import ArgumentParser
from collections import OrderedDict
import numpy as np
import pandas as pd
from ampligraph.datasets import load_wn18
from ampligraph.latent_features import ComplEx, HolE, TransE
from ampligraph.evaluation import evaluate_performance, mrr_score, hits_at_n_score
from ampligraph.latent_features import ComplEx
from ampligraph.utils import save_model, restore_model
import os
import tensorflow as tf
import random
from numpy import cumsum
from more_itertools import flatten
from sklearn.utils import Memory
import pprint
from tspy import TSP
import numpy as np
from pandas import CategoricalDtype
from scipy.spatial.distance import cdist
# --- Script setup: logging, CLI arguments, and the edge-list CSV ---
logging.getLogger().setLevel(logging.INFO)
#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
parser = ArgumentParser(description='Projecting graph to 3d (and embeddings)')
parser.add_argument('csv',
                    nargs='?',
                    type=str,
                    help='csv with n1, n2, rel columns',
                    default="./test")
args = parser.parse_args()
# getting whole wordnet graph
ke_model_path = "./knowledge_graph_model/csv_ke.amplimodel"
ke_wnkeys_path = "./knowledge_graph_model/csv_ke.wnkeys"
# Pipe-separated CSV with columns n1, rel, n2 -> list of (head, relation, tail) triples.
table = pd.read_csv(args.csv, sep='|', header=0)
whole_graph = list(zip(table['n1'], table['rel'], table['n2']))
# NOTE(review): the retrain condition is hard-wired to True; the original
# file-existence check is kept in the trailing comment.
if True: #not os.path.isfile(ke_wnkeys_path) or not os.path.isfile(ke_model_path):
    pprint.pprint (whole_graph[:60])
    random.shuffle(whole_graph)
def percentage_split(seq, percentage_dict):
cdf = cumsum(list(percentage_dict.values()))
assert cdf[-1] == 1.0
stops = list(map(int, cdf * len(seq)))
return {key: seq[a:b] for a, b, key in zip([0]+stops, stops, percentage_dict.keys())}
    # Corpus split: 80/10/10 train/test/valid over the shuffled triple list.
    corpus_split_layout = {
        'train': 0.8,
        'test': 0.1,
        'valid': 0.1
    }
    X = percentage_split(whole_graph, corpus_split_layout)
    # Entity vocabulary is built from the training split only; valid/test
    # triples mentioning unseen entities are dropped below.
    known_entities = set (flatten([r[0], r[2]] for r in X['train']))
    id2tok = {i: tok for i, tok in enumerate(known_entities)}
    tok2id = {tok: i for i, tok in enumerate(known_entities)}
    import pickle
    with open(ke_wnkeys_path, 'wb') as handle:
        pickle.dump((tok2id, id2tok), handle)
    # Re-encode triples as (entity_id, relation, entity_id) arrays.
    X['train'] = np.array([list((tok2id[r[0]], r[1], tok2id[r[2]])) for r in X['train'] if r[0] in known_entities and r[2] in known_entities])
    X['valid'] = np.array([list((tok2id[r[0]], r[1], tok2id[r[2]])) for r in X['valid'] if r[0] in known_entities and r[2] in known_entities])
    X['test'] = np.array([list((tok2id[r[0]], r[1], tok2id[r[2]])) for r in X['test'] if r[0] in known_entities and r[2] in known_entities])
    #import guppy
    #h = guppy.hpy()
    #print (h.heap())
    X_train, X_valid = X['train'], X['valid']
    print('Train set size: ', X_train.shape)
    print('Test set size: ', X_valid.shape)
"""
k=DEFAULT_EMBEDDING_SIZE,
eta=DEFAULT_ETA,
epochs=DEFAULT_EPOCH,
batches_count=DEFAULT_BATCH_COUNT,
seed=DEFAULT_SEED,
embedding_model_params={'norm': DEFAULT_NORM_TRANSE,
'normalize_ent_emb': DEFAULT_NORMALIZE_EMBEDDINGS,
'negative_corruption_entities': DEFAULT_CORRUPTION_ENTITIES,
'corrupt_sides': DEFAULT_CORRUPT_SIDE_TRAIN},
optimizer=DEFAULT_OPTIM,
optimizer_params={'lr': DEFAULT_LR},
loss=DEFAULT_LOSS,
loss_params={},
regularizer=DEFAULT_REGULARIZER,
regularizer_params={},
initializer=DEFAULT_INITIALIZER,
initializer_params={'uniform': DEFAULT_XAVIER_IS_UNIFORM},
verbose=DEFAULT_VERBOSE):
"""
model = TransE(verbose=True, k=70, epochs=300)
"""
model = ComplEx(batches_count=10, seed=0, epochs=60, k=50, eta=10,
# Use adam optimizer with learning rate 1e-3
optimizer='adam', optimizer_params={'lr': 1e-3},
# Use pairwise loss with margin 0.5
loss='pairwise', loss_params={'margin': 0.5},
# Use L2 regularizer with regularizer weight 1e-5
regularizer='LP', regularizer_params={'p': 2, 'lambda': 1e-5},
# Enable stdout messages (set to false if you don't want to display)
verbose=True)"""
print ("Training...")
x_orig = load_wn18()
model.fit(X_train)
save_model(model, model_name_path=ke_model_path)
model2 = TransE(verbose=True, k=3, epochs=300)
model2.fit(X_train)
save_model(model2, model_name_path=ke_model_path + '2')
#filter_triples = np.concatenate((X_train, X_valid))
#filter = np.concatenate((X['train'], X['valid'], X['test']))
#ranks = evaluate_performance(X['test'],
# model=model,
# filter_triples=filter,
# use_default_protocol=True, # corrupt subj and obj separately while evaluating
# verbose=True)
#mrr = mrr_score(ranks)
#hits_10 = hits_at_n_score(ranks, n=10)
#print("MRR: %f, Hits@10: %f" % (mrr, hits_10))
# Output: MRR: 0.886406, Hits@10: 0.935000
else:
model = restore_model(model_name_path=ke_model_path)
model2 = restore_model(model_name_path=ke_model_path+'2')
import pickle
with open(ke_wnkeys_path, 'rb') as handle:
tok2id, id2tok = pickle.load(handle)
import pprint
def find_in_tok2id(w):
    """Debug helper: print every vocabulary key containing *w* as a substring."""
    matches = (key for key in tok2id if w in key)
    for key in matches:
        print (w, key, "it is alphabetically there")
tok2id = OrderedDict (tok2id)
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
print("Extracting Embeddings..")
# Every node name appearing in the CSV (heads and tails).
alle = table['n1'].tolist() + table['n2'].tolist()
# name -> (embedding vector, vocabulary id), for names present in the vocabulary.
embedding_map = dict([(str(a), (model.get_embeddings(str(tok2id[str(a)])), tok2id[str(a)])) for a in alle if str(a) in tok2id])
embedding_map2 = dict([(str(a), (model2.get_embeddings(str(tok2id[str(a)])), tok2id[str(a)])) for a in alle if str(a) in tok2id])
embeddings_array = np.array([i[0] for i in embedding_map.values()])
print ("PCA")
embeddings_3d_pca = PCA(n_components=3).fit_transform(embeddings_array)
print ("TSNE")
embeddings_3d_tsne = TSNE(n_components=3).fit_transform(embeddings_array)
print("k2")
# The k=3 model's embeddings are already 3-dimensional coordinates.
embeddings_k2 = np.array([i[0] for i in embedding_map2.values()])
print (embeddings_3d_pca.shape)
print (embeddings_k2.shape)
print ("pandas")
# One row per entity: id plus coordinates in all three 3D projections.
table = pd.DataFrame(data={'name': list(s.replace("Synset('", '').replace("')", "") for s in embedding_map.keys()),
                           'id': [i[1] for i in embedding_map.values()],
                           'x_pca': embeddings_3d_pca[:, 0],
                           'y_pca': embeddings_3d_pca[:, 1],
                           'z_pca': embeddings_3d_pca[:, 2],
                           'x_tsne': embeddings_3d_tsne[:, 0],
                           'y_tsne': embeddings_3d_tsne[:, 1],
                           'z_tsne': embeddings_3d_tsne[:, 2],
                           'x_k2': embeddings_k2[:, 0],
                           'y_k2': embeddings_k2[:, 1],
                           'z_k2': embeddings_k2[:, 2]
                           })
print ('clusters')
import hdbscan
# Shared HDBSCAN parameters used for every coordinate system below.
# NOTE(review): sklearn's Memory(cachedir=...) keyword is deprecated/removed
# in newer sklearn versions — verify the pinned version.
std_args = {
    'algorithm': 'best',
    'alpha': 1.0,
    'approx_min_span_tree': True,
    'gen_min_span_tree': False,
    'leaf_size': 20,
    'memory': Memory(cachedir=None),
    'metric': 'euclidean',
    'min_cluster_size': 13,
    'min_samples': None,
    'p': None
}
def cluster(embeddings_array, **kwargs):
    """Run HDBSCAN on the embedding matrix; return per-row cluster labels."""
    print ('dimensionality', embeddings_array.shape)
    model = hdbscan.HDBSCAN(**kwargs)
    model.fit(np.array(embeddings_array))
    labels = model.labels_
    print ('number of clusters: ', max(labels))
    return labels
# Cluster labels in each coordinate system: 3D PCA, 3D t-SNE, raw k=3
# embeddings, and the full k=70 embedding space ("kn").
table['cl_pca'] = cluster(embeddings_3d_pca, **std_args)
table['cl_tsne'] = cluster(embeddings_3d_tsne, **std_args)
table['cl_k2'] = cluster(embeddings_k2, **std_args)
table['cl_kn'] = cluster(embeddings_array, **std_args)
table.to_csv("./knowledge_graph_coords/knowledge_graph_3d_choords.csv", sep='\t', header=True,
             index=False)
# Round-trip through disk; the first column ('name') becomes the index on reload.
table = pd.read_csv("./knowledge_graph_coords/knowledge_graph_3d_choords.csv", index_col=0, sep='\t')
things = ['pca', 'tsne', 'k2', 'kn']
def make_path (X, D):
    """Approximate a travelling-salesman tour through the given points.

    X is the coordinate matrix, D the pairwise distance matrix; returns the
    best tour found by a 2-opt refinement of a nearest-neighbour start.
    """
    from tspy.solvers import TwoOpt_solver
    problem = TSP()
    problem.read_data(X)   # data matrix
    problem.read_mat(D)    # distance matrix
    solver = TwoOpt_solver(initial_tour='NN', iter_num=100000)
    problem.get_approx_solution(solver)
    #problem.plot_solution('TwoOpt_solver')
    return problem.get_best_solution()
# For every coordinate system, order cluster centers along an approximate TSP
# tour and export them as a sorted CSV.
for kind in things:
    print ("writing table for %s " % kind)
    table['cl'] = table['cl_%s' % kind]
    cl_cols = table[['cl_%s' % k for k in things]]
    # Mean position of each cluster in every coordinate system.
    cl_df = table.groupby(by='cl').mean().reset_index()
    # Initialize fitness function object using coords_list
    print ("optimizing the path through all centers")
    # BUG FIX: the original assigned `subkind = "tsne"` (no underscore) but
    # read `sub_kind` below, so for kind == "kn" the stale value from the
    # previous loop iteration was used instead of the intended t-SNE coords.
    if kind == "kn":
        sub_kind = "tsne"
    else:
        sub_kind = kind
    subset = cl_df[[c + "_" + sub_kind for c in ['x', 'y', 'z']]]
    print (subset[:10])
    points = [list(x) for x in subset.to_numpy()]
    print (points[:10])
    print (len(points))
    arr = np.array(points)
    dist = Y = cdist(arr, arr, 'euclidean')
    # TSP tour over cluster centers; drop the repeated closing stop.
    new_path = make_path(np.array(points), dist)[:-1]
    print (new_path)
    cl_df[['cl_%s' % k for k in things]] = cl_cols
    # Sort clusters by their position on the tour, then store labels as ints.
    path_order_categories = CategoricalDtype(categories=new_path, ordered=True)
    cl_df['cl_%s' % kind] = cl_df['cl'].astype(path_order_categories)
    cl_df.sort_values(['cl_%s' % kind], inplace=True)
    cl_df['cl_%s' % kind] = cl_df['cl'].astype('int32')
    cl_df.to_csv('./knowledge_graph_coords/%s_clusters_mean_points.csv' % kind, sep='\t', header=True,
                 index=False)
    print (kind + " " + str(new_path))
logging.info("ampligraph and clustering finished")
<reponame>HarshCasper/mergify-engine<filename>mergify_engine/branch_updater.py
# -*- encoding: utf-8 -*-
#
# Copyright © 2018–2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import typing
import uuid
import tenacity
from mergify_engine import check_api
from mergify_engine import config
from mergify_engine import context
from mergify_engine import gitter
from mergify_engine.clients import http
class BranchUpdateFailure(Exception):
    """Raised when a branch update/rebase has definitively failed (no retry)."""

    def __init__(self, msg: str = "") -> None:
        # Short random error code appended so users can reference a specific
        # failure occurrence in reports.
        error_code = "err-code: " + uuid.uuid4().hex[-5:].upper()
        self.message = msg + "\n" + error_code
        # Modern zero-argument super() (original used the Python-2-style form).
        super().__init__(self.message)
class BranchUpdateNeedRetry(Exception):
    """Transient branch-update failure; tenacity retries on this type."""
    pass


class AuthenticationFailure(Exception):
    """Git credentials were rejected; callers try the next available token."""
    pass
# Substrings of git's error output mapped to the exception to raise.
# Checked in order in _do_rebase: the first matching entry wins.
GIT_MESSAGE_TO_EXCEPTION = collections.OrderedDict(
    [
        ("This repository was archived so it is read-only.", BranchUpdateFailure),
        ("organization has enabled or enforced SAML SSO.", BranchUpdateFailure),
        ("Invalid username or password", AuthenticationFailure),
        ("Repository not found", AuthenticationFailure),
        ("The requested URL returned error: 403", AuthenticationFailure),
        ("Patch failed at", BranchUpdateFailure),
        ("remote contains work that you do", BranchUpdateNeedRetry),
        ("remote end hung up unexpectedly", BranchUpdateNeedRetry),
        ("cannot lock ref 'refs/heads/", BranchUpdateNeedRetry),
        ("Could not resolve host", BranchUpdateNeedRetry),
        ("Operation timed out", BranchUpdateNeedRetry),
        ("No such device or address", BranchUpdateNeedRetry),
        ("Protected branch update failed", BranchUpdateFailure),
        ("Couldn't find remote ref", BranchUpdateFailure),
    ]
)

# Git messages meaning the shallow clone is insufficient; _do_rebase reacts
# by unshallowing and retrying the rebase.
GIT_MESSAGE_TO_UNSHALLOW = set(["shallow update not allowed", "unrelated histories"])
def pre_rebase_check(ctxt: context.Context) -> typing.Optional[check_api.Result]:
    """Return a failure Result when the PR cannot be rebased, else None.

    A PR from a fork is only rebasable when the author allows maintainer
    modifications; for private forks GitHub offers no such setting at all.
    """
    # Rebase is always possible for same-repo PRs or when edits are allowed.
    if not ctxt.pull_from_fork or ctxt.pull["maintainer_can_modify"]:
        return None

    if ctxt.pull["base"]["repo"]["private"]:
        # NOTE(jd): GitHub removed the ability to configure
        # `maintainer_can_modify` on private forks, which makes rebase
        # impossible.
        return check_api.Result(
            check_api.Conclusion.FAILURE,
            "Pull request can't be updated with latest base branch changes",
            "Mergify needs the permission to update the base branch of the pull request.\n"
            "GitHub does not allow a GitHub App to modify base branch for a private fork.\n"
            "You cannot `rebase` a pull request from a private fork.",
        )

    return check_api.Result(
        check_api.Conclusion.FAILURE,
        "Pull request can't be updated with latest base branch changes",
        "Mergify needs the permission to update the base branch of the pull request.\n"
        f"{ctxt.pull['base']['repo']['owner']['login']} needs to "
        "[authorize modification on its base branch]"
        "(https://help.github.com/articles/allowing-changes-to-a-pull-request-branch-created-from-a-fork/).",
    )
@tenacity.retry(
    wait=tenacity.wait_exponential(multiplier=0.2),
    stop=tenacity.stop_after_attempt(5),
    retry=tenacity.retry_if_exception_type(BranchUpdateNeedRetry),
)
async def _do_rebase(ctxt: context.Context, token: str) -> None:
    """Rebase the PR head branch onto its base in a shallow clone, force-push.

    Permanent git failures raise BranchUpdateFailure; transient ones raise
    BranchUpdateNeedRetry and are retried by the tenacity decorator above.
    """
    # NOTE(sileht):
    # $ curl https://api.github.com/repos/sileht/repotest/pulls/2 | jq .commits
    # 2
    # $ git clone https://XXXXX@github.com/sileht-tester/repotest \
    #           --depth=$((2 + 1)) -b sileht/testpr
    # $ cd repotest
    # $ git remote add upstream https://XXXXX@github.com/sileht/repotest.git
    # $ git log | grep Date | tail -1
    # Date:   Fri Mar 30 21:30:26 2018 (10 days ago)
    # $ git fetch upstream master --shallow-since="Fri Mar 30 21:30:26 2018"
    # $ git rebase upstream/master
    # $ git push origin sileht/testpr:sileht/testpr
    head_repo = (
        ctxt.pull["head"]["repo"]["owner"]["login"]
        + "/"
        + ctxt.pull["head"]["repo"]["name"]
    )
    base_repo = (
        ctxt.pull["base"]["repo"]["owner"]["login"]
        + "/"
        + ctxt.pull["base"]["repo"]["name"]
    )
    head_branch = ctxt.pull["head"]["ref"]
    base_branch = ctxt.pull["base"]["ref"]
    git = gitter.Gitter(ctxt.log)
    try:
        await git.init()
        await git.configure()
        # Same token authenticates both remotes (fetch base, push head).
        await git.add_cred(token, "", head_repo)
        await git.add_cred(token, "", base_repo)
        await git("remote", "add", "origin", f"{config.GITHUB_URL}/{head_repo}")
        await git("remote", "add", "upstream", f"{config.GITHUB_URL}/{base_repo}")
        # Fetch just deep enough to contain every commit of the PR.
        depth = len(await ctxt.commits) + 1
        await git("fetch", "--quiet", "--depth=%d" % depth, "origin", head_branch)
        await git("checkout", "-q", "-b", head_branch, "origin/%s" % head_branch)
        # Oldest commit date in the shallow clone bounds the base fetch below.
        output = await git("log", "--format=%cI")
        last_commit_date = [d for d in output.split("\n") if d.strip()][-1]
        await git(
            "fetch",
            "--quiet",
            "upstream",
            base_branch,
            "--shallow-since='%s'" % last_commit_date,
        )
        # Try to find the merge base, but don't fetch more that 1000 commits.
        for _ in range(20):
            await git("repack", "-d")
            try:
                await git(
                    "merge-base",
                    f"upstream/{base_branch}",
                    f"origin/{head_branch}",
                )
            except gitter.GitError as e:  # pragma: no cover
                if e.returncode == 1:
                    # We need more commits
                    await git("fetch", "-q", "--deepen=50", "upstream", base_branch)
                    continue
                raise
            else:
                break
        try:
            await git("rebase", "upstream/%s" % base_branch)
            await git("push", "--verbose", "origin", head_branch, "-f")
        except gitter.GitError as e:  # pragma: no cover
            # The shallow clone may be too shallow for the rebase itself;
            # on the known messages, unshallow completely and retry once.
            for message in GIT_MESSAGE_TO_UNSHALLOW:
                if message in e.output:
                    ctxt.log.info("Complete history cloned")
                    # NOTE(sileht): We currently assume we have only one parent
                    # commit in common. Since Git is a graph, in some case this
                    # graph can be more complicated.
                    # So, retrying with the whole git history for now
                    await git("fetch", "--unshallow")
                    await git("fetch", "--quiet", "origin", head_branch)
                    await git("fetch", "--quiet", "upstream", base_branch)
                    await git("rebase", "upstream/%s" % base_branch)
                    await git("push", "--verbose", "origin", head_branch, "-f")
                    break
            else:
                raise
        expected_sha = await git("log", "-1", "--format=%H")
        # NOTE(sileht): We store this for dismissal action
        await ctxt.redis.setex("branch-update-%s" % expected_sha, 60 * 60, expected_sha)
    except gitter.GitError as in_exception:  # pragma: no cover
        if in_exception.output == "":
            # SIGKILL...
            raise BranchUpdateNeedRetry()
        # Map known git error messages to the appropriate exception type.
        for message, out_exception in GIT_MESSAGE_TO_EXCEPTION.items():
            if message in in_exception.output:
                raise out_exception(
                    "Git reported the following error:\n"
                    f"```\n{in_exception.output}\n```\n"
                )
        else:
            # for-else: no known message matched — log and fail permanently.
            ctxt.log.error(
                "update branch failed: %s",
                in_exception.output,
                exc_info=True,
            )
            raise BranchUpdateFailure()
    except Exception:  # pragma: no cover
        ctxt.log.error("update branch failed", exc_info=True)
        raise BranchUpdateFailure()
    finally:
        await git.cleanup()
@tenacity.retry(
    wait=tenacity.wait_exponential(multiplier=0.2),
    stop=tenacity.stop_after_attempt(5),
    retry=tenacity.retry_if_exception_type(BranchUpdateNeedRetry),
)
async def update_with_api(ctxt: context.Context) -> None:
    """Update the PR branch through GitHub's `update-branch` API endpoint.

    Raises BranchUpdateFailure on permanent client-side errors and
    BranchUpdateNeedRetry on transient network/server errors (retried by
    the tenacity decorator above).
    """
    try:
        await ctxt.client.put(
            f"{ctxt.base_url}/pulls/{ctxt.pull['number']}/update-branch",
            api_version="lydian",  # type: ignore[call-arg]
            json={"expected_head_sha": ctxt.pull["head"]["sha"]},
        )
    except http.HTTPClientSideError as e:
        if e.status_code == 422:
            # 422 can mean our expected head sha is stale; if the branch
            # moved in the meantime, the update effectively already happened.
            refreshed_pull = await ctxt.client.item(
                f"{ctxt.base_url}/pulls/{ctxt.pull['number']}"
            )
            if refreshed_pull["head"]["sha"] != ctxt.pull["head"]["sha"]:
                ctxt.log.info(
                    "branch updated in the meantime",
                    status_code=e.status_code,
                    error=e.message,
                )
                return
        ctxt.log.info(
            "update branch failed",
            status_code=e.status_code,
            error=e.message,
        )
        raise BranchUpdateFailure(e.message)
    except (http.RequestError, http.HTTPStatusError) as e:
        status_code: typing.Optional[int] = None
        # BUG FIX: the original condition was
        # `isinstance(e, http.HTTPStatusError) and http.HTTPStatusError`,
        # whose second operand is a class object and therefore always truthy.
        if isinstance(e, http.HTTPStatusError):
            status_code = e.response.status_code
        ctxt.log.info(
            "update branch failed",
            status_code=status_code,
            error=str(e),
        )
        raise BranchUpdateNeedRetry()
@tenacity.retry(
    wait=tenacity.wait_exponential(multiplier=0.2),
    stop=tenacity.stop_after_attempt(5),
    retry=tenacity.retry_if_exception_type(AuthenticationFailure),
)
async def rebase_with_git(
    ctxt: context.Context, user: typing.Optional[str] = None
) -> None:
    """Rebase the PR with git, trying each usable user token in turn.

    If *user* is given, only that user's token is tried; otherwise every
    token registered for the subscription is attempted until one is accepted.
    Raises BranchUpdateFailure when no rebase is possible and
    AuthenticationFailure when no token works (retried by tenacity above).
    """
    if user:
        token = ctxt.subscription.get_token_for(user)
        if token:
            creds = {user.lower(): token}
        else:
            raise BranchUpdateFailure(
                f"Unable to rebase: user `{user}` is unknown. "
                f"Please make sure `{user}` has logged in Mergify dashboard."
            )
    else:
        creds = ctxt.subscription.tokens

    for login, token in creds.items():
        try:
            return await _do_rebase(ctxt, token)
        except AuthenticationFailure as e:  # pragma: no cover
            # This token was rejected; log and try the next one.
            # (typo fix in the log message: was "authentification")
            ctxt.log.info(
                "authentication failure, will retry another token: %s",
                e,
                login=login,
            )

    ctxt.log.warning("unable to update branch: no tokens are valid")

    if ctxt.pull_from_fork and ctxt.pull["base"]["repo"]["private"]:
        raise BranchUpdateFailure(
            "Rebasing a branch for a forked private repository is not supported by GitHub"
        )

    raise AuthenticationFailure(
        f"No registered tokens allows Mergify to push to `{ctxt.pull['head']['label']}`"
    )
|
# Copyright 2020
# Author: <NAME> <<EMAIL>>
import time
import random
import gym
from datetime import datetime
from gym import wrappers
import numpy as np
import os
from collections import deque
from torch.utils.tensorboard import SummaryWriter
import torch
from agent import TD3
from memory import ReplayBuffer
def mkdir(base, name):
    """Create (if necessary) and return the directory ``base/name``.

    Args:
        base (str): first part of the pathname.
        name (str): second part of the pathname.

    Returns:
        str: the joined pathname.
    """
    path = os.path.join(base, name)
    # exist_ok avoids the check-then-create race of the original
    # `os.path.exists()` / `os.makedirs()` pair.
    os.makedirs(path, exist_ok=True)
    return path
def evaluate_policy(policy, writer, total_timesteps, args, episode=10):
    """Run the policy deterministically over 10 fixed seeds; log mean reward.

    Args:
        policy: agent exposing ``select_action(obs)``.
        writer: tensorboard SummaryWriter.
        total_timesteps: global step used as x-axis for the logged scalar.
        args: parsed CLI args; only ``env_name`` is used here.
        episode: unused here (callers pass the episode number) — kept for
            caller compatibility.

    Returns:
        float: average episode reward over the evaluation seeds.
    """
    avg_reward = 0.
    # Fresh environment so evaluation does not disturb the training env state.
    env = gym.make(args.env_name)
    seeds = [x for x in range(10)]
    for s in seeds:
        env.seed(s)
        obs = env.reset()
        done = False
        while not done:
            action = policy.select_action(np.array(obs))
            obs, reward, done, _ = env.step(action)
            avg_reward += reward
    avg_reward /= len(seeds)
    writer.add_scalar('Evaluation reward', avg_reward, total_timesteps)
    print("---------------------------------------")
    print("Average Reward over the Evaluation Step: %f" % (avg_reward))
    print("---------------------------------------")
    return avg_reward
def write_into_file(pathname, text):
    """Append *text* as one line to ``<pathname>.txt``.

    Args:
        pathname (str): file path without the ``.txt`` extension.
        text (str): line to append (a newline is added).
    """
    with open(pathname + ".txt", "a") as myfile:
        # Single write instead of the original two consecutive writes.
        myfile.write(text + "\n")
def time_format(sec):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple."""
    hours, rem = divmod(sec, 3600)
    mins, secs = divmod(rem, 60)
    return hours, mins, secs
def train(args, param):
    """Run one TD3 training experiment.

    Args:
        args: hyperparameter namespace (env name, learning rates, timesteps, ...).
        param: seed for this run; it overwrites ``args.seed``.
    """
    # in case seed experements
    args.seed = param
    now = datetime.now()
    dt_string = now.strftime("%d_%m_%Y_%H:%M:%S")  # NOTE(review): computed but unused
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # Encode the hyperparameter set into the run name used for logs/tensorboard.
    pathname = str(args.env_name)
    pathname += 'lr_critic_' + str(args.lr_critic)
    pathname += 'lr_actor_' + str(args.lr_actor)
    pathname += '_repeat_' + str(args.repeat)
    pathname += '_policy_update_' + str(args.policy_freq)
    pathname += '_batch_size__' + str(args.batch_size)
    if args.agent == "TD3_ad":
        pathname += '_update_freq_' + str(args.target_update_freq)
        pathname += "_num_q_target_" + str(args.num_q_target)
    pathname += "_seed_" + str(args.seed) + "_agent_" + args.agent
    tensorboard_name = args.locexp + '/runs/' + pathname
    writer = SummaryWriter(tensorboard_name)
    env = gym.make(args.env_name)
    env.seed(args.seed)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    print(state_dim)
    if args.agent == "TD3_ad":
        print("use own version")
        # NOTE(review): TD31v1 is not imported in this file (only TD3 is), so
        # this branch would raise NameError — confirm the intended import.
        policy = TD31v1(state_dim, action_dim, max_action, args)
    elif args.agent == "TD3":
        policy = TD3(state_dim, action_dim, max_action, args)
    replay_buffer = ReplayBuffer()
    total_timesteps = 0
    timesteps_since_eval = 0
    episode_num = 0
    done = True  # forces an env reset on the first loop iteration
    t0 = time.time()
    scores_window = deque(maxlen=100)  # rolling window for the mean reward
    episode_reward = 0
    evaluations = []
    file_name = "%s_%s_%s" % (args.agent, args.env_name, str(args.seed))
    print("---------------------------------------")
    print("Settings: %s" % (file_name))
    print("---------------------------------------")
    # We start the main loop over 500,000 timesteps
    tb_update_counter = 0
    while total_timesteps < args.max_timesteps:
        tb_update_counter += 1
        # If the episode is done
        if done:
            episode_num += 1
            #env.seed(random.randint(0, 100))
            scores_window.append(episode_reward)
            average_mean = np.mean(scores_window)
            if total_timesteps > args.start_timesteps:
                policy.compute_beta(replay_buffer)
                #policy.train(replay_buffer, writer, episode_timesteps)
            # Throttle tensorboard writes to every `tensorboard_freq` episodes.
            if tb_update_counter > args.tensorboard_freq:
                tb_update_counter = 0
                writer.add_scalar('Reward', episode_reward, total_timesteps)
                writer.add_scalar('Reward mean ', average_mean, total_timesteps)
            # If we are not at the very beginning, we start the training process of the model
            if total_timesteps != 0:
                text = "Total Timesteps: {} Episode Num: {} Reward: {} Average Re: {:.2f} Time: {}".format(total_timesteps, episode_num, episode_reward, np.mean(scores_window), time_format(time.time()-t0))
                print(text)
                write_into_file('search-' + pathname, text)
            # We evaluate the episode and we save the policy
            if timesteps_since_eval >= args.eval_freq:
                timesteps_since_eval %= args.eval_freq
                evaluations.append(evaluate_policy(policy, writer, total_timesteps, args, episode_num))
            # When the training step is done, we reset the state of the environment
            obs = env.reset()
            # Set the Done to False
            done = False
            # Set rewards and episode timesteps to zero
            episode_reward = 0
            episode_timesteps = 0
        # Before 10000 timesteps, we play random actions
        if total_timesteps < args.start_timesteps:
            action = env.action_space.sample()
        else:  # After 10000 timesteps, we switch to the model
            action = policy.select_action(np.array(obs))
            # If the explore_noise parameter is not 0, we add noise to the action and we clip it
            if args.expl_noise != 0:
                action = (action + np.random.normal(0, args.expl_noise, size=env.action_space.shape[0])).clip(env.action_space.low, env.action_space.high)
        if args.agent == "TD3_ad":
            if total_timesteps % args.target_update_freq == 0:
                policy.hardupdate()
        # The agent performs the action in the environment, then reaches the next state and receives the reward
        new_obs, reward, done, _ = env.step(action)
        # We check if the episode is done
        done_bool = 1 if episode_timesteps + 1 == 1000 else float(done)
        # We increase the total reward
        episode_reward += reward
        # We store the new transition into the Experience Replay memory (ReplayBuffer)
        replay_buffer.add((obs, new_obs, action, reward, done_bool))
        # We update the state, the episode timestep, the total timesteps, and the timesteps since the evaluation of the policy
        obs = new_obs
        episode_timesteps += 1
        total_timesteps += 1
        timesteps_since_eval += 1
        if total_timesteps > args.start_timesteps:
            policy.compute_beta(replay_buffer)
            # policy.train(replay_buffer, writer, args.repeat)
    # We add the last policy evaluation to our list of evaluations and we save our model
    evaluations.append(evaluate_policy(policy, writer, total_timesteps, args, episode_num))
    if args.save_model:
        policy.save("%s" % (file_name), directory="./pytorch_models")
    np.save("./results/%s" % (file_name), evaluations)
|
<filename>kme/extern/senn/datasets/dataloaders.py
import os
import shutil
import urllib.request
from pathlib import Path
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader, random_split
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets
def get_dataloader(config):
    """Dispatcher that calls dataloader function depending on the configs.

    Parameters
    ----------
    config : SimpleNameSpace
        Contains configs values. Needs to at least have a `dataloader` field.

    Returns
    -------
    Corresponding dataloader.

    Raises
    ------
    ValueError
        If `config.dataloader` does not name a known dataset.
    """
    name = config.dataloader.lower()
    if name == 'mnist':
        return load_mnist(**config.__dict__)
    if name == 'compas':
        return load_compas(**config.__dict__)
    # The original silently fell through and returned None; fail loudly instead.
    raise ValueError(f"Unknown dataloader: {config.dataloader!r}")
def load_mnist(data_path, batch_size, num_workers=0, valid_size=0.1, **kwargs):
    """Build MNIST train/validation/test dataloaders.

    Images are converted to tensors and normalized with the standard MNIST
    mean/std. The training set is split index-wise: the first
    ``valid_size`` fraction of indices goes to validation, the rest to
    training, both drawn with SubsetRandomSampler.

    Parameters
    ----------
    data_path : str
        Location of mnist data (downloaded there if missing).
    batch_size : int
        Batch size.
    num_workers : int
        Number of workers used by the PyTorch DataLoaders.
    valid_size : float
        Fraction in [0.0, 1.0] of training samples used for validation.

    Returns
    -------
    (train_loader, valid_loader, test_loader)
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_set = datasets.MNIST(data_path, train=True, download=True, transform=preprocess)
    test_set = datasets.MNIST(data_path, train=False, download=True, transform=preprocess)

    n_train = len(train_set)
    n_valid = int(np.floor(valid_size * n_train))
    all_indices = list(range(n_train))

    loader_kwargs = dict(batch_size=batch_size, num_workers=num_workers, drop_last=True)
    train_loader = DataLoader(
        train_set, sampler=SubsetRandomSampler(all_indices[n_valid:]), **loader_kwargs
    )
    valid_loader = DataLoader(
        train_set, sampler=SubsetRandomSampler(all_indices[:n_valid]), **loader_kwargs
    )
    test_loader = DataLoader(test_set, shuffle=False, **loader_kwargs)
    return train_loader, valid_loader, test_loader
# --------------- Compas Dataset ---------------
class CompasDataset(Dataset):
    def __init__(self, data_path, verbose=True):
        """ProPublica Compas dataset.

        Dataset is read in from preprocessed compas data: `propublica_data_for_fairml.csv`
        from fairml github repo.
        Source url: 'https://github.com/adebayoj/fairml/raw/master/doc/example_notebooks/propublica_data_for_fairml.csv'
        Following approach of Alvariz-Melis et al (SENN).

        Parameters
        ----------
        data_path : str
            Location of Compas data.
        verbose : bool
            Print a message when preprocessing finishes.
        """
        df = pd.read_csv(data_path)
        # Scale priors to [0, 1]; the square root compresses the long tail.
        # NOTE(review): kept from the SENN authors' preprocessing — the
        # original comment said "don't know why square root".
        df['Number_of_Priors'] = (df['Number_of_Priors'] / df['Number_of_Priors'].max()) ** (1 / 2)
        # Binary recidivism score used as the prediction target.
        compas_rating = df.score_factor.values
        df = df.drop('score_factor', axis=1)
        # Drop samples with identical features but conflicting labels.
        pruned_df, pruned_rating = find_conflicting(df, compas_rating)
        if verbose:
            print('Finish preprocessing data..')
        self.X = pruned_df                      # features (pandas DataFrame)
        self.y = pruned_rating.astype(float)    # labels as floats

    def __len__(self):
        """Number of samples after pruning."""
        return len(self.X)

    def __getitem__(self, idx):
        # Convert idx from tensor to list due to pandas bug (that arises when using pytorch's random_split)
        if isinstance(idx, torch.Tensor):
            idx = idx.tolist()
        return self.X.iloc[idx].values.astype(float), self.y[idx]
def load_compas(data_path='senn/datasets/data/compas/compas.csv', train_percent=0.8, batch_size=200,
                num_workers=0, valid_size=0.1, **kwargs):
    """Return compas dataloaders.

    If compas data can not be found, will download preprocessed compas data: `propublica_data_for_fairml.csv`
    from fairml github repo.
    Source url: 'https://github.com/adebayoj/fairml/raw/master/doc/example_notebooks/propublica_data_for_fairml.csv'

    Parameters
    ----------
    data_path : str
        Path of compas data.
    train_percent : float
        What percentage of samples should be used as the training set. The rest is used
        for the test set.
    batch_size : int
        Number of samples in minibatches.
    num_workers : int
        Number of workers used by the PyTorch DataLoaders.
    valid_size : float
        Fraction of the training subset carved out for validation.

    Returns
    -------
    train_loader
        Dataloader for training set.
    valid_loader
        Dataloader for validation set.
    test_loader
        Dataloader for testing set.
    """
    # Download the preprocessed CSV on first use.
    if not os.path.isfile(data_path):
        Path(data_path).parent.mkdir(parents=True, exist_ok=True)
        compas_url = 'https://github.com/adebayoj/fairml/raw/master/doc/example_notebooks/propublica_data_for_fairml.csv'
        download_file(data_path, compas_url)
    dataset = CompasDataset(data_path)
    # Split into training and test
    train_size = int(train_percent * len(dataset))
    test_size = len(dataset) - train_size
    train_set, test_set = random_split(dataset, [train_size, test_size])
    # Carve a validation subset out of the training subset by index.
    indices = list(range(train_size))
    validation_split = int(valid_size * train_size)
    train_sampler = SubsetRandomSampler(indices[validation_split:])
    valid_sampler = SubsetRandomSampler(indices[:validation_split])
    # Dataloaders (train and valid both sample from the same train subset).
    dataloader_args = dict(batch_size=batch_size, num_workers=num_workers, drop_last=True)
    train_loader = DataLoader(train_set, sampler=train_sampler, **dataloader_args)
    valid_loader = DataLoader(train_set, sampler=valid_sampler, **dataloader_args)
    test_loader = DataLoader(test_set, shuffle=False, **dataloader_args)
    return train_loader, valid_loader, test_loader
def find_conflicting(df, labels, consensus_delta=0.2):
    """
    Find examples with same exact feature vector but different label.

    Finds pairs of examples in dataframe that differ only in a few feature values.
    From SENN authors' code.

    Parameters
    ----------
    df : pd.Dataframe
        Containing compas data.
    labels : iterable
        Containing ground truth labels
    consensus_delta : float
        Decision rule parameter.

    Return
    ------
    pruned_df:
        dataframe with `inconsistent samples` removed.
    pruned_lab:
        pruned labels
    """
    def finder(df, row):
        # Successively filter df down to the rows equal to `row` on every
        # column (NaN is treated as equal to NaN here).
        for col in df:
            df = df.loc[(df[col] == row[col]) | (df[col].isnull() & pd.isnull(row[col]))]
        return df
    # Collect groups of duplicated rows (by full feature equality).
    groups = []
    all_seen = set([])
    full_dups = df.duplicated(keep='first')
    for i in (range(len(df))):
        if full_dups[i] and (i not in all_seen):
            i_dups = finder(df, df.iloc[i])
            groups.append(i_dups.index)
            all_seen.update(i_dups.index)
    # Within each group keep only rows whose label matches the group
    # consensus, unless the group is close to a 50/50 split.
    pruned_df = []
    pruned_lab = []
    for group in groups:
        scores = np.array([labels[i] for i in group])
        consensus = round(scores.mean())
        for i in group:
            if (abs(scores.mean() - 0.5) < consensus_delta) or labels[i] == consensus:
                # First condition: consensus is close to 50/50, can't consider this "outliers", so keep them all
                pruned_df.append(df.iloc[i])
                pruned_lab.append(labels[i])
    # NOTE(review): rows that are not duplicated at all never enter `groups`
    # and are therefore absent from the returned frame — confirm intended.
    return pd.DataFrame(pruned_df), np.array(pruned_lab)
def download_file(store_path, url):
    """Download a file from `url` and write it to a file `store_path`.

    Parameters
    ----------
    store_path : str
        Data storage location.
    url : str
        Source URL to fetch.
    """
    # Stream the response body straight to disk without buffering it whole.
    with urllib.request.urlopen(url) as response:
        with open(store_path, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
|
<gh_stars>0
import time
import logging.config
from scapy.all import get_if_hwaddr, sendp, sniff, UDP, BOOTP, IP, DHCP, Ether

# Silence scapy's own runtime chatter; use a project-scoped logger instead.
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
logger = logging.getLogger(name="elchicodepython.honeycheck")
def apply_controls(control_modules, **kwargs):
    """Invoke ``apply_actions(**kwargs)`` on every control module, in order."""
    for module in control_modules:
        module.apply_actions(**kwargs)
class DHCPServer:
    """Lightweight record of a DHCP server observed on the network."""

    def __init__(self, ip, hw):
        self.ip = ip  # server IP address
        self.hw = hw  # server MAC address

    def __repr__(self):
        return "<DHCPServer Object (ip = %s, hw = %s)>" % (self.ip, self.hw)

    def __str__(self):
        # Same text as __repr__; delegate instead of duplicating the format.
        return repr(self)
class Status:
    # Watchman state constants (plain ints kept for simplicity).
    OK = 1               # only the allowed number of DHCP servers seen
    ROGUE_DETECTED = 2   # more DHCP servers than allowed were seen
class DHCPWatchmen:
    """Watches one network interface for rogue DHCP servers.

    Periodically broadcasts DHCP DISCOVER probes and sniffs the replies;
    when more servers answer than allowed, the configured fail actions run.
    """
    def __init__(self, iface, fail_test, pass_test, final_exec, whitelist):
        """
        :param iface: interface to watch
        :param fail_test: action to trigger if a rogue dhcp server is detected
        :param pass_test: action to trigger if there are no rogue dhcp servers detected
        :param final_exec: action to trigger always after fail_test or pass_test
        :param whitelist: list of IPs of verified DHCP servers to ignore.
        """
        self.iface = iface
        # MAC address of the watched interface (used as chaddr in the probe).
        self.hw = get_if_hwaddr(iface)
        self.fail_test = fail_test
        self.pass_test = pass_test
        self.final_exec = final_exec
        self.whitelist = whitelist
        # Maps server IP -> DHCPServer for servers seen since the last reset.
        self.dhcp_servers = {}
        self.last_status = Status.OK
    def check_dhcp_servers(self, number_allowed):
        """
        Check if the number of DHCP Servers detected is allowed
        and trigger the corresponding action to each situation
        :param number_allowed: number of dhcp_servers allowed
        """
        if len(self.dhcp_servers) > number_allowed:
            # Log only on the OK -> ROGUE transition to avoid log spam.
            if self.last_status != Status.ROGUE_DETECTED:
                logger.warning("MORE DHCP SERVERS THAN ALLOWED: ")
            self.last_status = Status.ROGUE_DETECTED
            apply_controls(self.fail_test, watchmen=self)
            # Reset the detected-server map for the next probe cycle.
            self.dhcp_servers = {}
        else:
            if self.last_status != Status.OK:
                logger.info("All seems right")
            self.last_status = Status.OK
            apply_controls(self.pass_test, watchmen=self)
            # NOTE(review): dhcp_servers is NOT cleared on the OK path, so
            # entries persist into the next probe cycle — confirm intended.
        apply_controls(self.final_exec, watchmen=self)
    def check_packet(self, packet):
        # Sniff callback: op == 2 marks a BOOTP/DHCP server reply.
        if packet.payload.op == 2:
            # NOTE(review): `packet.payload.src` resolves through scapy's
            # payload chain (presumably the IP source of the reply) — confirm.
            if self.whitelist:
                # With a whitelist, only record non-whitelisted servers.
                if packet.payload.src not in self.whitelist:
                    self.dhcp_servers[packet.payload.src] = DHCPServer(
                        packet.payload.src, packet.src
                    )
            else:
                self.dhcp_servers[packet.payload.src] = DHCPServer(
                    packet.payload.src, packet.src
                )
    def send_dhcp_discovery(self):
        """Broadcast one DHCP DISCOVER probe on the watched interface."""
        dhcp_discover = (
            Ether(dst="ff:ff:ff:ff:ff:ff")
            / IP(src="0.0.0.0", dst="255.255.255.255")
            / UDP(sport=68, dport=67)
            # flags=0x8000 asks servers to answer via broadcast.
            / BOOTP(chaddr=self.hw, flags=0x8000)
            / DHCP(options=[("message-type", "discover"), "end"])
        )
        sendp(dhcp_discover, verbose=0)
        logger.debug("DHCP DISCOVER SEND")
    def dhcp_discovery_daemon(self, timeout):
        """Probe forever: send a DISCOVER, wait `timeout` seconds, evaluate.

        :param timeout: seconds to wait between probe and evaluation
        """
        if self.whitelist:
            # With a whitelist, no DHCP server outside the whitelist may
            # answer at all (check_packet ignores whitelisted servers).
            logger.info("Whitelist enabled for " + self.iface)
            max_servers_allowed = 0
        else:
            # Without a whitelist, exactly one DHCP server is expected.
            logger.info(
                "Executing HoneyCheck in %s without Whitelist" % self.iface
            )
            max_servers_allowed = 1
        while True:
            self.send_dhcp_discovery()
            time.sleep(timeout)
            self.check_dhcp_servers(max_servers_allowed)
    def sniff_dhcp(self):
        """Blocking sniff loop feeding DHCP replies to check_packet."""
        sniff(iface=self.iface, filter="udp port 68", prn=self.check_packet)
    def __repr__(self):
        return "<DHCPSWatchmen Object (iface = %s)>" % (self.iface)
    def __str__(self):
        return "<DHCPSWatchmen Object (iface = %s)>" % (self.iface)
|
################################################################################
# This module calculates the PMI of n-grams
# Parameters df_ac_ngram_q: input pandas.DataFrame of n-grams, it should have,
# at least, n-gram count columns with the 'AC_Doc_ID's
# as the index of the DataFrame
# ngram_clm_start: integer column number (starting from zero)
# specifying the starting point of n-gram
# count columns in the question DataFrame, from
# the point to the end, all the columns should be
# the n-gram count columns
# df_ac_p_x: pandas.DataFrame of the proportion of terms
# lemma_sum_total: the total number of unigram terms
# gram = 'bigram': specify bigram or trigram
# decimal_places = None: specify the decimal places to round at
# Returns Result: pandas.DataFrame as the PMI of n-grams
################################################################################
def ac_bi_trigram_pmi(df_ac_ngram_q, ngram_clm_start, df_ac_p_x,
        lemma_sum_total, gram = 'bigram', decimal_places = None):
    """Calculate the PMI (pointwise mutual information) of n-grams.

    Parameters
    ----------
    df_ac_ngram_q : pandas.DataFrame
        n-gram counts with 'AC_Doc_ID's as the index; every column from
        `ngram_clm_start` to the end must be an n-gram count column.
    ngram_clm_start : int
        Zero-based column number where the n-gram count columns start.
    df_ac_p_x : pandas.DataFrame
        Proportions of unigram terms, indexed by term, with a 'p_x' column.
    lemma_sum_total : int
        Total number of unigram terms.
    gram : str, optional
        'bigram' (default); any other value is treated as trigram.
    decimal_places : int, optional
        If given, round the PMI to this many decimal places.

    Returns
    -------
    pandas.DataFrame
        Per n-gram: total count, joint probability, product of the unigram
        probabilities, and PMI (log base 2).
    """
    import pandas as pd
    import numpy as np
    from math import log
    # Restrict to the n-gram count columns and total each n-gram over docs.
    df_ac_buf_ngram = df_ac_ngram_q[:].iloc[:, ngram_clm_start:]
    # Column labels differ between the bigram and trigram variants; the
    # computation below is otherwise identical, so it is shared.
    if gram == 'bigram':
        label, n_terms, joint_clm, prod_clm = 'Bigram', 2, 'p_ab', 'p_a_x_p_b'
    else:
        label, n_terms, joint_clm, prod_clm = (
            'Trigram', 3, 'p_abc', 'p_a_x_p_b_x_p_c')
    df_ac_ngram_q_res = pd.DataFrame({label + '_sum': df_ac_buf_ngram.sum()})
    df_ac_ngram_q_res.index.name = label
    res_index = df_ac_ngram_q_res.index
    p_x_index = df_ac_p_x.index
    # Working table: joint probability, product of marginals, PMI.
    # np.empty is deliberate: rows whose n-gram count is zero are never
    # filled (same behavior as the original implementation).
    df_pmi = pd.DataFrame(np.empty((len(res_index), 3), dtype=np.float64),
                          res_index, ['p_joint', 'p_prod', 'PMI'])
    for i, x in enumerate(res_index):
        if df_ac_ngram_q_res.iloc[i, 0] > 0:
            # Joint probability of the n-gram.
            df_pmi.iloc[i, 0] = df_ac_ngram_q_res.iloc[i, 0] / lemma_sum_total
            print(label + ': ' + x)
            grams = x.split('_')
            if all(grams[k] in p_x_index for k in range(n_terms)):
                # Product of the unigram marginal probabilities.
                p_prod = df_ac_p_x.loc[grams[0], 'p_x']
                for k in range(1, n_terms):
                    p_prod = p_prod * df_ac_p_x.loc[grams[k], 'p_x']
                df_pmi.iloc[i, 1] = p_prod
            else:
                # Fall back to the joint probability, which forces PMI = 0.
                df_pmi.iloc[i, 1] = df_pmi.iloc[i, 0]
                print('WARNING: ' + 'The unigram(s) of ' + x + ' cannot be found!!')
            pmi = log(df_pmi.iloc[i, 0] / df_pmi.iloc[i, 1], 2)
            # Fixed: compare against None with `is not` (was `!= None`).
            if decimal_places is not None:
                pmi = round(pmi, decimal_places)
            df_pmi.iloc[i, 2] = pmi
    df_ac_ngram_q_res[joint_clm] = df_pmi['p_joint']
    df_ac_ngram_q_res[prod_clm] = df_pmi['p_prod']
    df_ac_ngram_q_res['PMI'] = df_pmi['PMI']
    return df_ac_ngram_q_res
|
<gh_stars>1-10
#!/usr/bin/env python
from JumpScale import j
import time
import os
import netaddr
class Lxc:
    """SAL for LXC containers stored as btrfs subvolumes.

    Containers live under `basepath`; a new container is a btrfs snapshot
    of a base subvolume, with rsync/tgz based import and export helpers.
    All shell work goes through `execute`, which runs via j.sal.process.
    """
    def __init__(self):
        self.__jslocation__ = "j.sal.lxc"
        self._prefix = ""  # no longer use prefixes
        self._basepath = None
    def execute(self, command):
        """
        Execute command.
        @param command str: command to run
        """
        # Strip PYTHONPATH so the child process uses the system environment.
        env = os.environ.copy()
        env.pop('PYTHONPATH', None)
        (exitcode, stdout, stderr) = j.sal.process.run(
            command, showOutput=False, captureOutput=True, stopOnError=False, env=env)
        if exitcode != 0:
            raise j.exceptions.RuntimeError("Failed to execute %s: Error: %s, %s" % (command, stdout, stderr))
        return stdout
    @property
    def basepath(self):
        # Lazily resolve the container base path from config; the default
        # location must already exist as a btrfs mount.
        if not self._basepath:
            if j.application.config.exists('lxc.basepath'):
                self._basepath = j.application.config.get('lxc.basepath')
            else:
                self._basepath = "/mnt/vmstor/lxc"  # btrfs subvol create
                if not j.sal.fs.exists(path=self._basepath):
                    raise j.exceptions.RuntimeError("only btrfs lxc supported for now")
        return self._basepath
    def _getChildren(self, pid, children):
        """Recursively collect process objects for `pid` and its children."""
        process = j.sal.process.getProcessObject(pid)
        children.append(process)
        for child in process.get_children():
            children = self._getChildren(child.pid, children)
        return children
    def _get_rootpath(self, name):
        """Path of the container's root filesystem directory."""
        rootpath = j.sal.fs.joinPaths(self.basepath, '%s%s' % (self._prefix, name), 'rootfs')
        return rootpath
    def _getMachinePath(self, machinename, append=""):
        """Path of the container's directory, optionally joined with `append`."""
        if machinename == "":
            raise j.exceptions.RuntimeError("Cannot be empty")
        base = j.sal.fs.joinPaths(self.basepath, '%s%s' % (self._prefix, machinename))
        if append != "":
            base = j.sal.fs.joinPaths(base, append)
        return base
    def list(self):
        """
        names of running & stopped machines
        @return (running,stopped)
        """
        cmd = "lxc-ls --fancy -P %s" % self.basepath
        out = self.execute(cmd)
        stopped = []
        running = []
        current = None
        # Parse the fancy listing: each machine line carries its state.
        for line in out.split("\n"):
            line = line.strip()
            if line.find('RUNNING') != -1:
                current = running
            elif line.find('STOPPED') != -1:
                current = stopped
            else:
                continue
            name = line.split(" ")[0]
            if name.find(self._prefix) == 0:
                name = name.replace(self._prefix, "")
            current.append(name)
        running.sort()
        stopped.sort()
        return (running, stopped)
    def getIp(self, name, fail=True):
        """
        Get IP of container
        @param name str: containername.
        """
        hrd = self.getConfig(name)
        return hrd.get("ipaddr")
    def getConfig(self, name):
        """Return the container's HRD config object, creating the file if absent."""
        configpath = j.sal.fs.joinPaths(self.basepath, '%s%s' % (self._prefix, name), "jumpscaleconfig.hrd")
        if not j.sal.fs.exists(path=configpath):
            content = """
ipaddr=
"""
            j.sal.fs.writeFile(configpath, contents=content)
        return j.data.hrd.get(path=configpath)
    def getPid(self, name, fail=True):
        """Return the PID of the container's init process (0 if stopped and fail=False)."""
        out = self.execute("lxc-info -n %s%s -p" % (self._prefix, name))
        pid = 0
        for line in out.splitlines():
            line = line.strip().lower()
            # NOTE(review): rebinds the `name` parameter with the field label,
            # and int() would fail on any non-numeric field value — confirm
            # `lxc-info -p` only ever prints the "pid: <nr>" line.
            name, pid = line.split(':')
            pid = int(pid.strip())
        if pid == 0:
            if fail:
                raise j.exceptions.RuntimeError("machine:%s is not running" % name)
            else:
                return 0
        return pid
    def getProcessList(self, name, stdout=True):
        """
        Get process list on a container.
        @return [["$name",$pid,$mem,$parent],....,[$mem,$cpu]]
        last one is sum of mem & cpu
        """
        pid = self.getPid(name)
        children = list()
        children = self._getChildren(pid, children)
        result = list()
        pre = ""
        mem = 0.0
        cpu = 0.0
        cpu0 = 0.0
        prevparent = ""
        for child in children:
            # Indent two dots deeper every time the parent changes.
            if child.parent.name != prevparent:
                pre += ".."
                prevparent = child.parent.name
            # cpu0=child.get_cpu_percent()
            # rss / 1024 — presumably bytes -> KiB; depends on the process
            # library's rss unit, confirm.
            mem0 = int(round(child.get_memory_info().rss / 1024, 0))
            mem += mem0
            cpu += cpu0
            if stdout:
                print(("%s%-35s %-5s mem:%-8s" % (pre, child.name, child.pid, mem0)))
            result.append([child.name, child.pid, mem0, child.parent.name])
        # CPU is sampled from the container's root process only.
        cpu = children[0].get_cpu_percent()
        result.append([mem, cpu])
        if stdout:
            print(("TOTAL: mem:%-8s cpu:%-8s" % (mem, cpu)))
        return result
    def exportRsync(self, name, backupname, key="pub"):
        """Rsync the container image to the configured jssync server."""
        self.removeRedundantFiles(name)
        ipaddr = j.application.config.get("jssync.addr")
        path = self._getMachinePath(name)
        if not j.sal.fs.exists(path):
            raise j.exceptions.RuntimeError("cannot find machine:%s" % path)
        # rsync needs trailing slashes to sync directory contents.
        if backupname[-1] != "/":
            backupname += "/"
        if path[-1] != "/":
            path += "/"
        cmd = "rsync -a %s %s::upload/%s/images/%s --delete-after --modify-window=60 --compress --stats --progress --exclude '.Trash*'" % (
            path, ipaddr, key, backupname)
        # print cmd
        j.sal.process.executeWithoutPipe(cmd)
    def _btrfsExecute(self, cmd):
        """Run a btrfs subcommand and return its stdout."""
        cmd = "btrfs %s" % cmd
        print(cmd)
        return self.execute(cmd)
    def btrfsSubvolList(self):
        """Return the names of btrfs subvolumes under basepath."""
        out = self._btrfsExecute("subvolume list %s" % self.basepath)
        res = []
        for line in out.split("\n"):
            if line.strip() == "":
                continue
            if line.find("path ") != -1:
                path = line.split("path ")[-1]
                path = path.strip("/")
                path = path.replace("lxc/", "")
                res.append(path)
        return res
    def btrfsSubvolNew(self, name):
        """Create subvolume `name` if it does not exist yet."""
        if not self.btrfsSubvolExists(name):
            cmd = "subvolume create %s/%s" % (self.basepath, name)
            self._btrfsExecute(cmd)
    def btrfsSubvolCopy(self, nameFrom, NameDest):
        """Snapshot subvolume `nameFrom` into `NameDest` (must not exist)."""
        if not self.btrfsSubvolExists(nameFrom):
            raise j.exceptions.RuntimeError("could not find vol for %s" % nameFrom)
        if j.sal.fs.exists(path="%s/%s" % (self.basepath, NameDest)):
            raise j.exceptions.RuntimeError(
                "path %s exists, cannot copy to existing destination, destroy first." % nameFrom)
        cmd = "subvolume snapshot %s/%s %s/%s" % (self.basepath, nameFrom, self.basepath, NameDest)
        self._btrfsExecute(cmd)
    def btrfsSubvolExists(self, name):
        """True if a subvolume called `name` exists under basepath."""
        subvols = self.btrfsSubvolList()
        # print subvols
        return name in subvols
    def btrfsSubvolDelete(self, name):
        """Delete subvolume `name` plus any leftover directory tree."""
        if self.btrfsSubvolExists(name):
            cmd = "subvolume delete %s/%s" % (self.basepath, name)
            self._btrfsExecute(cmd)
        path = "%s/%s" % (self.basepath, name)
        if j.sal.fs.exists(path=path):
            j.sal.fs.removeDirTree(path)
        if self.btrfsSubvolExists(name):
            raise j.exceptions.RuntimeError("vol cannot exist:%s" % name)
    def removeRedundantFiles(self, name):
        """Drop irrelevant files and the apt archive cache from the image."""
        basepath = self._getMachinePath(name)
        j.sal.fs.removeIrrelevantFiles(basepath, followSymlinks=False)
        toremove = "%s/rootfs/var/cache/apt/archives/" % basepath
        j.sal.fs.removeDirTree(toremove)
    def importRsync(self, backupname, name, basename="", key="pub"):
        """
        @param basename is the name of a start of a machine locally, will be used as basis and then the source will be synced over it
        """
        ipaddr = j.application.config.get("jssync.addr")
        path = self._getMachinePath(name)
        self.btrfsSubvolNew(name)
        # j.sal.fs.createDir(path)
        if backupname[-1] != "/":
            backupname += "/"
        if path[-1] != "/":
            path += "/"
        if basename != "":
            # Seed the new container from the local base machine first.
            basepath = self._getMachinePath(basename)
            if basepath[-1] != "/":
                basepath += "/"
            if not j.sal.fs.exists(basepath):
                raise j.exceptions.RuntimeError("cannot find base machine:%s" % basepath)
            cmd = "rsync -av -v %s %s --delete-after --modify-window=60 --size-only --compress --stats --progress" % (
                basepath, path)
            print(cmd)
            j.sal.process.executeWithoutPipe(cmd)
        cmd = "rsync -av -v %s::download/%s/images/%s %s --delete-after --modify-window=60 --compress --stats --progress" % (
            ipaddr, key, backupname, path)
        print(cmd)
        j.sal.process.executeWithoutPipe(cmd)
    def exportTgz(self, name, backupname):
        """
        Export a container to a tarball
        @param backupname str: backupname
        @param name str: container name.
        """
        self.removeRedundantFiles(name)
        path = self._getMachinePath(name)
        bpath = j.sal.fs.joinPaths(self.basepath, "backups")
        if not j.sal.fs.exists(path):
            raise j.exceptions.RuntimeError("cannot find machine:%s" % path)
        j.sal.fs.createDir(bpath)
        bpath = j.sal.fs.joinPaths(bpath, "%s.tgz" % backupname)
        # tar flags: S = sparse-aware, z = gzip, c = create.
        cmd = "cd %s;tar Szcf %s ." % (path, bpath)
        j.sal.process.executeWithoutPipe(cmd)
        return bpath
    def importTgz(self, backupname, name):
        """
        Import a container from a tarball
        @param backupname str: backupname
        @param name str: container name.
        """
        path = self._getMachinePath(name)
        bpath = j.sal.fs.joinPaths(self.basepath, "backups", "%s.tgz" % backupname)
        if not j.sal.fs.exists(bpath):
            raise j.exceptions.RuntimeError("cannot find import path:%s" % bpath)
        j.sal.fs.createDir(path)
        cmd = "cd %s;tar xzvf %s -C ." % (path, bpath)
        j.sal.process.executeWithoutPipe(cmd)
    def create(self, name="", stdout=True, base="base", start=False, nameserver="8.8.8.8", replace=True):
        """
        Create new container
        @param name if "" then will be an incremental nr
        @param start bool: start the container after creation.
        """
        print(("create:%s" % name))
        if replace:
            if j.sal.fs.exists(self._getMachinePath(name)):
                self.destroy(name)
        running, stopped = self.list()
        machines = running + stopped
        if name == "":
            # Pick the next free numeric name.
            nr = 0  # max
            for m in machines:
                if j.data.types.int.checkString(m):
                    if int(m) > nr:
                        nr = int(m)
            nr += 1
            # NOTE(review): `name` becomes an int here; "%s" formatting below
            # copes, but string operations (e.g. line.find(name)) would not —
            # confirm intended.
            name = nr
        lxcname = "%s%s" % (self._prefix, name)
        # cmd="lxc-clone --snapshot -B overlayfs -B btrfs -o %s -n %s -p %s -P %s"%(base,lxcname,self.basepath,self.basepath)
        # print cmd
        # out=self.execute(cmd)
        self.btrfsSubvolCopy(base, lxcname)
        # if lxcname=="base":
        self._setConfig(lxcname, base)
        # is in path need to remove
        resolvconfpath = j.sal.fs.joinPaths(self._get_rootpath(name), "etc", "resolv.conf")
        if j.sal.fs.isLink(resolvconfpath):
            j.sal.fs.unlink(resolvconfpath)
        hostpath = j.sal.fs.joinPaths(self._get_rootpath(name), "etc", "hostname")
        j.sal.fs.writeFile(filename=hostpath, contents=name)
        # add host in own hosts file
        hostspath = j.sal.fs.joinPaths(self._get_rootpath(name), "etc", "hosts")
        lines = j.sal.fs.fileGetContents(hostspath)
        out = ""
        # NOTE(review): if fileGetContents returns a str, this loop iterates
        # characters rather than lines (compare setHostName, which splits on
        # "\n") — looks suspect, confirm before relying on it.
        for line in lines:
            line = line.strip()
            if line.strip() == "" or line[0] == "#":
                continue
            if line.find(name) != -1:
                continue
            out += "%s\n" % line
        out += "%s %s\n" % ("127.0.0.1", name)
        j.sal.fs.writeFile(filename=hostspath, contents=out)
        j.sal.netconfig.root = self._get_rootpath(name)  # makes sure the network config is done on right spot
        j.sal.netconfig.interfaces_reset()
        j.sal.netconfig.nameserver_set(nameserver)
        j.sal.netconfig.root = ""  # set back to normal
        hrd = self.getConfig(name)
        ipaddrs = j.application.config.getDict("lxc.mgmt.ipaddresses")
        if name in ipaddrs:
            ipaddr = ipaddrs[name]
        else:
            # find free ip addr
            import netaddr
            existing = [netaddr.ip.IPAddress(item).value for item in list(ipaddrs.values()) if item.strip() != ""]
            ip = netaddr.IPNetwork(j.application.config.get("lxc.mgmt.ip"))
            for i in range(ip.first + 2, ip.last - 2):
                if i not in existing:
                    ipaddr = str(netaddr.ip.IPAddress(i))
                    break
            ipaddrs[name] = ipaddr
            j.application.config.setDict("lxc.mgmt.ipaddresses", ipaddrs)
        # mgmtiprange=j.application.config.get("lxc.management.iprange")
        # TODO: make sure other ranges also supported
        self.networkSet(name, netname="mgmt0", bridge="lxc", pubips=["%s/24" % ipaddr])
        # set ipaddr in hrd file
        hrd.set("ipaddr", ipaddr)
        if start:
            return self.start(name)
        self.setHostName(name)
        self.pushSSHKey(name)
        return self.getIp(name)
    def setHostName(self, name):
        """
        Set hostname on the container
        @param name: new hostname
        """
        # Drop any stale /etc/hosts entry for `name`, then append the
        # current IP mapping on the host side.
        lines = j.sal.fs.fileGetContents("/etc/hosts")
        out = ""
        for line in lines.split("\n"):
            if line.find(name) != -1:
                continue
            out += "%s\n" % line
        out += "%s %s\n" % (self.getIp(name), name)
        j.sal.fs.writeFile(filename="/etc/hosts", contents=out)
    def pushSSHKey(self, name):
        """
        Push sshkey
        @param name str: keyname
        """
        path = j.sal.fs.joinPaths(self._get_rootpath(name), "root", ".ssh", "authorized_keys")
        content = j.sal.fs.fileGetContents("/root/.ssh/id_dsa.pub")
        j.sal.fs.writeFile(filename=path, contents="%s\n" % content)
        # Clear known_hosts so the container starts with a clean slate.
        path = j.sal.fs.joinPaths(self._get_rootpath(name), "root", ".ssh", "known_hosts")
        j.sal.fs.writeFile(filename=path, contents="")
    def destroyAll(self):
        """
        Destroy all running containers.
        """
        running, stopped = self.list()
        alll = running + stopped
        for item in alll:
            self.destroy(item)
    def destroy(self, name):
        """
        Destroy container by name
        @param name str: name
        """
        running, stopped = self.list()
        alll = running + stopped
        print(("running:%s" % ",".join(running)))
        print(("stopped:%s" % ",".join(stopped)))
        if name in running:
            # cmd="lxc-destroy -n %s%s -f"%(self._prefix,name)
            cmd = "lxc-kill -P %s -n %s%s" % (self.basepath, self._prefix, name)
            self.execute(cmd)
            # Poll until the container disappears from the running list.
            while name in running:
                running, stopped = self.list()
                time.sleep(0.1)
                print("wait stop")
            alll = running + stopped
        self.btrfsSubvolDelete(name)
        # #TODO: put timeout in
    def stop(self, name):
        """
        Stop a container by name
        @param name str: container name.
        """
        # cmd="lxc-stop -n %s%s"%(self._prefix,name)
        cmd = "lxc-stop -P %s -n %s%s" % (self.basepath, self._prefix, name)
        self.execute(cmd)
    def start(self, name, stdout=True, test=True):
        """
        Start container
        @param name str: container name.
        """
        print(("start:%s" % name))
        cmd = "lxc-start -d -P %s -n %s%s" % (self.basepath, self._prefix, name)
        print(cmd)
        # cmd="lxc-start -d -n %s%s"%(self._prefix,name)
        self.execute(cmd)
        # Wait up to 20s for the container to appear in the running list.
        start = time.time()
        now = start
        found = False
        while now < start + 20:
            running = self.list()[0]
            if name in running:
                found = True
                break
            time.sleep(0.2)
            now = time.time()
        if found is False:
            msg = "could not start new machine, did not start in 20 sec."
            if stdout:
                print(msg)
            raise j.exceptions.RuntimeError(msg)
        self.setHostName(name)
        ipaddr = self.getIp(name)
        print(("test ssh access to %s" % ipaddr))
        # Wait up to 10s for sshd inside the container to accept connections.
        timeout = time.time() + 10
        while time.time() < timeout:
            if j.sal.nettools.tcpPortConnectionTest(ipaddr, 22):
                return
            time.sleep(0.1)
        raise j.exceptions.RuntimeError("Could not connect to machine %s over port 22 (ssh)" % ipaddr)
    def networkSet(self, machinename, netname="pub0", pubips=[], bridge="public", gateway=None):
        """Attach `machinename` to an OVS bridge via LXC veth config.

        NOTE(review): `pubips` is only printed and `gateway` is never used
        in this body — confirm whether that is intentional.
        """
        bridge = bridge.lower()
        print(("set pub network %s on %s" % (pubips, machinename)))
        machine_cfg_file = j.sal.fs.joinPaths(self.basepath, '%s%s' % (self._prefix, machinename), 'config')
        machine_ovs_file = j.sal.fs.joinPaths(self.basepath, '%s%s' % (self._prefix, machinename), 'ovsbr_%s' % bridge)
        # mgmt = j.application.config.get('lxc.mgmt.ip')
        # netaddr.IPNetwork(mgmt)
        config = '''
lxc.network.type = veth
lxc.network.flags = up
#lxc.network.veth.pair = %s_%s
lxc.network.name = %s
lxc.network.script.up = $basedir/%s/ovsbr_%s
lxc.network.script.down = $basedir/%s/ovsbr_%s
''' % (machinename, netname, netname, machinename, bridge, machinename, bridge)
        config = config.replace("$basedir", self.basepath)
        Covs = """
#!/bin/bash
if [ "$3" = "up" ] ; then
/usr/bin/ovs-vsctl --may-exist add-port %s $5
else
/usr/bin/ovs-vsctl --if-exists del-port %s $5
fi
""" % (bridge, bridge)
        j.sal.fs.writeFile(filename=machine_ovs_file, contents=Covs)
        j.sal.fs.chmod(machine_ovs_file, 0o755)
        ed = j.tools.code.getTextFileEditor(machine_cfg_file)
        ed.setSection(netname, config)
    def networkSetPrivateVXLan(self, name, vxlanid, ipaddresses):
        """Not implemented."""
        raise j.exceptions.RuntimeError("not implemented")
    def _setConfig(self, name, parent):
        """Write the LXC config file for container `name`."""
        print("SET CONFIG")
        base = self._getMachinePath(name)
        baseparent = self._getMachinePath(parent)
        machine_cfg_file = self._getMachinePath(name, 'config')
        C = """
lxc.tty = 4
lxc.pts = 1024
lxc.arch = x86_64
lxc.cgroup.devices.deny = a
lxc.cgroup.devices.allow = c *:* m
lxc.cgroup.devices.allow = b *:* m
lxc.cgroup.devices.allow = c 1:3 rwm
lxc.cgroup.devices.allow = c 1:5 rwm
lxc.cgroup.devices.allow = c 5:1 rwm
lxc.cgroup.devices.allow = c 5:0 rwm
lxc.cgroup.devices.allow = c 1:9 rwm
lxc.cgroup.devices.allow = c 1:8 rwm
lxc.cgroup.devices.allow = c 136:* rwm
lxc.cgroup.devices.allow = c 5:2 rwm
lxc.cgroup.devices.allow = c 254:0 rm
lxc.cgroup.devices.allow = c 10:229 rwm
lxc.cgroup.devices.allow = c 10:200 rwm
lxc.cgroup.devices.allow = c 1:7 rwm
lxc.cgroup.devices.allow = c 10:228 rwm
lxc.cgroup.devices.allow = c 10:232 rwm
lxc.utsname = $name
lxc.cap.drop = sys_module
lxc.cap.drop = mac_admin
lxc.cap.drop = mac_override
lxc.cap.drop = sys_time
lxc.hook.clone = /usr/share/lxc/hooks/ubuntu-cloud-prep
#lxc.rootfs = overlayfs:$baseparent/rootfs:$base/delta0
lxc.rootfs = $base/rootfs
lxc.pivotdir = lxc_putold
#lxc.mount.entry=/var/lib/lxc/jumpscale $base/rootfs/jumpscale none defaults,bind 0 0
#lxc.mount.entry=/var/lib/lxc/shared $base/rootfs/shared none defaults,bind 0 0
lxc.mount = $base/fstab
"""
        C = C.replace("$name", name)
        C = C.replace("$baseparent", baseparent)
        C = C.replace("$base", base)
        j.sal.fs.writeFile(machine_cfg_file, C)
        # j.sal.fs.createDir("%s/delta0/jumpscale"%base)
        # j.sal.fs.createDir("%s/delta0/shared"%base)
|
<reponame>mehdirezaie/LSSutils<filename>lssutils/stats/smoother.py
""" Kernel Smoother SN Hubble Diagram
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from lssutils.utils import Cosmology
def kernel(z_i, z, delta=0.05, **kw):
    """Gaussian kernel in log10(1+z) space with width `delta`.

    Extra keyword arguments are accepted and ignored so callers can pass
    a shared options dict.
    """
    u = np.log10((1. + z_i) / (1. + z)) / delta
    return np.exp(-0.5 * u * u)
def chi2(y1, y2, sigma):
    """Root-mean-square of the sigma-normalized residuals.

    Note: despite the name this is sqrt(mean(((y1-y2)/sigma)^2)) — an RMSE,
    not a chi-square sum (change mean to sum for a true chi2).
    """
    residuals = (y1 - y2) / sigma
    return np.sqrt((residuals * residuals).mean())
class KernelSmoother(object):
    """Iterative kernel smoother for a supernova Hubble diagram.

    Holds a train/test split of (z, mu, sigma_mu) rows, a theoretical LCDM
    distance-modulus curve as the initial guess, and normalized Gaussian
    kernel weights used to smooth the data-minus-guess residuals.
    """
    #
    def __init__(self, fn='./data/union.txt', test_size=0.33, random_state=0):
        # Expected columns of `fn`: redshift, distance modulus, mu error.
        data = np.loadtxt(fn)
        #
        # add train test split
        train, test = train_test_split(data,
                                      random_state=random_state,
                                      test_size=test_size)
        #
        #
        self.z_data = train[:,0]
        self.mu_data = train[:,1]
        self.mu_err = train[:,2]
        self.z_data_t = test[:,0]
        self.mu_data_t = test[:,1]
        self.mu_err_t = test[:,2]
        self.mu_guess = None
    def _init_cosmology(self, om_m=0.26, om_L=0.7, h=0.69, zmin=0.01, zmax=1.5, nbin=30):
        """Build the LCDM theory curve and use it as the initial guess."""
        # 0.69
        # 0.26
        # theoretical mu
        self.z_grid = np.linspace(zmin, zmax, nbin)
        #self.z_grid = np.logspace(np.log10(zmin), np.log10(zmax), nbin) # logarithmic grid
        theory = Cosmology(om_m=om_m, om_L=om_L, h=h)
        # Distance modulus from the luminosity distance: mu = 5 log10(DL) + 25.
        self.mu_th = 5*np.log10(np.vectorize(theory.DL)(self.z_grid)) + 25
        # interpolate the theory on data points
        self.mu_th_spl = IUS(self.z_grid, self.mu_th)
        # guess
        self.mu_g_grid = self.mu_th
        self.mu_g_data = self.mu_th_spl(self.z_data)
        self.mu_g_data_t = self.mu_th_spl(self.z_data_t)
        # chi2
        # RMSE of the theory against train (self.chi2) and test (self.err).
        self.chi2 = [chi2(self.mu_g_data, self.mu_data, self.mu_err)]
        self.err = [chi2(self.mu_g_data_t, self.mu_data_t, self.mu_err_t)]
        # Baseline: RMSE of predicting the train mean mu everywhere.
        self.baseline = [chi2(np.mean(self.mu_data), self.mu_data, self.mu_err),
                        chi2(np.mean(self.mu_data), self.mu_data_t, self.mu_err_t)]
    def _init_weights(self, **kw):
        """Precompute normalized kernel weights (each row sums to 1)."""
        # kernel on data/grid points
        weights_zdata = []
        for zi in self.z_data:
            kr = kernel(self.z_data, zi, **kw)
            krn = kr.sum() # normalization
            weights_zdata.append(kr/krn)
        self.weights_zdata = np.array(weights_zdata)
        #
        weights_zgrid = []
        for zi in self.z_grid:
            kr = kernel(self.z_data, zi, **kw)
            krn = kr.sum() # normalization
            weights_zgrid.append(kr/krn)
        self.weights_zgrid = np.array(weights_zgrid)
        weights_zdata_t = []
        for zi in self.z_data_t:
            kr = kernel(self.z_data, zi, **kw)
            krn = kr.sum() # normalization
            weights_zdata_t.append(kr/krn)
        self.weights_zdata_t = np.array(weights_zdata_t)
    def smooth(self, marginalize=False, verbose=False):
        """Run one smoothing iteration; returns (train RMSE, test RMSE)."""
        # smooth on data points
        self.delta_mu_data = self.mu_data - self.mu_g_data
        self.smooth_deltamu_d = self.weights_zdata.dot(self.delta_mu_data)
        self.smooth_deltamu_g = self.weights_zgrid.dot(self.delta_mu_data)
        self.smooth_deltamu_t = self.weights_zdata_t.dot(self.delta_mu_data)
        #
        self.smooth_mu_data = self.smooth_deltamu_d + self.mu_g_data
        self.smooth_mu_grid = self.smooth_deltamu_g + self.mu_g_grid
        self.smooth_mu_data_t = self.smooth_deltamu_t + self.mu_g_data_t
        #
        #
        # The smoothed curve becomes the guess for the next iteration.
        self.mu_g_data = self.smooth_mu_data
        self.mu_g_grid = self.smooth_mu_grid
        self.mu_g_data_t = self.smooth_mu_data_t
        #
        if marginalize:
            ''' results in poor performance '''
            raise RuntimeWarning('Not implemented yet')
            #offset1 = np.mean((self.mu_g_data-self.mu_data)/self.mu_err)
            #self.mu_g_data += offset1
            #self.mu_g_grid += offset1
            #self.smooth_deltamu_g -= offset1
        chi2_train = chi2(self.mu_g_data, self.mu_data, self.mu_err)
        chi2_test = chi2(self.mu_g_data_t, self.mu_data_t, self.mu_err_t)
        self.chi2.append(chi2_train)
        self.err.append(chi2_test)
        return chi2_train, chi2_test
    def plot_mu_rmse(self, ax=None):
        """Plot the Hubble diagram (left) and RMSE per iteration (right)."""
        if ax is None:fig, ax = plt.subplots(ncols=2, figsize=(12,4))
        #
        ax[0].plot(self.z_data, self.mu_data, '.', color='k', alpha=0.1, label='Union')
        ax[0].plot(self.z_data_t, self.mu_data_t, '.', color='navy', alpha=0.5, label=None)
        ax[0].plot(self.z_grid, self.smooth_mu_grid, 'r-', label='Smoothed')
        ax[0].plot(self.z_grid, self.mu_th, 'k--', label=r'$\Lambda$CDM')
        #ax[1].axhline(chi2(SN.mu_th_spl(SN.z_data), SN.mu_data, SN.mu_err))
        ax[1].text(0, self.chi2[0]*1.01, 'LCDM', color='k')
        ax[1].scatter(0, self.chi2[0], marker='.', color='k')
        ax[1].scatter(0, self.err[0], marker='.', color='r')
        ax[1].plot(self.chi2, ls='-', label='train RMSE', color='k')
        ax[1].plot(self.err, ls='--', label='test RMSE', color='r')
        #ax[1].axhline(1, color='k', ls=':')
        # annotation
        ax[0].set_xscale('log')
        ax[0].legend(loc=4)
        ax[0].set_xlabel('redshift')
        ax[0].set_ylabel(r'$\mu$')
        ax[1].set_xlabel('iteration')
        ax[1].set_ylabel(r'RMSE')
        ax[1].set_ylim(0.8, 1.2)
        # ax[1].set_ylim(0.99, 1.01)
        ax[1].legend()
if __name__ == '__main__':
    # Smoke test: smooth the Union compilation for a few iterations and
    # print the (train RMSE, test RMSE) pair at each step.
    n_iterations = 5
    SN = KernelSmoother(fn='../../data/union.txt')
    SN._init_cosmology(nbin=100)
    SN._init_weights(delta=0.05)
    for step in range(n_iterations):
        print(f'{step}, {SN.smooth()}')
    #fig, ax = plt.subplots(nrows=2, figsize=(6,8))
    #SN.plot_mu_rmse(ax=ax)
<reponame>Unique-Divine/test-repo<gh_stars>0
"""Module that defines custom grid environment with an API similar to AI Gym.
An agent moves around in the grid. The agent is...
1. Rewarded for reaching a goal.
2. Punished for falling in a hole.
3. Punished for taking too many scenes to solve.
Classes:
Env: A custom grid environment with an API similar to that of AI Gym.
Observation: An observation of the environment, i.e. what is observed
by an agent.
ObservationSeq: TODO -> docs, dev
EnvStep: A step in the environment
Point: A 1D np.ndarray of size 2 that contains the row and column
indices for a point in the environment
PathMaker: Helper class that guarantees the environment is solvable.
"""
import numpy as np
import torch
import os, sys
import copy
import random
import collections
import copy
import dataclasses
try:
import rl_memory
except:
exec(open('__init__.py').read())
import rl_memory
import rl_memory as rlm
from typing import List, Union, Generator, Optional, Dict
from torch import Tensor
Array = np.ndarray
import warnings; warnings.filterwarnings("ignore")
class Point(np.ndarray):
    """A 1D np.ndarray of size 2 holding the (row, column) indices of a
    point in the environment.

    Accepts either two scalars or a single list/tuple of two values.

    Examples:
        >>> p1 = Point(1, 2)
        >>> p1
        array([1, 2], dtype=int16)
        >>> p2 = Point([1, 3])
        >>> p2
        array([1, 3], dtype=int16)
        >>> p1 == p2
        array([ True, False])
    """
    def __new__(cls, *args):
        if len(args) == 2:
            coords = [*args]
        elif len(args) == 1 and isinstance(args[0], (list, tuple)):
            coords = args[0]
        else:
            raise ValueError(f"args: {args}, type(args[0]): {type(args[0])}")
        return np.asarray(coords, dtype=np.int16)
class Observation(torch.Tensor):
    """An observation of the environment, i.e. what is observed by an agent.
    An observation is a partial description of an environment state. Note that
    an observation may omit information, hence being called a partial
    description.
    A state is a complete description of the state of the environment. No
    information about the environment is hidden from a state.
    Args:
        env (Optional[rlm.Env]): An environment with an agent in it. The
            environment contains all information needed to get states for
            reinforcement learning.
        env_grid (Optional[np.ndarray]): An array that describes the
            env state.
        env_char_grid (Optional[np.ndarray]): Character-array form of the env
            state; converted via `Env.render_as_grid`.
        dtype (torch.dtype): The data type for the observation. Currently
            unused in this implementation.
        sight_distance (int): How far in each direction the agent can
            see in the environment. This affects the size of the observation.
            Defaults to 4.
    Attributes:
        center_abs (Point): The agent's position in the 'env.grid'.
        center (Point): The agent's position in the current sight window.
    """
    def __new__(cls,
                env: Optional['rlm.Env'] = None,
                env_grid: Optional[np.ndarray] = None,
                env_char_grid: Optional[np.ndarray] = None,
                dtype: torch.dtype = torch.float,
                sight_distance: Optional[int] = None,
                ) -> torch.Tensor:
        # At least one representation of the environment must be provided.
        env_state_given: bool = ((env is not None)
            or (env_grid is not None)
            or (env_char_grid is not None))
        if not env_state_given:
            raise ValueError(
                "Some format of environment must be given. Any of the 'env', "
                + "'env_grid', or 'env_char_grid' arguments will suffice.")
        # Grid codes for interactables ('agent', 'blocked', ...) taken from
        # a default Env instance.
        env_interactables = Env().interactables
        if env_grid is not None:
            env_position_space = Env(grid_shape=env_grid.shape).position_space
            env_grid = env_grid
        elif env_char_grid is not None:
            env_position_space = Env(
                grid_shape=env_char_grid.shape).position_space
            env_grid = Env.render_as_grid(char_grid = env_char_grid)
        else:
            assert env is not None, ("If 'env_grid' and 'env_char_grid' aren't"
                + " given, 'env' must be given.")
            env_position_space = env.position_space
            env_grid = env.grid
            assert env.grid is not None
        assert env_position_space is not None
        # Specify 'sight_distance'
        # Without an env, sight_distance is mandatory; with an env it must
        # either be omitted or match the env's own value.
        if env is None:
            if (sight_distance is None):
                raise ValueError()
            else:
                sight_distance = sight_distance
        if env is not None:
            if (sight_distance is None):
                sight_distance = env.sight_distance
            else:
                if not env.sight_distance == sight_distance:
                    raise ValueError(
                        "You cant't give a value for 'sight_distance' if an "
                        + "'env' instance is given.")
        # The agent sits at the center of the (2*sd+1) x (2*sd+1) window.
        center: Point = Point([sight_distance] * 2)
        is_agent: np.ndarray = (env_grid == env_interactables['agent'])
        env_agent_indices: np.ndarray = np.argwhere(is_agent)
        if env_agent_indices.size == 0:
            raise ValueError("There's no agent in this environment. Try using "
                + "'env.reset()' before making this Observation.")
        env_agent_position = Point(env_agent_indices[0].tolist())
        center_abs: Point = env_agent_position
        def observe() -> Tensor:
            # Build the sight window around the agent; cells outside the
            # grid are filled with the 'blocked' code.
            sd: int = sight_distance
            observation = np.empty(
                shape= [sight_distance * 2 + 1] * 2,
                dtype = np.int16)
            row_view: range = range(center_abs[0] - sd, center_abs[0] + sd + 1)
            col_view: range = range(center_abs[1] - sd, center_abs[1] + sd + 1)
            def views(row_view, col_view) -> Generator:
                # Yield absolute grid indices paired with window indices.
                for row_idx in row_view:
                    for col_idx in col_view:
                        displacement = Point(row_idx, col_idx) - center_abs
                        relative_position: Point = center + displacement
                        rel_row, rel_col = relative_position
                        yield row_idx, col_idx, rel_row, rel_col
            for view in views(row_view, col_view):
                row_idx, col_idx, rel_row, rel_col = view
                if [row_idx, col_idx] in env_position_space:
                    observation[rel_row, rel_col] = env_grid[row_idx, col_idx]
                else:
                    observation[rel_row, rel_col] = env_interactables[
                        'blocked']
            return torch.from_numpy(observation).float()
        obs: Tensor = observe()
        # NOTE(review): the returned object is a plain float Tensor with
        # 'center'/'center_abs' attached via setattr, not an Observation
        # instance — confirm downstream code does not isinstance-check.
        setattr(obs, "center", center)
        setattr(obs, "center_abs", center_abs)
        def as_color_img(obs: Tensor, env = env):
            pass # TODO
        return obs
    def __repr__(self: Tensor):
        # Render the observation using the Env character mapping.
        obs_grid: Array = self.numpy()
        return f"{Env.render_as_char(grid = obs_grid)}"
class ObservationSeq(list):
    """An ordered sequence of the agent's last ``K`` observations.

    If fewer than ``K`` observations are given, the earliest observation is
    duplicated at the front until the sequence has length ``K``.

    Args:
        observations (List[Observation]): Non-empty list of observations,
            ordered oldest to newest.
        K (int): Desired minimum sequence length; must be >= 1. Defaults
            to 2.

    Returns:
        list: A new list of length ``max(K, len(observations))``. The input
        list is never mutated.
    """

    def __new__(cls, observations: "List[Observation]", K: int = 2) -> list:
        assert cls.check_for_valid_args(observations, K)
        # Copy so front-padding never mutates the caller's list.
        # Bug fix: the original only assigned 'obs_seq' when K == 1 or
        # len(observations) < K, raising UnboundLocalError otherwise.
        obs_seq: "List[Observation]" = list(observations)
        if len(obs_seq) < K:
            duplications = K - len(obs_seq)
            for _ in range(duplications):
                obs_seq.insert(0, observations[0])
        return obs_seq

    @classmethod
    def check_for_valid_args(cls, observations, K):
        """Validate constructor arguments.

        Returns:
            bool: True if the arguments are valid.

        Raises:
            ValueError: If 'observations' is empty or 'K' < 1.
        """
        if len(observations) < 1:
            raise ValueError("Attribute 'observations' (list) is empty.")
        elif K < 1:
            # Typo fix in the error message ("is must be" -> "must be").
            raise ValueError("Attribute 'K' (int) must be >= 1.")
        else:
            return True
@dataclasses.dataclass
class EnvStep:
    """A step in the environment.

    Supports tuple-style access: ``len(step) == 4``, ``step[i]``, and
    ``next_obs, reward, done, info = step.values``.

    Attributes:
        next_obs (Observation): An observation of the environment after the
            agent takes an action. This is the observation at time t+1.
        reward (float): Reward received after taking an action.
        done (bool): Specifies whether the episode is complete.
        info (str): Unused attribute.
        values (tuple): Snapshot (next_obs, reward, done, info) built in
            '__post_init__'; not an '__init__' argument.
    """
    next_obs: 'Observation'
    reward: float
    done: bool
    info: str = ""
    # Derived field: excluded from __init__, populated by __post_init__.
    values: tuple = dataclasses.field(init = False)

    def __post_init__(self):
        # Cache the fields as an ordered tuple so the step can be unpacked
        # like AI Gym's (observation, reward, done, info).
        self.values = (self.next_obs, self.reward, self.done, self.info)

    def __len__(self):
        return len(self.values)

    def __getitem__(self, idx):
        # Integer/slice indexing delegates to the cached tuple.
        return self.values[idx]
class Env:
    """A custom grid environment with an API similar to that of AI Gym.

    An agent moves around in the grid. The agent is...
    1. Rewarded for reaching a goal.
    2. Punished for falling in a hole.
    3. Punished for taking too many scenes to solve.

    This grid environment allows for varying starting position for the agent,
    holes, and goal(s). Movements are deterministic rather than stochastic and
    each environment is solvable, so a "perfect" agent can get reward 1
    on every episode.

    Args:
        grid_shape (list-like, optional): The matrix dimensions of the
            environment. Defaults to (10, 10).
        hole_pct (float, optional): The probability of any open spot to be a
            hole. An "open spot" is any spot on the grid that is not an
            agent, goal, or blocked. Defaults to 0.2.
        n_goals (int, optional): Number of goals in the environment. Reaching
            a goal gives a positive reward signal. Defaults to 2.
        sight_distance (int, optional): How far in each direction the agent
            can see in the environment. Defaults to 4.

    Attributes:
        interactables (dict): key-value pairs for the various items that can
            take up space on the frozen lake. This would be the agent, goal,
            holes, etc. The 'blocked' key refers to spaces that can't
            be traversed.
        grid (np.ndarray): A matrix with the encodings for each interactable.
        sight_distance (int): How far in each direction the agent can
            see in the environment. This affects the size of the observation.
            Defaults to 4.

    Examples:
        >>> import rl_memory as rlm
        >>> env = rlm.Env() # initializes an environment
        >>> env.reset() # creates or resets the environment

        ```python
        # An episode could then look like:
        replay_buffer: list = []
        done = False
        while not done:
            obs = rlm.Observation(env = env)
            random_action: int = random.randrange(8)
            step: rlm.EnvStep = env.step(action_idx = random_action, obs = obs)
            observation, reward, done, info = step.values
            replay_buffer.append( ... )
        ```
    """

    # Integer encoding for each kind of object that can occupy a grid cell.
    interactables: Dict[str, int] = {
        'frozen': 0, 'hole': 1, 'goal': 2, 'agent': 7, 'blocked': 3}

    def __init__(self,
                 grid_shape = (10, 10),
                 hole_pct: float = 0.2,
                 n_goals: int = 2,
                 sight_distance: int = 4):
        # Set board dimensions and initialize to an "empty" (all frozen) grid.
        if len(grid_shape) != 2:
            raise ValueError("'grid_shape' must be a list-like of length 2.")
        self.empty_grid = np.full(shape = grid_shape,
                                  fill_value = self.interactables['frozen'],
                                  dtype = np.int32)
        self.grid = copy.deepcopy(self.empty_grid)
        # Bug fix: 'ndarray.shape' is a tuple, so compare against a tuple.
        # Previously a list-valued 'grid_shape' tripped this assertion.
        assert self.grid.shape == tuple(grid_shape)
        self.sight_distance = sight_distance

        # Initialize grid helper parameters.
        self._position_space: List[list] = self.position_space
        self.action_space: List["Point"] = self.get_action_space()
        # Bug fix: copy the list so removing entries from 'open_positions'
        # does not also mutate '_position_space' through aliasing.
        self.open_positions: List[list] = list(self._position_space)
        self._agent_position: List[int] = self.agent_position
        self.goal_position: List[int] = None

        # Initial grid - for env.reset()
        self.agent_start: List[int] = None
        self.valid_path: List[List[int]]
        self._env_start: np.ndarray = copy.deepcopy(self.empty_grid)

        # Declare board parameters as instance attributes.
        if (hole_pct < 0) or (hole_pct >= 1):
            raise ValueError("'hole_pct' must be between 0 and 1.")
        self.hole_pct = hole_pct
        self.n_goals = n_goals

    def __repr__(self) -> str:
        return f"Env:\n{self.render_as_char(self.grid)}"

    def __str__(self) -> str:
        return str(self.grid)

    def __eq__(self, other) -> bool:
        """Compare with another 'Env' (all defining attributes) or with a
        raw grid ('np.ndarray')."""
        checks: bool
        if isinstance(other, np.ndarray):
            checks = np.all(self.grid == other)
        elif isinstance(other, Env):
            checks = np.all([
                np.all(self.grid == other.grid),
                self.agent_start == other.agent_start,
                self.open_positions == other.open_positions,
                self.valid_path == other.valid_path,
                self.n_goals == other.n_goals,
                self.hole_pct == other.hole_pct, ])
        else:
            raise ValueError(f"{other} must be an environment instance.")
        return checks

    def render(self):
        """Graphical rendering is not implemented; use 'render_as_char'."""
        raise NotImplementedError

    @classmethod
    def render_as_char(cls, grid) -> np.ndarray:
        """Convert an integer-encoded grid into a character grid."""
        interactables_to_char = {
            cls.interactables['frozen']: "_",
            cls.interactables['hole']: "o",
            cls.interactables['goal']: "G",
            cls.interactables['agent']: "A",
            cls.interactables['blocked']: "'"}
        char_grid = np.asarray(
            [interactables_to_char[e] for e in grid.flatten()],
            dtype = str).reshape(grid.shape)
        return char_grid

    @classmethod
    def render_as_grid(cls, char_grid) -> np.ndarray:
        """Convert a character grid back into an integer-encoded grid.
        Inverse of 'render_as_char'."""
        char_to_interactables = {
            "_": cls.interactables["frozen"],
            "o": cls.interactables["hole"],
            "G": cls.interactables["goal"],
            "A": cls.interactables["agent"],
            "'": cls.interactables["blocked"]}
        grid = np.asarray(
            [char_to_interactables[e] for e in char_grid.flatten()],
            dtype = np.int32).reshape(char_grid.shape)
        return grid

    # --------------------------------------------------------------------
    # Properties
    # --------------------------------------------------------------------

    def get_action_space(self) -> List["Point"]:
        """The eight unit moves (diagonals included), as 'Point' shifts."""
        action_space: List[list] = [[-1, 1], [-1, 0], [-1, -1], [0, -1],
                                    [1, -1], [1, 0], [1, 1], [0, 1]]
        return [Point(p) for p in action_space]

    @property
    def position_space(self) -> List[List[int]]:
        """All [row, col] coordinate pairs on the grid (recomputed on each
        access)."""
        row_dim, col_dim = self.grid.shape
        position_space: List[list] = []
        for i in range(row_dim):
            for j in range(col_dim):
                position_space.append([i, j])
        return position_space

    @position_space.deleter
    def position_space(self):
        raise AttributeError("`position_space` attribute of class "
                             + "`Env` is read-only.")

    @property
    def agent_position(self) -> List[int]:
        """The agent's [row, col] position, or None if no agent is on the
        grid."""
        is_agent: np.ndarray = (self.grid == self.interactables['agent'])
        if np.any(is_agent):
            return np.argwhere(is_agent)[0].tolist()
        else:
            return None

    @property
    def env_start(self):
        """The initial grid that 'reset()' restores."""
        return self._env_start

    @env_start.setter
    def env_start(self, grid):
        self._env_start = grid

    @env_start.deleter
    def env_start(self):
        self._env_start = None

    # --------------------------------------------------------------------
    # Helper functions for creating an env from scratch
    # --------------------------------------------------------------------

    def randomly_select_open_position(self) -> List[int]:
        """Pick a uniformly random position not yet occupied."""
        position: List[int] = random.choice(self.open_positions)
        return position

    def set_agent_goal(self):
        """Randomly place the agent and 'n_goals' goals on open positions."""
        # positions_ag: The positions for the agent and goal(s)
        positions_ag: List[list] = []

        # Randomly select starting point for agent.
        agent_start = self.randomly_select_open_position()
        self.agent_start = agent_start
        self.open_positions.remove(agent_start)
        positions_ag.append(agent_start)

        # Randomly select starting point for each goal.
        for _ in np.arange(self.n_goals):
            goal_position = self.randomly_select_open_position()
            self.open_positions.remove(goal_position)
            positions_ag.append(goal_position)
            self.goal_position = goal_position
        assert len(positions_ag) >= 2, "We expect at least 1 agent and 1 goal."

        # Label the agent on the grid.
        x, y = positions_ag[0]
        self.grid[x, y] = self.interactables['agent']

        # Label the goals on the grid.
        for goal_idx in np.arange(self.n_goals):
            x, y = positions_ag[goal_idx + 1]
            self.grid[x, y] = self.interactables['goal']

    def set_holes(self, hole_pct: float = None):
        """Randomly place holes on open positions.

        Args:
            hole_pct (float, optional): The probability that any open spot is
                a hole. An "open spot" is any spot on the grid that is not an
                agent, goal, or blocked. Defaults to the 'env.hole_pct'
                attribute.
        """
        hole_pct = self.hole_pct if (hole_pct is None) else hole_pct
        # Bug fix: use the (possibly overridden) local 'hole_pct'; the
        # original always read 'self.hole_pct', ignoring the parameter.
        n_holes: int = int(len(self.open_positions) * hole_pct)
        # Guarantee at least one hole whenever there is room for one.
        if len(self.open_positions) > 0:
            if n_holes == 0:
                n_holes = 1
        for _ in range(n_holes):
            hole_position = self.randomly_select_open_position()
            self.open_positions.remove(hole_position)
            x, y = hole_position
            self.grid[x, y] = self.interactables['hole']

    # --------------------------------------------------------------------
    # Functions for the user
    # --------------------------------------------------------------------

    def create_new(self):
        """Place all of the interactables on the grid to create a new env.

        Changes the 'env.env_start' attribute, the environment you reset to
        when calling 'env.reset'.

        Examples:
        --------
        >>> env0 = Env()
        >>> env0.reset() # Initializes board with interactable env objects.

        You can also call 'env0.create_new()' instead of 'env0.reset()'
        >>> env1 = env0.create_new() # randomly generate new env
        """
        def setup_blank_env(env):
            # Place agent and goals, clear a guaranteed path, then add holes.
            env.set_agent_goal()
            valid_path = PathMaker(env).make_valid_path()
            env.valid_path = valid_path
            for position in valid_path:
                if position in env.open_positions:
                    env.open_positions.remove(position)
            # Place holes in some of the remaining empty spaces.
            env.set_holes()

        if np.all(self.env_start == self.empty_grid):
            # First call: set up in place and save as the initial state.
            setup_blank_env(env = self)
            self.env_start: np.ndarray = self.grid
        else:  # Make a new environment and save that as the initial state.
            new_env = Env(grid_shape = self.grid.shape,
                          hole_pct = self.hole_pct,
                          n_goals = self.n_goals)
            assert np.all(new_env.env_start == self.empty_grid)
            # Place agent, goal(s), and holes on 'new_env'.
            setup_blank_env(env = new_env)
            # Set 'new_env' initial grid state.
            new_env.env_start = new_env.grid
            assert np.any(self.env_start != self.empty_grid)
            # Reset to this new environment.
            self.env_start = copy.deepcopy(new_env.grid)
            self.grid = copy.deepcopy(self.env_start)
        # TODO: Check that there are holes on the grid.
        # TODO: Check that none of the positions in valid path now have holes.

    def reset(self):
        """Resets the environment grid to 'env_start', the initial environment
        if it has been set. If 'env_start' hasn't been set, this method
        randomly generates a new env and declares that to be 'env_start'.

        Returns:
            Env: The initial environment.
        """
        start_is_not_empty: bool = not np.all(self.env_start == self.empty_grid)
        start_is_empty = not start_is_not_empty
        if isinstance(self.env_start, np.ndarray) and start_is_not_empty:
            self.grid = copy.deepcopy(self.env_start)
        elif isinstance(self.env_start, type(None)) or start_is_empty:
            self.create_new()
        else:
            raise AttributeError("'env_start' must be an ndarray or None.")

    def step(self, action_idx: int,
             obs: Union['Observation', 'ObservationSeq']) -> "EnvStep":
        """Advance the environment by one action.

        Args:
            action_idx (int): Index into 'action_space' of the move to take.
            obs (Observation | ObservationSeq): The agent's current
                observation; 'obs.center' marks the agent inside the
                observation window.

        Returns:
            EnvStep: (next_obs, reward, done, info) for this transition.

        Raises:
            ValueError: If the targeted cell holds an unknown encoding.
        """
        action: Point = self.action_space[action_idx]
        desired_position: Point = obs.center + action
        new_x, new_y = desired_position
        interactable: int = obs[new_x, new_y].item()
        # TODO: Currently, 'obs' is assumed to be an Observation instance.
        # Come back and implement the case that 'obs' is an 'obs_seq'.

        def move():
            # Relocate the agent marker on the grid.
            x, y = self.agent_position
            new_x, new_y = Point(self.agent_position) + action
            self.grid[x, y] = self.interactables['frozen']
            self.grid[new_x, new_y] = self.interactables['agent']

        def unable_to_move():
            # Blocked moves leave the grid unchanged.
            pass

        reward: float
        done: bool
        if interactable == self.interactables['frozen']:
            move()
            reward = 0
            done = False
        elif interactable == self.interactables['hole']:
            move()
            reward = -1
            done = True
        elif interactable == self.interactables['goal']:
            move()
            reward = 1
            done = True
        elif interactable == self.interactables['blocked']:
            unable_to_move()
            reward = -0.1
            done = False
        elif interactable == self.interactables['agent']:
            raise NotImplementedError("There shouldn't be two agents yet.")
            # TODO
        else:
            raise ValueError(f"interactable: '{interactable}' is not in "
                             + f"interactables: {self.interactables}")
        next_observation = Observation(env = self)
        info = ""
        return EnvStep(
            next_obs = next_observation, reward = reward, done = done,
            info = info)
class PathMaker:
    """Helper class that guarantees the environment is solvable by building
    a hole-free path from the agent to a goal."""

    def __init__(self, env: "Env") -> None:
        self.env = env
        # Most recently generated valid path, if any.
        self.valid_path: list = None

    def init_unexplored_spots(self) -> List[np.ndarray]:
        """Initialize the `unexplored_spots` attribute for the pathfinding
        algorithm. Unexplored spots are everything on the board that isn't an
        agent, hole, or blocked.

        Returns:
            unexplored_spots (List[list]): List of coordinate pairs to be used
                as indices of the env.grid matrix."""
        env = self.env
        # Explored spots: Locations in the grid with agent or hole.
        is_agent: np.ndarray = (env.grid == env.interactables['agent'])
        is_hole: np.ndarray = (env.grid == env.interactables['hole'])
        is_explored = (is_agent | is_hole)
        explored_spots: List[list] = [
            A.tolist() for A in np.argwhere(is_explored)]
        assert len(env.position_space) >= len(explored_spots)

        # Store unexplored spots.
        unexplored_spots: list = []
        unexplored_spots[:] = [p for p in env.position_space
                               if (p not in explored_spots)]
        return [np.array(spot) for spot in unexplored_spots]

    def generate_shifted_spots(self, spot) -> Generator[List[int], None, None]:
        """Generator for a viable position adjacent to the input position.

        Args:
            spot (list): An ordered pair (x, y) for a particular matrix
                element on the grid.

        Yields:
            shifted_spot (List[int]): A position that neighbors the input
                'spot' argument. Neighbors are yielded in random order, and
                off-grid neighbors are skipped.
        """
        nsew_shifts = [[1, 0], [0, 1], [0, -1], [-1, 0]]
        cross_shifts = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
        shifts: List[list] = nsew_shifts + cross_shifts
        shifted_spots = []
        x_0, y_0 = spot
        for shift in shifts:
            dx, dy = shift
            x, y = x_0 + dx, y_0 + dy
            shifted_spot = [x, y]
            # Only yield neighbors that are actually on the grid.
            if shifted_spot in self.env.position_space:
                shifted_spots.append(shifted_spot)
        random.shuffle(shifted_spots)  # randomize the order of the shifts
        for shifted_spot in shifted_spots:
            yield shifted_spot

    def random_steps(self, n_steps: int, starting_spot):
        """Helper function for 'random_walk()'. This generates a step in the
        discrete random walk.

        Args:
            n_steps (int): Number of steps.
            starting_spot (List[int]): A position (x, y) on the env.grid.

        Yields:
            shifted_spot (List[int]): Position of the next random step.
        """
        spot = starting_spot
        for _ in range(n_steps):
            shifted_spot: List[int]
            # Take the first (randomly ordered) viable neighbor.
            for shifted_spot in self.generate_shifted_spots(spot):
                yield shifted_spot
                spot = shifted_spot
                break

    def random_walk(self, n_steps: int,
                    start: List[Union[int, List[int]]]) -> List[List[int]]:
        """Extend a path by 'n_steps' random adjacent moves.

        Args:
            n_steps (int): Number of random steps to append.
            start: Either a single position List[int], or an existing path
                List[List[int]] whose last element is the walk's origin.

        Returns:
            List[List[int]]: The (possibly copied) path with the new steps
            appended.
        """
        assert isinstance(start, list), "'start' must be a list."
        assert len(start) > 0, \
            "'start' cannot be empty. The random walk needs a starting point."
        if isinstance(start[0], int):
            assert len(start) == 2, "..."  # TODO
            spot = start
            path = [spot]
        elif isinstance(start[0], list):
            assert np.all([len(pt) == 2 for pt in start]), (
                "The current value for 'start' has type List(list). As a list "
                + "of ordered pairs, each of element of 'start' should have a "
                + "length of 2. ")
            spot = start[-1]
            path = copy.deepcopy(start)
        else:
            raise ValueError("'start' must have type List[int] or List[list]")

        starting_spot = spot
        for step in self.random_steps(n_steps, starting_spot):
            path.append(step)
        proper_path_length: bool = ((len(path) == n_steps + 1)
                                    or (len(path) == n_steps + len(start)))
        assert proper_path_length, ("'path' is too short. "
                                    + f"len(path): {len(path)}, n_steps: {n_steps}")
        return path

    def diag_path(self, starting_pt: List[int], ending_pt: List[int]):
        """Diagonal path from 'starting_pt' toward 'ending_pt', taking one
        unit diagonal step until one coordinate matches 'ending_pt'.

        Args:
            starting_pt (List[int]): First position of the path.
            ending_pt (List[int]): Target position.

        Returns:
            List[List[int]]: Positions from 'starting_pt' (inclusive) to the
            point where the diagonal run ends.
        """
        displacement = np.array(ending_pt) - np.array(starting_pt)
        if np.all(displacement == 0):
            # Case 1: 'ending_pt' has already been reached.
            return [starting_pt]
        elif np.any(displacement == 0):
            # Case 2: 'displacement' is vertical or horizontal.
            return self.straight_shot([starting_pt], ending_pt)
        directions = (displacement / np.abs(displacement)).astype(int)
        # Diagonal motion is limited by the smaller displacement component.
        magnitude: int = np.min(np.abs(displacement))
        diag = np.full(shape = (magnitude + 1, 2), fill_value = starting_pt)
        for row_idx in range(1, magnitude + 1):
            diag[row_idx] = diag[row_idx - 1] + directions
        diag_path = [pt.tolist() for pt in diag]
        assert diag_path[0] == starting_pt, \
            "'diag_path[0]' should be the starting point."
        assert np.any(np.array(diag_path[-1]) == np.array(ending_pt)), \
            ("At least one component of the last pt in 'diag_path' should "
             + "match the corresponding component in 'ending_pt'")
        return diag_path

    @staticmethod
    def straight_shot(diag_path: List[List[int]],
                      ending_pt: List[int]) -> List[List[int]]:
        """Axis-aligned path from the end of 'diag_path' to 'ending_pt'.

        Args:
            diag_path (List[List[int]]): Existing path; its last element is
                the starting point of the straight segment.
            ending_pt (List[int]): Target position; must share a row or
                column with the starting point.

        Returns:
            List[List[int]]: The straight segment, starting point included.
        """
        starting_pt = diag_path[-1]
        displacement = np.array(ending_pt) - np.array(starting_pt)
        assert np.any(displacement == 0), \
            "At least one of the displacement components should be 0."
        if np.all(displacement == 0):
            # 'ending_pt' has already been reached on 'diag_path'.
            return diag_path[1:]
        directions = np.where(
            displacement == 0, 0,
            displacement / np.abs(displacement)).astype(int)
        magnitude = np.max(np.abs(displacement))
        straight = np.full(shape = (magnitude + 1, 2),
                           fill_value = starting_pt)
        for row_idx in range(1, magnitude + 1):
            straight[row_idx] = straight[row_idx - 1] + directions
        straight_path = [pt.tolist() for pt in straight]
        assert straight_path[-1] == ending_pt, ("'straight_path' is not "
                                                + "ending at 'ending_pt'.")
        return straight_path

    def shortest_path(self, path_a: list,
                      path_b: list) -> List[Union[List, int]]:
        """Find the shortest path between the ends of two paths on the env.grid.

        Args:
            path_a (list): A position of type List[int] or list of positions
                of type List[List[int]] on the env.grid.
            path_b (list): A position of type List[int] or list of positions
                of type List[List[int]] on the env.grid.

        Raises:
            ValueError: If the elements of the paths have the wrong type.

        Returns:
            List[Union[List, int]]: The shortest path between the endpoints
            of 'path_a' and 'path_b'.
        """
        # Verify that both paths are lists.
        assert np.all([isinstance(path, list) for path in [path_a, path_b]]), \
            "Both 'path_a' and 'path_b' must be lists."
        # Verify that path_a is type List[int] or List[List[int]].
        if isinstance(path_a[0], int):
            pt_a = path_a
        elif isinstance(path_a[0], list):
            pt_a = path_a[-1]
        else:
            raise ValueError("'path_a' must be a position or list of positions")
        # Verify that path_b is type List[int] or List[List[int]].
        if isinstance(path_b[0], int):
            pt_b = path_b
        elif isinstance(path_b[0], list):
            pt_b = path_b[-1]
        else:
            raise ValueError("'path_b' must be a position or list of positions")

        # Compute shortest path: diagonal run, then an axis-aligned run.
        diag = self.diag_path(pt_a, pt_b)
        straight = self.straight_shot(diag, pt_b)
        if [diag[0], diag[-1]] == [pt_a, pt_b]:
            shortest_path = diag
        elif [straight[0], straight[-1]] == [pt_a, pt_b]:
            shortest_path = straight
        else:
            shortest_path = diag + straight[1:]
        # Bug fix: replaced a debugging 'try: ... except: breakpoint()' with
        # a plain assertion so failures raise instead of opening a debugger.
        assert [shortest_path[0], shortest_path[-1]] == [pt_a, pt_b], (
            "'shortest_path' must run from 'pt_a' to 'pt_b'.")
        return shortest_path

    def make_valid_path(self, rw_pct = 0.15, sp_pct = 0.15) -> np.ndarray:
        """Specifies a guaranteed path without holes between the agent and a
        goal. By setting the holes on the environment outside of 'valid_path',
        we can guarantee that the environment is solvable.

        Args:
            rw_pct (float): "Random walk percentage". The percentage of the
                length of 'env.grid' that will be taken as random walk steps.
                Directly affects the variable 'rw_steps'.
            sp_pct (float): "Shortest path percentage". The percentage of the
                length of 'env.grid' that will be taken as shortest path
                steps. Directly affects the variable 'sp_steps'.

        Returns:
            valid_path (List[List[int]]): List of ordered pairs that
                constitute a guaranteed successful path for the agent.
        """
        agent_position = self.env.agent_position
        goal_position = self.env.goal_position
        path_a, path_g = agent_position, goal_position
        rw_steps: int = round(rw_pct * len(self.env.grid))
        sp_steps: int = round(0.5 * sp_pct * len(self.env.grid))
        rw_steps = 1 if rw_steps < 1 else rw_steps
        sp_steps = 1 if sp_steps < 1 else sp_steps

        done: bool = False
        while not done:  # Run until 'path_a' reaches the goal.
            # Random walk from both agent and goal starting positions.
            path_a, path_g = [self.random_walk(n_steps = rw_steps, start = path)
                              for path in [path_a, path_g]]
            # Get shortest path b/w the endpoints of both paths.
            shortest = self.shortest_path(path_a, path_g)
            if len(shortest) <= 2:
                # The two walks meet (or are adjacent): join them directly.
                path_a.append(shortest[-1])
                path_a += path_g[::-1]
                done = True
                # Bug fix: debugging 'try/except: print(...); breakpoint()'
                # replaced by a plain assertion (here and below).
                assert [path_a[0], path_a[-1]] == [agent_position,
                                                   goal_position]
            elif (len(shortest) - 2) <= (2 * sp_steps):
                # The shortest-path budget spans the whole gap: take it all.
                path_a += shortest[1:-1]
                path_a += path_g[::-1]
                done = True
                assert [path_a[0], path_a[-1]] == [agent_position,
                                                   goal_position]
            else:
                # Follow the shortest path for 'sp_steps' from each end.
                front_of_shortest = shortest[1:1 + sp_steps]
                back_of_shortest = shortest[-(1 + sp_steps): -1]
                path_a += front_of_shortest
                path_g += back_of_shortest[::-1]
        # TODO: Verify that optimal_g connects to path_g and optimal_a
        # connects to path_a.
        # TODO: Check that the valid path is actually valid -> write test:
        # 1. Verify that valid_path starts with agent position, ends at goal.
        # 2. Verify that the shifts between each position in the path are <= 1.
        valid_path: List[List[int]] = path_a
        return valid_path
# Source: ysBach/astropy (1-10 stars)
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitConversionError, UnitTypeError)
from .utils import is_effectively_unity
from .format.latex import Latex
from astropy.utils.compat.misc import override__dir__
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable
from astropy.utils.data_info import ParentDtypeInfo
from astropy import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
from .quantity_helper.function_helpers import (
SUBCLASS_SAFE_FUNCTIONS, FUNCTION_HELPERS, DISPATCHED_FUNCTIONS,
UNSUPPORTED_FUNCTIONS)
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for Quantity
    """
    # Element-count threshold above which the IPython LaTeX repr of an array
    # Quantity is abbreviated with "..."; a negative value defers to numpy's
    # printoptions instead.
    latex_array_threshold = _config.ConfigItem(100,
        'The maximum size an array Quantity can be before its LaTeX '
        'representation for IPython gets "summarized" (meaning only the first '
        'and last few elements are shown with "..." between). Setting this to a '
        'negative number means that the value will instead be whatever numpy '
        'gets from get_printoptions.')


# Module-level singleton through which the configuration is accessed.
conf = Conf()
class QuantityIterator:
    """
    Flat iterator object to iterate over Quantities

    A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
    ``q``.  It allows iterating over the array as if it were a 1-D array,
    either in a for-loop or by calling its `next` method.

    Iteration is done in C-contiguous style, with the last index varying the
    fastest. The iterator can also be indexed using basic slicing or
    advanced indexing.

    See Also
    --------
    Quantity.flatten : Returns a flattened copy of an array.

    Notes
    -----
    `QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`.  It
    is not exported by the `~astropy.units` module.  Instead of
    instantiating a `QuantityIterator` directly, use `Quantity.flat`.
    """

    def __init__(self, q):
        # Keep the parent quantity for unit handling and re-wrapping, plus a
        # plain ndarray flat iterator that does the actual traversal.
        self._quantity = q
        self._dataiter = q.view(np.ndarray).flat

    def __iter__(self):
        return self

    def __getitem__(self, indx):
        item = self._dataiter[indx]
        # ndarray.flat returns bare numpy scalars for single elements; those
        # must be re-wrapped as the parent Quantity type.  Array results
        # already carry the right class and pass through untouched.
        if isinstance(item, type(self._quantity)):
            return item
        return self._quantity._new_view(item)

    def __setitem__(self, index, value):
        # Convert to the parent's own unit before storing the raw value.
        self._dataiter[index] = self._quantity._to_own_unit(value)

    def __next__(self):
        """
        Return the next value, or raise StopIteration.
        """
        # The underlying flat iterator always yields bare scalars, so every
        # element needs a fresh Quantity view.
        return self._quantity._new_view(next(self._dataiter))

    next = __next__
class QuantityInfoBase(ParentDtypeInfo):
    # Lives on a base class (rather than QuantityInfo itself) so that
    # EarthLocationInfo can reuse it, while making clear that that class is
    # not a typical Quantity subclass from Table's point of view.
    attrs_from_parent = {'dtype', 'unit'}  # dtype and unit taken from parent
    _supports_indexing = True

    @staticmethod
    def default_format(val):
        """Format a column entry as its bare numeric value (no unit)."""
        return '{0.value}'.format(val)

    @staticmethod
    def possible_string_format_functions(format_):
        """Iterate through possible string-derived format functions.

        A string can either be a format specifier for the format built-in,
        a new-style format string, or an old-style format string.

        This method is overridden in order to suppress printing the unit
        in each row since it is already at the top in the column header.
        """
        # Candidates, tried in order: format() spec, str.format template,
        # %-style template.  Each one formats only the value, never the unit.
        yield lambda format_, val: format(val.value, format_)
        yield lambda format_, val: format_.format(val.value)
        yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
    """
    Container for meta information like name, description, format.  This is
    required when the object is used as a mixin column within a table, but can
    be used as a general way to store meta information.
    """
    _represent_as_dict_attrs = ('value', 'unit')
    _construct_from_dict_args = ['value']
    _represent_as_dict_primary_data = 'value'

    def new_like(self, cols, length, metadata_conflicts='warn', name=None):
        """
        Return a new Quantity instance which is consistent with the
        input ``cols`` and has ``length`` rows.

        This is intended for creating an empty column object whose elements can
        be set in-place for table operations like join or vstack.

        Parameters
        ----------
        cols : list
            List of input columns
        length : int
            Length of the output column object
        metadata_conflicts : str ('warn'|'error'|'silent')
            How to handle metadata conflicts
        name : str
            Output column name

        Returns
        -------
        col : Quantity (or subclass)
            Empty instance of this class consistent with ``cols``
        """
        # Merged info attributes: shape, dtype, format, description, etc.
        attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
                                           ('meta', 'format', 'description'))
        shape = (length,) + attrs.pop('shape')
        dtype = attrs.pop('dtype')
        # Zeros rather than empty, so Quantity subclasses with restricted
        # domains (e.g. Longitude, Latitude) receive valid initial values.
        data = np.zeros(shape=shape, dtype=dtype)
        # Rebuild the class from its dict representation; every attribute
        # other than 'value' (e.g. the unit) is taken from the last column.
        kwargs = {}
        for key in self._represent_as_dict_attrs:
            kwargs[key] = data if key == 'value' else getattr(cols[-1], key)
        kwargs['copy'] = False
        out = self._construct_from_dict(kwargs)

        # Set remaining info attributes.
        for attr, value in attrs.items():
            setattr(out.info, attr, value)
        return out

    def get_sortable_arrays(self):
        """
        Return a list of arrays which can be lexically sorted to represent
        the order of the parent column.

        For Quantity this is just the quantity itself.

        Returns
        -------
        arrays : list of ndarray
        """
        return [self._parent]
class Quantity(np.ndarray):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: https://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `Quantity` object (sequence), str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any integer and (non-Quantity) object inputs are converted
to float by default.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See https://docs.astropy.org/en/latest/units/
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
            subok=False, ndmin=0):
    """Create a new Quantity; see the class docstring for parameters."""
    if unit is not None:
        # convert unit first, to avoid multiple string->unit conversions
        unit = Unit(unit)

    # optimize speed for Quantity with no dtype given, copy=False
    if isinstance(value, Quantity):
        if unit is not None and unit is not value.unit:
            value = value.to(unit)
            # the above already makes a copy (with float dtype)
            copy = False

        if type(value) is not cls and not (subok and
                                           isinstance(value, cls)):
            value = value.view(cls)

        if dtype is None and value.dtype.kind in 'iu':
            # Integer input is upcast to float by default (see class doc).
            dtype = float

        return np.array(value, dtype=dtype, copy=copy, order=order,
                        subok=True, ndmin=ndmin)

    # Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
    # To ensure array remains fast, we short-circuit it.
    value_unit = None
    if not isinstance(value, np.ndarray):
        if isinstance(value, str):
            # The first part of the regex string matches any integer/float;
            # the second parts adds possible trailing .+-, which will break
            # the float function below and ensure things like 1.2.3deg
            # will not work.
            pattern = (r'\s*[+-]?'
                       r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
                       r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
                       r'([eE][+-]?\d+)?'
                       r'[.+-]?')

            v = re.match(pattern, value)
            unit_string = None
            try:
                value = float(v.group())
            except Exception:
                raise TypeError('Cannot parse "{}" as a {}. It does not '
                                'start with a number.'
                                .format(value, cls.__name__))

            # Whatever trails the number is parsed as the unit.
            unit_string = v.string[v.end():].strip()
            if unit_string:
                value_unit = Unit(unit_string)
                if unit is None:
                    unit = value_unit  # signal no conversion needed below.

        elif (isiterable(value) and len(value) > 0 and
              all(isinstance(v, Quantity) for v in value)):
            # Convert all quantities to the same unit.
            if unit is None:
                unit = value[0].unit
            value = [q.to_value(unit) for q in value]
            value_unit = unit  # signal below that conversion has been done

    if value_unit is None:
        # If the value has a `unit` attribute and if not None
        # (for Columns with uninitialized unit), treat it like a quantity.
        value_unit = getattr(value, 'unit', None)
        if value_unit is None:
            # Default to dimensionless for no (initialized) unit attribute.
            if unit is None:
                unit = cls._default_unit
            value_unit = unit  # signal below that no conversion is needed
        else:
            try:
                value_unit = Unit(value_unit)
            except Exception as exc:
                raise TypeError("The unit attribute {!r} of the input could "
                                "not be parsed as an astropy Unit, raising "
                                "the following exception:\n{}"
                                .format(value.unit, exc))

            if unit is None:
                unit = value_unit
            elif unit is not value_unit:
                copy = False  # copy will be made in conversion at end

    value = np.array(value, dtype=dtype, copy=copy, order=order,
                     subok=False, ndmin=ndmin)

    # check that array contains numbers or long int objects
    if (value.dtype.kind in 'OSU' and
        not (value.dtype.kind == 'O' and
             isinstance(value.item(0), numbers.Number))):
        raise TypeError("The value must be a valid Python or "
                        "Numpy numeric type.")

    # by default, cast any integer, boolean, etc., to float
    if dtype is None and value.dtype.kind in 'iuO':
        value = value.astype(float)

    # if we allow subclasses, allow a class from the unit.
    if subok:
        qcls = getattr(unit, '_quantity_class', cls)
        if issubclass(qcls, cls):
            cls = qcls

    value = value.view(cls)
    value._set_unit(value_unit)
    if unit is value_unit:
        return value
    else:
        # here we had non-Quantity input that had a "unit" attribute
        # with a unit different from the desired one. So, convert.
        return value.to(unit)
def __array_finalize__(self, obj):
    """Finalize a view/copy of ``obj``: inherit its unit and ``info``."""
    # Brand-new objects and views of plain ndarrays need no set-up.
    if obj is None or obj.__class__ is np.ndarray:
        return

    # Adopt the source object's unit unless ours is already defined.
    if self._unit is None:
        source_unit = getattr(obj, '_unit', None)
        if source_unit is not None:
            self._set_unit(source_unit)

    # Because of how DataInfo works, `'info' in obj.__dict__` only
    # becomes True once the `info` attribute was accessed or set.
    if 'info' in obj.__dict__:
        self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() created a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more since all use '
'should go through array_function. '
'Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
    """Wrap numpy ufuncs, taking care of units.

    Parameters
    ----------
    function : callable
        ufunc to wrap.
    method : str
        Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
    inputs : tuple
        Input arrays.
    kwargs : keyword arguments
        As passed on, with ``out`` containing possible quantity output.

    Returns
    -------
    result : `~astropy.units.Quantity`
        Results of the ufunc, with the unit set properly.
    """
    # Determine required conversion functions -- to bring the unit of the
    # input to that expected (e.g., radian for np.sin), or to get
    # consistent units between two inputs (e.g., in np.add) --
    # and the unit of the result (or tuple of units for nout > 1).
    converters, unit = converters_and_unit(function, method, *inputs)

    out = kwargs.get('out', None)
    # Avoid loop back by turning any Quantity output into array views.
    if out is not None:
        # If pre-allocated output is used, check it is suitable.
        # This also returns array view, to ensure we don't loop back.
        if function.nout == 1:
            out = out[0]
        out_array = check_output(out, unit, inputs, function=function)
        # Ensure output argument remains a tuple.
        kwargs['out'] = (out_array,) if function.nout == 1 else out_array

    # Same for inputs, but here also convert if necessary.
    arrays = []
    for input_, converter in zip(inputs, converters):
        # Strip the unit: the converter expects raw array data.
        input_ = getattr(input_, 'value', input_)
        arrays.append(converter(input_) if converter else input_)

    # Call our superclass's __array_ufunc__ on the bare arrays.
    result = super().__array_ufunc__(function, method, *arrays, **kwargs)

    # If unit is None, a plain array is expected (e.g., comparisons), which
    # means we're done.
    # We're also done if the result was None (for method 'at') or
    # NotImplemented, which can happen if other inputs/outputs override
    # __array_ufunc__; hopefully, they can then deal with us.
    if unit is None or result is None or result is NotImplemented:
        return result

    return self._result_as_quantity(result, unit, out)
def _result_as_quantity(self, result, unit, out):
    """Turn result into a quantity with the given unit.

    If no output is given, it will take a view of the array as a quantity,
    and set the unit. If output is given, those should be quantity views
    of the result arrays, and the function will just set the unit.

    Parameters
    ----------
    result : `~numpy.ndarray` or tuple of `~numpy.ndarray`
        Array(s) which need to be turned into quantity.
    unit : `~astropy.units.Unit`
        Unit for the quantities to be returned (or `None` if the result
        should not be a quantity). Should be tuple if result is a tuple.
    out : `~astropy.units.Quantity` or None
        Possible output quantity. Should be `None` or a tuple if result
        is a tuple.

    Returns
    -------
    out : `~astropy.units.Quantity`
        With units set.
    """
    if isinstance(result, (tuple, list)):
        if out is None:
            out = (None,) * len(result)
        # Recurse element-wise, preserving the container type
        # (tuple/list constructors both accept a generator).
        return result.__class__(
            self._result_as_quantity(result_, unit_, out_)
            for (result_, unit_, out_) in
            zip(result, unit, out))

    if out is None:
        # View the result array as a Quantity with the proper unit.
        return result if unit is None else self._new_view(result, unit)

    # For given output, just set the unit. We know the unit is not None and
    # the output is of the correct Quantity subclass, as it was passed
    # through check_output.
    out._set_unit(unit)
    return out
def __quantity_subclass__(self, unit):
    """
    Return the view class appropriate for a given output unit.

    Subclasses override this to change what kind of view is created
    based on the output unit of an operation.

    Parameters
    ----------
    unit : UnitBase
        The unit for which the appropriate class should be returned

    Returns
    -------
    tuple :
        - `Quantity` subclass
        - bool: True if subclasses of the given class are ok
    """
    # Base implementation: always fall back to plain Quantity, and
    # allow its subclasses.
    return Quantity, True
def _new_view(self, obj=None, unit=None):
    """
    Create a Quantity view of some array-like input, and set the unit

    By default, return a view of ``obj`` of the same class as ``self`` and
    with the same unit. Subclasses can override the type of class for a
    given unit using ``__quantity_subclass__``, and can ensure properties
    other than the unit are copied using ``__array_finalize__``.

    If the given unit defines a ``_quantity_class`` of which ``self``
    is not an instance, a view using this class is taken.

    Parameters
    ----------
    obj : ndarray or scalar, optional
        The array to create a view of. If obj is a numpy or python scalar,
        it will be converted to an array scalar. By default, ``self``
        is converted.

    unit : `UnitBase`, or anything convertible to a :class:`~astropy.units.Unit`, optional
        The unit of the resulting object. It is used to select a
        subclass, and explicitly assigned to the view if given.
        If not given, the subclass and unit will be that of ``self``.

    Returns
    -------
    view : Quantity subclass
    """
    # Determine the unit and quantity subclass that we need for the view.
    if unit is None:
        # Same unit as self: keep our own class.
        unit = self.unit
        quantity_subclass = self.__class__
    elif unit is self.unit and self.__class__ is Quantity:
        # The second part is because we should not presume what other
        # classes want to do for the same unit. E.g., Constant will
        # always want to fall back to Quantity, and relies on going
        # through `__quantity_subclass__`.
        quantity_subclass = Quantity
    else:
        # A (possibly) different unit: let the unit pick the class,
        # then give self a chance to override via __quantity_subclass__.
        unit = Unit(unit)
        quantity_subclass = getattr(unit, '_quantity_class', Quantity)
        if isinstance(self, quantity_subclass):
            quantity_subclass, subok = self.__quantity_subclass__(unit)
            if subok:
                quantity_subclass = self.__class__

    # We only want to propagate information from ``self`` to our new view,
    # so obj should be a regular array. By using ``np.array``, we also
    # convert python and numpy scalars, which cannot be viewed as arrays
    # and thus not as Quantity either, to zero-dimensional arrays.
    # (These are turned back into scalar in `.value`)
    # Note that for an ndarray input, the np.array call takes only double
    # ``obj.__class is np.ndarray``. So, not worth special-casing.
    if obj is None:
        obj = self.view(np.ndarray)
    else:
        obj = np.array(obj, copy=False)

    # Take the view, set the unit, and update possible other properties
    # such as ``info``, ``wrap_angle`` in `Longitude`, etc.
    view = obj.view(quantity_subclass)
    view._set_unit(unit)
    view.__array_finalize__(self)
    return view
def _set_unit(self, unit):
    """Set the unit.

    This is used anywhere the unit is set or modified, i.e., in the
    initializer, in ``__imul__`` and ``__itruediv__`` for in-place
    multiplication and division by another unit, as well as in
    ``__array_finalize__`` for wrapping up views. For Quantity, it just
    sets the unit, but subclasses can override it to check that, e.g.,
    a unit is consistent.
    """
    if not isinstance(unit, UnitBase):
        # Trying to go through a string ensures that, e.g., Magnitudes with
        # dimensionless physical unit become Quantity with units of mag.
        unit = Unit(str(unit), parse_strict='silent')
        if not isinstance(unit, UnitBase):
            raise UnitTypeError(
                "{} instances require {} units, not {} instances."
                .format(type(self).__name__, UnitBase, type(unit)))

    self._unit = unit
def __deepcopy__(self, memo):
    # If we don't define this, ``copy.deepcopy(quantity)`` will
    # return a bare Numpy array. `ndarray.copy` already copies the
    # data, so no use of `memo` is needed.
    return self.copy()
def __reduce__(self):
    # patch to pickle Quantity objects (ndarray subclasses), see
    # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
    object_state = list(super().__reduce__())
    # Append our instance __dict__ (e.g. any set `info`) to ndarray's
    # reconstruction state; unpacked again in __setstate__.
    object_state[2] = (object_state[2], self.__dict__)
    return tuple(object_state)
def __setstate__(self, state):
    # patch to unpickle Quantity objects (ndarray subclasses), see
    # http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
    # `state` is the pair created in __reduce__ above.
    nd_state, own_state = state
    super().__setstate__(nd_state)
    self.__dict__.update(own_state)
# Descriptor providing table interoperability (column info, serialization).
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
    """Helper method for to and to_value."""
    # An (explicit or default) empty list selects the class-default
    # equivalencies; `None` disables equivalencies altogether.
    effective_equivalencies = (self._equivalencies
                               if equivalencies == [] else equivalencies)
    return self.unit.to(unit, self.view(np.ndarray),
                        equivalencies=effective_equivalencies)
def to(self, unit, equivalencies=[], copy=True):
    """
    Return a new `~astropy.units.Quantity` object with the specified unit.

    Parameters
    ----------
    unit : `~astropy.units.UnitBase` instance, str
        An object that represents the unit to convert to. Must be
        an `~astropy.units.UnitBase` object or a string parseable
        by the `~astropy.units` package.

    equivalencies : list of equivalence pairs, optional
        A list of equivalence pairs to try if the units are not
        directly convertible. See :ref:`unit_equivalencies`.
        If not provided or ``[]``, class default equivalencies will be used
        (none for `~astropy.units.Quantity`, but may be set for subclasses)
        If `None`, no equivalencies will be applied at all, not even any
        set globally or within a context.

    copy : bool, optional
        If `True` (default), then the value is copied. Otherwise, a copy
        will only be made if necessary.

    See also
    --------
    to_value : get the numerical value in a given unit.
    """
    unit = Unit(unit)
    if copy:
        # _to_value always produces a fresh array and avoids the
        # overhead of to_value (which matters for scalars).
        converted = self._to_value(unit, equivalencies)
    else:
        # to_value only copies if necessary.
        converted = self.to_value(unit, equivalencies)
    return self._new_view(converted, unit)
def to_value(self, unit=None, equivalencies=[]):
    """
    The numerical value, possibly in a different unit.

    Parameters
    ----------
    unit : `~astropy.units.UnitBase` instance or str, optional
        The unit in which the value should be given. If not given or `None`,
        use the current unit.

    equivalencies : list of equivalence pairs, optional
        A list of equivalence pairs to try if the units are not directly
        convertible (see :ref:`unit_equivalencies`). If not provided or
        ``[]``, class default equivalencies will be used (none for
        `~astropy.units.Quantity`, but may be set for subclasses).
        If `None`, no equivalencies will be applied at all, not even any
        set globally or within a context.

    Returns
    -------
    value : `~numpy.ndarray` or scalar
        The value in the units specified. For arrays, this will be a view
        of the data if no unit conversion was necessary.

    See also
    --------
    to : Get a new instance in a different unit.
    """
    if unit is None or unit is self.unit:
        # No conversion needed: hand out a bare-ndarray view of the data.
        value = self.view(np.ndarray)
    else:
        unit = Unit(unit)
        # We want a view if the unit does not change. One could check
        # with "==", but that calculates the scale that we need anyway.
        # TODO: would be better for `unit.to` to have an in-place flag.
        try:
            scale = self.unit._to(unit)
        except Exception:
            # Short-cut failed; try default (maybe equivalencies help).
            value = self._to_value(unit, equivalencies)
        else:
            value = self.view(np.ndarray)
            if not is_effectively_unity(scale):
                # not in-place!
                value = value * scale

    # Index with empty tuple to decay array scalars in to numpy scalars.
    return value[()]
# Expose to_value (with its defaults, i.e., current unit) as a read-only
# property, so `q.value` gives the bare number(s).
value = property(to_value,
                 doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
    """
    A `~astropy.units.UnitBase` object representing the unit of this
    quantity.
    """
    # Read-only access; modification goes through _set_unit.
    return self._unit
@property
def equivalencies(self):
    """
    A list of equivalencies that will be applied by default during
    unit conversions.
    """
    # Class-level default ([] on Quantity); subclasses may override.
    return self._equivalencies
@property
def si(self):
    """
    Returns a copy of the current `Quantity` instance with SI units. The
    value of the resulting object will be scaled.
    """
    target = self.unit.si
    scale = target.scale
    # Fold the scale into the value so the resulting unit is scale-free.
    return self._new_view(self.value * scale, target / scale)
@property
def cgs(self):
    """
    Returns a copy of the current `Quantity` instance with CGS units. The
    value of the resulting object will be scaled.
    """
    target = self.unit.cgs
    scale = target.scale
    # Fold the scale into the value so the resulting unit is scale-free.
    return self._new_view(self.value * scale, target / scale)
@property
def isscalar(self):
    """
    True if the `value` of this quantity is a scalar, or False if it
    is an array-like object.

    .. note::
        This is subtly different from `numpy.isscalar` in that
        `numpy.isscalar` returns False for a zero-dimensional array
        (e.g. ``np.array(1)``), while this is True for quantities,
        since quantities cannot represent true numpy scalars.
    """
    # An empty shape tuple marks a zero-dimensional (scalar) quantity.
    return self.shape == ()
# This flag controls whether convenience conversion members, such
# as `q.m` as shorthand for `q.to_value(u.m)`, are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
    """
    Quantities are able to directly convert to other units that
    have the same physical type. This function is implemented in
    order to make autocompletion still work correctly in IPython.
    """
    if not self._include_easy_conversion_members:
        return []
    extra_members = set()
    equivalencies = Unit._normalize_equivalencies(self.equivalencies)
    # Each physically-equivalent unit contributes all its name aliases.
    for equivalent in self.unit._get_units_with_same_physical_type(
            equivalencies):
        extra_members.update(equivalent.names)
    return extra_members
def __getattr__(self, attr):
    """
    Quantities are able to directly convert to other units that
    have the same physical type.
    """
    if not self._include_easy_conversion_members:
        raise AttributeError(
            "'{}' object has no '{}' member".format(
                self.__class__.__name__,
                attr))

    def get_virtual_unit_attribute():
        # Treat the attribute name as a unit name; return the converted
        # value, or None if it is not a unit or not convertible.
        registry = get_current_unit_registry().registry
        to_unit = registry.get(attr, None)
        if to_unit is None:
            return None

        try:
            return self.unit.to(
                to_unit, self.value, equivalencies=self.equivalencies)
        except UnitsError:
            return None

    value = get_virtual_unit_attribute()

    if value is None:
        raise AttributeError(
            "{} instance has no attribute '{}'".format(
                self.__class__.__name__, attr))
    else:
        return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting. On the other
# hand, for structured arrays, the ufunc does not work, so we do use
# __eq__ and live with the warnings.
def __eq__(self, other):
    """Element-wise equality; False when units are incompatible."""
    try:
        # Structured (void) dtypes cannot go through the ufunc.
        if self.dtype.kind == 'V':
            return super().__eq__(other)
        return np.equal(self, other)
    except UnitsError:
        return False
    except TypeError:
        return NotImplemented
def __ne__(self, other):
    """Element-wise inequality; True when units are incompatible."""
    try:
        # Structured (void) dtypes cannot go through the ufunc.
        if self.dtype.kind == 'V':
            return super().__ne__(other)
        return np.not_equal(self, other)
    except UnitsError:
        return True
    except TypeError:
        return NotImplemented
# Unit conversion operator (<<).
def __lshift__(self, other):
    """``self << other``: re-initialize with ``other`` as the unit."""
    try:
        target_unit = Unit(other, parse_strict='silent')
    except UnitTypeError:
        return NotImplemented

    # Go through the constructor so subclasses are honoured.
    return self.__class__(self, target_unit, copy=False, subok=True)
def __ilshift__(self, other):
    """In-place unit conversion: ``self <<= other``."""
    try:
        other = Unit(other, parse_strict='silent')
    except UnitTypeError:
        return NotImplemented

    try:
        factor = self.unit._to(other)
    except UnitConversionError:
        # Maybe via equivalencies? Now we do make a temporary copy.
        try:
            value = self._to_value(other)
        except UnitConversionError:
            return NotImplemented

        self.view(np.ndarray)[...] = value

    else:
        # NOTE(review): for integer-dtype quantities this in-place
        # multiply truncates/raises for non-integral factors — confirm
        # whether that is intended for int data.
        self.view(np.ndarray)[...] *= factor

    self._set_unit(other)
    return self
def __rlshift__(self, other):
    """``other << self``: only sensible when self can act as a unit."""
    # Only a scalar quantity can be interpreted as a unit.
    if self.isscalar:
        return Unit(self).__rlshift__(other)
    return NotImplemented
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
    """Warn that ``>>`` is unsupported (``<<`` was probably intended)."""
    message = (">> is not implemented. Did you mean to convert "
               "something to this quantity as a unit using '<<'?")
    warnings.warn(message, AstropyWarning)
    return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
    # Defer to the other operand, which will warn if it is a unit.
    return NotImplemented

def __irshift__(self, other):
    # Defer to the other operand, which will warn if it is a unit.
    return NotImplemented
# Arithmetic operations
def __mul__(self, other):
    """ Multiplication between `Quantity` objects and other objects."""
    # Non-unit operands go through the normal ufunc machinery.
    if not isinstance(other, (UnitBase, str)):
        return super().__mul__(other)
    try:
        # Multiplying by a unit only changes the unit, not the data.
        return self._new_view(self.copy(), other * self.unit)
    except UnitsError:  # let other try to deal with it
        return NotImplemented
def __imul__(self, other):
    """In-place multiplication between `Quantity` objects and others."""
    if isinstance(other, (UnitBase, str)):
        # Only the unit changes; the array data stays untouched.
        self._set_unit(other * self.unit)
        return self
    return super().__imul__(other)
def __rmul__(self, other):
    """ Right Multiplication between `Quantity` objects and other
    objects.
    """
    # Multiplication commutes, so simply delegate to __mul__.
    return self.__mul__(other)
def __truediv__(self, other):
    """ Division between `Quantity` objects and other objects."""
    # Non-unit operands go through the normal ufunc machinery.
    if not isinstance(other, (UnitBase, str)):
        return super().__truediv__(other)
    try:
        # Dividing by a unit only changes the unit, not the data.
        return self._new_view(self.copy(), self.unit / other)
    except UnitsError:  # let other try to deal with it
        return NotImplemented
def __itruediv__(self, other):
    """Inplace division between `Quantity` objects and other objects."""
    if isinstance(other, (UnitBase, str)):
        # Only the unit changes; the array data stays untouched.
        self._set_unit(self.unit / other)
        return self
    return super().__itruediv__(other)
def __rtruediv__(self, other):
    """ Right Division between `Quantity` objects and other objects."""
    # unit / quantity: invert the value and divide the units.
    if not isinstance(other, (UnitBase, str)):
        return super().__rtruediv__(other)
    return self._new_view(1. / self.value, other / self.unit)
def __div__(self, other):
    """ Division between `Quantity` objects. """
    # Python 2 legacy alias, kept for backwards compatibility.
    return self.__truediv__(other)

def __idiv__(self, other):
    """ Division between `Quantity` objects. """
    # Python 2 legacy alias, kept for backwards compatibility.
    return self.__itruediv__(other)

def __rdiv__(self, other):
    """ Division between `Quantity` objects. """
    # Python 2 legacy alias, kept for backwards compatibility.
    return self.__rtruediv__(other)
def __pow__(self, other):
    """Exponentiation; Fractions get special handling (see below)."""
    if not isinstance(other, Fraction):
        return super().__pow__(other)
    # Raising the value to a Fraction would give object arrays, so use
    # a float exponent for the value but the exact Fraction for the unit.
    return self._new_view(self.value ** float(other),
                          self.unit ** other)
# other overrides of special functions
def __hash__(self):
    """Hash combining unit and value (xor is order-independent)."""
    return hash(self.unit) ^ hash(self.value)
def __iter__(self):
    """Iterate over elements, each wrapped as a Quantity view."""
    if self.isscalar:
        raise TypeError(
            "'{cls}' object with a scalar value is not iterable"
            .format(cls=self.__class__.__name__))

    # A generator expression lazily wraps each element, just as the
    # original nested generator function did.
    return (self._new_view(element) for element in self.value)
def __getitem__(self, key):
    """Index into the quantity, re-wrapping scalar results."""
    try:
        result = super().__getitem__(key)
    except IndexError:
        # Zero-dimensional quantities should behave like scalars, so
        # indexing them raises TypeError rather than IndexError.
        if not self.isscalar:
            raise
        raise TypeError(
            "'{cls}' object with a scalar value does not support "
            "indexing".format(cls=self.__class__.__name__))

    # ndarray.__getitem__ returns bare scalars for single elements;
    # those need a fresh Quantity view.
    if isinstance(result, np.ndarray):
        return result
    return self._new_view(result)
def __setitem__(self, i, value):
    # update indices in info if the info property has been accessed
    # (in which case 'info' in self.__dict__ is True; this is guaranteed
    # to be the case if we're part of a table).
    if not self.isscalar and 'info' in self.__dict__:
        self.info.adjust_indices(i, value, len(self))
    # Convert to our own unit first (raises on incompatible units or
    # precision loss), then assign through a bare-ndarray view.
    self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
    """Length of the first axis; scalars have no len()."""
    if self.isscalar:
        raise TypeError("'{cls}' object with a scalar value has no "
                        "len()".format(cls=self.__class__.__name__))
    return len(self.value)
# Numerical types
def __float__(self):
    # Conversion to dimensionless applies any scale factor; only
    # dimensionless scalars can become a bare float.
    try:
        return float(self.to_value(dimensionless_unscaled))
    except (UnitsError, TypeError):
        raise TypeError('only dimensionless scalar quantities can be '
                        'converted to Python scalars')
def __int__(self):
    # Conversion to dimensionless applies any scale factor; only
    # dimensionless scalars can become a bare int.
    try:
        return int(self.to_value(dimensionless_unscaled))
    except (UnitsError, TypeError):
        raise TypeError('only dimensionless scalar quantities can be '
                        'converted to Python scalars')
def __index__(self):
    """Return the value as a Python int for use as an index.

    For indices, we do not want to mess around with scaling at all, so
    unlike for float and int we insist here on an unscaled dimensionless
    unit and an integer value.

    Raises
    ------
    TypeError
        If the quantity is not an integer, unscaled, dimensionless scalar.
    """
    try:
        # Explicit check rather than `assert`: asserts are stripped under
        # `python -O`, which would have silently changed behavior here.
        if not self.unit.is_unity():
            raise TypeError
        return self.value.__index__()
    except Exception:
        raise TypeError('only integer dimensionless scalar quantities '
                        'can be converted to a Python index')
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
    """
    Generate a string representation of the quantity and its unit.

    The behavior of this function can be altered via the
    `numpy.set_printoptions` function and its various keywords. The
    exception to this is the ``threshold`` keyword, which is controlled via
    the ``[units.quantity]`` configuration item ``latex_array_threshold``.
    This is treated separately because the numpy default of 1000 is too big
    for most browsers to handle.

    Parameters
    ----------
    unit : `~astropy.units.UnitBase`, optional
        Specifies the unit. If not provided,
        the unit used to initialize the quantity will be used.

    precision : numeric, optional
        The level of decimal precision. If `None`, or not provided,
        it will be determined from NumPy print options.

    format : str, optional
        The format of the result. If not provided, an unadorned
        string is returned. Supported values are:

        - 'latex': Return a LaTeX-formatted string

    subfmt : str, optional
        Subformat of the result. For the moment,
        only used for format="latex". Supported values are:

        - 'inline': Use ``$ ... $`` as delimiters.

        - 'display': Use ``$\\displaystyle ... $`` as delimiters.

    Returns
    -------
    lstr
        A string with the contents of this Quantity

    Raises
    ------
    ValueError
        If ``format`` or ``subfmt`` is not a supported value.
    """
    if unit is not None and unit != self.unit:
        return self.to(unit).to_string(
            unit=None, precision=precision, format=format, subfmt=subfmt)

    formats = {
        None: None,
        "latex": {
            None: ("$", "$"),
            "inline": ("$", "$"),
            "display": (r"$\displaystyle ", r"$"),
        },
    }

    if format not in formats:
        raise ValueError(f"Unknown format '{format}'")
    elif format is None:
        return f'{self.value}{self._unitstr:s}'

    # else, for the moment we assume format="latex"

    # Validate subfmt up front, so an unknown value gives a clear
    # ValueError (consistent with the `format` check above) instead of
    # a raw KeyError at the delimiter lookup below.
    try:
        delimiter_left, delimiter_right = formats[format][subfmt]
    except KeyError:
        raise ValueError(f"Unknown subfmt '{subfmt}' for format '{format}'")

    # need to do try/finally because "threshold" cannot be overridden
    # with array2string
    pops = np.get_printoptions()

    format_spec = '.{}g'.format(
        precision if precision is not None else pops['precision'])

    def float_formatter(value):
        return Latex.format_exponential_notation(value,
                                                 format_spec=format_spec)

    def complex_formatter(value):
        return '({}{}i)'.format(
            Latex.format_exponential_notation(value.real,
                                              format_spec=format_spec),
            Latex.format_exponential_notation(value.imag,
                                              format_spec='+' + format_spec))

    try:
        formatter = {'float_kind': float_formatter,
                     'complex_kind': complex_formatter}
        if conf.latex_array_threshold > -1:
            np.set_printoptions(threshold=conf.latex_array_threshold,
                                formatter=formatter)

        # the view is needed for the scalar case - value might be float
        latex_value = np.array2string(
            self.view(np.ndarray),
            max_line_width=np.inf, separator=',~')

        latex_value = latex_value.replace('...', r'\dots')
    finally:
        np.set_printoptions(**pops)

    # Format unit
    # [1:-1] strips the '$' on either side needed for math mode
    latex_unit = (self.unit._repr_latex_()[1:-1]  # note this is unicode
                  if self.unit is not None
                  else _UNIT_NOT_INITIALISED)

    return r'{left}{0} \; {1}{right}'.format(latex_value, latex_unit,
                                             left=delimiter_left,
                                             right=delimiter_right)
def __str__(self):
    """Return the plain (unformatted) string: value plus unit."""
    return self.to_string()
def __repr__(self):
    """Return ``<ClassName value(s) unit>``."""
    prefix = '<' + self.__class__.__name__ + ' '
    # Pass prefix so multi-line arrays align under the class name.
    body = np.array2string(self.view(np.ndarray), separator=', ',
                           prefix=prefix)
    return f'{prefix}{body}{self._unitstr:s}>'
def _repr_latex_(self):
    """
    Generate a latex representation of the quantity and its unit.

    Returns
    -------
    lstr
        A LaTeX string with the contents of this Quantity
    """
    # Hook used by IPython/Jupyter rich display.
    # NOTE: This should change to display format in a future release
    return self.to_string(format='latex', subfmt='inline')
def __format__(self, format_spec):
    """
    Format quantities using the new-style python formatting codes
    as specifiers for the number.

    If the format specifier correctly applies itself to the value,
    then it is used to format only the value. If it cannot be
    applied to the value, then it is applied to the whole string.
    """
    try:
        formatted_value = format(self.value, format_spec)
        remaining_spec = "s"
    except ValueError:
        # Spec does not apply to the bare number; defer it to the
        # final value-plus-unit string instead.
        formatted_value = self.value
        remaining_spec = format_spec
    combined = f"{formatted_value}{self._unitstr:s}"
    return format(combined, remaining_spec)
def decompose(self, bases=[]):
    """
    Generates a new `Quantity` with the units
    decomposed. Decomposed units have only irreducible units in
    them (see `astropy.units.UnitBase.decompose`).

    Parameters
    ----------
    bases : sequence of UnitBase, optional
        The bases to decompose into. When not provided,
        decomposes down to any irreducible units. When provided,
        the decomposed result will only contain the given units.
        This will raise a `~astropy.units.UnitsError` if it's not possible
        to do so.

    Returns
    -------
    newq : `~astropy.units.Quantity`
        A new object equal to this quantity with units decomposed.
    """
    # Public entry point never allows a scale factor in the result.
    return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
    """
    Generates a new `Quantity` with the units decomposed. Decomposed
    units have only irreducible units in them (see
    `astropy.units.UnitBase.decompose`).

    Parameters
    ----------
    allowscaledunits : bool
        If True, the resulting `Quantity` may have a scale factor
        associated with it. If False, any scaling in the unit will
        be subsumed into the value of the resulting `Quantity`

    bases : sequence of UnitBase, optional
        The bases to decompose into. When not provided,
        decomposes down to any irreducible units. When provided,
        the decomposed result will only contain the given units.
        This will raise a `~astropy.units.UnitsError` if it's not possible
        to do so.

    Returns
    -------
    newq : `~astropy.units.Quantity`
        A new object equal to this quantity with units decomposed.
    """
    decomposed_unit = self.unit.decompose(bases=bases)

    if not allowscaledunits and hasattr(decomposed_unit, 'scale'):
        # Fold the scale into the value. self.value is usually a view
        # of self, so multiply into a fresh array rather than in place.
        scaled_value = self.value * decomposed_unit.scale
        return self._new_view(scaled_value,
                              decomposed_unit / decomposed_unit.scale)
    return self._new_view(self.copy(), decomposed_unit)
# These functions need to be overridden to take into account the units
# Array conversion
# https://numpy.org/doc/stable/reference/arrays.ndarray.html#array-conversion

def item(self, *args):
    # ndarray.item returns a bare scalar; re-wrap it as a Quantity.
    return self._new_view(super().item(*args))
def tolist(self):
    """Disabled: a plain list would silently drop the unit."""
    message = ("cannot make a list of Quantities. Get "
               "list of values with q.value.tolist()")
    raise NotImplementedError(message)
def _to_own_unit(self, value, check_precision=True):
    """Convert ``value`` to bare array data in this quantity's own unit.

    Used by item-setting methods (``__setitem__``, ``fill``, ``put``,
    ``itemset``) to validate and convert assigned values.

    Parameters
    ----------
    value : quantity-like
        Value to convert; anything the `Quantity` initializer accepts.
    check_precision : bool, optional
        If `True` (default), raise TypeError when casting to our dtype
        would lose precision.
    """
    try:
        _value = value.to_value(self.unit)
    except AttributeError:
        # We're not a Quantity, so let's try a more general conversion.
        # Plain arrays will be converted to dimensionless in the process,
        # but anything with a unit attribute will use that.
        try:
            as_quantity = Quantity(value)
            _value = as_quantity.to_value(self.unit)
        except TypeError:
            # Could not make a Quantity. Maybe masked printing?
            # Note: masked quantities do not work very well, but no reason
            # to break even repr and str.
            if (value is np.ma.masked_print_option and
                    self.dtype.kind == 'O'):
                return value
            else:
                raise
        except UnitsError:
            # last chance: if this was not something with a unit
            # and is all 0, inf, or nan, we treat it as arbitrary unit.
            # (`as_quantity` is bound here, since Quantity(value)
            # succeeded before to_value raised.)
            if (not hasattr(value, 'unit') and
                    can_have_arbitrary_unit(as_quantity.value)):
                _value = as_quantity.value
            else:
                raise

    if check_precision:
        # If, e.g., we are casting double to float, we want to fail if
        # precision is lost, but let things pass if it works.
        _value = np.array(_value, copy=False)
        if not np.can_cast(_value.dtype, self.dtype):
            self_dtype_array = np.array(_value, self.dtype)
            if not np.all(np.logical_or(self_dtype_array == _value,
                                        np.isnan(_value))):
                raise TypeError("cannot convert value type to array type "
                                "without precision loss")
    return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
    def tostring(self, order='C'):
        # Raw bytes would silently drop the unit; force use of .value.
        raise NotImplementedError("cannot write Quantities to string. Write "
                                  "array with q.value.tostring(...).")
    def tobytes(self, order='C'):
        # Raw bytes would silently drop the unit; force use of .value.
        raise NotImplementedError("cannot write Quantities to string. Write "
                                  "array with q.value.tobytes(...).")
    def tofile(self, fid, sep="", format="%s"):
        # A flat file cannot carry the unit; force use of .value.
        raise NotImplementedError("cannot write Quantities to file. Write "
                                  "array with q.value.tofile(...)")
    def dump(self, file):
        # A pickle of the bare array would drop the unit; force use of .value.
        raise NotImplementedError("cannot dump Quantities to file. Write "
                                  "array with q.value.dump()")
    def dumps(self):
        # A pickle of the bare array would drop the unit; force use of .value.
        raise NotImplementedError("cannot dump Quantities to string. Write "
                                  "array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
    def fill(self, value):
        """Fill the array with a scalar value, converted to our own unit."""
        self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
    @property
    def flat(self):
        """A 1-D iterator over the Quantity array.

        This returns a ``QuantityIterator`` instance, which behaves the same
        as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
        and is similar to, but not a subclass of, Python's built-in iterator
        object.
        """
        return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode='raise'):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
    def put(self, indices, values, mode='raise'):
        """Set ``values`` (converted to our own unit) at the given indices."""
        self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
    def choose(self, choices, out=None, mode='raise'):
        # Selecting by a dimensional index is ill-defined; force use of .value.
        raise NotImplementedError("cannot choose based on quantity. Choose "
                                  "using array with q.value.choose(...)")
# ensure we do not return indices as quantities
    def argsort(self, axis=-1, kind='quicksort', order=None):
        """Indices that would sort the array (plain ndarray, not a Quantity)."""
        return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
    def searchsorted(self, v, *args, **kwargs):
        """Find insertion indices for ``v`` (converted to our own unit first)."""
        return np.searchsorted(np.array(self),
                               self._to_own_unit(v, check_precision=False),
                               *args, **kwargs)  # avoid numpy 1.6 problem
    def argmax(self, axis=None, out=None):
        """Index of the maximum value (plain integer result, no unit)."""
        return self.view(np.ndarray).argmax(axis, out=out)
    def argmin(self, axis=None, out=None):
        """Index of the minimum value (plain integer result, no unit)."""
        return self.view(np.ndarray).argmin(axis, out=out)
    def __array_function__(self, function, types, args, kwargs):
        """Wrap numpy functions, taking care of units.

        Parameters
        ----------
        function : callable
            Numpy function to wrap
        types : iterable of classes
            Classes that provide an ``__array_function__`` override. Can
            in principle be used to interact with other classes. Below,
            mostly passed on to `~numpy.ndarray`, which can only interact
            with subclasses.
        args : tuple
            Positional arguments provided in the function call.
        kwargs : dict
            Keyword arguments provided in the function call.

        Returns
        -------
        result: `~astropy.units.Quantity`, `~numpy.ndarray`
            As appropriate for the function. If the function is not
            supported, `NotImplemented` is returned, which will lead to
            a `TypeError` unless another argument overrode the function.

        Raises
        ------
        ~astropy.units.UnitsError
            If operands have incompatible units.
        """
        # A function should be in one of the following sets or dicts:
        # 1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
        #    supports Quantity; we pass on to ndarray.__array_function__.
        # 2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
        #    after converting quantities to arrays with suitable units,
        #    and possibly setting units on the result.
        # 3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
        #    requires a Quantity-specific implementation.
        # 4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
        # For now, since we may not yet have complete coverage, if a
        # function is in none of the above, we simply call the numpy
        # implementation.
        if function in SUBCLASS_SAFE_FUNCTIONS:
            return super().__array_function__(function, types, args, kwargs)
        elif function in FUNCTION_HELPERS:
            function_helper = FUNCTION_HELPERS[function]
            try:
                args, kwargs, unit, out = function_helper(*args, **kwargs)
            except NotImplementedError:
                return self._not_implemented_or_raise(function, types)
            result = super().__array_function__(function, types, args, kwargs)
            # Fall through to return section
        elif function in DISPATCHED_FUNCTIONS:
            dispatched_function = DISPATCHED_FUNCTIONS[function]
            try:
                result, unit, out = dispatched_function(*args, **kwargs)
            except NotImplementedError:
                return self._not_implemented_or_raise(function, types)
            # Fall through to return section
        elif function in UNSUPPORTED_FUNCTIONS:
            return NotImplemented
        else:
            warnings.warn("function '{}' is not known to astropy's Quantity. "
                          "Will run it anyway, hoping it will treat ndarray "
                          "subclasses correctly. Please raise an issue at "
                          "https://github.com/astropy/astropy/issues. "
                          .format(function.__name__), AstropyWarning)
            return super().__array_function__(function, types, args, kwargs)
        # Only the FUNCTION_HELPERS / DISPATCHED_FUNCTIONS branches reach
        # here; both have set ``result``, ``unit`` and ``out``.
        # If unit is None, a plain array is expected (e.g., boolean), which
        # means we're done.
        # We're also done if the result was NotImplemented, which can happen
        # if other inputs/outputs override __array_function__;
        # hopefully, they can then deal with us.
        if unit is None or result is NotImplemented:
            return result
        return self._result_as_quantity(result, unit, out=out)
    def _not_implemented_or_raise(self, function, types):
        """Handle a numpy function that our helpers could not support.

        Returns NotImplemented so another operand's override may handle the
        call, unless a plain ndarray (or non-Quantity subclass) is among the
        types, in which case coercion would likely give wrong results, so a
        TypeError is raised instead.
        """
        # Our function helper or dispatcher found that the function does not
        # work with Quantity. In principle, there may be another class that
        # knows what to do with us, for which we should return NotImplemented.
        # But if there is ndarray (or a non-Quantity subclass of it) around,
        # it quite likely coerces, so we should just break.
        if any(issubclass(t, np.ndarray) and not issubclass(t, Quantity)
               for t in types):
            raise TypeError("the Quantity implementation cannot handle {} "
                            "with the given arguments."
                            .format(function)) from None
        else:
            return NotImplemented
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
    def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
        """Wrap a numpy function that processes self, returning a Quantity.

        Parameters
        ----------
        function : callable
            Numpy function to wrap.
        args : positional arguments
            Any positional arguments to the function beyond the first argument
            (which will be set to ``self``).
        kwargs : keyword arguments
            Keyword arguments to the function.

        If present, the following arguments are treated specially:

        unit : `~astropy.units.Unit`
            Unit of the output result. If not given, the unit of ``self``.
        out : `~astropy.units.Quantity`
            A Quantity instance in which to store the output.

        Notes
        -----
        Output should always be assigned via a keyword argument, otherwise
        no proper account of the unit is taken.

        Returns
        -------
        out : `~astropy.units.Quantity`
            Result of the function call, with the unit set properly.
        """
        if unit is None:
            unit = self.unit
        # Ensure we don't loop back by turning any Quantity into array views.
        args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
                                      else arg) for arg in args)
        if out is not None:
            # If pre-allocated output is used, check it is suitable.
            # This also returns array view, to ensure we don't loop back.
            arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
            kwargs['out'] = check_output(out, unit, arrays, function=function)
        # Apply the function and turn it back into a Quantity.
        result = function(*args, **kwargs)
        return self._result_as_quantity(result, unit, out)
    def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
        """Sum along diagonals, keeping this Quantity's unit."""
        return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
                                   out=out)
    def var(self, axis=None, dtype=None, out=None, ddof=0):
        """Variance; the result carries the *square* of this unit."""
        return self._wrap_function(np.var, axis, dtype,
                                   out=out, ddof=ddof, unit=self.unit**2)
    def std(self, axis=None, dtype=None, out=None, ddof=0):
        """Standard deviation, in this Quantity's unit."""
        return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof)
    def mean(self, axis=None, dtype=None, out=None):
        """Arithmetic mean, in this Quantity's unit."""
        return self._wrap_function(np.mean, axis, dtype, out=out)
    def round(self, decimals=0, out=None):
        """Round each element to the given number of decimals, keeping the unit."""
        return self._wrap_function(np.round, decimals, out=out)
    def dot(self, b, out=None):
        """Dot product; units multiply (``b`` counts as dimensionless if unitless)."""
        result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
        return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
    def all(self, axis=None, out=None):
        # Truthiness is ill-defined for dimensional values; force use of .value.
        raise TypeError("cannot evaluate truth value of quantities. "
                        "Evaluate array with q.value.all(...)")
    def any(self, axis=None, out=None):
        # Truthiness is ill-defined for dimensional values; force use of .value.
        raise TypeError("cannot evaluate truth value of quantities. "
                        "Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
    def diff(self, n=1, axis=-1):
        """n-th discrete difference along an axis, keeping the unit."""
        return self._wrap_function(np.diff, n, axis)
    def ediff1d(self, to_end=None, to_begin=None):
        """Differences between consecutive flattened elements, keeping the unit."""
        return self._wrap_function(np.ediff1d, to_end, to_begin)
    def nansum(self, axis=None, out=None, keepdims=False):
        """Sum ignoring NaNs, keeping this Quantity's unit."""
        return self._wrap_function(np.nansum, axis,
                                   out=out, keepdims=keepdims)
    def insert(self, obj, values, axis=None):
        """
        Insert values along the given axis before the given indices and return
        a new `~astropy.units.Quantity` object.

        This is a thin wrapper around the `numpy.insert` function.

        Parameters
        ----------
        obj : int, slice or sequence of ints
            Object that defines the index or indices before which ``values`` is
            inserted.
        values : array_like
            Values to insert. If the type of ``values`` is different
            from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately
            The unit of ``values`` must be consistent with this quantity.
        axis : int, optional
            Axis along which to insert ``values``. If ``axis`` is None then
            the quantity array is flattened before insertion.

        Returns
        -------
        out : `~astropy.units.Quantity`
            A copy of quantity with ``values`` inserted. Note that the
            insertion does not occur in-place: a new quantity array is returned.

        Examples
        --------
        >>> import astropy.units as u
        >>> q = [1, 2] * u.m
        >>> q.insert(0, 50 * u.cm)
        <Quantity [ 0.5, 1., 2.] m>

        >>> q = [[1, 2], [3, 4]] * u.m
        >>> q.insert(1, [10, 20] * u.m, axis=0)
        <Quantity [[ 1., 2.],
                   [ 10., 20.],
                   [ 3., 4.]] m>

        >>> q.insert(1, 10 * u.m, axis=1)
        <Quantity [[ 1., 10., 2.],
                   [ 3., 10., 4.]] m>
        """
        # Convert ``values`` into our own unit first (errors out if the units
        # are incompatible), then defer to np.insert on the bare values.
        out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
        return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
    """Superclass for Quantities of specific physical type.

    Subclasses of these work just like :class:`~astropy.units.Quantity`, except
    that they are for specific physical types (and may have methods that are
    only appropriate for that type). Astropy examples are
    :class:`~astropy.coordinates.Angle` and
    :class:`~astropy.coordinates.Distance`

    At a minimum, subclasses should set ``_equivalent_unit`` to the unit
    associated with the physical type.
    """
    # The unit for the specific physical type. Instances can only be created
    # with units that are equivalent to this.
    _equivalent_unit = None

    # The default unit used for views. Even with `None`, views of arrays
    # without units are possible, but will have an uninitialized unit.
    _unit = None

    # Default unit for initialization through the constructor.
    _default_unit = None

    # ensure that we get precedence over our superclass.
    __array_priority__ = Quantity.__array_priority__ + 10

    def __quantity_subclass__(self, unit):
        # Keep this specific subclass only while the unit stays equivalent
        # to the declared physical type; otherwise fall back to Quantity.
        if unit.is_equivalent(self._equivalent_unit):
            return type(self), True
        else:
            return super().__quantity_subclass__(unit)[0], False

    def _set_unit(self, unit):
        # Reject units of the wrong physical type with an informative error.
        if unit is None or not unit.is_equivalent(self._equivalent_unit):
            raise UnitTypeError(
                "{} instances require units equivalent to '{}'"
                .format(type(self).__name__, self._equivalent_unit) +
                (", but no unit was given." if unit is None else
                 f", so cannot set it to '{unit}'."))
        super()._set_unit(unit)
def isclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs):
    """
    Return a boolean array telling where ``a`` and ``b`` are element-wise
    equal within a tolerance.

    This is a unit-aware analogue of :func:`numpy.isclose`: the units (or
    lack thereof) of ``a``, ``b`` and ``atol`` must be mutually consistent,
    and ``rtol`` (default ``1e-5``) must be dimensionless; otherwise
    `~astropy.units.UnitsError` is raised.  ``equal_nan`` makes NaNs in ``a``
    compare equal to NaNs in ``b``.

    Unlike the `numpy` function, ``atol`` here defaults to zero (in the
    appropriate units) rather than ``1e-8``, since there is no natural way
    to pick a default *absolute* tolerance for inputs with arbitrarily
    scaled units.

    See also
    --------
    allclose
    """
    stripped = _unquantify_allclose_arguments(a, b, rtol, atol)
    return np.isclose(*stripped, equal_nan=equal_nan, **kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, equal_nan=False, **kwargs) -> bool:
    """
    Whether ``a`` and ``b`` are element-wise equal within a tolerance.

    This is a unit-aware analogue of :func:`numpy.allclose`: the units (or
    lack thereof) of ``a``, ``b`` and ``atol`` must be mutually consistent,
    and ``rtol`` (default ``1e-5``) must be dimensionless; otherwise
    `~astropy.units.UnitsError` is raised.  ``equal_nan`` makes NaNs in ``a``
    compare equal to NaNs in ``b``.

    Unlike the `numpy` function, ``atol`` here defaults to zero (in the
    appropriate units) rather than ``1e-8``, since there is no natural way
    to pick a default *absolute* tolerance for inputs with arbitrarily
    scaled units.

    See also
    --------
    isclose
    """
    stripped = _unquantify_allclose_arguments(a, b, rtol, atol)
    return np.allclose(*stripped, equal_nan=equal_nan, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
    """Validate the units of the isclose/allclose arguments and strip them.

    Converts ``desired`` and ``atol`` to the unit of ``actual`` and checks
    that ``rtol`` is dimensionless, raising `UnitsError` otherwise; returns
    the four plain ``.value`` arrays/scalars ready for the numpy functions.

    Fix: the re-raised `UnitsError`s now chain the original exception
    (``raise ... from err``) instead of discarding it, so the underlying
    conversion error stays visible in tracebacks.
    """
    actual = Quantity(actual, subok=True, copy=False)
    desired = Quantity(desired, subok=True, copy=False)
    try:
        desired = desired.to(actual.unit)
    except UnitsError as err:
        raise UnitsError(
            f"Units for 'desired' ({desired.unit}) and 'actual' "
            f"({actual.unit}) are not convertible"
        ) from err
    if atol is None:
        # By default, we assume an absolute tolerance of zero in the
        # appropriate units. The default value of None for atol is
        # needed because the units of atol must be consistent with the
        # units for a and b.
        atol = Quantity(0)
    else:
        atol = Quantity(atol, subok=True, copy=False)
        try:
            atol = atol.to(actual.unit)
        except UnitsError as err:
            raise UnitsError(
                f"Units for 'atol' ({atol.unit}) and 'actual' "
                f"({actual.unit}) are not convertible"
            ) from err
    rtol = Quantity(rtol, subok=True, copy=False)
    try:
        rtol = rtol.to(dimensionless_unscaled)
    except Exception as err:
        raise UnitsError("'rtol' should be dimensionless") from err
    return actual.value, desired.value, rtol.value, atol.value
|
<reponame>jiahfong/alr<filename>alr/training/pl_mixup_cyclic.py<gh_stars>1-10
from typing import Optional, Tuple
import torch
import numpy as np
import math
import torch.utils.data as torchdata
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from torch.nn import functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from alr.training.pl_mixup import (
PseudoLabelledDataset,
onehot_transform,
create_warmup_trainer,
create_plmixup_trainer,
PLMixupTrainer,
DataMarker,
)
from alr.training.progress_bar.ignite_progress_bar import ProgressBar
from alr.training.samplers import RandomFixedLengthSampler, MinLabelledSampler
from alr.training.utils import EarlyStopper, PerformanceTracker
class CyclicPLMixupTrainer(PLMixupTrainer):
    """PL-mixup trainer with an extra cyclic-LR (snapshot-style) stage.

    Training proceeds in three stages: (1) supervised warm-up on the
    labelled set with early stopping; (2) pseudo-label the pool with the
    warmed-up model and train with mixup under ReduceLROnPlateau; (3)
    restart SGD at a high learning rate and anneal it cyclically via
    ``cyclic_annealer``, keeping the best weights seen overall.
    """

    def fit(
        self,
        train: torchdata.Dataset,
        val: torchdata.Dataset,
        pool: torchdata.Dataset,
        epochs: Optional[Tuple[int, int, int]] = (50, 400, 60),
    ):
        """Run the three stages; ``epochs`` gives the per-stage budgets.

        Returns a dict of per-epoch ``val_loss``, ``val_acc`` and
        ``override_acc`` (pseudo-label accuracy) histories.
        """
        # Early-stopping patience may be given per stage or shared.
        if isinstance(self._patience, int):
            pat1 = pat2 = self._patience
        else:
            pat1, pat2 = self._patience[0], self._patience[1]
        history = {
            "val_loss": [],
            "val_acc": [],
            "override_acc": [],
        }
        optimiser = self._instantiate_optimiser()
        # Wrap the datasets so labelled vs. pseudo-labelled points are marked
        # and the appropriate transforms/augmentations are applied.
        train = PseudoLabelledDataset(
            train,
            mark=DataMarker.LABELLED,
            transform=self._train_transform,
            augmentation=self._data_augmentation,
            target_transform=onehot_transform(self._num_classes),
        )
        pool = PseudoLabelledDataset(
            pool,
            mark=DataMarker.PSEUDO_LABELLED,
            transform=self._train_transform,
            augmentation=self._data_augmentation,
        )
        val = PseudoLabelledDataset(
            val,
            mark=DataMarker.LABELLED,
            transform=self._test_transform,
        )
        val._with_metadata = False
        train_loader = torchdata.DataLoader(
            train,
            batch_size=self._batch_size,
            sampler=RandomFixedLengthSampler(train, self._rfls_length, shuffle=True),
            **self._loader_kwargs,
        )
        pool_loader = torchdata.DataLoader(
            pool, batch_size=512, shuffle=False, **self._loader_kwargs
        )
        val_loader = torchdata.DataLoader(
            val, batch_size=512, shuffle=False, **self._loader_kwargs
        )
        pbar = ProgressBar(desc=lambda _: "Stage 1")
        # warm up
        with train.no_fluff():
            val_eval = create_supervised_evaluator(
                self._model,
                metrics={"acc": Accuracy(), "loss": Loss(F.nll_loss)},
                device=self._device,
            )
            trainer = create_warmup_trainer(
                self._model,
                optimiser=optimiser,
                device=self._device,
            )
            es = EarlyStopper(
                self._model, patience=pat1, trainer=trainer, key="acc", mode="max"
            )
            es.attach(val_eval)

            @trainer.on(Events.EPOCH_COMPLETED)
            def _log(e: Engine):
                metrics = val_eval.run(val_loader).metrics
                acc, loss = metrics["acc"], metrics["loss"]
                pbar.log_message(
                    f"\tStage 1 epoch {e.state.epoch}/{e.state.max_epochs} "
                    f"[val] acc, loss = "
                    f"{acc:.4f}, {loss:.4f}"
                )
                history["val_acc"].append(acc)
                history["val_loss"].append(loss)

            pbar.attach(trainer)
            trainer.run(train_loader, max_epochs=epochs[0])
            es.reload_best()
        # pseudo-label points
        with pool.no_augmentation():
            with pool.no_fluff():
                pseudo_labels = []
                with torch.no_grad():
                    self._model.eval()
                    for x, _ in pool_loader:
                        x = x.to(self._device)
                        # add (softmax) probability, hence .exp()
                        pseudo_labels.append(self._model(x).exp().detach().cpu())
                pool.override_targets(torch.cat(pseudo_labels))
        plab_acc = pool.override_accuracy
        pbar.log_message(f"\t*End of stage 1*: overridden labels' acc: {plab_acc}")
        history["override_acc"].append(plab_acc)
        # start training with PL
        full_dataset = torchdata.ConcatDataset((train, pool))
        fds_loader = torchdata.DataLoader(
            full_dataset,
            batch_sampler=MinLabelledSampler(
                train,
                pool,
                batch_size=self._batch_size,
                min_labelled=self._min_labelled,
            ),
            **self._loader_kwargs,
        )
        val_eval = create_supervised_evaluator(
            self._model,
            metrics={"acc": Accuracy(), "loss": Loss(F.nll_loss)},
            device=self._device,
        )
        optimiser = self._instantiate_optimiser()
        scheduler = ReduceLROnPlateau(
            optimiser,
            mode="max",
            factor=0.1,
            patience=self._lr_patience,
            verbose=True,
            min_lr=1e-3,
        )
        trainer = create_plmixup_trainer(
            self._model,
            optimiser,
            pool,
            alpha=self._alpha,
            num_classes=self._num_classes,
            log_dir=self._log_dir,
            device=self._device,
        )
        es = EarlyStopper(
            self._model, patience=pat2, trainer=trainer, key="acc", mode="max"
        )
        es.attach(val_eval)
        pbar = ProgressBar(desc=lambda _: "Stage 2")

        @trainer.on(Events.EPOCH_COMPLETED)
        def _log(e: Engine):
            metrics = val_eval.run(val_loader).metrics
            acc, loss = metrics["acc"], metrics["loss"]
            pbar.log_message(
                f"\tEpoch {e.state.epoch}/{e.state.max_epochs} "
                f"[val] acc, loss = "
                f"{acc:.4f}, {loss:.4f}"
            )
            history["val_acc"].append(acc)
            history["val_loss"].append(loss)
            history["override_acc"].append(pool.override_accuracy)
            # Plateau scheduler keyed on validation accuracy.
            scheduler.step(acc)

        pbar.attach(trainer)
        trainer.run(fds_loader, max_epochs=epochs[1])
        es.reload_best()
        ####
        # save the best weight so far just in case we wander off
        pt = PerformanceTracker(self._model, patience=0)
        # es.reload_best() would've given us this accuracy, so we store it now
        # before restarting the SGD learning rate in case we never recover
        # from moving away from this local minima
        pt.step(max(history["val_acc"]))
        # reset SGD learning rate to 0.2 and start cyclic learning
        init_lr = 0.2
        optimiser = torch.optim.SGD(
            self._model.parameters(), lr=init_lr, momentum=0.9, weight_decay=1e-4
        )
        # budget number of epochs
        B = epochs[2]
        # number of snapshots
        M = 6
        # total number of training iterations for all B epochs:
        # len(fds_loader) = number of iterations need for ONE epoch
        T = len(fds_loader) * B
        print("Starting cyclic learning")
        trainer = create_plmixup_trainer(
            self._model,
            optimiser,
            pool,
            alpha=self._alpha,
            num_classes=self._num_classes,
            log_dir=self._log_dir,
            device=self._device,
        )
        val_eval = create_supervised_evaluator(
            self._model,
            metrics={"acc": Accuracy(), "loss": Loss(F.nll_loss)},
            device=self._device,
        )

        @trainer.on(Events.EPOCH_COMPLETED)
        def _log2(e: Engine):
            metrics = val_eval.run(val_loader).metrics
            acc, loss = metrics["acc"], metrics["loss"]
            print(
                f"\tEpoch {e.state.epoch}/{e.state.max_epochs} "
                f"[val] acc, loss = "
                f"{acc:.4f}, {loss:.4f}"
            )
            history["val_acc"].append(acc)
            history["val_loss"].append(loss)
            history["override_acc"].append(pool.override_accuracy)
            # Track (and checkpoint) the best weights across the cycles.
            pt.step(acc)

        @trainer.on(Events.ITERATION_COMPLETED)
        def _anneal(e: Engine):
            iteration = e.state.iteration
            assert iteration > 0
            # Cosine-anneal the LR within each of the M cycles.
            for param_group in optimiser.param_groups:
                param_group["lr"] = cyclic_annealer(iteration, T, M, init_lr)

        trainer.run(fds_loader, max_epochs=B)
        # always want the best set of weights:
        # if the cyclic learning scheduler ended up with better weights, use it,
        # otherwise, revert to the set of weights before starting cyclic learning
        pt.reload_best()
        soft_label_history = pool.label_history
        self.soft_label_history = torch.stack(soft_label_history, dim=0)
        return history
def cyclic_annealer(t, T, M, init_lr=0.2):
    """Shifted-cosine cyclic learning-rate schedule.

    Splits the ``T`` total iterations into ``M`` cycles; within each cycle
    the LR decays from ``init_lr`` (at the first iteration, ``t = 1``) to
    near zero following half a cosine wave, then jumps back up.
    """
    cycle_len = math.ceil(T / M)
    position = np.mod(t - 1, cycle_len)
    return 0.5 * init_lr * (np.cos(np.pi * position / cycle_len) + 1)
|
""" Roomba simulation curses"""
import argparse
import curses
from random import randint
from random import choice
from time import sleep
from typing import List
from typing import Tuple
# Glyphs used to draw the simulation.
ROOMBA = "@"
DUST1 = "."  # lightest dust level; upgraded in steps up to DUST3
DUST2 = ":"
DUST3 = "&"
BASE = "["  # the charging-base marker
# Reverse of each compass heading; used to stop the model-3 roomba from
# immediately backtracking along the way it came.
OPPOSITE_DIRECTION = {"N": "S", "NE": "SW", "E": "W", "SE": "NW",
                      "S": "N", "SW": "NE", "W": "E", "NW": "SE"}
class RoombaError(Exception):
    """Raised when the terminal window is too small to run the simulation."""
    pass
class Roomba:
    """A simulated robot vacuum moving on the room grid.

    The roomba wanders (strategy depends on ``model``), drains its battery
    as it moves, heads back to the base when the charge gets low, and
    recharges while docked.  It never drives onto the base marker cell
    itself; it docks on the cell just to the right of it.

    Fixes over the original:
    - ``_move1`` base-avoidance used ``!=``/``and`` (a De Morgan slip that
      wrongly vetoed any move sharing a row or column with the base).
    - ``_move2`` tested ``(y, x + 1)`` instead of ``(y, x - 1)`` when
      checking the westward move against recent positions.
    - ``_move3`` had a duplicated, unreachable ``"W"`` branch and never
      set the state to "Cleaning".
    - ``operate`` could fall through and return ``None`` despite ``-> bool``.
    """

    # (dy, dx) offset for each compass heading.
    _DELTAS = {"N": (-1, 0), "NE": (-1, 1), "E": (0, 1), "SE": (1, 1),
               "S": (1, 0), "SW": (1, -1), "W": (0, -1), "NW": (-1, -1)}

    def __init__(self, base_y: int, base_x: int,
                 width: int, height: int, options: dict) -> None:
        self.base_y = base_y
        self.base_x = base_x
        # Start docked: one cell to the right of the base marker.
        self.y = base_y
        self.x = base_x + 1
        self.room_width = width - 1
        self.room_height = height - 3
        self.charge = options["battery_size"]
        self.recharge_rate = options["recharge_rate"]
        self.discharge_rate = options["discharge_rate"]
        self.battery_size = options["battery_size"]
        # Head home when the remaining charge would only just cover the
        # longest straight run across the room.
        if self.room_height > self.room_width:
            self.low_charge = self.room_height * self.discharge_rate
        else:
            self.low_charge = self.room_width * self.discharge_rate
        self.state = "Ready"  # Ready, Cleaning or Charging
        self.speed = options["speed"]
        self.speed_count = 0
        self.model = options["model"]
        # Short memory of recently visited cells (used by model 2).
        self.previous_positions = [(self.y, self.x)]
        self.direction = ""
        self.reverse_direction = ""

    def operate(self, room: list) -> bool:
        """Advance the simulation one tick.

        Mutates ``room`` to reflect the roomba's position.  Returns True
        only when the battery is fully drained (caller resets the sim).
        """
        if self.state == "Ready" or self.state == "Cleaning":
            if self.charge <= 0:
                return True
            elif self.speed_count == self.speed:
                # Time to actually move: pay the battery cost and step.
                self.speed_count = 0
                self.charge -= self.discharge_rate
                room[self.y][self.x] = " "
                self._move()
                room[self.y][self.x] = ROOMBA
                return False
            else:
                # Waiting out the speed delay; just redraw in place.
                room[self.y][self.x] = ROOMBA
                self.speed_count += 1
                return False
        elif self.state == "Charging":
            self._charging()
        # Fix: always return a bool (the original fell through to None).
        return False

    def get_statues(self) -> Tuple[float, str]:
        """Return (battery percentage, state).

        NOTE(review): name looks like a typo for ``get_status`` but is kept
        — it is part of the public interface used by ``curses_main``.
        """
        return (self.charge / self.battery_size) * 100, self.state

    def _move(self) -> None:
        """Dispatch one movement step based on charge level and model."""
        if self.charge <= self.low_charge:
            self._return_home()
        elif self.model == 1:
            self._move1()
        elif self.model == 2:
            self._move2()
        elif self.model == 3:
            self._move3()

    def _neighbours(self) -> List[Tuple[int, int]]:
        """All in-bounds neighbouring cells, excluding the base marker."""
        cells = []
        for dy, dx in self._DELTAS.values():
            ny, nx = self.y + dy, self.x + dx
            if not (0 <= ny <= self.room_height and 0 <= nx <= self.room_width):
                continue
            if (ny, nx) == (self.base_y, self.base_x):
                continue  # never drive onto the base marker
            cells.append((ny, nx))
        return cells

    def _move1(self) -> None:
        """Model 1: uniform random walk over the valid neighbours."""
        self.state = "Cleaning"
        self.y, self.x = choice(self._neighbours())

    def _move2(self) -> None:
        """Model 2: random walk that avoids its last few visited cells."""
        self.state = "Cleaning"
        candidates = [cell for cell in self._neighbours()
                      if cell not in self.previous_positions]
        if candidates:
            self.y, self.x = choice(candidates)
        else:
            # Boxed in by recently visited cells (e.g. in a corner):
            # fall back to a plain model-1 step instead of crashing.
            self.y, self.x = choice(self._neighbours())
        self.previous_positions.append((self.y, self.x))
        if len(self.previous_positions) > 4:
            self.previous_positions.pop(0)

    def _move3(self) -> None:
        """Model 3: keep a heading until blocked, then bounce to a new one
        (never the exact reverse), like a real roomba off a wall."""
        self.state = "Cleaning"
        good_directions = self._check_directions()
        if self.direction == "" or self.direction not in good_directions:
            self.direction = choice(good_directions)
            self.reverse_direction = OPPOSITE_DIRECTION[self.direction]
        dy, dx = self._DELTAS[self.direction]
        self.y += dy
        self.x += dx

    def _return_home(self) -> Tuple[int, int]:
        """Step one cell towards the dock; switch to Charging on arrival."""
        if self.y > self.base_y:
            y = self.y - 1
        elif self.y < self.base_y:
            y = self.y + 1
        else:
            y = self.y
        # The docking cell sits one column to the right of the base marker.
        if self.x > self.base_x + 1:
            x = self.x - 1
        elif self.x < self.base_x + 1:
            x = self.x + 1
        else:
            x = self.x
        if x == self.base_x + 1 and y == self.base_y:
            self.state = "Charging"
        self.y = y
        self.x = x
        return self.y, self.x

    def _check_directions(self) -> List[str]:
        """Headings whose next cell is in bounds and not the base marker,
        excluding the reverse of the current heading."""
        good_directions = []
        for name, (dy, dx) in self._DELTAS.items():
            ny, nx = self.y + dy, self.x + dx
            # NOTE(review): the eastern bound is strict (nx < room_width) as
            # in the original model-3 checks, whereas models 1/2 allow
            # nx == room_width; preserved as-is.
            if not (0 <= ny <= self.room_height and 0 <= nx < self.room_width):
                continue
            if (ny, nx) == (self.base_y, self.base_x):
                continue
            good_directions.append(name)
        if self.reverse_direction in good_directions:
            good_directions.remove(self.reverse_direction)
        return good_directions

    def _charging(self) -> None:
        """Recharge one tick; become Ready once the battery is full."""
        self.charge += self.recharge_rate
        if self.charge >= self.battery_size:
            self.charge = self.battery_size
            self.state = "Ready"
def add_dust(room: list, height: int, width: int) -> None:
    """With probability 2/3, deepen the dust on one random cell.

    Dust accumulates in stages (empty -> DUST1 -> DUST2 -> DUST3); the
    base and the roomba's cell are never dusted.
    """
    if randint(1, 3) > 2:
        return
    y = randint(0, height - 3)
    x = randint(0, width - 2)
    cell = room[y][x]
    if cell in (BASE, ROOMBA):
        return
    upgrades = {" ": DUST1, DUST1: DUST2, DUST2: DUST3}
    if cell in upgrades:
        room[y][x] = upgrades[cell]
def setup_room_list(width: int, height: int) -> list:
    """Create an empty room grid sized to fit inside the terminal window
    (one column spare, two rows reserved for the status bar/border)."""
    row_count = height - 2
    col_count = width - 1
    return [[" "] * col_count for _ in range(row_count)]
def roomba_option(model_number: int) -> dict:
    """Return the spec dict for a roomba model.

    Fix: the original mixed ``if`` and ``elif`` in the chain; the specs are
    now a lookup table.  Unknown model numbers yield an empty dict, matching
    the original behaviour (argparse restricts the CLI to 1-3 anyway).
    """
    specs = {
        1: {"model": 1, "battery_size": 400, "recharge_rate": 5,
            "discharge_rate": 2, "speed": 4},
        2: {"model": 2, "battery_size": 500, "recharge_rate": 6,
            "discharge_rate": 2, "speed": 3},
        3: {"model": 3, "battery_size": 600, "recharge_rate": 6,
            "discharge_rate": 1.5, "speed": 2},
    }
    # Copy so callers cannot mutate the shared spec table.
    return dict(specs.get(model_number, {}))
def curses_main(screen, model: int) -> None:
    """Run the roomba simulation in a curses screen until Q/q is pressed.

    args:
        screen: curses standard screen (supplied by curses.wrapper)
        model: roomba model number, forwarded to roomba_option
    raises:
        RoombaError: if the terminal is 15 rows/columns or smaller
    """
    curses.curs_set(0)  # Set the cursor to off.
    screen.timeout(0)  # Turn blocking off for screen.getch().
    # curses.init_pair()
    height, width = screen.getmaxyx()
    if height <= 15 or width <= 15:
        raise RoombaError("Error window size should be greater than 15")
    room = setup_room_list(width, height)
    # The roomba and its charging base start at fixed grid cell (5, 0).
    roomba = Roomba(5, 0, width, height, roomba_option(model))
    room[5][0] = BASE
    reset = False
    running = True
    while running:
        # Rebuild the room on terminal resize or when the roomba requests
        # a reset via operate()'s return value.
        resize = curses.is_term_resized(height, width)
        if resize or reset:
            height, width = screen.getmaxyx()
            if height <= 15 or width <= 15:
                raise RoombaError("Error window size should be greater than 15")
            room = setup_room_list(width, height)
            roomba = Roomba(5, 0, width, height, roomba_option(model))
            room[5][0] = BASE
            screen.clear()
        add_dust(room, height, width)
        reset = roomba.operate(room)
        # Repaint every cell; the roomba itself is drawn bold.
        for y, row in enumerate(room):
            for x, d in enumerate(row):
                if d == ROOMBA:
                    screen.addstr(y, x, d, curses.A_BOLD)
                else:
                    screen.addstr(y, x, d)
        # Status line on the bottom row.
        battery, state = roomba.get_statues()
        msg = f" Model: {model} Battery: {battery:.1f}% {state}"
        screen.addstr(height - 1, 0, msg, curses.A_BOLD)
        screen.refresh()
        ch = screen.getch()
        # 81/113 are ord('Q') / ord('q'): quit keys.
        if ch in [81, 113]:
            running = False
        sleep(0.25)  # simulation tick
def main() -> int:
    """CLI entry point: parse the model flag, run the curses UI, return an exit code."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        dest="model",
        type=int,
        choices=[1, 2, 3],
        default=1,
        help="Model number to use",
    )
    model = parser.parse_args().model
    try:
        curses.wrapper(curses_main, model)
    except RoombaError as err:
        print(err)
        return 1
    return 0
# Script entry point: propagate main()'s status code to the shell.
if __name__ == "__main__":
    exit(main())
|
<gh_stars>0
import numpy as np
import numpy.linalg as linalg
import sys
from scipy.misc import derivative
from math import isnan
from tqdm import tqdm as tqdm
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool as Pool
from numpy.polynomial import legendre as leg
def gsection(func, a, b, a_lst=None, b_lst=None, target='min', epsilon=1e-10, iter_lim=1000000):
    """Golden-section search for an extremum of a unimodal function on [a, b].

    args:
        func: scalar function of one variable
        a, b: bracket endpoints (swapped automatically if a >= b)
        a_lst, b_lst: optional lists collecting the shrinking bracket endpoints
        target: 'min'/'minimum' or 'max'/'maximum'
        epsilon: bracket width below which the search stops
        iter_lim: hard cap on the number of iterations
    returns:
        midpoint of the final bracket
    raises:
        ValueError: if target is not a recognized keyword
    """
    if a >= b:
        a, b = b, a
    if target.lower() == 'min' or target.lower() == 'minimum':
        sign = 1.0
    elif target.lower() == 'max' or target.lower() == 'maximum':
        sign = -1.0
    else:
        raise ValueError('invalid value of "target"')
    # Golden-ratio split factors; multiplier1 + multiplier2 == 1.
    multiplier1 = (3.0 - np.sqrt(5)) / 2.0
    multiplier2 = (np.sqrt(5) - 1.0) / 2.0
    dot1 = a + multiplier1 * (b - a)
    dot2 = a + multiplier2 * (b - a)
    # Cache the interior evaluations: the original recomputed BOTH every
    # iteration, while golden-section needs only one new evaluation per step.
    f1 = sign * func(dot1)
    f2 = sign * func(dot2)
    if a_lst is not None:
        a_lst.append(a)
    if b_lst is not None:
        b_lst.append(b)
    counter = 0
    while b - a > epsilon and counter < iter_lim:
        if f1 > f2:
            # Extremum lies in [dot1, b]: old dot2 becomes the new dot1.
            a, dot1, dot2 = dot1, dot2, dot1 + multiplier2 * (b - dot1)
            f1, f2 = f2, sign * func(dot2)
        else:
            # Extremum lies in [a, dot2]: old dot1 becomes the new dot2.
            b, dot1, dot2 = dot2, a + multiplier1 * (dot2 - a), dot1
            f1, f2 = sign * func(dot1), f1
        if a_lst is not None:
            a_lst.append(a)
        if b_lst is not None:
            b_lst.append(b)
        counter += 1
    return (a + b) / 2.0
def left_side_grad(x0, func, epsilon=1e-6):
    """Backward-difference gradient estimate of func at x0.

    func must accept a matrix whose j-th column is a (perturbed) copy of x0
    and return one value per column.
    """
    tiled = np.ones((x0.size, x0.size)) * x0.reshape(x0.size, 1)
    return (func(tiled) - func(tiled - epsilon * np.eye(x0.size))) / epsilon
def right_side_grad(x0, func, epsilon=1e-6):
    """Forward-difference gradient estimate of func at x0.

    func must accept a matrix whose j-th column is a (perturbed) copy of x0
    and return one value per column.
    """
    tiled = np.ones((x0.size, x0.size)) * x0.reshape(x0.size, 1)
    return (func(tiled + epsilon * np.eye(x0.size)) - func(tiled)) / epsilon
def middle_grad(x0, func, epsilon=1e-6):
    """Central-difference gradient estimate of func at x0.

    func must accept a matrix whose j-th column is a (perturbed) copy of x0
    and return one value per column.
    """
    tiled = np.ones((x0.size, x0.size)) * x0.reshape(x0.size, 1)
    shift = epsilon * np.eye(x0.size)
    return (func(tiled + shift) - func(tiled - shift)) / 2 / epsilon
def left_side_grad_non_matrix(x0, func, epsilon=1e-6):
    """Backward-difference gradient of a vector->scalar func, one coordinate at a time."""
    gradient = np.zeros_like(x0)
    for i, basis_vec in enumerate(np.eye(x0.size, x0.size)):
        gradient[i] = (func(x0) - func(x0 - epsilon * basis_vec)) / epsilon
    return gradient
def right_side_grad_non_matrix(x0, func, epsilon=1e-6):
    """Forward-difference gradient of a vector->scalar func, one coordinate at a time."""
    gradient = np.zeros_like(x0)
    for i, basis_vec in enumerate(np.eye(x0.size, x0.size)):
        gradient[i] = (func(x0 + epsilon * basis_vec) - func(x0)) / epsilon
    return gradient
def middle_grad_non_matrix(x0, func, epsilon=1e-6):
    """Central-difference gradient of a vector->scalar func, one coordinate at a time."""
    gradient = np.zeros_like(x0)
    for i, basis_vec in enumerate(np.eye(x0.size, x0.size)):
        gradient[i] = (func(x0 + epsilon * basis_vec) -
                       func(x0 - epsilon * basis_vec)) / 2 / epsilon
    return gradient
def middle_grad_non_matrix_pool(x0, func, epsilon=1e-6):
    """Central-difference gradient computed with a thread pool, one task per coordinate."""
    pool = Pool(np.minimum(x0.size, cpu_count()))
    tasks = [(i, x0, func, epsilon) for i in range(x0.size)]
    partials = pool.map(partial_derivative, tasks)
    pool.close()
    pool.join()
    return np.array(partials)
def partial_derivative(args):
    """Central-difference partial derivative d func / d x0[i]; args = (i, x0, func, epsilon)."""
    i, x0, func, epsilon = args
    shift = epsilon * np.eye(x0.size, x0.size)[i]
    return (func(x0 + shift) - func(x0 - shift)) / 2 / epsilon
def middle_grad_arg_1_pool(x0_1, x0_2, func, epsilon=1e-6):
    """Central-difference gradient of func(x1, x2) w.r.t. x1, via a thread pool."""
    pool = Pool(np.minimum(x0_1.size, cpu_count()))
    tasks = [(i, x0_1, x0_2, func, epsilon) for i in range(x0_1.size)]
    partials = pool.map(partial_derivative_arg_1, tasks)
    pool.close()
    pool.join()
    return np.array(partials)
def partial_derivative_arg_1(args):
    """Central-difference partial of func(x1, x2) w.r.t. x1[i]; args = (i, x0_1, x0_2, func, epsilon)."""
    i, x0_1, x0_2, func, epsilon = args
    shift = epsilon * np.eye(x0_1.size, x0_1.size)[i]
    return (func(x0_1 + shift, x0_2) - func(x0_1 - shift, x0_2)) / 2 / epsilon
def middle_grad_arg_2_pool(x0_1, x0_2, func, epsilon=1e-6):
    """Central-difference gradient of func(x1, x2) w.r.t. x2, via a thread pool."""
    pool = Pool(np.minimum(x0_2.size, cpu_count()))
    tasks = [(i, x0_1, x0_2, func, epsilon) for i in range(x0_2.size)]
    partials = pool.map(partial_derivative_arg_2, tasks)
    pool.close()
    pool.join()
    return np.array(partials)
def partial_derivative_arg_2(args):
    """Central-difference partial of func(x1, x2) w.r.t. x2[i]; args = (i, x0_1, x0_2, func, epsilon)."""
    i, x0_1, x0_2, func, epsilon = args
    shift = epsilon * np.eye(x0_2.size, x0_2.size)[i]
    return (func(x0_1, x0_2 + shift) - func(x0_1, x0_2 - shift)) / 2 / epsilon
def step_argmin(kwargs):
    """Pick the step by minimizing func along the descent ray with argmin_finder.

    Expected kwargs: func, x_current, direction, step_min, step_max,
    argmin_finder (a 1-D minimizer called as finder(f, lo, hi)).
    """
    func = kwargs.get('func')
    x_current = kwargs.get('x_current')
    direction = kwargs.get('direction')
    step_min = kwargs.get('step_min')
    step_max = kwargs.get('step_max')
    argmin_finder = kwargs.get('argmin_finder')
    line_func = lambda step: func(x_current - step * direction)
    return argmin_finder(line_func, step_min, step_max)
def step_func(kwargs):
    """Return the step given by a user-supplied schedule at the current iteration index."""
    schedule = kwargs.get('step_defining_func')
    iteration = kwargs.get('step_index')
    return schedule(iteration)
def step_reduction(kwargs):
    """Shrink default_step geometrically until it gives a sufficient decrease.

    The step is multiplied by step_red_mult while the decrease
    func(x) - func(x - step * direction) does not exceed reduction_epsilon
    and the step magnitude stays above step_epsilon.
    """
    func = kwargs.get('func')
    x_current = kwargs.get('x_current')
    direction = kwargs.get('direction')
    step = kwargs.get('default_step')
    step_red_mult = kwargs.get('step_red_mult')
    reduction_epsilon = kwargs.get('reduction_epsilon')
    step_epsilon = kwargs.get('step_epsilon')
    while (reduction_epsilon >= func(x_current) - func(x_current - step * direction)
           and np.abs(step) > step_epsilon):
        step *= step_red_mult
    return step
def step_adaptive(kwargs):
    """Adaptive step search: shrink the trial step until it yields a decrease,
    then extend it in blocks of lim_num sub-steps until progress stalls or the
    directional-derivative test fails.

    Expected kwargs: func, x_current, direction, default_step, step_red_mult,
    step_incr_mult, lim_num, reduction_epsilon, step_epsilon, grad,
    grad_epsilon.

    Returns (step, tmp_step): the accepted total step and the expansion
    sub-step to reuse as the next iteration's default.
    """
    func, x_current, direction, default_step, step_red_mult, \
    step_incr_mult, lim_num, reduction_epsilon, step_epsilon, grad,\
    grad_epsilon = kwargs.get('func'), kwargs.get('x_current'),\
    kwargs.get('direction'), kwargs.get('default_step'), \
    kwargs.get('step_red_mult'), kwargs.get('step_incr_mult'), \
    kwargs.get('lim_num'), kwargs.get('reduction_epsilon'), \
    kwargs.get('step_epsilon'), kwargs.get('grad'), \
    kwargs.get('grad_epsilon')
    step = default_step
    # Phase 1: shrink the step until moving by it actually decreases func
    # (by more than reduction_epsilon) or the step becomes negligible.
    while reduction_epsilon >= func(x_current) - func(x_current -
                                                      step * direction) and np.abs(step) > step_epsilon:
        step *= step_red_mult
    if np.abs(step) < step_epsilon:
        step = step_epsilon
    break_flag = 0
    # Phase 2: grow the accumulated step in blocks of lim_num sub-steps of
    # size tmp_step, enlarging tmp_step by step_incr_mult after each block.
    tmp_step, step = step, 0.0
    while True:
        for i in range(1, lim_num + 1):
            f_old, f_new = \
                func(x_current - (step + (i - 1) * tmp_step) *
                     direction),\
                func(x_current - (step + i * tmp_step) * direction)
            # Stop extending as soon as progress stalls or func misbehaves.
            if reduction_epsilon >= f_old - f_new \
                    or isnan(f_old)\
                    or isnan(f_new):
                step += (i - 1) * tmp_step
                # break_flag == 2 means the very first sub-step already failed.
                break_flag = 1 if i != 1 else 2
                break
        if break_flag == 1 or break_flag == 2:
            break
        step += lim_num * tmp_step
        tmp_step *= step_incr_mult
        x_next = x_current - step * direction
        grad_next = grad(x_next, func, grad_epsilon)
        # Stop if the new gradient no longer points against the move.
        if np.dot(x_next - x_current, grad_next) >= 0:
            break
    # If even the first sub-step stalled, undo the last tmp_step growth.
    if break_flag == 2:
        tmp_step /= step_incr_mult
    if np.abs(step) < step_epsilon:
        step = step_epsilon
    return step, tmp_step
def matrix_B_transformation(matrix_B, grad_current, grad_next, beta):
    """Space-dilation update of B along the normalized direction
    r = B^T (grad_next - grad_current), scaled by factor beta."""
    r_vector = np.dot(matrix_B.T, grad_next - grad_current)
    r_vector = r_vector / linalg.norm(r_vector)
    identity = np.eye(matrix_B.shape[0], matrix_B.shape[1])
    rank_one = np.outer(r_vector, r_vector)
    return np.dot(matrix_B, identity + (beta - 1) * rank_one)
def r_algorithm_B_form(func, x0, grad, beta, step_method, step_method_kwargs, grad_epsilon, calc_epsilon_x, calc_epsilon_grad, step_epsilon, iter_lim, return_grads, tqdm_fl, continue_transformation, print_iter_index):
    """Shor's r-algorithm, B-form: minimize func from x0 with space dilation.

    args:
        func: objective, called as func(x)
        x0: starting point (1-D ndarray)
        grad: gradient estimator, called as grad(x, func, epsilon=...)
        beta: space-dilation coefficient
        step_method: key into the step-rule table ('argmin', 'func',
            'reduction', 'adaptive', 'adaptive_alternative')
        step_method_kwargs: parameters for the step rule (mutated in place:
            func/step_lim/grad/grad_epsilon and per-iteration data are injected)
        calc_epsilon_x, calc_epsilon_grad: stopping tolerances on the argument
            shift and on the gradient norm
        step_epsilon: steps below this magnitude count as collapsed
        iter_lim: maximum number of iterations
        return_grads: also return the gradient trajectory if True
        tqdm_fl: wrap the iteration range in a tqdm progress bar
        continue_transformation: on a collapsed step, keep dilating B and
            retry instead of proceeding
        print_iter_index: verbose tracing (runtime prints are in Russian)
    returns:
        ndarray of iterates (and ndarray of gradients if return_grads)
    """
    # grad_current starts as random noise; it is overwritten from grad_next
    # before it is used in an update.
    x_current, x_next, matrix_B, grad_current, grad_next = \
        x0.copy(), x0.copy(), np.eye(x0.size, x0.size), \
        np.random.rand(x0.size), grad(x0, func, epsilon=grad_epsilon)
    # NOTE(review): 'adaptive_alternative' intentionally maps to step_adaptive
    # here; no separate alternative rule is defined in this module.
    step_defining_algorithms = {'argmin': step_argmin, 'func':
                                step_func, 'reduction': step_reduction, 'adaptive':
                                step_adaptive, 'adaptive_alternative':
                                step_adaptive}
    # Step rules that may legitimately return a ~zero step and be retried.
    continuing_step_methods = ['argmin', 'reduction', 'adaptive',
                               'adaptive_alternative']
    step_method_kwargs['func'] = func
    step_method_kwargs['step_lim'] = iter_lim
    step_method_kwargs['grad'] = grad
    step_method_kwargs['grad_epsilon'] = grad_epsilon
    results = [x_next.copy()]
    grads = [grad_next.copy()]
    if tqdm_fl:
        iterations = tqdm(range(iter_lim))
    else:
        iterations = range(iter_lim)
    for k in iterations:
        if print_iter_index:
            print(k)
            print(x_next)
            print('Вычисление шага')
        # Direction in the transformed space: normalized B^T g.
        xi_current = np.dot(matrix_B.T, grad_next)
        xi_current = xi_current / linalg.norm(xi_current)
        step_method_kwargs['x_current'] = x_next
        step_method_kwargs['direction'] = np.dot(matrix_B,
                                                 xi_current)
        step_method_kwargs['step_index'] = k
        step_current = \
            (step_defining_algorithms.get(step_method)) \
            (step_method_kwargs)
        # Adaptive rules return (step, next_default_step).
        if isinstance(step_current, tuple):
            step_current, step_method_kwargs['default_step'] = \
                step_current
        if np.abs(step_current) < step_epsilon and step_method in \
                continuing_step_methods and continue_transformation:
            # Step collapsed: dilate the metric and retry this iterate.
            matrix_B = matrix_B_transformation(matrix_B,
                                               grad_current, grad_next, beta)
            continue
        x_current, grad_current = x_next.copy(), grad_next.copy()
        if print_iter_index:
            print('Вычисление приближения')
        x_next = x_current - step_current * np.dot(matrix_B,
                                                   xi_current)
        results.append(x_next.copy())
        if print_iter_index:
            print('Вычисление градиента')
        grad_next = grad(x_next, func, epsilon=grad_epsilon)
        grads.append(grad_next.copy())
        # Stop on a small argument shift or a small gradient norm.
        if linalg.norm(x_next - x_current) < calc_epsilon_x or \
                linalg.norm(grad_next) < calc_epsilon_grad:
            break
        if print_iter_index:
            print('Преобразование матриц')
        matrix_B = matrix_B_transformation(matrix_B, grad_current,
                                           grad_next, beta)
    if return_grads:
        return np.array(results), np.array(grads)
    return np.array(results)
def r_algorithm_B_form_cooperative(func_1, func_2, x0_1, x0_2, grad_1, grad_2, beta, step_method, step_method_kwargs, grad_epsilon, calc_epsilon_x, calc_epsilon_grad, step_epsilon, iter_lim, return_grads, tqdm_fl, continue_transformation, print_iter_index):
    """Two-player cooperative r-algorithm, B-form: alternately steps x_1 on
    func_1 (with x_2 frozen) and x_2 on func_2 (with x_1 frozen), each player
    keeping its own dilation matrix.

    returns:
        (iterates_1, iterates_2) and, if return_grads, also
        (grads_1, grads_2).
    """
    # The "previous" gradients start as random placeholders; real gradients
    # are evaluated immediately on the right-hand side.
    x_1_current, x_1_next, matrix_B_1, grad_1_current, grad_1_next = \
        x0_1.copy(), x0_1.copy(), np.eye(x0_1.size, x0_1.size), \
        np.random.rand(x0_1.size), grad_1(x0_1, x0_2, func_1, epsilon=grad_epsilon)
    x_2_current, x_2_next, matrix_B_2, grad_2_current, grad_2_next = \
        x0_2.copy(), x0_2.copy(), np.eye(x0_2.size, x0_2.size), \
        np.random.rand(x0_2.size), grad_2(x0_1, x0_2, func_2, epsilon=grad_epsilon)
    # NOTE(review): 'adaptive_alternative' maps to step_adaptive; no separate
    # alternative rule exists in this module.
    step_defining_algorithms = {'argmin': step_argmin, 'func':
                                step_func, 'reduction': step_reduction, 'adaptive':
                                step_adaptive, 'adaptive_alternative':
                                step_adaptive}
    continuing_step_methods = ['argmin', 'reduction', 'adaptive',
                               'adaptive_alternative']
    step_method_kwargs['step_lim'] = iter_lim
    step_method_kwargs['grad_epsilon'] = grad_epsilon
    results_1 = [x_1_next.copy()]
    grads_1 = [grad_1_next.copy()]
    results_2 = [x_2_next.copy()]
    grads_2 = [grad_2_next.copy()]
    if tqdm_fl:
        iterations = tqdm(range(iter_lim))
    else:
        iterations = range(iter_lim)
    # Each player keeps its own default step for the adaptive rule.
    if 'default_step' in step_method_kwargs:
        default_step_1, default_step_2 = \
            step_method_kwargs['default_step'], \
            step_method_kwargs['default_step']
    for k in iterations:
        step_1_current_zero, step_2_current_zero = False, False
        if print_iter_index:
            print(k)
            print(x_1_next)
            print(x_2_next)
            print('Вычисление шага №1')
        # Normalized transformed-space directions for both players.
        xi_1_current = np.dot(matrix_B_1.T, grad_1_next)
        xi_1_current = xi_1_current / linalg.norm(xi_1_current)
        xi_2_current = np.dot(matrix_B_2.T, grad_2_next)
        xi_2_current = xi_2_current / linalg.norm(xi_2_current)
        # Player 1's step search with x_2 frozen at x_2_next.
        step_method_kwargs['func'] = lambda x: func_1(x, x_2_next)
        step_method_kwargs['grad'] = lambda x0, func, epsilon: grad_1(x0, x_2_next, func_1, epsilon)
        step_method_kwargs['x_current'] = x_1_next
        step_method_kwargs['direction'] = np.dot(matrix_B_1,
                                                 xi_1_current)
        step_method_kwargs['step_index'] = k
        if 'default_step' in step_method_kwargs:
            step_method_kwargs['default_step'] = default_step_1
        step_1_current = (step_defining_algorithms.get(step_method)) \
            (step_method_kwargs)
        if print_iter_index:
            print('Вычисление шага №2')
        # Player 2's step search with x_1 frozen at x_1_next.
        step_method_kwargs['func'] = lambda x: func_2(x_1_next, x)
        step_method_kwargs['grad'] = lambda x0, func, epsilon: \
            grad_2(x_1_next, x0, func_2, epsilon)
        step_method_kwargs['x_current'] = x_2_next
        step_method_kwargs['direction'] = np.dot(matrix_B_2,
                                                 xi_2_current)
        step_method_kwargs['step_index'] = k
        if 'default_step' in step_method_kwargs:
            step_method_kwargs['default_step'] = default_step_2
        step_2_current = (step_defining_algorithms.get(step_method)) \
            (step_method_kwargs)
        # Adaptive rules return (step, next_default_step).
        if isinstance(step_1_current, tuple):
            step_1_current, default_step_1 = step_1_current
        if isinstance(step_2_current, tuple):
            step_2_current, default_step_2 = step_2_current
        if (np.abs(step_1_current) < step_epsilon or np.abs(step_2_current) < step_epsilon) and \
                step_method in continuing_step_methods and continue_transformation:
            # A step collapsed: dilate both metrics and retry the iterate.
            matrix_B_1 = matrix_B_transformation(matrix_B_1, grad_1_current, grad_1_next, beta)
            matrix_B_2 = matrix_B_transformation(matrix_B_2, grad_2_current, grad_2_next, beta)
            continue
        if print_iter_index:
            print('Вычисление приближения №1')
        # A step below 1e-51 is treated as exactly zero: the player stands still.
        if np.abs(step_1_current) < 1e-51:
            step_1_current_zero = True
        else:
            x_1_current, grad_1_current = x_1_next.copy(), grad_1_next.copy()
            x_1_next = x_1_current - step_1_current * np.dot(matrix_B_1, xi_1_current)
            results_1.append(x_1_next.copy())
        if print_iter_index:
            print('Вычисление приближения №2')
        if np.abs(step_2_current) < 1e-51:
            step_2_current_zero = True
        else:
            x_2_current, grad_2_current = x_2_next.copy(), grad_2_next.copy()
            x_2_next = x_2_current - step_2_current * np.dot(matrix_B_2, xi_2_current)
            results_2.append(x_2_next.copy())
        if print_iter_index:
            print('Вычисление градиента №1')
        grad_1_next = grad_1(x_1_next, x_2_next, func_1, epsilon=grad_epsilon)
        grads_1.append(grad_1_next.copy())
        if print_iter_index:
            print('Вычисление градиента №2')
        grad_2_next = grad_2(x_1_next, x_2_next, func_2, epsilon=grad_epsilon)
        grads_2.append(grad_2_next.copy())
        # Joint stopping test on the stacked shifts, stacked gradients, or
        # both steps having vanished.
        if linalg.norm(np.concatenate((x_1_next, x_2_next)) -
                       np.concatenate((x_1_current, x_2_current))) < calc_epsilon_x or \
                linalg.norm(np.concatenate((grad_1_next, grad_2_next))) < calc_epsilon_grad or \
                (step_1_current_zero and step_2_current_zero):
            break
        if print_iter_index:
            print('Преобразование матриц')
        matrix_B_1 = matrix_B_transformation(matrix_B_1, grad_1_current, grad_1_next, beta)
        matrix_B_2 = matrix_B_transformation(matrix_B_2, grad_2_current, grad_2_next, beta)
    if return_grads:
        return np.array(results_1), np.array(results_2), np.array(grads_1), np.array(grads_2)
    return np.array(results_1), np.array(results_2)
def matrix_H_transformation(matrix_H, grad_current, grad_next, beta):
    """Rank-one update of H (the r-algorithm's B B^T surrogate) driven by the
    gradient difference r = grad_next - grad_current."""
    r_vector = grad_next - grad_current
    h_r = np.dot(matrix_H, r_vector)
    denominator = np.dot(np.dot(r_vector, matrix_H), r_vector)
    return matrix_H + (beta * beta - 1) * np.outer(h_r, h_r) / denominator
def r_algorithm_H_form(func, x0, grad, beta, step_method, step_method_kwargs, grad_epsilon, calc_epsilon_x,
                       calc_epsilon_grad, step_epsilon, iter_lim, return_grads, tqdm_fl, continue_transformation,
                       print_iter_index):
    """Shor's r-algorithm, H-form (H plays the role of B B^T).

    Parameters mirror r_algorithm_B_form; see that function for details.
    returns:
        ndarray of iterates (and ndarray of gradients if return_grads)
    """
    # grad_current starts as random noise; it is overwritten before use.
    x_current, x_next, matrix_H, grad_current, grad_next = \
        x0.copy(), x0.copy(), np.eye(x0.size, x0.size), \
        np.random.rand(x0.size), grad(x0, func, epsilon=grad_epsilon)
    # BUGFIX: 'adaptive_alternative' previously referenced the undefined name
    # `step_adaptive_alternative`, raising NameError whenever this function
    # was called; it now falls back to step_adaptive, as r_algorithm_B_form does.
    step_defining_algorithms = {'argmin': step_argmin, 'func': step_func, 'reduction': step_reduction,
                                'adaptive': step_adaptive, 'adaptive_alternative': step_adaptive}
    # Step rules that may legitimately return a ~zero step and be retried.
    continuing_step_methods = ['argmin', 'reduction', 'adaptive', 'adaptive_alternative']
    step_method_kwargs['func'] = func
    step_method_kwargs['step_lim'] = iter_lim
    step_method_kwargs['grad'] = grad
    step_method_kwargs['grad_epsilon'] = grad_epsilon
    results = [x_next.copy()]
    grads = [grad_next.copy()]
    if tqdm_fl:
        iterations = tqdm(range(iter_lim))
    else:
        iterations = range(iter_lim)
    for k in iterations:
        if print_iter_index:
            print(k)
            print(x_next)
            print('Вычисление шага')
        step_method_kwargs['x_current'] = x_next
        # Search direction H g / sqrt(g^T H g).
        step_method_kwargs['direction'] = np.dot(matrix_H, grad_next) / \
            np.sqrt(np.dot(np.dot(matrix_H, grad_next), grad_next))
        step_method_kwargs['step_index'] = k
        step_current = (step_defining_algorithms.get(step_method))(step_method_kwargs)
        # Adaptive rules return (step, next_default_step).
        if isinstance(step_current, tuple):
            step_current, step_method_kwargs['default_step'] = step_current
        if np.abs(step_current) < step_epsilon and step_method in continuing_step_methods and continue_transformation:
            # Step collapsed: transform the metric and retry this iterate.
            matrix_H = matrix_H_transformation(matrix_H, grad_current, grad_next, beta)
            continue
        x_current, grad_current = x_next.copy(), grad_next.copy()
        if print_iter_index:
            print('Вычисление приближения')
        x_next = x_current - step_current * np.dot(matrix_H, grad_current) / \
            np.sqrt(np.dot(np.dot(matrix_H, grad_current), grad_current))
        results.append(x_next.copy())
        if print_iter_index:
            print('Вычисление градиента')
        grad_next = grad(x_next, func, epsilon=grad_epsilon)
        grads.append(grad_next.copy())
        # Stop on a small argument shift or a small gradient norm.
        if linalg.norm(x_next - x_current) < calc_epsilon_x or linalg.norm(grad_next) < calc_epsilon_grad:
            break
        if print_iter_index:
            print('Преобразование матриц')
        matrix_H = matrix_H_transformation(matrix_H, grad_current, grad_next, beta)
    if return_grads:
        return np.array(results), np.array(grads)
    return np.array(results)
def r_algorithm_H_form_cooperative(func_1, func_2, x0_1, x0_2, grad_1, grad_2, beta, step_method, step_method_kwargs,
                                   grad_epsilon, calc_epsilon_x, calc_epsilon_grad, step_epsilon, iter_lim,
                                   return_grads, tqdm_fl, continue_transformation, print_iter_index):
    """Two-player cooperative r-algorithm in H-form; parameter conventions
    follow r_algorithm_B_form_cooperative.

    returns:
        (iterates_1, iterates_2) plus (grads_1, grads_2) if return_grads.
    """
    # The "previous" gradients start as random placeholders; real gradients
    # are evaluated immediately on the right-hand side.
    x_1_current, x_1_next, matrix_H_1, grad_1_current, grad_1_next = \
        x0_1.copy(), x0_1.copy(), np.eye(x0_1.size, x0_1.size), np.random.rand(x0_1.size),\
        grad_1(x0_1, x0_2, func_1, epsilon=grad_epsilon)
    x_2_current, x_2_next, matrix_H_2, grad_2_current, grad_2_next = \
        x0_2.copy(), x0_2.copy(), np.eye(x0_2.size, x0_2.size), np.random.rand(x0_2.size),\
        grad_2(x0_1, x0_2, func_2, epsilon=grad_epsilon)
    # BUGFIX: 'adaptive_alternative' previously referenced the undefined name
    # `step_adaptive_alternative` (NameError at call time); it now falls back
    # to step_adaptive, matching the B-form implementations.
    step_defining_algorithms = {'argmin': step_argmin, 'func': step_func, 'reduction': step_reduction,
                                'adaptive': step_adaptive, 'adaptive_alternative': step_adaptive}
    continuing_step_methods = ['argmin', 'reduction', 'adaptive', 'adaptive_alternative']
    step_method_kwargs['step_lim'] = iter_lim
    step_method_kwargs['grad_epsilon'] = grad_epsilon
    results_1 = [x_1_next.copy()]
    grads_1 = [grad_1_next.copy()]
    results_2 = [x_2_next.copy()]
    grads_2 = [grad_2_next.copy()]
    if tqdm_fl:
        iterations = tqdm(range(iter_lim))
    else:
        iterations = range(iter_lim)
    # Each player keeps its own default step for the adaptive rule.
    if 'default_step' in step_method_kwargs:
        default_step_1, default_step_2 = step_method_kwargs['default_step'], step_method_kwargs['default_step']
    for k in iterations:
        step_1_current_zero, step_2_current_zero = False, False
        if print_iter_index:
            print(k)
            print(x_1_next)
            print(x_2_next)
            print('Вычисление шага №1')
        # Player 1's step search with x_2 frozen at x_2_next.
        step_method_kwargs['func'] = lambda x: func_1(x, x_2_next)
        step_method_kwargs['grad'] = lambda x0, func, epsilon: grad_1(x0, x_2_next, func_1, epsilon)
        step_method_kwargs['x_current'] = x_1_next
        step_method_kwargs['direction'] = np.dot(matrix_H_1, grad_1_next) / \
            np.sqrt(np.dot(np.dot(matrix_H_1, grad_1_next), grad_1_next))
        step_method_kwargs['step_index'] = k
        if 'default_step' in step_method_kwargs:
            step_method_kwargs['default_step'] = default_step_1
        step_1_current = (step_defining_algorithms.get(step_method))(step_method_kwargs)
        if print_iter_index:
            print('Вычисление шага №2')
        # Player 2's step search with x_1 frozen at x_1_next.
        step_method_kwargs['func'] = lambda x: func_2(x_1_next, x)
        step_method_kwargs['grad'] = lambda x0, func, epsilon: grad_2(x_1_next, x0, func_2, epsilon)
        step_method_kwargs['x_current'] = x_2_next
        step_method_kwargs['direction'] = np.dot(matrix_H_2, grad_2_next) / \
            np.sqrt(np.dot(np.dot(matrix_H_2, grad_2_next), grad_2_next))
        step_method_kwargs['step_index'] = k
        if 'default_step' in step_method_kwargs:
            step_method_kwargs['default_step'] = default_step_2
        step_2_current = (step_defining_algorithms.get(step_method))(step_method_kwargs)
        # Adaptive rules return (step, next_default_step).
        if isinstance(step_1_current, tuple):
            step_1_current, default_step_1 = step_1_current
        if isinstance(step_2_current, tuple):
            step_2_current, default_step_2 = step_2_current
        if (np.abs(step_1_current) < step_epsilon or np.abs(step_2_current) < step_epsilon) and \
                step_method in continuing_step_methods and continue_transformation:
            # A step collapsed: transform both metrics and retry the iterate.
            matrix_H_1 = matrix_H_transformation(matrix_H_1, grad_1_current, grad_1_next, beta)
            matrix_H_2 = matrix_H_transformation(matrix_H_2, grad_2_current, grad_2_next, beta)
            continue
        if print_iter_index:
            print('Вычисление приближения №1')
        # A step below 1e-51 is treated as exactly zero: the player stands still.
        if np.abs(step_1_current) < 1e-51:
            step_1_current_zero = True
        else:
            x_1_current, grad_1_current = x_1_next.copy(), grad_1_next.copy()
            x_1_next = x_1_current - step_1_current * np.dot(matrix_H_1, grad_1_next) / \
                np.sqrt(np.dot(np.dot(matrix_H_1, grad_1_next), grad_1_next))
            results_1.append(x_1_next.copy())
        if print_iter_index:
            print('Вычисление приближения №2')
        if np.abs(step_2_current) < 1e-51:
            step_2_current_zero = True
        else:
            x_2_current, grad_2_current = x_2_next.copy(), grad_2_next.copy()
            x_2_next = x_2_current - step_2_current * np.dot(matrix_H_2, grad_2_next) / \
                np.sqrt(np.dot(np.dot(matrix_H_2, grad_2_next), grad_2_next))
            results_2.append(x_2_next.copy())
        if print_iter_index:
            print('Вычисление градиента №1')
        grad_1_next = grad_1(x_1_next, x_2_next, func_1, epsilon=grad_epsilon)
        grads_1.append(grad_1_next.copy())
        if print_iter_index:
            print('Вычисление градиента №2')
        grad_2_next = grad_2(x_1_next, x_2_next, func_2, epsilon=grad_epsilon)
        grads_2.append(grad_2_next.copy())
        # Joint stopping test on stacked shifts, stacked gradients, or both
        # steps having vanished.
        if linalg.norm(np.concatenate((x_1_next, x_2_next)) -
                       np.concatenate((x_1_current, x_2_current))) < calc_epsilon_x or \
                linalg.norm(np.concatenate((grad_1_next, grad_2_next))) < calc_epsilon_grad or \
                (step_1_current_zero and step_2_current_zero):
            break
        if print_iter_index:
            print('Преобразование матриц')
        matrix_H_1 = matrix_H_transformation(matrix_H_1, grad_1_current, grad_1_next, beta)
        matrix_H_2 = matrix_H_transformation(matrix_H_2, grad_2_current, grad_2_next, beta)
    if return_grads:
        return np.array(results_1), np.array(results_2), np.array(grads_1), np.array(grads_2)
    return np.array(results_1), np.array(results_2)
def target_input(target):
    """Map an optimization goal keyword to a sign: +1.0 to minimize, -1.0 to maximize."""
    normalized = target.lower()
    if normalized in ("min", "minimum"):
        return 1.0
    if normalized in ("max", "maximum"):
        return -1.0
    raise ValueError('invalid value of "target_dual"')
def x0_input(x0):
    """Return an independent ndarray copy of the starting point."""
    return np.copy(np.array(x0))
def r_algorithm(func, x0, args=None, grad=middle_grad_non_matrix_pool, form='B', beta=0.5, target='min',
                grad_epsilon=1e-8, calc_epsilon_x=1e-10, calc_epsilon_grad=1e-10, step_epsilon=1e-15, iter_lim=1000000,
                return_grads=False, tqdm_fl=False, continue_transformation=False, print_iter_index=False, **kwargs):
    """Front-end for the single-objective r-algorithm.

    Wraps func with the goal sign (and optional extra args), picks default
    adaptive step settings when no step kwargs are given, and dispatches to
    the H-form or B-form solver based on `form`.
    """
    sign = target_input(target)
    x0 = x0_input(x0)
    if len(kwargs) > 0:
        step_method_kwargs = dict(kwargs)
    else:
        # No step options supplied: default adaptive rule configuration.
        step_method_kwargs = {'step_method': 'adaptive', 'default_step': 1.0, 'step_red_mult': 0.8,
                              'step_incr_mult': 1.2, 'lim_num': 3, 'reduction_epsilon': 1e-15,
                              'step_epsilon': step_epsilon}
    step_method = step_method_kwargs.get('step_method')
    if args is None:
        func_as_arg = lambda x: sign * func(x)
    else:
        func_as_arg = lambda x: sign * func(x, args)
    solver = r_algorithm_H_form if 'H' in form else r_algorithm_B_form
    return solver(func_as_arg, x0, grad, beta, step_method, step_method_kwargs,
                  grad_epsilon=grad_epsilon, calc_epsilon_x=calc_epsilon_x,
                  calc_epsilon_grad=calc_epsilon_grad, step_epsilon=step_epsilon, iter_lim=iter_lim,
                  return_grads=return_grads, tqdm_fl=tqdm_fl,
                  continue_transformation=continue_transformation, print_iter_index=print_iter_index)
def r_algorithm_cooperative(func_1, func_2, x0_1, x0_2, args_1=None, args_2=None, grad_1=middle_grad_arg_1_pool,
                            grad_2=middle_grad_arg_2_pool, form='B', beta=0.5, target_1='min', target_2='min',
                            grad_epsilon=1e-8, calc_epsilon_x=1e-10, calc_epsilon_grad=1e-10, step_epsilon=1e-15,
                            iter_lim=1000000, return_grads=False, tqdm_fl=False, continue_transformation=True,
                            print_iter_index=False, **kwargs):
    """Front-end for the cooperative r-algorithm.

    Wraps both objectives with their goal signs (and optional extra args),
    picks default adaptive step settings when no step kwargs are given, and
    dispatches to the H-form or B-form cooperative solver based on `form`.
    """
    sign_1, sign_2 = target_input(target_1), target_input(target_2)
    x0_1, x0_2 = x0_input(x0_1), x0_input(x0_2)
    if len(kwargs) > 0:
        step_method_kwargs = dict(kwargs)
    else:
        # No step options supplied: default adaptive rule configuration.
        step_method_kwargs = {'step_method': 'adaptive', 'default_step': 10.0, 'step_red_mult': 0.5,
                              'step_incr_mult': 1.2, 'lim_num': 3, 'reduction_epsilon': 1e-15,
                              'step_epsilon': step_epsilon}
    step_method = step_method_kwargs.get('step_method')
    if args_1 is None:
        func_as_arg_1 = lambda x, y: sign_1 * func_1(x, y)
    else:
        func_as_arg_1 = lambda x, y: sign_1 * func_1(x, y, args_1)
    if args_2 is None:
        func_as_arg_2 = lambda x, y: sign_2 * func_2(x, y)
    else:
        func_as_arg_2 = lambda x, y: sign_2 * func_2(x, y, args_2)
    solver = r_algorithm_H_form_cooperative if 'H' in form else r_algorithm_B_form_cooperative
    return solver(func_as_arg_1, func_as_arg_2, x0_1, x0_2, grad_1, grad_2, beta,
                  step_method, step_method_kwargs, grad_epsilon, calc_epsilon_x,
                  calc_epsilon_grad, step_epsilon, iter_lim, return_grads, tqdm_fl,
                  continue_transformation, print_iter_index)
def remove_nearly_same_points(points, eps=1e-3):
    """Drop points closer than eps to the most recently kept point.

    The first point is always kept; the final point is always appended,
    even when it duplicates its predecessor.
    """
    kept = [points[0].copy()]
    for point in points[:-1]:
        if np.linalg.norm(kept[-1] - point) > eps:
            kept.append(point.copy())
    kept.append(points[-1])
    return np.array(kept)
def trapezoid_double_on_grid(integrand_grid, x_a, x_b, y_a, y_b):
    """2-D trapezoidal rule on uniformly gridded samples over [x_a,x_b]x[y_a,y_b].

    integrand_grid[i, j] is the integrand at the j-th x node and i-th y node.
    """
    n_x, n_y = integrand_grid.shape[1] - 1, integrand_grid.shape[0] - 1
    corner_sum = (integrand_grid[:-1, :-1].sum() + integrand_grid[1:, :-1].sum() +
                  integrand_grid[:-1, 1:].sum() + integrand_grid[1:, 1:].sum())
    return (x_b - x_a) * (y_b - y_a) / 4 / n_x / n_y * corner_sum
def trapezoid_double_on_grid_array(integrand_grid, x_a, x_b, y_a, y_b):
    """Vectorized 2-D trapezoidal rule; axis 0 indexes independent integrands."""
    n_x, n_y = integrand_grid.shape[2] - 1, integrand_grid.shape[1] - 1
    corner_sum = (integrand_grid[:, :-1, :-1] + integrand_grid[:, 1:, :-1] +
                  integrand_grid[:, :-1, 1:] + integrand_grid[:, 1:, 1:]).sum(axis=2).sum(axis=1)
    return (x_b - x_a) * (y_b - y_a) / 4 / n_x / n_y * corner_sum
def trapezoid_double_on_grid_matrix(integrand_grid, x_a, x_b, y_a, y_b):
    """Vectorized 2-D trapezoidal rule; axes 0-1 index a matrix of integrands."""
    n_x, n_y = integrand_grid.shape[3] - 1, integrand_grid.shape[2] - 1
    corner_sum = (integrand_grid[:, :, :-1, :-1] + integrand_grid[:, :, 1:, :-1] +
                  integrand_grid[:, :, :-1, 1:] + integrand_grid[:, :, 1:, 1:]).sum(axis=3).sum(axis=2)
    return (x_b - x_a) * (y_b - y_a) / 4 / n_x / n_y * corner_sum
def trapezoid_double_on_grid_3d_array(integrand_grid, x_a, x_b, y_a, y_b):
    """Vectorized 2-D trapezoidal rule; axes 0-2 index a 3-D array of integrands."""
    n_x, n_y = integrand_grid.shape[4] - 1, integrand_grid.shape[3] - 1
    corner_sum = (integrand_grid[:, :, :, :-1, :-1] + integrand_grid[:, :, :, 1:, :-1] +
                  integrand_grid[:, :, :, :-1, 1:] + integrand_grid[:, :, :, 1:, 1:]).sum(axis=4).sum(axis=3)
    return (x_b - x_a) * (y_b - y_a) / 4 / n_x / n_y * corner_sum
"""
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
class UpdateQueryBuilder:
    def __init__(
        self,
        query,
        connection,
        local_time,
        temporal_query=None,
        temporal_query_insert=None,
        row_tuple=None,
        table_name=None,
        temporal_table_name=None,
    ):
        """Derive temporal-table (history) statements for a SQL UPDATE.

        args:
            query: the original SQL UPDATE statement (assumed lowercase)
            connection: DB-API style connection/engine exposing .execute()
            local_time: timestamp string used as the valid_to/valid_from stamp
            temporal_query / temporal_query_insert / row_tuple /
            table_name / temporal_table_name: normally left as None and
                populated by the builder methods below
        """
        self.query = query
        self.temporal_query = temporal_query
        self.connection = connection
        self.local_time = local_time
        self.temporal_query_insert = temporal_query_insert
        self.row_tuple = row_tuple
        self.table_name = table_name
        self.temporal_table_name = temporal_table_name
        # Populate table_name / temporal_table_name, then build both
        # temporal statements stamped with local_time.
        self.set_table_names()
        self.build_queries(local_time)
def set_table_names(self):
UpdateQueryBuilder.set_original_table_name(self)
UpdateQueryBuilder.set_temporal_table_name(self)
def set_original_table_name(self):
original_query = self.query
table_name_pattern = re.compile(r"(?<=update )[^ ]+")
table_name_matches = table_name_pattern.finditer(original_query)
for match in table_name_matches:
table_name_match = match
self.table_name = table_name_match.group(0)
def set_temporal_table_name(self):
table_name = self.table_name
self.temporal_table_name = table_name + "_history"
def build_queries(self, time_string):
UpdateQueryBuilder.build_temporal_query(self, time_string)
UpdateQueryBuilder.build_temporal_query_insert(self, time_string)
def build_temporal_query(self, date_string):
original_query = self.query
condition = UpdateQueryBuilder.get_where_condition(original_query)
temporal_query = "update {} set valid_to='{}' where {} and valid_to='9999-12-31T00:00:00.000000'".format(
self.temporal_table_name, date_string, condition
)
self.temporal_query = temporal_query
def get_where_condition(original_query):
condition_pattern = re.compile(r"(?<=where )[^ ]+")
condition_matches = condition_pattern.finditer(original_query)
for match in condition_matches:
condition_match = match
condition = condition_match.group(0)
return condition
    def build_temporal_query_insert(self, date_string):
        """Build the INSERT that records the post-update row in the history table.

        Pipeline: parse the SET clause into column/value pairs, fetch the
        current row matching the WHERE condition, overlay the new values on it
        (create_new_query_values, defined elsewhere in this class), then render
        the INSERT with validity timestamps appended.
        """
        original_query = self.query
        # Raw "col=val,col2=val2" string from the SET clause.
        new_values_string = UpdateQueryBuilder.get_new_values(original_query)
        column_value_list = UpdateQueryBuilder.create_column_values_list(
            new_values_string
        )
        column_value_dict = UpdateQueryBuilder.create_column_value_dictionary(
            column_value_list
        )
        condition = UpdateQueryBuilder.get_where_condition(original_query)
        # Current DB state of the row being updated (also cached on self).
        full_row = UpdateQueryBuilder.get_full_row(self, condition)
        new_row_tuple = UpdateQueryBuilder.create_new_query_values(
            self, column_value_dict, full_row
        )
        self.temporal_query_insert = UpdateQueryBuilder.build_query(
            self, new_row_tuple, date_string
        )
def get_new_values(original_query):
    """Extract the SET clause token from an UPDATE query.

    Captures the first space-delimited token after "set", e.g.
    "name='bob',age=5" for queries whose SET clause contains no
    spaces (the format this class produces and consumes).

    Args:
        original_query: the SQL text to scan.

    Returns:
        The SET clause string; the last occurrence of "set " wins,
        preserving the original behaviour.

    Raises:
        ValueError: if no "set" clause is present.  (The previous
        implementation reused one variable for both the match
        iterator and the match, and crashed with an AttributeError
        on queries without a SET clause.)
    """
    matches = re.findall(r"(?<=set )[^ ]+", original_query)
    if not matches:
        raise ValueError(
            "no 'set' clause found in query: {!r}".format(original_query)
        )
    return matches[-1]
def get_full_row(self, condition):
    """Fetch and cache the current row of the target table.

    Runs SELECT * against self.table_name restricted by *condition*,
    stores the first result on self.row_tuple and returns it.

    NOTE(review): the condition is interpolated directly into the SQL
    string; it originates from parsing self.query, not from external
    input, but parameterised queries would be safer — confirm upstream.
    """
    select_sql = "select * from {} where {}".format(self.table_name, condition)
    cursor = self.connection.execute(select_sql)
    row = cursor.fetchone()
    self.row_tuple = row
    return row
def build_query(self, new_row_tuple, date_string):
    """Render the INSERT statement for the history table.

    Args:
        new_row_tuple: the updated row values; string values may still
            carry the surrounding single quotes left by the SET-clause
            parser and are stripped here.
        date_string: valid_from timestamp appended to the row.

    Returns:
        The INSERT statement targeting self.temporal_table_name with
        the row values, *date_string*, and the open-ended sentinel
        valid_to timestamp.  The VALUES clause is rendered via the
        tuple repr, as before.
    """
    values = []
    for value in new_row_tuple:
        # `isinstance(...) is True` simplified; behaviour unchanged.
        if isinstance(value, str):
            values.append(value.strip("'"))
        else:
            values.append(value)
    values.append(str(date_string))
    values.append("9999-12-31T00:00:00.000000")
    return "insert into {} values {}".format(
        self.temporal_table_name, tuple(values)
    )
def create_column_values_list(values_string):
    """Split a SET clause into alternating column/value tokens.

    "name='x',age=5" becomes ["name", "'x'", "age", "5"]: every run of
    characters between '=' and ',' separators is one token.
    """
    return re.findall(r"[^=,]+", values_string)
def create_column_value_dictionary(column_value_list):
    """
    Convert an alternating column/value list to a dictionary.

    Args:
        column_value_list: flat list whose even indexes are column
            names and odd indexes their values, e.g. name='something'
            arrives as ['name', "'something'"].

    Returns:
        A dictionary mapping each column name to its value.  A
        trailing column without a value is ignored; an empty list
        yields an empty dict.
    """
    # Pair even-indexed columns with the following odd-indexed values.
    # The previous implementation navigated via list.index(), which
    # returns the *first* occurrence and therefore mispaired duplicate
    # tokens, and it returned None whenever the loop finished without
    # hitting an IndexError.
    return {
        column_value_list[i]: column_value_list[i + 1]
        for i in range(0, len(column_value_list) - 1, 2)
    }
def create_new_query_values(self, column_value_dictionary, full_row):
    """Apply the updated column values to a copy of the current row.

    For each changed column, looks up its current value in the
    database to locate its position in *full_row*, then substitutes
    the new value at that position.

    Args:
        column_value_dictionary: mapping of column name -> new value.
        full_row: tuple of the row's current values.

    Returns:
        The row as a tuple with the new values spliced in.
    """
    new_row = list(full_row)
    # Hoisted out of the loop: the condition does not change per column.
    condition = UpdateQueryBuilder.get_where_condition(self.query)
    for column, new_value in column_value_dictionary.items():
        result = self.connection.execute(
            # Bug fix: query the parsed target table instead of the
            # hard-coded table name "test".
            "select {} from {} where {}".format(
                column, self.table_name, condition
            )
        )
        old_value = result.fetchone()[0]
        # NOTE(review): list.index finds the *first* occurrence, so two
        # columns holding identical values could be mis-targeted; the
        # positional lookup is preserved from the original design.
        position = new_row.index(old_value)
        new_row[position] = new_value
    return tuple(new_row)
|
"""The basic grid class."""
from bempp.helpers import timeit as _timeit
import collections as _collections
import numba as _numba
import numpy as _np
EDGES_ID = 2
VERTICES_ID = 1
_EDGE_LOCAL = _np.array([[0, 1], [2, 0], [1, 2]])
class Grid(object):
"""The Grid class."""
@_timeit
def __init__(
    self, vertices, elements, domain_indices=None, grid_id=None, scatter=True
):
    """Create a grid from a vertices and an elements array.

    Parameters
    ----------
    vertices : array
        3 x N array of vertex coordinates (one vertex per column).
    elements : array
        3 x M array of vertex indices (one triangle per column).
    domain_indices : array, optional
        Domain index of each element; defaults to all zeros.
    grid_id : optional
        Id to assign to the grid; a unique id is generated if falsy.
    scatter : bool
        If True and a worker pool is initialised (and this is not a
        worker process), distribute the grid to all workers.
    """
    from bempp.api import log
    from bempp.api.utils import pool
    from bempp.api.utils.helpers import create_unique_id

    # Topology caches, filled below by the _enumerate/_compute helpers.
    self._vertices = None
    self._elements = None
    self._domain_indices = None
    self._edges = None
    self._element_edges = None
    self._edge_adjacency = None
    self._vertex_adjacency = None
    self._element_neighbors = None
    self._vertex_on_boundary = None
    self._edge_on_boundary = None
    self._edge_neighbors = None
    self._vertex_neighbors = None
    self._barycentric_grid = None
    if grid_id:
        self._id = grid_id
    else:
        self._id = create_unique_id()
    # Geometry caches.
    self._volumes = None
    self._normals = None
    self._jacobians = None
    self._jacobian_inverse_transposed = None
    self._diameters = None
    self._integration_elements = None
    self._centroids = None
    self._device_interfaces = {}
    self._element_to_vertex_matrix = None
    self._element_to_element_matrix = None
    # Order matters: edges must be enumerated before adjacency, and
    # boundary/neighbor data depend on the normalized arrays above.
    self._normalize_and_assign_input(vertices, elements, domain_indices)
    self._enumerate_edges()
    self._get_element_adjacency_for_edges_and_vertices()
    self._compute_geometric_quantities()
    self._compute_boundary_information()
    self._compute_edge_neighbors()
    self._compute_vertex_neighbors()
    # Numba-compatible snapshots of the grid data, in both precisions.
    self._grid_data_double = GridDataDouble(
        self._vertices,
        self._elements,
        self._edges,
        self._element_edges,
        self._volumes,
        self._normals,
        self._jacobians,
        self._jacobian_inverse_transposed,
        self._diameters,
        self._integration_elements,
        self._centroids,
        self._domain_indices,
        self._vertex_on_boundary,
        self._element_neighbors.indices,
        self._element_neighbors.indexptr,
    )
    self._grid_data_single = GridDataFloat(
        self._vertices.astype("float32"),
        self._elements,
        self._edges,
        self._element_edges,
        self._volumes.astype("float32"),
        self._normals.astype("float32"),
        self._jacobians.astype("float32"),
        self._jacobian_inverse_transposed.astype("float32"),
        self._diameters.astype("float32"),
        self._integration_elements.astype("float32"),
        self._centroids.astype("float32"),
        self._domain_indices,
        self._vertex_on_boundary,
        self._element_neighbors.indices,
        self._element_neighbors.indexptr,
    )
    self._is_scattered = False
    # Only the master process scatters; worker processes are receivers.
    if scatter and pool.is_initialised() and not pool.is_worker():
        self._scatter()
    if not pool.is_worker():
        log(
            (
                f"Created grid with id {self.id}. Elements: {self.number_of_elements}. "
                + f"Edges: {self.number_of_edges}. Vertices: {self.number_of_vertices}"
            )
        )
@property
def vertex_adjacency(self):
    """
    Vertex adjacency information.

    Returns a matrix with 4 rows. Each column has the entries e0,
    e1, ind0, ind1, which means that element e0 is connected to
    element e1 via local vertex index ind0 in e0 and ind1 in e1.

    Only returns connectivity via a single vertex. For
    connectivity via edges see edge_adjacency.
    """
    return self._vertex_adjacency

@property
def edge_adjacency(self):
    """
    Edge adjacency information.

    Returns a matrix with 6 rows. Each column has the entries e0,
    e1, v00, v01, v11, v12, which means that element e0 is
    connected to element e1. Vertex v00 in element e0 is
    identical to vertex v11 in element e1, and vertex v01 in
    element 0 is identical to vertex v12 in element e1.
    """
    return self._edge_adjacency

@property
def element_to_vertex_matrix(self):
    """Return the matrix mapping vertices to elements."""
    return self._element_to_vertex_matrix

@property
def element_to_element_matrix(self):
    """
    Return element to element matrix.

    If entry (i,j) has the value n > 0, element i
    and element j are connected via n vertices.
    """
    return self._element_to_element_matrix

@property
def element_neighbors(self):
    """
    Return named tuple (indices, indexptr).

    The neighbors of element i are given as
    element_neighbors.indices[
        element_neighbors.indexptr[i] : element_neighbors.indexptr[i + 1]].

    Note that the element i is contained in the list of neighbors.
    """
    return self._element_neighbors

@property
def number_of_vertices(self):
    """Return number of vertices."""
    return self._vertices.shape[1]

@property
def number_of_edges(self):
    """Return number of edges."""
    return self._edges.shape[1]

@property
def number_of_elements(self):
    """Return number of elements."""
    return self._elements.shape[1]

@property
def vertices(self):
    """Return vertices (3 x N array, one vertex per column)."""
    return self._vertices

@property
def elements(self):
    """Return elements (3 x M array of vertex indices)."""
    return self._elements

@property
def edges(self):
    """Return edges (2 x nedges array of vertex indices)."""
    return self._edges

@property
def centroids(self):
    """Return the centroids of the elements."""
    return self._centroids

@property
def domain_indices(self):
    """Return domain indices."""
    return self._domain_indices

@property
def element_edges(self):
    """
    Return an array of edge indices for each element.

    element_edges[i, j] is the index of the ith edge
    in the jth element.
    """
    return self._element_edges

@property
def device_interfaces(self):
    """Return the dictionary of device interfaces for the grid."""
    return self._device_interfaces

@property
def as_array(self):
    """
    Convert the grid to an array.

    For a grid with N elements returns a 1d array with
    9 * N entries. The three nodes for element with index e
    can be found in [9 * e, 9 * (e + 1)].
    """
    return self.vertices.T[self.elements.flatten(order="F"), :].flatten(order="C")

@property
def bounding_box(self):
    """
    Return the bounding box for the grid.

    The bounding box is a 3x2 array box such that
    box[:, 0] contains (xmin, ymin, zmin) and box[:, 1]
    contains (xmax, ymax, zmax).
    """
    box = _np.empty((3, 2), dtype="float64")
    box[:, 0] = _np.min(self.vertices, axis=1)
    box[:, 1] = _np.max(self.vertices, axis=1)
    return box

@property
def volumes(self):
    """Return element volumes (triangle areas)."""
    return self._volumes

@property
def diameters(self):
    """Return element diameters."""
    return self._diameters

@property
def maximum_element_diameter(self):
    """Return the maximum element diameter."""
    return _np.max(self.diameters)

@property
def minimum_element_diameter(self):
    """Return the minimum element diameter."""
    return _np.min(self.diameters)

@property
def normals(self):
    """Return normals."""
    return self._normals

@property
def jacobians(self):
    """Return Jacobians."""
    return self._jacobians

@property
def integration_elements(self):
    """Return integration elements."""
    return self._integration_elements

@property
def jacobian_inverse_transposed(self):
    """Return the jacobian inverse transposed."""
    return self._jacobian_inverse_transposed

@property
def vertex_on_boundary(self):
    """Return vertex boundary information."""
    return self._vertex_on_boundary

@property
def edge_on_boundary(self):
    """Return edge boundary information."""
    return self._edge_on_boundary

@property
def edge_neighbors(self):
    """Return for each edge the list of neighboring elements."""
    return self._edge_neighbors

def data(self, precision="double"):
    """Return Numba container with all relevant grid data.

    Args:
        precision: "double" or "single"; selects which precomputed
            GridData container is returned.

    Raises:
        ValueError: for any other precision string.
    """
    if precision == "double":
        return self._grid_data_double
    elif precision == "single":
        return self._grid_data_single
    else:
        raise ValueError("precision must be one of 'single', 'double'")

@property
def vertex_neighbors(self):
    """Return for each vertex the list of neighboring elements."""
    return self._vertex_neighbors

@property
def barycentric_refinement(self):
    """Return the barycentric refinement of this grid (computed lazily)."""
    # NOTE(review): barycentric_refinement here refers to a module-level
    # factory function not shown in this chunk.
    if self._barycentric_grid is None:
        self._barycentric_grid = barycentric_refinement(self)
    return self._barycentric_grid

@property
def id(self):
    """Return a unique id for the grid."""
    return self._id
def _scatter(self):
    """Initialise the grid on all workers.

    Serialises the defining arrays into pool buffers and triggers
    reconstruction of the grid (under the same id) on each worker.
    """
    from bempp.api.utils import pool

    array_proxies = pool.to_buffer(
        self.vertices, self.elements, self.domain_indices
    )
    # NOTE(review): _grid_scatter_worker is defined elsewhere in this
    # module; presumably it rebuilds the grid from the buffers.
    pool.execute(_grid_scatter_worker, self.id, array_proxies)
    self._is_scattered = True
def entity_count(self, codim):
    """Return the number of entities of given codimension.

    Codim 0 counts elements, 1 counts edges, 2 counts vertices.
    """
    counts = {
        0: self.number_of_elements,
        1: self.number_of_edges,
        2: self.number_of_vertices,
    }
    if codim not in counts:
        raise ValueError("codim must be one of 0, 1, or 2.")
    return counts[codim]
def plot(self):
    """Plot the grid using the configured external viewer."""
    from bempp.api.external.viewers import visualize

    visualize(self)
def get_element(self, index):
    """Return a view onto the element with the given index."""
    return Element(grid=self, index=index)
def entity_iterator(self, codim):
    """Return an iterator over entities of the given codimension.

    Codim 0 iterates elements, 1 iterates edges, 2 iterates vertices.
    """
    if codim not in (0, 1, 2):
        raise ValueError("codim must be one of 0, 1, or 2.")

    def iterate(count, factory):
        """Yield a view object for each entity index."""
        for index in range(count):
            yield factory(self, index)

    if codim == 0:
        return iterate(self.number_of_elements, Element)
    if codim == 1:
        return iterate(self.number_of_edges, Edge)
    return iterate(self.number_of_vertices, Vertex)
def map_to_point_cloud(self, order=None, local_points=None, precision="double"):
    """
    Return a point cloud representation of the grid on quadrature points.

    Return a representation of the grid as a point cloud using points on
    each element either defined through a triangle Gauss quadrature order
    or by directly specifying an array of local points.

    Parameters
    ----------
    order : Integer
        Optional parameter. Specify a quadrature order for the point
        cloud generation.
    local_points: Numpy array
        A 2 x N array of N points in local reference coordinates that specify
        the points to use for each triangle.
    precision: String
        Either 'single' or 'double'.

    If neither order nor local_points is specified the quadrature order is
    obtained from the global parameters.

    Returns a M x 3 array of M points that represent the grid on the specified
    points.
    """
    import bempp.api
    from bempp.api.integration.triangle_gauss import rule

    if local_points is None:
        if order is None:
            order = bempp.api.GLOBAL_PARAMETERS.quadrature.regular
        local_points, _ = rule(order)
    # Bug fix: honour the requested precision instead of always passing
    # the double-precision grid data.
    return grid_to_points(self.data(precision), local_points)
def refine(self):
    """Return a new grid with all elements refined.

    Each triangle is split into four children by connecting its edge
    midpoints; the midpoints become new vertices appended after the
    original vertices, so the new vertex for edge e has index
    number_of_vertices + e.
    """
    new_number_of_vertices = self.number_of_edges + self.number_of_vertices
    new_vertices = _np.empty(
        (3, new_number_of_vertices), dtype="float64", order="F"
    )
    new_vertices[:, : self.number_of_vertices] = self.vertices
    # Each edge midpoint forms a new vertex.
    new_vertices[:, self.number_of_vertices :] = 0.5 * (
        self.vertices[:, self.edges[0, :]] + self.vertices[:, self.edges[1, :]]
    )
    new_elements = _np.empty(
        (3, 4 * self.number_of_elements), order="F", dtype="uint32"
    )
    # Children inherit the domain index of their parent element.
    new_domain_indices = _np.repeat(self.domain_indices, 4)
    for index, elem in enumerate(self.elements.T):
        vertex0 = elem[0]
        vertex1 = elem[1]
        vertex2 = elem[2]
        # NOTE(review): the local-edge-to-vertex-pair convention
        # (edge 0 -> (0,1), edge 1 -> (2,0), edge 2 -> (1,2)) comes from
        # the edge enumeration helper, which is not shown here — confirm.
        vertex01 = self.element_edges[0, index] + self.number_of_vertices
        vertex20 = self.element_edges[1, index] + self.number_of_vertices
        vertex12 = self.element_edges[2, index] + self.number_of_vertices
        # Three corner triangles plus the central triangle.
        new_elements[:, 4 * index] = [vertex0, vertex01, vertex20]
        new_elements[:, 4 * index + 1] = [vertex01, vertex1, vertex12]
        new_elements[:, 4 * index + 2] = [vertex12, vertex2, vertex20]
        new_elements[:, 4 * index + 3] = [vertex01, vertex12, vertex20]
    return Grid(new_vertices, new_elements, new_domain_indices)
def _compute_vertex_neighbors(self):
    """Compute for each vertex the list of adjacent elements.

    Stores an IndexList view over the CSR structure of the
    element-to-vertex matrix: the neighbors of vertex i are
    indices[indptr[i] : indptr[i + 1]].
    """
    from bempp.helpers import IndexList

    # Dead commented-out code from an earlier list-based implementation
    # has been removed; behaviour is unchanged.
    indptr = self.element_to_vertex_matrix.indptr
    indices = self.element_to_vertex_matrix.indices
    self._vertex_neighbors = IndexList(indices, indptr)
def _normalize_and_assign_input(self, vertices, elements, domain_indices):
    """Convert input into the right form.

    Arrays are stored Fortran-ordered with fixed dtypes; when
    domain_indices is None every element is assigned domain 0.
    """
    from bempp.api.utils.helpers import align_array

    if domain_indices is None:
        # Default: all elements belong to domain 0.
        domain_indices = _np.zeros(elements.shape[1], dtype="uint32")
    self._vertices = align_array(vertices, "float64", "F")
    self._elements = align_array(elements, "uint32", "F")
    self._domain_indices = align_array(domain_indices, "uint32", "F")
def _enumerate_edges(self):
    """
    Enumerate all edges in a given grid.

    Assigns a tuple (edges, element_edges) to
    self._edges and self._element_edges.

    element_edges is an array a such that a[i, j] is the
    index of the ith edge in the jth elements, and edges
    is a 2 x nedges array such that the jth column stores the
    two nodes associated with the jth edge.
    """
    # The following would be better defined inside the njitted routine.
    # But Numba then throws an error that it cannot find the UniTuple type.
    edge_tuple_to_index = _numba.typed.Dict.empty(
        key_type=_numba.types.containers.UniTuple(_numba.types.int64, 2),
        value_type=_numba.types.int64,
    )
    # NOTE(review): the dict presumably maps a vertex-index pair to an
    # edge index; it is populated inside _numba_enumerate_edges, which
    # is not shown in this chunk.
    self._edges, self._element_edges = _numba_enumerate_edges(
        self._elements, edge_tuple_to_index
    )
def _get_element_adjacency_for_edges_and_vertices(self):
    """Get element adjacency.

    The array edge_adjacency has 6 rows, such that for index j the
    element edge_adjacency[0, j] is connected with element
    edge_adjacency[1, j] via the vertices edge_adjacency[2:4, j]
    in the first element and the vertices edge_adjacency[4:6, j]
    in the second element. The vertex numbers here are local
    numbers (0, 1 or 2).

    The array vertex_adjacency has 4 rows, such that for index j the
    element vertex_adjacency[0, j] is connected with
    vertex_adjacency[1, j] via the vertex vertex_adjacency[2, j]
    in the first element and the vertex vertex_adjacency[3, j]
    in the second element. The vertex numbers here are local numbers
    (0, 1 or 2).
    """
    from bempp.helpers import IndexList

    self._element_to_vertex_matrix = get_element_to_vertex_matrix(
        self._vertices, self._elements
    )
    elem_to_elem_matrix = get_element_to_element_matrix(
        self._vertices, self._elements
    )
    self._element_to_element_matrix = elem_to_elem_matrix
    # (element, element, shared-vertex-count) triplets from the sparse
    # element-to-element matrix.
    elements1, elements2, nvertices = _get_element_to_element_vertex_count(
        elem_to_elem_matrix
    )
    # Pairs sharing exactly one vertex.
    vertex_connected_elements1, vertex_connected_elements2 = _element_filter(
        elements1, elements2, nvertices, VERTICES_ID
    )
    # Pairs sharing an edge (two vertices).
    edge_connected_elements1, edge_connected_elements2 = _element_filter(
        elements1, elements2, nvertices, EDGES_ID
    )
    self._vertex_adjacency = _find_vertex_adjacency(
        self._elements, vertex_connected_elements1, vertex_connected_elements2
    )
    self._edge_adjacency = _find_edge_adjacency(
        self._elements, edge_connected_elements1, edge_connected_elements2
    )
    # CSR-style neighbor lists (each element is its own neighbor too).
    self._element_neighbors = IndexList(
        elem_to_elem_matrix.indices, elem_to_elem_matrix.indptr
    )
def _compute_geometric_quantities(self):
    """Compute geometric quantities for the grid.

    Fills volumes, normals, jacobians, diameters, centroids,
    integration elements, and the transposed Jacobian inverse.
    """
    # Rows are the vertex coordinates of each element in sequence
    # (3 rows per element, Fortran element order).
    element_vertices = self.vertices.T[self.elements.flatten(order="F")]
    indexptr = 3 * _np.arange(self.number_of_elements)
    # Select rows 1 and 2 of each element triple (the two non-reference
    # vertices).
    indices = _np.repeat(indexptr, 2) + _np.tile([1, 2], self.number_of_elements)
    # Centroid = arithmetic mean of the three vertices.
    centroids = (
        1.0
        / 3
        * _np.sum(
            _np.reshape(element_vertices, (self.number_of_elements, 3, 3)), axis=1
        )
    )
    # Jacobian columns are the edge vectors v1 - v0 and v2 - v0.
    jacobians = (element_vertices - _np.repeat(element_vertices[::3], 3, axis=0))[
        indices
    ]
    normal_directions = _np.cross(jacobians[::2], jacobians[1::2], axis=1)
    normal_direction_norms = _np.linalg.norm(normal_directions, axis=1)
    normals = normal_directions / _np.expand_dims(normal_direction_norms, 1)
    # The cross-product norm is twice the triangle area.
    volumes = 0.5 * normal_direction_norms
    # Third side v1 - v2 for the diameter formula below.
    jacobian_diff = jacobians[::2] - jacobians[1::2]
    diff_norms = _np.linalg.norm(jacobian_diff, axis=1)
    jac_vector_norms = _np.linalg.norm(jacobians, axis=1)
    # Circumcircle diameter: a * b * c / (2 * area).
    diameters = (
        jac_vector_norms[::2]
        * jac_vector_norms[1::2]
        * diff_norms
        / normal_direction_norms
    )
    self._volumes = volumes
    self._normals = normals
    # Stored as (nelements, 3, 2): one 3x2 Jacobian per element.
    self._jacobians = _np.swapaxes(
        _np.reshape(jacobians, (self.number_of_elements, 2, 3)), 1, 2
    )
    self._diameters = diameters
    self._centroids = centroids
    # Metric tensor J^T J per element.
    jac_transpose_jac = _np.empty((self.number_of_elements, 2, 2), dtype="float64")
    for index in range(self.number_of_elements):
        jac_transpose_jac[index] = self.jacobians[index].T.dot(
            self.jacobians[index]
        )
    # Integration element = sqrt(det(J^T J)).
    self._integration_elements = _np.sqrt(_np.linalg.det(jac_transpose_jac))
    jac_transpose_jac_inv = _np.linalg.inv(jac_transpose_jac)
    # Pseudo-inverse transposed: J (J^T J)^{-1}, shape (nelements, 3, 2).
    self._jacobian_inverse_transposed = _np.empty(
        (self.number_of_elements, 3, 2), dtype="float64"
    )
    for index in range(self.number_of_elements):
        self._jacobian_inverse_transposed[index] = self.jacobians[index].dot(
            jac_transpose_jac_inv[index]
        )
def _compute_boundary_information(self):
"""
Return a boolean array with boundary information.
Computes arr0, arr1 such that arr0[j] is True if
vertex j lies on the boundary and arr1[i] is True if edge
i lies on the boundary.
"""
from scipy.sparse import csr_matrix
element_edges = self.element_edges
number_of_elements = self.number_of_elements
number_of_edges = self.number_of_edges
number_of_vertices = self.number_of_vertices
edge_indices = _np.ravel(element_edges, order="F")
repeated_element_indices = _np.repeat(_np.arange(number_of_elements), 3)
data = _np.ones(3 * number_of_elements, dtype="uint32")
element_to_edge = csr_matrix(
(data, (repeated_element_indices, edge_indices)),
shape=(number_of_elements, number_of_edges),
)
edge_to_edge = element_to_edge.T.dot(element_to_edge)
arr1 = edge_to_edge.diagonal() == 1
arr0 = _np.zeros(number_of_vertices, dtype=_np.bool)
for boundary_edge_index in _np.flatnonzero(arr1):
arr0[self.edges[:, boundary_edge_index]] = True
self._vertex_on_boundary = arr0
self._edge_on_boundary = arr1
def _compute_edge_neighbors(self):
"""Get the neighbors of each edge."""
edge_neighbors = [[] for _ in range(self.number_of_edges)]
for element_index in range(self.number_of_elements):
for local_index in range(3):
edge_neighbors[self.element_edges[local_index, element_index]].append(
element_index
)
self._edge_neighbors = [tuple(elem) for elem in edge_neighbors]
@_numba.experimental.jitclass(
    [
        ("vertices", _numba.float64[:, :]),
        ("elements", _numba.uint32[:, :]),
        ("edges", _numba.uint32[:, :]),
        ("element_edges", _numba.uint32[:, :]),
        ("volumes", _numba.float64[:]),
        ("normals", _numba.float64[:, :]),
        ("jacobians", _numba.float64[:, :, :]),
        ("jac_inv_trans", _numba.float64[:, :, :]),
        ("diameters", _numba.float64[:]),
        ("integration_elements", _numba.float64[:]),
        ("centroids", _numba.float64[:, :]),
        ("domain_indices", _numba.uint32[:]),
        ("vertex_on_boundary", _numba.boolean[:]),
        ("element_neighbor_indices", _numba.uint32[:]),
        ("element_neighbor_indexptr", _numba.uint32[:]),
    ]
)
class GridDataDouble(object):
    """A Numba container class for the grid data (double precision).

    Plain data holder mirroring the arrays precomputed by Grid so
    that njitted kernels can access them without Python objects.
    """

    def __init__(
        self,
        vertices,
        elements,
        edges,
        element_edges,
        volumes,
        normals,
        jacobians,
        jac_inv_trans,
        diameters,
        integration_elements,
        centroids,
        domain_indices,
        vertex_on_boundary,
        element_neighbor_indices,
        element_neighbor_indexptr,
    ):
        """Create a GridDataDouble."""
        self.vertices = vertices
        self.elements = elements
        self.edges = edges
        self.element_edges = element_edges
        self.volumes = volumes
        self.normals = normals
        self.jacobians = jacobians
        self.jac_inv_trans = jac_inv_trans
        self.diameters = diameters
        self.integration_elements = integration_elements
        self.centroids = centroids
        self.domain_indices = domain_indices
        self.vertex_on_boundary = vertex_on_boundary
        self.element_neighbor_indices = element_neighbor_indices
        self.element_neighbor_indexptr = element_neighbor_indexptr

    def local2global(self, elem_index, local_coords):
        """Map local to global coordinates.

        global = v0 + J @ local, with v0 the element's first vertex.
        """
        return _np.expand_dims(
            self.vertices[:, self.elements[0, elem_index]], 1
        ) + self.jacobians[elem_index].dot(local_coords)
@_numba.experimental.jitclass(
    [
        ("vertices", _numba.float32[:, :]),
        ("elements", _numba.uint32[:, :]),
        ("edges", _numba.uint32[:, :]),
        ("element_edges", _numba.uint32[:, :]),
        ("volumes", _numba.float32[:]),
        ("normals", _numba.float32[:, :]),
        ("jacobians", _numba.float32[:, :, :]),
        ("jac_inv_trans", _numba.float32[:, :, :]),
        ("diameters", _numba.float32[:]),
        ("integration_elements", _numba.float32[:]),
        ("centroids", _numba.float32[:, :]),
        ("domain_indices", _numba.uint32[:]),
        ("vertex_on_boundary", _numba.boolean[:]),
        ("element_neighbor_indices", _numba.uint32[:]),
        ("element_neighbor_indexptr", _numba.uint32[:]),
    ]
)
class GridDataFloat(object):
    """A Numba container class for the grid data (single precision).

    Same layout as GridDataDouble, but with float32 arrays.
    """

    def __init__(
        self,
        vertices,
        elements,
        edges,
        element_edges,
        volumes,
        normals,
        jacobians,
        jac_inv_trans,
        diameters,
        integration_elements,
        centroids,
        domain_indices,
        vertex_on_boundary,
        element_neighbor_indices,
        element_neighbor_indexptr,
    ):
        """Create a GridDataFloat."""
        self.vertices = vertices
        self.elements = elements
        self.edges = edges
        self.element_edges = element_edges
        self.volumes = volumes
        self.normals = normals
        self.jacobians = jacobians
        self.jac_inv_trans = jac_inv_trans
        self.diameters = diameters
        self.integration_elements = integration_elements
        self.centroids = centroids
        self.domain_indices = domain_indices
        self.vertex_on_boundary = vertex_on_boundary
        self.element_neighbor_indices = element_neighbor_indices
        self.element_neighbor_indexptr = element_neighbor_indexptr

    def local2global(self, elem_index, local_coords):
        """Map local to global coordinates.

        global = v0 + J @ local, with v0 the element's first vertex.
        """
        return _np.expand_dims(
            self.vertices[:, self.elements[0, elem_index]], 1
        ) + self.jacobians[elem_index].dot(local_coords)
class ElementGeometry(object):
    """Provides geometry information for an element."""

    def __init__(self, grid, index):
        """Initialize geometry for element *index* of *grid*.

        All quantities are looked up lazily from the grid's
        precomputed arrays.
        """
        self._grid = grid
        self._index = index

    @property
    def corners(self):
        """Return corners as a 3x3 array (one vertex per column)."""
        return self._grid.vertices[:, self._grid.elements[:, self._index]]

    @property
    def jacobian(self):
        """Return jacobian."""
        return self._grid.jacobians[self._index]

    @property
    def integration_element(self):
        """Return integration element."""
        return self._grid.integration_elements[self._index]

    @property
    def jacobian_inverse_transposed(self):
        """Return Jacobian inverse transposed."""
        return self._grid.jacobian_inverse_transposed[self._index]

    @property
    def normal(self):
        """Return normal."""
        return self._grid.normals[self._index]

    @property
    def volume(self):
        """Return volume."""
        return self._grid.volumes[self._index]

    @property
    def diameter(self):
        """Return the diameter of the circumcircle."""
        return self._grid.diameters[self._index]

    @property
    def centroid(self):
        """Return the centroid of the element."""
        return self._grid.centroids[self._index]

    def local2global(self, points):
        """Map points in local coordinates to global.

        global = v0 + J @ points, with v0 the first corner.
        """
        return _np.expand_dims(self.corners[:, 0], 1) + self.jacobian @ points
class Element(object):
    """Provides a view onto an element of the grid."""

    def __init__(self, grid, index):
        """Create a view onto element *index* of *grid*."""
        self._grid = grid
        self._index = index

    @property
    def index(self):
        """Index of the element."""
        return self._index

    @property
    def grid(self):
        """Associated grid."""
        return self._grid

    @property
    def geometry(self):
        """Return geometry."""
        return ElementGeometry(self._grid, self.index)

    @property
    def domain_index(self):
        """Return the domain index."""
        return self._grid.domain_indices[self.index]

    def sub_entity_iterator(self, codim):
        """Return iterator over subentities (codim 1: edges, 2: vertices)."""

        def edge_iterator():
            """Iterate over edges."""
            for index in self._grid.element_edges[:, self.index]:
                yield Edge(self._grid, index)

        def vertex_iterator():
            """Iterate over vertices."""
            for index in self._grid.elements[:, self.index]:
                yield Vertex(self._grid, index)

        if codim not in [1, 2]:
            raise ValueError("codim must be 1 (for edges) or 2 (for vertices)")
        if codim == 1:
            iterator = edge_iterator()
        if codim == 2:
            iterator = vertex_iterator()
        return iterator

    def __eq__(self, other):
        """Elements are equal when grid and index agree."""
        if isinstance(other, Element):
            return other.grid == self.grid and other.index == self.index
        # Bug fix: return NotImplemented instead of False so Python can
        # try the reflected comparison for foreign types.
        return NotImplemented

    def __hash__(self):
        """Hash consistent with __eq__.

        Bug fix: defining __eq__ without __hash__ implicitly set
        __hash__ to None, making Element instances unusable in sets
        and as dict keys.
        """
        return hash((id(self._grid), self._index))
# Geometry of a vertex is just its single corner point.
VertexGeometry = _collections.namedtuple("VertexGeometry", "corners")


class Vertex(object):
    """Provides a view onto a vertex of the grid."""

    def __init__(self, grid, index):
        """Create a vertex view for index *index* of *grid*."""
        self._grid = grid
        self._index = index

    @property
    def index(self):
        """Index of the vertex."""
        return self._index

    @property
    def geometry(self):
        """Return geometry (corners is a 3x1 coordinate array)."""
        return VertexGeometry(self._grid.vertices[:, self.index].reshape(3, 1))
class EdgeGeometry(object):
    """Geometry information (corners and length) for a grid edge."""

    def __init__(self, corners):
        """Store the 3x2 corner array and precompute the edge length."""
        self._corners = corners
        start, end = corners[:, 0], corners[:, 1]
        self._volume = _np.linalg.norm(end - start)

    @property
    def corners(self):
        """Return the 3x2 array of edge endpoints."""
        return self._corners

    @property
    def volume(self):
        """Return length of the edge."""
        return self._volume
class Edge(object):
    """Provides a view onto an edge of the grid."""

    def __init__(self, grid, index):
        """Create an edge view for index *index* of *grid*."""
        self._grid = grid
        self._index = index

    @property
    def index(self):
        """Return the index of the edge."""
        return self._index

    @property
    def geometry(self):
        """Return geometry (the 3x2 array of the edge's endpoints)."""
        grid = self._grid
        return EdgeGeometry(grid.vertices[:, grid.edges[:, self.index]])
def get_element_to_vertex_matrix(vertices, elements):
    """Return the sparse vertex-by-element incidence matrix.

    Entry (v, e) is 1 when vertex v belongs to element e.
    """
    from scipy.sparse import csr_matrix

    nelements = elements.shape[1]
    nvertices = vertices.shape[1]
    # Column-major flattening lists each element's vertices in turn.
    row_indices = _np.ravel(elements, order="F")
    col_indices = _np.repeat(_np.arange(nelements), 3)
    values = _np.ones(len(row_indices), dtype="uint32")
    return csr_matrix(
        (values, (row_indices, col_indices)),
        shape=(nvertices, nelements),
        dtype="uint32",
    )
def get_element_to_element_matrix(vertices, elements):
    """
    Return element to element matrix.

    Entry (i, j) holds n > 0 exactly when element i and element j
    share n vertices (so the diagonal is 3 for triangles).
    """
    incidence = get_element_to_vertex_matrix(vertices, elements)
    return incidence.T.dot(incidence)
@_numba.njit(locals={"index": _numba.types.int32})
def _compare_array_to_value(array, val):
    """
    Return i such that array[i] == val.

    If val is not found, return -1.
    """
    # Linear scan; callers pass short arrays (typically the 3 vertex
    # indices of an element).
    for index, elem in enumerate(array):
        if elem == val:
            return index
    return -1
@_numba.njit(
    locals={
        "index1": _numba.types.int32,
        "index2": _numba.types.int32,
        "full_index1": _numba.types.int32,
    }
)
def _find_first_common_array_index_pair_from_position(array1, array2, start=0):
    """
    Return first index pair (i, j) such that array1[i] == array2[j].

    Assumes that one index pair satisfying the equality
    always exists. Method checks in array1 from position start
    onwards.

    Raises:
        ValueError: if no common value is found.
    """
    for index1 in range(len(array1[start:])):
        # index1 is relative to the slice; translate back to array1.
        full_index1 = index1 + start
        index2 = _compare_array_to_value(array2, array1[full_index1])
        if index2 != -1:
            return (full_index1, index2)
    raise ValueError("Could not find a common index pair.")
@_numba.njit(locals={"offset": _numba.types.int32})
def _find_two_common_array_index_pairs(array1, array2):
    """Return two index pairs (i, j) such that array1[i] == array2[j].

    The pairs are returned as the columns of a 2x2 int32 array.
    Assumes at least two common values exist.
    """
    offset = 0
    index_pairs = _np.empty((2, 2), dtype=_np.int32)
    index_pairs[:, 0] = _find_first_common_array_index_pair_from_position(
        array1, array2, offset
    )
    offset = index_pairs[0, 0] + 1  # Next search starts behind found pair
    index_pairs[:, 1] = _find_first_common_array_index_pair_from_position(
        array1, array2, offset
    )
    return index_pairs
@_numba.njit()
def _get_shared_vertex_information_for_two_elements(elements, elem0, elem1):
    """
    Return tuple (i, j) of local vertex indices.

    The tuple has the property elements[i, elem0] == elements[j, elem1].
    """
    i, j = _find_first_common_array_index_pair_from_position(
        elements[:, elem0], elements[:, elem1]
    )
    return (i, j)
@_numba.njit()
def _get_shared_edge_information_for_two_elements(elements, elem0, elem1):
    """
    Return 2x2 array of int32 indices.

    Each column in the returned indices is a pair (i, j) such that
    elements[i, elem0] == elements[j, elem1]; the two columns describe
    the two endpoints of the shared edge.
    """
    index_pairs = _find_two_common_array_index_pairs(
        elements[:, elem0], elements[:, elem1]
    )
    # Ensure that order of indices is the same as Bempp 3
    if index_pairs[1, 1] < index_pairs[1, 0]:
        # Swap the two columns so the second-element indices ascend.
        for i in range(2):
            tmp = index_pairs[i, 0]
            index_pairs[i, 0] = index_pairs[i, 1]
            index_pairs[i, 1] = tmp
    return index_pairs
@_numba.njit()
def _find_vertex_adjacency(elements, test_indices, trial_indices):
    """
    Return for element pairs the vertex adjacency.

    The return array vertex_adjacency has 4 rows, such that for index j
    the element vertex_adjacency[0, j] is connected with
    vertex_adjacency[1, j] via the vertex vertex_adjacency[2, j] in
    the first element and the vertex vertex_adjacency[3, j] in the
    second element. The vertex numbers here are local
    numbers (0, 1 or 2).
    """
    number_of_indices = len(test_indices)
    adjacency = _np.zeros((4, number_of_indices), dtype=_np.int32)
    for index in range(number_of_indices):
        # Now find the position of the shared vertex
        test_index = test_indices[index]
        trial_index = trial_indices[index]
        i, j = _get_shared_vertex_information_for_two_elements(
            elements, test_index, trial_index
        )
        adjacency[:, index] = (test_index, trial_index, i, j)
    return adjacency
@_numba.njit()
def _find_edge_adjacency(elements, elem0_indices, elem1_indices):
    """
    Return for element pairs the edge adjacency.

    The return array edge_adjacency has 6 rows, such that for index
    j the element edge_adjacency[0, j] is connected with
    edge_adjacency[1, j] via the two vertices edge_adjacency[2:4, j]
    in the first element and the vertices edge_adjacency[4:6, j]
    in the second element. The vertex numbers here are local
    numbers (0, 1 or 2).
    """
    number_of_indices = len(elem0_indices)
    adjacency = _np.zeros((6, number_of_indices), dtype=_np.int32)
    for index in range(number_of_indices):
        elem0 = elem0_indices[index]
        elem1 = elem1_indices[index]
        # 2x2 array: row 0 holds the local indices in elem0, row 1 the
        # matching local indices in elem1.
        index_pairs = _get_shared_edge_information_for_two_elements(
            elements, elem0, elem1
        )
        adjacency[0, index] = elem0
        adjacency[1, index] = elem1
        adjacency[2:, index] = index_pairs.flatten()
    return adjacency
def _get_element_to_element_vertex_count(element_to_element_matrix):
"""
Return a tuple of arrays (elements1, elements2, nvertices).
The element elements1[i] is connected with elements2[i] via
nvertices[i] vertices.
"""
coo_matrix = element_to_element_matrix.tocoo()
elements1 = coo_matrix.row
elements2 = coo_matrix.col
nvertices = coo_matrix.data
return (elements1, elements2, nvertices)
def _element_filter(elements1, elements2, nvertices, filter_type):
"""
Return element pairs according to a filter condition.
Takes an array (elements1, elements2, nvertices)
such that elements1[i] and elements2[i] are connected
via nvertices[i] vertices and returns a tuple
(new_elem1, new_elem2) of all element pairs connected via
vertices (filter_type=VERTICES) or edges (filter_type=EDGES).
"""
# Elements connected via edges share two vertices
filtered_indices = _np.argwhere(nvertices == filter_type).flatten()
return (elements1[filtered_indices], elements2[filtered_indices])
@_numba.njit()
def _sort_values(val1, val2):
    """Return the two input values as a tuple in ascending order."""
    if val2 < val1:
        return val2, val1
    return val1, val2
@_numba.njit()
def _vertices_from_edge_index(element, local_index):
    """Return the two vertex indices of an element edge in ascending order.

    element is a 3-tuple of vertex indices; local_index selects one of the
    three element edges via the _EDGE_LOCAL lookup table.
    """
    first, second = element[_EDGE_LOCAL[local_index]]
    return _sort_values(first, second)
def grid_from_segments(grid, segments):
    """Return a new grid built from the given segments of an existing grid.

    Elements whose domain index is contained in ``segments`` are kept;
    the vertex array is compacted and the element connectivity renumbered
    accordingly.
    """
    # BUG FIX: NumPy removed the deprecated ``_np.bool``/``_np.int`` aliases
    # (deprecated in 1.20, removed in 1.24), which made this function raise
    # AttributeError on current NumPy. Use the builtin types instead.
    element_in_new_grid = _np.zeros(grid.number_of_elements, dtype=bool)
    for elem in range(grid.number_of_elements):
        if grid.domain_indices[elem] in segments:
            element_in_new_grid[elem] = True
    new_elements = grid.elements[:, element_in_new_grid]
    new_domain_indices = grid.domain_indices[element_in_new_grid]
    vertex_indices = list(set(new_elements.ravel()))
    new_vertices = grid.vertices[:, vertex_indices]
    # Map old vertex numbers to the compacted numbering; -1 marks vertices
    # that are not used by any retained element.
    new_vertex_map = -_np.ones(grid.number_of_vertices, dtype=int)
    new_vertex_map[vertex_indices] = _np.arange(len(vertex_indices))
    new_elements = new_vertex_map[new_elements.ravel()].reshape(3, -1)
    return Grid(new_vertices, new_elements, new_domain_indices)
@_numba.njit
def _create_barycentric_connectivity_array(
    vertices, elements, element_edges, edges, number_of_edges
):
    """Return the vertices and elements of refined barycentric grid."""
    number_of_vertices = vertices.shape[1]
    number_of_elements = elements.shape[1]
    # Refinement adds one midpoint vertex per element plus one per edge.
    new_number_of_vertices = number_of_vertices + number_of_elements + number_of_edges
    new_vertices = _np.empty((3, new_number_of_vertices), dtype=_np.float64)
    # NOTE(review): new_elements stores vertex indices but is allocated as
    # float64 — confirm that downstream consumers cast it back to integers.
    new_elements = _np.empty((3, 6 * number_of_elements), dtype=_np.float64)
    # Maps an edge index to its midpoint vertex index; -1 = not yet created.
    edge_to_vertex = -_np.ones(number_of_edges)
    new_vertices[:, :number_of_vertices] = vertices
    # Midpoint vertex indices of the three edges of the current element.
    local_vertex_ids = _np.empty(3, dtype=_np.uint32)
    for index in range(number_of_elements):
        # Create barycentric mid-point
        new_vertices[:, number_of_vertices] = (
            1.0 / 3 * _np.sum(vertices[:, elements[:, index]], axis=1)
        )
        midpoint_index = number_of_vertices
        number_of_vertices += 1
        for local_index in range(3):
            edge_index = element_edges[local_index, index]
            if edge_to_vertex[edge_index] > -1:
                # Vertex already created
                local_vertex_ids[local_index] = edge_to_vertex[edge_index]
            else:
                # Vertex needs to be created
                new_vertices[:, number_of_vertices] = 0.5 * _np.sum(
                    vertices[:, edges[:, edge_index]], axis=1
                )
                local_vertex_ids[local_index] = number_of_vertices
                edge_to_vertex[edge_index] = number_of_vertices
                number_of_vertices += 1
        # Have created all necessary vertices. Now create the elements.
        # New barycentric elements are created in anti-clockwise order
        # starting with the triangle at the first vertex of the triangle
        # and sharing a segment with the edge 0. The second triangle is
        # along the same edge, but adjacent to vertex 1, and so on.
        new_elements[0, 6 * index + 0] = elements[0, index]
        new_elements[1, 6 * index + 0] = local_vertex_ids[0]
        new_elements[2, 6 * index + 0] = midpoint_index
        new_elements[0, 6 * index + 1] = elements[1, index]
        new_elements[1, 6 * index + 1] = midpoint_index
        new_elements[2, 6 * index + 1] = local_vertex_ids[0]
        new_elements[0, 6 * index + 2] = elements[1, index]
        new_elements[1, 6 * index + 2] = local_vertex_ids[2]
        new_elements[2, 6 * index + 2] = midpoint_index
        new_elements[0, 6 * index + 3] = elements[2, index]
        new_elements[1, 6 * index + 3] = midpoint_index
        new_elements[2, 6 * index + 3] = local_vertex_ids[2]
        new_elements[0, 6 * index + 4] = elements[2, index]
        new_elements[1, 6 * index + 4] = local_vertex_ids[1]
        new_elements[2, 6 * index + 4] = midpoint_index
        new_elements[0, 6 * index + 5] = elements[0, index]
        new_elements[1, 6 * index + 5] = midpoint_index
        new_elements[2, 6 * index + 5] = local_vertex_ids[1]
    return new_vertices, new_elements
def barycentric_refinement(grid):
    """Return the barycentric refinement of a given grid.

    Each element is split into six barycentric sub-elements, so the domain
    index of every coarse element is repeated six times in the new grid.
    """
    refined_vertices, refined_elements = _create_barycentric_connectivity_array(
        grid.vertices,
        grid.elements,
        grid.element_edges,
        grid.edges,
        grid.number_of_edges,
    )
    refined_domain_indices = _np.repeat(grid.domain_indices, 6)
    return Grid(refined_vertices, refined_elements, refined_domain_indices, scatter=False)
def union(grids, domain_indices=None, swapped_normals=None):
    """
    Return the union of a given list of grids.

    Parameters
    ----------
    grids: list
        A list of grid objects.
    domain_indices : list
        Attach a list of domain indices to the new grid such that
        grid[j] receives the domain index domain_indices[j]
    swapped_normals : list of boolean
        A list of the form [False, True, ...] specifying for each grid
        whether its normals should be swapped (True) or not (False).
        This is helpful if one grid is defined to be inside another grid.

    This method returns a new grid object, which is the union of the
    input grid objects.
    """
    from bempp.api.grid.grid import Grid

    total_vertices = sum(grid.number_of_vertices for grid in grids)
    total_elements = sum(grid.number_of_elements for grid in grids)
    vertices = _np.empty((3, total_vertices), dtype="float64")
    elements = _np.empty((3, total_elements), dtype="uint32")
    all_domain_indices = _np.empty(total_elements, dtype="uint32")
    if domain_indices is None:
        domain_indices = range(len(grids))
    if swapped_normals is None:
        swapped_normals = len(grids) * [False]
    vertex_offset = 0
    element_offset = 0
    for grid, domain_index, swap in zip(grids, domain_indices, swapped_normals):
        nelements = grid.number_of_elements
        nvertices = grid.number_of_vertices
        vertices[:, vertex_offset : vertex_offset + nvertices] = grid.vertices
        # Exchanging two vertices of each element reverses its orientation
        # and hence the direction of its normal.
        current_elements = grid.elements[[0, 2, 1], :] if swap else grid.elements
        # Element connectivity must be shifted by the vertices already copied.
        elements[:, element_offset : element_offset + nelements] = (
            current_elements + vertex_offset
        )
        all_domain_indices[element_offset : element_offset + nelements] = domain_index
        vertex_offset += nvertices
        element_offset += nelements
    return Grid(vertices, elements, all_domain_indices)
def enumerate_vertex_adjacent_elements(grid, support_elements):
    """
    Enumerate in anti-clockwise order all elements adjacent to all vertices in support.
    Returns a list [neighbors_0, neighbors_1, ...], where neighbors_i is a list
    [(elem_index, local_ind1, local_ind2), ...] of tuples, where elem_index is an
    element in the support that is connected with vertex i. local_ind1 and local_ind2 are
    the local indices of the two edges that are adjacent to vertex i. They are sorted in
    anti-clockwise order with respect to the natural normal directions of the elements.
    Moreover, all tuples represent elements in anti-clockwise order.
    """
    # For each grid vertex, collect (element, local_edge) pairs of support
    # elements whose edges touch that vertex.
    vertex_edges = [[] for _ in range(grid.vertices.shape[1])]
    for element_index in support_elements:
        for local_index, edge_index in enumerate(grid.element_edges[:, element_index]):
            for ind in range(2):
                vertex_edges[grid.edges[ind, edge_index]].append(
                    (element_index, local_index)
                )
    # Now sort each list so that edges appear in anti-clockwise order according
    # to neighboring edges.
    def sort_neighbors(grid_data, neighbors):
        """Implement the sorting of a neighbors list."""
        # Swap the edges in each element so
        # that they have edges in anti-clockwise order
        locally_sorted_neighbors = []
        while neighbors:
            # Take first element in list
            elem1 = neighbors.pop()
            for index, elem2 in enumerate(neighbors):
                # Find index of next list element associated
                # with the same grid element
                if elem2[0] == elem1[0]:
                    neighbors.pop(index)
                    break
            # Check if the two edges in the found element entries
            # are in clockwise or anti-clockwise order.
            # Resort accordingly
            if elem1[1] == (1 + elem2[1]) % 3:
                locally_sorted_neighbors.append((elem1[0], elem2[1], elem1[1]))
            else:
                locally_sorted_neighbors.append((elem1[0], elem1[1], elem2[1]))
        # locally sorted neighbors now has triplets (elem_index, local_ind1, local_ind2) of
        # one element index and two associated edge indices that are anti-clockwise sorted.
        sorted_neighbors = []
        sorted_neighbors.append(locally_sorted_neighbors.pop())
        # Grow the sorted chain at either end until all triplets are placed.
        while locally_sorted_neighbors:
            found = False
            for index, elem in enumerate(locally_sorted_neighbors):
                # Check if element is successor of last element in sorted list
                last = sorted_neighbors[-1]
                first = sorted_neighbors[0]
                if (
                    grid_data.element_edges[elem[1], elem[0]]
                    == grid_data.element_edges[last[2], last[0]]
                ):
                    locally_sorted_neighbors.pop(index)
                    found = True
                    sorted_neighbors.append(elem)
                    break
                # Otherwise check if it is the predecessor of the first entry.
                if (
                    grid_data.element_edges[elem[2], elem[0]]
                    == grid_data.element_edges[first[1], first[0]]
                ):
                    locally_sorted_neighbors.pop(index)
                    found = True
                    sorted_neighbors.insert(0, elem)
                    break
            if not found:
                raise Exception(
                    "Two elements seem to be connected only by a vertex, not by an edge."
                )
        return sorted_neighbors
    for vertex_index, neighbors in enumerate(vertex_edges):
        # First sort by element
        if not neighbors:
            # Continue if empty
            continue
        vertex_edges[vertex_index] = sort_neighbors(grid.data(), neighbors)
    return vertex_edges
@_numba.njit
def _numba_enumerate_edges(elements, edge_tuple_to_index):
    """
    Enumerate all edges in a given grid.
    Assigns a tuple (edges, element_edges) to
    self._edges and self._element_edges.
    element_edges is an array a such that a[i, j] is the
    index of the ith edge in the jth elements, and edges
    is a 2 x nedges array such that the jth column stores the
    two nodes associated with the jth edge.
    """
    edges = []
    number_of_elements = elements.shape[1]
    element_edges = _np.zeros((3, number_of_elements), dtype=_np.int32)
    number_of_edges = 0
    for elem_index in range(number_of_elements):
        elem = elements[:, elem_index]
        for local_index in range(3):
            # Canonical (sorted) vertex pair uniquely identifying this edge,
            # so the same edge seen from two elements maps to one index.
            edge_tuple = _vertices_from_edge_index(elem, local_index)
            if edge_tuple not in edge_tuple_to_index:
                # First time this edge is seen: assign the next free index.
                edge_index = number_of_edges
                edge_tuple_to_index[edge_tuple] = edge_index
                edges.append(edge_tuple)
                number_of_edges += 1
            else:
                edge_index = edge_tuple_to_index[edge_tuple]
            element_edges[local_index, elem_index] = edge_index
    return _np.array(edges, dtype=_np.int32).T, element_edges
def _grid_scatter_worker(grid_id, array_proxies):
    """Assign a new grid on the worker."""
    from bempp.api.utils import pool
    from bempp.api.grid.grid import Grid
    from bempp.api import log
    # Reconstruct the raw grid arrays from the shared-memory proxies.
    vertices, elements, domain_indices = pool.from_buffer(array_proxies)
    # if not pool.has_key(grid_id):
    if grid_id not in pool:
        # Grid not yet cached on this worker: rebuild it. The .copy() calls
        # detach the arrays from the proxy buffers before they are stored.
        pool.insert_data(
            grid_id,
            Grid(vertices.copy(), elements.copy(), domain_indices.copy(), grid_id),
        )
        log(f"Copied grid with id {grid_id} to worker {pool.get_id()}", "debug")
    else:
        log(f"Use cached grid with id {grid_id} on worker {pool.get_id()}", "debug")
@_numba.njit
def grid_to_points(grid_data, local_points):
    """
    Map a grid to an array of points.

    Returns an (N, 3) array of global coordinates obtained by mapping the
    given local (reference-triangle) points into every element of the grid.
    Points are stored consecutively element by element, i.e. the result has
    the form [v_1^1, v_2^1, ..., v_M^1, v_1^2, v_2^2, ...], where v_i^j is
    the ith point mapped into the jth element.

    Parameters
    ----------
    grid_data : GridData
        A Bempp GridData object.
    local_points : np.ndarray
        (2, M) array of local coordinates.
    """
    nelements = grid_data.elements.shape[1]
    npoints = local_points.shape[1]
    points = _np.empty((npoints * nelements, 3), dtype=_np.float64)
    for element in range(nelements):
        # Affine map: first vertex of the element plus the element Jacobian
        # applied to the local coordinates.
        mapped = (
            _np.expand_dims(grid_data.vertices[:, grid_data.elements[0, element]], 1)
            + grid_data.jacobians[element].dot(local_points)
        )
        points[npoints * element : npoints * (element + 1), :] = mapped.T
    return points
|
from django.contrib.auth.models import User
from django.http import Http404
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework import permissions, viewsets, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework_simplejwt.views import TokenObtainPairView
from api.inventory_helpers import InventoryPagination, InventoryOrdering
from api.models import Task, JobTemplate, Inventory, Configuration
from api.permissions import ConfigurationPermission
from api.serializers import TaskSerializer, JobTemplateSerializer, InventorySerializer, UserSerializer, \
EnhancedTokenObtainPairSerializer
class TaskViewSet(viewsets.ModelViewSet):
    """
    The Task endpoint lists all tasks or view a single task. It also provides options to run a task sync/async
    and you can abort scheduled tasks
    """
    queryset = Task.objects.all().order_by('-id')  # newest tasks first
    serializer_class = TaskSerializer
    permission_classes = [permissions.DjangoModelPermissions]
    filter_backends = [filters.SearchFilter, filters.OrderingFilter, DjangoFilterBackend]
    filterset_fields = ['status', 'template__name', 'inventory__name', 'created_by__username', 'is_template']
    search_fields = ['name']
    ordering_fields = ['id', 'name', 'status', 'date_scheduled', 'date_started', 'date_finished', 'inventory']
    @action(detail=True, methods=['POST'])
    def run(self, request, pk):
        """Run the task synchronously and return the updated task."""
        task = self.get_object()
        task.run_task()
        serializer = self.get_serializer(task)
        return Response(serializer.data)
    @action(detail=True, methods=['POST'])
    def run_async(self, request, pk):
        """Schedule the task for asynchronous execution; respond 202 Accepted."""
        task = self.get_object()
        task.schedule()
        return Response(status=status.HTTP_202_ACCEPTED)
    @action(detail=True, methods=['PUT'])
    def abort(self, request, pk):
        """Abort a scheduled task and return its updated representation."""
        task = self.get_object()
        task.abort()
        serializer = self.get_serializer(task)
        return Response(serializer.data)
class JobTemplateViewSet(viewsets.ModelViewSet):
    """
    The JobTemplate endpoint lists all available JobTemplates as well as details of a single JobTemplate
    """
    # Standard model CRUD, guarded by Django model permissions.
    queryset = JobTemplate.objects.all()
    serializer_class = JobTemplateSerializer
    permission_classes = [permissions.DjangoModelPermissions]
    filter_backends = [filters.SearchFilter, filters.OrderingFilter, DjangoFilterBackend]
    filterset_fields = ['file_name', 'function_name', 'package_path', 'created_by__username']
    search_fields = ['name', 'function_name', 'file_name']
    ordering_fields = ['id', 'name', 'package_path', 'file_name', 'function_name', 'created_by__username']
class InventoryViewSet(viewsets.ModelViewSet):
    """
    Inventory endpoint. List all inventories, list all or a single host for a defined inventory. List all groups of
    an inventory.
    """
    queryset = Inventory.objects.all()
    serializer_class = InventorySerializer
    permission_classes = [permissions.DjangoModelPermissions]
    pagination_class = InventoryPagination
    filterset_fields = ['groups__contains', 'platform__contains', 'name__contains', 'hostname__contains']
    ordering_fields = ['name', 'hostname', 'platform']

    @action(detail=True, methods=['GET'])
    def hosts(self, request, pk):
        """Return a paginated, filtered and ordered host list for one inventory."""
        search_fields = ['name__contains', 'hostname__contains']
        inventory = self.get_object()
        # Collect only supported filter parameters with non-empty values
        # (previously done with a side-effecting conditional expression).
        query_params = [
            {key: value}
            for key, value in request.query_params.items()
            if key in self.filterset_fields and value
        ]
        search = request.query_params.get('search', '')
        queryset = inventory.get_hosts(query_params, search_fields, search)
        queryset = InventoryOrdering().filter_queryset(request, queryset, self)
        paginator = self.pagination_class()
        data = paginator.paginate_queryset(queryset=queryset, request=request)
        return paginator.get_paginated_response(data)

    # allows url pattern: /api/inventory/{inventory_id}/host/{name}
    @action(detail=True, methods=['GET'], name='hosts', url_path='hosts/(?P<name>[a-z0-9.-]+)')
    def host_detail(self, request, pk, name=None):
        """Return the details of a single named host, or 404 if unknown."""
        inventory = self.get_object()
        try:
            host_detail = inventory.get_host_detail(name)
            return Response(host_detail)
        except LookupError:
            # Unknown host name in this inventory.
            raise Http404

    @action(detail=True, methods=['GET'])
    def groups(self, request, pk):
        """Return all groups of an inventory."""
        inventory = self.get_object()
        groups = inventory.get_groups()
        return Response(groups)
class UserViewSet(viewsets.ModelViewSet):
    """
    The user endpoint provides a list of all users and lets you view single users
    """
    # Exposes Django's built-in User model, guarded by model permissions.
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = [permissions.DjangoModelPermissions]
class ConfigurationView(viewsets.ViewSet):
    """
    Shows the global Nornir configuration. Users of group superuser can also post a new configuration.
    """
    permission_classes = [ConfigurationPermission]
    def list(self, request, format=None):
        """Return the current global configuration (GET)."""
        configuration = Configuration.get()
        return Response(configuration)
    def create(self, request, format=None):
        """Replace the global configuration with the posted data (POST)."""
        configuration = Configuration.set(request.data)
        return Response(configuration)
class EnhancedTokenObtainPairView(TokenObtainPairView):
    """
    API endpoint used to get and renew JWT
    """
    # Only swaps in the project's serializer; behavior otherwise inherited.
    serializer_class = EnhancedTokenObtainPairSerializer
|
"""Unit tests to test lsf configuration """
# pylint: disable=W0703
# pylint: disable=R0904
import os
import sys
import unittest
import ConfigParser
import logging
# setup system library path
pathname = os.path.realpath('../')
sys.path.insert(0, pathname)
from osg_configure.configure_modules import lsf
from osg_configure.modules.utilities import get_test_config
from osg_configure.modules import exceptions
# NullHandler is only available in Python 2.7+
try:
    NullHandler = logging.NullHandler
except AttributeError:
    class NullHandler(logging.Handler):
        # Fallback no-op handler for Python < 2.7: silently discard records.
        def emit(self, record):
            pass
# Module-wide logger with a NullHandler attached so importing this module
# never triggers "no handlers could be found" warnings.
global_logger = logging.getLogger(__name__)
global_logger.addHandler(NullHandler())
class TestLSF(unittest.TestCase):
    """
    Unit test class to test LSFConfiguration class
    """

    def testParsing(self):
        """
        Test configuration parsing
        """
        config_file = get_test_config("lsf/lsf1.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        attributes = settings.get_attributes()
        options = {'OSG_JOB_MANAGER_HOME': '/opt/lsf',
                   'OSG_LSF_LOCATION': '/opt/lsf',
                   'OSG_JOB_MANAGER': 'LSF'}
        for option in options:
            value = options[option]
            # BUG FIX: dict.has_key() is deprecated and removed in Python 3;
            # the "in" operator is equivalent and works on Python 2 as well.
            self.assertTrue(option in attributes,
                            "Attribute %s missing" % option)
            err_msg = "Wrong value obtained for %s, " \
                      "got %s instead of %s" % (option, attributes[option], value)
            self.assertEqual(attributes[option], value, err_msg)

    def testParsingDisabled(self):
        """
        Test parsing when disabled
        """
        config_file = get_test_config("lsf/lsf_disabled.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        attributes = settings.get_attributes()
        self.assertEqual(len(attributes),
                         0,
                         "Disabled configuration should have no attributes")

    def testParsingIgnored(self):
        """
        Test parsing when ignored
        """
        config_file = get_test_config("lsf/ignored.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        attributes = settings.get_attributes()
        self.assertEqual(len(attributes),
                         0,
                         "Ignored configuration should have no attributes")

    def testMissingLSFLocation(self):
        """
        Test the check_attributes function to see if it catches missing LSF location
        """
        config_file = get_test_config("lsf/missing_location.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        attributes = settings.get_attributes()
        self.assertFalse(settings.check_attributes(attributes),
                         "Did not notice missing LSF location")

    def testMissingLSFProfile(self):
        """
        Test the check_attributes function to see if it catches missing LSF profile
        """
        config_file = get_test_config("lsf/missing_profile.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        # A missing profile should be rejected already at parse time.
        self.assertRaises(exceptions.SettingError,
                          settings.parse_configuration,
                          configuration)

    def testValidSettings(self):
        """
        Test the check_attributes function to see if it works on valid settings
        """
        config_file = get_test_config("lsf/check_ok.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        attributes = settings.get_attributes()
        self.assertTrue(settings.check_attributes(attributes),
                        "Correct settings incorrectly flagged as invalid")

    def testValidSettings2(self):
        """
        Test the check_attributes function to see if it works on valid settings
        """
        config_file = get_test_config("lsf/check_ok2.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        attributes = settings.get_attributes()
        self.assertTrue(settings.check_attributes(attributes),
                        "Correct settings incorrectly flagged as invalid")

    def testServiceList(self):
        """
        Test to make sure right services get returned
        """
        # Enabled configuration should report the standard CE services.
        config_file = get_test_config("lsf/check_ok.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        services = settings.enabled_services()
        expected_services = set(['condor-ce', 'globus-gridftp-server'])
        self.assertEqual(services, expected_services,
                         "List of enabled services incorrect, " +
                         "got %s but expected %s" % (services, expected_services))
        # A disabled configuration should enable no services at all.
        config_file = get_test_config("lsf/lsf_disabled.ini")
        configuration = ConfigParser.SafeConfigParser()
        configuration.read(config_file)
        settings = lsf.LSFConfiguration(logger=global_logger)
        try:
            settings.parse_configuration(configuration)
        except Exception as e:
            self.fail("Received exception while parsing configuration: %s" % e)
        services = settings.enabled_services()
        expected_services = set()
        self.assertEqual(services, expected_services,
                         "List of enabled services incorrect, " +
                         "got %s but expected %s" % (services, expected_services))
if __name__ == '__main__':
    # When run as a script, also echo ERROR-level log records to the console
    # in addition to the module's NullHandler.
    console = logging.StreamHandler()
    console.setLevel(logging.ERROR)
    global_logger.addHandler(console)
    unittest.main()
|
import model.attention as attention
from model.language_model import WordEmbedding, QuestionEmbedding
from model.classifier import SimpleClassifier
from utilities import config
from torch.nn.functional import binary_cross_entropy_with_logits as bce_loss
from model.vqa_debias_loss_fuctions import *
from model.fc import MLP, FCNet
# def bce_loss(input, target, mean=True):
# """
# Function that measures Binary Cross Entropy between target and output logits:
# """
# if not target.is_same_size(input):
# raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
# max_val = (-input).clamp(min=0)
# loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
# loss = loss.sum(dim=1)
# return loss.mean() if mean else loss
class BaseModel_with_Onestep(nn.Module):
    """One-step VQA base model.

    Combines word/question embeddings, visual attention and a classifier,
    with an optional debiasing loss (config.use_debias) or a RUBi
    question-only branch (config.use_rubi).
    """
    def __init__(self, w_emb, q_emb, v_att, classifier, debias_loss_fn, extra_c1, extra_c2):
        super(BaseModel_with_Onestep, self).__init__()
        self.w_emb = w_emb
        self.q_emb = q_emb
        self.v_att = v_att
        self.classifier = classifier
        self.debias_loss_fn = debias_loss_fn
        # Question-only classifiers used by RUBi (None when RUBi is disabled).
        self.extra_c1 = extra_c1
        self.extra_c2 = extra_c2
    def forward(self, v, b, q, labels, bias, hint=None, has_hint=None):
        """Forward
        v: [batch, num_objs, obj_dim]
        b: [batch, num_objs, b_dim]
        q: [batch_size, seq_length]
        *_v_emb: [batch, g*v_dim], mask_weight: [batch, g]
        return: logits, not probs
        """
        w_emb = self.w_emb(q)
        q_emb = self.q_emb(w_emb)  # [batch, q_dim]
        v_emb, v_att, mask_v_emb = self.v_att(v, q_emb, hint)  # [batch, v_dim]
        if config.att_norm:
            v_emb = attention.apply_norm_attention(v, v_att, mode='rand')
        joint_repr, logits = self.classifier(q_emb, v_emb)
        debias_loss = torch.zeros(1)
        if labels is not None:
            if config.use_debias:
                debias_loss = self.debias_loss_fn(joint_repr, logits, bias, labels, has_hint)
            elif config.use_rubi:
                # RUBi: gate the fused logits with a question-only prediction.
                q_pred = self.extra_c1(q_emb.detach())
                q_out = self.extra_c2(q_pred)
                rubi_logits = logits * torch.sigmoid(q_pred)
                if has_hint is not None:
                    # BUG FIX: the third positional argument of
                    # binary_cross_entropy_with_logits is `weight`, so the old
                    # call bce_loss(..., False) passed weight=False instead of
                    # disabling reduction. Use reduction='none' and reduce per
                    # sample, matching BaseModel_with_Twostep.
                    debias_loss = bce_loss(rubi_logits, labels, reduction='none') + bce_loss(q_out, labels, reduction='none')
                    debias_loss = (debias_loss.sum(dim=1) * has_hint).sum() / has_hint.sum()
                else:
                    debias_loss = bce_loss(rubi_logits, labels) + bce_loss(q_out, labels)
                    debias_loss *= labels.size(1)
        return logits, debias_loss, v_att
class BaseModel_with_Twostep(nn.Module):
    # Two-step VQA base model: word/question embeddings + visual attention
    # + classifier, with optional debiasing loss or RUBi branch.
    def __init__(self, w_emb, q_emb, v_att, classifier, debias_loss_fn ,extra_c1, extra_c2):
        super(BaseModel_with_Twostep, self).__init__()
        self.w_emb = w_emb
        self.q_emb = q_emb
        self.v_att = v_att
        self.classifier = classifier
        self.debias_loss_fn = debias_loss_fn
        # Question-only classifiers used by RUBi (None when RUBi is disabled).
        self.extra_c1 = extra_c1
        self.extra_c2 = extra_c2
    def forward(self, v, b, q, labels, bias, hint=None, has_hint=None):
        """Forward
        v: [batch, num_objs, obj_dim]
        b: [batch, num_objs, b_dim]
        q: [batch_size, seq_length]
        *_v_emb: [batch, g*v_dim], mask_weight: [batch, g]
        return: logits, not probs
        """
        w_emb = self.w_emb(q)
        q_emb = self.q_emb(w_emb) # [batch, q_dim]
        v_emb, v_att = self.v_att(v, q_emb, hint) # [batch, v_dim]
        if config.att_norm:
            v_emb = attention.apply_norm_attention(v, v_att, mode='avg')
        joint_repr, logits = self.classifier(q_emb, v_emb)
        debias_loss = torch.zeros(1)
        if labels is not None:
            if config.use_debias:
                debias_loss = self.debias_loss_fn(joint_repr, logits, bias, labels, has_hint)
            elif config.use_rubi:
                # RUBi: gate the fused logits with a question-only prediction.
                q_pred = self.extra_c1(q_emb.detach())
                q_out = self.extra_c2(q_pred)
                rubi_logits = logits*torch.sigmoid(q_pred)
                if has_hint is not None:
                    # Per-sample losses, masked so only hinted samples count.
                    debias_loss = bce_loss(rubi_logits, labels, reduction='none') + bce_loss(q_out, labels, reduction='none')
                    debias_loss = (debias_loss.sum(dim=1) * has_hint).sum()/ has_hint.sum()
                else:
                    debias_loss = bce_loss(rubi_logits, labels) + bce_loss(q_out, labels)
                    debias_loss *= labels.size(1)
        return logits, debias_loss, v_att
def build_baseline_with_onestep(embeddings, num_ans_candidates, debias_mode='LearnedMixin'):
    """Build a one-step baseline VQA model.

    embeddings: pretrained word embedding weights.
    num_ans_candidates: size of the answer vocabulary (classifier output).
    debias_mode: name of the debiasing loss class; restricted by the assert.
    """
    assert debias_mode in ['BiasProduct', 'ReweightByInvBias', 'LearnedMixin', 'Plain']
    vision_features = config.output_features
    visual_glimpses = config.visual_glimpses
    hidden_features = config.hid_dim
    question_features = config.hid_dim
    w_emb = WordEmbedding(
        embeddings,
        dropout=0.0
    )
    q_emb = QuestionEmbedding(
        w_dim=300,
        hid_dim=question_features,
        nlayers=1,
        bidirect=False,
        dropout=0.0
    )
    v_att = attention.Attention(
        v_dim=vision_features,
        q_dim=question_features,
        hid_dim=hidden_features,
        glimpses=visual_glimpses,
    )
    classifier = SimpleClassifier(
        in_dim=(question_features, visual_glimpses * vision_features),
        hid_dim=(hidden_features, hidden_features * 2),
        out_dim=num_ans_candidates,
        dropout=0.5
    )
    # RUBi needs two extra question-only classifiers; otherwise pass None
    # (the model constructor still expects both slots).
    if config.use_rubi:
        c1 = MLP(
            input_dim=question_features,
            dimensions=[1024, 1024, num_ans_candidates],
        )
        c2 = nn.Linear(num_ans_candidates, num_ans_candidates)
    else:
        c1, c2 = None, None
    # NOTE: eval() is acceptable here only because debias_mode is limited to
    # the whitelist asserted above — never pass user input directly.
    debias_loss_fn = eval(debias_mode)()
    # BUG FIX: BaseModel_with_Onestep.__init__ requires extra_c1/extra_c2;
    # the original call omitted them and raised a TypeError at build time.
    return BaseModel_with_Onestep(w_emb, q_emb, v_att, classifier, debias_loss_fn, c1, c2)
def build_baseline_with_twostep(embeddings, num_ans_candidates, debias_mode='LearnedMixin'):
    """Build a two-step baseline VQA model.

    embeddings: pretrained word embedding weights.
    num_ans_candidates: size of the answer vocabulary (classifier output).
    debias_mode: name of the debiasing loss class; restricted by the assert.
    """
    assert debias_mode in ['BiasProduct', 'ReweightByInvBias', 'LearnedMixin', 'Plain']
    vision_features = config.output_features
    visual_glimpses = config.visual_glimpses
    hidden_features = config.hid_dim
    question_features = config.hid_dim
    w_emb = WordEmbedding(
        embeddings,
        dropout=0.0
    )
    q_emb = QuestionEmbedding(
        w_dim=300,
        hid_dim=question_features,
        nlayers=1,
        bidirect=False,
        dropout=0.0
    )
    v_att = attention.Attention(
        v_dim=vision_features,
        q_dim=question_features,
        hid_dim=hidden_features,
        glimpses=visual_glimpses,
    )
    classifier = SimpleClassifier(
        in_dim=(question_features, visual_glimpses * vision_features),
        hid_dim=(hidden_features, hidden_features * 2),
        out_dim=num_ans_candidates,
        dropout=0.5
    )
    # RUBi needs two extra question-only classifiers; otherwise pass None.
    if config.use_rubi:
        c1 = MLP(
            input_dim=question_features,
            dimensions=[1024, 1024, num_ans_candidates],
        )
        c2 = nn.Linear(num_ans_candidates, num_ans_candidates)
    else:
        c1, c2 = None, None
    # Add the loss_fn based our arguments
    # NOTE: eval() is acceptable here only because debias_mode is limited to
    # the whitelist asserted above — never pass user input directly.
    debias_loss_fn = eval(debias_mode)(hidden_features if config.fusion_type=='mul' else hidden_features*2)
    return BaseModel_with_Twostep(w_emb, q_emb, v_att, classifier, debias_loss_fn, c1, c2)
import os
import pulleffect
import unittest
import tempfile
import json
import flask
import requests
from mock import patch
from mock import MagicMock
import pulleffect.lib.timeclock
from pulleffect.lib.utilities import Widgets
import logging
class TestCases(unittest.TestCase):
    def setUp(self):
        """Before each test, set up a blank database
        """
        # mkstemp returns (fd, path); keep the fd so tearDown can close it.
        self.db_fd, pulleffect.app.config['DATABASE'] = tempfile.mkstemp()
        pulleffect.app.config['TESTING'] = True
        self.app = pulleffect.app.test_client()
        # Push a request context so code under test can use flask globals.
        self.ctx = pulleffect.app.test_request_context()
        self.ctx.push()
    def tearDown(self):
        """Get rid of the database again after each test."""
        # Close the descriptor and remove the temp file created in setUp.
        os.close(self.db_fd)
        os.unlink(pulleffect.app.config['DATABASE'])
        self.ctx.pop()
def test_post_single_message(self):
"""POST a single message should succeed"""
message = json.dumps({
"device": "wamdamdam",
"device_type": "brian",
"location": "hamsterville",
"severity": "seriously important",
"description": "this is a description"
})
rv = self.app.post('/messages', data = message,
follow_redirects=True,
content_type='application/json')
assert b'id' in rv.data
def test_post_empty_message(self):
"""POST an empty message should result in error"""
rv = self.app.post('/messages', follow_redirects=True)
assert b'error' in rv.data
def test_post_missing_device_message(self):
"""POST a message missing the device field should give error"""
message = json.dumps({
"device_type": "brian",
"location": "hamsterville",
"severity": "seriously important",
"description": "this is a description"
})
rv = self.app.post(
'/messages', data=message,
follow_redirects=True, content_type='application/json')
assert b'Submitted message is missing required fields' in rv.data
def test_post_missing_device_type_message(self):
"""POST a message with device type field missing, should give error"""
message = json.dumps(
{
"device": "wamdamdam",
"location": "hamsterville",
"severity": "seriously important",
"description": "this is a description"
})
rv = self.app.post('/messages', data=message,
follow_redirects=True,
content_type='application/json')
assert b'Submitted message is missing required fields' in rv.data
def test_post_missing_location_message(self):
"""POST a message missing location field should give error"""
message = json.dumps(
{
"device": "wamdamdam",
"device_type": "brian",
"severity": "seriously important",
"description": "this is a description"
})
rv = self.app.post('/messages', data=message,
follow_redirects=True,
content_type='application/json')
assert b'Submitted message is missing required fields' in rv.data
def test_post_missing_severity_message(self):
"""POST a message missing severity field should give error"""
message = json.dumps({
"device": "wamdamdam",
"device_type": "brian",
"location": "hamsterville",
"description": "this is a description"
})
rv = self.app.post('/messages', data=message,
follow_redirects=True,
content_type='application/json')
assert b'Submitted message is missing required fields' in rv.data
def test_post_missing_description_message(self):
"""POST a message missing description field should give error"""
message = json.dumps({
"device": "wamdamdam",
"device_type": "brian",
"location": "hamsterville",
"severity": "seriously important"
})
rv = self.app.post('/messages', data=message,
follow_redirects=True,
content_type='application/json')
assert b'Submitted message is missing required fields' in rv.data
if __name__ == '__main__':
unittest.main()
|
import os
import sys
from glob import glob
import setuptools
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from distutils.sysconfig import get_config_var, get_python_inc
from distutils.version import LooseVersion
import versioneer
assert LooseVersion(setuptools.__version__) >= LooseVersion("18.0"), \
"Requires `setuptools` version 18.0 or higher."
class build_ext(_build_ext):
    """build_ext that defers the numpy include-dir lookup until build time.

    numpy may only get installed by ``setup_requires`` during this very
    setup run, so it must not be imported at module import time.
    """
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        # NOTE(review): assumes __builtins__ is the builtins *module* (true
        # when setup.py runs as __main__); when imported it is a dict and
        # this attribute assignment would fail -- confirm.
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
def readme():
    """Return the contents of README.rst as a single string."""
    with open("README.rst", "r") as readme_file:
        return readme_file.read()
# Resolve the package version via versioneer and bake it into the Cython
# sources so the compiled extension reports the same version string.
version = versioneer.get_version()
with open("src/version.pxi", "w") as f:
    f.writelines([
        "__version__ = " + "\"" + str(version) + "\""
    ])
# Minimum versions of the build- and run-time dependencies.
cython_dep = ["cython >= 0.23"]
numpy_dep = ["numpy >= 1.7"]
boost_dep = ["boost-cpp >= 1.56"]
# The setup command being run (e.g. "build", "install", "bdist_conda").
# Guarded: the original read sys.argv[1] directly and raised IndexError
# for a bare `python setup.py` invocation.
_command = sys.argv[1] if len(sys.argv) > 1 else ""
# The boost-cpp package is only resolvable when building a conda package.
boost_dep = (boost_dep if _command == "bdist_conda" else [])
# cython/numpy are only needed at setup time for commands that actually
# build or install the extension.
setup_requires = cython_dep + numpy_dep
setup_requires = setup_requires if (_command.startswith("bdist") or
                                    _command.startswith("build") or
                                    _command.startswith("install")) else []
build_requires = cython_dep + numpy_dep + boost_dep
install_requires = numpy_dep + boost_dep
# A conda build ships the compiled extension, so cython is not needed at
# run time in that case.
install_requires += [] if _command == "bdist_conda" else cython_dep
tests_require = cython_dep + numpy_dep
# Header search path: the project's own include/ directory plus the Python
# include directories.
include_dirs = [
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "include"),
    os.path.dirname(get_python_inc()),
    get_python_inc()
]
# Library search path: just LIBDIR, when the interpreter reports one.
library_dirs = list(filter(
    lambda v: v is not None,
    [get_config_var("LIBDIR")]
))
sources = glob("src/*.pxd") + glob("src/*.pyx")
libraries = []
if os.name == "posix":
    # On POSIX the boost container library links by its plain name.
    libraries.append("boost_container")
elif os.name == "nt":
    # On Windows, search every directory on %LIB% for boost_container*.lib
    # and link against the first match.
    libname = "boost_container"
    path = os.environ.get("LIB", "").split(";")
    libmatches = sum(
        list(glob(os.path.join(p, "%s*.lib" % libname)) for p in path), []
    )
    # NOTE(review): raises IndexError when no boost_container*.lib is found
    # on %LIB% -- an explicit error message would be friendlier; confirm.
    library_dirs.append(os.path.dirname(libmatches[0]))
    libraries.append(os.path.splitext(os.path.basename(libmatches[0]))[0])
extra_compile_args = []
setup(
    name="rank_filter",
    version=version,
    description="A simple python module containing an in-place linear rank"
                " filter optimized in C++.",
    long_description=readme(),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: C++',
        'Programming Language :: Cython',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development :: Libraries'
    ],
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/nanshe-org/rank_filter",
    download_url="https://github.com/nanshe-org/rank_filter/archive/v%s.tar.gz"
                 % version,
    license="BSD",
    # Merge versioneer's command classes with the numpy-aware build_ext above.
    cmdclass=dict(
        list(versioneer.get_cmdclass().items()) +
        [
            ('build_ext', build_ext)
        ]
    ),
    setup_requires=setup_requires,
    # NOTE(review): `build_requires` is not a standard setuptools keyword;
    # it may be accepted only by bdist_conda -- confirm.
    build_requires=build_requires,
    install_requires=install_requires,
    tests_require=tests_require,
    test_suite="tests",
    headers=glob("include/*.hxx"),
    ext_modules=[Extension("rank_filter",
                           sources=sources,
                           include_dirs=include_dirs,
                           library_dirs=library_dirs,
                           libraries=libraries,
                           extra_compile_args=extra_compile_args,
                           language="c++")],
    zip_safe=False
)
|
<gh_stars>100-1000
import FlowCal
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import manifold, datasets
from time import time
from MulticoreTSNE import MulticoreTSNE as TSNE
from sklearn.decomposition import PCA
from sklearn.datasets import make_swiss_roll, make_s_curve
def data_prep(data_path, dataset='MNIST', size=10000):
    '''
    Load the requested dataset as a numpy array.

    Input:
        data_path: path of the folder you store all the data needed.
        dataset: the name of the dataset.
        size: the size of the dataset. This is useful when you only
              want to pick a subset of the data
    Output:
        X: the dataset in numpy array
        labels: the labels of the dataset.
    Raises:
        ValueError: if `dataset` is not one of the supported names.
    '''
    if dataset == 'MNIST':
        X = np.load(data_path + '/mnist_images.npy', allow_pickle=True).reshape(70000, 28*28)
        labels = np.load(data_path + '/mnist_labels.npy', allow_pickle=True)
    elif dataset == 'FMNIST':
        X = np.load(data_path + '/fmnist_images.npy', allow_pickle=True).reshape(70000, 28*28)
        labels = np.load(data_path + '/fmnist_labels.npy', allow_pickle=True)
    elif dataset == 'coil_20':
        X = np.load(data_path + '/coil_20.npy', allow_pickle=True).reshape(1440, 128*128)
        labels = np.load(data_path + '/coil_20_labels.npy', allow_pickle=True)
    elif dataset == 'coil_100':
        X = np.load(data_path + '/coil_100.npy', allow_pickle=True).reshape(7200, -1)
        # Fixed: the label path previously had a hard-coded
        # '/usr/xtmp/hyhuang/MNIST' fragment appended to data_path.
        labels = np.load(data_path + '/coil_100_labels.npy', allow_pickle=True)
    elif dataset == 'mammoth':
        with open(data_path + '/mammoth_3d.json', 'r') as f:
            X = json.load(f)
        X = np.array(X)
        with open(data_path + '/mammoth_umap.json', 'r') as f:
            labels = json.load(f)
        labels = labels['labels']
        labels = np.array(labels)
    elif dataset == 'mammoth_50k':
        with open(data_path + '/mammoth_3d_50k.json', 'r') as f:
            X = json.load(f)
        X = np.array(X)
        # No real labels exist for this dataset; placeholder zeros.
        labels = np.zeros(10)
    elif dataset == 'Flow_cytometry':
        X = FlowCal.io.FCSData(data_path + '/11-12-15_314.fcs')
        labels = np.zeros(10)
    elif dataset == 'Mouse_scRNA':
        data = pd.read_csv(data_path + '/GSE93374_Merged_all_020816_BatchCorrected_LNtransformed_doubletsremoved_Data.txt', sep='\t')
        X = data.to_numpy()
        # NOTE: labels here is a DataFrame, not an ndarray; labels[:size]
        # slices its rows.
        labels = pd.read_csv(data_path + '/GSE93374_cell_metadata.txt', sep='\t')
    elif dataset == 'swiss_roll':
        X, labels = make_swiss_roll(n_samples=size, random_state=20200202)
    elif dataset == 's_curve':
        X, labels = make_s_curve(n_samples=size, random_state=20200202)
    elif dataset == 's_curve_hole':
        # Punch a hole by removing all points close to the anchor.
        X, labels = make_s_curve(n_samples=size, random_state=20200202)
        anchor = np.array([0, 1, 0])
        indices = np.sum(np.square(X-anchor), axis=1) > 0.3
        X, labels = X[indices], labels[indices]
    elif dataset == 'swiss_roll_hole':
        X, labels = make_swiss_roll(n_samples=size, random_state=20200202)
        anchor = np.array([-10, 10, 0])
        indices = np.sum(np.square(X-anchor), axis=1) > 20
        X, labels = X[indices], labels[indices]
    elif dataset == 'kddcup99':
        X = np.load(data_path + '/KDDcup99_float.npy', allow_pickle=True)
        labels = np.load(data_path + '/KDDcup99_labels_int.npy', allow_pickle=True)
    elif dataset == '20NG':
        X = np.load(data_path + '/20NG.npy', allow_pickle=True)
        labels = np.load(data_path + '/20NG_labels.npy', allow_pickle=True)
    elif dataset == 'USPS':
        X = np.load(data_path + '/USPS.npy', allow_pickle=True)
        labels = np.load(data_path + '/USPS_labels.npy', allow_pickle=True)
    elif dataset == 'cifar10':
        X = np.load(data_path + '/cifar10_imgs.npy', allow_pickle=True)
        # Fixed: labels were loaded from the filesystem root ('/cifar10_labels.npy')
        # instead of data_path.
        labels = np.load(data_path + '/cifar10_labels.npy', allow_pickle=True)
    elif dataset == 'cifar100':
        X = np.load(data_path + '/cifar100_imgs.npy', allow_pickle=True)
        # Fixed: same missing data_path prefix as cifar10.
        labels = np.load(data_path + '/cifar100_labels.npy', allow_pickle=True)
    else:
        # Raise instead of print + assert(False): asserts vanish under
        # `python -O`, and the exception carries the offending name.
        raise ValueError('Unsupported dataset: {}'.format(dataset))
    return X[:size], labels[:size]
def experiment(X, method='PaCMAP', **kwargs):
    """Embed X once with the chosen method and report the wall-clock time.

    Returns (X_low, total_time) where X_low is the low-dimensional
    embedding and total_time the fit_transform duration in seconds.
    Raises ValueError for an unknown method name.
    """
    # NOTE(review): PaCMAP, umap, trimap and LargeVis are not imported in
    # this file -- those branches would raise NameError at run time;
    # confirm the missing imports upstream.
    if method == 'PaCMAP':
        transformer = PaCMAP(**kwargs)
    elif method == 'UMAP':
        transformer = umap.UMAP(**kwargs)
    elif method == 'TriMAP':
        transformer = trimap.TRIMAP(**kwargs)
    elif method == 'LargeVis':
        transformer = LargeVis(**kwargs)
    elif method == 't-SNE':
        transformer = TSNE(**kwargs)
    else:
        # Raise instead of print + assert(False): asserts are stripped
        # under `python -O` and callers get a useful message this way.
        raise ValueError("Incorrect method specified: {}".format(method))
    start_time = time()
    X_low = transformer.fit_transform(X)
    total_time = time() - start_time
    print("This run's time:")
    print(total_time)
    return X_low, total_time
def experiment_five(X, method='PaCMAP', **kwargs):
    """Run `experiment` five times on the same data.

    Returns (X_lows, run_times): a stacked array of the five embeddings and
    an array of the five wall-clock times.
    """
    # Removed the unused local `length = X.shape[0]` from the original.
    X_lows, run_times = [], []
    for _ in range(5):
        X_low, run_time = experiment(X, method, **kwargs)
        X_lows.append(X_low)
        run_times.append(run_time)
    return np.array(X_lows), np.array(run_times)
def main(data_path, output_path, dataset_name='MNIST', size=10000000):
    """Run the t-SNE perplexity sweep on one dataset and save the embeddings.

    `size` caps the number of samples; datasets smaller than the cap are
    used in full. Each parameter setting is run five times and the stacked
    embeddings are written as .npy files under output_path.
    """
    X, labels = data_prep(data_path, dataset=dataset_name, size=size)
    # Reduce dimensionality with PCA first (sklearn import at file top);
    # scRNA data keeps more components because it starts much wider.
    if dataset_name == 'Mouse_scRNA':
        pca = PCA(n_components=1000)
        X = pca.fit_transform(X)
    elif X.shape[1] > 100:
        pca = PCA(n_components=100)
        X = pca.fit_transform(X)
    print("Data loaded successfully")
    methods = ['t-SNE']
    args = {'t-SNE':[{'perplexity':10}, {'perplexity':20}, {'perplexity':40}]}
    print("Experiment started")
    for method in methods:
        parameters = args[method]
        for parameter in parameters:
            # X_low stacks all five runs; total_time holds the five times.
            X_low, total_time = experiment_five(X, method, **parameter)
            # Reuse n_neighbors as the generic "parameter" tag in filenames.
            if 'n_neighbors' in parameter:
                n_neighbors = parameter['n_neighbors']
            elif 'perplexity' in parameter:
                n_neighbors = parameter['perplexity']
            else:
                n_neighbors = 10 # Default value
            loc_string = output_path + \
                '{dataset_name}_{method}_{n_neighbors}'.format(dataset_name=dataset_name, method=method, n_neighbors=n_neighbors)
            np.save(loc_string, X_low)
            avg_time = np.mean(total_time)
            print('Average time for method {method} on {dataset_name} with param={n_neighbors} is {avg_time}'.format(dataset_name=dataset_name, method=method, n_neighbors=n_neighbors, avg_time=avg_time))
            print('The detailed time is {total_time}'.format(total_time=total_time))
    return 0
if __name__ == '__main__':
    # Please define the data_path and output_path here
    data_path = "../data/"
    output_path = "../output/"
    # Sweep every dataset; the optional fourth argument caps the sample
    # count (very large caps mean "use the whole dataset").
    main(data_path, output_path, 'MNIST')
    main(data_path, output_path, 'FMNIST')
    main(data_path, output_path, 'coil_20')
    main(data_path, output_path, 'coil_100')
    main(data_path, output_path, 'Mouse_scRNA')
    main(data_path, output_path, 'mammoth')
    main(data_path, output_path, 's_curve', 10000)
    main(data_path, output_path, 's_curve_hole', 10000)
    main(data_path, output_path, '20NG', 100000)
    main(data_path, output_path, 'USPS', 100000)
    main(data_path, output_path, 'kddcup99', 10000000)
    main(data_path, output_path, 'cifar10', 10000000)
    main(data_path, output_path, 'cifar100', 10000000)
#!/usr/bin/env python
# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.
# Author: <NAME>
# import modules
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
import numpy as np
## Convert anything exposing .tolist() (e.g. a sympy Matrix) to a float64 numpy array.
def sym_num(sym):
    nested = sym.tolist()
    return np.asarray(nested, dtype=np.float64)
# calculate the rotation matrix from the base to the end gripper: ROT * Rot_correct
def rpyToRotation(r, p, y):
    """Return the end-effector rotation matrix (as a numpy array) for the
    given roll/pitch/yaw, including the frame correction.

    ROT is the element-wise expansion of Rz(yaw) * Ry(pitch) * Rx(roll).
    """
    ROT = Matrix([
        [cos(p)*cos(y), sin(p)*sin(r)*cos(y) - sin(y)*cos(r), sin(p)*cos(r)*cos(y) + sin(r)*sin(y)],
        [sin(y)*cos(p), sin(p)*sin(r)*sin(y) + cos(r)*cos(y), sin(p)*sin(y)*cos(r) - sin(r)*cos(y)],
        [      -sin(p),                        sin(r)*cos(p),                        cos(p)*cos(r)]])
    # Axis-swap correction -- presumably aligning the URDF gripper frame
    # with the DH frame convention; confirm against the arm's DH table.
    Rot_correct = Matrix([
        [0., 0., 1.0],
        [0., -1.0, 0.],
        [1.0, 0., 0.]])
    ROT = ROT * Rot_correct
    return sym_num(ROT)
## Get the rotation matrix from base to WC, using q1, q2, q3
def eval_r0_3(q1, q2, q3):
    """Numerically evaluate R0_3, the rotation from the base frame to
    link 3, for the given joint angles.

    The matrix below is presumably the pre-simplified symbolic product of
    the first three DH transforms (note the constant q2 - pi/2 offset) --
    confirm against the symbolic derivation.
    """
    R0_3_eval = Matrix([
        [-sin(q3)*sin(q2 - 0.5*pi)*cos(q1) + cos(q1)*cos(q3)*cos(q2 - 0.5*pi), -sin(q3)*cos(q1)*cos(q2 - 0.5*pi) - sin(q2 - 0.5*pi)*cos(q1)*cos(q3), -sin(q1)],
        [-sin(q1)*sin(q3)*sin(q2 - 0.5*pi) + sin(q1)*cos(q3)*cos(q2 - 0.5*pi), -sin(q1)*sin(q3)*cos(q2 - 0.5*pi) - sin(q1)*sin(q2 - 0.5*pi)*cos(q3), cos(q1)],
        [-sin(q3)*cos(q2 - 0.5*pi) - sin(q2 - 0.5*pi)*cos(q3), sin(q3)*sin(q2 - 0.5*pi) - cos(q3)*cos(q2 - 0.5*pi), 0]])
    return sym_num(R0_3_eval)
def handle_calculate_IK(req):
    """Service handler: compute joint angles theta1..theta6 for every
    end-effector pose in the request and return a CalculateIKResponse.
    Returns -1 when the request contains no poses.
    """
    rospy.loginfo("Received %s eef-poses from the plan" % len(req.poses))
    if len(req.poses) < 1:
        print "No valid poses received"
        return -1
    else:
        # Initialize service response
        joint_trajectory_list = []
        for x in xrange(0, len(req.poses)):
            # IK code starts here
            joint_trajectory_point = JointTrajectoryPoint()
            # Extract end-effector position and orientation from request
            # px,py,pz = end-effector position
            # roll, pitch, yaw = end-effector orientation
            px = req.poses[x].position.x
            py = req.poses[x].position.y
            pz = req.poses[x].position.z
            (roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
                [req.poses[x].orientation.x, req.poses[x].orientation.y,
                    req.poses[x].orientation.z, req.poses[x].orientation.w])
            ### Your IK code here
            # Compensate for rotation discrepancy between DH parameters and Gazebo
            #
            #
            # Calculate joint angles using Geometric IK method
            #
            #
            ###
            # End-effector orientation including the frame correction.
            ROT_EE = rpyToRotation(roll, pitch, yaw)
            # calculate the wrist center: step back from the EE position
            # along its z-axis by 0.303 -- presumably the gripper length
            # from the DH table; confirm.
            EE = [px,py,pz]
            WC = EE - (0.303) * ROT_EE[:, 2]
            # calculate joint angles using Geometric IK method
            theta1 = atan2(WC[1], WC[0])
            # Triangle spanned by joints 2, 3 and the wrist center; the
            # constant side lengths presumably come from the DH parameters.
            side_a = 1.501
            side_b = sqrt(pow((sqrt(WC[0] * WC[0] + WC[1] * WC[1]) - 0.35), 2) + pow((WC[2] - 0.75), 2))
            side_c = 1.25
            # Interior angles via the law of cosines.
            angle_a = acos((side_b * side_b + side_c * side_c - side_a * side_a) / (2 * side_b * side_c))
            angle_b = acos((side_a * side_a + side_c * side_c - side_b * side_b) / (2 * side_a * side_c))
            angle_c = acos((side_a * side_a + side_b * side_b - side_c * side_c) / (2 * side_a * side_b))
            theta2 = pi / 2 - angle_a - atan2(WC[2] - 0.75, sqrt(WC[0] * WC[0] + WC[1] * WC[1]) - 0.35)
            # 0.036 rad: constant offset -- presumably compensating the
            # small link-4 sag; confirm against the DH table.
            theta3 = pi / 2 - (angle_b + 0.036)
            # calculate the rotation matrix from base to link 3
            R0_3 = eval_r0_3(theta1, theta2, theta3)
            # R3_6 = inv(R0_3) * R0_6 isolates the wrist orientation.
            R3_6 = np.dot(np.linalg.inv(R0_3), ROT_EE)
            # Euler angles of the spherical wrist from R3_6 entries.
            theta4 = atan2(R3_6[2, 2], -R3_6[0, 2])
            theta5 = atan2(sqrt(R3_6[0, 2] * R3_6[0, 2] + R3_6[2, 2] * R3_6[2, 2]), R3_6[1, 2])
            theta6 = atan2(-R3_6[1, 1], R3_6[1, 0])
            # Populate response for the IK request
            # In the next line replace theta1,theta2...,theta6 by your joint angle variables
            joint_trajectory_point.positions = [theta1, theta2, theta3, theta4, theta5, theta6]
            joint_trajectory_list.append(joint_trajectory_point)
        rospy.loginfo("length of Joint Trajectory List: %s" % len(joint_trajectory_list))
        return CalculateIKResponse(joint_trajectory_list)
def IK_server():
    """Start the ROS node and expose the calculate_ik service, then block."""
    # initialize node and declare calculate_ik service
    rospy.init_node('IK_server')
    s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)
    print "Ready to receive an IK request"
    rospy.spin()
# Start the IK service when this file is run as a script.
if __name__ == "__main__":
    IK_server()
|
<gh_stars>10-100
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
import typing
from pydantic import Field
from .animation import Animation
from .audio import Audio
from .document import Document
from .formatted_text import FormattedText
from .photo import Photo
from .sticker import Sticker
from .video import Video
from .video_note import VideoNote
from .voice_note import VoiceNote
from ..base_object import BaseObject
class WebPage(BaseObject):
    """
    Describes a web page preview

    :param url: Original URL of the link
    :type url: :class:`str`

    :param display_url: URL to display
    :type display_url: :class:`str`

    :param type_: Type of the web page. Can be: article, photo, audio, video, document, profile, app, or something else
    :type type_: :class:`str`

    :param site_name: Short name of the site (e.g., Google Docs, App Store)
    :type site_name: :class:`str`

    :param title: Title of the content
    :type title: :class:`str`

    :param param_description: Description of the content
    :type param_description: :class:`FormattedText`

    :param photo: Image representing the content; may be null, defaults to None
    :type photo: :class:`Photo`, optional

    :param embed_url: URL to show in the embedded preview
    :type embed_url: :class:`str`

    :param embed_type: MIME type of the embedded preview, (e.g., text/html or video/mp4)
    :type embed_type: :class:`str`

    :param embed_width: Width of the embedded preview
    :type embed_width: :class:`int`

    :param embed_height: Height of the embedded preview
    :type embed_height: :class:`int`

    :param duration: Duration of the content, in seconds
    :type duration: :class:`int`

    :param author: Author of the content
    :type author: :class:`str`

    :param animation: Preview of the content as an animation, if available; may be null, defaults to None
    :type animation: :class:`Animation`, optional

    :param audio: Preview of the content as an audio file, if available; may be null, defaults to None
    :type audio: :class:`Audio`, optional

    :param document: Preview of the content as a document, if available; may be null, defaults to None
    :type document: :class:`Document`, optional

    :param sticker: Preview of the content as a sticker for small WEBP files, if available; may be null, defaults to None
    :type sticker: :class:`Sticker`, optional

    :param video: Preview of the content as a video, if available; may be null, defaults to None
    :type video: :class:`Video`, optional

    :param video_note: Preview of the content as a video note, if available; may be null, defaults to None
    :type video_note: :class:`VideoNote`, optional

    :param voice_note: Preview of the content as a voice note, if available; may be null, defaults to None
    :type voice_note: :class:`VoiceNote`, optional

    :param instant_view_version: Version of instant view, available for the web page (currently, can be 1 or 2), 0 if none
    :type instant_view_version: :class:`int`
    """

    # "@type" discriminator used by the TDLib JSON protocol.
    ID: str = Field("webPage", alias="@type")
    url: str
    display_url: str
    type_: str = Field(..., alias='type')
    site_name: str
    title: str
    param_description: FormattedText
    photo: typing.Optional[Photo] = None
    embed_url: str
    embed_type: str
    embed_width: int
    embed_height: int
    duration: int
    author: str
    animation: typing.Optional[Animation] = None
    audio: typing.Optional[Audio] = None
    document: typing.Optional[Document] = None
    sticker: typing.Optional[Sticker] = None
    video: typing.Optional[Video] = None
    video_note: typing.Optional[VideoNote] = None
    voice_note: typing.Optional[VoiceNote] = None
    instant_view_version: int

    @staticmethod
    def read(q: dict) -> WebPage:
        """Build a WebPage from a raw TDLib dict.

        Uses pydantic's ``construct``, which skips validation by design.
        """
        return WebPage.construct(**q)
|
# -*- coding: utf-8 -*-
"""install_data.py
Provides a more sophisticated facility to install data files
than distutils' install_data does.
You can specify your files as a template like in MANIFEST.in
and you have more control over the copy process.
Copyright 2000 by <NAME>, Germany.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# created 2000/08/01, <NAME> <<EMAIL>>
# modified 2000/12/18, <NAME> <<EMAIL>>
###########################################################################
# import some modules we need
import os,sys,string
from types import StringType,TupleType,ListType
from distutils.util import change_root
from distutils.filelist import FileList
from distutils.command.install_data import install_data
###########################################################################
# a container class for our more sophisticated install mechanism
class Data_Files:
    """ container for list of data files.
        supports alternate base_dirs e.g. 'install_lib','install_header',...
        supports a directory where to copy files
        supports templates as in MANIFEST.in
        supports preserving of paths in filenames
        eg. foo/xyz is copied to base_dir/foo/xyz
        supports stripping of leading dirs of source paths
        eg. foo/bar1/xyz, foo/bar2/abc can be copied to bar1/xyz, bar2/abc
    """
    def __init__(self,base_dir=None,files=None,copy_to=None,template=None,preserve_path=0,strip_dirs=0):
        # base_dir: name of an install-command attribute ('install_lib',
        # 'install_headers', ...) that overrides the default install dir.
        self.base_dir = base_dir
        # Explicit file list; extended from the template in finalize().
        self.files = files
        # Target directory, absolute or relative to the install dir.
        self.copy_to = copy_to
        if template is not None:
            # Normalize the template: strip whitespace, drop empty lines.
            t = []
            for item in template:
                item = string.strip(item)
                if not item:continue
                t.append(item)
            template = t
        self.template = template
        self.preserve_path = preserve_path
        self.strip_dirs = strip_dirs
        # Guard so finalize() only runs once.
        self.finalized = 0

    def warn (self, msg):
        """Write a warning to stderr, prefixed with the command name."""
        sys.stderr.write ("warning: %s: %s\n" %
                          ("install_data", msg))

    def debug_print (self, msg):
        """Print 'msg' to stdout if the global DEBUG (taken from the
        DISTUTILS_DEBUG environment variable) flag is true.
        """
        from distutils.core import DEBUG
        if DEBUG:
            print msg

    def finalize(self):
        """ complete the files list by processing the given template """
        if self.finalized:
            return
        if self.files == None:
            self.files = []
        if self.template != None:
            # A single-string template may hold several MANIFEST.in-style
            # lines separated by ';'.
            if type(self.template) == StringType:
                self.template = string.split(self.template,";")
            filelist = FileList(self.warn,self.debug_print)
            for line in self.template:
                filelist.process_template_line(string.strip(line))
            filelist.sort()
            filelist.remove_duplicates()
            self.files.extend(filelist.files)
        self.finalized = 1
# end class Data_Files
###########################################################################
# a more sophisticated install routine than distutils install_data
class install_Data_Files (install_data):
    """install_data replacement that understands Data_Files instances in
    addition to the classic string / (dir, [files]) entries.
    """
    def check_data(self,d):
        """ check if data are in new format, if not create a suitable object.
            returns finalized data object
        """
        if not isinstance(d, Data_Files):
            self.warn(("old-style data files list found "
                       "-- please convert to Data_Files instance"))
            if type(d) is TupleType:
                if len(d) != 2 or not (type(d[1]) is ListType):
                    # NOTE(review): DistutilsSetupError is not imported in
                    # this file -- raising it would hit a NameError; confirm.
                    raise DistutilsSetupError, \
                          ("each element of 'data_files' option must be an "
                           "Data File instance, a string or 2-tuple (string,[strings])")
                d = Data_Files(copy_to=d[0],files=d[1])
            else:
                if not (type(d) is StringType):
                    raise DistutilsSetupError, \
                          ("each element of 'data_files' option must be an "
                           "Data File instance, a string or 2-tuple (string,[strings])")
                d = Data_Files(files=[d])
        d.finalize()
        return d

    def run(self):
        """Copy every data set to its destination; fills self.outfiles."""
        self.outfiles = []
        install_cmd = self.get_finalized_command('install')
        for d in self.data_files:
            d = self.check_data(d)
            install_dir = self.install_dir
            # alternative base dir given => overwrite install_dir
            if d.base_dir != None:
                install_dir = getattr(install_cmd,d.base_dir)
            # copy to an other directory
            if d.copy_to != None:
                if not os.path.isabs(d.copy_to):
                    # relatiev path to install_dir
                    dir = os.path.join(install_dir, d.copy_to)
                elif install_cmd.root:
                    # absolute path and alternative root set
                    # NOTE(review): reads self.root here while the condition
                    # above tests install_cmd.root -- confirm both refer to
                    # the same root.
                    dir = change_root(self.root,d.copy_to)
                else:
                    # absolute path
                    dir = d.copy_to
            else:
                # simply copy to install_dir
                dir = install_dir
                # warn if necceassary
                # NOTE(review): this warning fires for EVERY entry without
                # copy_to, not only on misconfiguration -- confirm intent.
                self.warn("setup script did not provide a directory to copy files to "
                          " -- installing right in '%s'" % install_dir)
            dir=os.path.normpath(dir)
            # create path
            self.mkpath(dir)
            # copy all files
            for src in d.files:
                if d.strip_dirs > 0:
                    # Drop the first strip_dirs path components of src.
                    dst = string.join(string.split(os.path.normcase(src),os.sep)[d.strip_dirs:],os.sep)
                else:
                    dst = src
                if d.preserve_path:
                    # preserve path in filename
                    self.mkpath(os.path.dirname(os.path.join(dir,dst)))
                    out = self.copy_file(src, os.path.join(dir,dst))
                else:
                    out = self.copy_file(src, dir)
                if type(out) is TupleType:
                    # copy_file may return (dest_name, copied); keep the name.
                    out = out[0]
                self.outfiles.append(out)
        return self.outfiles

    def get_inputs (self):
        # NOTE(review): appends each file *list* (so the result is nested),
        # unlike get_outputs which is flat -- confirm callers expect that.
        inputs = []
        for d in self.data_files:
            d = self.check_data(d)
            inputs.append(d.files)
        return inputs

    def get_outputs (self):
        """Return the flat list of files written by run()."""
        return self.outfiles
###########################################################################
|
#!/usr/bin/env python3
import sys
import argparse
import asyncio
from mobnet import Nameservice, Network
try:
import signal
except ImportError:
signal = None
class mobnet_server(asyncio.Protocol):
    """Pub/sub relay: nodes connect, send SUBSCRIBE for topics, and every
    published message is forwarded to all subscribers of its topic.
    """
    # Wire format: fixed-size length prefix followed by an encoded payload.
    length_header = 4
    encoding = 'JSON'
    # Class-level (server-wide) registries deliberately shared by ALL
    # connections: live clients and the topic -> subscribers mapping.
    clients = []
    topics = {}
    verbose = False
    ip = None
    def __init__(self):
        self.transport = None
    def connection_made(self, transport):
        # Register the new node and build its framing helpers.
        print('=====================')
        print('Node has connected.')
        self.transport = transport
        self.clients.append(self)
        #Define the send and unpack fuction
        self.unpacker = Network.Unpacker()
        self.send = lambda topic, data: self.transport.write(Network.pack(Network.encode(topic, data, self.encoding),
                                                                          self.length_header))
    def connection_lost(self, exc):
        # Drop the node from the client list and every subscription list.
        print('---------------------')
        #remove self from clients
        self.clients.remove(self)
        #remove self from all topics
        for topic in self.topics:
            if self in self.topics[topic]:
                self.topics[topic].remove(self)
        print(f"Node removed.")
    def data_received(self, data):
        # Reassemble length-prefixed frames (may yield zero or many).
        # print('RAW DATA', data)
        socket_data = self.unpacker.unpack(data, self.length_header)
        # print('SOCKET DATA', socket_data)
        if socket_data:
            for data in socket_data:
                self.process_data(data)
    def process_data(self, msg):
        # One decoded frame: either a SUBSCRIBE request or a publication.
        topic, data = Network.decode(msg, self.encoding)
        # NOTE(review): relies on `or` binding looser than `and`; the bytes
        # comparison only applies when encoding == 'bytes' -- confirm intent.
        if data == 'SUBSCRIBE' or self.encoding == 'bytes' and data == b'SUBSCRIBE':
            if topic not in self.topics:
                self.topics[topic] = []
            self.topics[topic].append(self)
            if self.verbose:
                print(f"A node has subscribed to {topic}")
        else:
            if topic in self.topics:
                # Fan the message out to every subscriber (sender included
                # if it subscribed to its own topic).
                for sub in self.topics[topic]:
                    sub.send(topic, data)
                if self.verbose:
                    print(f"A message has been published to {topic}")
            else:
                if self.verbose:
                    print(f"A message has been published to {topic} but no one is subscribed")
    def eof_received(self):
        # Returning None lets asyncio close the transport on EOF.
        pass
def start_server(loop, host, port, encoding, length, server_name, name_server):
    """Configure the protocol class and start listening on (host, port).

    When both server_name and name_server are given, register this server
    with the name service first. Returns the asyncio Server object.
    """
    # Wire-format settings are class attributes shared by all connections.
    mobnet_server.encoding = encoding
    mobnet_server.length_header = length
    f = loop.create_server(mobnet_server, host, port)
    if name_server and server_name:
        ns = Network.Node(name_server, Nameservice.port)
        ns.publish('name_set', {'name': server_name, 'ip': ns.socket_name, 'port': port})
    return loop.run_until_complete(f)
# Command-line options for the mobnet server, declared as (flag, options)
# pairs and registered in one pass.
ARGS = argparse.ArgumentParser(description='mobnet server.')
_cli_options = (
    ('-host', dict(action='store', dest='host', default='0.0.0.0',
                   help='Host name')),
    ('-port', dict(action='store', dest='port', default=20801, type=int,
                   help='Port number')),
    ('-iocp', dict(action='store_true', dest='iocp', default=False,
                   help='Use IOCP event loop')),
    ('-length', dict(action='store', dest='length', default=4, type=int,
                     help='Size of the length field')),
    ('-encode', dict(action='store', dest='encode', default='JSON',
                     help='The encoding to be used')),
    ('-name', dict(action='store', dest='servername', default=None,
                   help='The name for name_server.py to tell others')),
    ('-ns', dict(action='store', dest='nameserver', default=None,
                 help='The name_server.py address.')),
)
for _flag, _opts in _cli_options:
    ARGS.add_argument(_flag, **_opts)
if __name__ == '__main__':
    args = ARGS.parse_args()
    # Allow "host:port" in the -host argument; the port part wins over -port.
    if ':' in args.host:
        args.host, port = args.host.split(':', 1)
        args.port = int(port)
    if args.iocp:
        # Windows-only proactor loop, opt-in via -iocp.
        from asyncio import windows_events
        loop = windows_events.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    print(f'Using backend: {loop.__class__.__name__}')
    # Let Ctrl-C stop the loop cleanly on POSIX.
    if signal is not None and sys.platform != 'win32':
        loop.add_signal_handler(signal.SIGINT, loop.stop)
    server = start_server(loop, args.host, args.port, args.encode, args.length, args.servername, args.nameserver)
    print(f'Starting mobnet server on {args.host} port {args.port} with '
          f'{args.encode} and header length field {args.length}')
    try:
        loop.run_forever()
    finally:
        # Always release the listening socket and the loop on shutdown.
        server.close()
        loop.close()
|
<gh_stars>0
#!/usr/bin/env python
import argparse
import os
import skelconf
import adios
import skel_bpy
import skel_settings
# To produce submit scripts, we'll work from a template. There will
# be two types of replacement, simple variables, and macros (for the
# tests)
def generate_submit_scripts_from_xml (params):
    """Write one submission script per batch from the platform template
    at ~/.skel/templates/submit_<platform>.tpl.

    The $$START_TEST$$ ... $$END_TEST$$ region of the template is expanded
    once per test in the batch; every other line gets plain variable
    substitution via submit_line_template_replace().
    """
    # Settings and the target platform are loop-invariant; the original
    # re-created the settings object on every batch iteration.
    settings = skel_settings.skel_settings()
    platform = settings.get_submit_target()
    for batch in params.get_batches():
        sfile = open ('submit_' + platform + '_' + batch.get_name(), 'w')
        sfile_template = open (os.path.expanduser('~/.skel/templates/submit_' + platform + '.tpl'), 'r')
        template_lines = sfile_template.readlines()
        i = 0
        while i < len (template_lines):
            template_line = template_lines[i]
            if '$$START_TEST$$' in template_line:
                # Locate the macro body first, so a batch with zero tests
                # still skips it cleanly (the original raised NameError in
                # that case because its loop variable was never bound).
                body_start = i + 1
                body_end = body_start
                while not '$$END_TEST$$' in template_lines[body_end]:
                    body_end = body_end + 1
                # Expand the macro body once per test in the batch.
                for test in batch.get_tests():
                    for macro_line in template_lines[body_start:body_end]:
                        sfile.write (submit_line_template_replace (macro_line, params, batch, test, settings))
                # Point at the first line after the macro
                i = body_end + 1
            else:
                # Fill in any replacement vars in this line...
                template_line = submit_line_template_replace (template_line, params, batch, None, settings)
                sfile.write (template_line)
                i = i + 1
        sfile_template.close()
        sfile.close()
import re
import math
def submit_line_template_replace (template_line, params, batch, test, settings):
    """Substitute the $$...$$ placeholders in one template line.

    Batch/application placeholders are always replaced; test-specific ones
    only when a test is given. $$CORES_TOTAL$$<n>$$ and $$NODES_TOTAL$$<n>$$
    embed the cores-per-node count <n> and are rounded up to whole nodes.
    """
    template_line = template_line.replace ('$$JOB_NAME$$', batch.get_name() + '_%d'%batch.get_cores() + '_skel_' + params.get_application() )
    template_line = template_line.replace ('$$WALLTIME$$', batch.get_walltime() )
    template_line = template_line.replace ('$$APP$$', params.get_application() )
    template_line = template_line.replace ('$$CORES_USED$$', '%d'%batch.get_cores() )
    template_line = template_line.replace ('$$TARGET$$', params.get_target() )
    template_line = template_line.replace ('$$ACCOUNT$$', settings.get_account() )
    # Idiom fix: identity comparison for None (was `test != None`).
    if test is not None:
        #Test specific replacements
        template_line = template_line.replace ('$$TAGS$$', test.get_tags() )
        template_line = template_line.replace ('$$METHOD$$', test.get_method() )
        template_line = template_line.replace ('$$EXEC$$', params.get_application() + '_skel_' + test.get_group_name() + '_' + test.get_type() )
        template_line = template_line.replace ('$$ITERATIONS$$', test.get_iterations() )
        template_line = template_line.replace ('$$METHOD_PARAMS$$', test.get_method_params() )
        template_line = template_line.replace ('$$EXT$$', test.get_ext() )
        # Hoist the repeated accessor; 'pre'/'post'/'both' select where the
        # output directories get wiped.
        rm = test.get_rm()
        prerm = 'rm -rf out*' if rm == 'pre' or rm == 'both' else ''
        template_line = template_line.replace ('$$PRE_RM$$', prerm)
        postrm = 'rm -rf out*' if rm == 'post' or rm == 'both' else ''
        template_line = template_line.replace ('$$POST_RM$$', postrm)
    if '$$CORES_TOTAL$$' in template_line:
        pattern = re.compile (r"\$\$CORES_TOTAL\$\$[\d]*\$\$")
        match = pattern.search (template_line)
        match_term = match.group()
        # If we split the matched string at the dollar signs, the cores/node
        # count sits at index 4.
        count = float(match_term.split('$')[4])
        # Round the requested cores up to a multiple of cores-per-node.
        total_cores = int (math.ceil( (batch.get_cores() / count) ) * count)
        template_line = template_line.replace (match_term, '%d'%total_cores)
    if '$$NODES_TOTAL$$' in template_line:
        pattern = re.compile (r"\$\$NODES_TOTAL\$\$[\d]*\$\$")
        match = pattern.search (template_line)
        match_term = match.group()
        count = float(match_term.split('$')[4])
        total_nodes = int (math.ceil( (batch.get_cores() / count) ) )
        template_line = template_line.replace (match_term, '%d'%total_nodes)
    return template_line
def generate_submit_scripts_from_yaml (args):
    """Generate a PBS submit script from a yaml I/O description via Cheetah.

    Writes 'submit.pbs' in the current directory unless it already exists
    (use -f/--force to overwrite); returns 999 when aborting on an existing
    file.
    """
    #print "Generating submission script using yaml file"
    bpy = skel_bpy.skel_bpy (args.yamlfile)
    outfilename = "submit.pbs"
    # NOTE(review): template path is hard-coded to the 'sith' target -- verify
    # whether other submit targets need their own yaml-driven templates.
    template_file_name = "~/.skel/templates/submit_sith.tmpl"
    # Only proceed if outfilename does not already exist, or if -f was used
    if os.path.exists (outfilename) and not args.force:
        print "%s exists, aborting. Delete the file or use -f to overwrite." % outfilename
        return 999
    skel_file = open (outfilename, 'w')
    # Now for the Cheetah magic:
    from Cheetah.Template import Template
    template_file = open (os.path.expanduser(template_file_name), 'r')
    t = Template(file=template_file)
    settings = skel_settings.skel_settings()
    # The template interpolates these values as attributes of t.
    t.bpy = bpy
    t.project = args.project
    t.target = settings.get_submit_target()
    t.account = settings.get_account()
    t.job_name = "skel_%s_%d" % (args.project, bpy.get_num_procs() )
    t.walltime = "1:00:00"
    t.iteration_count = 1
    t.executable = "%s_skel_%s" % (t.project, bpy.get_group_name() )
    skel_file.write (str(t) )
def generate_submit_scripts_with_args (parent_parser):
args = pparse_command_line (parent_parser)
try:
config = adios.adiosConfig (args.project + '_skel.xml')
except (IOError):
print "XXError reading " + args.project + "_skel.xml. Try running skel xml " + args.project + " first."
return 1
if args.yamlfile is not None:
generate_submit_scripts_from_yaml(args)
else:
try:
params = skelconf.skelConfig (args.project + '_params.xml')
except (IOError):
print "Error reading " + args.project + "_params.xml. Try running skel params " + args.project + " first,"
print "then check that " + args.project + "_params.xml exists."
return 1
generate_submit_scripts_from_xml (params)
def pparse_command_line (parent_parser):
    """Build the 'skel submit' argument parser (inheriting parent_parser) and parse argv."""
    submit_parser = argparse.ArgumentParser (
        parents = [parent_parser],
        formatter_class=argparse.RawDescriptionHelpFormatter,
        prog='skel',
        #add_help=False,
        description='''\
skel source
    create source code to access the I/O pattern for the target skeletal application''')
    # One required positional (the project), plus the optional yaml/force flags.
    submit_parser.add_argument ('project', metavar='project', help='Name of the skel project')
    submit_parser.add_argument ('-y', '--yaml-file', dest='yamlfile', help='yaml file to use for I/O pattern')
    submit_parser.add_argument ('-f', '--force', dest='force', action='store_true', help='overwrite existing source file')
    submit_parser.set_defaults(force=False)
    return submit_parser.parse_args()
def parse_command_line():
    """Parse the standalone submit-script command line (project name only)."""
    cli = argparse.ArgumentParser (description='Create submission scripts for the given skel project')
    cli.add_argument ('project', metavar='project', help='Name of the skel project')
    return cli.parse_args()
def main(argv=None):
    """Standalone entry point: load the project configs and emit submit scripts."""
    skel_settings.create_settings_dir_if_needed()
    args = parse_command_line()
    # config is loaded for its validation side effect; the object is unused here.
    config = adios.adiosConfig (args.project + '_skel.xml')
    params = skelconf.skelConfig (args.project + '_params.xml')
    #generate_makefiles_c (params)
    generate_submit_scripts_from_xml (params)
if __name__ == "__main__":
    main()
|
<reponame>rpartsey/habitat-pointnav-aux
"""
Using this eval script
- modify cell 2 definitions as desired (load in the appropriate folders)
- get values in last cell, plots in second to last cell
- modify plot key to see given metric
"""
#%%
import math
import os
import matplotlib.pyplot as plt
from scipy import interpolate
import numpy as np
import seaborn as sns
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from sklearn import metrics
# Strings
# Plot-title / legend labels for each metric key.
key_labels = { "spl": "SPL - Train",
    "success": "Success - Train",
    "eval_spl": "SPL - Val",
    "eval_success": "Success - Val"
}
# Y-axis labels for each metric key.
axis_labels = {
    "spl": "SPL",
    "eval_spl": "SPL",
    "success": "Success",
    "eval_success": "Success"
}
# Display / folder-name stems for the auxiliary-task variants.
cpc_name = "CPC|A"
cpc_codename = "cpca"
cpca_id_td_codename = "cpca-id-td"
cpc_all_name = cpc_name + "{1-16}"
# Human-readable label for every run variant, keyed by its tb folder name.
variant_labels = {
    "baseline": "Baseline",
    f"{cpc_codename}1": f"{cpc_name}-1",
    f"{cpc_codename}2": f"{cpc_name}-2",
    f"{cpc_codename}4": f"{cpc_name}-4",
    f"{cpc_codename}8": f"{cpc_name}-8",
    f"{cpc_codename}16": f"{cpc_name}-16",
    "id": "ID",
    "td": "TD",
    f"{cpc_codename}16w": f"Weighted {cpc_name}",
    f"{cpc_codename}_attn": f"{cpc_all_name}: Attn",
    f"{cpc_codename}_attn-e": f"{cpc_all_name}: Attn+E",
    f"{cpc_codename}_repeat": "CPC|A-16 Repeat",
    f"{cpc_codename}_fixed": f"{cpc_all_name}: Fixed",
    f"{cpc_codename}_single": f"{cpc_all_name}: Single",
    f"{cpca_id_td_codename}_single": f"{cpc_all_name}+ID+TD: Single",
    f"{cpca_id_td_codename}_average": f"{cpc_all_name}+ID+TD: Average",
    f"{cpca_id_td_codename}_soft": f"{cpc_all_name}+ID+TD: Softmax",
    # f"{cpca_id_td_codename}_attn-e": f"{cpc_all_name}+ID+TD: Attn+E",
    f"{cpca_id_td_codename}_attn-2e": f"{cpc_all_name}+ID+TD: Attn+E",
    f"{cpca_id_td_codename}_attn": f"{cpc_all_name}+ID+TD: Attn",
    # "baseline_ddppo": "Baseline DDPPO",
    # f"{cpca_id_td_codename}_single_ddppo": f"{cpc_all_name}+ID+TD: Single DDPPO",
    # f"{cpca_id_td_codename}_attn-2e_ddppo": f"{cpc_all_name}+ID+TD: Attn+E DDPPO",
}
def get_run_logs(v):
    """Return sorted paths of the 'run*' subfolders for variant ``v`` under ``run_root``."""
    variant_dir = os.path.join(run_root, v)
    return [
        os.path.join(variant_dir, entry)
        for entry in sorted(os.listdir(variant_dir))
        if 'run' in entry
    ]
# Cap scalar loading at 1000 points per tag when reading event files.
tf_size_guidance = {'scalars': 1000}
# Per-metric subfolder that holds the eval event files for a run.
plot_key_folder_dict = {
    'eval_spl': 'eval_metrics_spl/',
    'eval_success': 'eval_metrics_success/'
}
#%%
run_root = "/nethome/jye72/projects/habitat-pointnav-aux/tb/r3/"
# run_root = "/nethome/jye72/projects/habitat-pointnav-aux/tb/mp3d_pn/"
run_count = 4  # number of seeds/runs expected per variant
np.random.seed(0)
# nested by variant and then run i
# Set what to plot
variants_1 = ['baseline', f'{cpc_codename}16', 'id', 'td']
variants_2 = ['baseline', f'{cpc_codename}16', f"{cpc_codename}_single", f"{cpca_id_td_codename}_single"]
variants_3 = ['baseline', f"{cpc_codename}16", f"{cpca_id_td_codename}_soft", f"{cpca_id_td_codename}_attn-2e", f"{cpca_id_td_codename}_single"]
plotted_union = list(set(variants_1) | set(variants_2) | set(variants_3))
# plotted_union = [ "baseline", f"{cpca_id_td_codename}_attn-2e"]
# plotted_union = ["baseline", f"{cpca_id_td_codename}_single", f"{cpca_id_td_codename}_attn-2e"]
# plotted_union = [f"{cpca_id_td_codename}_attn-2e"]
# plotted_union = [f"{cpc_codename}_attn", f"{cpc_codename}_attn-e",f"{cpc_codename}_repeat",f"{cpc_codename}_fixed"]
# plotted_union = [f"{cpc_codename}_attn", f"{cpc_codename}_attn-e",f"{cpc_codename}_repeat",f"{cpc_codename}_fixed"]
# plotted_union = [f"{cpca_id_td_codename}_soft", f"{cpca_id_td_codename}_average", f"{cpc_codename}_single", f"{cpca_id_td_codename}_attn"]
# plotted_union = [f'{cpc_codename}1', f'{cpc_codename}2', f'{cpc_codename}4',]
# plotted_union = [f'{cpc_codename}16w', 'id', 'td']
# Colors are assigned over plotted_union so a variant keeps its color across plots.
palette = sns.color_palette(palette='muted', n_colors=len(plotted_union), desat=0.9)
variants = plotted_union
variants = variant_labels.keys()
# NOTE(review): the two assignments above are dead -- `variants` is immediately
# overwritten with the explicit list below.
variants = ['baseline', 'cpca-id-td_single', 'cpca-id-td_attn-2e']
variant_colors = {}
for i, v in enumerate(plotted_union):
    variant_colors[v] = palette[(i+3) % len(plotted_union)]
sns.palplot(palette)
# Resolve the event-log folders for each variant that will be loaded.
variant_paths = {}
for variant in variants:
    variant_paths[variant] = get_run_logs(variant)
#%%
# * Key
# plot_key = 'success' # spl, success, eval_spl, eval_success
# plot_key = 'spl' # spl, success, eval_plots, eval_success
plot_key = 'eval_success' # spl, success, eval_spl, eval_success
plot_key = 'eval_spl' # spl, success, eval_spl, eval_success
plot_key_folder = plot_key_folder_dict.get(plot_key, "")
# Load
# plot_steps/plot_values: variant -> list (per run) of step / value arrays.
plot_values = {}
plot_steps = {}
for variant, variant_runs in variant_paths.items():
    plot_values[variant] = []
    plot_steps[variant] = []
    min_steps = 0
    for i, run in enumerate(variant_runs):
        # Stop once run_count complete runs have been collected.
        if len(plot_steps[variant]) >= run_count:
            break
        accum_path = os.path.join(run, plot_key_folder)
        if not os.path.exists(accum_path):
            continue
        # Read the 'eval_metrics' scalar series out of the TensorBoard log.
        event_acc = EventAccumulator(accum_path, tf_size_guidance)
        event_acc.Reload()
        scalars = event_acc.Scalars('eval_metrics')
        steps_and_values = np.stack(
            [np.asarray([scalar.step, scalar.value])
            for scalar in scalars])
        steps = steps_and_values[:, 0]
        values = steps_and_values[:, 1]
        if len(steps) < 41: # We allow more in case we doubled something
            # Fewer than 41 checkpoints means the run is incomplete; report and skip.
            print(f"skipping {variant}, {i}")
            unique, indices = np.unique(steps, return_index=True)
            print(unique)
            print(values[-1])
            continue # Incomplete
        plot_steps[variant].append(steps)
        plot_values[variant].append(values)
# print(variant)
# for run in plot_values[variant]:
#     print(len(run))
#%%
# * Cropping (and averaging) values of each checkpoint - for multi-eval
def get_cleaned_data(raw_steps, raw_values, average=1):
    """Deduplicate checkpoint steps and average repeated evaluations per step.

    args:
        raw_steps: dict variant -> list (per run) of step arrays
        raw_values: dict variant -> list (per run) of value arrays
        average: max number of evaluations per checkpoint to average over
    returns:
        (clean_steps, clean_values): dicts keyed by the global `variants`
    """
    clean_steps = {}
    clean_values = {}
    for variant in variants:
        clean_steps[variant] = []
        clean_values[variant] = []
        # Fixed: previously consulted the global plot_steps here, so the
        # function silently ignored its raw_steps argument for the guard
        # and run-count checks. Use the parameters consistently.
        if variant in raw_steps:
            for i in range(len(raw_steps[variant])):
                steps = raw_steps[variant][i]
                vals = raw_values[variant][i]
                un, ind, inv = np.unique(steps, return_index=True, return_inverse=True)
                # Keep only the first occurrence of each unique step.
                clean_steps[variant].append(steps[ind])
                avg_values = []
                for step in range(len(un)):
                    # All evaluations recorded at this step, capped at `average`.
                    step_vals = vals[inv == step][:average]
                    avg_step_val = np.mean(step_vals)
                    avg_values.append(avg_step_val)
                clean_values[variant].append(avg_values)
    return clean_steps, clean_values
clean_steps, clean_values = get_cleaned_data(plot_steps, plot_values, average=3)
#%%
# Best checkpoint per run, reported as 2 * argmax step index.
best_ckpts = {}
for variant in clean_values:
    # When the 4th run is incomplete (< 40 checkpoints), drop it from the argmax.
    if len(clean_values[variant]) == 4 and len(clean_values[variant][3]) < 40:
        print(variant)
        var_data = np.array(clean_values[variant][:3])
    else:
        var_data = np.array(clean_values[variant])
    best_ckpt = 2 * (np.argmax(var_data, axis=1))
    best_ckpts[variant] = best_ckpt.tolist()
    print(f"{variant:20} {best_ckpts[variant]}")
import json
# NOTE(review): the payload is JSON but the filename says .csv -- confirm consumers.
with open(f"{plot_key}_ckpts.csv", 'w') as f:
    json.dump(best_ckpts, f)
#%%
print(clean_values['baseline'][0][-1])
print(clean_values['cpca-id-td_single'][1][-3])
print(clean_values['cpca-id-td_attn-2e'][0][-3])
#%%
def get_means_and_ci(values, window_size=1, early_stop=True):
    r"""
    Returns means and CI np arrays
    args:
        values: dict of trials by variant, each value a list of trial data
        window_size: window smoothing of trials (1 disables smoothing)
        early_stop: if True, replace each point with the running best so far
    returns:
        mean and CI dict, keyed by same variants; CI is a 95% normal interval
        using the global run_count as the sample size
    """
    means={}
    ci = {}
    for variant in values:
        # data = np.array(values[variant])
        # Trials can differ in length; truncate all to the shortest.
        min_overlap = min(len(trial) for trial in values[variant])
        data = np.array([trial[:min_overlap] for trial in values[variant]])
        # print(data.shape)
        # print(variant)
        values_smoothed = np.empty_like(data)
        if window_size > 1:
            for i in range(data.shape[1]):
                # NOTE(review): the window spans window_size+1 samples
                # (indices i-window_size .. i) -- confirm that is intended.
                window_start = max(0, i - window_size)
                window = data[:, window_start:i + 1]
                values_smoothed[:, i] = window.mean(axis=1)
        else:
            values_smoothed = data
        if early_stop:
            # Monotone running max: value at t becomes the best seen up to t.
            best_until = np.copy(values_smoothed)
            for t in range(best_until.shape[1]):
                best_until[:,t] = np.max(best_until[:,:t+1], axis=1)
            values_smoothed = best_until
        means[variant] = np.mean(values_smoothed, axis=0)
        ci[variant] = 1.96 * np.std(values_smoothed, axis=0) \
            / math.sqrt(run_count) # 95%
    return means, ci
# Pick which values feed the curves; eval metrics use the cleaned data.
if 'eval' in plot_key:
    # data = plot_values
    data = clean_values
else:
    # NOTE(review): interpolated_values is not defined in this file section --
    # presumably produced by a train-metrics cell elsewhere; verify before
    # running with a non-eval plot_key.
    data = interpolated_values
plot_means, plot_ci = get_means_and_ci(data, window_size=1, early_stop=True)
true_means, true_ci = get_means_and_ci(data, window_size=1, early_stop=False) # For AUC calc
#%%
print(clean_values['cpca-id-td_attn-2e'][0][-1])
print(clean_values['cpca-id-td_attn-2e'][1][-1])
print(clean_values['cpca-id-td_attn-2e'][2][-1])
print(clean_values['cpca-id-td_attn-2e'][3][-1])
print(plot_means['cpca-id-td_attn-2e'][-1])
#%%
# Style
SMALL_SIZE = 12
MEDIUM_SIZE = 15
LARGE_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', labelsize=LARGE_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.style.use('seaborn-muted')
plt.figure(figsize=(6,4))
plt.xlabel("Frames (Million)")
plt.ylabel(key_labels[plot_key])
spine_alpha = 0.3
plt.gca().spines['right'].set_alpha(spine_alpha)
plt.gca().spines['bottom'].set_alpha(spine_alpha)
plt.gca().spines['left'].set_alpha(spine_alpha)
plt.gca().spines['top'].set_alpha(spine_alpha)
plt.grid(alpha=0.25)
plt.tight_layout()
# Plot evals
# Axes
plt.xlim(0, 40)
plt.xticks(np.arange(0, 45, 5))
x_scale = 1e6  # steps are plotted in millions of frames
if 'eval' in plot_key:
    lower_lim = 0.0
    # upper_lim = 0.5 if 'success' in plot_key else .3
    upper_lim = 0.9 if 'success' in plot_key else .8
    plt.ylim(lower_lim, upper_lim)
    plt.yticks(np.arange(lower_lim, upper_lim + 0.01, 0.1))
# * Plot settings
# set_num selects which of the three variant groupings to draw.
set_num = 2
variant_lists = [variants_1, variants_2, variants_3]
plotted = variant_lists[set_num]
# plotted = ['baseline', 'cpca4', 'cpca-id-td_soft', 'cpca-id-td_single', 'cpca-id-td_attn', 'cpca-id-td_attn-e']
# Table 1
# plotted = ['baseline', 'cpca-id-td_attn-2e']
# plotted = ['baseline', 'cpca4', 'cpca-id-td_soft', 'cpca-id-td_attn-2e']
# plotted = ['baseline', 'cpca-id-td_soft', 'cpca-id-td_attn', 'cpca-id-td_attn-2e', 'cpca_single', 'cpca-id-td_single']
# plotted = variants
# Draw each variant's mean curve with a shaded 95% CI band.
for variant in plotted:
    if 'eval' in plot_key:
        x = clean_steps[variant][0] / x_scale
        y = plot_means[variant]
        line, = plt.plot(x, y, label=variant_labels.get(variant, variant), c=variant_colors.get(variant))
        plt.fill_between(x, y - plot_ci[variant], y + plot_ci[variant], facecolor=line.get_color(), alpha=0.5)
def annotate(idx, from_var, to_var, hoffset=-6, voffset=0):
    """Draw a vertical arrow from ``from_var`` up to ``to_var`` at checkpoint ``idx``,
    labeled with the signed difference between the two curves."""
    base = plot_means[from_var][idx]
    target = plot_means[to_var][idx]
    sign = "+" if (target - base) > 0 else "-"
    plt.text(idx + hoffset, target + voffset, f"{sign} {abs(target - base):.2f}", size=16)
    plt.annotate(
        "",
        xy=(idx, base),
        xycoords="data",
        xytext=(idx, target),
        textcoords="data",
        arrowprops=dict(arrowstyle="<-", connectionstyle="arc3,rad=0", linewidth="1.5"),
    )
# Simple
# Per-set arrow annotations and legend placement.
if set_num == 0:
    annotate(40, "baseline", "cpca16", hoffset=-6.5, voffset=0.02)
    # annotate(2, "baseline", "cpca16", hoffset=1, voffset=0.02)
    leg_start = .71
# Homo
if set_num == 1:
    # annotate(40, "baseline", "cpca16", -6, -0.08)
    annotate(40, "baseline", "cpca-id-td_single", -6, 0.02)
    annotate(2, "baseline", "cpca-id-td_single", 2, 0.05)
    leg_start = .36
    # leg_start = .57
# Diverse
if set_num == 2:
    leg_start = .32
    annotate(2, "baseline", "cpca-id-td_attn-2e", 1.0, .01)
leg = plt.legend(loc=(leg_start, .01),
    markerfirst=False, ncol=1, frameon=False, labelspacing=0.4)
# leg = plt.legend(loc=(0.01, .7),
#     markerfirst=True, ncol=1, frameon=False, labelspacing=0.4)
for line in leg.get_lines():
    line.set_linewidth(2.0)
# plt.title("MP3D + Noisy Actuation + Sliding Off")
plt.savefig('test.pdf', dpi=150)
#%%
print(plot_means['baseline'][-1])
print(plot_means['cpca16'][19])
print(plot_means['cpca-id-td_single'][12])
#%%
# Teaser
# Two-curve headline figure: baseline vs the best variant.
# Style
SMALL_SIZE = 12
MEDIUM_SIZE = 15
LARGE_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', labelsize=LARGE_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.style.use('seaborn-muted')
plt.figure(figsize=(6,4))
plt.xlabel("Steps (Million)")
plt.ylabel("SPL (Higher is Better)")
spine_alpha = 0.3
plt.gca().spines['right'].set_alpha(0.0)
plt.gca().spines['bottom'].set_alpha(spine_alpha)
plt.gca().spines['left'].set_alpha(spine_alpha)
plt.gca().spines['top'].set_alpha(0.0)
plt.grid(alpha=0.25)
plt.tight_layout()
# Plot evals
# Axes
plt.xlim(0, 40)
plt.xticks(np.arange(0, 45, 5))
x_scale = 1e6
if 'eval' in plot_key:
    lower_lim = 0.0
    # upper_lim = 0.5 if 'success' in plot_key else .3
    upper_lim = 0.9 if 'success' in plot_key else .8
    plt.ylim(lower_lim, upper_lim)
    plt.yticks(np.arange(lower_lim, upper_lim + 0.01, 0.1))
# * Plot settings
# NOTE: this shadows the variant_labels dict defined near the top of the file.
variant_labels = {
    'baseline': "DD-PPO (Wijmans et al., 2020)",
    "cpca-id-td_attn-2e": "Ours"
}
plotted = ['cpca-id-td_attn-2e', 'baseline']
for variant in plotted:
    if 'eval' in plot_key:
        x = clean_steps[variant][0] / x_scale
        y = plot_means[variant]
        line, = plt.plot(x, y, label=variant_labels.get(variant, variant), c=variant_colors.get(variant))
        plt.fill_between(x, y - plot_ci[variant], y + plot_ci[variant], facecolor=line.get_color(), alpha=0.5)
# Headline annotations: final-SPL gap and speedup arrows.
idx = 40
hoffset = -10
voffset = -0.1
lo = plot_means['baseline'][idx]
hi = plot_means['cpca-id-td_attn-2e'][idx]
plt.annotate("", xy=(idx, lo), xycoords="data", xytext=(idx, hi + 0.01), textcoords="data", arrowprops=dict(arrowstyle="<-", connectionstyle="arc3,rad=0", linewidth="1.5"))
plt.text(idx+hoffset, hi+voffset, f"+{(hi - lo):.2f} SPL", size=16)
plt.annotate("", xy=(40, lo), xycoords="data", xytext =(7, lo), textcoords="data", arrowprops=dict(arrowstyle="<-", connectionstyle="arc3,rad=0", linewidth="1.5"))
plt.text(18, lo + 0.02, f"5.5x faster", size=16)
leg = plt.legend(loc=(0.32, .05), markerfirst=False, ncol=1, frameon=False, labelspacing=0.4)
for line in leg.get_lines():
    line.set_linewidth(2.0)
plt.title("Performance on PointGoal Navigation \n (with RGB + GPS + Compass sensors)")
plt.savefig('test.pdf', dpi=150)
#%%
# Hack around loading spl and success
#%%
# Prints values for tables
print(plot_key)
# LaTeX row label for each variant (used by the results tables).
latex_label = {
    "baseline": "Baseline",
    "id": "ID",
    "td": "TD",
    "cpca1": "\cpcat$1$",
    "cpca2": "\cpcat$2$",
    "cpca4": "\cpcat$4$",
    "cpca8": "\cpcat$8$",
    "cpca16": "\cpcat$16$",
    "cpca16w": "Weighted \cpcat16",
    "cpca_single": "\\allcpc: Add",
    "cpca-id-td_single": "\\allcpc+ID+TD: Add",
    "cpca-id-td_attn-2e": "\\allcpc+ID+TD: Attn+E",
    "cpca-id-td_attn": "\\allcpc+ID+TD: Attn",
    "cpca-id-td_soft": "\\allcpc+ID+TD: Softmax",
    "cpca-id-td_average": "\\allcpc+ID+TD: Average",
    "cpca_attn": "\\allcpc: Attn",
    "cpca_attn-e": "\\allcpc: Attn+E",
    # Fixed: this key was listed twice with identical values; keep one entry.
    "cpca_fixed": "\\allcpc: Fixed Attn",
    "cpca_repeat": "\cpcat16$\\times 5$: Attn",
}
basic_template = "\\rownumber {} & \n ${:.3f} $\scriptsize{{$\pm {:.3f}$}} & ${:.3f} $\scriptsize{{$\pm {:.3f}$}}"
# variant, auc, auc ci, best, best ci
for variant in variants:
    # AUC of the (non-early-stopped) training curve over normalized [0, 1] steps.
    auc = metrics.auc(np.arange(0,1 + 1.0/40, 1.0/40), true_means[variant])
    auc_ci = metrics.auc(np.arange(0,1 + 1.0/40,1.0/40), true_ci[variant])
    print(basic_template.format(
        latex_label[variant],
        auc, auc_ci,
        plot_means[variant][-1], plot_ci[variant][-1]
    ))
    print("\\\\")
    # print(f"${auc:.3f} \pm {auc_ci:.3f}$")
    # print( # f"10M: ${plot_means[variant][ten_mil_key]:.3f} \pm {plot_ci[variant][ten_mil_key]:.3f}$ \n" +
    #     f"${plot_means[variant][-1]:.3f} \pm {plot_ci[variant][-1]:.3f}$")
#%%
print(plot_means['cpca-id-td_attn-2e'])
"""Cleans the US Census TIGER Shapefile data.
This code is based almost entirely on open source code written by @jamesturk
at OpenStates, which can be found here --> is.gd/1K0YAy
"""
import re
import geojson
import zipfile
import subprocess
from pathlib import Path
from utils import print_cr
from app.models import RegionType
from app.config import DATA_RAW_PATH, DATA_CLEANED_PATH, TigerDataset
from app.lookups.ccid import assemble_ccid
# Raw and cleaned on-disk locations of the TIGER datasets.
TIGER_RAW_PATH = Path(DATA_RAW_PATH % 'tiger')
TIGER_CLEAN_PATH = Path(DATA_CLEANED_PATH % 'tiger')
# Shorthand for the TIGER property-key constants.
TK = TigerDataset.Keys
def render_district_type_and_shortcode(reg_type, props):
    """Render district type and shortcode fields from properties object.

    args:
        reg_type: a RegionType value for the feature
        props: the feature's GeoJSON properties dict (must contain TK.NAME)
    returns:
        (dist_type, shortcode) strings derived from the district name
    """
    # In MD, we replace the word "Subdistrict" with the word "District"
    dist_name = props[TK.NAME].replace("Subdistrict", "District")
    # In NJ, we need to remove the word "General" from the district type
    # "General Assembly"
    dist_name = dist_name.replace("General", '').strip()
    # generate the district type
    dist_type = dist_name.split("District")[0].strip()
    # handle Congressional 'At Large' districts seperately
    if reg_type == RegionType.CONGR and "at large" in props[TK.NAME].lower():
        shortcode = "CD AL"
    else:
        # clean the district's name into a shortcode
        shortcode = dist_name.replace('State', '')
        shortcode = re.sub(r'[^A-Z0-9\s]', '', shortcode)
        # NOTE(review): the replacement lambda indexes into the *pre-substitution*
        # string captured by the closure, effectively dropping the space in
        # "<letter> D" pairs -- confirm this closure capture is intentional.
        shortcode = re.sub(
            r'\w D', lambda m: shortcode[m.start()] + shortcode[m.end() - 1], shortcode
        )
        shortcode = shortcode.strip()
    return dist_type, shortcode
def nitpick_geojson(file_path):
    """Normalize the properties of every feature in a GeoJSON file, in place.

    Rewrites the file with a consistent NAME field, an assembled CCID, and --
    for congressional / state-legislative districts -- a uniform district
    number field plus derived shortcode and district-type fields.
    """
    with open(file_path, 'r+') as geo_file:
        geo = geojson.load(geo_file)
        for ftr in geo['features']:
            props = ftr['properties']
            r_type = RegionType.fuzzy_cast(props[TK.TYPE_CODE])
            # make NAME field consistent across all region types
            if 'NAMELSAD' in props.keys():
                props[TK.NAME] = props['NAMELSAD']
                del props['NAMELSAD']
            # build a CCID field for the region
            props[TK.CCID] = assemble_ccid(
                RegionType.fuzzy_cast(props[TK.TYPE_CODE]), props[TK.GEOID]
            )
            if r_type in (RegionType.CONGR, RegionType.SLDU, RegionType.SLDL):
                # make district number field consistent across all district types
                dist_num_key = list(
                    filter(lambda k: re.match(r'SLD[UL]ST|CD11\dFP', k), props.keys())
                ).pop()
                props[TK.DIST_NUM] = props[dist_num_key]
                del props[dist_num_key]
                # in SC, house district names seem to be malformed - where every other
                # name just includes the number, SC house districts include a 'HD-' prefix
                props[TK.NAME] = re.sub(r'HD-0*', '', props[TK.NAME])
                dist_type, shortcode = render_district_type_and_shortcode(r_type, props)
                props[TK.SHORTCODE] = shortcode
                props[TK.DIST_TYPE] = dist_type
        # Rewrite the file in place with the updated feature collection.
        geo_file.seek(0)
        geo_file.truncate()
        geo_file.write(geojson.dumps(geo))
def clean(_):
    """Convert every raw TIGER shapefile zip into a cleaned GeoJSON file.

    For each year directory under TIGER_RAW_PATH: unzip each tl*.zip into a
    temp dir, convert the shapefile to WGS84 GeoJSON via ogr2ogr (dropping
    'ZZ' water/undefined GEOIDs), then normalize properties with
    nitpick_geojson. Already-converted files are skipped.
    """
    TIGER_CLEAN_PATH.mkdir(exist_ok=True)
    for raw_year in [yp for yp in TIGER_RAW_PATH.iterdir() if str(yp.name).isdigit()]:
        (TIGER_CLEAN_PATH / raw_year.name).mkdir(exist_ok=True)
        (TIGER_CLEAN_PATH / raw_year.name / 'sldu').mkdir(exist_ok=True)
        (TIGER_CLEAN_PATH / raw_year.name / 'sldl').mkdir(exist_ok=True)
        # make a working directory for intermediate files
        (working_dir := raw_year / 'temp').mkdir(exist_ok=True)
        for raw_zip in raw_year.glob(r'**/tl*.zip'):
            # see if it already exists in clean, and continue if so
            clean_geo = Path(
                str(raw_zip).replace('/raw-data/', '/data/').replace('.zip', '.geojson')
            )
            if clean_geo.exists():
                print_cr(f"{clean_geo.name} already cleaned, skipping!")
                continue
            # unzip the zip file
            with zipfile.ZipFile(raw_zip, "r") as f:
                f.extractall(working_dir)
            working_shp = working_dir / raw_zip.name.replace('.zip', '.shp')
            print_cr(f"{working_shp} => {clean_geo}")
            subprocess.run(  # create the GeoJSON file
                [
                    "ogr2ogr",
                    "-where",
                    "GEOID NOT LIKE '%ZZ%'",
                    "-t_srs",
                    "crs:84",
                    "-f",
                    "GeoJSON",
                    str(clean_geo),
                    str(working_shp),
                ],
                check=True,
            )
            # nitpick fields in the new geojson file
            nitpick_geojson(clean_geo)
        # remove the temporary working directory (and its extracted files)
        subprocess.run(['rm', '-rf', str(working_dir)], check=True)
|
#CopyRight: Please take permission before using this script. Most importantly, please cite this work if you use this script.
#
#Citation: <NAME>, DMLWAS: Deep & Machine Learning Wide Association Studies with ExhaustiveDNN such as for genome variations linked to phenotype or drug repositioning
#
#++++++++++++++++ Author: <NAME> Email: <EMAIL> Date: 12th January 2020 Purpose: Does and Exhaustive Neural Network model building for a range of hidden layers and hidden unit values. Example: Does an Exhaustive Deep Neural Network execution on encoded data for genotype and the corresponding phenotype values. However, it can be used for any other purpose too.
################################################################
import numpy as np
import pandas as pd
import os
#importing basic library for preprocessing
data=pd.read_csv("MultiColDIPsScoredEncoded.txt") #reading encoded genotype data
x= data.values#converting into array
y=pd.read_csv("Phenotypes.txt") #Here we get the Y phenotype values
y=y.values[:,1]
# Collect the names of columns that contain any null value.
c=[]
for i in data:
    if data[i].isnull().any():
        c.append(i)
#list of columns containing null values
c.reverse()
for i in c:
    data=data.drop(i,axis=1)
#dropping null columns from back direction in order to prevent column number change
x=data.values
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=.3,random_state=0)
classifier = Sequential()
def add_layer(i,clf):
    """Append a hidden Dense layer with i relu units to clf and return it."""
    # NOTE(review): output_dim/init are Keras 1.x argument names
    # (units/kernel_initializer in Keras 2) -- requires an old Keras.
    clf.add(Dense(output_dim = i,
        init = 'uniform',
        activation = 'relu'))
    return(clf)
#module for adding layer after initialization i will be number of hidden units clf will be model constructed
def initiate(clf,column_no,i):
    """Add the input layer (column_no features) plus the first hidden layer of i units."""
    clf.add(Dense(output_dim = i,
        init = 'uniform',
        activation = 'relu',
        input_dim = column_no))
    return(clf)
#we are initiating our neural with input layer number and first hidden layer hidden units number
def output(clf):
    """Add the single sigmoid output unit for binary classification."""
    clf.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
    return(clf)
#here we are getting output from neural network
def compiler(clf):
    """Compile clf with adam optimizer, binary cross-entropy loss, accuracy metric."""
    clf.compile(optimizer = 'adam',
        loss = 'binary_crossentropy',
        metrics = ['accuracy'])
    return(clf)
#here we are compiling our model
earlyStopping=keras.callbacks.EarlyStopping(monitor="val_loss",patience=200,verbose=1,mode="auto")
#we are creating model for early stop
def fitMyModel(clf,x,y,x1,y1,b=10000,n=1000):
    """Fit clf on (x, y), validating on (x1, y1); b is the batch size, n the epoch count."""
    clf.fit(x, y, batch_size = b, nb_epoch = n,callbacks=[earlyStopping],validation_data=[x1,y1])
    return(clf)
#n=1000 is the epoch default
#this module is used for fitting
#b is batch size; by default we keep it at 10000 rows
from sklearn.model_selection import StratifiedKFold
#for cross validation
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=10)
def savemodel(k,i,clf):
    """Save the model architecture (json) and weights (h5) into the model/ directory."""
    model_json = clf.to_json()
    with open("model/model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    clf.save_weights("model/model.h5")
    print("Saved model to disk")
#here we are saving model skeleton and its weights and bias in model directory
def deletePrevModel():
    """Delete all files from model/ before a new best model is saved."""
    ls=os.listdir("model")
    for i in ls:
        os.remove("model/{}".format(i))
#when we get new best values we have to delete the old model and weights
m=2
n=8
#m is min number of hidden layer
#n is max number of hidden layer
p=8 #min number of hidden units
q=12 #max number of hidden units
b=10 # is batch size
# BUG FIX: the epoch count was previously also assigned to `n`, clobbering the
# max-hidden-layer bound above so the layer loop ran toward 1000 layers.
# Keep the epoch count in its own variable.
epochs=1000 #epoch
from sklearn.metrics import confusion_matrix
best_score=0
# Exhaustive grid search over hidden-unit count (k) and hidden-layer count (i),
# scoring each architecture by 10-fold cross-validated accuracy.
for k in range(p,q,1): # loop for hidden unit
    for i in range(m,n,1): # loop for hidden layer
        clf = Sequential()
        clf=initiate(clf,x.shape[1],k)# here we are initiating model k in number of first hidden units
        for j in range(i): #executing each hidden layer
            clf=add_layer(k,clf)
        clf =output(clf)
        clf=compiler(clf)
        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=10)
        scores=[]
        for train, test in kfold.split(x, y):
            clf=fitMyModel(clf,x[train],y[train],x[test],y[test],b,epochs)
            score = clf.evaluate(x[test], y[test], verbose=0)
            scores.append(score[1])
        avg_score=np.mean(scores)
        if avg_score>best_score:
            # New best configuration: replace the saved model, then record
            # its held-out confusion matrix and score.
            deletePrevModel()
            savemodel(k,i,clf)
            best_score=avg_score
            # Predicting the Test set results
            y_pred = clf.predict(xtest)
            y_pred = (y_pred > 0.5)
            cm = confusion_matrix(ytest, y_pred)
            #writing to file
            f=open("model/score.txt","w")
            f.write("hidden units: {} \nhidden layers: {} \nbest_score:{} \nconfusion_matrix:{}".format(k,i,best_score,cm))
            f.close()
        del(clf)
#here either early stopping condition is met or epoch end is met the model will save and terminate for each value in loop
|
<gh_stars>1-10
# ==============================================================================
# MIT License
#
# Copyright 2021 Institute for Automotive Engineering of RWTH Aachen University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import tensorflow as tf
import utils
from third_party.point_pillars_3 import getPointPillarsModel
def getModel(y_min, y_max, x_min, x_max, step_x_size, step_y_size,
             max_points_per_pillar, max_pillars, number_features,
             number_channels, label_resize_shape, batch_size):
    """Build the PointPillars feature net with an evidential prediction head.

    The grid extent [x_min, x_max] x [y_min, y_max] and step sizes define the
    pillar-grid resolution (Xn, Yn). Returns a keras Model mapping
    [pillars, indices] -> per-cell 2-channel evidence map.
    NOTE(review): label_resize_shape is accepted but unused here -- confirm intent.
    """
    # Number of pillar-grid cells along x and y.
    Xn = int((x_max - x_min) / step_x_size)
    Yn = int((y_max - y_min) / step_y_size)
    # Point Pillars Feature Net
    input_pillars, input_indices, concat = getPointPillarsModel(
        tuple([Xn, Yn]), int(max_pillars), int(max_points_per_pillar),
        int(number_features), int(number_channels), batch_size)
    # Evidential Prediction Head
    # relu keeps both evidence outputs non-negative.
    prediction = tf.keras.layers.Conv2D(2, (3, 3),
                                        padding="same",
                                        name="ogm/conv2d",
                                        activation="relu")(concat)
    return tf.keras.models.Model([input_pillars, input_indices], [prediction])
def getLoss():
    """Return the evidential expected-MSE loss used for training this model."""
    return ExpectedMeanSquaredError()
class ExpectedMeanSquaredError(tf.keras.losses.Loss):
    """Expected mean-squared-error loss over Dirichlet evidences.

    Combines the expected squared error with a predictive-variance term and a
    KL regularizer toward the uniform Dirichlet whose weight ramps up with
    epoch_num (capped at 1 after epoch 10).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Ramp factor for the KL term; presumably updated externally during
        # training -- TODO confirm where epoch_num gets assigned.
        self.epoch_num = tf.Variable(0.0)
    def call(self, y_true: tf.Tensor, y_pred: tf.Tensor):
        """Compute the scalar loss from predicted evidences vs one-hot targets."""
        prob, _, S, num_evidential_classes = utils.evidences_to_masses(y_pred)
        # Expected squared error plus the Dirichlet variance term.
        loss = tf.math.add(
            tf.reduce_sum((y_true - prob)**2, axis=-1, keepdims=True),
            tf.reduce_sum(prob * (1 - prob) / (S + 1), axis=-1, keepdims=True))
        # Dirichlet parameters with the true class' evidence removed.
        alpha = y_pred * (1 - y_true) + 1
        # KL weight grows linearly as epoch_num/10, clipped at 1.
        KL_reg = tf.minimum(1.0, tf.cast(self.epoch_num / 10,
                                         tf.float32)) * self.kl_regularization(
                                             alpha, num_evidential_classes)
        loss = loss + KL_reg
        # higher weight for loss on evidence for state "occupied" because it is underrepresented in training data
        weight_occupied = 100
        loss = tf.where(y_true[..., 1] > 0.5,
                        tf.squeeze(loss * weight_occupied, axis=-1),
                        tf.squeeze(loss, axis=-1))
        loss = tf.reduce_mean(loss)
        return loss
    def kl_regularization(self, alpha, K):
        """KL( Dir(alpha) || Dir(1,...,1) ), reduced over the class axis."""
        beta = tf.ones_like(alpha)
        S_alpha = tf.reduce_sum(alpha, axis=-1, keepdims=True)
        KL = tf.math.add_n([
            tf.reduce_sum((alpha - beta) *
                          (tf.math.digamma(alpha) - tf.math.digamma(S_alpha)),
                          axis=-1,
                          keepdims=True),
            tf.math.lgamma(S_alpha) -
            tf.reduce_sum(tf.math.lgamma(alpha), axis=-1, keepdims=True),
            tf.reduce_sum(tf.math.lgamma(beta), axis=-1, keepdims=True) -
            tf.math.lgamma(tf.reduce_sum(beta, axis=-1, keepdims=True))
        ])
        return KL
|
import logging
import re
from mythril.analysis import solver
from mythril.analysis.ops import *
from mythril.analysis.report import Issue
from mythril.exceptions import UnsatError
from mythril.laser.ethereum.taint_analysis import TaintRunner
from z3 import Z3Exception
'''
    MODULE DESCRIPTION:
    This module finds what could be a token system.
    Requirement: a check of the sender's balance must appear in the constraints.
'''
def execute(statespace):
    """ Executes the analysis module"""
    issues = []

    # Taint-track every CALLDATALOAD result through the statespace.
    taints = []
    for state, node in _get_states_with_opcode(statespace, "CALLDATALOAD"):
        initial_taint = [False for _ in state.mstate.stack]
        initial_taint[-1] = True
        taints.append(TaintRunner.execute(statespace, node, state,
                                          initial_stack=initial_taint))

    # Look for SSTORE pairs that together resemble a token transfer.
    for state, node in _get_tainted_sstores(statespace, taints):
        function_name = node.contract_name + "." + node.function_name
        following_sstores = _get_sstore_along_the_line(statespace, node)
        if not following_sstores:
            continue
        relevant_constraints = list(
            map(_normalize_constraint,
                filter(_relevant_constraint, node.constraints)))
        matches = check_for_token_pattern(
            state, following_sstores, relevant_constraints, function_name)
        if matches:
            issues.append(Issue(node.contract_name, node.function_name, None,
                                "Found a transfer like function",
                                "WUPI"))
    return issues
def check_for_token_pattern(sstore_start, following_sstores, relevant_n_constraints, f_name):
    """Search for the two-SSTORE "transfer" pattern.

    A token transfer is assumed to be one SSTORE whose value involves a
    subtraction or addition (sender balance update) followed by another
    SSTORE using the complementary operation (receiver balance update),
    both tied to the same balance-check constraint.

    Improvement: removed two `_get_value_sstore` calls whose results only
    fed commented-out debug prints (dead computation, including a stack
    deep-copy per iteration).

    Parameters:
        sstore_start - global state of the first (tainted) SSTORE
        following_sstores - SSTORE states reachable after sstore_start
        relevant_n_constraints - normalized {"gt": ..., "lt": ...} dicts
        f_name - "<contract>.<function>" name, diagnostics only
    Return value:
        list of {"store1", "store2", "constraint"} dicts, one per match
    """
    matches = []
    op_set = set(["bvsub", "bvadd"])
    # Constraints already matched by the first store, with the arithmetic
    # op each one used there.
    matching_constraints = check_sstore_value(
        sstore_start, op_set, relevant_n_constraints, False, True, f_name)
    for f_sstore in following_sstores:
        for c in matching_constraints:
            # The second store must use the *other* operation (sub vs add).
            res = check_sstore_value(
                f_sstore, op_set - set([c["op"]]), [c["constraint"]],
                c["had_constraint_in_index_or_val"], False, f_name)
            if len(res) > 0:
                matches.append({"store1": sstore_start,
                                "store2": f_sstore,
                                "constraint": c})
    return matches
# must contain relevant constraint storagevalue as well as plus or minus
def fuzzy_compare_terms(a, b):
    """Loosely compare two z3 terms.

    Terms count as equal when their normalized string forms match, when *a*
    is the "storage_"-prefixed form of *b*, or when they compare equal
    structurally. Returns None when z3 fails to stringify a term.
    """
    try:
        str_a = term_str(a)
        str_b = term_str(b)
        return str_a == "storage_" + str_b or str_a == str_b or a == b
    except Z3Exception:
        return None
def check_sstore_value(sstore, s_ops, relevant_n_constraints, had_constraint_in_index_or_val, first, f_name):
    """Check whether an SSTORE's value matches the token-transfer pattern.

    For each normalized constraint, the stored value must read from storage,
    read from the calldata term of the constraint, and use one of the
    arithmetic ops in *s_ops*; additionally the store must reference its own
    slot or the constraint's storage term (unless a previous store already
    established that link).

    Improvements: `== None` / `!= None` replaced with identity tests, the
    placeholder exception message replaced with a meaningful one, and
    commented-out debug logging removed.

    Parameters:
        sstore - SSTORE global state to inspect
        s_ops - set of permitted z3 op names (e.g. {"bvsub", "bvadd"})
        relevant_n_constraints - normalized {"gt": ..., "lt": ...} dicts
        had_constraint_in_index_or_val - True if a previous store already
            referenced the constraint's storage term
        first - True when checking the first store of the pair (currently
            unused; kept for interface compatibility)
        f_name - "<contract>.<function>" name, diagnostics only
    Return value:
        list of {"constraint", "op", "had_constraint_in_index_or_val"} dicts
    """
    sstore_i, sstore_val = _get_value_sstore(sstore)
    matches = []
    for constraint in relevant_n_constraints:
        if sstore_i is None:
            raise Exception("SSTORE state has no index operand on the stack")
        # Value must read from storage (taint analysis already guarantees
        # calldata influence for the first store).
        storage_in_term = in_term(
            sstore_val, lambda x: _contains_storage(x) is not None)
        # Value references its own storage slot (self-update).
        index_self_ref = in_term(
            sstore_val, lambda x: fuzzy_compare_terms(x, sstore_i))
        if not had_constraint_in_index_or_val:
            constraint_in_index = in_term(
                sstore_i, lambda x: fuzzy_compare_terms(x, constraint["gt"]))
            constraint_in_value = in_term(
                sstore_val, lambda x: fuzzy_compare_terms(x, constraint["gt"]))
        else:
            constraint_in_index = True
            constraint_in_value = True
        calldata_in_term = in_term(
            sstore_val, lambda x: str(x) == str(constraint["lt"]))
        op_in_term = in_term(sstore_val, lambda x: x.decl().name() in s_ops)
        if (storage_in_term is not None and calldata_in_term is not None
                and op_in_term is not None
                and (index_self_ref is not None
                     or constraint_in_index is not None
                     or constraint_in_value is not None)):
            matches.append({
                "constraint": constraint,
                "op": op_in_term.decl().name(),
                "had_constraint_in_index_or_val":
                    constraint_in_index is not None
                    or constraint_in_value is not None,
            })
    return matches
def term_str(term):
    """Normalize a z3 term to a single-line, trimmed string."""
    flattened = str(term).replace('\n', ' ').replace('\r', '')
    return flattened.strip()
def in_term(term, f):
    """Depth-first (pre-order) search of *term* for a subterm matching *f*.

    Improvement: `!= None` replaced with the `is not None` identity test.

    Parameters:
        term - candidate z3 expression; non-expressions yield None
        f - predicate applied to each subterm
    Return value:
        the first matching subterm, or None if no subterm matches
    """
    if not isinstance(term, ExprRef):
        return None
    if f(term):
        return term
    for i in range(term.num_args()):
        match = in_term(term.arg(i), f)
        if match is not None:
            return match
    return None
def _relevant_constraint(constraint):
    """Return True if *constraint* compares calldata against storage.

    A constraint is relevant when it is an unsigned bitvector comparison in
    which the storage term is on the "greater" side and the calldata term on
    the "lesser" side — i.e. it looks like a sender-balance check.

    Improvements: removed the unreachable `return True` after the exhaustive
    if/elif chain, removed the unreachable `else: raise` branch (all four
    ops are covered by the guard above), merged the pairwise-identical
    branches, and replaced `!= None` with identity tests.
    """
    if not isinstance(constraint, ExprRef):
        return False
    root_op = constraint.decl().name()
    if root_op not in ("bvult", "bvugt", "bvuge", "bvule"):
        return False
    lhs = constraint.arg(0)
    rhs = constraint.arg(1)
    if root_op in ("bvult", "bvule"):
        # lhs < rhs / lhs <= rhs: calldata left, storage right.
        return (_contains_calldata(lhs) is not None
                and _contains_storage(rhs) is not None)
    # lhs > rhs / lhs >= rhs: storage left, calldata right.
    return (_contains_calldata(rhs) is not None
            and _contains_storage(lhs) is not None)
def _normalize_constraint(constraint):
    """Orient a comparison constraint into {"gt": term, "lt": term} form.

    Raises for non-z3 inputs; returns False when the constraint is not one
    of the unsigned bitvector comparisons.
    """
    if not isinstance(constraint, ExprRef):
        raise Exception("ahhhh you are soooo not normal")
    root_op = constraint.decl().name()
    if root_op not in ("bvult", "bvugt", "bvuge", "bvule"):
        return False
    lhs = constraint.arg(0)
    rhs = constraint.arg(1)
    if root_op in ("bvult", "bvule"):
        # lhs < rhs / lhs <= rhs: right-hand side is the greater term.
        return {"gt": rhs, "lt": lhs}
    # lhs > rhs / lhs >= rhs: left-hand side is the greater term.
    return {"gt": lhs, "lt": rhs}
def _contains_calldata(z3_term):
#logging.debug("%s should contain calldata"%str(z3_term))
m = re.search(r'calldata_MAIN\[([0-9]+)\]', str(z3_term))
if(m):
offset = m.group(1)
ret = "calldata_MAIN[%s]"%(offset)
#logging.debug("YES %s"%(ret))
return ret
else:
#logging.debug("NO")
return None
def _contains_storage(z3_term):
#logging.debug("%s should contain storage"%str(z3_term))
m = re.match(r'storage_([a-z0-9_&^]+)', str(z3_term))
if(m):
#logging.debug("YES")
return True
else:
#logging.debug("NO")
return None
def _get_sstore_along_the_line(statespace, node_to_start):
    """Collect SSTORE states reachable from *node_to_start* (depth-first)."""
    return _search_children(statespace, node_to_start, constraint=None)
def _get_tainted_sstores(statespace, taints):
    """Yield (state, node) pairs for SSTOREs whose stored value is tainted.

    BUG FIX: the original yielded the same (state, node) pair once per
    matching taint record, so a store tainted by several CALLDATALOADs was
    processed (and reported) multiple times by execute(). Each SSTORE is
    now yielded at most once.
    """
    for state, node in _get_states_with_opcode(statespace, "SSTORE"):
        if any(_check_sstore(state, taint) for taint in taints):
            yield state, node
def _get_states_with_opcode(statespace, opcode):
""" Gets all (state, node) tuples in in with opcode"""
for k in statespace.nodes:
node = statespace.nodes[k]
for state in node.states:
if state.get_current_instruction()["opcode"] == opcode:
yield state, node
def _check_usage(state, taint_result):
    """Delegates checks to _check_{instruction_name}()"""
    opcode = state.get_current_instruction()['opcode']
    if opcode == 'SSTORE' and _check_sstore(state, taint_result):
        return [state]
    return []
def _check_sstore(state, taint_result):
""" Check if store operation is dependent on the result of expression"""
assert state.get_current_instruction()['opcode'] == 'SSTORE'
return taint_result.check(state, -2)
def _get_value_sstore(state_sstore):
#logging.info(state_sstore.get_current_instruction()['opcode'])
assert state_sstore.get_current_instruction()['opcode'] == 'SSTORE'
stack = copy.deepcopy(state_sstore.mstate.stack)
to = stack.pop()
val = stack.pop()
#index, value = state_sstore.mstate.stack[-1], state_sstore.mstate.stack[-2]
return to, val
def _search_children(statespace, node, constraint=None, index=0, depth=0, max_depth=64):
    """
    Checks the statespace for child states with SSTORE instructions that
    are not guarded by a revert/assert check.

    BUG FIX: the `constraint` parameter previously defaulted to a mutable
    list (`[]`), which Python shares across all calls of the function; it
    now defaults to None (it is only handed through to _check_requires,
    which ignores it, so behaviour is otherwise unchanged).

    :param statespace: The statespace to explore
    :param node: Current node to explore from
    :param constraint: Constraints handed through to _check_requires
    :param index: Current state index node.states[index] == current_state
    :param depth: Current depth level
    :param max_depth: Max depth to explore
    :return: List of states that match the opcodes and are dependent on expression
    """
    results = []
    if depth >= max_depth:
        return []
    # Scan the remaining states of this node from `index` onwards.
    for j in range(index, len(node.states)):
        current_state = node.states[j]
        current_instruction = current_state.get_current_instruction()
        if current_instruction['opcode'] in ['SSTORE']:
            # Skip stores guarded by a revert/assert check.
            if _check_requires(current_state, node, statespace, constraint):
                continue
            results.append(current_state)
    # Recursively search child nodes.
    children = [
        statespace.nodes[edge.node_to]
        for edge in statespace.edges
        if edge.node_from == node.uid
    ]
    for child in children:
        results += _search_children(statespace, child, depth=depth + 1,
                                    max_depth=max_depth)
    return results
def _check_requires(state, node, statespace, constraint):
"""Checks if usage of overflowed statement results in a revert statement"""
instruction = state.get_current_instruction()
if instruction['opcode'] is not "JUMPI":
return False
children = [
statespace.nodes[edge.node_to]
for edge in statespace.edges
if edge.node_from == node.uid
]
for child in children:
opcodes = [s.get_current_instruction()['opcode'] for s in child.states]
if "REVERT" in opcodes or "ASSERT_FAIL" in opcodes:
return True
# I added the following case, bc of false positives if the max depth is not high enough
if len(children) == 0:
return True
return False
################# NOT USED
def _dependent_on_storage(expression):
    """Return all storage symbol names occurring in the simplified expression."""
    simplified = str(simplify(expression))
    return re.findall(r"storage_[a-z0-9_&^]*[0-9]+", simplified)
def _get_storage_variable(storage, state):
"""
Get storage z3 object given storage name and the state
:param storage: storage name example: storage_0
:param state: state to retrieve the variable from
:return: z3 object representing storage
"""
index = int(re.search('[0-9]+', storage).group())
try:
return state.environment.active_account.storage[index]
except KeyError:
return None
def _can_change(constraints, variable):
    """ Checks if the variable can change given some constraints """
    try:
        model = solver.get_model(copy.deepcopy(constraints))
    except UnsatError:
        # Constraints themselves are unsatisfiable.
        return False
    try:
        current = int(str(model.eval(variable, model_completion=True)))
        # Satisfiable with a different value means the variable can change.
        return _try_constraints(constraints, [variable != current]) is not None
    except AttributeError:
        return False
def _get_influencing_storages(call):
    """ Examines a Call object and returns an iterator of all storages that influence the call value or direction"""
    state = call.state
    node = call.node
    # Gather storage symbols feeding the call target and call value.
    storages = []
    for operand in (call.to, call.value):
        if operand.type == VarType.SYMBOLIC:
            storages += _dependent_on_storage(operand.val)
    # Keep only those that can still vary under the node's constraints.
    for storage in storages:
        variable = _get_storage_variable(storage, state)
        if _can_change(node.constraints, variable):
            yield storage
def _get_influencing_sstores(statespace, interesting_storages):
    """ Gets sstore (state, node) tuples that write to interesting_storages"""
    for sstore_state, node in _get_states_with_opcode(statespace, 'SSTORE'):
        slot = sstore_state.mstate.stack[-1]
        try:
            slot = util.get_concrete_int(slot)
        except AttributeError:
            # Symbolic index: fall back to its string form.
            slot = str(slot)
        if "storage_{}".format(slot) in interesting_storages:
            yield sstore_state, node
# TODO: remove
def _try_constraints(constraints, new_constraints):
    """
    Tries new constraints
    :return Model if satisfiable otherwise None
    """
    combined = copy.deepcopy(constraints)
    combined.extend(copy.deepcopy(c) for c in new_constraints)
    try:
        return solver.get_model(combined)
    except UnsatError:
        return None
|
import sys
sys.path.insert(0,'../')
import unittest
import os
import crawl
from crawl_test import FIXTURE_ROOT,TestBase
class SharedTests(object):
    # Mixin of test cases shared by CrawlTest (live file system) and
    # IndexTest (frozen index snapshot); subclasses override new_crawl()
    # to choose which implementation is exercised.
    def new_crawl(self,callback=None):
        # Build the standard fixture search path. `callback`, if given,
        # may transform the Crawl before use (tests use it to reverse the
        # path or extension order).
        search_path = crawl.Crawl(FIXTURE_ROOT)
        search_path.append_paths("app/views","vendor/plugins/signal_id/app/views",".")
        # Extensions may be given with or without a leading dot.
        search_path.append_extensions("builder","coffee","str",".erb")
        search_path.alias_extension('htm',"html")
        search_path.alias_extension('xhtml',"html")
        search_path.alias_extension('php',"html")
        search_path.alias_extension('coffee',"js")
        return callback(search_path) if callback else search_path
    def setUp(self):
        self.crawl = self.new_crawl()
    def fixture_path(self,path):
        # Absolute path of a file inside the fixture tree.
        return os.path.abspath(os.path.join(FIXTURE_ROOT,path))
    def testRoot(self):
        self.assertEqual(FIXTURE_ROOT,self.crawl.root)
    def testPaths(self):
        # Paths are expanded to absolute paths, in the order appended.
        self.assertEqual(
            [
                self.fixture_path('app/views'),
                self.fixture_path('vendor/plugins/signal_id/app/views'),
                self.fixture_path('.')
            ],
            self.crawl.paths
        )
    def testExtensions(self):
        # Extensions are normalized to always carry a leading dot.
        self.assertEqual([".builder",".coffee",".str",".erb"],self.crawl.extensions)
    def testIndex(self):
        self.assertIsInstance(self.crawl.index(),crawl.index.Index)
    def testFindNonexistantFile(self):
        self.assertIsNone(self.crawl.find("people/show.html"))
    def testFindWithoutExtension(self):
        self.assertEqual(
            self.fixture_path("app/views/projects/index.html.erb"),
            self.crawl.find("projects/index.html")
        )
    def testFindWithExtension(self):
        self.assertEqual(
            self.fixture_path("app/views/projects/index.html.erb"),
            self.crawl.find("projects/index.html.erb")
        )
    def testFindWithLeadingSlash(self):
        self.assertEqual(
            self.fixture_path("app/views/projects/index.html.erb"),
            self.crawl.find("/projects/index.html")
        )
    def testFindRespectsPathOrder(self):
        # The same logical path exists under two search paths; the one
        # from the earlier path wins, and reversing the paths flips it.
        self.assertEqual(
            self.fixture_path("app/views/layouts/interstitial.html.erb"),
            self.crawl.find('layouts/interstitial.html')
        )
        def reverse_paths(search):
            search.paths.reverse()
            return search
        search = self.new_crawl(callback=reverse_paths)
        self.assertEqual(
            self.fixture_path("vendor/plugins/signal_id/app/views/layouts/interstitial.html.erb"),
            search.find('layouts/interstitial.html')
        )
    def testFindRespectsExtensionOrder(self):
        # Both .builder and .erb variants exist; extension order decides.
        self.assertEqual(
            self.fixture_path("app/views/recordings/index.atom.builder"),
            self.crawl.find("recordings/index.atom")
        )
        def reverse_exts(search):
            search.extensions.reverse()
            return search
        search = self.new_crawl(callback=reverse_exts)
        self.assertEqual(
            self.fixture_path("app/views/recordings/index.atom.erb"),
            search.find("recordings/index.atom")
        )
    def testFindWithMultipleLogicalPathsReturnsFirstMatch(self):
        self.assertEqual(
            self.fixture_path("app/views/recordings/index.html.erb"),
            self.crawl.find("recordings/index.txt","recordings/index.html","recordings/index.atom")
        )
    def testFindFileInPathRootReturnsExpandedPath(self):
        self.assertEqual(
            self.fixture_path("app/views/index.html.erb"),
            self.crawl.find("index.html")
        )
    def testFindExtensionlessFile(self):
        self.assertEqual(
            self.fixture_path('README'),
            self.crawl.find('README')
        )
    def testFindFileWithMultipleExtensions(self):
        self.assertEqual(
            self.fixture_path("app/views/projects/project.js.coffee.erb"),
            self.crawl.find("projects/project.js")
        )
    def testFindFileWithMultipleExtensionsRespectsExtensionOrder(self):
        self.assertEqual(
            self.fixture_path("app/views/application.js.coffee.str"),
            self.crawl.find("application.js")
        )
        def reverse_exts(search):
            search.extensions.reverse()
            return search
        search = self.new_crawl(callback=reverse_exts)
        self.assertEqual(
            self.fixture_path("app/views/application.js.coffee.erb"),
            search.find("application.js")
        )
    def testFindFileByAliasedExtension(self):
        # .coffee is aliased to .js, .htm to .html: the file is found
        # under both its real extension and the alias target.
        self.assertEqual(
            self.fixture_path("app/views/people.coffee"),
            self.crawl.find('people.coffee')
        )
        self.assertEqual(
            self.fixture_path("app/views/people.coffee"),
            self.crawl.find('people.js')
        )
        self.assertEqual(
            self.fixture_path("app/views/people.htm"),
            self.crawl.find('people.htm')
        )
        self.assertEqual(
            self.fixture_path("app/views/people.htm"),
            self.crawl.find('people.html')
        )
    def testFindFileWithAliasesPrefersPrimaryExtension(self):
        # index.html.erb and index.php (aliased to html) both match
        # "index.html"; the primary extension wins.
        self.assertEqual(
            self.fixture_path("app/views/index.html.erb"),
            self.crawl.find("index.html")
        )
        self.assertEqual(
            self.fixture_path("app/views/index.php"),
            self.crawl.find("index.php")
        )
    def testFindWithBasePathOptionAndRelativeLogicalPath(self):
        self.assertEqual(
            self.fixture_path("app/views/projects/index.html.erb"),
            self.crawl.find("./index.html",base_path = self.fixture_path("app/views/projects"))
        )
    def testFindIgnoresBasePathOptionWhenLogicalPathNotRelative(self):
        self.assertEqual(
            self.fixture_path("app/views/index.html.erb"),
            self.crawl.find("index.html",base_path = self.fixture_path("app/views/projects"))
        )
    def testBasePathOptionMustBeExpanded(self):
        # NOTE(review): setUp() is already run by the test framework;
        # this explicit call looks redundant — confirm before removing.
        self.setUp()
        self.assertIsNone(self.crawl.find('./index.html',base_path='app/views/projects'))
    def testFindAllRespectsPathOrder(self):
        # With a callback, find() reports every match in path order
        # rather than stopping at the first.
        results = []
        def callback(paths):
            results.extend(paths)
        self.crawl.find("layouts/interstitial.html",callback=callback)
        self.assertEqual(
            [
                self.fixture_path("app/views/layouts/interstitial.html.erb"),
                self.fixture_path("vendor/plugins/signal_id/app/views/layouts/interstitial.html.erb")
            ],
            results
        )
    def testFindAllWithMultipleExtensionsRespectsExtensionOrder(self):
        results = []
        def callback(paths):
            results.extend(paths)
        self.crawl.find("application.js",callback=callback)
        self.assertEqual(
            [
                self.fixture_path("app/views/application.js.coffee.str"),
                self.fixture_path("app/views/application.js.coffee.erb")
            ],
            results
        )
    def testFindFilenameInsteadOfDirectory(self):
        # "projects" names both a directory and projects.erb;
        # the file wins.
        self.assertEqual(
            self.fixture_path("app/views/projects.erb"),
            self.crawl.find("projects")
        )
    def testIgnoresDirectories(self):
        self.assertIsNone(self.crawl.find("recordings"))
    def testEntries(self):
        # entries() lists directory contents (files and subdirectories).
        expected = [
            "application.js.coffee.erb",
            "application.js.coffee.str",
            "index.html.erb",
            "index.php",
            "layouts",
            "people.coffee",
            "people.htm",
            "projects",
            "projects.erb",
            "recordings"
        ]
        self.assertEqual(
            expected,
            sorted(self.crawl.entries(self.fixture_path("app/views")))
        )
    def testStat(self):
        # stat() works for files and directories, None for missing paths.
        assert self.crawl.stat(self.fixture_path("app/views/index.html.erb"))
        assert self.crawl.stat(self.fixture_path("app/views"))
        self.assertIsNone(self.crawl.stat(self.fixture_path("app/views/missing.html")))
class CrawlTest(SharedTests,TestBase):
    """Runs the shared tests against a live (non-indexed) Crawl."""

    def testRootDefaultsToCWD(self):
        """Crawl() with no argument roots itself at the current directory.

        BUG FIX: the original saved ``os.curdir`` — which is the literal
        string "." — instead of ``os.getcwd()``, so ``os.chdir(cur_dir)``
        was a no-op and the working directory was never restored. It is now
        captured properly and restored in a ``finally`` block even if the
        assertion fails.
        """
        cur_dir = os.getcwd()
        os.chdir(FIXTURE_ROOT)
        try:
            search = crawl.Crawl()
            self.assertEqual(FIXTURE_ROOT,search.root)
        finally:
            os.chdir(cur_dir)

    def testFindReflectsChangesInTheFileSystem(self):
        """A non-indexed Crawl sees files created after construction."""
        try:
            self.assertIsNone(self.crawl.find("dashboard.html"))
            # Use a context manager so the handle is closed even on error.
            with open(self.fixture_path('dashboard.html'),'w') as f:
                f.write('dashboard')
            self.assertEqual(
                self.fixture_path('dashboard.html'),
                self.crawl.find('dashboard.html')
            )
        finally:
            os.unlink(self.fixture_path('dashboard.html'))
            assert not os.path.exists(self.fixture_path('dashboard.html'))
class IndexTest(SharedTests,TestBase):
    """Runs the shared tests against a frozen Index snapshot of the Crawl."""

    def new_crawl(self,callback=None):
        """Build the shared Crawl fixture, then freeze it into an Index."""
        search = super(IndexTest,self).new_crawl(callback=callback)
        return search.index()

    def testChangingTrailPathDoesntAffectIndex(self):
        """Appending paths to the Crawl must not leak into an existing Index."""
        search = crawl.Crawl(FIXTURE_ROOT)
        search.paths.append('.')
        index = search.index()
        self.assertEqual([self.fixture_path('.')],search.paths)
        self.assertEqual([self.fixture_path('.')],index.paths)
        search.paths.append("app/views")
        self.assertEqual(
            [self.fixture_path("."),self.fixture_path("app/views")],
            search.paths
        )
        self.assertEqual([self.fixture_path('.')],index.paths)

    def testChangingTrailExtensionsDoesntAffectIndex(self):
        """Appending extensions to the Crawl must not leak into an Index."""
        search = crawl.Crawl(FIXTURE_ROOT)
        search.extensions.append('builder')
        index = search.index()
        self.assertEqual(['.builder'],search.extensions)
        self.assertEqual(['.builder'],index.extensions)
        search.extensions.append('str')
        self.assertEqual(['.builder','.str'],search.extensions)
        self.assertEqual(['.builder'],index.extensions)

    def testFindDoesNotReflectChangesInTheFileSystem(self):
        """An Index is a snapshot: files created later are not found.

        FIX: the original guarded assertIsNone behind hasattr() with a
        fallback to the deprecated assertEquals; SharedTests already calls
        assertIsNone unconditionally, so the guard was dead compatibility
        code. The file write also now uses a context manager.
        """
        try:
            self.assertIsNone(self.crawl.find("dashboard.html"))
            with open(self.fixture_path('dashboard.html'),'w') as f:
                f.write('dashboard')
            self.assertIsNone(self.crawl.find("dashboard.html"))
        finally:
            os.unlink(self.fixture_path('dashboard.html'))
            assert not os.path.exists(self.fixture_path('dashboard.html'))
|
###############################################################################
#
# ptrelpos.py - find relative positions to place protein cartoon elements
#
# File: ptrelpos.py
# Author: <NAME>
# Created: October 2007
#
# $Id: ptrelpos.py 1482 2008-06-21 08:32:24Z astivala $
#
#
###############################################################################
import Bio.PDB
from ptnode import *
from ptdistmatrix import PTDistMatrix, calc_residue_dist
from pttableau import PTTableau
# TODO: have use_longest_for_orientation to choose to use longest rather
# than nearest strand in sheet for orientation. Currently using nearest
# (using longest makes 2PEE-3 different from other serpins (1QLP, 1MTP, etc.)
# for example, due to very bent longest strand in large sheet).
# Maybe should use strand with best fitting axis for oriention instead?
#-----------------------------------------------------------------------------
#
# Module globals
#
#-----------------------------------------------------------------------------
# constants
RELPOS_ABOVE = 0
RELPOS_BELOW = 1
RELPOS_LEFT = 2
RELPOS_RIGHT = 3
# global variables
verbose = False
#-----------------------------------------------------------------------------
#
# Class definitions
#
#-----------------------------------------------------------------------------
class PTRelativePosition:
"""
PTRelativePosition is a class for finding the relative position
of SSEs to each other for laying them out in the cartoon, using information
from the PDB structure and the distance matrices and sheet (strand
position) information that has already been determined.
"""
    def __init__(self, pdb_struct, distmatrix, sheet_strandlists_dict, tableau,
                 chain_dict, sheet_dict):
        """
        Store the precomputed structural information for this protein.

        Parameters:
           pdb_struct - The Bio.PDB parsed PDB struct (atomic co-ordinates)
                        for this protein.
           distmatrix - The PTDistMatrix distance matrices for this protein.
           sheet_strandlists_dict -
                        dictionary of { sheet_id : list of list of nodes }
                        where the list of list of nodes is
                        described in build_sheet_constraints()
           tableau - the PTTableau which has been built for this protein
           chain_dict - Each value of the chain_dict is a
                        list of nodes in order from N to C terminus
                        so chain_dict is { chainid : node_list }
           sheet_dict - dict of {sheet_id : ptnode_list} representing sheets
        """
        self.pdb_struct = pdb_struct
        self.distmatrix = distmatrix
        self.sheet_strandlists_dict = sheet_strandlists_dict
        self.tableau = tableau
        self.chain_dict = chain_dict
        self.sheet_dict = sheet_dict
def get_strand_posnum(self, strand, sheet_strandlists_dict = None):
"""
Return the index of the supplied strand in its sheet
in the outermost ('horizontal') list i.e. the number of strands
it is from the 'leftmost' strand.
Parameters:
strand - PTNode strand to find position number of
sheet_strandlists_dict - the sheet strandlists dict to use
for this strand. Default None. If None, use
the data member sheet_strandlists_dict
(This is to enable this function to be used
for other domains, not the one this object is for).
The strand has to belong to the same domain as
the sheet_strandlists_dict, otherwise this makes no
sense.
Uses data members (read):
sheet_strandlists_dict -
dictionary of { sheet_id : list of list of nodes }
where the list of list of nodes is
described in build_sheet_constraints()
Return value - index in outermost list of entry for this sheet id
that the strand is in.
"""
assert(isinstance(strand, PTNodeStrand))
sheet_id = strand.get_sheet_id()
if sheet_strandlists_dict != None:
ssd = sheet_strandlists_dict
else:
ssd = self.sheet_strandlists_dict
horiz_order_list = ssd[sheet_id]
for posnum in range(len(horiz_order_list)):
if strand in horiz_order_list[posnum]:
return posnum
assert(False) # strand must be in its own sheet somewhere
def any_strands_before_or_after_strand(self, strand1, strandlist):
"""
Return True if any strand in strandlist
immediately follows or precedes strand1 in sequence,
i.e. is some strand in strandlist
is the SSE immeidately C-terminal or N-terminal of strand1
in the same chain.
Parameters:
strand1 - PTNodeStrand of strand to check if any strand is after
strandlist - list of PTNodeStrand to check if any of them
immediately follow strand1 in sequence
Return value:
True if some strand in strandlist
is immediately C-terminal or N-terminal of strand1 in chain
else False
Uses data members (Readonly):
chain_dict
Note index() raises ValueError exception if strand is not
found in the list of nodes for its chain, which should never
happen (ie if this exception is raised there is some internal
inconsistency in the chain dict or strand structure).
"""
assert(isinstance(strand1, PTNodeStrand))
chainid = strand1.get_chainid()
nodelist = self.chain_dict[chainid]
# FIXME index() is probably a linear search, should
# maybe build some dictionary to do this faster, but doesn't
# really matter that much (probably)
strand1_index = nodelist.index(strand1)
next_index = strand1_index + 1
prev_index = strand1_index - 1
if next_index >= len(nodelist) and prev_index < 0:
return False
if next_index < len(nodelist):
nextnode = nodelist[next_index]
else:
nextnode = None
if prev_index >= 0:
prevnode = nodelist[prev_index]
else:
prevnode = None
if (not isinstance(nextnode, PTNodeStrand) and
not isinstance(prevnode, PTNodeStrand)):
return False
for strand2 in strandlist:
if strand2.get_chainid() != chainid:
continue
if nextnode == strand2 or prevnode == strand2:
return True
return False
def get_longest_strand(self, horiz_order_list):
"""
Return the strand and its length (as number of residues)
of the longest starnd in the sheet specified
by its list of list of strands (horizontal outer list, each
elment list aligned vertically).
Parameters:
horiz_order_list - the sheet strand list for the sheet as built by
build_sheet_constraints
Return value:
tuple (ptnodestrand, length)
where ptnodestrand is PTNodeStrand of longest strand and length is
number of residues in longest strand in the sheet.
Uses no data members.
"""
longest_strand_length = 0
longest_strand = None
for vert_list in horiz_order_list:
if len(vert_list) < 1:
continue # should not happen anyway
strand = vert_list[0] # NOTE: assumes longest is single
# FIXME: this assumption is not always good, e.g. 1W81
# where 16 and 12 are on same vert axis both neighbours
# of 11, and 16 is about same length as 11 so 16 and 12
# together definitely longer than 11 causing overlap on figure
if strand.get_span() > longest_strand_length:
longest_strand_length = strand.get_span()
longest_strand = strand
return (longest_strand, longest_strand_length)
def flip_all_strands_in_sheet(self, sheet_id):
"""
Turn the sheet 'upside-down'.
Flip the reverse flag in each strand of the sheet i.e. set if not
set, unset if set. Initially (in build_sheet_constraints(), these
flags are set based on the first ('leftmost') strand being set as
not-reversed, but after we find orientations we may actually want
that strand the other way, so we just flip all the reversed flags.
Not only do we flip the reverse flag, we also have to
shift the align position as it was calculated (in
bulid_sheet_constraints()) with the reverse flag as it was before
(obviously). So now the offset is changed to be relative to the
other (i.e. after reversing) end of the strand, and no special
case is needed for reversed sideways strands when laying out the sheet
for the diagram.
Parmeters:
sheet_id - id of the sheet to flip reverse flags in
Return value: None
Uses data members (read/write):
sheet_strandlists_dict -
dictionary of { sheet_id : list of list of nodes }
where the list of list of nodes is
described in build_sheet_constraints()
"""
# align positions were relative to the (original) top of this strand
first_strand_len= self.sheet_strandlists_dict[sheet_id][0][0].get_span()
for strandlist in self.sheet_strandlists_dict[sheet_id]:
for strand in strandlist:
strand.set_reversed(not strand.get_reversed())
# now make the align position relative to the other end
strand.set_align_pos(first_strand_len - strand.get_align_pos()
- strand.get_span())
def reverse_strand_order_in_sheet(self, sheet_id, sheet_strandlists_dict):
"""
Flip the sheet left-to-right.
Reverse the order of the strands in the sheet.
Not only do we need to rervese the horiz order list of strands,
we also have to adjust the align positions as calculaated in
build_sheet_constraints() accordingly. These offsets were relative
to the first strand in the list (which itself is offset 0), now
that strand is the last so we need to adjust them all so new
first is offset 0 and others relative to it.
This is not as easy as going through the horiz_order_list because
of bifurcations and the order of offsets being added is the dfs
order used in the original build_sheet_constraints(), so we
recompute the align positions from scratch.
(TODO: should be a more efficient way of just
recalcuating these without calling
compute_align_positions() again to do it from scratch,
but since we need the dfs
order anyway, it does not really matter much).
Parameters:
sheet_id - id of sheet to reverse
sheet_strandlists_dict - IN/OUT
the sheet strandlists dict that contains
the sheet identified by sheet_id
Return value:
None
Uses data members:
None
Note strand nodes are also modified (the align_pos value), only
nodes that are in the sheet are referenced.
"""
# first recompute the relative align positions
start_node = sheet_strandlists_dict[sheet_id][-1][0] # start at end
dfs_list = []
dfs_strands_from(start_node, {}, dfs_list, None)
assert(start_node == dfs_list[0][0] and dfs_list[0][1] == None)
start_node.set_align_pos(0)
for (node, from_node) in dfs_list[1:]:
compute_align_positions(node, from_node)
# now reverse the list
sheet_strandlists_dict[sheet_id].reverse()
def set_all_sheet_strands_sideways(self, sheet_id):
"""
Set the sideways flag in every strand of a sheet.
Parameters:
sheet_id - id of the sheet to set sideways flags in
Return value: None
Uses data members (read/write):
sheet_strandlists_dict -
dictionary of { sheet_id : list of list of nodes }
where the list of list of nodes is
described in build_sheet_constraints()
"""
for strandlist in self.sheet_strandlists_dict[sheet_id]:
for strand in strandlist:
strand.set_sideways(True)
def get_relative_position(self, reference_element, test_element):
    """
    Find the relative position of test_element relative to
    reference_element.

    Parameters:
        reference_element - an element (either sheet id e.g. 'A' or
                   helix (PTNodeHelix object)) to find position relative to
        test_element - an element (as per reference_element) to find
                   position of relative to reference_element

    NOTE: reversed and sideways flags in test_element may be set
          by this function (via get_relpos_to_helix() /
          get_relpos_to_sheet()).

    Uses data members (read):
        distmatrix - the distance matrix
        sheet_strandlists_dict -
             dictionary of { sheet_id : list of list of nodes }
             where the list of list of nodes is
             described in build_sheet_constraints()

    Return value:
       tuple (relpos, ref_sse, test_strand) where relpos is
       RELPOS_ABOVE, RELPOS_BELOW, RELPOS_LEFT or RELPOS_RIGHT;
       ref_sse is the strand in the reference sheet that the position
       is relative to when reference_element is a sheet, or the
       reference helix itself when reference_element is a helix;
       test_strand is the strand in the test sheet that is relative to
       reference_element, or None if the test element is not a sheet.
    """
    assert(isinstance(reference_element, PTNodeHelix) or
           len(reference_element) == 1) # sheet id
    assert(isinstance(test_element, PTNodeHelix) or
           len(test_element) == 1) # sheet id

    # Dispatch on the type of the reference element; both callees return
    # the same (relpos, ref_sse, test_strand) tuple shape.
    # (Removed a dead 'ref_strand = None' assignment that was never used.)
    if isinstance(reference_element, PTNodeHelix):
        (relpos, ref_sse, test_strand) = \
              self.get_relpos_to_helix(reference_element, test_element)
    else:
        (relpos, ref_sse, test_strand) = \
              self.get_relpos_to_sheet(reference_element, test_element)
    return (relpos, ref_sse, test_strand)
def get_relpos_helix_to_helix(self, reference_helix, test_element,
                              nearest_ref_resnum, nearest_test_resnum):
    """
    Find the relative position of a helix to a helix.

    Parameters:
        reference_helix - helix to place relative to
        test_element - helix to place relative to reference helix
        nearest_ref_resnum - residue number in reference_helix that the
                             test helix is closest to
        nearest_test_resnum - residue number in the test helix that is
                              closest to the reference helix
    Return value:
        relpos of the test helix relative to the reference helix
    Uses no data members
    """
    # The test helix is placed 'before' the reference helix (left when the
    # reference is sideways, above otherwise) only when the contact is
    # near the top of the reference helix and not near the top of the
    # test helix; in every other case it is placed 'after' (right/below).
    place_before = \
        (reference_helix.is_resnum_nearer_top(nearest_ref_resnum) and
         not test_element.is_resnum_nearer_top(nearest_test_resnum))
    if place_before:
        if reference_helix.get_sideways():
            return RELPOS_LEFT
        else:
            return RELPOS_ABOVE
    else:
        if reference_helix.get_sideways():
            return RELPOS_RIGHT
        else:
            return RELPOS_BELOW
def get_relpos_sheet_to_helix(self, reference_helix, closest_test_strand,
                              nearest_ref_resnum, nearest_test_resnum,
                              test_sheet_strandlists_dict):
    """
    Find the relative position of a sheet to a helix.

    Parameters:
        reference_helix - helix to find relpos of sheet to
        closest_test_strand - strand in the sheet closest to the
                              reference helix
        nearest_ref_resnum - residue number in reference_helix that the
                             test strand is closest to
        nearest_test_resnum - residue number in closest_test_strand that
                              is closest to the reference helix
        test_sheet_strandlists_dict - strandlists dict of the test sheet
    Return value:
        relpos of the sheet relative to the helix
    """
    test_strand_posnum = self.get_strand_posnum(closest_test_strand,
                                           test_sheet_strandlists_dict)
    sheet_id = closest_test_strand.get_sheet_id()
    last_posnum = len(test_sheet_strandlists_dict[sheet_id]) - 1
    ref_sideways = reference_helix.get_sideways()
    if test_strand_posnum == 0:
        # closest strand is first in the sheet: sheet goes after the helix
        if ref_sideways:
            return RELPOS_BELOW
        return RELPOS_RIGHT
    if test_strand_posnum == last_posnum:
        # closest strand is last in the sheet: sheet goes before the helix
        if ref_sideways:
            return RELPOS_ABOVE
        return RELPOS_LEFT
    # closest strand is interior: need to decide ABOVE/BELOW, based on
    # which ends of the helix and strand the nearby residues are at
    contact_at_tops = \
        (reference_helix.is_resnum_nearer_top(nearest_ref_resnum) and
         not closest_test_strand.is_resnum_nearer_top(nearest_test_resnum))
    if contact_at_tops:
        if ref_sideways:
            return RELPOS_LEFT
        return RELPOS_ABOVE
    if ref_sideways:
        return RELPOS_RIGHT
    return RELPOS_BELOW
def get_relpos_to_helix(self, reference_helix, test_element,
                        use_longest_for_orientation = False):
    """
    Find the relative position of test_element relative to
    the helix reference_helix.

    Parameters:
        reference_helix - PTNodeHelix to find position relative to
        test_element - an element (sheet id or helix) to find
                   position of relative to reference_helix
        use_longest_for_orientation - (default False) if True, use
                   the longest strand in the test sheet to determine the
                   relative orientation using the tableau, otherwise use
                   the closest strand (the one used to determine relative
                   position).  Only relevant when test_element is a sheet.
                   (NB: the previous docstring said the default was True;
                   the actual default is False.)

    NOTE: reversed and sideways flags in test_element may be set
          by this function.

    Uses data members (read):
        distmatrix - the distance matrix
        tableau - tableau of relative SSE orientations (may be None)
        sheet_strandlists_dict -
             dictionary of { sheet_id : list of list of nodes }
             where the list of list of nodes is
             described in build_sheet_constraints()

    Return value:
       tuple (relpos, reference_helix, test_strand) where relpos is
       RELPOS_ABOVE, RELPOS_BELOW, RELPOS_LEFT or RELPOS_RIGHT
       and test_strand is the strand in test_element it is relative
       to, or None if the test element is not a sheet;
       reference_helix is just the parameter.
    """
    assert(isinstance(reference_helix, PTNodeHelix))
    assert(isinstance(test_element, PTNodeHelix) or
           len(test_element) == 1) # sheet id

    if isinstance(test_element, PTNodeHelix):
        closest_test_strand = None
        # orientation needs to be taken into account (tableau)
        if self.tableau is not None:
            try:
                tabcode = self.tableau[(reference_helix, test_element)]
                if verbose:
                    sys.stderr.write(' orientation ' +
                                     str(reference_helix) + ', ' +
                                     str(test_element) + ': ' +
                                     tabcode +
                                     '\n')
            except KeyError:
                # no tableau entry for this SSE pair: fall back to
                # assuming the helices are parallel
                sys.stderr.write('WARNING: no tableau entry for ' +
                                 str(reference_helix) + ',' +
                                 str(test_element) + '.' +
                                 'Using PE (parallel).\n')
                tabcode = 'PE'
        else:
            tabcode = 'PE'

        # If the test helix is crossing- Left or Right of the reference,
        # and the reference is not sideways, set the test helix sideways;
        # for crossing-Right set the reversed flag if the reference is not
        # reversed (and for crossing-Left set the reversed flag if the
        # reference IS reversed).
        # Otherwise, if the helices are antiparallel then set the
        # reversed flag in the test helix to the opposite value of
        # that in the reference helix.
        # FIXME: should clean this up and use resolve_orientation()
        if ( (tabcode[0] == 'L' or tabcode[0] == 'R')
             and not reference_helix.get_sideways() ):
            test_element.set_sideways(True)
            if ( (tabcode[0] == 'R' and not reference_helix.get_reversed())
                 or
                 (tabcode[0] == 'L' and reference_helix.get_reversed()) ):
                test_element.set_reversed(True)
        elif ( tabcode[0] == 'O' ):
            test_element.set_reversed(not reference_helix.get_reversed())

        # decide on placement based on nearest residues in the helices
        # FIXME: this is really no good, need to take account of
        # orientation and find some way of deciding if helices are really
        # 'beside' each other (esp. when antiparallel for example).
        # Note 'helix clustering' (partially) solves this problem for
        # the special case of being near a sheet to use as reference
        # for positioning, see get_helixcluster_relative_position().
        (nearest_ref_resnum, nearest_test_resnum) = \
              self.distmatrix.get_nearest_sse_residues(reference_helix,
                                                       test_element)
        relpos = self.get_relpos_helix_to_helix(reference_helix,
                                                test_element,
                                                nearest_ref_resnum,
                                                nearest_test_resnum)
    else:
        # the test element is a sheet. Place above or below, aligning
        # strand with helix, or if strand is on edge of sheet possibly
        # left/right of helix.
        (closest_test_strand, unused) = \
             self.distmatrix.get_strand_nearest_element(test_element,
                                                        reference_helix)
        if verbose:
            sys.stderr.write(' relpos to helix: test is ' +
                             str(closest_test_strand) + ' in sheet ' +
                             test_element + '\n')
        # orientation needs to be taken into account (tableau)
        if self.tableau is not None:
            if use_longest_for_orientation:
                (orientation_test_strand, unused_length2) = \
                       self.get_longest_strand(
                               self.sheet_strandlists_dict[test_element])
            else:
                orientation_test_strand = closest_test_strand
            try:
                tabcode = self.tableau[(reference_helix,
                                        orientation_test_strand)]
                if verbose:
                    sys.stderr.write(' orientation ' +
                                     str(reference_helix) + ', ' +
                                     str(orientation_test_strand) + ': ' +
                                     tabcode +
                                     '\n')
            except KeyError:
                # no tableau entry for this SSE pair: fall back to
                # assuming parallel
                sys.stderr.write('WARNING: no tableau entry for ' +
                                 str(reference_helix) + ',' +
                                 str(orientation_test_strand) + '.' +
                                 'Using PE (parallel).\n')
                tabcode = 'PE'
        else:
            tabcode = 'PE'

        # if ref helix and test strand are antiparallel but flagged as
        # same direction in nodes, or parallel but flagged as different
        # direction in nodes, then flip all the strands in the
        # test sheet.
        if (((tabcode[0] == 'O') and
             reference_helix.get_reversed() ==
             closest_test_strand.get_reversed()) or
            ((tabcode[0] == 'P') and
             reference_helix.get_reversed() !=
             closest_test_strand.get_reversed())):
            self.flip_all_strands_in_sheet(test_element)
        # if test strand is crossing- Left or Right of reference,
        # and reference is not sideways, set test sheet sideways
        # FIXME: should clean this up and use resolve_orientation()
        elif ( (tabcode[0] == 'L' or tabcode[0] == 'R') and
               not reference_helix.get_sideways() ):
            self.set_all_sheet_strands_sideways(test_element)
            if verbose:
                sys.stderr.write(' sheet ' + test_element +
                                 ' is sideways (' + tabcode[0] + ')\n')
            # un-reversed ('up') when sideways is left-pointing
            if tabcode[0] == 'R':
                self.flip_all_strands_in_sheet(test_element)

        (nearest_ref_resnum, nearest_test_resnum) = \
              self.distmatrix.get_nearest_sse_residues(reference_helix,
                                                       closest_test_strand)
        relpos = self.get_relpos_sheet_to_helix(reference_helix,
                                                closest_test_strand,
                                                nearest_ref_resnum,
                                                nearest_test_resnum,
                                                self.sheet_strandlists_dict)
    if verbose:
        sys.stderr.write(' relpos to helix: test is ' +
                         ptrelpos_to_str(relpos) + ' reference.\n')
    return (relpos, reference_helix, closest_test_strand)
def get_helixcluster_relative_position(self, reference_helix, test_helix,
                                       ref_strand):
    """
    Find the relative position of test_helix relative to
    reference_helix in a helix cluster, in which the
    first helix in the cluster is aligned on the ref_strand axis.

    Parameters:
        reference_helix - PTNodeHelix to find position relative to
        test_helix - an element (PTNodeHelix) to find
                   position of relative to reference_helix
        ref_strand - The PTNodeStrand that we are deeming to be sharing
                  an axis with the reference_helix, used to align that
                  helix. For the first helix in the cluster, this is the
                  strand that the helix is immediately C-terminal of.
                  For subsequent helices, it is returned from this
                  subroutine as the strand we have decided it will be
                  aligned with based on the dihedral angle
                  (same/other side) calculation.

    NOTE: reversed and sideways flags in test_helix may be set
          by this function.

    Uses data members (read):
        distmatrix - the distance matrix
        tableau - tableau of relative SSE orientations (may be None)
        pdb_struct - PDB structure, used for the dihedral angle calculation
        sheet_strandlists_dict -
             dictionary of { sheet_id : list of list of nodes }
             where the list of list of nodes is
             described in build_sheet_constraints()
    Return value:
       tuple (relpos, test_strand) where relpos is one of
       RELPOS_ABOVE, RELPOS_BELOW, RELPOS_LEFT or RELPOS_RIGHT
       and test_strand is the strand that we have decided the test helix
       is on the same side of the ref_strand as.
    """
    assert(isinstance(reference_helix, PTNodeHelix))
    assert(isinstance(test_helix, PTNodeHelix))

    # orientation needs to be taken into account (tableau)
    if self.tableau is not None:
        try:
            tabcode = self.tableau[(reference_helix, test_helix)]
            if verbose:
                sys.stderr.write(' orientation ' +
                                 str(reference_helix) + ', ' +
                                 str(test_helix) + ': ' +
                                 tabcode +
                                 '\n')
        except KeyError:
            # no tableau entry for this SSE pair: fall back to parallel
            sys.stderr.write('WARNING: no tableau entry for ' +
                             str(reference_helix) + ',' +
                             str(test_helix) + '.' +
                             'Using PE (parallel).\n')
            tabcode = 'PE'
    else:
        tabcode = 'PE'

    # If the test helix is crossing- Left or Right of the reference,
    # and the reference is not sideways, set the test helix sideways;
    # for crossing-Right set the reversed flag if the reference is not
    # reversed (and for crossing-Left set the reversed flag if the
    # reference IS reversed).
    # Otherwise, if the helices are antiparallel then set the
    # reversed flag in the test helix to the opposite value of
    # that in the reference helix.
    # FIXME: should clean this up and use resolve_orientation()
    if ( (tabcode[0] == 'L' or tabcode[0] == 'R')
         and not reference_helix.get_sideways() ):
        test_helix.set_sideways(True)
        if ( (tabcode[0] == 'R' and not reference_helix.get_reversed())
             or
             (tabcode[0] == 'L' and reference_helix.get_reversed()) ):
            test_helix.set_reversed(True)
    elif ( tabcode[0] == 'O' ):
        test_helix.set_reversed(not reference_helix.get_reversed())

    # Decide on placement of the test helix relative to the reference
    # helix using the sheet containing the ref_strand as reference.
    # We do this with a dihedral angle calculation similar to that used
    # in deciding relative sides of strands in a sheet
    # (see strands_on_opposite_sides() in ptnode.py).
    # The already placed (reference) helix is assumed to be aligned on
    # the axis of some strand in the nearby sheet (the ref_strand
    # parameter). We compute the dihedral angle between the planes formed
    # by the axes of the test_helix and a neighbour of that strand, with
    # the reference strand in common. If the absolute value of this angle
    # is < pi/2 then the test helix is on the same side of the reference
    # strand as the neighbour strand (i.e. we will say it is on the same
    # axis as the neighbour strand), otherwise on the other side.
    #
    # TODO: for angles close to 0, should align on same axis as reference
    # helix, i.e. above/below it, not left/right.

    # if one strand is sideways, all are
    sheet_is_sideways = ref_strand.get_sideways()
    ref_strand_posnum = self.get_strand_posnum(ref_strand)
    if (ref_strand_posnum ==
        len(self.sheet_strandlists_dict[ref_strand.get_sheet_id()])-1):
        neighbour_strand_posnum = ref_strand_posnum - 1
        other_side_strand_posnum = None # on the rightmost side of sheet
        if sheet_is_sideways:
            neighbour_relpos = RELPOS_ABOVE # XXX check this
            other_relpos = RELPOS_BELOW
        else:
            neighbour_relpos = RELPOS_LEFT
            other_relpos = RELPOS_RIGHT
    else:
        neighbour_strand_posnum = ref_strand_posnum + 1
        if ref_strand_posnum > 0:
            other_side_strand_posnum = ref_strand_posnum - 1
        else:
            other_side_strand_posnum = None # leftmost side of sheet
        if sheet_is_sideways:
            neighbour_relpos = RELPOS_BELOW # XXX check this
            other_relpos = RELPOS_ABOVE
        else:
            neighbour_relpos = RELPOS_RIGHT
            other_relpos = RELPOS_LEFT

    neighbour_strand = \
          self.sheet_strandlists_dict[ref_strand.get_sheet_id()]\
                                     [neighbour_strand_posnum][0]
    if other_side_strand_posnum is not None:
        other_side_strand = \
             self.sheet_strandlists_dict[ref_strand.get_sheet_id()]\
                                        [other_side_strand_posnum][0]
    else:
        other_side_strand = None

    angle = ref_strand.axis_dihedral_angle(neighbour_strand, test_helix,
                                           self.pdb_struct)
    # FIXME: arbitrarily choosing 'same side' if angle cannot be calculated
    if angle is None or abs(angle) < pi/2: # same side
        test_strand = neighbour_strand
        relpos = neighbour_relpos
    else:
        test_strand = other_side_strand
        relpos = other_relpos

    if verbose:
        sys.stderr.write(' helixcluster relpos helix: test is ' +
                         ptrelpos_to_str(relpos) + ' reference.\n')

    # FIXME: if the ref strand is last in the sheet and we end up on the
    # other side from the neighbour, there is no new test_strand to
    # return; need to do something else in this case.
    if test_strand is None:
        sys.stderr.write('WARNING: (helix clustering) '
                         'no reference strand for helix ' +
                         str(test_helix) + '\n')
        test_strand = ref_strand # FIXME: just use end strand for now

    return (relpos, test_strand)
def get_relpos_helix_to_sheet(self, closest_ref_strand,
                              nearest_ref_resnum):
    """
    Find the relative position of a helix relative to a sheet.

    Parameters:
        closest_ref_strand - strand in the sheet closest to the test helix
        nearest_ref_resnum - residue number in closest_ref_strand that
                             is nearest the test helix.

    Note that the test helix itself is not needed in this function; only
    the position of nearest_ref_resnum is used to determine the relative
    position.

    Return value:
        relpos (ABOVE/LEFT/etc.) relative to the reference strand
    Uses data members (readonly):
        distmatrix - the distance matrix
        sheet_strandlists_dict -
             dictionary of { sheet_id : list of list of nodes }
             where the list of list of nodes is
             described in build_sheet_constraints()
    """
    sheetid = closest_ref_strand.get_sheet_id()
    posnum = self.get_strand_posnum(closest_ref_strand)
    last_posnum = len(self.sheet_strandlists_dict[sheetid]) - 1
    sideways = closest_ref_strand.get_sideways()
    if posnum == 0 or posnum == last_posnum:
        # the strand is on the edge of the sheet, so the helix can be
        # placed beside it if appropriate
        if posnum == 0:
            if sideways:
                relpos = RELPOS_ABOVE
            else:
                relpos = RELPOS_LEFT
        else:
            if sideways:
                relpos = RELPOS_BELOW
            else:
                relpos = RELPOS_RIGHT
    elif closest_ref_strand.is_resnum_nearer_top(nearest_ref_resnum):
        # interior strand with contact near its top: place 'before'
        if sideways:
            relpos = RELPOS_LEFT
        else:
            relpos = RELPOS_ABOVE
    else:
        # interior strand with contact near its bottom: place 'after'
        if sideways:
            relpos = RELPOS_RIGHT
        else:
            relpos = RELPOS_BELOW
    return relpos
def get_relpos_sheet_to_sheet(self, closest_ref_strand, closest_test_strand,
                              test_sheet_strandlists_dict,
                              tabcode,
                              enable_changes=False):
    """
    Find the relative position of a sheet relative to a sheet.

    Parameters:
        closest_ref_strand - strand in ref sheet closest to the test sheet
        closest_test_strand - strand in test sheet closest to ref sheet
        test_sheet_strandlists_dict - The sheet_strandlists_dict for the
                                      test sheet
        tabcode - two character tableau code for relative orientation
                  between the two sheets
        enable_changes - (default False) If True, the function can
                         change reverse/sideways flags in strands of the
                         test sheet, otherwise does not change them.
    Return value:
        relpos (ABOVE/LEFT/etc.) to the ref strand
    Uses data members:
        distmatrix - the distance matrix
        sheet_strandlists_dict - (read, write only if enable_changes=True)
             dictionary of { sheet_id : list of list of nodes }
             where the list of list of nodes is
             described in build_sheet_constraints()
             NB writing to this refers to changing the orientation
             (reversed/sideways)
             flags in the strand nodes if necessary, or to reversing
             the order of the (outermost) node list if necessary.
             This is only done if parameter enable_changes=True
    """
    # NB: all integer divisions below use // so the behaviour is the same
    # under Python 2 and Python 3 (int / int yields a float in Python 3,
    # which cannot be used as a list index).

    # If the test and ref strands are both first/last in their sheets,
    # and the sheets are parallel or antiparallel (not crossing),
    # then place the sheets side-by-side. If the test strand is
    # on the 'right' end of its sheet and the ref is also on the 'right'
    # end of its sheet (or similarly for left), then we 'flip' the test
    # sheet over by reversing the order of strands so the ref and test
    # strands are beside each other.
    test_strand_posnum = self.get_strand_posnum(closest_test_strand,
                                            test_sheet_strandlists_dict)
    ref_strand_posnum = self.get_strand_posnum(closest_ref_strand)
    reference_sheetid = closest_ref_strand.get_sheet_id()
    test_sheetid = closest_test_strand.get_sheet_id()
    ref_left_edge = (ref_strand_posnum == 0)
    ref_right_edge = (ref_strand_posnum ==
                len(self.sheet_strandlists_dict[reference_sheetid]) - 1)
    test_left_edge = (test_strand_posnum == 0)
    test_right_edge = (test_strand_posnum ==
                       len(test_sheet_strandlists_dict[test_sheetid]) - 1)
    crossing = (tabcode[0] == 'R' or tabcode[0] == 'L') #not par/antipar
    if ( not crossing and ( (ref_left_edge or ref_right_edge) and
                            (test_left_edge or test_right_edge) ) ):
        if ( (ref_left_edge and test_left_edge) or
             (ref_right_edge and test_right_edge) ):
            if enable_changes:
                self.reverse_strand_order_in_sheet(test_sheetid,
                                             test_sheet_strandlists_dict)
                if verbose:
                    sys.stderr.write(' reversed strand order for sheet ' +
                                     test_sheetid +
                                     ' so test strand is near ref strand\n')
        if ref_left_edge:
            if closest_ref_strand.get_sideways():
                relpos = RELPOS_ABOVE
            else:
                relpos = RELPOS_LEFT
        else:
            assert(ref_right_edge)
            if closest_ref_strand.get_sideways():
                relpos = RELPOS_BELOW
            else:
                relpos = RELPOS_RIGHT
    else:
        # Need to decide ABOVE/BELOW.
        # Take the 'central' strand in the test sheet and the longest
        # strand in the reference sheet, and compare the distance from
        # the central test residue to the 'top' and 'bottom' residues
        # of the reference strand; if it is nearer the top, position
        # above, else below.
        # FIXME: should have something more principled here, e.g.
        # actually using sheet centroids and projecting onto the
        # plane of the largest sheet or something.
        # (Projection of C and N term points onto the axis was tried,
        # but results were even more inconsistent, esp. on serpins,
        # since the longest strand has high curvature and irregularity;
        # see notebook 10/2/08.)
        # To stop small differences in relative 3d positions
        # causing different representations of topologically similar
        # structures, we use a fudge factor and assume BELOW
        # unless 'significantly' closer to the other end.
        test_central_strand = \
           test_sheet_strandlists_dict[test_sheetid] \
                  [len(test_sheet_strandlists_dict[test_sheetid]) // 2][0]
        (ref_longest_strand, length_unused) = \
               self.get_longest_strand(
                      self.sheet_strandlists_dict[reference_sheetid])
        # residue lists are ordered in N to C direction
        test_residue_list = test_central_strand.get_residue_list()
        ref_residue_list = ref_longest_strand.get_residue_list()
        test_central_residue = \
            test_residue_list[len(test_residue_list) // 2]
        if enable_changes:
            dist_to_ref_nterm = \
                    self.distmatrix.get_distance(test_central_residue,
                                                 ref_residue_list[0])
            dist_to_ref_cterm = \
                    self.distmatrix.get_distance(test_central_residue,
                                                 ref_residue_list[-1])
        else:
            # enable_changes is False when the test element is external
            # (outside this domain), so in that case we cannot use
            # self.distmatrix and must calculate the distances explicitly
            dist_to_ref_nterm = \
                calc_residue_dist(test_central_residue,
                                  ref_residue_list[0])
            dist_to_ref_cterm = \
                calc_residue_dist(test_central_residue,
                                  ref_residue_list[-1])
        near_cterm = (dist_to_ref_cterm < dist_to_ref_nterm)
        FUDGE = 0.15 # difference must be more than 15% of min dist
        is_significant = (abs(dist_to_ref_nterm - dist_to_ref_cterm)
                     > FUDGE*min(dist_to_ref_nterm, dist_to_ref_cterm))
        if verbose:
            sys.stderr.write(' sheet relpos test strand ' +
                             str(test_central_strand) + ' ref strand ' +
                             str(ref_longest_strand) + '\n')
            sys.stderr.write(' cterm dist = ' + str(dist_to_ref_cterm)
                             + ' nterm dist = ' + str(dist_to_ref_nterm))
            sys.stderr.write('; is_signifcant = ' +
                             str(is_significant) + '\n')
        if is_significant:
            # 'top' of the reference strand depends on whether it is
            # drawn reversed
            if ref_longest_strand.get_reversed():
                near_top = not near_cterm
            else:
                near_top = near_cterm
            if near_top:
                if closest_ref_strand.get_sideways():
                    relpos = RELPOS_LEFT
                else:
                    relpos = RELPOS_ABOVE
            else:
                if closest_ref_strand.get_sideways():
                    relpos = RELPOS_RIGHT
                else:
                    relpos = RELPOS_BELOW
        else:
            # not significantly nearer either end: default to 'after'
            if closest_ref_strand.get_sideways():
                relpos = RELPOS_RIGHT
            else:
                relpos = RELPOS_BELOW

        # Make sure the closest test strand is drawn close to the
        # ref sheet. Since a sheet is always drawn by strands in the
        # order they are in the list of list of strands in the
        # sheet_strandlists_dict for the sheet, we may need to
        # reverse this list.
        if ( enable_changes and
             ((ref_strand_posnum <
               len(self.sheet_strandlists_dict[reference_sheetid]) // 2
               and
               test_strand_posnum >
               (len(test_sheet_strandlists_dict[test_sheetid]) - 1) // 2)
              or
              (ref_strand_posnum >
               len(self.sheet_strandlists_dict[reference_sheetid]) // 2
               and
               test_strand_posnum <
               len(test_sheet_strandlists_dict[test_sheetid]) // 2) ) ):
            # NB use the test sheet's own strandlists dict here (as in the
            # side-by-side case above); when enable_changes is True the
            # test sheet is in this domain so this is the same object as
            # self.sheet_strandlists_dict.
            self.reverse_strand_order_in_sheet(test_sheetid,
                                               test_sheet_strandlists_dict)
            if verbose:
                sys.stderr.write(' reversed strand order for sheet ' +
                                 test_sheetid +
                                 ' so test strand is near ref strand\n')
    return relpos
def get_relpos_to_sheet(self, reference_sheetid, test_element,
                        use_longest_for_orientation=True):
    """
    Find the relative position of test_element relative to
    the sheet reference_sheetid.

    Parameters:
        reference_sheetid - sheet id of sheet to find position
                   of test_element relative to
        test_element - an element (sheet id or helix) to find
                   position of relative to reference sheet
        use_longest_for_orientation - (default True) if True, use
                   the longest strand in each sheet to determine the
                   relative orientations using the tableau, otherwise use
                   the closest strands (the ones used to determine
                   relative position).

    NOTE: reversed and sideways flags in test_element may be set
          by this function. It may also reverse the horiz_order_list
          for the test sheet.

    Uses data members (read):
        distmatrix - the distance matrix
        tableau - tableau of relative SSE orientations (may be None)
        sheet_dict - dict of {sheet_id : nodelist }
        sheet_strandlists_dict - (read/write)
             dictionary of { sheet_id : list of list of nodes }
             where the list of list of nodes is
             described in build_sheet_constraints()
             NB writing to this refers to changing the orientation
             (reversed/sideways)
             flags in the strand nodes if necessary, or to reversing
             the order of the (outermost) node list if necessary.
    Return value:
       tuple (relpos, ref_strand, test_strand) where relpos is
       RELPOS_ABOVE, RELPOS_BELOW, RELPOS_LEFT or RELPOS_RIGHT
       and ref_strand is the strand in the reference sheet to which it
       is relative, and test_strand is the strand in the test element
       relative to it, or None if the test element is not a sheet
    """
    assert(len(reference_sheetid) == 1)
    assert(isinstance(test_element, PTNodeHelix) or
           len(test_element) == 1) # sheet id

    # find the strand in the reference sheet the test element is closest to
    (closest_ref_strand, closest_test_strand) = \
       self.distmatrix.get_strand_nearest_element(reference_sheetid,
                                                  test_element)
    if verbose:
        sys.stderr.write(' relpos to sheet: reference is ' +
                         str(closest_ref_strand) + ' in sheet ' +
                         reference_sheetid + '\n')

    if isinstance(test_element, PTNodeHelix):
        # place helix close to (above, below, or, for strands on edge of
        # sheet, left/right of) the reference strand
        (nearest_ref_resnum, nearest_test_resnum) = \
              self.distmatrix.get_nearest_sse_residues(closest_ref_strand,
                                                       test_element)
        relpos = self.get_relpos_helix_to_sheet(closest_ref_strand,
                                                nearest_ref_resnum)
        # orientation needs to be taken into account (tableau)
        if use_longest_for_orientation:
            (orientation_ref_strand, unused_length) = \
                self.get_longest_strand(
                        self.sheet_strandlists_dict[reference_sheetid])
        else:
            orientation_ref_strand = closest_ref_strand
        if self.tableau is not None:
            try:
                tabcode = self.tableau[(orientation_ref_strand,
                                        test_element)]
                if verbose:
                    sys.stderr.write(' orientation ' +
                                     str(orientation_ref_strand) + ', ' +
                                     str(test_element) + ': ' + tabcode +
                                     '\n')
            except KeyError:
                # no tableau entry for this SSE pair: fall back to parallel
                sys.stderr.write('WARNING: no tableau entry for ' +
                                 str(orientation_ref_strand) + ',' +
                                 str(test_element) + '.' +
                                 'Using PE (parallel).\n')
                tabcode = 'PE'
        else:
            tabcode = 'PE'
        # if ref strand and helix are not crossing, but the ref sheet is
        # sideways and the helix isn't, then set the helix sideways too
        # (Note sheets start out not sideways)
        # FIXME: should clean this up and use resolve_orientation()
        crossing = (tabcode[0] == 'R' or tabcode[0] == 'L') #not par/antipar
        if ( not crossing and closest_ref_strand.get_sideways() ):
            test_element.set_sideways(True)
        # if test helix is crossing- Left or Right of reference,
        # and reference is not sideways, set test helix sideways
        if ( (tabcode[0] == 'L' or tabcode[0] == 'R')
             and not orientation_ref_strand.get_sideways() ):
            test_element.set_sideways(True)
    else:
        # the test element is a sheet. Place above or below, aligning
        # strands, or, if ref and test strand are both on the edge of
        # their sheet, left or right.
        if verbose:
            sys.stderr.write(' relpos to sheet: test is ' +
                             str(closest_test_strand) + ' in sheet ' +
                             test_element + '\n')
        # orientation needs to be taken into account (tableau)
        if use_longest_for_orientation:
            (orientation_ref_strand, unused_length) = \
                self.get_longest_strand(
                        self.sheet_strandlists_dict[reference_sheetid])
            (orientation_test_strand, unused_length2) = \
                self.get_longest_strand(
                        self.sheet_strandlists_dict[test_element])
        else:
            orientation_ref_strand = closest_ref_strand
            orientation_test_strand = closest_test_strand
        if self.tableau is not None:
            try:
                tabcode = self.tableau[(orientation_ref_strand,
                                        orientation_test_strand)]
                if verbose:
                    sys.stderr.write(' orientation ' +
                                     str(orientation_ref_strand) + ', ' +
                                     str(orientation_test_strand) + ': ' +
                                     tabcode +
                                     '\n')
            except KeyError:
                # no tableau entry for this SSE pair: fall back to
                # antiparallel
                sys.stderr.write('WARNING: no tableau entry for ' +
                                 str(orientation_ref_strand) + ',' +
                                 str(orientation_test_strand) + '.' +
                                 'Using OS (antiparallel).\n')
                tabcode = 'OS' #2nd char is arbitrary
        else:
            tabcode = 'OS'
        crossing = (tabcode[0] == 'R' or tabcode[0] == 'L') #not par/antipar

        # Heuristic test for 'folded over' sheets (like sandwiches):
        # if at least two strands in the test sheet immediately follow
        # strands in the ref sheet (or vice versa) and the orientation is
        # parallel, reverse the orientation code, as if we had 'unfolded'
        # the sheets along the 'hinge' formed by the coils between
        # adjacent-in-sequence strands. (See notes 26/2/08. FIXME: should
        # describe this better here rather than referencing handwritten
        # notes!)
        # NB the HH and KK codes are only supposed to be used
        # for strands in the same sheet (not between strands in different
        # sheets) and that is now (10June2008) what is implemented,
        # so we never check KK or HH here since we are dealing with
        # strands in different sheets; instead always stick to P or O.
        if tabcode[0] == 'P':
            ADJSTRAND_COUNT_THRESHOLD = 2 # at least this many to reverse
            adjstrand_count = 0
            for strand1 in self.sheet_dict[reference_sheetid]:
                if self.any_strands_before_or_after_strand(
                    strand1, self.sheet_dict[test_element]):
                    adjstrand_count += 1
            if adjstrand_count >= ADJSTRAND_COUNT_THRESHOLD:
                if verbose:
                    sys.stderr.write(' sheet ' + reference_sheetid +
                                     ' and sheet ' +
                                     test_element +
                                     ' folded over: reversing orientation\n')
                tabcode = 'OS' # 2nd char is arbitrary.

        # if ref and test strands are not crossing, but one sheet is
        # sideways and the other isn't, then set the test sheet sideways
        # if the ref sheet is (Note sheets start out not sideways)
        if ( not crossing and closest_ref_strand.get_sideways() ):
            self.set_all_sheet_strands_sideways(test_element)

        # if ref and test strands are antiparallel but flagged as
        # same direction in nodes, or parallel but flagged as different
        # direction in nodes, then flip all the strands in the
        # test sheet.
        if (((tabcode[0] == 'O') and
             orientation_ref_strand.get_reversed() ==
             orientation_test_strand.get_reversed()) or
            ((tabcode[0] == 'P') and
             orientation_ref_strand.get_reversed() !=
             orientation_test_strand.get_reversed())):
            self.flip_all_strands_in_sheet(test_element)
        # if test strand is crossing- Left or Right of reference,
        # and reference is not sideways, set test sheet sideways
        elif ( crossing and not closest_ref_strand.get_sideways() ):
            self.set_all_sheet_strands_sideways(test_element)
            if verbose:
                sys.stderr.write(' sheet ' + test_element +
                                 ' is sideways (' + tabcode[0] + ')\n')
            # un-reversed ('up') when sideways is left-pointing
            if ( (tabcode[0] == 'R' and
                  not orientation_test_strand.get_reversed()) or
                 (tabcode[0] == 'L' and
                  orientation_test_strand.get_reversed()) ):
                self.flip_all_strands_in_sheet(test_element)

        relpos = self.get_relpos_sheet_to_sheet(closest_ref_strand,
                                                closest_test_strand,
                                                self.sheet_strandlists_dict,
                                                tabcode,
                                                enable_changes=True)
    if verbose:
        sys.stderr.write(' relpos to sheet: test is ' +
                         ptrelpos_to_str(relpos) + ' reference.\n')
    return (relpos, closest_ref_strand, closest_test_strand)
def get_external_relpos(self, reference_element, test_element,
closest_ref_strand,
closest_test_strand,
nearest_ref_resnum,
nearest_test_resnum,
tabcode,
test_sheet_strandlists_dict):
"""
Find the relative position of test_element relative to
reference_element, where test_element is not an element in this
domain. Used for releative placement of domains.
Parameters:
reference_element - an element (either sheet id e.g. 'A' or
helix (PTNodeHelix object) to find position relative to,
the element is in this domain
test_element - and element (as per reference_element) to find
position of relative to reference_element,
the element is not in this domain (cannot use member
data sheet_strandlists_dict etc. for infomratin on
this element)
closest_ref_strand - strand in reference sheet if reference_element
is a sheet, else None.
closest_test_strand - strand in test sheet if test_element is a sheet
else None.
nearest_ref_resnum - residue number in reference SSE that test
element is closest to
nearest_test_resnum - residue number in test SSE that is closest
to reference element
tabcode - two char tableau code for relative orientation of the
external domain with this domain.
test_sheet_strandlists_dict - sheet strandlists dict for test
element when it is a sheet (else None).
Note this is neeed as the test element is
not part of this domain. (For the reference
element in this domain, the data member
sheet_strandlists_dict can be used).
Uses data members (read):
distmatrix - the distance matrix
sheet_strandlists_dict -
dictionary of { sheet_id : list of list of nodes }
where the list of list of nodes is
described in build_sheet_constraints()
Return value:
relpos where relpos is
RELPOS_ABOVE, RELPOS_BELOW, RELPOS_LEFT or RELPOS_RIGHT.
"""
assert(isinstance(reference_element, PTNodeHelix) or
len(reference_element) == 1) # sheet id
assert(isinstance(test_element, PTNodeHelix) or
len(test_element) == 1) # sheet id
if isinstance(reference_element, PTNodeHelix):
if isinstance(test_element, PTNodeHelix):
relpos = self.get_relpos_helix_to_helix(reference_element,
test_element,
nearest_ref_resnum,
nearest_test_resnum)
else:
relpos = self.get_relpos_sheet_to_helix(reference_element,
closest_test_strand,
nearest_ref_resnum,
nearest_test_resnum,
test_sheet_strandlists_dict)
else:
# reference element is a sheet
if isinstance(test_element, PTNodeHelix):
relpos = self.get_relpos_helix_to_sheet(closest_ref_strand,
nearest_ref_resnum)
else:
relpos = self.get_relpos_sheet_to_sheet(closest_ref_strand,
closest_test_strand,
test_sheet_strandlists_dict,
tabcode,
enable_changes = False)
return relpos
##########################################################################
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def ptrelpos_set_verbose(verb):
    """
    Set this module's global verbose flag to the supplied value.

    Parameters: verb - True (for verbose output) or False
    Return value: None
    Uses globals: verbose (in this module)
    """
    global verbose
    verbose = verb
def ptrelpos_to_str(relpos):
    """
    Return string representation of relative position RELPOS_ABOVE etc.
    for verbose output/debugging.

    Parameters: relpos - RELPOS_ABOVE, etc.
    Return value: string corresponding to relpos
    """
    if relpos == RELPOS_ABOVE:
        return "ABOVE"
    if relpos == RELPOS_BELOW:
        return "BELOW"
    if relpos == RELPOS_LEFT:
        return "LEFT of"
    if relpos == RELPOS_RIGHT:
        return "RIGHT of"
    # Unknown code: make the bad value visible in verbose output.
    return "*BAD RELPOS (" + str(relpos) + ") *"
def resolve_orientation(tabcode, ref_sse, test_sse):
    """
    Resolve the orientation encoded in a tableau code (see pttableau.py)
    between ref_sse and test_sse into a (sideways, reversed) tuple.

    Parameters:
       tabcode - two character tableau code for orientation between ref_sse
                 and test_sse
       ref_sse - PTNode (strand or helix) as reference (sideways and reversed
                 taken to be already fixed in this node)
       test_sse - PTNode (strand or helix) the flags are computed for,
                 relative to ref_sse, using tabcode. (Not inspected by the
                 computation itself; kept for interface compatibility.)

    Return value:
       tuple (sideways, reversed) where sideways and reversed are Boolean
       describing if test_sse needs to be sideways or reversed to have
       correct relationship to ref_sse according to tabcode
    """
    # 'L'/'R' codes mean the SSEs cross: the test SSE must be sideways
    # exactly when the reference is not (XOR).
    crossing = tabcode[0] in ('L', 'R')
    sideways = crossing != bool(ref_sse.get_sideways())
    # 'P'/'K' codes mean parallel: the test SSE must be reversed exactly
    # when the reference is reversed (XNOR); antiparallel codes invert it.
    # (Renamed from `reversed`, which shadowed the builtin.)
    parallel = tabcode[0] in ('P', 'K')
    is_reversed = parallel == bool(ref_sse.get_reversed())
    return (sideways, is_reversed)
|
<reponame>PICT-ACM-Student-Chapter/OJ_API
# Create your views here.
from functools import cmp_to_key
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse, JsonResponse
from rest_framework import permissions
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.status import HTTP_404_NOT_FOUND
from contest.models import Contest
from contest.permissions import IsAllowedInContest, IsInTime, IsStartInTime
from contest.serializers import LeaderBoardSerializer, \
UserContestListSerializer, QuestionIdListSerializer
from contest.serializers import UserContestSerializer, ContestSerializer
from core.models import UserContest
class ContestList(ListAPIView):
    """List every contest the authenticated user is registered for.

    Pagination is disabled so the client always gets the full list.
    """
    serializer_class = UserContestListSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None
    def get_queryset(self):
        # Only UserContest rows that belong to the requesting user.
        return UserContest.objects.filter(user_id=self.request.user.id)
class ContestDetails(RetrieveAPIView):
    """Retrieve a single contest by its `id` URL kwarg.

    Requires authentication, registration in the contest
    (IsAllowedInContest), and the contest being in its time window (IsInTime).
    """
    serializer_class = ContestSerializer
    lookup_url_kwarg = 'id'
    queryset = Contest.objects.all()
    permission_classes = [permissions.IsAuthenticated, IsAllowedInContest,
                          IsInTime]
class StartContest(APIView):
    """Mark the authenticated user's participation in a contest as started."""
    permission_classes = [permissions.IsAuthenticated, IsStartInTime]
    def patch(self, request, id):
        """PATCH: set the user's UserContest status to "STARTED".

        Returns the updated UserContest as JSON, or HTTP 404 if the user is
        not registered for contest `id`.
        """
        try:
            user_contest = UserContest.objects.get(
                contest_id=id,
                user_id=request.user.id)
            user_contest.status = "STARTED"
            user_contest.save()
            return JsonResponse(UserContestSerializer(user_contest).data)
        except UserContest.DoesNotExist:
            # No registration row for this (contest, user) pair.
            return HttpResponse(status=404)
def compare_scores(a, b):
    """
    Leaderboard comparator: higher total_score sorts first; ties are broken
    by lower total_penalty first.

    Returns a negative value (< 0) when the left item should be sorted before
    the right item, a positive value (> 0) when it should be sorted after,
    and 0 when the two items are fully tied.
    """
    if a.total_score != b.total_score:
        # Higher score sorts earlier.
        return -1 if a.total_score > b.total_score else 1
    if a.total_penalty != b.total_penalty:
        # On equal scores, lower penalty sorts earlier.
        return -1 if a.total_penalty < b.total_penalty else 1
    # Fully tied: report equality. (The previous version returned 1 here,
    # which made the comparator inconsistent — compare(a, b) and
    # compare(b, a) were both positive for equal entries.)
    return 0
class LeaderBoard(ListAPIView):
    """Per-contest leaderboard, cached to avoid re-sorting on every request."""
    serializer_class = LeaderBoardSerializer
    permission_classes = [permissions.IsAuthenticated]
    def get_queryset(self):
        # One cache entry per contest; expires after CACHE_TTLS['LEADERBOARD'].
        contest_id = self.kwargs['contest_id']
        cache_key = 'leaderboard_{}'.format(
            self.kwargs['contest_id'],
        )
        data = cache.get(cache_key)
        if not data:
            # Cache miss: fetch started participations and sort by score
            # (descending), then penalty (ascending) via compare_scores.
            data = UserContest.objects.filter(contest_id__id=contest_id,
                                              status='STARTED')
            data = sorted(data, key=cmp_to_key(compare_scores))
            cache.set(cache_key, data,
                      settings.CACHE_TTLS['LEADERBOARD'])
        else:
            # NOTE(review): permissions are re-checked only on a cache *hit*;
            # DRF already ran permission_classes before this method — confirm
            # this asymmetry is intentional.
            self.check_permissions(self.request)
        return data
    def get(self, request, *args, **kwargs):
        # NOTE(review): self.list(self, request, ...) passes `self` where DRF
        # expects `request` (extra positional arg); it appears to rely on
        # list() using self.request internally — verify this is intentional.
        res = self.list(self, request, *args, **kwargs)
        # Question-id list is cached separately from the leaderboard rows.
        cache_key = 'leaderboard_{}_ques'.format(
            self.kwargs['contest_id'],
        )
        ques_ids = cache.get(cache_key)
        if not ques_ids:
            try:
                ques = Contest.objects.get(
                    id=self.kwargs['contest_id']).questions.all()
                ques_ids = QuestionIdListSerializer(ques, many=True).data
                cache.set(cache_key, ques_ids,
                          settings.CACHE_TTLS['CONTEST_QUESTIONS'])
            except Contest.DoesNotExist:
                return Response(status=HTTP_404_NOT_FOUND)
        # Attach the contest's question ids to the leaderboard payload.
        data = res.data
        data['questions'] = ques_ids
        return Response(data=data)
|
<gh_stars>100-1000
import os
import typing
import numpy
import pandas
from d3m import container, exceptions, utils as d3m_utils
from d3m.metadata import base as metadata_base, hyperparams
from d3m.base import primitives
__all__ = ('FixedSplitDatasetSplitPrimitive',)
class Hyperparams(hyperparams.Hyperparams):
    """Hyper-parameters selecting which rows form the fixed test split."""
    # Primary-key values of the main resource that form the test (score)
    # split; mutually exclusive with `row_indices`.
    primary_index_values = hyperparams.Set(
        elements=hyperparams.Hyperparameter[str](''),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description='A set of primary index values of the main resource belonging to the test (score) split. Cannot be set together with "row_indices".',
    )
    # Positional row indices of the main resource that form the test split;
    # mutually exclusive with `primary_index_values`.
    row_indices = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description='A set of row indices of the main resource belonging to the test (score) split. Cannot be set together with "primary_index_values".',
    )
    # Whether rows in other resources that become unreferenced after the
    # split should be removed as well.
    delete_recursive = hyperparams.Hyperparameter[bool](
        default=False,
        semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
        description="Delete rows in other resources/tables which are not needed for rows left in the dataset entry point resource/table.",
    )
class FixedSplitDatasetSplitPrimitive(primitives.TabularSplitPrimitiveBase[Hyperparams]):
    """
    A primitive which splits a tabular Dataset in a way that uses for the test
    (score) split a fixed list of primary index values or row indices of the main
    resource to be used. All other rows are used for the train split.
    """
    metadata = metadata_base.PrimitiveMetadata(
        {
            'id': '1654f000-2178-4520-be4c-a95bc26b8d3a',
            'version': '0.1.0',
            'name': "Fixed split tabular dataset splits",
            'python_path': 'd3m.primitives.tods.evaluation.fixed_split_dataset_split',
            'source': {
                'name': "DATALab@TexasA&M University",
                'contact': 'mailto:<EMAIL>',
                'uris': [
                    'https://gitlab.com/datadrivendiscovery/common-primitives/blob/master/common_primitives/fixed_split.py',
                    'https://gitlab.com/datadrivendiscovery/common-primitives.git',
                ],
            },
            'algorithm_types': [
                metadata_base.PrimitiveAlgorithmType.DATA_SPLITTING,
            ],
            'primitive_family': metadata_base.PrimitiveFamily.EVALUATION,
        },
    )
    def _get_splits(self, attributes: pandas.DataFrame, targets: pandas.DataFrame, dataset: container.Dataset, main_resource_id: str) -> typing.List[typing.Tuple[numpy.ndarray, numpy.ndarray]]:
        """Return a single (train_row_indices, score_row_indices) pair.

        The score rows are selected either by primary-key values
        ("primary_index_values") or directly by row index ("row_indices");
        all remaining rows of `attributes` become the train split.
        """
        # This should be handled by "Set" hyper-parameter, but we check it here again just to be sure.
        if d3m_utils.has_duplicates(self.hyperparams['primary_index_values']):
            raise exceptions.InvalidArgumentValueError("\"primary_index_values\" hyper-parameter has duplicate values.")
        if d3m_utils.has_duplicates(self.hyperparams['row_indices']):
            raise exceptions.InvalidArgumentValueError("\"row_indices\" hyper-parameter has duplicate values.")
        # The two selection mechanisms are mutually exclusive.
        if self.hyperparams['primary_index_values'] and self.hyperparams['row_indices']:
            raise exceptions.InvalidArgumentValueError("Both \"primary_index_values\" and \"row_indices\" cannot be provided.")
        if self.hyperparams['primary_index_values']:
            # Translate requested primary-key values into positional row indices.
            primary_index_values = numpy.array(self.hyperparams['primary_index_values'])
            index_columns = dataset.metadata.get_index_columns(at=(main_resource_id,))
            if not index_columns:
                raise exceptions.InvalidArgumentValueError("Cannot find index columns in the main resource of the dataset, but \"primary_index_values\" is provided.")
            main_resource = dataset[main_resource_id]
            # We reset the index so that the index corresponds to row indices.
            main_resource = main_resource.reset_index(drop=True)
            # We use just the "d3mIndex" column and ignore multi-key indices.
            # This works for now because it seems that every current multi-key
            # dataset in fact has an unique value in "d3mIndex" alone.
            # See: https://gitlab.datadrivendiscovery.org/MIT-LL/d3m_data_supply/issues/117
            index_column = index_columns[0]
            score_data = numpy.array(main_resource.loc[main_resource.iloc[:, index_column].isin(primary_index_values)].index)
            score_data_set = set(score_data)
            assert len(score_data) == len(score_data_set), (len(score_data), len(score_data_set))
            # Every requested key must have matched exactly one row.
            if len(score_data) != len(primary_index_values):
                raise exceptions.InvalidArgumentValueError("\"primary_index_values\" contains values which do not exist.")
        else:
            # Row indices were given directly; validate that they all exist.
            score_data = numpy.array(self.hyperparams['row_indices'])
            score_data_set = set(score_data)
            all_data_set = set(numpy.arange(len(attributes)))
            if not score_data_set <= all_data_set:
                raise exceptions.InvalidArgumentValueError("\"row_indices\" contains indices which do not exist, e.g., {indices}.".format(
                    indices=sorted(score_data_set - all_data_set)[:5],
                ))
        # Every row not in the score split goes to the train split.
        train_data = []
        for i in numpy.arange(len(attributes)):
            if i not in score_data_set:
                train_data.append(i)
        assert len(train_data) + len(score_data) == len(attributes), (len(train_data), len(score_data), len(attributes))
        return [(numpy.array(train_data), score_data)]
|
# -*- coding: utf-8 -*-
import numpy as np
import networkx as nx
from scipy import sparse
from scipy.linalg import eig
from itertools import product
def get_base_modularity_matrix(network):
    '''
    Obtain the modularity matrix for the whole network.

    Parameters
    ----------
    network : nx.Graph or nx.DiGraph
        The network of interest

    Returns
    -------
    sparse.csc_matrix
        The modularity matrix for `network`

    Raises
    ------
    TypeError
        When the input `network` does not fit either nx.Graph or nx.DiGraph
    '''
    # Exact type checks (not isinstance) are deliberate: nx.DiGraph is a
    # subclass of nx.Graph, so isinstance would send directed graphs down
    # the undirected branch.
    if type(network) == nx.Graph:
        mod_matrix = nx.modularity_matrix(network)
    elif type(network) == nx.DiGraph:
        mod_matrix = nx.directed_modularity_matrix(network)
    else:
        raise TypeError('Graph type not supported. Use either nx.Graph or nx.Digraph')
    return sparse.csc_matrix(mod_matrix)
def _get_delta_Q(X, a):
'''
Calculate the detal modularity
.. math::
\deltaQ = s^T \cdot \^{B_{g}} \cdot s
.. math:: \deltaQ = s^T \cdot \^{B_{g}} \cdot s
Parameters
----------
X : np.matrix
B_hat_g
a : np.matrix
s, which is the membership vector
Returns
-------
float
The corresponding :math:`\deltaQ`
'''
delta_Q = (a.T.dot(X)).dot(a)
return delta_Q[0,0]
def get_modularity(network, community_dict):
    '''
    Calculate the modularity. Edge weights are ignored.
    Undirected:
    .. math:: Q = \frac{1}{2m}\sum_{i,j} \(A_{ij} - \frac{k_i k_j}{2m}\) \delta_{c_i, c_j}
    Directed:
    .. math:: Q = \frac{1}{m}\sum_{i,j} \(A_{ij} - \frac{k_i^{in} k_j^{out}}{m}\) \delta_{c_i, c_j}
    Parameters
    ----------
    network : nx.Graph or nx.DiGraph
        The network of interest
    community_dict : dict
        A dictionary to store the membership of each node
        Key is node and value is community index
    Returns
    -------
    float
        The modularity of `network` given `community_dict`
    '''
    Q = 0
    # Work on a copy so the caller's graph is not mutated.
    G = network.copy()
    # Force every edge weight to 1 so weights are ignored, as documented.
    nx.set_edge_attributes(G, {e:1 for e in G.edges}, 'weight')
    # NOTE(review): nx.to_scipy_sparse_matrix was removed in networkx 3.0
    # (replaced by to_scipy_sparse_array) — confirm the pinned version.
    A = nx.to_scipy_sparse_matrix(G).astype(float)
    # Exact type checks are deliberate: nx.DiGraph subclasses nx.Graph.
    if type(G) == nx.Graph:
        # for undirected graphs, in and out treated as the same thing
        out_degree = in_degree = dict(nx.degree(G))
        M = 2.*(G.number_of_edges())
        print("Calculating modularity for undirected graph")
    elif type(G) == nx.DiGraph:
        in_degree = dict(G.in_degree())
        out_degree = dict(G.out_degree())
        M = 1.*G.number_of_edges()
        print("Calculating modularity for directed graph")
    else:
        print('Invalid graph type')
        raise TypeError
    nodes = list(G)
    # Sum (A_ij - expected_ij) over all node pairs in the same community.
    Q = np.sum([A[i,j] - in_degree[nodes[i]]*\
                out_degree[nodes[j]]/M\
                for i, j in product(range(len(nodes)),\
                                    range(len(nodes))) \
                if community_dict[nodes[i]] == community_dict[nodes[j]]])
    return Q / M
def get_mod_matrix(network, comm_nodes=None, B=None):
    '''
    This function computes the modularity matrix
    for a specific group in the network.
    (a.k.a., generalized modularity matrix)
    Specifically,
    .. math::
        B^g_{i,j} = B_{ij} - \delta_{ij} \sum_{k \in g} B_{ik}
        m = \abs[\Big]{E}
        B_{ij} = A_{ij} - \dfrac{k_i k_j}{2m}
        OR...
        B_{ij} = A_{ij} - \frac{k_i^{in} k_j^{out}}{m}
    When `comm_nodes` is None or all nodes in `network`, this reduces to :math:`B`
    Parameters
    ----------
    network : nx.Graph or nx.DiGraph
        The network of interest
    comm_nodes : iterable (list, np.array, or tuple)
        List of nodes that defines a community
    B : np.matrix
        Modularity matrix of `network`
    Returns
    -------
    sparse.csc_matrix
        The generalized modularity matrix of `comm_nodes` within `network`
    '''
    if comm_nodes is None:
        # NOTE(review): this assignment is dead — the function returns on
        # the next line; presumably intended as documentation of the
        # "all nodes" case.
        comm_nodes = list(network)
        return get_base_modularity_matrix(network)
    if B is None:
        B = get_base_modularity_matrix(network)
    # subset of mod matrix in g
    indices = [list(network).index(u) for u in comm_nodes]
    B_g = B[indices, :][:, indices]
    #print 'Type of `B_g`:', type(B_g)
    # B^g_(i,j) = B_ij - δ_ij * ∑_(k∈g) B_ik
    # i, j ∈ g
    B_hat_g = np.zeros((len(comm_nodes), len(comm_nodes)), dtype=float)
    # ∑_(k∈g) B_ik
    B_g_rowsum = np.asarray(B_g.sum(axis=1))[:, 0]
    # Exact type checks are deliberate: nx.DiGraph subclasses nx.Graph.
    if type(network) == nx.Graph:
        B_g_colsum = np.copy(B_g_rowsum)
    elif type(network) == nx.DiGraph:
        B_g_colsum = np.asarray(B_g.sum(axis=0))[0, :]
    # Diagonal entries get the delta_ij correction; off-diagonal copied as-is.
    for i in range(B_hat_g.shape[0]):
        for j in range(B_hat_g.shape[0]):
            if i == j:
                B_hat_g[i,j] = B_g[i,j] - 0.5 * (B_g_rowsum[i] + B_g_colsum[i])
            else:
                B_hat_g[i,j] = B_g[i,j]
    # Symmetrize for directed graphs.
    if type(network) == nx.DiGraph:
        B_hat_g = B_hat_g + B_hat_g.T
    return sparse.csc_matrix(B_hat_g)
def largest_eig(A):
    '''
    A wrapper over `scipy.linalg.eig` to produce the largest real eigenvalue
    and the corresponding eigenvector of A, for small A.

    Parameters
    ----------
    A : scipy sparse matrix
        The (small) matrix to decompose; densified before calling eig.

    Returns
    -------
    (np.ndarray, np.ndarray)
        A length-1 array with the largest real eigenvalue, and the matching
        eigenvector as an (n, 1) column.
    '''
    vals, vectors = eig(A.todense())
    # Work on a plain ndarray so column slicing behaves the same whether
    # todense() returned np.matrix or ndarray.
    vectors = np.asarray(vectors)
    # Indices (into the original arrays) of the purely real eigenvalues.
    real_indices = [idx for idx, val in enumerate(vals) if not bool(val.imag)]
    real_vals = [vals[idx].real for idx in real_indices]
    # Position of the largest real eigenvalue. The previous version indexed
    # vals/vectors with range(len(real_indices)) — wrong whenever complex
    # eigenvalues are interleaved — and took vectors[i] (a *row*) although
    # scipy.linalg.eig returns eigenvectors as the *columns* of `vectors`.
    max_pos = real_indices[int(np.argmax(real_vals))]
    return np.asarray([vals[max_pos].real]), vectors[:, max_pos].reshape(-1, 1)
|
# -*- coding: utf-8 -*-
import os
from config import *
import numpy as np
import time
import libxml2 as lx
class XMLTemplate(object):
    """Simple text template loaded from a file.

    Placeholders of the form ##key## are substituted via sub(); reset()
    restores the original, unsubstituted content.
    """
    def __init__(self, fname):
        # Fail early with a clear assertion if the template file is missing.
        assert(os.path.isfile(fname))
        self._fname = fname
        self._load()
    def _load(self):
        # 'with' guarantees the handle is closed even if read() raises
        # (the previous version leaked the file object on error).
        with open(self._fname, 'r') as f:
            self._str = f.read()
        # Keep a pristine copy so reset() can undo substitutions.
        self._ori = self._str
    def sub(self, key, value):
        """Replace every occurrence of ##key## with str(value)."""
        self._str = self._str.replace('##'+key+'##', str(value) )
    def reset(self):
        """Restore the template to its original content."""
        self._str = self._ori
    @property
    def string(self):
        """The current (possibly substituted) template text."""
        return self._str
    def allKeys(self):
        # Intended to enumerate all ##key## placeholders; not implemented.
        raise NotImplementedError('allKeys not implemented!')
class Page(object):
    ''' Root abstract class for every HTML page. '''
    def __init__(self, fname):
        # fname may be None for subclasses that build their content without
        # a template file.
        if fname is None:
            #TODO: implement DO_NOT_LOAD_PAGE_CLASS_ITSELF
            pass
        else:
            self._tmpl = XMLTemplate(fname)
        # DQSROOTDIR is expected to come from `from config import *` — TODO confirm.
        self._edir = os.path.join(DQSROOTDIR, 'expdata')
    def errorPage(self, e):
        """Render the standard error page for exception *e* and return it as
        a string (template: ../templates/errorPage.xml)."""
        self._etmpl = XMLTemplate('../templates/errorPage.xml')
        # str(type(e)) looks like "<class 'ValueError'>"; take the name part.
        self._etmpl.sub('type', str(type(e)).split("'")[1])
        self._etmpl.sub('message', str(e) )
        import traceback
        traceback.print_exc()
        # NOTE(review): `sys` is not imported in this file's visible header —
        # presumably supplied by `from config import *`; confirm.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        self._etmpl.sub('position', '%s:%d' % (fname, exc_tb.tb_lineno) )
        return self._etmpl.string
def mkCheckBox(dic, name, id=None, cls=None):
    """Render HTML checkboxes with colored labels, one per key of *dic*.

    dic maps keys to booleans (checked state); *name* prefixes each element
    id. Returns the joined HTML string. (Python 2 code: note the print
    statement below.)
    """
    #TODO: implement class *cls*
    assert(type(dic) == dict)
    checks = []
    i = 0
    for key in dic.keys():
        s = '<input type="checkbox" id="%s" value="%s" %s/> '\
            % (name+'_'+str(key), str(key), 'checked' if dic[key] else '')
        checks.append(s)
        if dic[key]:
            try:
                # `params` is not defined in this file's visible scope —
                # presumably from `from config import *`; confirm.
                color = params.curves['r%d'%i].color
            except:
                # Fall back to grey when no curve color is configured.
                color = '#aaaaaa'
                print 'Colors should be defined for all curves!'
            i += 1
        else:
            # Unchecked entries get a lighter grey label.
            color = '#dddddd'
        s = '<label for="%s" style="color: %s;">Rat №%s</label>' % \
            (name+'_'+str(key), color, str(key))
        checks.append(s)
    return '\n'.join(checks)
def mkComboBox(dic, selected, name, id=None, cls=None, sort_flag=False):
    """Render an HTML <select> with one <option> per key of *dic*.

    The option whose key equals *selected* (string comparison) is marked
    selected. With sort_flag the keys are sorted first.
    """
    #TODO: implement class *cls*
    assert(type(dic) == dict)
    kar = np.sort(dic.keys()) if sort_flag else dic.keys()
    parts = ['<select size="1" name="%s">' % name]
    for key in kar:
        parts.append('<option value="%s" %s>%s</option> ' % (str(key),
                     'selected' if str(key) == str(selected) else '', dic[key]))
    parts.append('</select>')
    return '\n'.join(parts)
def mkDateCombo(startt, stopt, curt, name, id=None, cls=None, addspecial=[]):
    """Build a date <select> covering [startt, stopt] (unix seconds), one
    option per day, optionally adding special entries 'hour_ago' / 'now'.

    NOTE(review): the mutable default ``addspecial=[]`` is shared between
    calls (harmless while never mutated here, but fragile).
    NOTE(review): ``dq`` is not defined in this file's visible scope —
    presumably supplied by ``from config import *``; confirm.
    """
    # One timestamp per day boundary in the requested range.
    zz = np.arange(dq.tu.lower_day(startt), dq.tu.lower_day(stopt)+1, step=24*3600)
    # Python 2: map() returns a list, which is appended to below.
    dates = map(lambda x: time.strftime('%d.%m',time.localtime(x)), zz)
    for ad in addspecial:
        if ad == 'hour_ago':
            zz = list(zz)
            zz.append(int(time.time())-3600)
            dates.append('hour ago')
        if ad == 'now':
            zz = list(zz)
            zz.append(int(time.time()))
            dates.append('now')
    # Map timestamp -> label; sorted so the options appear chronologically.
    dic = dict(zip(zz,dates))
    return mkComboBox(dic, curt, name, id, cls, sort_flag=True)
def mkGetRequest(**kwargs):
    """Build an HTTP GET query string ('?k1=v1&k2=v2') from keyword args.

    Spaces in keys/values are percent-encoded; no other escaping is done.
    """
    #TODO: make character safety
    s = []
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2 and also valid on Python 3.
    for k, v in kwargs.items():
        _s = str(k) + '=' + str(v)
        _s = _s.replace(' ', '%20')
        s.append(_s)
    return '?' + '&'.join(s)
|
"""
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
"""
from builtins import zip, str, range
import pdb, os, csv, re, io, json
import urllib.request, urllib.error, urllib.parse
from bs4 import BeautifulSoup
from tqdm import tqdm
from shutil import rmtree
from nltk.tokenize import word_tokenize, sent_tokenize
from unidecode import unidecode
import time
# PARAMS
SUMMARY_DIR = '../../raw_summaries/pinkmonkey/summaries'
# Summary list info
summary_list_file = "literature_links.tsv"
#Always create a new errors file when starting to run the script
# NOTE(review): this handle is opened at import time and never closed here;
# later code re-opens the same file in append mode instead of using f_errors.
f_errors = open("section_errors.txt","w")
# Get contents of the summary file: one (title, url) row per line.
with open(summary_list_file, 'r') as tsvfile:
    reader = csv.reader(tsvfile, delimiter='\t')
    summary_infos = list(reader)
def hasNumbers(inputString):
    """Return True if *inputString* contains at least one digit character."""
    return any(ch.isdigit() for ch in inputString)
def chapter_section_check(link_text_lower, link_text_not_lower):
    """Return True if the link text looks like a chapter/section heading.

    Most keywords are matched against the lower-cased text; 'Act' and
    'Section' are matched case-sensitively against the original text.
    """
    lowercase_keywords = ('chapter', 'scene', 'part', 'prologue', 'epilogue',
                          'story', 'preface')
    cased_keywords = ('Act', 'Section')
    return (any(k in link_text_lower for k in lowercase_keywords)
            or any(k in link_text_not_lower for k in cased_keywords))
def remove_toc(text):
    """Strip a trailing 'table of contents ...' span from *text*.

    The greedy leading group means the LAST matching 'table of contents'
    marker (and everything after it) is removed, case-insensitively.
    """
    pat = '((.*)(table[ ]{1,}of contents.*))'
    match = re.match(pat, text, re.IGNORECASE)
    if match:
        text = text.replace(match.group(3), "")
    return text
def get_overview_paragraphs(overview, specific_summary_dir):
    """Fetch the overview/synopsis page at URL *overview* and return its text.

    NOTE(review): on a fetch failure this returns the initial empty *list*
    `overview_paragraphs`, while on success it returns a *string* — callers
    must cope with both; confirm this asymmetry is intended.
    """
    overview_paragraphs = []
    try:
        soup = BeautifulSoup(urllib.request.urlopen(overview), "html.parser")
    except Exception as e:
        print (e)
        # One retry after a short back-off for transient network errors.
        time.sleep(4)
        try:
            soup = BeautifulSoup(urllib.request.urlopen(overview), "html.parser")
        except Exception as e:
            print ("Overview not found: ", e, overview)
            # Log the failure so a later run can re-try this work.
            with open("section_errors.txt","a") as f:
                f.write(overview + "\t" + "Overview" + "\t" + specific_summary_dir + "\n")
            return overview_paragraphs
    flag = 0  # NOTE(review): unused
    pat = "(.*\(synopsis\))"
    paragraphs = soup.findAll(["p","h3"])
    iframe_text = "Your browser does not support the IFRAME tag."
    # Scan paragraphs until one matches the "(synopsis)" marker; that
    # paragraph (minus the marker prefix) becomes the overview text.
    # NOTE(review): if `paragraphs` is empty, `overview_text` below is unbound.
    for ix, paragraph in enumerate(paragraphs):
        overview_text = paragraph.text.strip().replace(iframe_text, "").replace("\r\n"," ").replace("\n"," ")
        if re.match(pat, overview_text, re.IGNORECASE):
            break
    if re.match(pat, overview_text, re.IGNORECASE):
        to_replace = re.match(pat, overview_text, re.IGNORECASE).group(1)
        overview_text = overview_text.replace(to_replace, "")
    overview_text = remove_toc(overview_text)
    # Normalize to plain ASCII and tidy sentence spacing.
    overview_text = unidecode(overview_text)
    overview_text = ". ".join([line.strip().rstrip() for line in overview_text.split('. ')])
    return overview_text
def save_section_para(section_text, section_title, section_link, specific_summary_dir, index):
    """Write one section summary to section_<index>.txt as a JSON object with
    keys name / summary / analysis / url."""
    section_text = remove_toc(section_text)
    # NOTE(review): remove_toc is applied twice; the second call only changes
    # anything when the text contains two TOC markers — confirm intentional.
    section_text = remove_toc(section_text)
    section_dict = {}
    section_dict["name"] = section_title
    section_dict["summary"] = section_text
    # Analysis is intentionally left empty for this source.
    section_dict["analysis"] = ""
    section_dict["url"] = section_link
    output_fname = os.path.join(specific_summary_dir, 'section_%d.txt' % index)
    with open(output_fname, 'w', encoding="utf-8") as fp:
        json.dump(section_dict, fp)
def get_section_paragraphs(page_url, specific_summary_dir):
    """Scrape all chapter/section summaries (plus the synopsis, if any) linked
    from *page_url* and save each one under *specific_summary_dir*."""
    soup = BeautifulSoup(urllib.request.urlopen(page_url), "html.parser")
    section_paragraphs = []
    all_links = []
    section_links = []
    flag = 0  # NOTE(review): unused
    one_level_up_url = os.path.dirname(page_url)
    all_links = soup.findAll("a")
    overview_exists = 0
    # Pass 1: collect links that look like a synopsis/plot page or a
    # chapter/section page.
    for link in all_links:
        link_text_not_lower = link.text.strip().replace("\r\n"," ").replace("\n"," ")
        link_text_lower = link.text.strip().lower().replace("\r\n"," ").replace("\n"," ")
        if "summaries" in link_text_lower or 'synopsis' in link_text_lower or 'plot' in link_text_lower or chapter_section_check(link_text_lower, link_text_not_lower):
            section_path = os.path.join(one_level_up_url, link.get("href"))
            section_links.append((link.text.strip().rstrip(), section_path))
            if 'synopsis' in link_text_lower or 'plot' in link_text_lower:
                overview_exists = 1
    # print (section_links)
    # Pass 2: fetch the overview first (when present), then every section.
    overview_found = 0
    index = -1
    for link_text, link in section_links:
        link_text = link_text.replace("\r\n"," ").replace("\n"," ")
        link_text_lower = link_text.strip().rstrip().lower().replace("\r\n"," ").replace("\n"," ")
        link_text_not_lower = link_text.strip().rstrip().replace("\r\n"," ").replace("\n"," ")
        #Fetch overview first
        if overview_exists and ('synopsis' in link_text_lower or 'plot' in link_text_lower) and overview_found == 0:
            overview = link
            overview_title = link_text
            print (overview_title, overview)
            overview_text = get_overview_paragraphs(overview, specific_summary_dir)
            # print ("overview_text: ", overview_text)
            # overview_text = "<PARAGRAPH>".join(overview_paragraphs)
            overview_dict = {}
            overview_dict["name"] = "overview"
            overview_dict["summary"] = overview_text
            overview_dict["analysis"] = ""
            overview_dict["url"] = overview
            output_fname = os.path.join(specific_summary_dir, "overview.json")
            with open(output_fname, 'w', encoding="utf-8") as fp:
                json.dump(overview_dict, fp)
            overview_found = 1
            continue
        # Sections are only processed once the overview (if any) was handled.
        if (overview_found == 1 or not overview_exists) and chapter_section_check(link_text_lower, link_text_not_lower):
            # chapter_url = os.path.join(one_level_up_url, link.get("href"))
            chapter_url = link
            print(link_text, chapter_url)
            index += 1
            try:
                chapter_soup = BeautifulSoup(urllib.request.urlopen(chapter_url), "html.parser")
            except Exception as e:
                print (e)
                # Retry once after a pause, then log and skip this chapter.
                time.sleep(4)
                try:
                    chapter_soup = BeautifulSoup(urllib.request.urlopen(chapter_url), "html.parser")
                except Exception as e:
                    print ("Chapter not found: ", e, chapter_url)
                    with open("section_errors.txt","a") as f:
                        f.write(str(index) + "\t" + chapter_url + "\t" + link_text + "\t" + specific_summary_dir + "\n")
                    continue
            chapter_paras = chapter_soup.findAll(["p", "h3"])
            iframe_text = "Your browser does not support the IFRAME tag."
            pat = "(.*summary )(.*)"
            # Scan paragraphs until one matches "... summary ..."; the last
            # scanned paragraph's text is kept as the section summary.
            for ix, chapter_para in enumerate(chapter_paras):
                try:
                    section_text = chapter_para.text.strip().replace(iframe_text, "").replace("\r\n"," ").replace("\n"," ")
                    if re.match(pat, section_text, re.IGNORECASE):
                        break
                except: # No text inside the para HTML
                    continue
            # Normalize to ASCII and tidy sentence spacing before saving.
            section_text = unidecode(section_text)
            section_text = ". ".join([line.strip().rstrip() for line in section_text.split('. ')])
            section_title = link_text
            save_section_para(section_text, section_title, chapter_url, specific_summary_dir, index)
# For each summary info
for k, (title, page_url) in enumerate(summary_infos):
    print('\n>>> {}. {} - {} <<<'.format(k, title, page_url))
    # Create a directory for the work if needed
    specific_summary_dir = os.path.join(SUMMARY_DIR, title)
    if not os.path.exists(specific_summary_dir):
        os.makedirs(specific_summary_dir)
    else:
        # An existing directory is treated as "already scraped".
        print("Found existing directory, skipping.")
        continue
    # Parse page
    try:
        soup = BeautifulSoup(urllib.request.urlopen(page_url), "html.parser")
    except Exception as e:
        print ("page not found: ", e)
        continue
    # NOTE(review): `soup` built above is unused here; get_section_paragraphs
    # fetches the page again itself — confirm the double fetch is intended.
    get_section_paragraphs(page_url, specific_summary_dir)
|
<gh_stars>0
import argparse
import csv
import os
import re
import sys
from pathlib import Path
from src import data_loader
from src.dataset_classes.DAST_datasets import DastDataset
from src.dataset_classes.datasets import DataSet
from src.feature_extraction.feature_extractor import FeatureExtractor
# Matches any character that is NOT a (Danish) letter or a digit.
punctuation = re.compile('[^a-zA-ZæøåÆØÅ0-9]')
# Absolute path of this *file*, used as the anchor for relative data paths.
# NOTE(review): os.path.join below combines this file path (not its
# directory) with relative paths — verify os.path.dirname was not intended.
current_path = os.path.abspath(__file__)
"""
A script containing methods for preprocessing data for use in stance detection. Currently the script is set up to
handle the DAST dataset, and data scraped using the tweet_fetcher.py script. Information regarding data
structures can be found in the README at the project root.
"""
def get_database_variables(database, data):
    """
    Switch function which generates variables based on which database type is
    entered as argument, currently supporting 'dast' and 'twitter'. Defines
    raw data path, which class or child of DataSet to use and out path.
    :param data: either full path to the raw data or the raw data itself
    :param database: database type, currently supporting 'dast' and 'twitter'
    :return: three database-specific variables; raw data path, out path and
             an instance of the matching DataSet (sub)class
    """
    if not data:
        path_switch = {
            'dast': '../../data/datasets/dast/raw/dataset/',
            'twitter': '../../data/datasets/twitter/raw/loekke.txt'
        }
        # NOTE(review): current_path is the path of this *file*; joining a
        # relative path onto it yields a path "under" the file itself —
        # verify os.path.dirname(current_path) was not intended.
        data = os.path.join(current_path, Path(path_switch.get(database)))
    # Map to the dataset *classes* and instantiate only the selected one.
    # (The previous version eagerly constructed both DastDataset() and
    # DataSet() on every call just to pick one out of the dict.)
    dataset_switch = {
        'dast': DastDataset,
        'twitter': DataSet
    }
    dataset_cls = dataset_switch.get(database)
    dataset = dataset_cls() if dataset_cls is not None else None
    out_path_switch = {
        'dast': '../../data/datasets/dast/preprocessed/stance/',
        'twitter': '../../data/datasets/twitter/preprocessed/stance/'
    }
    # An unknown database still raises TypeError here (Path(None)), exactly
    # as before.
    out_path = os.path.join(current_path, Path(out_path_switch.get(database)))
    return data, dataset, out_path
def write_preprocessed(header_features, feature_vectors, out_path):
    """
    Writes data which has been preprocessed by the preprocess() method to a
    TSV-formatted CSV file at the given out path.
    :param header_features: feature names to be printed in the file header
    :param feature_vectors: an array of branches, each branch containing data
           points of the form (ID, SDQC value, [feature vector])
    :param out_path: full path of the file to write
    """
    if not feature_vectors:
        print('No preprocessed data detected')
        return
    print('Writing feature vectors to', out_path)
    seen_ids = set()
    with open(out_path, "w+", newline='') as out_file:
        writer = csv.writer(out_file, delimiter='\t')
        writer.writerow(['id', 'sdqc_submission'] + header_features)
        for branch in feature_vectors:
            for comment_id, sdqc_submission, feature_vec in branch:
                # A comment can appear in several branches; write it once.
                if comment_id in seen_ids:
                    continue
                seen_ids.add(comment_id)
                writer.writerow([comment_id, sdqc_submission, *feature_vec])
    print('Done')
def get_branch_level_features(dataset, sdqc_parent, text, lexicon, sentiment, pos, wembs, lstm_wembs):
    """
    Generates features for a full dataset using the FeatureExtractor class,
    grouping them per conversation branch so the caller can save data in the
    desired format.
    :param dataset: a DataSet object containing all data points to convert
    :param sdqc_parent: include the parent comment's SDQC value as a feature
    :param text: include textual features (see text_features)
    :param lexicon: include lexicon features (see special_words_in_text)
    :param sentiment: include the text's sentiment as a feature
    :param pos: include POS tags of words as features
    :param wembs: include word-embedding cosine similarities as features
    :param lstm_wembs: include word embeddings formatted for the stance_lstm
           model
    :return: an array of branches, each containing the feature vectors of all
             comments in that conversation branch
    """
    extractor = FeatureExtractor(dataset)
    all_branch_vectors = []
    for submission in dataset.submissions:
        for branch in submission.branches:
            vectors = []
            for annotation in branch:
                vector = extractor.create_feature_vector(annotation, dataset,
                                                         sdqc_parent, text,
                                                         lexicon, sentiment,
                                                         pos, wembs,
                                                         lstm_wembs)
                # Skip annotations the extractor could not vectorize.
                if vector:
                    vectors.append(vector)
            all_branch_vectors.append(vectors)
    return all_branch_vectors
def preprocess(database, data=False, sub=False, sdqc_parent=False, text=False, lexicon=False, sentiment=False,
               pos=False, wembs=False, lstm_wembs=False, write_out=False, out_file_name='timestamps.csv'):
    """
    Loads raw data at a given data path, extracts features to be used for stance detection, formats the data, and
    returns the processed data. If so specified, saves the preprocessed data to a data file.
    :param database: a database type, supporting either 'dast' or 'twitter'
    :param data: either the path to the raw data which is to be preprocessed, or the raw data itself
    :param sub: whether sub-sampling should be applied to the dataset, removing conversation branches where all
    comments are of the "commenting" SDQC class, which is found to usually be the majority class
    :param sdqc_parent: whether the SDQC value of the parent comment in the conversation tree should be included as
    feature
    :param text: whether a number of textual features should be included, see the text_features method
    :param lexicon: whether a number of lexicon features should be included, see the special_words_in_text method
    :param sentiment: whether the sentiment of the text should be included as a feature
    :param pos: whether the POS tags of words should be included as features
    :param wembs: whether cosine similarity between word embeddings should be used as features
    :param lstm_wembs: whether word embeddings formatted for use in the stance_lstm model should be included as
    features
    :param write_out: whether the preprocessed data should be saved to file
    :param out_file_name: the name of the generated file containing the preprocessed data
    :return: the dataset from which the feature vectors have been built, along with feature vectors
    """
    feature_inputs = [sdqc_parent, text, lexicon, sentiment, pos, wembs, lstm_wembs]
    feature_names = ['sdqc_parent', 'text', 'lexicon', 'sentiment', 'pos', 'word2vec', 'comment_wembs']
    # Pair each flag with its column name. `is True` is deliberate: only the
    # boolean literal counts, so truthy non-bool values are not mistaken for flags.
    features_header = [name for flag, name in zip(feature_inputs, feature_names) if flag is True]
    if lstm_wembs:
        features_header.append('source_wembs')
    data, dataset, out_path = get_database_variables(database, data)
    # `data` may be either a path (str) or already-loaded raw data.
    if isinstance(data, str):
        raw_data = data_loader.load_raw_data(data, database)
    else:
        raw_data = data
    for tree in raw_data:
        # The first element of each tree is the source submission; the rest are branches.
        dataset.add_submission(tree[0])
        for branch in tree[1:]:
            dataset.add_branch(branch, sub_sample=sub)
    feature_vectors = get_branch_level_features(dataset, sdqc_parent, text, lexicon, sentiment, pos, wembs, lstm_wembs)
    if write_out:
        out_path = os.path.join(out_path, out_file_name)
        write_preprocessed(features_header, feature_vectors, out_path)
    return dataset, feature_vectors
if __name__ == "__main__":
    """
    Client for preprocessing data for stance detection.
    See project README for more in-depth description of command-line interfaces.
    :param argv: user-specified arguments parsed from command line.
    """
    def str2bool(value):
        """Convert an argparse string to a bool; pass actual booleans (defaults) through.

        Fixes the bug where e.g. `-ss False` produced the truthy string 'False',
        because argparse hands option values to `type` as raw strings.
        """
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ('true', 't', 'yes', 'y', '1')

    argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description='Preprocessing data for use in stance detection, defaults provided. '
                                                 'LSTM stance model is currently only compatible with lstm_wembs.')
    parser.add_argument('-db', '--database', default='dast', help='Database type, either \'twitter\' or \'dast\'')
    parser.add_argument('-dp', '--data_path', default=False, help='Path to raw data')
    parser.add_argument('-ss', '--sub_sample', default=True, type=str2bool,
                        help='Implement sub-sampling by removing conversation branches of only "commenting" labels')
    parser.add_argument('-sp', '--sdqc_parent', default=False, type=str2bool, help='Include sdqc_parent as feature?')
    parser.add_argument('-tf', '--text_features', default=False, type=str2bool, help='Include textual features?')
    parser.add_argument('-sm', '--sentiment', default=False, type=str2bool, help='Include comment sentiment as feature?')
    parser.add_argument('-lx', '--lexicon', default=False, type=str2bool, help='Include lexicon-based features, e.g. swear word count?')
    parser.add_argument('-pos', '--pos', default=False, type=str2bool, help='Include POS tags as feature?')
    parser.add_argument('-we', '--word_embs', default=False, type=str2bool, help='Include embedding-based features, e.g. cosine '
                                                                                 'similarity across branches?')
    parser.add_argument('-le', '--lstm_wembs', default=True, type=str2bool, help='Include LSTM-formatted word embedding features?')
    parser.add_argument('-wo', '--write_out', default=True, type=str2bool, help='Write preprocessed data to file?')
    parser.add_argument('-on', '--out_file_name', default='timestamps.csv', help='Name of out file')
    args = parser.parse_args(argv)
    preprocess(database=args.database, data=args.data_path, sub=args.sub_sample, sdqc_parent=args.sdqc_parent,
               text=args.text_features, sentiment=args.sentiment, lexicon=args.lexicon, pos=args.pos,
               wembs=args.word_embs, lstm_wembs=args.lstm_wembs, write_out=args.write_out,
               out_file_name=args.out_file_name)
|
<reponame>ashwinipokle/deq<gh_stars>100-1000
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.autograd as autograd
import sys
import copy
import numpy as np
from termcolor import colored
import os
sys.path.append('../../')
from lib.optimizations import weight_norm, VariationalDropout, VariationalHidDropout, VariationalAttnDropout
from lib.solvers import anderson, broyden
from lib.jacobian import jac_loss_estimate, power_method
from utils.adaptive_embedding import AdaptiveEmbedding
from utils.positional_embedding import PositionalEmbedding
from utils.proj_adaptive_softmax import ProjectedAdaptiveLogSoftmax
from utils.log_uniform_sampler import LogUniformSampler, sample_logits
class WeightSharePositionwiseFF(nn.Module):
    """Position-wise two-layer feed-forward sublayer (residual + layer norm) whose
    parameters are reused across all DEQ iterations.

    Operates on channel-first input (bsz x d_model x qlen) and transposes
    internally so the linear layers act on the feature dimension.
    """

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
        super(WeightSharePositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # Expansion / projection pair, each followed by variational dropout.
        self.ff1_net = nn.Linear(d_model, d_inner)
        self.drop1 = VariationalHidDropout(dropout=dropout, length_first=True)
        self.ff2_net = nn.Linear(d_inner, d_model)
        self.drop2 = VariationalHidDropout(dropout=dropout, length_first=True)
        self.pre_lnorm = pre_lnorm

    def wnorm(self):
        # Wrap both linear layers in weight normalization; the returned fn objects
        # re-compute the normalized weights when reset() is called.
        self.ff1_net, self.ff1_fn = weight_norm(module=self.ff1_net, names=['weight'], dim=0)
        self.ff2_net, self.ff2_fn = weight_norm(module=self.ff2_net, names=['weight'], dim=0)

    def reset(self, bsz, qlen):
        # Fresh variational dropout masks (and re-normalized weights, if wnorm applied).
        self.drop1.reset_mask(bsz, self.d_inner, qlen)
        self.drop2.reset_mask(bsz, self.d_model, qlen)
        for fn_name, net in (('ff1_fn', self.ff1_net), ('ff2_fn', self.ff2_net)):
            if fn_name in self.__dict__:
                self.__dict__[fn_name].reset(net)

    def forward(self, inp, attn_out=None):
        assert inp.size(1) == self.d_model, "Feature dimension not match!!"
        x = inp.transpose(1, 2)  # -> (bsz, qlen, d_model)
        if self.pre_lnorm:
            x = F.layer_norm(x, (self.d_model,))
        hidden = self.drop1(F.relu(self.ff1_net(x)))
        out = self.drop2(self.ff2_net(hidden)) + x  # residual connection
        if not self.pre_lnorm:
            out = F.layer_norm(out, (self.d_model,))
        return out.transpose(1, 2)
class WeightShareSelfAttention(nn.Module):
    # This is similar to the RelPartialLearnableMultiHeadAttn class in Transformer-XL
    """Relative-position multi-head self-attention with weights shared across DEQ iterations.

    Works on channel-first tensors (bsz x d_model x seq_len); projections are 1x1
    convolutions rather than linear layers for that reason.
    """

    def __init__(self, d_model, n_head, d_head, dropout, dropatt,
                 pre_lnorm=False, local_size=None):
        super(WeightShareSelfAttention, self).__init__()
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.dropout = dropout
        # 1/sqrt(d_head) scaling of the attention logits.
        self.scale = 1 / (d_head ** 0.5)
        # Single projection producing q, k, v stacked along the channel dimension.
        self.qkv_net = nn.Conv1d(d_model, 3 * n_head * d_head, kernel_size=1, bias=False)
        # Projection of the relative positional embeddings.
        self.r_net = nn.Conv1d(d_model, n_head * d_head, kernel_size=1, bias=False)
        # Learned content (r_w) and position (r_r) biases, as in Transformer-XL.
        self.r_w_bias = nn.Parameter(torch.rand(n_head, d_head).uniform_(-0.05, 0.05))
        self.r_r_bias = nn.Parameter(torch.rand(n_head, d_head).uniform_(-0.05, 0.05))
        self.o_net = nn.Conv1d(n_head * d_head, d_model, kernel_size=1)
        self.dropatt = VariationalAttnDropout(dropout=dropatt)
        self.drop = VariationalHidDropout(dropout=dropout)
        self.pre_lnorm = pre_lnorm
        # Local attention horizon; None means a large default is used in forward().
        self.local_size = local_size

    def wnorm(self):
        # Apply weight normalization to the three projections; the returned fn
        # objects re-compute the normalized weights on reset().
        self.qkv_net, self.qkv_fn = weight_norm(module=self.qkv_net, names=['weight'], dim=0)
        self.r_net, self.r_fn = weight_norm(module=self.r_net, names=['weight'], dim=0)
        self.o_net, self.o_fn = weight_norm(module=self.o_net, names=['weight'], dim=0)

    def reset(self, bsz, qlen, klen):
        # Re-sample variational dropout masks and (if weight-normalized)
        # re-compute the normalized weights.
        self.dropatt.reset_mask(bsz, self.n_head, qlen, klen)
        self.drop.reset_mask(bsz, self.d_model, qlen)
        if 'qkv_fn' in self.__dict__:
            self.qkv_fn.reset(self.qkv_net)
        if 'r_fn' in self.__dict__:
            self.r_fn.reset(self.r_net)
        if 'o_fn' in self.__dict__:
            self.o_fn.reset(self.o_net)

    def _rel_shift(self, x):
        # x has dimension (bsz x n_head x qlen x klen)
        # Transformer-XL shift trick: pad one column, reinterpret the layout, and
        # drop the first row so each query row is aligned to its relative offsets.
        bsz, n_head, qlen, klen = x.size()
        x_padded = F.pad(x, (1,0))
        x_padded = x_padded.view(bsz, n_head, klen+1, qlen)
        return x_padded[:,:,1:].view_as(x)

    def forward(self, z1ss, pos_emb, u1ss, mems=None):
        # Note: In this context, qlen means the length of the sequence; and mlen describes
        # the length of the padding. Their sum is klen.
        bsz, d_model, qlen = z1ss.size()
        r_w_bias, r_r_bias = self.r_w_bias, self.r_r_bias
        n_head, d_head = self.n_head, self.d_head
        rlen = pos_emb.size(2)
        if mems is None:
            mems = torch.tensor([]).view(0,0,0)
        mlen = mems.size(2)
        # Prepend the memory (padding) states along the sequence dimension.
        cat = torch.cat([mems, z1ss], dim=-1)
        if self.pre_lnorm:
            cat = F.layer_norm(cat.transpose(1,2), (d_model,)).transpose(1,2)
        w_heads = self.qkv_net(cat)       # (N x 3*d_model x seq_len)
        r_head_k = self.r_net(pos_emb)
        # Input injection
        w_heads += u1ss
        w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=1)
        # Queries only attend from the current segment, not the memory positions.
        w_head_q = w_head_q[:,:,-qlen:]
        klen = w_head_k.size(2)
        w_head_q = w_head_q.view(bsz, n_head, d_head, qlen)           # bsz x n_head x d_head x qlen
        w_head_k = w_head_k.view(bsz, n_head, d_head, klen)           # bsz x n_head x d_head x klen
        w_head_v = w_head_v.view(bsz, n_head, d_head, klen)           # bsz x n_head x d_head x klen
        r_head_k = r_head_k.view(n_head, d_head, rlen)                # n_head x d_head x rlen

        # Compute attention score: AC is the content term, BD the position term
        # (Transformer-XL decomposition).
        rw_head_q = w_head_q + r_w_bias[:,:,None]                     # bsz x n_head x d_head x qlen
        AC = torch.einsum('bndi,bndj->bnij', rw_head_q, w_head_k)
        rr_head_q = w_head_q + r_r_bias[:,:,None]
        BD = torch.einsum('bndi,ndj->bnij', rr_head_q, r_head_k)
        BD = self._rel_shift(BD)          # for relative positional embedding
        attn_score = AC + BD              # bsz x n_head x qlen x klen
        attn_score.mul_(self.scale)

        # Compute attention probability
        # We apply a local mask, with local horizon size of mlen
        local_size = self.local_size or 1000
        # Causal mask (upper triangle beyond the diagonal shifted by mlen)...
        attn_mask = (torch.triu(torch.ones(qlen, klen), diagonal=1+mlen) > 0)[None]
        # ...combined with a lower-triangle mask cutting off positions further
        # back than the local horizon.
        # NOTE(review): in-place `+=` on a bool tensor acts as logical OR here;
        # newer PyTorch versions may reject bool addition — verify against the
        # pinned torch version (`attn_mask |= ...` would be the explicit form).
        attn_mask += (torch.tril(torch.ones(qlen, klen), diagonal=mlen-local_size) > 0)[None]
        if attn_mask is not None and attn_mask.any().item():
            # attn_mask[None] adds a second leading dim; it broadcasts against
            # (bsz x n_head x qlen x klen), so the extra axis is harmless.
            attn_score = attn_score.float().masked_fill(
                attn_mask[None], -float('inf')).type_as(attn_score)
        attn_prob = F.softmax(attn_score, dim=-1)         # bsz x n_head x qlen x klen
        attn_prob = self.dropatt(attn_prob)

        # Compute attention vector
        attn_vec = torch.einsum('bnij,bndj->bndi', (attn_prob, w_head_v))

        # [bsz x d x qlen]
        attn_vec = attn_vec.contiguous().view(bsz, n_head*d_head, attn_vec.size(-1))

        # Linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)

        # Residual connection + layer normolization (if applicable)
        if self.pre_lnorm:
            out = attn_out + z1ss
        else:
            out = F.layer_norm((attn_out + z1ss).transpose(1,2), (d_model,)).transpose(1,2)
        return out
class RelPartialLearnableDecoderLayer(nn.Module):
    """One weight-tied decoder layer: relative-position self-attention followed by
    a position-wise feed-forward sublayer. This is the function whose fixed point
    the DEQ model solves for.
    """

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        pre_lnorm = kwargs.get('pre_lnorm')
        self.dec_attn = WeightShareSelfAttention(
            d_model, n_head, d_head,
            dropout=dropout,
            dropatt=kwargs.get('dropatt', 0.0),
            pre_lnorm=pre_lnorm,
            local_size=kwargs.get('local_size', None),
        )
        self.pos_ff = WeightSharePositionwiseFF(d_model, d_inner, dropout, pre_lnorm=pre_lnorm)

    def wnorm(self):
        # Apply weight normalization to both sublayers.
        self.dec_attn.wnorm()
        self.pos_ff.wnorm()

    def reset(self, bsz, qlen, klen):
        # Reset the dropout mask(s) and re-compute the weight normalized weights at the START of each iterations
        self.dec_attn.reset(bsz, qlen, klen)
        self.pos_ff.reset(bsz, qlen)

    def forward(self, z1ss, uss, z0, *args):
        # args[0] carries the relative positional embedding.
        pos_emb = args[0]
        return self.pos_ff(self.dec_attn(z1ss, pos_emb, uss, mems=z0))
class DEQTransformerLM(nn.Module):
    """Deep Equilibrium (DEQ) Transformer language model.

    A single weight-tied decoder layer (`self.func`) is driven to its fixed point
    by a root-finding solver (`f_solver`) instead of being stacked `n_layer`
    times; gradients flow through the equilibrium implicitly via a backward hook
    that runs `b_solver` (see `_forward`).
    """

    def __init__(self, n_token, n_layer, eval_n_layer, n_head, d_model, d_head, d_inner,
                 dropout, dropatt, tie_weights=True, d_embed=None, div_val=1,
                 tie_projs=[False], pre_lnorm=False, wnorm=False, tgt_len=None,
                 mem_len=None, local_size=0, pretrain_steps=1, cutoffs=[], load='',
                 f_solver=anderson, b_solver=None, stop_mode="rel", logging=None):
        # NOTE(review): mutable defaults (tie_projs, cutoffs) are shared across
        # calls; harmless only if never mutated — verify.
        super().__init__()
        self.n_token = n_token
        d_embed = d_model if d_embed is None else d_embed
        self.d_embed = d_embed
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.word_emb = AdaptiveEmbedding(n_token, d_embed, d_model, cutoffs, div_val=div_val)
        self.iodrop = VariationalDropout()
        self.dropout = dropout
        self.pos_drop = VariationalHidDropout(dropout=dropout)
        # Steps spent in plain weight-tied stacking before switching to DEQ mode.
        self.pretrain_steps = pretrain_steps
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.local_size = local_size
        self.max_klen = tgt_len + mem_len
        self.n_layer = n_layer
        self.eval_n_layer = eval_n_layer
        # Input-injection projection into the stacked (q, k, v) channels.
        self.inject_conv = nn.Conv1d(d_model, 3*d_model, kernel_size=1)
        self.pos_emb = PositionalEmbedding(self.d_model)
        # The ONE layer defining the equilibrium function f(z; u, z0, pos).
        self.func = RelPartialLearnableDecoderLayer(n_head, d_model, d_head, d_inner, dropout=dropout, dropatt=dropatt,
                                                    pre_lnorm=pre_lnorm, local_size=local_size)
        self.f_solver = f_solver
        self.b_solver = b_solver if b_solver else self.f_solver
        # Handle of the currently registered backward hook (replaced each forward).
        self.hook = None
        self.stop_mode = stop_mode
        self.alternative_mode = "abs" if self.stop_mode == "rel" else "rel"
        self.logging = logging or print
        if wnorm: self.func.wnorm()

        # use adaptive softmax (including standard softmax)
        # (Note: To use sample softmax, refer to the Transformer-XL implementation)
        self.crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_model, cutoffs, div_val=div_val)
        if tie_weights:
            # Tie the input embedding and output softmax weights.
            for i in range(len(self.crit.out_layers)):
                self.crit.out_layers[i].weight = self.word_emb.emb_layers[i].weight
        if tie_projs:
            # Optionally tie the projection matrices as well.
            for i, tie_proj in enumerate(tie_projs):
                if tie_proj and div_val == 1 and d_model != d_embed:
                    self.crit.out_projs[i].weight.data = self.word_emb.emb_projs[0].weight.data
                elif tie_proj and div_val != 1:
                    self.crit.out_projs[i].weight.data = self.word_emb.emb_projs[i].weight.data
        if len(load) > 0:
            params_dict = torch.load(load)
            self.load_weights(params_dict)
            self.logging(f"Finished loading. d_embed={self.inject_conv.weight.data.size(1)}")

    def reset_length(self, tgt_len, mem_len):
        # Change sequence/memory lengths (e.g. between training and evaluation).
        self.tgt_len = tgt_len
        self.mem_len = mem_len

    def load_weights(self, params_dict):
        self.load_state_dict(params_dict)

    def save_weights(self, path, name='pretrained_deq'):
        # Persist the full state dict to <path>/<name>.pth.
        with open(os.path.join(path, f'{name}.pth'), 'wb') as f:
            self.logging(f"Saving weight state dict at {name}.pth")
            torch.save(self.state_dict(), f)

    def init_mems(self):
        # Return fresh empty memories [z0, u0]; only valid when mem_len > 0.
        if self.mem_len <= 0:
            self.logging("init_mems: Hmmmm... you shouldn't be here.")
            return None

        # mems is not None
        with torch.no_grad():
            mems = [torch.empty(0), torch.empty(0)]
            return mems      # For z0 and u0

    def _update_mems(self, z1s, us, z0, qlen, mlen):
        # Roll the memories forward: keep the last `mem_len` positions of the
        # concatenated (old memory + new states) as the next step's padding.
        # does not deal with None
        if self.mem_len <= 0:
            self.logging("_update_mems: Hmmmm... you shouldn't be here.")
            return None

        # mems is not None
        with torch.no_grad():
            end_idx = mlen + qlen
            beg_idx = max(0, end_idx - self.mem_len)    # Account for when mlen = 0
            zs = torch.cat([z0, z1s], dim=2)
            new_z0 = zs[:,:,beg_idx:end_idx].detach().permute(2,0,1).contiguous()     # seq_len x bsz x d_model
            new_u0 = us[:,:,beg_idx:end_idx].detach().permute(2,0,1).contiguous()
            return [new_z0, new_u0]

    def _forward(self, dec_inp, mems=None, f_thres=30, b_thres=40, train_step=-1,
                 compute_jac_loss=True, spectral_radius_mode=False, writer=None):
        """
        Apply the DEQ-Transformer language model on input word tokens
        :param dec_inp: Input words of shape (seq_len x bsz) and dtype torch.LongTensor
        :param mems: History madding and the transformed input corresponding to it; must be a tuple (z0, u0)
                     where z0 has dimension (bsz x d_model x pad_len) and u0 has size (bsz x 3*d_model x pad_len)
        :param f_thres: Forward pass threshold
        :param b_thres: Backward pass threshold
        :param train_step: The number of training step that the current iteration is at
        :param compute_jac_loss: Whether to return an (optional) Jacobian-stability-related loss
        :param spectral_radius_mode: Whether to estimate spectral radius at J(z*) (note: this is very slow!!)
        :param writer: Tensorboard writer
        :return: tuple(output sequence, new memory, jac loss, spec. radius)
        """
        # Assume dec_inp has shape (qlen x bsz)
        dec_inp = dec_inp.t()
        bsz, qlen = dec_inp.size()
        word_emb = self.word_emb(dec_inp)
        word_emb = self.iodrop(word_emb, self.dropout)
        u1s = self.inject_conv(word_emb.transpose(1,2))     # bsz x 3*d_model x qlen

        z0, u0 = mems
        d_model = self.d_model
        if z0 is not None and z0.nelement() > 0:
            assert z0.size(2) == u0.size(2), "Padding fixed points and padding embedding dimensions don't agree"
        else:
            # No history yet: start from zero-length memories.
            # NOTE(review): tensors here are created without an explicit device;
            # presumably the default tensor type is set to CUDA elsewhere — verify.
            z0, u0 = torch.zeros(bsz, d_model, 0), torch.zeros(bsz, 3*d_model, 0)
        mlen = z0.size(2)
        klen = mlen + qlen    # qlen is seq_len, mlen is pad_len

        pos_seq = torch.arange(klen-1, -1, -1.0)
        pos_emb = self.pos_drop(self.pos_emb(pos_seq))     # bsz x d_model x (qlen + mlen) for positional embedding
        us = torch.cat([u0, u1s], dim=2)
        z1s = torch.zeros(bsz, d_model, qlen)     # bsz x d_model x (qlen + mlen) for initial estimate of output
        func_args = [us, z0, pos_emb]
        jac_loss = torch.tensor(0.0).to(z1s)
        sradius = torch.zeros(bsz, 1).to(z1s)
        deq_mode = (train_step < 0) or (train_step >= self.pretrain_steps)

        if not deq_mode:
            # In pretraining mode with stacked (weight-tied) layers. NOT recommended for large models (as then
            # a stacking of, for example, 16 layers would be extremely inefficient). One can also train with
            # M layers and evaluate using N layers (which typically leads to worse performance).
            n_layer = self.n_layer if self.training or train_step > 0 else self.eval_n_layer
            for i in range(n_layer):
                z1s = self.func(z1s, *func_args)
            new_z1s = z1s
        else:
            # Compute the equilibrium via DEQ. When in training mode, we need to register the analytical backward
            # pass according to the Theorem 1 in the paper.
            with torch.no_grad():
                # Solve f(z) = z without building a graph through the iterations.
                result = self.f_solver(lambda z: self.func(z, *func_args), z1s, threshold=f_thres, stop_mode=self.stop_mode)
                z1s = result['result']
            new_z1s = z1s

            if (not self.training) and spectral_radius_mode:
                # Estimate the spectral radius of the Jacobian at the fixed point.
                with torch.enable_grad():
                    z1s.requires_grad_()
                    new_z1s = self.func(z1s, *func_args)
                _, sradius = power_method(new_z1s, z1s, n_iters=150)

            if self.training:
                # One differentiable application of f at z*; its hook implements
                # the implicit backward pass by solving the linear system with b_solver.
                z1s.requires_grad_()
                new_z1s = self.func(z1s, *func_args)
                if compute_jac_loss:
                    jac_loss = jac_loss_estimate(new_z1s, z1s, vecs=1)

                def backward_hook(grad):
                    if self.hook is not None:
                        # To avoid infinite loop
                        self.hook.remove()
                        torch.cuda.synchronize()
                    new_grad = self.b_solver(lambda y: autograd.grad(new_z1s, z1s, y, retain_graph=True)[0] + grad, \
                                             torch.zeros_like(grad), threshold=b_thres)['result']
                    return new_grad
                self.hook = new_z1s.register_hook(backward_hook)

        core_out = self.iodrop(new_z1s, self.dropout).permute(2,0,1).contiguous()     # qlen x bsz x d_model
        # NOTE(review): arguments are passed as (..., mlen, qlen) while the
        # signature reads (..., qlen, mlen). Harmless today because
        # _update_mems only uses their sum, but the swap is worth fixing.
        new_mems = self._update_mems(new_z1s, us, z0, mlen, qlen)
        return core_out, new_mems, jac_loss.view(-1,1), sradius.view(-1,1)

    def forward(self, data, target, mems, train_step=-1, **kwargs):
        # nn.DataParallel does not allow size(0) tensors to be broadcasted.
        # So, have to initialize size(0) mems inside the model forward.
        # Moreover, have to return new_mems to allow nn.DataParallel to piece
        # them together.
        if not mems:
            mems = self.init_mems()
        else:
            # Memories arrive as seq_len x bsz x [-1]; restore channel-first layout.
            for i in range(len(mems)):
                mems[i] = mems[i].permute(1,2,0).contiguous()    # bsz x [-1] x seq_len

        qlen, bsz = data.size()
        mlen = 0 if mems[0].nelement() == 0 else mems[0].size(2)
        klen = mlen + qlen

        # Reset dropout in self.func
        self.pos_drop.reset_mask(1, self.d_model, klen)
        self.func.reset(bsz, qlen, klen)

        tgt_len = target.size(0)
        f_thres = kwargs.get('f_thres', 30)
        b_thres = kwargs.get('b_thres', 40)
        compute_jac_loss = kwargs.get('compute_jac_loss', True)
        sradius_mode = kwargs.get('spectral_radius_mode', False)
        writer = kwargs.get('writer', None)
        hidden, new_mems, jac_loss, sradius = self._forward(data, mems=mems, f_thres=f_thres, b_thres=b_thres, train_step=train_step,
                                                            compute_jac_loss=compute_jac_loss, spectral_radius_mode=sradius_mode,
                                                            writer=writer)
        # Score only the last tgt_len positions with the adaptive softmax.
        pred_hid = hidden[-tgt_len:]
        loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.contiguous().view(-1))
        loss = loss.view(tgt_len, -1)

        if new_mems is None:
            return [loss, jac_loss, sradius]
        else:
            return [loss, jac_loss, sradius] + new_mems
|
<gh_stars>1-10
from __future__ import annotations
import datetime
from typing import List, Optional, Tuple, Union, TYPE_CHECKING
from .image import Image
if TYPE_CHECKING:
from .media import Manga, Anime
# Public API of this module.
__all__ = (
    'CharacterName',
    'CharacterBirthdate',
    'Character'
)
class CharacterName:
    """
    The collection of names a character is known by.

    Attributes:
        first: The character's given name.
        middle: The character's middle name.
        last: The character's surname.
        full: The character's first and last name.
        native: The character's full name in their native language.
        alternatives: Other names the character might be referred to as.
    """

    def __init__(self, payload) -> None:
        # Copy the straightforward name fields verbatim from the payload.
        for key in ('first', 'middle', 'last', 'full', 'native'):
            setattr(self, key, payload[key])
        # The API exposes the alias list under the singular key 'alternative'.
        self.alternatives: List[str] = payload['alternative']
class CharacterBirthdate:
    """
    Attributes:
        year: Numeric Year.
        month: Numeric month.
        day: Numeric day.
    """
    def __init__(self, character: 'Character') -> None:
        """
        Args:
            character: A [Character](./character.md) object
        """
        # NOTE(review): this reads character._payload, so it expects a full
        # Character instance — yet Character.birth passes the raw
        # 'dateOfBirth' dict, which would raise AttributeError. One of the
        # two sides needs fixing; verify against the caller.
        birth = character._payload['dateOfBirth']
        self.character = character
        self.year: Optional[int] = birth['year']
        self.month: Optional[int] = birth['month']
        self.day: Optional[int] = birth['day']

    def get_datetime(self, age: Optional[str]=None) -> Optional[Tuple[datetime.datetime]]:
        """
        A function that computes the character's approximate birth in relation with today's time.

        Returns None when no age is known or when the month/day is missing.
        For an age range like "14-15" it returns a (youngest, oldest) pair;
        otherwise a (datetime, None) pair.
        Args:
            age: If this is None, it will use the character's age.
        Returns:
            An approximate datetime.
        """
        age = age or self.character.age
        if not age:
            return None
        # Cannot build a datetime without both month and day.
        if any(date is None for date in (self.month, self.day)):
            return None
        # Age ranges ("14-15") are resolved recursively into a pair of results.
        if len(age.split('-')) == 2:
            young, old = age.split('-')
            youngest = self.get_datetime(young)
            oldest = self.get_datetime(old)
            return youngest, oldest
        # NOTE(review): `age` is used directly as a calendar year here, and the
        # result is built with year=<elapsed years since that date>. This looks
        # suspicious (e.g. age "25" becomes year 25 AD) — confirm the intended
        # approximation before relying on this method.
        dt = datetime.datetime(year=int(age), month=self.month, day=self.day)
        timedelta = datetime.datetime.utcnow() - dt
        years = timedelta.days // 365
        new = datetime.datetime(year=years, month=self.month, day=self.day)
        return new, None
class Character:
    """
    An Anilist character.

    Attributes:
        description: The description of this character.
        gender: The gender of this character.
        url: This character's Anilist URL.
        favourites: The number of favourites on this character.
        age: The age of this character.
    """
    def __init__(self, payload, session) -> None:
        self._payload = payload
        self._session = session
        self.description: str = self._payload['description']
        self.favourites: int = self._payload['favourites']
        self.url: str = self._payload['siteUrl']
        self.gender: str = self._payload['gender']
        self.age: str = self._payload['age']

    def __repr__(self) -> str:
        return '<Character name={0.name.full!r}>'.format(self)

    @property
    def apperances(self) -> Optional[List[Union[Anime, Manga]]]:
        """
        This character's apperances on difference mangas and animes.

        Note: misspelled name kept for backward compatibility; prefer
        `appearances`.
        Returns:
            A list of [Media](./media.md).
        """
        if not self._payload.get('media'):
            return None

        # Imported lazily to avoid a circular import with .media.
        from .media import _get_media
        animes = self._payload['media']['nodes']
        return [_get_media(anime)(anime, self._session) for anime in animes]

    @property
    def appearances(self) -> Optional[List[Union[Anime, Manga]]]:
        """Correctly spelled alias of `apperances`."""
        return self.apperances

    @property
    def name(self) -> CharacterName:
        """
        Returns:
            A [CharacterName](./character.md) object
        """
        return CharacterName(self._payload['name'])

    @property
    def image(self) -> Image:
        """
        Returns:
            An [Image](./image.md) object.
        """
        # Bug fix: previously called the nonexistent `self._cls`; the intended
        # class is the imported Image wrapper (argument order preserved from
        # the original call — verify against Image.__init__).
        return Image(self._session, self._payload['image'])

    @property
    def birth(self) -> CharacterBirthdate:
        """
        Returns:
            A [CharacterBirthdate](./character.md) object.
        """
        # Bug fix: CharacterBirthdate expects the Character itself (it reads
        # character._payload['dateOfBirth']); passing the raw date dict raised
        # AttributeError.
        return CharacterBirthdate(self)

    def to_dict(self):
        """Return a shallow copy of the raw API payload."""
        return self._payload.copy()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.