hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0cd73c42319f872a1770299941dcf449f784fa3d | 4,067 | py | Python | tests/test_tutorial/test_response_model/test_tutorial004.py | parampavar/fastapi | 4e77737a3f7bf2608132ea170e9ff013b5af6732 | [
"MIT"
] | 2 | 2020-07-17T21:33:28.000Z | 2020-07-17T21:33:38.000Z | tests/test_tutorial/test_response_model/test_tutorial004.py | parampavar/fastapi | 4e77737a3f7bf2608132ea170e9ff013b5af6732 | [
"MIT"
] | 1 | 2021-07-24T15:25:13.000Z | 2021-07-24T15:25:13.000Z | tests/test_tutorial/test_response_model/test_tutorial004.py | parampavar/fastapi | 4e77737a3f7bf2608132ea170e9ff013b5af6732 | [
"MIT"
] | 1 | 2019-03-29T06:15:07.000Z | 2019-03-29T06:15:07.000Z | import pytest
from fastapi.testclient import TestClient
from response_model.tutorial004 import app
# Shared test client wired to the tutorial004 response_model example app.
client = TestClient(app)
# Expected OpenAPI document for the app, compared verbatim against
# GET /openapi.json.  Note the Item schema defaults (tax=10.5, tags=[])
# which this tutorial's response_model settings produce.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/items/{item_id}": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {"$ref": "#/components/schemas/Item"}
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Read Item",
                "operationId": "read_item_items__item_id__get",
                "parameters": [
                    {
                        "required": True,
                        "schema": {"title": "Item Id", "type": "string"},
                        "name": "item_id",
                        "in": "path",
                    }
                ],
            }
        }
    },
    "components": {
        "schemas": {
            "Item": {
                "title": "Item",
                "required": ["name", "price"],
                "type": "object",
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "price": {"title": "Price", "type": "number"},
                    "description": {"title": "Description", "type": "string"},
                    "tax": {"title": "Tax", "type": "number", "default": 10.5},
                    "tags": {
                        "title": "Tags",
                        "type": "array",
                        "items": {"type": "string"},
                        "default": [],
                    },
                },
            },
            "ValidationError": {
                "title": "ValidationError",
                "required": ["loc", "msg", "type"],
                "type": "object",
                "properties": {
                    "loc": {
                        "title": "Location",
                        "type": "array",
                        "items": {"type": "string"},
                    },
                    "msg": {"title": "Message", "type": "string"},
                    "type": {"title": "Error Type", "type": "string"},
                },
            },
            "HTTPValidationError": {
                "title": "HTTPValidationError",
                "type": "object",
                "properties": {
                    "detail": {
                        "title": "Detail",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/ValidationError"},
                    }
                },
            },
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document matches the expected schema exactly."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
@pytest.mark.parametrize(
    "url,data",
    [
        ("/items/foo", {"name": "Foo", "price": 50.2}),
        ("/items/bar",
         {"name": "Bar", "description": "The bartenders", "price": 62,
          "tax": 20.2}),
        ("/items/baz",
         {"name": "Baz", "description": None, "price": 50.2, "tax": 10.5,
          "tags": []}),
    ],
)
def test_get(url, data):
    """Each item endpoint returns exactly the expected JSON payload."""
    resp = client.get(url)
    assert resp.status_code == 200, resp.text
    assert resp.json() == data
| 32.277778 | 87 | 0.339562 |
61030b0217e5004d8de7019dbdf2400d19e5d4a5 | 679 | py | Python | django_docs/makingqueries/migrations/0004_themeblog.py | djangojeng-e/django_tutorial | 78a5f8e17253a32f43079b2c17ffe4cecbd3c3f0 | [
"MIT"
] | null | null | null | django_docs/makingqueries/migrations/0004_themeblog.py | djangojeng-e/django_tutorial | 78a5f8e17253a32f43079b2c17ffe4cecbd3c3f0 | [
"MIT"
] | 9 | 2021-03-19T10:01:27.000Z | 2022-01-13T03:05:42.000Z | django_docs/makingqueries/migrations/0004_themeblog.py | djangojeng-e/django_tutorial | 78a5f8e17253a32f43079b2c17ffe4cecbd3c3f0 | [
"MIT"
] | 1 | 2020-05-01T12:55:48.000Z | 2020-05-01T12:55:48.000Z | # Generated by Django 3.0.5 on 2020-04-26 05:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Themeblog model as a
    multi-table-inheritance child of makingqueries.Blog (linked via a
    parent-link OneToOneField primary key) with an extra ``theme`` field.
    """
    dependencies = [
        ('makingqueries', '0003_auto_20200424_0639'),
    ]
    operations = [
        migrations.CreateModel(
            name='Themeblog',
            fields=[
                ('blog_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='makingqueries.Blog')),
                ('theme', models.CharField(max_length=200)),
            ],
            bases=('makingqueries.blog',),
        ),
    ]
| 29.521739 | 193 | 0.627393 |
468c861d3b0387781f5434e79cf3b88d0393ed13 | 2,374 | py | Python | 1249-minimum-remove-to-make-valid-parentheses/1249-minimum-remove-to-make-valid-parentheses.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 1249-minimum-remove-to-make-valid-parentheses/1249-minimum-remove-to-make-valid-parentheses.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 1249-minimum-remove-to-make-valid-parentheses/1249-minimum-remove-to-make-valid-parentheses.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | class Solution:
def minRemoveToMakeValid1(self, s: str) -> str:
"""
10010123
"())()((("
0101000
"()()((("
"()()"
"""
open_count, close_count = 0, 0
for char in s:
if char == "(":
open_count += 1
elif char == ")" and open_count:
open_count -= 1
elif char == ")":
close_count += 1
formatted_chars = []
for char in s:
if char == ")" and close_count:
close_count -= 1
else:
formatted_chars.append(char)
s2 = "".join(formatted_chars)
formatted_chars = []
for char in reversed(s2):
if char == "(" and open_count:
open_count -= 1
else:
formatted_chars.append(char)
return "".join(reversed(formatted_chars))
def minRemoveToMakeValid(self, s: str) -> str:
"""
Ex 1:
"lee(t(c)o)de)" -> "lee(t(c)o)de)".replace(")", "")
"(()))" -> ")"
Ex 2:
"))((()"
-> "))((" -> "))((".replace(")", "").replace(")", "").replace("(", "")
-> ""
Time O(N):
1. collect brackets O(N)
2. remove all matching brackets O(N)
3. remove all unmatched brackets O(N)
Space O(N)
"""
brackets = [] # "))((" # ""
for char in s:
if char in ["(", ")"]:
brackets.append(char)
# ) -> remove ()
# find mismatched brackets
bracket_string = "".join(brackets) # "))(("
pair = "()"
while pair in bracket_string:
bracket_string = bracket_string.replace(pair, "")
# replace ) from left to right
formatted_string = s # "))((" # ")((" # "((" # "(" # ""
for bracket in bracket_string:
if bracket == ")":
formatted_string = formatted_string.replace(bracket, "", 1)
# replace ( from right to left
formatted_string = "".join(reversed(formatted_string))
for bracket in bracket_string:
if bracket == "(":
formatted_string = formatted_string.replace(bracket, "", 1)
return "".join(reversed(formatted_string)) | 32.520548 | 83 | 0.437658 |
b4d3ef9c9600404f5e4d0dbe51200da605802801 | 16,802 | py | Python | src/ttkcreator/__main__.py | dmalves/ttkbootstrap | 04d441c7a0cfbe6a2debea80c41994201dfc5562 | [
"MIT"
] | 406 | 2021-04-04T10:58:14.000Z | 2022-03-29T22:24:36.000Z | src/ttkcreator/__main__.py | dmalves/ttkbootstrap | 04d441c7a0cfbe6a2debea80c41994201dfc5562 | [
"MIT"
] | 149 | 2021-03-26T12:45:41.000Z | 2022-03-28T20:27:29.000Z | src/ttkcreator/__main__.py | dmalves/ttkbootstrap | 04d441c7a0cfbe6a2debea80c41994201dfc5562 | [
"MIT"
] | 52 | 2021-04-08T14:50:28.000Z | 2022-03-27T15:23:41.000Z | import shutil
import json
from uuid import uuid4
from pathlib import Path
import ttkbootstrap as ttk
from tkinter import Frame
from tkinter.colorchooser import askcolor
from tkinter.filedialog import askopenfilename, asksaveasfilename
from ttkbootstrap.themes import standard, user
from ttkbootstrap.style import ThemeDefinition
from ttkbootstrap.constants import *
from ttkbootstrap.dialogs import Messagebox
class ThemeCreator(ttk.Window):
    """Main TTK Creator window.

    Lays out a theme-configuration panel (name, base theme, one editable
    row per theme color) on the left and a live widget demo on the right.
    """

    def __init__(self):
        super().__init__("TTK Creator")
        self.configure_frame = ttk.Frame(self, padding=(10, 10, 5, 10))
        self.configure_frame.pack(side=LEFT, fill=BOTH, expand=YES)
        self.demo_frame = ttk.Frame(self, padding=(5, 10, 10, 10))
        self.demo_frame.pack(side=LEFT, fill=BOTH, expand=YES)
        self.setup_theme_creator()
        self.demo_widgets = DemoWidgets(self, self.style)
        self.demo_widgets.pack(fill=BOTH, expand=YES)

    def setup_theme_creator(self):
        """Build the menu bar, name/base-theme inputs, and color rows."""
        # application menu
        self.menu = ttk.Menu()
        self.menu.add_command(label="Save", command=self.save_theme)
        self.menu.add_command(label="Reset", command=self.change_base_theme)
        self.menu.add_command(label="Import", command=self.import_user_themes)
        self.menu.add_command(label="Export", command=self.export_user_themes)
        self.configure(menu=self.menu)
        # theme configuration settings
        ## user theme name
        f1 = ttk.Frame(self.configure_frame, padding=(5, 2))
        ttk.Label(f1, text="name", width=12).pack(side=LEFT)
        self.theme_name = ttk.Entry(f1)
        self.theme_name.insert(END, "new theme")
        self.theme_name.pack(side=LEFT, fill=X, expand=YES)
        f1.pack(fill=X, expand=YES)
        ## base theme
        f2 = ttk.Frame(self.configure_frame, padding=(5, 2))
        ttk.Label(f2, text="base theme", width=12).pack(side=LEFT)
        self.base_theme = ttk.Combobox(f2, values=self.style.theme_names())
        self.base_theme.insert(END, "litera")
        self.base_theme.pack(side=LEFT, fill=X, expand=YES)
        f2.pack(fill=X, expand=YES, pady=(0, 15))
        self.base_theme.bind("<<ComboboxSelected>>", self.change_base_theme)
        ## color options
        self.color_rows = []
        for color in self.style.colors.label_iter():
            row = ColorRow(self.configure_frame, color, self.style)
            self.color_rows.append(row)
            row.pack(fill=BOTH, expand=YES)
            row.bind("<<ColorSelected>>", self.create_temp_theme)

    def create_temp_theme(self, *_):
        """Creates a temp theme using the current configure settings and
        changes the theme in tkinter to that new theme.
        """
        themename = "temp_" + str(uuid4()).replace("-", "")[:10]
        colors = {}
        for row in self.color_rows:
            colors[row.label["text"]] = row.color_value
        definition = ThemeDefinition(themename, colors, self.style.theme.type)
        self.style.register_theme(definition)
        self.style.theme_use(themename)
        self.update_color_patches()

    def change_base_theme(self, *_):
        """Sets the initial colors used in the color configuration"""
        themename = self.base_theme.get()
        self.style.theme_use(themename)
        self.update_color_patches()

    def update_color_patches(self):
        """Updates the color patches next to the color code entry."""
        for row in self.color_rows:
            row.color_value = self.style.colors.get(row.label["text"])
            row.update_patch_color()

    def export_user_themes(self):
        """Export user themes saved in the user.py file"""
        inpath = Path(user.__file__)
        outpath = asksaveasfilename(
            initialdir="/",
            initialfile="user.py",
            filetypes=[("python", "*.py")],
        )
        if outpath:
            shutil.copyfile(inpath, outpath)
            Messagebox.ok(
                parent=self,
                title="Export",
                message="User themes have been exported.",
            )

    def import_user_themes(self):
        """Import user themes into the user.py file. Any existing data
        in the user.py file will be overwritten."""
        outpath = Path(user.__file__)
        inpath = askopenfilename(
            initialdir="/",
            initialfile="user.py",
            filetypes=[("python", "*.py")],
        )
        confirm = Messagebox.okcancel(
            title="Import",
            message="This import will overwrite the existing user themes. Ok to import?",
        )
        if confirm == "OK" and inpath:
            shutil.copyfile(inpath, outpath)
            # Fixed: the confirmation dialog previously reused title="Export"
            # (copy-paste from export_user_themes).
            Messagebox.ok(
                parent=self,
                title="Import",
                message="User themes have been imported.",
            )

    def save_theme(self):
        """Save the current settings as a new theme. Warn using if
        saving will overwrite existing theme."""
        name = self.theme_name.get().lower().replace(" ", "")
        if name in user.USER_THEMES:
            # NOTE(review): assumes Messagebox.okcancel returns the string
            # "Cancel" on dismissal — confirm against ttkbootstrap dialogs API.
            result = Messagebox.okcancel(
                title="Save Theme",
                alert=True,
                message=f"Overwrite existing theme {name}?",
            )
            if result == "Cancel":
                return
        colors = {}
        for row in self.color_rows:
            colors[row.label["text"]] = row.color_value
        theme = {name: {"type": self.style.theme.type, "colors": colors}}
        user.USER_THEMES.update(theme)
        standard.STANDARD_THEMES[name] = theme[name]
        # save user themes to file
        formatted = json.dumps(user.USER_THEMES, indent=4)
        out = 'USER_THEMES = ' + formatted
        filepath = user.__file__
        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(out)
        definition = ThemeDefinition(name, colors, self.style.theme.type)
        self.style.register_theme(definition)
        self.style.theme_use(name)
        # Rebuild the base-theme list, hiding throwaway temp_* themes.
        new_themes = []
        for themename in self.style.theme_names():
            if not themename.startswith("temp"):
                new_themes.append(themename)
        self.base_theme.configure(values=new_themes)
        Messagebox.ok(f"The theme {name} has been created", "Save theme")
class ColorRow(ttk.Frame):
    """One editable theme-color row: name label, color patch, hex entry,
    and a color-picker button.  Emits <<ColorSelected>> whenever the
    color changes."""

    def __init__(self, master, color, style):
        """Create the row for theme color *color*, using *style* for lookups."""
        super().__init__(master, padding=(5, 2))
        self.colorname = color
        self.style = style
        self.label = ttk.Label(self, text=color, width=12)
        self.label.pack(side=LEFT)
        self.patch = Frame(
            master=self, background=self.style.colors.get(color), width=15
        )
        self.patch.pack(side=LEFT, fill=BOTH, padx=2)
        self.entry = ttk.Entry(self, width=12)
        self.entry.pack(side=LEFT, fill=X, expand=YES)
        self.entry.bind("<FocusOut>", self.enter_color)
        self.color_picker = ttk.Button(
            master=self,
            text="...",
            bootstyle=SECONDARY,
            command=self.pick_color,
        )
        self.color_picker.pack(side=LEFT, padx=2)
        # set initial color value and patch color
        self.color_value = self.style.colors.get(color)
        self.update_patch_color()

    def pick_color(self):
        """Callback for when a color is selected from the color chooser"""
        color = askcolor(color=self.color_value)
        if color[1]:
            self.color_value = color[1]
            self.update_patch_color()
            self.event_generate("<<ColorSelected>>")

    def enter_color(self, *_):
        """Callback for when a color is typed into the entry"""
        try:
            self.color_value = self.entry.get().lower()
            self.update_patch_color()
        except Exception:
            # An invalid color string makes Tk raise while configuring the
            # patch; fall back to the theme's current value.  (Was a bare
            # ``except:``, which also swallowed SystemExit/KeyboardInterrupt.)
            self.color_value = self.style.colors.get(self.label["text"])
            self.update_patch_color()
        self.event_generate("<<ColorSelected>>")

    def update_patch_color(self):
        """Update the color patch frame with the color value stored in
        the entry widget."""
        self.entry.delete(0, END)
        self.entry.insert(END, self.color_value)
        self.patch.configure(background=self.color_value)
class DemoWidgets(ttk.Frame):
    """Builds a frame containing an example of most ttkbootstrap widgets
    with various styles and states applied.
    """

    # Multiline sample text (PEP 20) used to fill the text widget below.
    ZEN = """Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""

    def __init__(self, master, style):
        """Build both demo panels inside *master* using *style* for lookups."""
        super().__init__(master)
        self.style: ttk.Style = style
        self.create_left_frame()
        self.create_right_frame()

    def create_right_frame(self):
        """Create the right panel: button styles and other input widgets."""
        container = ttk.Frame(self)
        container.pack(side=RIGHT, fill=BOTH, expand=YES, padx=5)
        # demonstrates various button styles
        btn_group = ttk.Labelframe(
            master=container, text="Buttons", padding=(10, 5)
        )
        btn_group.pack(fill=X)
        menu = ttk.Menu(self)
        for i, t in enumerate(self.style.theme_names()):
            menu.add_radiobutton(label=t, value=i)
        default = ttk.Button(master=btn_group, text="solid button")
        default.pack(fill=X, pady=5)
        default.focus_set()
        mb = ttk.Menubutton(
            master=btn_group,
            text="solid menubutton",
            bootstyle=SECONDARY,
            menu=menu,
        )
        mb.pack(fill=X, pady=5)
        cb = ttk.Checkbutton(
            master=btn_group,
            text="solid toolbutton",
            bootstyle=(SUCCESS, TOOLBUTTON),
        )
        cb.invoke()
        cb.pack(fill=X, pady=5)
        ob = ttk.Button(
            master=btn_group, text="outline button", bootstyle=(INFO, OUTLINE)
        )
        ob.pack(fill=X, pady=5)
        mb = ttk.Menubutton(
            master=btn_group,
            text="outline menubutton",
            bootstyle=(WARNING, OUTLINE),
            menu=menu,
        )
        mb.pack(fill=X, pady=5)
        cb = ttk.Checkbutton(
            master=btn_group,
            text="outline toolbutton",
            bootstyle="success-outline-toolbutton",
        )
        cb.pack(fill=X, pady=5)
        lb = ttk.Button(master=btn_group, text="link button", bootstyle=LINK)
        lb.pack(fill=X, pady=5)
        cb1 = ttk.Checkbutton(
            master=btn_group,
            text="rounded toggle",
            bootstyle=(SUCCESS, ROUND, TOGGLE),
        )
        cb1.invoke()
        cb1.pack(fill=X, pady=5)
        cb2 = ttk.Checkbutton(
            master=btn_group, text="squared toggle", bootstyle=(SQUARE, TOGGLE)
        )
        cb2.pack(fill=X, pady=5)
        cb2.invoke()
        input_group = ttk.Labelframe(
            master=container, text="Other input widgets", padding=10
        )
        input_group.pack(fill=BOTH, pady=(10, 5), expand=YES)
        entry = ttk.Entry(input_group)
        entry.pack(fill=X)
        entry.insert(END, "entry widget")
        password = ttk.Entry(master=input_group, show="•")
        password.pack(fill=X, pady=5)
        password.insert(END, "password")
        spinbox = ttk.Spinbox(master=input_group, from_=0, to=100)
        spinbox.pack(fill=X)
        spinbox.set(45)
        cbo = ttk.Combobox(
            master=input_group,
            text=self.style.theme.name,
            values=self.style.theme_names(),
        )
        cbo.pack(fill=X, pady=5)
        cbo.current(self.style.theme_names().index(self.style.theme.name))
        de = ttk.DateEntry(input_group)
        de.pack(fill=X)

    def create_left_frame(self):
        """Create all the left frame widgets"""
        container = ttk.Frame(self)
        container.pack(side=LEFT, fill=BOTH, expand=YES, padx=5)
        # demonstrates all color options inside a label
        color_group = ttk.Labelframe(
            master=container, text="Theme color options", padding=10
        )
        color_group.pack(fill=X, side=TOP)
        for color in self.style.colors:
            cb = ttk.Button(color_group, text=color, bootstyle=color)
            cb.pack(side=LEFT, expand=YES, padx=5, fill=X)
        # demonstrates all radiobutton widgets active and disabled
        cr_group = ttk.Labelframe(
            master=container, text="Checkbuttons & radiobuttons", padding=10
        )
        cr_group.pack(fill=X, pady=10, side=TOP)
        cr1 = ttk.Checkbutton(cr_group, text="selected")
        cr1.pack(side=LEFT, expand=YES, padx=5)
        cr1.invoke()
        cr2 = ttk.Checkbutton(cr_group, text="deselected")
        cr2.pack(side=LEFT, expand=YES, padx=5)
        cr3 = ttk.Checkbutton(cr_group, text="disabled", state=DISABLED)
        cr3.pack(side=LEFT, expand=YES, padx=5)
        cr4 = ttk.Radiobutton(cr_group, text="selected", value=1)
        cr4.pack(side=LEFT, expand=YES, padx=5)
        cr4.invoke()
        cr5 = ttk.Radiobutton(cr_group, text="deselected", value=2)
        cr5.pack(side=LEFT, expand=YES, padx=5)
        cr6 = ttk.Radiobutton(
            cr_group, text="disabled", value=3, state=DISABLED
        )
        cr6.pack(side=LEFT, expand=YES, padx=5)
        # demonstrates the treeview and notebook widgets
        ttframe = ttk.Frame(container)
        ttframe.pack(pady=5, fill=X, side=TOP)
        table_data = [
            ("South Island, New Zealand", 1),
            ("Paris", 2),
            ("Bora Bora", 3),
            ("Maui", 4),
            ("Tahiti", 5),
        ]
        tv = ttk.Treeview(
            master=ttframe, columns=[0, 1], show="headings", height=5
        )
        for row in table_data:
            tv.insert("", END, values=row)
        tv.selection_set("I001")
        tv.heading(0, text="City")
        tv.heading(1, text="Rank")
        tv.column(0, width=300)
        tv.column(1, width=70, anchor=CENTER)
        tv.pack(side=LEFT, anchor=NE, fill=X)
        nb = ttk.Notebook(ttframe)
        nb.pack(side=LEFT, padx=(10, 0), expand=YES, fill=BOTH)
        nb_text = (
            "This is a notebook tab.\nYou can put any widget you want here."
        )
        nb.add(ttk.Label(nb, text=nb_text), text="Tab 1", sticky=NW)
        nb.add(
            child=ttk.Label(nb, text="A notebook tab."),
            text="Tab 2",
            sticky=NW,
        )
        nb.add(ttk.Frame(nb), text="Tab 3")
        nb.add(ttk.Frame(nb), text="Tab 4")
        nb.add(ttk.Frame(nb), text="Tab 5")
        # text widget
        txt = ttk.Text(master=container, height=5, width=50, wrap="none")
        txt.insert(END, DemoWidgets.ZEN)
        txt.pack(side=LEFT, anchor=NW, pady=5, fill=BOTH, expand=YES)
        # demonstrates scale, progressbar, and meter, and scrollbar widgets
        lframe_inner = ttk.Frame(container)
        lframe_inner.pack(fill=BOTH, expand=YES, padx=10)
        scale = ttk.Scale(
            master=lframe_inner, orient=HORIZONTAL, value=75, from_=100, to=0
        )
        scale.pack(fill=X, pady=5, expand=YES)
        ttk.Progressbar(
            master=lframe_inner,
            orient=HORIZONTAL,
            value=50,
        ).pack(fill=X, pady=5, expand=YES)
        ttk.Progressbar(
            master=lframe_inner,
            orient=HORIZONTAL,
            value=75,
            bootstyle="success-striped",
        ).pack(fill=X, pady=5, expand=YES)
        m = ttk.Meter(
            master=lframe_inner,
            metersize=150,
            amountused=45,
            subtext="meter widget",
            bootstyle="info",
            interactive=True,
        )
        m.pack(pady=10)
        sb = ttk.Scrollbar(
            master=lframe_inner,
            orient=HORIZONTAL,
        )
        sb.set(0.1, 0.9)
        sb.pack(fill=X, pady=5, expand=YES)
        sb = ttk.Scrollbar(
            master=lframe_inner, orient=HORIZONTAL, bootstyle="danger-round"
        )
        sb.set(0.1, 0.9)
        sb.pack(fill=X, pady=5, expand=YES)
if __name__ == "__main__":
    # Launch the theme-creator application and enter the Tk event loop.
    ThemeCreator().mainloop()
| 35.522199 | 89 | 0.595584 |
994d7723842ec86deec48fdbad06ca075a9f39f7 | 4,386 | py | Python | samples/openapi3/client/petstore/python-experimental/petstore_api/models/enum_class.py | doc22940/openapi-generator | 50d21cb0d161e7917bb410a7db78811635f0837b | [
"Apache-2.0"
] | 1 | 2020-09-16T22:26:09.000Z | 2020-09-16T22:26:09.000Z | samples/openapi3/client/petstore/python-experimental/petstore_api/models/enum_class.py | doc22940/openapi-generator | 50d21cb0d161e7917bb410a7db78811635f0837b | [
"Apache-2.0"
] | null | null | null | samples/openapi3/client/petstore/python-experimental/petstore_api/models/enum_class.py | doc22940/openapi-generator | 50d21cb0d161e7917bb410a7db78811635f0837b | [
"Apache-2.0"
] | 1 | 2020-10-06T15:41:06.000Z | 2020-10-06T15:41:06.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class EnumClass(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
        ('value',): {
            '_ABC': "_abc",
            '-EFG': "-efg",
            '(XYZ)': "(xyz)",
        },
    }

    validations = {
    }

    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        return None

    @staticmethod
    def _composed_schemas():
        return None

    required_properties = set([
        '_data_store',
        '_check_type',
        '_from_server',
        '_path_to_item',
        '_configuration',
    ])

    def __init__(self, value='-efg', _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs):  # noqa: E501
        """enum_class.EnumClass - a model defined in OpenAPI

        Args:

        Keyword Args:
            value (str): defaults to '-efg', must be one of ['_abc', '-efg', '(xyz)']  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self.value = value

        for var_name, var_value in six.iteritems(kwargs):
            # NOTE(review): attribute_map is not defined in this class —
            # presumably provided by the ModelSimple base class; confirm.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 32.977444 | 174 | 0.592795 |
c35cd344a5109145ffeb4278d8b659fb6812d261 | 1,351 | py | Python | DeepLearning/DLStart/common/gradient.py | 981935539/Ace | eabeb30dc330fb3fae1d916d9a7b180432571a43 | [
"MIT"
] | null | null | null | DeepLearning/DLStart/common/gradient.py | 981935539/Ace | eabeb30dc330fb3fae1d916d9a7b180432571a43 | [
"MIT"
] | null | null | null | DeepLearning/DLStart/common/gradient.py | 981935539/Ace | eabeb30dc330fb3fae1d916d9a7b180432571a43 | [
"MIT"
] | null | null | null | # coding: utf-8
import numpy as np
def _numerical_gradient_1d(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h) # 可以随便传个值,这个值是不起作用的,这是更直观
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val # 还原值
return grad
def numerical_gradient_2d(f, X):
    """Gradient of f at X: 1-D input is handled directly, 2-D row by row."""
    if X.ndim == 1:
        return _numerical_gradient_1d(f, X)
    grad = np.zeros_like(X)
    for row_idx, row in enumerate(X):
        grad[row_idx] = _numerical_gradient_1d(f, row)
    return grad
def numerical_gradient(f, x):
    """Central-difference gradient of f at x, for an array of any shape.

    x is perturbed in place while evaluating f and restored afterwards.
    Debug print() calls were removed — the sibling 1-D/2-D variants are
    silent, and printing every evaluation polluted stdout.
    """
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)
    # multi_index walks arrays of arbitrary rank; readwrite lets us
    # perturb each element of x in place.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index  # (0,0), (0,1), ...
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)
        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad
a066bdf821aef8443a5b23dc79f85e47b4bdcf00 | 61,679 | py | Python | test/unit/common/middleware/test_recon.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 1 | 2019-05-25T10:55:58.000Z | 2019-05-25T10:55:58.000Z | test/unit/common/middleware/test_recon.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 12 | 2015-06-23T23:20:17.000Z | 2016-01-27T00:37:12.000Z | test/unit/common/middleware/test_recon.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 5 | 2015-06-04T19:00:11.000Z | 2015-12-16T21:04:33.000Z | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
from contextlib import contextmanager
import mock
import os
from posix import stat_result, statvfs_result
from shutil import rmtree
import tempfile
import unittest
from unittest import TestCase
from swift import __version__ as swiftver
from swift.common import ring, utils
from swift.common.swob import Request
from swift.common.middleware import recon
from swift.common.storage_policy import StoragePolicy
from test.unit import patch_policies
def fake_check_mount(a, b):
    """Mount-check stand-in that always raises OSError('Input/Output Error')."""
    raise OSError('Input/Output Error')
def fail_os_listdir():
    """os.listdir stand-in that always raises OSError('No such file or directory')."""
    raise OSError('No such file or directory')
def fail_io_open(file_path, open_mode):
    """open() stand-in that always raises IOError('No such file or directory')."""
    raise IOError('No such file or directory')
class FakeApp(object):
    """Minimal WSGI-app double; ignores its inputs and returns a marker string."""
    def __call__(self, env, start_response):
        return "FAKE APP"
def start_response(*args):
    """No-op WSGI start_response stand-in; accepts and ignores any arguments."""
    pass
class FakeFromCache(object):
    """Stand-in for ReconMiddleware._from_recon_cache.

    Returns the canned *out* payload and records every call's (args, kwargs)
    in fakeout_calls for later inspection by tests.
    """
    def __init__(self, out=None):
        self.fakeout = out
        self.fakeout_calls = []

    def fake_from_recon_cache(self, *args, **kwargs):
        self.fakeout_calls.append((args, kwargs))
        return self.fakeout
class OpenAndReadTester(object):
    """Test double standing in for an open()-able, readable, iterable file.

    Iteration yields the first len(output_iter) - 1 items of *output_iter*
    (unchanged original behavior); read() consumes items one at a time and
    returns '' once exhausted.  Calls to read() and open() are recorded in
    read_calls / open_calls for later inspection.
    """

    def __init__(self, output_iter):
        self.index = 0
        self.out_len = len(output_iter) - 1
        self.data = output_iter
        self.output_iter = iter(output_iter)
        self.read_calls = []
        self.open_calls = []

    def __iter__(self):
        return self

    def next(self):
        """Return the next line, stopping one item short of the full list."""
        if self.index == self.out_len:
            raise StopIteration
        else:
            line = self.data[self.index]
            self.index += 1
            return line

    # Python 3 iterator protocol: without this alias, iterating the object
    # raises TypeError because only the Python 2 style next() is defined.
    __next__ = next

    def read(self, *args, **kwargs):
        self.read_calls.append((args, kwargs))
        try:
            return next(self.output_iter)
        except StopIteration:
            return ''

    @contextmanager
    def open(self, *args, **kwargs):
        self.open_calls.append((args, kwargs))
        yield self
class MockOS(object):
    """Canned replacements for os.listdir, os.path.isdir, utils.ismount and
    os.statvfs.

    Each fake_* method records its call arguments in the matching *_calls
    list and returns (or raises) the canned output supplied at construction.
    """
    def __init__(self, ls_out=None, isdir_out=None, ismount_out=False,
                 statvfs_out=None):
        self.ls_output = ls_out
        self.isdir_output = isdir_out
        self.ismount_output = ismount_out
        self.statvfs_output = statvfs_out
        self.listdir_calls = []
        self.isdir_calls = []
        self.ismount_calls = []
        self.statvfs_calls = []

    def fake_listdir(self, *args, **kwargs):
        self.listdir_calls.append((args, kwargs))
        return self.ls_output

    def fake_isdir(self, *args, **kwargs):
        self.isdir_calls.append((args, kwargs))
        return self.isdir_output

    def fake_ismount(self, *args, **kwargs):
        self.ismount_calls.append((args, kwargs))
        # An exception instance supplied as the canned output means "raise it".
        if isinstance(self.ismount_output, Exception):
            raise self.ismount_output
        else:
            return self.ismount_output

    def fake_statvfs(self, *args, **kwargs):
        self.statvfs_calls.append((args, kwargs))
        return statvfs_result(self.statvfs_output)
class FakeRecon(object):
    """Stand-in for ReconMiddleware collectors.

    Each fake_* method returns a small sentinel payload so tests can verify
    routing; the per-type fakes (replication/updater/auditor/expirer) also
    record the recon_type they were called with.  The raise_* methods let
    tests exercise error paths.
    """
    def __init__(self):
        self.fake_replication_rtype = None
        self.fake_updater_rtype = None
        self.fake_auditor_rtype = None
        self.fake_expirer_rtype = None

    def fake_mem(self):
        return {'memtest': "1"}

    def fake_load(self):
        return {'loadtest': "1"}

    def fake_async(self):
        return {'asynctest': "1"}

    def fake_get_device_info(self):
        return {"/srv/1/node": ["sdb1"]}

    def fake_replication(self, recon_type):
        self.fake_replication_rtype = recon_type
        return {'replicationtest': "1"}

    def fake_updater(self, recon_type):
        self.fake_updater_rtype = recon_type
        return {'updatertest': "1"}

    def fake_auditor(self, recon_type):
        self.fake_auditor_rtype = recon_type
        return {'auditortest': "1"}

    def fake_expirer(self, recon_type):
        self.fake_expirer_rtype = recon_type
        return {'expirertest': "1"}

    def fake_mounted(self):
        return {'mountedtest': "1"}

    def fake_unmounted(self):
        return {'unmountedtest': "1"}

    def fake_unmounted_empty(self):
        return []

    def fake_diskusage(self):
        return {'diskusagetest': "1"}

    def fake_ringmd5(self):
        return {'ringmd5test': "1"}

    def fake_swiftconfmd5(self):
        return {'/etc/swift/swift.conf': "abcdef"}

    def fake_quarantined(self):
        return {'quarantinedtest': "1"}

    def fake_sockstat(self):
        return {'sockstattest': "1"}

    def fake_driveaudit(self):
        return {'driveaudittest': "1"}

    def fake_time(self):
        return {'timetest': "1"}

    def nocontent(self):
        return None

    def raise_IOError(self, *args, **kwargs):
        raise IOError

    def raise_ValueError(self, *args, **kwargs):
        raise ValueError

    def raise_Exception(self, *args, **kwargs):
        raise Exception
@patch_policies(legacy_only=True)
class TestReconSuccess(TestCase):
    def setUp(self):
        """Patch os/utils functions and the app's cache reader with mocks.

        Also writes a set of ring files under a fresh tempdir so the
        ring-md5 tests have real files to hash.
        """
        self.tempdir = tempfile.mkdtemp(prefix='swift_recon_md5_test')
        utils.mkdirs(self.tempdir)
        self.app = self._get_app()
        self.mockos = MockOS()
        self.fakecache = FakeFromCache()
        # Save the real implementations so tearDown can restore them.
        self.real_listdir = os.listdir
        self.real_isdir = os.path.isdir
        self.real_ismount = utils.ismount
        self.real_statvfs = os.statvfs
        # Route filesystem probes through the recording MockOS instance.
        os.listdir = self.mockos.fake_listdir
        os.path.isdir = self.mockos.fake_isdir
        utils.ismount = self.mockos.fake_ismount
        os.statvfs = self.mockos.fake_statvfs
        self.real_from_cache = self.app._from_recon_cache
        self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
        self.frecon = FakeRecon()
        # Replace md5_hash_for_file with one that just echoes the file's
        # basename, so expected ring hashes are predictable strings.
        mock_hash_for_file = mock.patch(
            'swift.common.middleware.recon.md5_hash_for_file',
            lambda f, **kwargs: 'hash-' + os.path.basename(f))
        self.addCleanup(mock_hash_for_file.stop)
        mock_hash_for_file.start()
        self.ring_part_shift = 5
        # Device table shared by every ring written in _create_rings;
        # includes a None entry (device id 2 is unassigned).
        self.ring_devs = [{'id': 0, 'zone': 0, 'weight': 1.0,
                           'ip': '10.1.1.1', 'port': 6200,
                           'device': 'sda1'},
                          {'id': 1, 'zone': 0, 'weight': 1.0,
                           'ip': '10.1.1.1', 'port': 6200,
                           'device': 'sdb1'},
                          None,
                          {'id': 3, 'zone': 2, 'weight': 1.0,
                           'ip': '10.1.2.1', 'port': 6200,
                           'device': 'sdc1'},
                          {'id': 4, 'zone': 2, 'weight': 1.0,
                           'ip': '10.1.2.2', 'port': 6200,
                           'device': 'sdd1'}]
        self._create_rings()
    def tearDown(self):
        """Restore the os/utils functions patched in setUp and clean up."""
        os.listdir = self.real_listdir
        os.path.isdir = self.real_isdir
        utils.ismount = self.real_ismount
        os.statvfs = self.real_statvfs
        del self.mockos
        # Put the real cache reader back on the app before dropping the fake.
        self.app._from_recon_cache = self.real_from_cache
        del self.fakecache
        rmtree(self.tempdir)
def _get_app(self):
app = recon.ReconMiddleware(FakeApp(), {'swift_dir': self.tempdir})
return app
def _create_ring(self, ringpath, replica_map, devs, part_shift):
ring.RingData(replica_map, devs, part_shift).save(ringpath,
mtime=None)
def _create_rings(self):
# make the rings unique so they have different md5 sums
rings = {
'account.ring.gz': [
array.array('H', [3, 1, 3, 1]),
array.array('H', [0, 3, 1, 4]),
array.array('H', [1, 4, 0, 3])],
'container.ring.gz': [
array.array('H', [4, 3, 0, 1]),
array.array('H', [0, 1, 3, 4]),
array.array('H', [3, 4, 0, 1])],
'object.ring.gz': [
array.array('H', [0, 1, 0, 1]),
array.array('H', [0, 1, 0, 1]),
array.array('H', [3, 4, 3, 4])],
'object-1.ring.gz': [
array.array('H', [1, 0, 1, 0]),
array.array('H', [1, 0, 1, 0]),
array.array('H', [4, 3, 4, 3])],
'object-2.ring.gz': [
array.array('H', [1, 1, 1, 0]),
array.array('H', [1, 0, 1, 3]),
array.array('H', [4, 2, 4, 3])]
}
for ringfn, replica_map in rings.items():
ringpath = os.path.join(self.tempdir, ringfn)
self._create_ring(ringpath, replica_map, self.ring_devs,
self.ring_part_shift)
@patch_policies([
StoragePolicy(0, 'stagecoach'),
StoragePolicy(1, 'pinto', is_deprecated=True),
StoragePolicy(2, 'toyota', is_default=True),
])
def test_get_ring_md5(self):
# We should only see configured and present rings, so to handle the
# "normal" case just patch the policies to match the existing rings.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz',
'%s/object-1.ring.gz' % self.tempdir:
'hash-object-1.ring.gz',
'%s/object-2.ring.gz' % self.tempdir:
'hash-object-2.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-{1,2}.ring.gz should both appear as they are present on disk
# and were configured as policies.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
def test_get_ring_md5_ioerror_produces_none_hash(self):
# Ring files that are present but produce an IOError on read should
# still produce a ringmd5 entry with a None for the hash. Note that
# this is different than if an expected ring file simply doesn't exist,
# in which case it is excluded altogether from the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir: None,
'%s/container.ring.gz' % self.tempdir: None,
'%s/object.ring.gz' % self.tempdir: None}
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
side_effect=IOError):
ringmd5 = self.app.get_ring_md5()
self.assertEqual(sorted(ringmd5.items()),
sorted(expt_out.items()))
def test_get_ring_md5_failed_ring_hash_recovers_without_restart(self):
# Ring files that are present but produce an IOError on read will
# show a None hash, but if they can be read later their hash
# should become available in the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir: None,
'%s/container.ring.gz' % self.tempdir: None,
'%s/object.ring.gz' % self.tempdir: None}
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
side_effect=IOError):
ringmd5 = self.app.get_ring_md5()
self.assertEqual(sorted(ringmd5.items()),
sorted(expt_out.items()))
# If we fix a ring and it can be read again, its hash should then
# appear using the same app instance
def fake_hash_for_file(fn):
if 'object' not in fn:
raise IOError
return 'hash-' + os.path.basename(fn)
expt_out = {'%s/account.ring.gz' % self.tempdir: None,
'%s/container.ring.gz' % self.tempdir: None,
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz'}
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
fake_hash_for_file):
ringmd5 = self.app.get_ring_md5()
self.assertEqual(sorted(ringmd5.items()),
sorted(expt_out.items()))
@patch_policies([
StoragePolicy(0, 'stagecoach'),
StoragePolicy(2, 'bike', is_default=True),
StoragePolicy(3502, 'train')
])
def test_get_ring_md5_missing_ring_recovers_without_restart(self):
# If a configured ring is missing when the app is instantiated, but is
# later moved into place, we shouldn't need to restart object-server
# for it to appear in recon.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz',
'%s/object-2.ring.gz' % self.tempdir:
'hash-object-2.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-1.ring.gz should not appear as it's present but unconfigured.
# object-3502.ring.gz should not appear as it's configured but not
# (yet) present.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
# Simulate the configured policy's missing ringfile being moved into
# place during runtime
ringfn = 'object-3502.ring.gz'
ringpath = os.path.join(self.tempdir, ringfn)
ringmap = [array.array('H', [1, 2, 1, 4]),
array.array('H', [4, 0, 1, 3]),
array.array('H', [1, 1, 0, 3])]
self._create_ring(os.path.join(self.tempdir, ringfn),
ringmap, self.ring_devs, self.ring_part_shift)
expt_out[ringpath] = 'hash-' + ringfn
# We should now see it in the ringmd5 response, without a restart
# (using the same app instance)
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
@patch_policies([
StoragePolicy(0, 'stagecoach', is_default=True),
StoragePolicy(2, 'bike'),
StoragePolicy(2305, 'taxi')
])
def test_get_ring_md5_excludes_configured_missing_obj_rings(self):
# Object rings that are configured but missing aren't meant to appear
# in the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz',
'%s/object-2.ring.gz' % self.tempdir:
'hash-object-2.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-1.ring.gz should not appear as it's present but unconfigured.
# object-2305.ring.gz should not appear as it's configured but not
# present.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
])
def test_get_ring_md5_excludes_unconfigured_present_obj_rings(self):
# Object rings that are present but not configured in swift.conf
# aren't meant to appear in the ringmd5 response.
expt_out = {'%s/account.ring.gz' % self.tempdir:
'hash-account.ring.gz',
'%s/container.ring.gz' % self.tempdir:
'hash-container.ring.gz',
'%s/object.ring.gz' % self.tempdir:
'hash-object.ring.gz'}
# We need to instantiate app after overriding the configured policies.
app = self._get_app()
# object-{1,2}.ring.gz should not appear as they are present on disk
# but were not configured as policies.
self.assertEqual(sorted(app.get_ring_md5().items()),
sorted(expt_out.items()))
def test_from_recon_cache(self):
oart = OpenAndReadTester(['{"notneeded": 5, "testkey1": "canhazio"}'])
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart.open)
self.assertEqual(oart.read_calls, [((), {})])
self.assertEqual(oart.open_calls, [(('test.cache', 'r'), {})])
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': 'canhazio'})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_ioerror(self):
oart = self.frecon.raise_IOError
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_valueerror(self):
oart = self.frecon.raise_ValueError
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_exception(self):
oart = self.frecon.raise_Exception
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEqual(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
    def test_get_mounted(self):
        """get_mounted parses /proc/mounts into device/path dicts.

        The fixture is a verbatim /proc/mounts capture; every line (even
        pseudo filesystems) is expected back as a {'device': ..., 'path': ...}
        entry, in order.
        """
        mounts_content = [
            'rootfs / rootfs rw 0 0',
            'none /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0',
            'none /proc proc rw,nosuid,nodev,noexec,relatime 0 0',
            'none /dev devtmpfs rw,relatime,size=248404k,nr_inodes=62101,'
            'mode=755 0 0',
            'none /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,'
            'ptmxmode=000 0 0',
            '/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252 / ext4'
            ' rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0',
            'none /sys/fs/fuse/connections fusectl rw,relatime 0 0',
            'none /sys/kernel/debug debugfs rw,relatime 0 0',
            'none /sys/kernel/security securityfs rw,relatime 0 0',
            'none /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0',
            'none /var/run tmpfs rw,nosuid,relatime,mode=755 0 0',
            'none /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
            'none /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0',
            '/dev/loop0 /mnt/sdb1 xfs rw,noatime,nodiratime,attr2,nobarrier,'
            'logbufs=8,noquota 0 0',
            'rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0',
            'nfsd /proc/fs/nfsd nfsd rw,relatime 0 0',
            'none /proc/fs/vmblock/mountPoint vmblock rw,relatime 0 0',
            '']
        mounted_resp = [
            {'device': 'rootfs', 'path': '/'},
            {'device': 'none', 'path': '/sys'},
            {'device': 'none', 'path': '/proc'},
            {'device': 'none', 'path': '/dev'},
            {'device': 'none', 'path': '/dev/pts'},
            {'device': '/dev/disk/by-uuid/'
             'e5b143bd-9f31-49a7-b018-5e037dc59252', 'path': '/'},
            {'device': 'none', 'path': '/sys/fs/fuse/connections'},
            {'device': 'none', 'path': '/sys/kernel/debug'},
            {'device': 'none', 'path': '/sys/kernel/security'},
            {'device': 'none', 'path': '/dev/shm'},
            {'device': 'none', 'path': '/var/run'},
            {'device': 'none', 'path': '/var/lock'},
            {'device': 'none', 'path': '/lib/init/rw'},
            {'device': '/dev/loop0', 'path': '/mnt/sdb1'},
            {'device': 'rpc_pipefs', 'path': '/var/lib/nfs/rpc_pipefs'},
            {'device': 'nfsd', 'path': '/proc/fs/nfsd'},
            {'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
        oart = OpenAndReadTester(mounts_content)
        rv = self.app.get_mounted(openr=oart.open)
        self.assertEqual(oart.open_calls, [(('/proc/mounts', 'r'), {})])
        self.assertEqual(rv, mounted_resp)
def test_get_load(self):
oart = OpenAndReadTester(['0.03 0.03 0.00 1/220 16306'])
rv = self.app.get_load(openr=oart.open)
self.assertEqual(oart.read_calls, [((), {})])
self.assertEqual(oart.open_calls, [(('/proc/loadavg', 'r'), {})])
self.assertEqual(rv, {'5m': 0.029999999999999999, '15m': 0.0,
'processes': 16306, 'tasks': '1/220',
'1m': 0.029999999999999999})
    def test_get_mem(self):
        """get_mem turns /proc/meminfo lines into a key -> value-string dict.

        Every 'Name: value' line of the fixture must come back as an entry;
        the values stay strings (units included) and the trailing empty
        line is ignored.
        """
        meminfo_content = ['MemTotal: 505840 kB',
                           'MemFree: 26588 kB',
                           'Buffers: 44948 kB',
                           'Cached: 146376 kB',
                           'SwapCached: 14736 kB',
                           'Active: 194900 kB',
                           'Inactive: 193412 kB',
                           'Active(anon): 94208 kB',
                           'Inactive(anon): 102848 kB',
                           'Active(file): 100692 kB',
                           'Inactive(file): 90564 kB',
                           'Unevictable: 0 kB',
                           'Mlocked: 0 kB',
                           'SwapTotal: 407544 kB',
                           'SwapFree: 313436 kB',
                           'Dirty: 104 kB',
                           'Writeback: 0 kB',
                           'AnonPages: 185268 kB',
                           'Mapped: 9592 kB',
                           'Shmem: 68 kB',
                           'Slab: 61716 kB',
                           'SReclaimable: 46620 kB',
                           'SUnreclaim: 15096 kB',
                           'KernelStack: 1760 kB',
                           'PageTables: 8832 kB',
                           'NFS_Unstable: 0 kB',
                           'Bounce: 0 kB',
                           'WritebackTmp: 0 kB',
                           'CommitLimit: 660464 kB',
                           'Committed_AS: 565608 kB',
                           'VmallocTotal: 34359738367 kB',
                           'VmallocUsed: 266724 kB',
                           'VmallocChunk: 34359467156 kB',
                           'HardwareCorrupted: 0 kB',
                           'HugePages_Total: 0',
                           'HugePages_Free: 0',
                           'HugePages_Rsvd: 0',
                           'HugePages_Surp: 0',
                           'Hugepagesize: 2048 kB',
                           'DirectMap4k: 10240 kB',
                           'DirectMap2M: 514048 kB',
                           '']
        meminfo_resp = {'WritebackTmp': '0 kB',
                        'SwapTotal': '407544 kB',
                        'Active(anon)': '94208 kB',
                        'SwapFree': '313436 kB',
                        'DirectMap4k': '10240 kB',
                        'KernelStack': '1760 kB',
                        'MemFree': '26588 kB',
                        'HugePages_Rsvd': '0',
                        'Committed_AS': '565608 kB',
                        'Active(file)': '100692 kB',
                        'NFS_Unstable': '0 kB',
                        'VmallocChunk': '34359467156 kB',
                        'Writeback': '0 kB',
                        'Inactive(file)': '90564 kB',
                        'MemTotal': '505840 kB',
                        'VmallocUsed': '266724 kB',
                        'HugePages_Free': '0',
                        'AnonPages': '185268 kB',
                        'Active': '194900 kB',
                        'Inactive(anon)': '102848 kB',
                        'CommitLimit': '660464 kB',
                        'Hugepagesize': '2048 kB',
                        'Cached': '146376 kB',
                        'SwapCached': '14736 kB',
                        'VmallocTotal': '34359738367 kB',
                        'Shmem': '68 kB',
                        'Mapped': '9592 kB',
                        'SUnreclaim': '15096 kB',
                        'Unevictable': '0 kB',
                        'SReclaimable': '46620 kB',
                        'Mlocked': '0 kB',
                        'DirectMap2M': '514048 kB',
                        'HugePages_Surp': '0',
                        'Bounce': '0 kB',
                        'Inactive': '193412 kB',
                        'PageTables': '8832 kB',
                        'HardwareCorrupted': '0 kB',
                        'HugePages_Total': '0',
                        'Slab': '61716 kB',
                        'Buffers': '44948 kB',
                        'Dirty': '104 kB'}
        oart = OpenAndReadTester(meminfo_content)
        rv = self.app.get_mem(openr=oart.open)
        self.assertEqual(oart.open_calls, [(('/proc/meminfo', 'r'), {})])
        self.assertEqual(rv, meminfo_resp)
def test_get_async_info(self):
from_cache_response = {'async_pending': 5}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_async_info()
self.assertEqual(self.fakecache.fakeout_calls,
[((['async_pending'],
'/var/cache/swift/object.recon'), {})])
self.assertEqual(rv, {'async_pending': 5})
    def test_get_replication_info_account(self):
        """Account replication stats are read from account.recon.

        The expected payload is a duplicated literal (rather than reusing
        ``from_cache_response``) so the assertion is independent of the
        input dict object.
        """
        from_cache_response = {
            "replication_stats": {
                "attempted": 1, "diff": 0,
                "diff_capped": 0, "empty": 0,
                "failure": 0, "hashmatch": 0,
                "failure_nodes": {
                    "192.168.0.1": 0,
                    "192.168.0.2": 0},
                "no_change": 2, "remote_merge": 0,
                "remove": 0, "rsync": 0,
                "start": 1333044050.855202,
                "success": 2, "ts_repl": 0},
            "replication_time": 0.2615511417388916,
            "replication_last": 1357969645.25}
        self.fakecache.fakeout = from_cache_response
        rv = self.app.get_replication_info('account')
        self.assertEqual(self.fakecache.fakeout_calls,
                         [((['replication_time', 'replication_stats',
                             'replication_last'],
                            '/var/cache/swift/account.recon'), {})])
        self.assertEqual(rv, {
            "replication_stats": {
                "attempted": 1, "diff": 0,
                "diff_capped": 0, "empty": 0,
                "failure": 0, "hashmatch": 0,
                "failure_nodes": {
                    "192.168.0.1": 0,
                    "192.168.0.2": 0},
                "no_change": 2, "remote_merge": 0,
                "remove": 0, "rsync": 0,
                "start": 1333044050.855202,
                "success": 2, "ts_repl": 0},
            "replication_time": 0.2615511417388916,
            "replication_last": 1357969645.25})
    def test_get_replication_info_container(self):
        """Container replication stats are read from container.recon.

        The expected payload is a duplicated literal (rather than reusing
        ``from_cache_response``) so the assertion is independent of the
        input dict object.
        """
        from_cache_response = {
            "replication_time": 200.0,
            "replication_stats": {
                "attempted": 179, "diff": 0,
                "diff_capped": 0, "empty": 0,
                "failure": 0, "hashmatch": 0,
                "failure_nodes": {
                    "192.168.0.1": 0,
                    "192.168.0.2": 0},
                "no_change": 358, "remote_merge": 0,
                "remove": 0, "rsync": 0,
                "start": 5.5, "success": 358,
                "ts_repl": 0},
            "replication_last": 1357969645.25}
        self.fakecache.fakeout_calls = []
        self.fakecache.fakeout = from_cache_response
        rv = self.app.get_replication_info('container')
        self.assertEqual(self.fakecache.fakeout_calls,
                         [((['replication_time', 'replication_stats',
                             'replication_last'],
                            '/var/cache/swift/container.recon'), {})])
        self.assertEqual(rv, {
            "replication_time": 200.0,
            "replication_stats": {
                "attempted": 179, "diff": 0,
                "diff_capped": 0, "empty": 0,
                "failure": 0, "hashmatch": 0,
                "failure_nodes": {
                    "192.168.0.1": 0,
                    "192.168.0.2": 0},
                "no_change": 358, "remote_merge": 0,
                "remove": 0, "rsync": 0,
                "start": 5.5, "success": 358,
                "ts_repl": 0},
            "replication_last": 1357969645.25})
    def test_get_replication_object(self):
        """Object replication stats include the legacy object_* keys.

        The expected payload is a duplicated literal (rather than reusing
        ``from_cache_response``) so the assertion is independent of the
        input dict object.
        """
        from_cache_response = {
            "replication_time": 0.2615511417388916,
            "replication_stats": {
                "attempted": 179,
                "failure": 0, "hashmatch": 0,
                "failure_nodes": {
                    "192.168.0.1": 0,
                    "192.168.0.2": 0},
                "remove": 0, "rsync": 0,
                "start": 1333044050.855202, "success": 358},
            "replication_last": 1357969645.25,
            "object_replication_time": 0.2615511417388916,
            "object_replication_last": 1357969645.25}
        self.fakecache.fakeout_calls = []
        self.fakecache.fakeout = from_cache_response
        rv = self.app.get_replication_info('object')
        self.assertEqual(self.fakecache.fakeout_calls,
                         [((['replication_time', 'replication_stats',
                             'replication_last', 'object_replication_time',
                             'object_replication_last'],
                            '/var/cache/swift/object.recon'), {})])
        self.assertEqual(rv, {
            "replication_time": 0.2615511417388916,
            "replication_stats": {
                "attempted": 179,
                "failure": 0, "hashmatch": 0,
                "failure_nodes": {
                    "192.168.0.1": 0,
                    "192.168.0.2": 0},
                "remove": 0, "rsync": 0,
                "start": 1333044050.855202, "success": 358},
            "replication_last": 1357969645.25,
            "object_replication_time": 0.2615511417388916,
            "object_replication_last": 1357969645.25})
def test_get_replication_info_unrecognized(self):
rv = self.app.get_replication_info('unrecognized_recon_type')
self.assertIsNone(rv)
def test_get_updater_info_container(self):
from_cache_response = {"container_updater_sweep": 18.476239919662476}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('container')
self.assertEqual(self.fakecache.fakeout_calls,
[((['container_updater_sweep'],
'/var/cache/swift/container.recon'), {})])
self.assertEqual(rv, {"container_updater_sweep": 18.476239919662476})
def test_get_updater_info_object(self):
from_cache_response = {"object_updater_sweep": 0.79848217964172363}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('object')
self.assertEqual(self.fakecache.fakeout_calls,
[((['object_updater_sweep'],
'/var/cache/swift/object.recon'), {})])
self.assertEqual(rv, {"object_updater_sweep": 0.79848217964172363})
def test_get_updater_info_unrecognized(self):
rv = self.app.get_updater_info('unrecognized_recon_type')
self.assertIsNone(rv)
def test_get_expirer_info_object(self):
from_cache_response = {'object_expiration_pass': 0.79848217964172363,
'expired_last_pass': 99}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_expirer_info('object')
self.assertEqual(self.fakecache.fakeout_calls,
[((['object_expiration_pass', 'expired_last_pass'],
'/var/cache/swift/object.recon'), {})])
self.assertEqual(rv, from_cache_response)
    def test_get_auditor_info_account(self):
        """Account auditor stats are read from account.recon.

        The expected payload is a duplicated literal so the assertion is
        independent of the input dict object.
        """
        from_cache_response = {"account_auditor_pass_completed": 0.24,
                               "account_audits_failed": 0,
                               "account_audits_passed": 6,
                               "account_audits_since": "1333145374.1373529"}
        self.fakecache.fakeout_calls = []
        self.fakecache.fakeout = from_cache_response
        rv = self.app.get_auditor_info('account')
        self.assertEqual(self.fakecache.fakeout_calls,
                         [((['account_audits_passed',
                             'account_auditor_pass_completed',
                             'account_audits_since',
                             'account_audits_failed'],
                            '/var/cache/swift/account.recon'), {})])
        self.assertEqual(rv, {"account_auditor_pass_completed": 0.24,
                              "account_audits_failed": 0,
                              "account_audits_passed": 6,
                              "account_audits_since": "1333145374.1373529"})
    def test_get_auditor_info_container(self):
        """Container auditor stats are read from container.recon.

        The expected payload is a duplicated literal so the assertion is
        independent of the input dict object.
        """
        from_cache_response = {"container_auditor_pass_completed": 0.24,
                               "container_audits_failed": 0,
                               "container_audits_passed": 6,
                               "container_audits_since": "1333145374.1373529"}
        self.fakecache.fakeout_calls = []
        self.fakecache.fakeout = from_cache_response
        rv = self.app.get_auditor_info('container')
        self.assertEqual(self.fakecache.fakeout_calls,
                         [((['container_audits_passed',
                             'container_auditor_pass_completed',
                             'container_audits_since',
                             'container_audits_failed'],
                            '/var/cache/swift/container.recon'), {})])
        self.assertEqual(rv, {"container_auditor_pass_completed": 0.24,
                              "container_audits_failed": 0,
                              "container_audits_passed": 6,
                              "container_audits_since": "1333145374.1373529"})
    def test_get_auditor_info_object(self):
        """Object auditor ALL and ZBF stat groups are read from object.recon.

        The expected payload is a duplicated literal so the assertion is
        independent of the input dict object.
        """
        from_cache_response = {
            "object_auditor_stats_ALL": {
                "audit_time": 115.14418768882751,
                "bytes_processed": 234660,
                "completed": 115.4512460231781,
                "errors": 0,
                "files_processed": 2310,
                "quarantined": 0},
            "object_auditor_stats_ZBF": {
                "audit_time": 45.877294063568115,
                "bytes_processed": 0,
                "completed": 46.181446075439453,
                "errors": 0,
                "files_processed": 2310,
                "quarantined": 0}}
        self.fakecache.fakeout_calls = []
        self.fakecache.fakeout = from_cache_response
        rv = self.app.get_auditor_info('object')
        self.assertEqual(self.fakecache.fakeout_calls,
                         [((['object_auditor_stats_ALL',
                             'object_auditor_stats_ZBF'],
                            '/var/cache/swift/object.recon'), {})])
        self.assertEqual(rv, {
            "object_auditor_stats_ALL": {
                "audit_time": 115.14418768882751,
                "bytes_processed": 234660,
                "completed": 115.4512460231781,
                "errors": 0,
                "files_processed": 2310,
                "quarantined": 0},
            "object_auditor_stats_ZBF": {
                "audit_time": 45.877294063568115,
                "bytes_processed": 0,
                "completed": 46.181446075439453,
                "errors": 0,
                "files_processed": 2310,
                "quarantined": 0}})
    def test_get_auditor_info_object_parallel_once(self):
        """Per-disk (parallel/once) auditor stats pass through unchanged.

        Same keys as the plain object auditor test, but the stat groups are
        keyed per device.  The expected payload is a duplicated literal so
        the assertion is independent of the input dict object.
        """
        from_cache_response = {
            "object_auditor_stats_ALL": {
                'disk1': {
                    "audit_time": 115.14418768882751,
                    "bytes_processed": 234660,
                    "completed": 115.4512460231781,
                    "errors": 0,
                    "files_processed": 2310,
                    "quarantined": 0},
                'disk2': {
                    "audit_time": 115,
                    "bytes_processed": 234660,
                    "completed": 115,
                    "errors": 0,
                    "files_processed": 2310,
                    "quarantined": 0}},
            "object_auditor_stats_ZBF": {'disk1disk2': {
                "audit_time": 45.877294063568115,
                "bytes_processed": 0,
                "completed": 46.181446075439453,
                "errors": 0,
                "files_processed": 2310,
                "quarantined": 0}}}
        self.fakecache.fakeout_calls = []
        self.fakecache.fakeout = from_cache_response
        rv = self.app.get_auditor_info('object')
        self.assertEqual(self.fakecache.fakeout_calls,
                         [((['object_auditor_stats_ALL',
                             'object_auditor_stats_ZBF'],
                            '/var/cache/swift/object.recon'), {})])
        self.assertEqual(rv, {
            "object_auditor_stats_ALL": {
                'disk1': {
                    "audit_time": 115.14418768882751,
                    "bytes_processed": 234660,
                    "completed": 115.4512460231781,
                    "errors": 0,
                    "files_processed": 2310,
                    "quarantined": 0},
                'disk2': {
                    "audit_time": 115,
                    "bytes_processed": 234660,
                    "completed": 115,
                    "errors": 0,
                    "files_processed": 2310,
                    "quarantined": 0}},
            "object_auditor_stats_ZBF": {'disk1disk2': {
                "audit_time": 45.877294063568115,
                "bytes_processed": 0,
                "completed": 46.181446075439453,
                "errors": 0,
                "files_processed": 2310,
                "quarantined": 0}}})
def test_get_auditor_info_unrecognized(self):
rv = self.app.get_auditor_info('unrecognized_recon_type')
self.assertIsNone(rv)
def test_get_unmounted(self):
unmounted_resp = [{'device': 'fakeone', 'mounted': False},
{'device': 'faketwo', 'mounted': False}]
self.mockos.ls_output = ['fakeone', 'faketwo']
self.mockos.isdir_output = True
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/fakeone',), {}),
(('/srv/node/faketwo',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_excludes_files(self):
unmounted_resp = []
self.mockos.ls_output = ['somerando.log']
self.mockos.isdir_output = False
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/somerando.log',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_all_mounted(self):
unmounted_resp = []
self.mockos.ls_output = ['fakeone', 'faketwo']
self.mockos.isdir_output = True
self.mockos.ismount_output = True
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/fakeone',), {}),
(('/srv/node/faketwo',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_checkmount_fail(self):
unmounted_resp = [{'device': 'fakeone', 'mounted': 'brokendrive'}]
self.mockos.ls_output = ['fakeone']
self.mockos.isdir_output = True
self.mockos.ismount_output = OSError('brokendrive')
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/fakeone',), {})])
self.assertEqual(self.mockos.ismount_calls,
[(('/srv/node/fakeone',), {})])
self.assertEqual(rv, unmounted_resp)
def test_get_unmounted_no_mounts(self):
def fake_checkmount_true(*args):
return True
unmounted_resp = []
self.mockos.ls_output = []
self.mockos.isdir_output = False
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls, [])
self.assertEqual(rv, unmounted_resp)
def test_get_diskusage(self):
# posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
# f_bfree=1113075, f_bavail=1013351,
# f_files=498736,
# f_ffree=397839, f_favail=397839, f_flag=0,
# f_namemax=255)
statvfs_content = (4096, 4096, 1963185, 1113075, 1013351, 498736,
397839, 397839, 0, 255)
du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,
'mounted': True, 'used': 3890520064, 'size': 8041205760}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
self.mockos.statvfs_output = statvfs_content
self.mockos.ismount_output = True
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(self.mockos.statvfs_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(rv, du_resp)
def test_get_diskusage_excludes_files(self):
du_resp = []
self.mockos.ls_output = ['somerando.log']
self.mockos.isdir_output = False
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/somerando.log',), {})])
self.assertEqual(self.mockos.statvfs_calls, [])
self.assertEqual(rv, du_resp)
def test_get_diskusage_checkmount_fail(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'brokendrive', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
self.mockos.ismount_output = OSError('brokendrive')
rv = self.app.get_diskusage()
self.assertEqual(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEqual(self.mockos.isdir_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(self.mockos.ismount_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEqual(rv, du_resp)
@mock.patch("swift.common.middleware.recon.check_mount", fake_check_mount)
def test_get_diskusage_oserror(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'Input/Output Error', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.isdir_output = True
rv = self.app.get_diskusage()
self.assertEqual(rv, du_resp)
def test_get_quarantine_count(self):
dirs = [['sda'], ['accounts', 'containers', 'objects', 'objects-1']]
self.mockos.ismount_output = True
def fake_lstat(*args, **kwargs):
# posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
# st_uid=5, st_gid=6, st_size=7, st_atime=8,
# st_mtime=9, st_ctime=10)
return stat_result((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
def fake_exists(*args, **kwargs):
return True
def fake_listdir(*args, **kwargs):
return dirs.pop(0)
with mock.patch("os.lstat", fake_lstat):
with mock.patch("os.path.exists", fake_exists):
with mock.patch("os.listdir", fake_listdir):
rv = self.app.get_quarantine_count()
self.assertEqual(rv, {'objects': 4, 'accounts': 2, 'policies':
{'1': {'objects': 2}, '0': {'objects': 2}},
'containers': 2})
def test_get_socket_info(self):
sockstat_content = ['sockets: used 271',
'TCP: inuse 30 orphan 0 tw 0 alloc 31 mem 0',
'UDP: inuse 16 mem 4', 'UDPLITE: inuse 0',
'RAW: inuse 0', 'FRAG: inuse 0 memory 0',
'']
oart = OpenAndReadTester(sockstat_content)
self.app.get_socket_info(openr=oart.open)
self.assertEqual(oart.open_calls, [
(('/proc/net/sockstat', 'r'), {}),
(('/proc/net/sockstat6', 'r'), {})])
def test_get_driveaudit_info(self):
from_cache_response = {'drive_audit_errors': 7}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_driveaudit_error()
self.assertEqual(self.fakecache.fakeout_calls,
[((['drive_audit_errors'],
'/var/cache/swift/drive.recon'), {})])
self.assertEqual(rv, {'drive_audit_errors': 7})
def test_get_time(self):
def fake_time():
return 1430000000.0
with mock.patch("time.time", fake_time):
now = fake_time()
rv = self.app.get_time()
self.assertEqual(rv, now)
class TestReconMiddleware(unittest.TestCase):
def fake_list(self, path):
return ['a', 'b']
    def setUp(self):
        """Build a ReconMiddleware and swap all its data methods for fakes.

        os.listdir is patched only around construction (restored right
        after); presumably ReconMiddleware lists a directory while
        initializing -- TODO confirm against recon.py.
        """
        self.frecon = FakeRecon()
        self.real_listdir = os.listdir
        os.listdir = self.fake_list
        self.app = recon.ReconMiddleware(FakeApp(), {'object_recon': "true"})
        # Keep handles on the real methods that some tests restore later.
        self.real_app_get_device_info = self.app.get_device_info
        self.real_app_get_swift_conf_md5 = self.app.get_swift_conf_md5
        os.listdir = self.real_listdir
        # Replace every data-gathering method with its FakeRecon canned
        # counterpart so each endpoint returns a predictable payload.
        self.app.get_mem = self.frecon.fake_mem
        self.app.get_load = self.frecon.fake_load
        self.app.get_async_info = self.frecon.fake_async
        self.app.get_device_info = self.frecon.fake_get_device_info
        self.app.get_replication_info = self.frecon.fake_replication
        self.app.get_auditor_info = self.frecon.fake_auditor
        self.app.get_updater_info = self.frecon.fake_updater
        self.app.get_expirer_info = self.frecon.fake_expirer
        self.app.get_mounted = self.frecon.fake_mounted
        self.app.get_unmounted = self.frecon.fake_unmounted
        self.app.get_diskusage = self.frecon.fake_diskusage
        self.app.get_ring_md5 = self.frecon.fake_ringmd5
        self.app.get_swift_conf_md5 = self.frecon.fake_swiftconfmd5
        self.app.get_quarantine_count = self.frecon.fake_quarantined
        self.app.get_socket_info = self.frecon.fake_sockstat
        self.app.get_driveaudit_error = self.frecon.fake_driveaudit
        self.app.get_time = self.frecon.fake_time
def test_recon_get_mem(self):
get_mem_resp = ['{"memtest": "1"}']
req = Request.blank('/recon/mem', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_mem_resp)
def test_recon_get_version(self):
req = Request.blank('/recon/version',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, [utils.json.dumps({'version': swiftver})])
def test_recon_get_load(self):
get_load_resp = ['{"loadtest": "1"}']
req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_load_resp)
def test_recon_get_async(self):
get_async_resp = ['{"asynctest": "1"}']
req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_async_resp)
def test_get_device_info(self):
get_device_resp = ['{"/srv/1/node": ["sdb1"]}']
req = Request.blank('/recon/devices',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_device_resp)
def test_recon_get_replication_notype(self):
get_replication_resp = ['{"replicationtest": "1"}']
req = Request.blank('/recon/replication',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'object')
self.frecon.fake_replication_rtype = None
def test_recon_get_replication_all(self):
get_replication_resp = ['{"replicationtest": "1"}']
# test account
req = Request.blank('/recon/replication/account',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'account')
self.frecon.fake_replication_rtype = None
# test container
req = Request.blank('/recon/replication/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'container')
self.frecon.fake_replication_rtype = None
# test object
req = Request.blank('/recon/replication/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_replication_resp)
self.assertEqual(self.frecon.fake_replication_rtype, 'object')
self.frecon.fake_replication_rtype = None
def test_recon_get_auditor_invalid(self):
get_auditor_resp = ['Invalid path: /recon/auditor/invalid']
req = Request.blank('/recon/auditor/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
def test_recon_get_auditor_notype(self):
get_auditor_resp = ['Invalid path: /recon/auditor']
req = Request.blank('/recon/auditor',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
def test_recon_get_auditor_all(self):
get_auditor_resp = ['{"auditortest": "1"}']
req = Request.blank('/recon/auditor/account',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
self.assertEqual(self.frecon.fake_auditor_rtype, 'account')
self.frecon.fake_auditor_rtype = None
req = Request.blank('/recon/auditor/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
self.assertEqual(self.frecon.fake_auditor_rtype, 'container')
self.frecon.fake_auditor_rtype = None
req = Request.blank('/recon/auditor/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_auditor_resp)
self.assertEqual(self.frecon.fake_auditor_rtype, 'object')
self.frecon.fake_auditor_rtype = None
def test_recon_get_updater_invalid(self):
get_updater_resp = ['Invalid path: /recon/updater/invalid']
req = Request.blank('/recon/updater/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_updater_notype(self):
get_updater_resp = ['Invalid path: /recon/updater']
req = Request.blank('/recon/updater',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_updater(self):
get_updater_resp = ['{"updatertest": "1"}']
req = Request.blank('/recon/updater/container',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(self.frecon.fake_updater_rtype, 'container')
self.frecon.fake_updater_rtype = None
self.assertEqual(resp, get_updater_resp)
req = Request.blank('/recon/updater/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
self.assertEqual(self.frecon.fake_updater_rtype, 'object')
self.frecon.fake_updater_rtype = None
def test_recon_get_expirer_invalid(self):
get_updater_resp = ['Invalid path: /recon/expirer/invalid']
req = Request.blank('/recon/expirer/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_expirer_notype(self):
get_updater_resp = ['Invalid path: /recon/expirer']
req = Request.blank('/recon/expirer',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_updater_resp)
def test_recon_get_expirer_object(self):
get_expirer_resp = ['{"expirertest": "1"}']
req = Request.blank('/recon/expirer/object',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_expirer_resp)
self.assertEqual(self.frecon.fake_expirer_rtype, 'object')
self.frecon.fake_updater_rtype = None
def test_recon_get_mounted(self):
get_mounted_resp = ['{"mountedtest": "1"}']
req = Request.blank('/recon/mounted',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_mounted_resp)
def test_recon_get_unmounted(self):
get_unmounted_resp = ['{"unmountedtest": "1"}']
self.app.get_unmounted = self.frecon.fake_unmounted
req = Request.blank('/recon/unmounted',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_unmounted_resp)
def test_recon_get_unmounted_empty(self):
get_unmounted_resp = '[]'
self.app.get_unmounted = self.frecon.fake_unmounted_empty
req = Request.blank('/recon/unmounted',
environ={'REQUEST_METHOD': 'GET'})
resp = ''.join(self.app(req.environ, start_response))
self.assertEqual(resp, get_unmounted_resp)
def test_recon_get_diskusage(self):
get_diskusage_resp = ['{"diskusagetest": "1"}']
req = Request.blank('/recon/diskusage',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_diskusage_resp)
def test_recon_get_ringmd5(self):
get_ringmd5_resp = ['{"ringmd5test": "1"}']
req = Request.blank('/recon/ringmd5',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_ringmd5_resp)
def test_recon_get_swiftconfmd5(self):
get_swiftconfmd5_resp = ['{"/etc/swift/swift.conf": "abcdef"}']
req = Request.blank('/recon/swiftconfmd5',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_swiftconfmd5_resp)
def test_recon_get_quarantined(self):
get_quarantined_resp = ['{"quarantinedtest": "1"}']
req = Request.blank('/recon/quarantined',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_quarantined_resp)
def test_recon_get_sockstat(self):
get_sockstat_resp = ['{"sockstattest": "1"}']
req = Request.blank('/recon/sockstat',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_sockstat_resp)
def test_recon_invalid_path(self):
req = Request.blank('/recon/invalid',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['Invalid path: /recon/invalid'])
def test_no_content(self):
self.app.get_load = self.frecon.nocontent
req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['Internal server error.'])
def test_recon_pass(self):
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, 'FAKE APP')
def test_recon_get_driveaudit(self):
get_driveaudit_resp = ['{"driveaudittest": "1"}']
req = Request.blank('/recon/driveaudit',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_driveaudit_resp)
def test_recon_get_time(self):
get_time_resp = ['{"timetest": "1"}']
req = Request.blank('/recon/time',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, get_time_resp)
def test_get_device_info_function(self):
"""Test get_device_info function call success"""
resp = self.app.get_device_info()
self.assertEqual(['sdb1'], resp['/srv/1/node'])
def test_get_device_info_fail(self):
"""Test get_device_info failure by failing os.listdir"""
os.listdir = fail_os_listdir
resp = self.real_app_get_device_info()
os.listdir = self.real_listdir
device_path = list(resp)[0]
self.assertIsNone(resp[device_path])
def test_get_swift_conf_md5(self):
"""Test get_swift_conf_md5 success"""
resp = self.app.get_swift_conf_md5()
self.assertEqual('abcdef', resp['/etc/swift/swift.conf'])
def test_get_swift_conf_md5_fail(self):
"""Test get_swift_conf_md5 failure by failing file open"""
with mock.patch('swift.common.middleware.recon.md5_hash_for_file',
side_effect=IOError):
resp = self.real_app_get_swift_conf_md5()
self.assertIsNone(resp['/etc/swift/swift.conf'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 43.806108 | 79 | 0.554824 |
09b4b0b7e23dfe61a50c81a31f7c4cc85a60b7b2 | 3,992 | py | Python | cvxpy/transforms/scalarize.py | jasondark/cvxpy | 56aaa01b0e9d98ae5a91a923708129a7b37a6f18 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2015-06-03T01:33:46.000Z | 2021-11-15T01:48:49.000Z | cvxpy/transforms/scalarize.py | Toby-Gao/cvxpy | bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-10-22T07:46:38.000Z | 2020-10-22T07:46:38.000Z | cvxpy/transforms/scalarize.py | Toby-Gao/cvxpy | bd6f5142effa8cf883d1a0d7fd46c0d906b2fb93 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-10-22T01:35:58.000Z | 2022-01-19T10:48:51.000Z | """
Copyright 2017 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy import Constant
import cvxpy.atoms as atoms
from cvxpy.problems.objective import Minimize, Maximize
from cvxpy.transforms import indicator
def weighted_sum(objectives, weights):
    """Scalarizes a list of objectives as their weighted sum.

    Args:
      objectives: A list of Minimize/Maximize objectives.
      weights: A vector of weights, one per objective.

    Returns:
      A Minimize/Maximize objective equal to sum_i weights[i]*objectives[i].
    """
    terms = [objectives[idx] * weights[idx] for idx in range(len(objectives))]
    return sum(terms)
def targets_and_priorities(objectives, priorities, targets, limits=None, off_target=1e-5):
    """Combines objectives with penalties within a range between target and limit.

    Each Minimize objective i has value
        priorities[i]*objectives[i] when objectives[i] >= targets[i]
        +infinity when objectives[i] > limits[i]
    Each Maximize objective i has value
        priorities[i]*objectives[i] when objectives[i] <= targets[i]
        +infinity when objectives[i] < limits[i]

    Args:
      objectives: A list of Minimize/Maximize objectives.
      priorities: The weight within the trange.
      targets: The start (end) of penalty for Minimize (Maximize).
      limits: The hard end (start) of penalty for Minimize (Maximize).
      off_target: Penalty outside of target.

    Returns:
      A Minimize/Maximize objective.
    """
    num_objs = len(objectives)
    new_objs = []
    for i in range(num_objs):
        obj = objectives[i]
        # The sign of the priority decides on which side the penalty applies.
        sign = 1 if Constant.cast_to_const(priorities[i]).is_nonneg() else -1
        # NOTE(review): off_target is multiplied in place every iteration, so
        # a negative priority flips its sign for all LATER objectives too —
        # confirm this accumulation across the loop is intended.
        off_target *= sign
        if type(obj) == Minimize:
            # Penalize the amount by which the objective exceeds its target.
            expr = (priorities[i] - off_target)*atoms.pos(obj.args[0] - targets[i])
            expr += off_target*obj.args[0]
            if limits is not None:
                # Indicator makes the combined objective +infinity past the limit.
                expr += sign*indicator([obj.args[0] <= limits[i]])
            new_objs.append(expr)
        else:  # Maximize
            # Reward is capped at the target value.
            expr = (priorities[i] - off_target)*atoms.min_elemwise(obj.args[0], targets[i])
            expr += off_target*obj.args[0]
            if limits is not None:
                expr += sign*indicator([obj.args[0] >= limits[i]])
            new_objs.append(expr)
    obj_expr = sum(new_objs)
    # Return a Minimize when the combination is convex, Maximize otherwise.
    if obj_expr.is_convex():
        return Minimize(obj_expr)
    else:
        return Maximize(obj_expr)
def max(objectives, weights):
    """Scalarizes a list of objectives as the max of the weighted terms.

    Args:
      objectives: A list of Minimize/Maximize objectives.
      weights: A vector of weights, one per objective.

    Returns:
      A Minimize objective over the elementwise maximum of the weighted
      objective expressions.
    """
    scaled_exprs = []
    for idx in range(len(objectives)):
        scaled_exprs.append((objectives[idx]*weights[idx]).args[0])
    return Minimize(atoms.maximum(*scaled_exprs))
def log_sum_exp(objectives, weights, gamma=1):
    """Scalarizes a list of objectives as log_sum_exp of the weighted terms.

    The objective takes the form
        log(sum_{i=1}^n exp(gamma*weights[i]*objectives[i]))/gamma
    As gamma goes to 0, log_sum_exp approaches weighted_sum. As gamma goes to
    infinity, log_sum_exp approaches max.

    Args:
      objectives: A list of Minimize/Maximize objectives.
      weights: A vector of weights, one per objective.
      gamma: Parameter interpolating between weighted_sum and max.

    Returns:
      A Minimize objective.
    """
    scaled_exprs = []
    for idx in range(len(objectives)):
        scaled_exprs.append((objectives[idx]*weights[idx]).args[0])
    stacked = atoms.vstack(scaled_exprs)
    return Minimize(atoms.log_sum_exp(gamma*stacked)/gamma)
| 32.455285 | 91 | 0.676102 |
596286ee8a29779dfa0686c99a5bf182c7ca42df | 2,750 | py | Python | tests/test_data.py | ElMehdiBen/match | 7fc74d3635c243e9c51e2c0c8f7761adb22ff878 | [
"MIT"
] | 11 | 2017-10-18T19:03:54.000Z | 2021-06-23T03:32:00.000Z | tests/test_data.py | ElMehdiBen/match | 7fc74d3635c243e9c51e2c0c8f7761adb22ff878 | [
"MIT"
] | null | null | null | tests/test_data.py | ElMehdiBen/match | 7fc74d3635c243e9c51e2c0c8f7761adb22ff878 | [
"MIT"
] | 4 | 2019-04-26T15:30:44.000Z | 2022-01-29T17:19:08.000Z |
# Fixtures for datatype detection and normalization tests. Each datatype
# maps to three groups:
#   'valid':      inputs that must be recognized as the datatype,
#   'invalid':    inputs that must be rejected,
#   'equivalent': inputs that should all normalize to the same value; by
#                 convention the FIRST entry is the normalized form.
datatype_instances = {
    'phonenumber': {
        'valid': [
            '6083456789',
            '(608)3456789',
            '(608) 3456789',
            '(608) 345-6789',
            '608-345-6789',
            '608.345.6789',
            '+1608.345.6789',
            '+1 608.345.6789',
            '+1 (608) 345-6789',
        ],
        'invalid': [
            '608456789',
            '(23)3456789',
            '(608) 456789',
            '(608) 345-789',
            '1234(608)-345-6789',
        ],
        'equivalent': [
            # First is normalized version
            '+16083456789',
            '6083456789',
            '(608)3456789',
            '(608) 3456789',
            '(608) 345-6789',
            '608-345-6789',
            '608.345.6789',
            '+1608.345.6789',
            '+1 608.345.6789',
            '+1 (608) 345-6789',
        ]
    },
    'datetime': {
        'valid': [
            'Sep 25 2003',
            'Sep 25th 2003',
            'Sep 25 10:36:28 2003',
            'Thu Sep 25 2003',
            'Thu Sep 25th 2003',
            'Thu Sep 25 10:36:28 2003',
            'Thursday Sep 25 10:36:28 2003',
            'Thursday September 25 10:36:28 2003',
            'Thursday September 25th 10:36:28am 2003',
            '2003-09-25T10:49:41.5-03:00',
            '2003-09-25T10:49:41',
            '20030925T104941-0300',
            '20030925T104941',
            # Unix epoch timestamps in seconds and milliseconds, as int and str.
            1064486188,
            1064486188000,
            '1064486188',
            '1064486188000',
        ],
        'invalid': [
            # '64th of February', Well, dateutil.parser actually likes this one...
            '1st of Neptune',
            'Neptune 1',
            'definitely not a date',
            '10000000000000000000000',
            'ahem',
        ],
        'equivalent': [
            # First is normalized version
            '2003-09-25T10:36:28',
            'Thu Sep 25 10:36:28 2003',
            'Thursday Sep 25 10:36:28 2003',
            'Thursday September 25 10:36:28 2003',
            'Thursday September 25th 10:36:28am 2003',
        ]
    },
    'email': {
        'valid': [
            'k@r.vh',
            'a.b+c@a.b.cd',
            'AAA@bb.cc',
            'a_b_c@a.b.c.def',
            'aaaaaaaaaaaaa@bbbbb.ccccccccc',
        ],
        'invalid': [
            'a\nb@b.cd',
            'a b@b.cd',
            'b @b.cd',
            'a@b',
            'a@b.',
            'a@b. cd',
            'a@b.c',
            'aaaa@bbb. ccc',
        ],
        'equivalent': [
            # First is normalized version
            'a.b.c@bb.cc',
            ' a.b.c@bb.cc ',
            'a.b.c@BB.cc',
            'A.B.c@bb.cc',
            # 'A.B.c+d@BB.cc',
        ]
    },
}
| 26.190476 | 82 | 0.388727 |
670c8275a3fe92731a30f9c108855aab37e41ba4 | 1,319 | py | Python | pyenv/lib/python3.6/site-packages/sass_processor/apps.py | ronald-rgr/ai-chatbot-smartguide | c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf | [
"Apache-2.0"
] | null | null | null | pyenv/lib/python3.6/site-packages/sass_processor/apps.py | ronald-rgr/ai-chatbot-smartguide | c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf | [
"Apache-2.0"
] | 3 | 2020-03-23T18:01:51.000Z | 2021-03-19T23:15:15.000Z | pyenv/lib/python3.6/site-packages/sass_processor/apps.py | ronald-rgr/ai-chatbot-smartguide | c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import os
from django.apps import apps, AppConfig
from django.conf import settings
from django.core.files.storage import get_storage_class
APPS_INCLUDE_DIRS = []
class SassProcessorConfig(AppConfig):
    """Django app config that registers per-app Sass include directories.

    On app readiness, each installed app's static directory is scanned; if it
    contains at least one file matching the include-file pattern (by default
    partials like ``_foo.scss``), the directory is added to
    APPS_INCLUDE_DIRS so the Sass compiler can resolve imports from it.
    """
    name = 'sass_processor'
    verbose_name = "Sass Processor"
    _storage = get_storage_class(import_path=settings.STATICFILES_STORAGE)()
    _auto_include = getattr(settings, 'SASS_PROCESSOR_AUTO_INCLUDE', True)
    _pattern = re.compile(getattr(settings, 'SASS_PROCESSOR_INCLUDE_FILE_PATTERN', r'^_.+\.(scss|sass)$'))

    def ready(self):
        if not self._auto_include:
            return
        # Each app's static dir lives under its path at the storage base URL.
        base = self._storage.base_url.strip(os.path.sep)
        for app_config in apps.get_app_configs():
            candidate = os.path.join(app_config.path, base)
            if os.path.isdir(candidate):
                self.traverse_tree(candidate)

    @classmethod
    def traverse_tree(cls, static_dir):
        """Register static_dir once if any file below it matches the pattern."""
        for _root, _dirs, filenames in os.walk(static_dir):
            if any(cls._pattern.match(name) for name in filenames):
                APPS_INCLUDE_DIRS.append(static_dir)
                return
| 36.638889 | 106 | 0.673995 |
aa3b2ab131134d716e8ae48821afb98c48a3882c | 2,285 | py | Python | gaphor/services/tests/test_copyservice.py | albanobattistella/gaphor | 5fc6b0ff39ba6dbbb73cb9b111f32d1eda790e14 | [
"Apache-2.0"
] | 1 | 2020-11-27T12:39:15.000Z | 2020-11-27T12:39:15.000Z | gaphor/services/tests/test_copyservice.py | albanobattistella/gaphor | 5fc6b0ff39ba6dbbb73cb9b111f32d1eda790e14 | [
"Apache-2.0"
] | null | null | null | gaphor/services/tests/test_copyservice.py | albanobattistella/gaphor | 5fc6b0ff39ba6dbbb73cb9b111f32d1eda790e14 | [
"Apache-2.0"
] | 3 | 2020-01-23T14:13:59.000Z | 2020-02-18T18:21:47.000Z | from gaphor import UML
from gaphor.diagram.classes import AssociationItem, ClassItem
from gaphor.diagram.general import CommentItem
from gaphor.services.copyservice import CopyService
from gaphor.storage.verify import orphan_references
from gaphor.tests.testcase import TestCase
class CopyServiceTestCase(TestCase):
services = TestCase.services + [
"main_window",
"properties",
"undo_manager",
"export_menu",
"tools_menu",
]
def setUp(self):
super().setUp()
self.service = CopyService(
self.get_service("event_manager"),
self.get_service("element_factory"),
self.get_service("main_window"),
)
def test_copy(self):
service = self.service
ef = self.element_factory
diagram = ef.create(UML.Diagram)
ci = diagram.create(CommentItem, subject=ef.create(UML.Comment))
service.copy([ci])
assert diagram.canvas.get_all_items() == [ci]
service.paste(diagram)
assert len(diagram.canvas.get_all_items()) == 2, diagram.canvas.get_all_items()
def _skip_test_copy_paste_undo(self):
"""
Test if copied data is undoable.
"""
service = self.service
# Setting the stage:
ci1 = self.create(ClassItem, UML.Class)
ci2 = self.create(ClassItem, UML.Class)
a = self.create(AssociationItem)
self.connect(a, a.head, ci1)
self.connect(a, a.tail, ci2)
assert a.subject
assert a.head_end.subject
assert a.tail_end.subject
# The act: copy and paste, perform undo afterwards
service.copy([ci1, ci2, a])
service.paste(self.diagram)
all_items = list(self.diagram.canvas.get_all_items())
assert 6 == len(all_items)
assert not orphan_references(self.element_factory)
assert all_items[0].subject is all_items[3].subject
assert all_items[1].subject is all_items[4].subject
assert all_items[2].subject is all_items[5].subject
undo_manager = self.get_service("undo_manager")
undo_manager.undo_transaction()
assert 3 == len(self.diagram.canvas.get_all_items())
assert not orphan_references(self.element_factory)
| 28.5625 | 87 | 0.64814 |
dc4dcee90cd4ac7ad9913f1788856fb04217a8f0 | 14,583 | py | Python | open_spiel/python/games/dynamic_routing_utils.py | dat-boris/open_spiel | 48d14f20793493722f7c1f59fd140ad21022320b | [
"Apache-2.0"
] | null | null | null | open_spiel/python/games/dynamic_routing_utils.py | dat-boris/open_spiel | 48d14f20793493722f7c1f59fd140ad21022320b | [
"Apache-2.0"
] | 1 | 2021-10-05T16:07:01.000Z | 2021-10-05T16:07:01.000Z | open_spiel/python/games/dynamic_routing_utils.py | dat-boris/open_spiel | 48d14f20793493722f7c1f59fd140ad21022320b | [
"Apache-2.0"
] | 1 | 2022-02-22T20:05:37.000Z | 2022-02-22T20:05:37.000Z | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Utils module for dynamic routing game and mean field routing game.
This module has three main classes:
- Network
- Vehicle
- OriginDestinationDemand
"""
# pylint: disable=g-bad-todo
# pylint: disable=eval-used
from typing import Dict, Iterable, List, Tuple
# In case one vehicle has reached a end node, then it cannot do anything. In
# this case its action is 0. Action 0 is reserved to encode no possible action
# as requested by Open Spiel.
NO_POSSIBLE_ACTION = 0
def _nodes_to_road_section(origin: str, destination: str) -> str:
"""Create a road section 'A->B' from two nodes 'A' and 'B'."""
return f"{origin}->{destination}"
def _road_section_to_nodes(movement: str) -> Tuple[str, str]:
"""Split a road section 'A->B' to two nodes 'A' and 'B'."""
origin, destination = movement.split("->")
return origin, destination
class Network:
    """Network implementation.

    A network is basically a directed graph with a volume delay function on
    each of its edges. Each vertex is refered to as a string (for example "A")
    and each edge as a string f"{node1}->{node2}" (for example "A->B"). The
    network is created from a adjacency list. Each road section is mapped to
    an action index (positive integer) in _road_section_to_action, and vice
    versa in _action_to_road_section. The volume delay function on each link
    is given by _probability_to_exit_functions.

    If one would like to plot the network then node position should be passed
    in the constructor. Then return_list_for_matplotlib_quiver can be used
    with Matplotlib:
    ```python3
    fig, ax = plt.subplots()
    o_xs, o_ys, d_xs, d_ys = g.return_list_for_matplotlib_quiver()
    ax.quiver(o_xs, o_ys, np.subtract(d_xs, o_xs), np.subtract(d_ys, o_ys),
              color="b", angles='xy', scale_units='xy', scale=1)
    ```
    See the Network tests for an example.

    Attributes:
      _action_to_road_section: dictionary that maps action id to road section.
      _adjacency_list: adjacency list of the line graph of the road network.
      _node_position: dictionary that maps node to couple of float encoding x
        and y position of the node. None by default.
      _probability_to_exit_functions: dictionary of functions as string
        assigned to road sections. A function is a string that will be
        evaluated with x as parameter (for example '1/(1+x)'). Each function
        takes as input x; the volume of cars on the road section, and output
        the probability that a given car exits the road section in the next
        time step. If average over all the cars on the road section, this
        function gives the volume of cars exiting the road section during a
        given time step as a function of the volume of cars on the road
        section. Such functions are called fundamental diagram of traffic
        flow.
      _road_section_to_action: dictionary that maps road section to action id.
    """
    _action_to_road_section: Dict[int, str]
    _adjacency_list: Dict[str, Iterable[str]]
    _node_position: Dict[str, Tuple[float, float]]
    _probability_to_exit_functions: Dict[str, str]
    _road_section_to_action: Dict[str, int]

    def __init__(self,
                 adjacency_list: Dict[str, Iterable[str]],
                 node_position: Dict[str, Tuple[float, float]] = None,
                 probability_to_exit_functions: Dict[str, str] = None):
        self._adjacency_list = adjacency_list
        self._road_section_to_action, self._action_to_road_section = (
            self._create_movement_to_action_and_action_to_road_section())
        nodes = set(adjacency_list)
        # Every successor must itself be a key of the adjacency list.
        # pylint: disable=g-complex-comprehension
        assert all(destination_node in nodes
                   for destination_nodes in self._adjacency_list.values()
                   for destination_node in destination_nodes), (
                       "Adjacency list is not correct.")
        if node_position:
            # Positions, when given, must cover exactly the network's nodes.
            assert set(node_position) == nodes
            self._node_position = node_position
        else:
            self._node_position = None
        if probability_to_exit_functions:
            assert set(probability_to_exit_functions) == set(
                self._road_section_to_action), (
                    "Exit functions are not defined for each road sections.")
            self._probability_to_exit_functions = probability_to_exit_functions
        else:
            # Default fundamental diagram: exit probability 1/(1+volume).
            self._probability_to_exit_functions = {}
            for road_section in self._road_section_to_action:
                self._probability_to_exit_functions[road_section] = "1 / (1+x)"
        assert hasattr(self, "_adjacency_list")
        assert hasattr(self, "_node_position")
        assert hasattr(self, "_probability_to_exit_functions")

    def _create_movement_to_action_and_action_to_road_section(
            self) -> Tuple[Dict[str, int], Dict[int, str]]:
        """Create dictionary that maps movement to action.

        The dictionary that maps movement to action is used to define the
        action from a movement that a vehicle would like to do. The dictionary
        that maps an action to the destination of the movement is used to move
        a vehicle that does an action to the destination of its movement.

        Returns:
          road_section_to_action: dictionary with key being a movement, for
            example "O->A", and value the action numbers. Action numbers are
            successive integers indexed from 1.
          action_to_road_section: map an action number to the end node of the
            movement. If road_section_to_action["O->A"] = 0 then
            action_to_road_section[0] = "O->A".
        """
        road_section_to_action = {}
        action_to_road_section = {}
        # Action 0 is reserved for "no possible action", so start at 1.
        action_number = 1
        for origin, successors in self._adjacency_list.items():
            for destination in successors:
                road_section = _nodes_to_road_section(origin, destination)
                if road_section in road_section_to_action:
                    # TODO: enable parallel links.
                    raise ValueError((
                        f"{road_section} exists twice in the adjacency list. The current "
                        "network implementation does not enable parallel links."))
                road_section_to_action[road_section] = action_number
                action_to_road_section[action_number] = road_section
                action_number += 1
        return road_section_to_action, action_to_road_section

    def num_links(self) -> int:
        """Returns the number of road sections."""
        return len(self._road_section_to_action)

    def num_actions(self) -> int:
        """Returns the number of possible actions.

        Equal to the number of road section + 1. An action could either be
        moving to a specific road section or not move.
        """
        return 1 + self.num_links()

    def links(self) -> List[str]:
        """Returns the road sections as a list."""
        return list(self._road_section_to_action)

    def get_successors(self, node: str) -> Iterable[str]:
        """Returns the successor nodes of the node."""
        return self._adjacency_list[node]

    def get_action_id_from_movement(self, origin: str, destination: str) -> int:
        """Maps two connected nodes to an action."""
        return self._road_section_to_action[_nodes_to_road_section(
            origin, destination)]

    def get_road_section_from_action_id(self, action_id: int) -> str:
        """Maps a action to the corresponding road section."""
        return self._action_to_road_section[action_id]

    def is_location_at_sink_node(self, road_section: str) -> bool:
        """Returns True if the road section has no successors."""
        start_section, end_section_node = _road_section_to_nodes(road_section)
        if start_section not in self._adjacency_list:
            raise KeyError(f"{start_section} is not a network node.")
        return not self.get_successors(end_section_node)

    def check_list_of_vehicles_is_correct(self, vehicles: Iterable["Vehicle"]):
        """Assert that vehicles have valid origin and destination.

        Origins and destinations are road sections, so they are validated
        against the road-section keys, not the node names.
        """
        for vehicle in vehicles:
            if (vehicle.origin not in self._road_section_to_action or
                    vehicle.destination not in self._road_section_to_action):
                raise ValueError(f"Incorrect origin or destination for {vehicle}")

    def check_list_of_od_demand_is_correct(
            self, vehicles: Iterable["OriginDestinationDemand"]):
        """Assert that OD demands have valid origin and destination."""
        for vehicle in vehicles:
            if (vehicle.origin not in self._road_section_to_action or
                    vehicle.destination not in self._road_section_to_action):
                raise ValueError(f"Incorrect origin or destination for {vehicle}")

    def __str__(self) -> str:
        return str(self._adjacency_list)

    def get_probability_to_exit(self, road_section: str, volume: float) -> float:
        """Returns probability to exit road_section with volume cars."""
        # TODO: find another way to pass the function.
        # NOTE(review): the exit function is eval'd from a string; this is
        # only safe while the functions come from trusted configuration.
        # pylint: disable=unused-argument
        def probability_to_exit(x):
            return eval(self._probability_to_exit_functions[road_section])
        prob = probability_to_exit(volume)
        assert 0 <= prob <= 1
        return prob

    def assert_valid_action(self, action: int, road_section: str = None):
        """Assert that an action as a int is valid.

        The action should be a int between 1 and num_actions. In case
        road_section is not None then it is tested if the action corresponds
        to going on a road section which is a successor of road_section.

        Args:
          action: the action,
          road_section: the road section.
        """
        assert isinstance(action, int), f"{action} is not a int."
        assert 1 <= action < self.num_actions()
        if road_section is not None:
            new_road_section = self.get_road_section_from_action_id(action)
            origin_new_section, end_new_section = _road_section_to_nodes(
                new_road_section)
            _, end_section_node = _road_section_to_nodes(road_section)
            # The new section must start where the current section ends.
            assert end_section_node == origin_new_section, (
                f"The action is not legal, trying to go to {new_road_section} "
                f"from {road_section} without going through {end_section_node}"
                ".")
            successors = self.get_successors(origin_new_section)
            assert end_new_section in successors, (
                f"Invalid action {new_road_section}. It is not a successors of"
                f" {end_section_node}: {successors}.")

    def return_position_of_road_section(self,
                                        road_section: str) -> Tuple[float, float]:
        """Returns position of the middle of the road section as (x,y)."""
        assert self._node_position is not None, (
            "The network should have node positions in order to be plot.")
        o_link, d_link = _road_section_to_nodes(road_section)
        o_x, o_y = self._node_position[o_link]
        d_x, d_y = self._node_position[d_link]
        # Midpoint of the section's two endpoints.
        return (o_x + d_x) / 2, (o_y + d_y) / 2

    def return_list_for_matplotlib_quiver(
            self) -> Tuple[List[float], List[float], List[float], List[float]]:
        """Returns 4 list of encoding the positions of the road sections.

        ```python3
        fig, ax = plt.subplots()
        o_xs, o_ys, d_xs, d_ys = g.return_list_for_matplotlib_quiver()
        ax.quiver(o_xs, o_ys, np.subtract(d_xs, o_xs), np.subtract(d_ys, o_ys),
                  color="b", angles='xy', scale_units='xy', scale=1)
        ```
        will show the network.

        Returns:
          o_xs, o_ys, d_xs, d_ys: list of the start x and y positions and of
            the end x and y positions of each road section. Each element of
            each list corresponds to one road section.
        """
        assert self._node_position is not None, (
            "The network should have node positions in order to be plot.")
        o_xs = []
        o_ys = []
        d_xs = []
        d_ys = []
        for road_section in self._road_section_to_action:
            o_link, d_link = _road_section_to_nodes(road_section)
            o_x, o_y = self._node_position[o_link]
            d_x, d_y = self._node_position[d_link]
            o_xs.append(o_x)
            o_ys.append(o_y)
            d_xs.append(d_x)
            d_ys.append(d_y)
        return o_xs, o_ys, d_xs, d_ys
class Vehicle:
  """A vehicle characterized by one origin and one destination.

  Both the origin and the destination of the vehicle are road sections,
  therefore they are strings formatted as "{str}->{str}".

  Attributes:
    destination: destination of the vehicle.
    origin: origin of the vehicle.
    departure_time: departure time of the vehicle.
  """
  _destination: str
  _origin: str
  _departure_time: float

  def __init__(self, origin: str, destination: str,
               departure_time: float = 0.0):
    # Both endpoints must be road sections, i.e. contain the "->" separator.
    for endpoint in (origin, destination):
      assert "->" in endpoint
    self._origin = origin
    self._destination = destination
    self._departure_time = departure_time

  @property
  def origin(self) -> str:
    """Returns vehicle's origin."""
    return self._origin

  @property
  def destination(self) -> str:
    """Returns vehicle's destination."""
    return self._destination

  @property
  def departure_time(self) -> float:
    """Returns vehicle's departure time."""
    return self._departure_time

  def __str__(self):
    return (f"Vehicle with origin {self._origin}, destination "
            f"{self._destination} and departure time {self._departure_time}.")
class OriginDestinationDemand(Vehicle):
  """Number of trips from origin to destination for a specific departure time.
  Both the origin and the destination of the vehicle are road section, therefore
  they are string formatted as "{str}->{str}".
  Attributes:
    destination: destination of the vehicles.
    origin: origin of the vehicles.
    departure_time: departure time of the vehicles.
    counts: the number of vehicles with the origin, destination and departure
      time.
  """
  # Aggregated number of identical trips represented by this entry.
  _counts: float
  def __init__(self, origin: str, destination: str, departure_time: float,
               counts: float):
    super().__init__(origin, destination, departure_time)
    self._counts = counts
  @property
  def counts(self) -> float:
    """Returns the number of vehicles in the instance."""
    return self._counts
  def __str__(self):
    return (f"{self._counts} with origin {self.origin}, destination "
            f"{self.destination} and departure time {self._departure_time}.")
| 40.396122 | 80 | 0.706576 |
eb6c373c966cdb16b70402d656f576a3bab33367 | 714 | py | Python | pystratum_pgsql/wrapper/PgSqlFunctionsWrapper.py | DatabaseStratum/py-stratum-pgsql | 03489fda257bc44bf5ed03a9a17b69eb8b302249 | [
"MIT"
] | null | null | null | pystratum_pgsql/wrapper/PgSqlFunctionsWrapper.py | DatabaseStratum/py-stratum-pgsql | 03489fda257bc44bf5ed03a9a17b69eb8b302249 | [
"MIT"
] | null | null | null | pystratum_pgsql/wrapper/PgSqlFunctionsWrapper.py | DatabaseStratum/py-stratum-pgsql | 03489fda257bc44bf5ed03a9a17b69eb8b302249 | [
"MIT"
] | null | null | null | from typing import Any, Dict
from pystratum_common.wrapper.FunctionsWrapper import FunctionsWrapper
from pystratum_pgsql.wrapper.PgSqlWrapper import PgSqlWrapper
class PgSqlFunctionsWrapper(PgSqlWrapper, FunctionsWrapper):
    """
    Wrapper method generator for stored functions.
    """
    # ------------------------------------------------------------------------------------------------------------------
    def _write_result_handler(self, routine: Dict[str, Any]) -> None:
        """
        Generates the code line that returns the single value produced by the stored function.

        :param routine: The metadata of the stored routine.
        """
        self._write_line('return self.execute_singleton1({0!s})'.format(self._generate_command(routine)))
# ----------------------------------------------------------------------------------------------------------------------
| 39.666667 | 120 | 0.512605 |
482d2af43d6b47b351ae041e7470e1967db54008 | 5,652 | py | Python | chatplug-client/client.py | ChatPlug/client-py | 02784ecca974426fac1dc4f7700a9def3fa41510 | [
"MIT"
] | null | null | null | chatplug-client/client.py | ChatPlug/client-py | 02784ecca974426fac1dc4f7700a9def3fa41510 | [
"MIT"
] | null | null | null | chatplug-client/client.py | ChatPlug/client-py | 02784ecca974426fac1dc4f7700a9def3fa41510 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import string
import random
import json
import aiohttp
from abc import ABC, abstractmethod
import asyncio
import websockets
sendMessageMutation = """
mutation sendMessage($body: String!, $originId: String!, $originThreadId: String!, $username: String!, $authorOriginId: String!, $authorAvatarUrl: String!, $attachments: [AttachmentInput!]!) {
sendMessage(
input: {
body: $body,
originId: $originId,
originThreadId: $originThreadId,
author: {
username: $username,
originId: $authorOriginId,
avatarUrl: $authorAvatarUrl
},
attachments: $attachments
}
) {
id
}
}
"""
messageReceivedSubscription = """
subscription {
messageReceived {
message {
body
id
originId
attachments {
type
sourceUrl
originId
id
}
thread {
id
originId
name
}
threadGroupId
author {
username
originId
avatarUrl
}
}
targetThreadId
}
}
"""
requestConfigurationRequest = """
subscription confRequest($fields: [ConfigurationField!]!){
configurationReceived(configuration:{fields: $fields}) {
fieldValues
}
}
"""
setInstanceStatusMutation = """
mutation {
setInstanceStatus(status:INITIALIZED) {
status
name
}
}
"""
class GQLClient():
    """Minimal GraphQL client: subscriptions over a 'graphql-ws' websocket,
    one-off queries/mutations over plain HTTP POST (see ``query``)."""
    def __init__(self, ws_url, http_url, access_token):
        # Endpoints plus the token sent in the connection_init payload and
        # as the HTTP 'Authentication' header.
        self.ws_url = ws_url
        self.http_url = http_url
        self.access_token = access_token
    async def connect(self, connected_callback, message_callback):
        """Opens the websocket, performs the graphql-ws handshake, then pumps
        every incoming frame (decoded from JSON) through ``message_callback``.
        ``connected_callback`` is scheduled once the handshake reply arrives.
        Blocks until the websocket closes."""
        async with websockets.connect(self.ws_url, subprotocols=["graphql-ws"]) as ws:
            self.ws = ws
            await ws.send(json.dumps({
                'type': 'connection_init',
                'payload': {'accessToken': self.access_token}}))
            # Wait for the server's handshake acknowledgement before subscribing.
            await ws.recv()
            asyncio.ensure_future(connected_callback())
            async for msg in ws:
                await message_callback(json.loads(msg))
    async def start_subscription(self, query, variables={}, headers={}):
        """Starts a subscription and returns its randomly generated 6-char id.

        NOTE(review): mutable default arguments ({}) are shared across calls;
        harmless only while never mutated. ``headers`` is accepted but unused.
        """
        sub_id = ''.join(random.choice(
            string.ascii_letters + string.digits) for _ in range(6))
        payload = {
            'type': 'start',
            'id': sub_id,
            'payload': {
                'headers': {},
                'variables': variables,
                'query': query
            }
        }
        await self.ws.send(json.dumps(payload))
        return sub_id
    async def query(self, query, variables={}):
        """Runs a query/mutation over HTTP and returns the decoded JSON body."""
        async with aiohttp.ClientSession(headers={'Authentication': self.access_token}) as session:
            async with session.post(self.http_url, json={'query': query, 'variables': variables}) as resp:
                return await resp.json()
    def stop_subscription(self, sub_id):
        """Asks the server to stop the subscription with the given id.

        NOTE(review): websockets' ``send()`` is a coroutine; calling it without
        ``await`` likely never sends the frame — confirm and make this method
        async if so.
        """
        payload = {
            'type': 'stop',
            'id': sub_id
        }
        self.ws.send(json.dumps(payload))
    def close(self):
        """Closes the websocket.

        NOTE(review): same un-awaited coroutine concern as ``stop_subscription``.
        """
        self.ws.close()
# def query(self, query, variables = {}, headers = {}):
class ChatPlugService(ABC):
    """Base class for a ChatPlug service: wires a GQLClient to abstract hooks.
    Subclasses implement ``on_message_received``, ``on_configuration_received``
    and ``on_connected``."""
    def __init__(self, access_token, ws_url, http_url):
        self.ws_url = ws_url
        self.access_token = access_token
        self.http_url = http_url
    async def receive_msg(self, data):
        """Dispatches one decoded websocket frame to the matching hook.

        NOTE(review): ``self.msg_sub_id``/``self.conf_recv_id`` are only set by
        ``ws_connected``/``subscribe_configuration``; a data frame arriving
        before both exist would raise AttributeError — confirm ordering.
        """
        print(data)
        if data["type"] == "ka":
            return # keep alive
        if data["type"] == "data":
            if data["id"] == self.msg_sub_id:
                msg_packet = data["payload"]["data"]["messageReceived"]
                await self.on_message_received(msg_packet)
            elif data["id"] == self.conf_recv_id:
                cfg = data["payload"]["data"]["configurationReceived"]
                await self.on_configuration_received(cfg)
    @abstractmethod
    async def on_message_received(self, msg):
        """Called with each received message packet."""
        pass
    @abstractmethod
    async def on_configuration_received(self, conf):
        """Called with the configuration values returned by the server."""
        pass
    @abstractmethod
    async def on_connected(self):
        """Called once the websocket handshake has completed."""
        pass
    async def send_message(self, body, origin_id, origin_thread_id, username, author_origin_id, author_avatar_url,
                           attachments):
        """Sends a chat message through the sendMessage GraphQL mutation (HTTP)."""
        resp = await self.ws.query(sendMessageMutation, variables={
            'body': body,
            'originId': origin_id,
            'originThreadId': origin_thread_id,
            'username': username,
            'authorOriginId': author_origin_id,
            'authorAvatarUrl': author_avatar_url,
            'attachments': attachments,
        })
        print(resp)
    async def subscribe_configuration(self, conf_fields):
        """Requests configuration fields; the answer arrives via receive_msg."""
        self.conf_recv_id = await self.ws.start_subscription(requestConfigurationRequest,
                                                             variables={'fields': conf_fields})
    async def ws_connected(self):
        """Websocket-open callback: subscribes to incoming messages."""
        self.msg_sub_id = await self.ws.start_subscription(messageReceivedSubscription)
        print(self.msg_sub_id)
        await self.on_connected()
    async def connect(self):
        """Creates the underlying GQLClient and blocks pumping messages."""
        self.ws = GQLClient(self.ws_url, self.http_url, self.access_token)
        await self.ws.connect(self.ws_connected, self.receive_msg)
| 30.224599 | 196 | 0.544232 |
639d8fedeb50a6b4d196c79672c3d25e6fa675d3 | 2,626 | py | Python | tests/primary_tests.py | cptq/ds-spectra | ab8c6598aa0bad886cb1bbd98e874eafe37acca4 | [
"MIT"
] | 1 | 2019-12-06T02:58:28.000Z | 2019-12-06T02:58:28.000Z | tests/primary_tests.py | cptq/ds-spectra | ab8c6598aa0bad886cb1bbd98e874eafe37acca4 | [
"MIT"
] | null | null | null | tests/primary_tests.py | cptq/ds-spectra | ab8c6598aa0bad886cb1bbd98e874eafe37acca4 | [
"MIT"
] | 1 | 2021-07-07T14:04:36.000Z | 2021-07-07T14:04:36.000Z | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src/')))
from permhull import *
from evenpermhull import *
def test_perms():
    """Checks that generated permutation groups have the expected order:
    |S_n| = n! and |A_n| = n!/2 (groups come from the permhull star import)."""
    from math import factorial
    # tests that generated groups are correct size
    for n in range(2,7):
        assert len(list(symmetric_group(n))) == factorial(n)
        assert len(list(alternating_group(n))) == factorial(n)//2
def test_cycle_types():
    """Checks counts and pairwise distinctness of (even) cycle types.

    NOTE(review): ``list(cycles)`` is called repeatedly on the same object, so
    ``cycle_types`` must return a reusable sequence, not a generator — confirm.
    """
    assert len(list(even_cycle_types(6))) == 6
    assert len(list(even_cycle_types(7))) == 8
    # Expected values: number of integer partitions of n (cycle types of S_n)
    # and of even cycle types, indexed by n.
    num_partitions = [0,1,2,3,5,7,11,15,22,30,42,
                      56,77,101,135,176]
    num_even_cycle_types = [0,1,1,2,3,4,6,8]
    for n in range(1,15):
        cycles = cycle_types(n)
        assert len(list(cycles)) == num_partitions[n]
        assert all(len(lst)==n for lst in cycles)
        for i in range(len(list(cycles))):
            for j in range(i+1, len(list(cycles))):
                assert cycles[i] != cycles[j]
        even_cycles = even_cycle_types(n)
        for i in range(len(list(even_cycles))):
            for j in range(i+1, len(list(even_cycles))):
                # NOTE(review): this asserts each pair is NOT elementwise
                # different everywhere; if pairwise distinctness was intended
                # it should read `not np.all(even_cycles[i] == even_cycles[j])`
                # — confirm. (`np` is expected from the star imports above.)
                assert not np.all(even_cycles[i] != even_cycles[j])
        assert all(len(lst)==n for lst in even_cycle_types(n))
def test_all_comb_coeff():
    """Checks that every generated tuple of convex-combination coefficients
    sums to 1 (within 1e-14) for several parameter settings.

    NOTE(review): ``binom`` is imported but unused here — confirm whether a
    count check was planned.
    """
    from scipy.special import binom
    # checks that all tuples of convex coefficients sum to 1
    eps = 1e-14
    assert all(abs(sum(c)-1)< eps for c in all_comb_coeff(2,1,.049))
    assert all(abs(sum(c)-1)< eps for c in all_comb_coeff(2,1,.1))
    assert all(abs(sum(c)-1)< eps for c in all_comb_coeff(2,1,.12))
    assert all(abs(sum(c)-1)< eps for c in all_comb_coeff(3,1,.1))
    assert all(abs(sum(c)-1)< eps for c in all_comb_coeff(3,1,.12))
    assert all(abs(sum(c)-1)< eps for c in all_comb_coeff(4,1,.1))
    assert all(abs(sum(c)-1)< eps for c in all_comb_coeff(4,1,.12))
def test_in_region():
    """Spot-checks membership of complex points in the PM_3 and PM_4 boundary
    regions returned by ``pm_boundary``."""
    # PM_3
    x_ranges, lines = pm_boundary(3)
    assert in_region((.49 + .0j), x_ranges, lines)
    assert in_region((.49 + .2j), x_ranges, lines)
    assert not in_region((.49 - .5j), x_ranges, lines)
    assert not in_region((-.51 + .49j), x_ranges, lines)
    # PM_4
    x_ranges, lines = pm_boundary(4)
    assert in_region((.49 + .0j), x_ranges, lines)
    assert in_region((.49 + .2j), x_ranges, lines)
    assert not in_region((.49 - .52j), x_ranges, lines)
    # Unlike PM_3, this point lies inside the larger PM_4 region.
    assert in_region((-.51 + .49j), x_ranges, lines)
# Allow running all checks directly, without a test runner.
if __name__ == "__main__":
    test_perms()
    test_cycle_types()
    test_all_comb_coeff()
    test_in_region()
    print("All tests pass")
| 37.514286 | 87 | 0.615004 |
9da6f0ff6500b3d2138e54696b1d8a89cb6ba239 | 886 | py | Python | mod/distributorapi/distributor/version.py | onap/dcaegen2-platform | 9e930892d28fc4a3378fad8f942c9f91cffe4698 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | mod/distributorapi/distributor/version.py | onap/dcaegen2-platform | 9e930892d28fc4a3378fad8f942c9f91cffe4698 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | mod/distributorapi/distributor/version.py | onap/dcaegen2-platform | 9e930892d28fc4a3378fad8f942c9f91cffe4698 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-10-15T15:02:20.000Z | 2021-10-15T15:02:20.000Z | # ============LICENSE_START=======================================================
# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
# Package version (PEP 396 style single source of truth).
__version__ = "1.0.1"
| 52.117647 | 82 | 0.573363 |
1f3f89e9f9c937857f8abf271d6378c919ecd292 | 1,453 | py | Python | scripts/utils/utils.py | daniele21/DL_soccer_prediction_v2 | 97bafe911fd8883d6679cf55fd0fff34db67ef06 | [
"MIT"
] | null | null | null | scripts/utils/utils.py | daniele21/DL_soccer_prediction_v2 | 97bafe911fd8883d6679cf55fd0fff34db67ef06 | [
"MIT"
] | null | null | null | scripts/utils/utils.py | daniele21/DL_soccer_prediction_v2 | 97bafe911fd8883d6679cf55fd0fff34db67ef06 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import os
import sys
# output_file_handler = logging.handlers.RotatingFileHandler(filename='logs/logs.logger',
# mode='a')
# stdout_handler = logging.StreamHandler(sys.stdout)
#
# logging.basicConfig(
# #format="%(asctime)s %(name)-15s %(levelname)-6s %(message)s",
# #datefmt="%y-%m-%d %H:%M:%S",
# handlers=[
# output_file_handler,
# stdout_handler
# ]
# )
logger = logging.getLogger('Logger')
logger.setLevel(logging.DEBUG)
def spent_time(start_time, end_time):
    """Formats the elapsed time between two timestamps.

    Returns a string of the form ' M min S.SS sec' (note the leading space,
    kept for callers that concatenate it after a label).
    """
    elapsed = end_time - start_time
    minutes, seconds = divmod(elapsed, 60)
    return f' {minutes:.0f} min {seconds:.2f} sec'
def ensure_folder(folder):
    """Creates *folder* (including missing parents) when it does not exist."""
    if not os.path.exists(folder):
        logger.info(f'> Creating folder at {folder}')
        # NOTE(review): os.makedirs(folder, exist_ok=True) would avoid the
        # check-then-create race if two processes call this concurrently.
        os.makedirs(folder)
    return
def multiply_all_list_elements(input_list):
    """Multiplies all positive elements of *input_list*.

    Returns the running product; the empty list yields 1. A negative element
    short-circuits the whole computation to -1, and a zero (or otherwise
    non-ordered) element raises ValueError.
    """
    product = 1
    for element in input_list:
        if element > 0:
            product *= element
        elif element < 0:
            # Any negative entry invalidates the product entirely.
            return -1
        else:
            raise ValueError('Multiplication element')
    return product
def consecutive_numbers(number_list):
    """Groups the numbers into maximal runs of consecutive integers.

    Returns a list of (start, end) tuples, one per run, in ascending order.
    E.g. [1, 2, 3, 7, 8, 10] -> [(1, 3), (7, 8), (10, 10)].
    """
    ordered = sorted(number_list)
    if not ordered:
        return []
    ranges = []
    run_start = prev = ordered[0]
    for current in ordered[1:]:
        if current > prev + 1:
            # Gap found: close the current run and open a new one.
            ranges.append((run_start, prev))
            run_start = current
        prev = current
    ranges.append((run_start, prev))
    return ranges
| 24.627119 | 89 | 0.573297 |
2df9bc46ef4d2a298d3ea4dcd570b093d152c820 | 665 | py | Python | twisted/test/test_iosim.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 3 | 2018-11-25T01:09:55.000Z | 2021-08-24T01:56:36.000Z | twisted/test/test_iosim.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 1 | 2022-03-04T17:40:22.000Z | 2022-03-04T17:40:22.000Z | twisted/test/test_iosim.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | 3 | 2018-11-09T03:38:09.000Z | 2020-02-24T06:26:10.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.test.iosim}.
"""
from twisted.test.iosim import FakeTransport
from twisted.trial.unittest import TestCase
class FakeTransportTests(TestCase):
    """
    Tests for L{FakeTransport}
    """
    def test_connectionSerial(self):
        """
        Each L{FakeTransport} receives a serial number that uniquely identifies
        it.
        """
        # Two transports built from distinct protocols and opposite
        # isServer flags must still get distinct integer serials.
        a = FakeTransport(object(), True)
        b = FakeTransport(object(), False)
        self.assertIsInstance(a.serial, int)
        self.assertIsInstance(b.serial, int)
        self.assertNotEqual(a.serial, b.serial)
| 25.576923 | 79 | 0.664662 |
1b9300ccfa4ad4e18eac03c7082b9bee982d14b3 | 49 | py | Python | mwoauth/defaults.py | minrk/python-mwoauth | 24d0dbb094e144ec3ddeed8adc54011ae6582c6c | [
"MIT"
] | 7 | 2015-10-18T14:18:03.000Z | 2021-06-26T00:14:58.000Z | mwoauth/defaults.py | minrk/python-mwoauth | 24d0dbb094e144ec3ddeed8adc54011ae6582c6c | [
"MIT"
] | 23 | 2016-05-06T22:15:57.000Z | 2021-10-17T19:38:54.000Z | mwoauth/defaults.py | minrk/python-mwoauth | 24d0dbb094e144ec3ddeed8adc54011ae6582c6c | [
"MIT"
] | 10 | 2016-05-06T23:45:12.000Z | 2020-07-12T11:11:04.000Z | USER_AGENT = "python-mwoauth default user agent"
| 24.5 | 48 | 0.795918 |
961a02a76c467986b244068dabf237205b812742 | 6,991 | py | Python | generator/extract.py | Kerry-zzx/Graph-Transformer | f3b3ad54da12688326f6bf1422db122a960127a4 | [
"MIT"
] | 2 | 2020-08-30T08:28:00.000Z | 2020-09-07T22:39:19.000Z | generator/extract.py | Kerry-zzx/Graph-Transformer | f3b3ad54da12688326f6bf1422db122a960127a4 | [
"MIT"
] | null | null | null | generator/extract.py | Kerry-zzx/Graph-Transformer | f3b3ad54da12688326f6bf1422db122a960127a4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from smatch import AMR
from AMRGraph import AMRGraph, number_regexp
from collections import Counter
import json, re
from AMRGraph import _is_abs_form
from multiprocessing import Pool
class AMRIO:
    """Reader for preprocessed AMR files (blocks of '# ::' headers + a graph)."""
    def __init__(self):
        pass
    @staticmethod
    def read(file_path):
        """Yields one (tokens, lemmas, abstract_map, AMRGraph) tuple per AMR.

        Tokens and lemmas are lower-cased unless ``_is_abs_form`` marks them as
        abstract forms. The graph itself is consumed right after the
        '# ::abstract_map' header line.
        NOTE(review): amr_id/sentence/pos_tags/ner_tags are parsed but never
        used, and a block missing a header would silently reuse values from the
        previous block — confirm the input format always provides all headers.
        """
        with open(file_path, encoding='utf-8') as f:
            for line in f:
                line = line.rstrip()
                if line.startswith('# ::id '):
                    amr_id = line[len('# ::id '):]
                elif line.startswith('# ::snt '):
                    sentence = line[len('# ::snt '):]
                elif line.startswith('# ::tokens '):
                    tokens = json.loads(line[len('# ::tokens '):])
                    tokens = [ to if _is_abs_form(to) else to.lower() for to in tokens]
                elif line.startswith('# ::lemmas '):
                    lemmas = json.loads(line[len('# ::lemmas '):])
                    lemmas = [ le if _is_abs_form(le) else le.lower() for le in lemmas]
                elif line.startswith('# ::pos_tags '):
                    pos_tags = json.loads(line[len('# ::pos_tags '):])
                elif line.startswith('# ::ner_tags '):
                    ner_tags = json.loads(line[len('# ::ner_tags '):])
                elif line.startswith('# ::abstract_map '):
                    abstract_map = json.loads(line[len('# ::abstract_map '):])
                    # The graph follows the abstract_map header: consume & parse.
                    graph_line = AMR.get_amr_line(f)
                    amr = AMR.parse_AMR_line(graph_line)
                    myamr = AMRGraph(amr)
                    yield tokens, lemmas, abstract_map, myamr
class LexicalMap(object):
    """Lexical mapping from concepts to tokens/lemmas, used by the copy
    mechanism."""
    # usage: cp_seq, token2idx, idx2token = lex_map.get(concept, vocabs['predictable_token'])
    def __init__(self):
        pass

    def get(self, concept, vocab=None):
        """Returns the copy sequence and, given a vocab, ids for novel tokens.

        Without a vocab, just returns a copy of ``concept``. With a vocab,
        additionally returns token2idx/idx2token extensions that assign fresh
        ids (starting at ``vocab.size``) to every concept the vocabulary maps
        to <unk>, so the copy mechanism can still point at them.
        """
        copy_seq = list(concept)
        if vocab is None:
            return copy_seq
        # Concepts unknown to the vocabulary get fresh, temporary ids.
        novel = set(cp for cp in copy_seq if vocab.token2idx(cp) == vocab.unk_idx)
        token2idx, idx2token = dict(), dict()
        next_index = vocab.size
        for tok in novel:
            token2idx[tok] = next_index
            idx2token[next_index] = tok
            next_index += 1
        return copy_seq, token2idx, idx2token
def read_file(filename):
    """Reads a preprocessed AMR file into four parallel lists.

    Returns:
        amrs, token, lemma, abstract: one entry per AMR block in the file.
    """
    # read preprocessed amr file
    token, lemma, abstract, amrs = [], [], [], []
    for _tok, _lem, _abstract, _myamr in AMRIO.read(filename):
        token.append(_tok)
        lemma.append(_lem)
        abstract.append(_abstract)
        amrs.append(_myamr)
    print ('read from %s, %d amrs'%(filename, len(token)))
    return amrs, token, lemma, abstract
def make_vocab(batch_seq, char_level=False):
    """Builds a token Counter over all sequences in ``batch_seq``.

    With ``char_level=True`` additionally returns a character Counter, where
    each character is weighted by the frequency of the tokens containing it.
    """
    token_counts = Counter(tok for seq in batch_seq for tok in seq)
    if not char_level:
        return token_counts
    char_counts = Counter()
    for token, freq in token_counts.most_common():
        for ch in token:
            char_counts[ch] += freq
    return token_counts, char_counts
def write_vocab(vocab, path):
    """Writes a vocabulary to ``path`` as "token<TAB>count" lines.

    Tokens are emitted in descending frequency order. The file is written with
    UTF-8 encoding, matching ``AMRIO.read`` (which reads UTF-8) instead of
    relying on the platform's default encoding.

    Args:
        vocab: a ``collections.Counter`` (anything providing ``most_common``).
        path: destination file path.
    """
    with open(path, 'w', encoding='utf-8') as fo:
        for x, y in vocab.most_common():
            fo.write('%s\t%d\n'%(x,y))
import argparse
def parse_config():
    """Builds and evaluates the command-line interface of this script.

    Options: --train_data (path), --amr_files (one or more paths),
    --nprocessors (worker count, default 4).
    """
    parser = argparse.ArgumentParser()
    for flag, options in (
            ('--train_data', dict(type=str)),
            ('--amr_files', dict(type=str, nargs='+')),
            ('--nprocessors', dict(type=int, default=4))):
        parser.add_argument(flag, **options)
    return parser.parse_args()
# Main preprocessing script: computes path statistics over the training AMRs,
# writes the vocabularies, then dumps every AMR file as JSON.
if __name__ == "__main__":
    args = parse_config()
    amrs, token, lemma, abstract = read_file(args.train_data)
    lexical_map = LexicalMap()
    # collect concepts and relations
    def work(data):
        # Returns one AMR's concepts/relations plus the tokens that are NOT
        # copyable from its concepts (and therefore must be predicted).
        amr, lem, tok = data
        concept, depth, relation, ok = amr.collect_concepts_and_relations()
        assert ok, "not connected"
        lexical_concepts = set(lexical_map.get(concept))
        predictable = [ c for c in tok if c not in lexical_concepts]
        return concept, depth, relation, predictable
    # NOTE(review): the pool is never closed/joined; acceptable for a one-shot
    # script but worth a with-block if this is ever imported.
    pool = Pool(args.nprocessors)
    res = pool.map(work, zip(amrs, lemma, token), len(amrs)//args.nprocessors)
    # Path statistics over all (concept, concept) pairs of the training set.
    tot_pairs = 0
    multi_path_pairs = 0
    tot_paths = 0
    extreme_long_paths = 0
    avg_path_length = 0.
    conc, rel, predictable_token = [], [], []
    for concept, depth, relation, predictable in res:
        conc.append(concept)
        predictable_token.append(predictable)
        for x in relation:
            for y in relation[x]:
                tot_pairs += 1
                if len(relation[x][y]) > 1:
                    multi_path_pairs +=1
                for path in relation[x][y]:
                    tot_paths += 1
                    path_len = path['length']
                    rel.append(path['edge'])
                    # Paths longer than 8 edges are counted as "extreme".
                    if path_len > 8:
                        extreme_long_paths += 1
                    avg_path_length += path_len
    avg_path_length = avg_path_length / tot_paths
    print ('tot_paths', tot_paths, 'avg_path_length', avg_path_length)
    print ('extreme_long_paths', extreme_long_paths, \
        'extreme_long_paths_percentage', extreme_long_paths/tot_paths)
    print ('multi_path_percentage', multi_path_pairs, tot_pairs, multi_path_pairs/tot_pairs)
    # make vocabularies
    token_vocab, token_char_vocab = make_vocab(token, char_level=True)
    lemma_vocab, lemma_char_vocab = make_vocab(lemma, char_level=True)
    conc_vocab, conc_char_vocab = make_vocab(conc, char_level=True)
    predictable_token_vocab = make_vocab(predictable_token)
    num_predictable_token = sum(len(x) for x in predictable_token)
    num_token = sum(len(x) for x in token)
    print ('predictable token coverage (1. - copyable token coverage)', num_predictable_token, num_token, num_predictable_token/num_token)
    # NOTE(review): rel is a flat list of path['edge'] values; make_vocab will
    # Counter-update over each element, so edges are assumed iterable (e.g.
    # lists of relation labels) — confirm the expected type.
    rel_vocab = make_vocab(rel)
    print ('make vocabularies')
    write_vocab(token_vocab, 'token_vocab')
    write_vocab(token_char_vocab, 'token_char_vocab')
    write_vocab(predictable_token_vocab, 'predictable_token_vocab')
    #write_vocab(lemma_vocab, 'lem_vocab')
    #write_vocab(lemma_char_vocab, 'lem_char_vocab')
    write_vocab(conc_vocab, 'concept_vocab')
    write_vocab(conc_char_vocab, 'concept_char_vocab')
    write_vocab(rel_vocab, 'relation_vocab')
    # Convert every AMR file (train/dev/test) to <file>.json for the model.
    for file in args.amr_files:
        my_data = []
        amrs, token, lemma, abstract = read_file(file)
        res = pool.map(work, zip(amrs, lemma, token), len(amrs)//args.nprocessors)
        for gr, to, le, ab in zip(res, token, lemma, abstract):
            concept, depth, relation, _ = gr
            item = {
                'concept': concept,
                'depth': depth,
                'relation': relation,
                'token': to,
                'lemma': le,
                'abstract': ab
            }
            my_data.append(item)
        # NOTE(review): the file handle from open() is never explicitly closed;
        # CPython closes it on GC, but a with-block would be safer.
        json.dump(my_data, open(file+'.json', 'w', encoding='utf-8'))
| 37.586022 | 138 | 0.59877 |
7d00eed51b18c0eb30ef56fa3a3167a0136c43fc | 241 | py | Python | indic-nlp-library/Indicization.py | gcdeshpande/IndicNLP | 55d0062e28c2f42b75a18706907215f6232ef838 | [
"CC0-1.0"
] | 12 | 2020-12-18T21:22:30.000Z | 2021-12-08T18:22:53.000Z | indic-nlp-library/Indicization.py | gcdeshpande/IndicNLP | 55d0062e28c2f42b75a18706907215f6232ef838 | [
"CC0-1.0"
] | null | null | null | indic-nlp-library/Indicization.py | gcdeshpande/IndicNLP | 55d0062e28c2f42b75a18706907215f6232ef838 | [
"CC0-1.0"
] | 11 | 2020-12-18T18:39:15.000Z | 2021-09-19T06:01:14.000Z | from indicnlp.transliterate.unicode_transliterate import ItransTransliterator
# ITRANS-romanized input to transliterate.
input_text='pAlakkAda'
# input_text='pitL^In'
# Target language code ('ml' = Malayalam per ISO 639-1 — confirm).
lang='ml'
x=ItransTransliterator.from_itrans(input_text,lang)
print(x)
# Print the Unicode code point of every output character in hex.
for y in x:
    print('{:x}'.format(ord(y)))
| 24.1 | 77 | 0.780083 |
83a44949854994a942117a5d3cdf36ae5eaf5627 | 156 | py | Python | nifti_gridview/ngv_model/__init__.py | alabamagan/NIfTI-gridview | 79d6501f78374555b85d52248b380241db53d3ab | [
"MIT"
] | 2 | 2020-11-26T06:49:13.000Z | 2020-11-26T15:40:20.000Z | nifti_gridview/ngv_model/__init__.py | alabamagan/NIfTI-gridview | 79d6501f78374555b85d52248b380241db53d3ab | [
"MIT"
] | null | null | null | nifti_gridview/ngv_model/__init__.py | alabamagan/NIfTI-gridview | 79d6501f78374555b85d52248b380241db53d3ab | [
"MIT"
] | null | null | null | from .draw_grid import *
from .draw_grid_wrapper import *
from .ngv_logger import *
__all__ = ['draw_grid', 'draw_grid_wrapper', 'colormaps', 'NGV_Logger'] | 31.2 | 71 | 0.75641 |
8556695ee39c856bb770e7e2a0a5e6e7d3f5b73a | 153 | py | Python | Code/Models/Options/RunSOEonly.py | MridulS/cAndCwithStickyE | ecbb63f5733d315f9bb7a82acb1048d0a0beed8d | [
"Apache-2.0"
] | null | null | null | Code/Models/Options/RunSOEonly.py | MridulS/cAndCwithStickyE | ecbb63f5733d315f9bb7a82acb1048d0a0beed8d | [
"Apache-2.0"
] | null | null | null | Code/Models/Options/RunSOEonly.py | MridulS/cAndCwithStickyE | ecbb63f5733d315f9bb7a82acb1048d0a0beed8d | [
"Apache-2.0"
] | 1 | 2019-08-05T07:51:31.000Z | 2019-08-05T07:51:31.000Z | '''
This file sets the model options to only run the SOE model.
'''
# Choose which models to do work for
do_SOE = True  # run the SOE model (presumably "small open economy" -- confirm)
do_DSGE = False  # skip the DSGE model
do_RA = False  # skip the RA model (presumably "representative agent" -- confirm)
| 17 | 59 | 0.699346 |
a7a97096557eba41f09175b47e1f442d48799f8b | 3,799 | py | Python | stubs/rest_handler_nodb_template.py | pythononwheels/redmonty | 6255bb0c48575e29c0234a143ad05eba72a6b8c6 | [
"MIT"
] | 3 | 2019-09-29T07:05:00.000Z | 2019-11-13T06:50:33.000Z | stubs/rest_handler_nodb_template.py | pythononwheels/redmonty | 6255bb0c48575e29c0234a143ad05eba72a6b8c6 | [
"MIT"
] | 2 | 2019-09-28T21:10:13.000Z | 2019-09-28T21:13:20.000Z | stubs/rest_handler_nodb_template.py | pythononwheels/redmonty | 6255bb0c48575e29c0234a143ad05eba72a6b8c6 | [
"MIT"
] | null | null | null | from {{appname}}.handlers.powhandler import PowHandler
from {{appname}}.config import myapp
from {{appname}}.application import app
import tornado.web
# sample data
# Fixed: the list was closed with a stray '}]' (a syntax error); a plain ']'
# terminates it.  Seven sample records used by the generated handler below.
data = [
    {"1": "one", "name": "name_1"},
    {"2": "two", "name": "name_2"},
    {"3": "three", "name": "name_3"},
    {"4": "four", "name": "name_4"},
    {"5": "five", "name": "name_5"},
    {"6": "six", "name": "name_6"},
    {"7": "seven", "name": "name_7"},
]
@app.add_rest_routes("{{handler_name}}")
class {{handler_class_name}}(PowHandler):
    """
    every pow handler automatically gets these RESTful routes
    when you add the : app.add_rest_routes() decorator.
    1 GET /{{handler_name}} #=> list
    2 GET /{{handler_name}}/<uuid:identifier> #=> show
    3 GET /{{handler_name}}/new #=> new
    4 GET /{{handler_name}}/<uuid:identifier>/edit #=> edit
    5 GET /{{handler_name}}/page/<uuid:identifier> #=> page
    6 GET /{{handler_name}}/search #=> search
    7 PUT /{{handler_name}}/<uuid:identifier> #=> update
    8 PUT /{{handler_name}} #=> update (You have to send the id as json payload)
    9 POST /{{handler_name}} #=> create
    10 DELETE /{{handler_name}}/<uuid:identifier> #=> destroy
    Standard supported http methods are:
    SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT", "OPTIONS")
    you can overwrite any of those directly or leave the @add_rest_routes out to have a basic
    handler.
    curl tests:
    for windows: (the quotes need to be escape in cmd.exe)
    (You must generate a post model andf handler first... update the db...)
    POST: curl -H "Content-Type: application/json" -X POST -d "{ \"title\" : \"first {{handler_name}}\" }" http://localhost:8080/{{handler_name}}
    GET: curl -H "Content-Type: application/json" -X GET http://localhost:8080/{{handler_name}}
    PUT: curl -H "Content-Type: application/json" -X PUT -d "{ \"id\" : \"1\", \"text\": \"lalala\" }" http://localhost:8080/{{handler_name}}
    DELETE: curl -H "Content-Type: application/json" -X DELETE -d "{ \"id\" : \"1\" }" http://localhost:8080/{{handler_name}}
    """
    # NOTE(review): Model is not imported anywhere in this template; the code
    # generator is expected to provide it (or this line must be adapted).
    model=Model()
    def show(self, id=None):
        # NOTE(review): data is a list while id arrives as a string from the
        # URL, so data[id] raises TypeError and is reported via self.error --
        # confirm whether an int conversion is intended.
        try:
            self.success(message="{{handler_name}} show", data=data[id])
        except Exception as e:
            self.error(message="{{handler_name}} show: " + str(e))
    def list(self):
        # Returns the whole sample data set.
        self.success(message="{{handler_name}}, index", data=data)
    def page(self, page=0):
        page_size=myapp["page_size"]
        page = int(page or 0)
        try:
            # NOTE(review): start uses (page_size-1) while end uses page_size,
            # so consecutive pages overlap/shift -- confirm the intended math.
            start_idx = (page*(page_size-1))
            end_idx = (page*page_size)+(page_size)
            self.success(
                message="rest_nodb page: #" + str(page), data=data[start_idx:end_idx] )
        except Exception as e:
            self.error( message="base.error: rest_nodb page: " + str(e), data=data)
    @tornado.web.authenticated
    def edit(self, id=None):
        self.success(message="{{handler_name}}, edit id: " + str(id))
    @tornado.web.authenticated
    def new(self):
        self.success("{{handler_name}}, new")
    @tornado.web.authenticated
    def create(self):
        self.success(message="{{handler_name}}, create")
    @tornado.web.authenticated
    def update(self, id=None):
        self.success("{{handler_name}}, update id: " + str(id))
    @tornado.web.authenticated
    def destroy(self, id=None):
        self.success("{{handler_name}}, destroy id: " + str(id))
    def search(self):
        return self.error(message="{{handler_name}} search: not implemented yet" )
| 41.747253 | 149 | 0.56936 |
53dda0007ac8ced129bfc36696a38d33eab9b4d8 | 17,494 | py | Python | Fumagalli_Motta_Tarantino_2020/tests/Test_AdditionalModels.py | manuelbieri/Fumagalli_2020 | d3150b075f9c92b028c2aff2bcc2498b77c78877 | [
"MIT"
] | null | null | null | Fumagalli_Motta_Tarantino_2020/tests/Test_AdditionalModels.py | manuelbieri/Fumagalli_2020 | d3150b075f9c92b028c2aff2bcc2498b77c78877 | [
"MIT"
] | null | null | null | Fumagalli_Motta_Tarantino_2020/tests/Test_AdditionalModels.py | manuelbieri/Fumagalli_2020 | d3150b075f9c92b028c2aff2bcc2498b77c78877 | [
"MIT"
] | null | null | null | import Fumagalli_Motta_Tarantino_2020.tests.Test_Models as Test
import Fumagalli_Motta_Tarantino_2020 as FMT20
class TestMircoFoundationModel(Test.TestOptimalMergerPolicyModel):
    """Tests FMT20.MicroFoundationModel against hand-computed profits,
    consumer surpluses and tolerated-harm thresholds (gamma = 0.3 unless
    overridden). (Class name "Mirco" is a historic typo kept for
    compatibility with existing test discovery.)"""
    def setUp(self) -> None:
        self.calculate_properties_profits_consumer_surplus()
    def setupModel(self, **kwargs) -> None:
        self.model = FMT20.MicroFoundationModel(**kwargs)
    def calculate_properties_profits_consumer_surplus(self) -> None:
        # calculations made with Gamma = 0.3
        self.test_incumbent_profit_without_innovation = 0.25
        self.test_cs_without_innovation = 0.125
        self.test_incumbent_profit_with_innovation = 1 / 2.6
        self.test_cs_with_innovation = 1 / 5.2
        self.test_incumbent_profit_duopoly = 1 / (2.3**2)
        self.test_startup_profit_duopoly = self.test_incumbent_profit_duopoly
        self.test_cs_duopoly = 1.3 / (2.3**2)
    def get_welfare_value(self, market_situation: str) -> float:
        # Total welfare = consumer surplus + all firm profits for the given
        # market situation.
        # NOTE(review): an unknown market_situation falls through and returns
        # None despite the float annotation -- confirm callers only pass the
        # three known keys.
        if market_situation == "duopoly":
            return (
                self.test_cs_duopoly
                + self.test_startup_profit_duopoly
                + self.test_incumbent_profit_duopoly
            )
        if market_situation == "without_innovation":
            return (
                self.test_cs_without_innovation
                + self.test_incumbent_profit_without_innovation
            )
        if market_situation == "with_innovation":
            return (
                self.test_cs_with_innovation
                + self.test_incumbent_profit_with_innovation
            )
    def test_properties_profits_consumer_surplus(self):
        # Every derived profit/CS property must match the hand computation.
        self.setupModel()
        self.assertTrue(
            self.are_floats_equal(
                self.test_cs_without_innovation, self.model.cs_without_innovation
            )
        )
        self.assertTrue(
            self.are_floats_equal(
                self.test_incumbent_profit_without_innovation,
                self.model.incumbent_profit_without_innovation,
            )
        )
        self.assertTrue(
            self.are_floats_equal(
                self.test_cs_duopoly,
                self.model.cs_duopoly,
            )
        )
        self.assertTrue(
            self.are_floats_equal(
                self.test_incumbent_profit_duopoly,
                self.model.incumbent_profit_duopoly,
            )
        )
        self.assertTrue(
            self.are_floats_equal(
                self.test_startup_profit_duopoly,
                self.model.startup_profit_duopoly,
            )
        )
        self.assertTrue(
            self.are_floats_equal(
                self.test_cs_with_innovation,
                self.model.cs_with_innovation,
            )
        )
        self.assertTrue(
            self.are_floats_equal(
                self.test_incumbent_profit_with_innovation,
                self.model.incumbent_profit_with_innovation,
            )
        )
    def test_intermediate_optimal_merger_policy(self):
        self.setupModel(gamma=0.2)
        self.assertEqual(
            FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
            self.model.get_optimal_merger_policy(),
        )
        self.assertTrue(self.model.is_intermediate_optimal())
    def test_string_representation(self):
        self.setupModel(gamma=0.3)
        self.assertEqual(
            "Merger Policy: Strict\n"
            "Is start-up credit rationed?: False\n"
            "Type of early takeover attempt: No bid\n"
            "Is the early takeover approved?: False\n"
            "Does the owner attempt the development?: True\n"
            "Is the development successful?: True\n"
            "Type of late takeover attempt: No bid\n"
            "Is the late takeover approved?: False\n"
            "Optimal merger policy: Strict",
            str(self.model),
        )
    def test_laissez_faire_optimal_merger_policy(self):
        # laissez-faire is never optimal -> dominated by strict
        self.setupModel()
        self.assertFalse(self.model.is_laissez_faire_optimal())
    def test_tolerated_harm_strict(self):
        self.setupModel()
        self.assertEqual(0, self.model.tolerated_harm)
    def test_tolerated_harm_intermediate_late_takeover_allowed(self):
        # NOTE(review): the method name says "allowed" but the policy set here
        # is ..._prohibited (and vice versa in the next test) -- confirm the
        # intended pairing of names and policies.
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Intermediate_late_takeover_prohibited
        )
        self.assertTrue(
            self.are_floats_equal(
                (1 - 0.5070508811267713)
                * (
                    0.7
                    * (
                        self.get_welfare_value("duopoly")
                        - self.get_welfare_value("without_innovation")
                    )
                    - 0.1
                ),
                self.model.tolerated_harm,
            )
        )
    def test_tolerated_harm_intermediate_late_takeover_prohibited(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Intermediate_late_takeover_allowed
        )
        self.assertEqual(
            self.get_welfare_value("duopoly")
            - self.get_welfare_value("with_innovation"),
            self.model.tolerated_harm,
        )
    def test_tolerated_harm_laissez_faire(self):
        # Laissez-faire tolerates unbounded harm.
        self.setupModel(merger_policy=FMT20.MergerPolicies.Laissez_faire)
        self.assertEqual(float("inf"), self.model.tolerated_harm)
class TestPerfectInformationModel(Test.TestOptimalMergerPolicyModel):
    """Optimal-merger-policy tests run against the perfect-information model."""

    def setupModel(self, **kwargs) -> None:
        # Swap in the perfect-information variant; inherited tests reuse it.
        self.model = FMT20.PerfectInformationModel(**kwargs)

    def test_laissez_faire_optimal_merger_policy(self):
        # Laissez-faire is dominated and therefore never optimal here.
        self.setupModel()
        is_optimal = self.model.is_laissez_faire_optimal()
        self.assertFalse(is_optimal)
class TestStrictPerfectInformationModel(TestPerfectInformationModel):
    """Game outcomes under the strict merger policy with perfect information."""

    def _assert_outcome(self, rationed, early_bid, late_bid, investing,
                        successful, early_takeover, late_takeover):
        # One place for the full battery of outcome assertions; keeps the
        # individual tests down to "set up parameters, state expectations".
        check = {True: self.assertTrue, False: self.assertFalse}
        self.assertEqual(FMT20.MergerPolicies.Strict, self.model.merger_policy)
        check[rationed](self.model.is_startup_credit_rationed)
        self.assertEqual(early_bid, self.model.get_early_bidding_type)
        self.assertEqual(late_bid, self.model.get_late_bidding_type)
        check[investing](self.model.is_owner_investing)
        check[successful](self.model.is_development_successful)
        check[early_takeover](self.model.is_early_takeover)
        check[late_takeover](self.model.is_late_takeover)

    def test_not_profitable_not_credit_rationed(self):
        self.setupModel()
        self._assert_outcome(
            rationed=False,
            early_bid=FMT20.Takeover.No,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=True,
            early_takeover=False,
            late_takeover=False,
        )

    def test_not_profitable_credit_rationed(self):
        self.setupModel(startup_assets=0.01)
        self._assert_outcome(
            rationed=True,
            early_bid=FMT20.Takeover.No,
            late_bid=FMT20.Takeover.No,
            investing=False,
            successful=False,
            early_takeover=False,
            late_takeover=False,
        )

    def test_profitable_not_credit_rationed(self):
        self.setupModel(
            startup_assets=0.06,
            development_costs=0.075,
            success_probability=0.79,
            private_benefit=0.07,
            incumbent_profit_without_innovation=0.3,
            startup_profit_duopoly=0.11,
            incumbent_profit_with_innovation=0.4,
        )
        self._assert_outcome(
            rationed=False,
            early_bid=FMT20.Takeover.No,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=True,
            early_takeover=False,
            late_takeover=False,
        )

    def test_profitable_credit_rationed(self):
        self.setupModel(
            development_costs=0.075,
            success_probability=0.79,
            private_benefit=0.07,
            incumbent_profit_without_innovation=0.3,
            startup_profit_duopoly=0.11,
            incumbent_profit_with_innovation=0.4,
        )
        self._assert_outcome(
            rationed=True,
            early_bid=FMT20.Takeover.Separating,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=True,
            early_takeover=True,
            late_takeover=False,
        )
class TestIntermediatePerfectInformationModel(TestPerfectInformationModel):
    """Outcomes under the intermediate policy (late takeover allowed)."""

    def _assert_outcome(self, rationed, early_bid, late_bid, investing,
                        successful, early_takeover, late_takeover):
        # Shared assertion battery for the intermediate-policy scenarios.
        check = {True: self.assertTrue, False: self.assertFalse}
        self.assertEqual(
            FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
            self.model.merger_policy,
        )
        check[rationed](self.model.is_startup_credit_rationed)
        self.assertEqual(early_bid, self.model.get_early_bidding_type)
        self.assertEqual(late_bid, self.model.get_late_bidding_type)
        check[investing](self.model.is_owner_investing)
        check[successful](self.model.is_development_successful)
        check[early_takeover](self.model.is_early_takeover)
        check[late_takeover](self.model.is_late_takeover)

    def test_not_profitable_not_credit_rationed(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
            consumer_surplus_duopoly=0.46,
            consumer_surplus_without_innovation=0.2,
            consumer_surplus_with_innovation=0.35,
        )
        self._assert_outcome(
            rationed=False,
            early_bid=FMT20.Takeover.No,
            late_bid=FMT20.Takeover.Pooling,
            investing=True,
            successful=True,
            early_takeover=False,
            late_takeover=True,
        )

    def test_not_profitable_not_credit_rationed_unsuccessful(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
            consumer_surplus_duopoly=0.46,
            consumer_surplus_without_innovation=0.2,
            consumer_surplus_with_innovation=0.35,
            development_success=False,
        )
        self._assert_outcome(
            rationed=False,
            early_bid=FMT20.Takeover.No,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=False,
            early_takeover=False,
            late_takeover=False,
        )

    def test_profitable_not_credit_rationed(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
            development_costs=0.09,
            incumbent_profit_without_innovation=0.35,
            consumer_surplus_duopoly=0.46,
            consumer_surplus_without_innovation=0.2,
            consumer_surplus_with_innovation=0.35,
        )
        self._assert_outcome(
            rationed=False,
            early_bid=FMT20.Takeover.Pooling,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=True,
            early_takeover=True,
            late_takeover=False,
        )

    def test_profitable_credit_rationed(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Intermediate_late_takeover_allowed,
            private_benefit=0.075,
            startup_assets=0.005,
            development_costs=0.076,
            success_probability=0.79,
            incumbent_profit_with_innovation=0.179,
            incumbent_profit_without_innovation=0.08,
            incumbent_profit_duopoly=0.05,
            startup_profit_duopoly=0.1,
            consumer_surplus_duopoly=0.46,
            consumer_surplus_without_innovation=0.2,
            consumer_surplus_with_innovation=0.35,
        )
        self._assert_outcome(
            rationed=True,
            early_bid=FMT20.Takeover.Separating,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=True,
            early_takeover=True,
            late_takeover=False,
        )
class TestLaissezFairePerfectInformationModel(TestPerfectInformationModel):
    """Outcomes under the laissez-faire merger policy."""

    def _assert_outcome(self, rationed, early_bid, late_bid, investing,
                        successful, early_takeover, late_takeover):
        # Shared assertion battery for the laissez-faire scenarios.
        check = {True: self.assertTrue, False: self.assertFalse}
        self.assertEqual(FMT20.MergerPolicies.Laissez_faire, self.model.merger_policy)
        check[rationed](self.model.is_startup_credit_rationed)
        self.assertEqual(early_bid, self.model.get_early_bidding_type)
        self.assertEqual(late_bid, self.model.get_late_bidding_type)
        check[investing](self.model.is_owner_investing)
        check[successful](self.model.is_development_successful)
        check[early_takeover](self.model.is_early_takeover)
        check[late_takeover](self.model.is_late_takeover)

    def test_not_profitable_not_credit_rationed(self):
        self.setupModel(merger_policy=FMT20.MergerPolicies.Laissez_faire)
        self._assert_outcome(
            rationed=False,
            early_bid=FMT20.Takeover.Pooling,
            late_bid=FMT20.Takeover.No,
            investing=False,
            successful=False,
            early_takeover=True,
            late_takeover=False,
        )

    def test_not_profitable_credit_rationed(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Laissez_faire,
            startup_assets=0.01,
            private_benefit=0.099,
            success_probability=0.51,
            development_costs=0.1,
            startup_profit_duopoly=0.339,
            incumbent_profit_duopoly=0.01,
            incumbent_profit_with_innovation=0.35,
            consumer_surplus_with_innovation=0.4,
            incumbent_profit_without_innovation=0.3,
        )
        self._assert_outcome(
            rationed=True,
            early_bid=FMT20.Takeover.No,
            late_bid=FMT20.Takeover.No,
            investing=False,
            successful=False,
            early_takeover=False,
            late_takeover=False,
        )

    def test_profitable_not_credit_rationed(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Laissez_faire,
            private_benefit=0.075,
            development_costs=0.078,
            success_probability=0.76,
            incumbent_profit_with_innovation=0.51,
        )
        self._assert_outcome(
            rationed=False,
            early_bid=FMT20.Takeover.Pooling,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=True,
            early_takeover=True,
            late_takeover=False,
        )

    def test_profitable_credit_rationed(self):
        self.setupModel(
            merger_policy=FMT20.MergerPolicies.Laissez_faire,
            private_benefit=0.075,
            startup_assets=0.005,
            development_costs=0.076,
            success_probability=0.79,
            incumbent_profit_with_innovation=0.179,
            incumbent_profit_without_innovation=0.08,
            incumbent_profit_duopoly=0.05,
            startup_profit_duopoly=0.1,
        )
        self._assert_outcome(
            rationed=True,
            early_bid=FMT20.Takeover.Separating,
            late_bid=FMT20.Takeover.No,
            investing=True,
            successful=True,
            early_takeover=True,
            late_takeover=False,
        )
class TestEquityModel(Test.TestOptimalMergerPolicyModel):
    """Tests for the equity-contract variant of the takeover model."""

    def setupModel(self, **kwargs) -> None:
        self.model = FMT20.EquityContract(**kwargs)

    def test_thresholds(self):
        """With equity financing the two asset thresholds coincide."""
        self.setupModel()
        base_threshold = self.model.asset_threshold
        late_threshold = self.model.asset_threshold_late_takeover
        self.assertEqual(base_threshold, late_threshold)

    def test_debt_not_preferred(self):
        self.setupModel()
        prefers_debt = self.model.does_startup_strictly_prefer_debt()
        self.assertFalse(prefers_debt)

    def test_debt_preferred(self):
        self.setupModel(merger_policy=FMT20.MergerPolicies.Laissez_faire)
        prefers_debt = self.model.does_startup_strictly_prefer_debt()
        self.assertTrue(prefers_debt)

    # TODO: Adjust optimal merger policies tests.
    def test_laissez_faire_optimal_merger_policy(self):
        # Intentionally disabled (overrides the inherited test) pending the
        # TODO above.
        pass

    def test_intermediate_optimal_merger_policy(self):
        # Intentionally disabled (overrides the inherited test) pending the
        # TODO above.
        pass
| 42.877451 | 86 | 0.688579 |
ca4f97b3a2d8fb21260341a9fa2e4d944918ff89 | 46,939 | py | Python | shakelib/multigmpe.py | mhearne-usgs/shakemap | 69f81ad831f345735ad36d4993fe12e1b9a5d990 | [
"CC0-1.0"
] | null | null | null | shakelib/multigmpe.py | mhearne-usgs/shakemap | 69f81ad831f345735ad36d4993fe12e1b9a5d990 | [
"CC0-1.0"
] | null | null | null | shakelib/multigmpe.py | mhearne-usgs/shakemap | 69f81ad831f345735ad36d4993fe12e1b9a5d990 | [
"CC0-1.0"
] | 2 | 2016-07-14T21:48:43.000Z | 2018-03-01T19:10:23.000Z | #!/usr/bin/env python
import copy
from importlib import import_module
import logging
import numpy as np
from openquake.hazardlib.gsim.base import GMPE
from openquake.hazardlib.gsim.boore_2014 import BooreEtAl2014
from openquake.hazardlib.gsim.campbell_bozorgnia_2014 import (
CampbellBozorgnia2014)
from openquake.hazardlib.imt import PGA, PGV, SA
from openquake.hazardlib import const
from openquake.hazardlib.valid import gsim
from openquake.hazardlib.contexts import RuptureContext
from shakelib.conversions.imt.abrahamson_bhasin_2020 import AbrahamsonBhasin2020
from shakelib.conversions.imc.boore_kishida_2017 import BooreKishida2017
from shakelib.sites import Sites
# Special case GMPEs:
from shakelib.gmpe.nga_east import NGAEast
def set_sites_depth_parameters(sites, gmpe):
    """
    Set the depth parameters (z1pt0/z2pt5) on a sites context as required
    by a specific GMPE.

    We need to select the appropriate z1pt0/z2pt5 values for different
    GMPEs. These are required site parameters, so they must be present on
    the SitesContext even though OQ has the relevant equations built into
    the GMPE classes in most cases. (An issue has been submitted to OQ
    requesting subclasses of these methods that do not require the depth
    parameters in the SitesContext, which would make this easier.)

    Args:
        sites: An OQ sites context.
        gmpe: An OQ GMPE instance.

    Returns:
        An OQ sites context with the depth parameters set for the
        requested GMPE.
    """
    # A MultiGMPE handles depth parameters per sub-GMPE; nothing to do here.
    if gmpe == '[MultiGMPE]':
        return sites
    # Ensure all the z1pt0_*/z2pt5_* proxy attributes exist on the context.
    Sites._addDepthParameters(sites)
    # NOTE: OQ GMPE instances compare equal to their TOML-ish string
    # representation, which is what the string comparisons below rely on.
    if gmpe == "[AbrahamsonEtAl2014]" or \
            gmpe == "[AbrahamsonEtAl2014]\nregion = 'TWN'" or \
            gmpe == "[AbrahamsonEtAl2014]\nregion = 'CHN'":
        sites.z1pt0 = sites.z1pt0_ask14_cal
    if gmpe == "[AbrahamsonEtAl2014]\nregion = 'JPN'":
        sites.z1pt0 = sites.z1pt0_ask14_jpn
    if gmpe == '[ChiouYoungs2014]' or \
            isinstance(gmpe, BooreEtAl2014):
        sites.z1pt0 = sites.z1pt0_cy14_cal
    if isinstance(gmpe, CampbellBozorgnia2014):
        # The Japan-site CB14 variants use the Japan z2pt5 relationship.
        if gmpe == '[CampbellBozorgnia2014JapanSite]' or \
                gmpe == '[CampbellBozorgnia2014HighQJapanSite]' or \
                gmpe == '[CampbellBozorgnia2014LowQJapanSite]':
            sites.z2pt5 = sites.z2pt5_cb14_jpn
        else:
            sites.z2pt5 = sites.z2pt5_cb14_cal
    if gmpe == '[ChiouYoungs2008]' or \
            gmpe == '[Bradley2013]' or \
            gmpe == '[Bradley2013Volc]':
        sites.z1pt0 = sites.z1pt0_cy08
    if gmpe == '[CampbellBozorgnia2008]':
        sites.z2pt5 = sites.z2pt5_cb07
    if gmpe == '[AbrahamsonSilva2008]':
        # AS08 provides its own median-z1pt0 relationship as a method.
        sites.z1pt0 = gmpe._compute_median_z1pt0(sites.vs30)
    return sites
def stuff_context(sites, rup, dists):
    """
    Function to fill a rupture context with the contents of all of the
    other contexts.

    Args:
        sites (SiteCollection): A SiteCollection object.
        rup (RuptureContext): A RuptureContext object.
        dists (DistanceContext): A DistanceContext object.

    Returns:
        RuptureContext: A new RuptureContext whose attributes are all of
        the elements of the three inputs.
    """
    ctx = RuptureContext()
    # Copy every non-dunder attribute from each source context onto the
    # new context; later sources win when names collide.
    for source in (sites, rup, dists):
        for attr in vars(source):
            if attr.startswith("__"):
                continue
            setattr(ctx, attr, getattr(source, attr))
    return ctx
def get_gmpe_from_name(name, conf):
    """
    Return an OQ GMPE instance corresponding to a short name from the
    config.

    Args:
        name (str): GMPE class name (a key understood by OQ's ``gsim``
            validator, e.g. 'BooreEtAl2014').
        conf (dict): ShakeMap config with a 'gmpe_modules' section mapping
            short names to (class name, module path) pairs.

    Returns:
        GMPE: An instantiated OQ GMPE object.
    """
    # Only import the NullGMPE when we're testing. The import is performed
    # purely for its side effect: it registers NullGMPE with OQ's gsim
    # registry so that gsim() below can resolve the name. Any other GMPEs
    # we add should be imported at the top of this module instead so that
    # gsim() picks them up; anything in OQ is already included.
    if name == 'NullGMPE':
        import_module(conf['gmpe_modules'][name][1])
    return gsim(name)
class MultiGMPE(GMPE):
    """
    Implements a GMPE that is the combination of multiple GMPEs.

    Instances are built via the private ``__from_config__`` /
    ``__from_list__`` constructors rather than direct instantiation.
    """
    # Class attributes required by the OQ GMPE base class. Most are left as
    # None here and filled in by __from_list__ from the component GMPEs.
    DEFINED_FOR_TECTONIC_REGION_TYPE = None
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = None
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = None
    # Only the total stddev is advertised by default.
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL
    ])
    REQUIRES_SITES_PARAMETERS = None
    REQUIRES_RUPTURE_PARAMETERS = None
    REQUIRES_DISTANCES = None
    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See superclass `method <http://docs.openquake.org/oq-hazardlib/master/gsim/index.html#openquake.hazardlib.gsim.base.GroundShakingIntensityModel.get_mean_and_stddevs>`__.

        Unlike the superclass method, the stddev list returned by this
        function will have twice as many arrays as are requested in
        stddev_types: The first set will include the standard deviation
        inflation due to the point-source to finite fault conversion (if
        any), and the second set will not include this inflation. In the
        case where a finite rupture is provided (and, thus, no point-source
        to finite rupture adjustments are made) the two sets of stddev
        arrays will be identical. Thus, if::

            stddev_types = [const.StdDev.TOTAL, const.StdDev.INTRA_EVENT,
                            const.StdDev.INTER_EVENT]

        the returned stddev list will contain six arrays: the first three
        will include the point-source inflation, and the second three will
        not.
        """  # noqa
        # ---------------------------------------------------------------------
        # Sort out shapes of the sites and dists elements
        # Need to turn all 2D arrays into 1D arrays because of
        # inconsistencies in how arrays are handled in OpenQuake.
        # ---------------------------------------------------------------------
        shapes = []
        for k, v in sites.__dict__.items():
            # '_slots_' is OQ bookkeeping, not data; leave it alone.
            if k == '_slots_':
                continue
            if (k != 'lons') and (k != 'lats'):
                shapes.append(v.shape)
                sites.__dict__[k] = np.reshape(sites.__dict__[k], (-1,))
        for k, v in dists.__dict__.items():
            if k == '_slots_':
                continue
            if (k != 'lons') and (k != 'lats') and v is not None:
                shapes.append(v.shape)
                dists.__dict__[k] = np.reshape(dists.__dict__[k], (-1,))
        # All of the (non-lat/lon) arrays must agree on a single shape so we
        # can restore it on the way out.
        shapeset = set(shapes)
        if len(shapeset) != 1:
            raise Exception(
                'All dists and sites elements must have same shape.')
        else:
            orig_shape = list(shapeset)[0]
        sd_avail = self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        if not sd_avail.issuperset(set(stddev_types)):
            raise Exception("Requested an unavailable stddev_type.")
        # Evaluate MultiGMPE:
        lnmu, lnsd = self.__get_mean_and_stddevs__(
            sites, rup, dists, imt, stddev_types)
        # Check for large-distance cutoff/weights
        if hasattr(self, 'CUTOFF_DISTANCE'):
            # Re-evaluate with the large-distance weight set.
            lnmu_large, lnsd_large = self.__get_mean_and_stddevs__(
                sites, rup, dists, imt, stddev_types, large_dist=True)
            # Stomp on lnmu and lnsd at large distances
            dist_cutoff = self.CUTOFF_DISTANCE
            lnmu[dists.rjb > dist_cutoff] = lnmu_large[dists.rjb > dist_cutoff]
            for i in range(len(lnsd)):
                lnsd[i][dists.rjb > dist_cutoff] = \
                    lnsd_large[i][dists.rjb > dist_cutoff]
        # Undo reshapes of inputs
        for k, v in dists.__dict__.items():
            if k == '_slots_':
                continue
            if (k != 'lons') and (k != 'lats') and v is not None:
                dists.__dict__[k] = np.reshape(dists.__dict__[k], orig_shape)
        for k, v in sites.__dict__.items():
            if k == '_slots_':
                continue
            if (k != 'lons') and (k != 'lats'):
                sites.__dict__[k] = np.reshape(sites.__dict__[k], orig_shape)
        # Reshape output
        lnmu = np.reshape(lnmu, orig_shape)
        for i in range(len(lnsd)):
            lnsd[i] = np.reshape(lnsd[i], orig_shape)
        return lnmu, lnsd
    def __get_mean_and_stddevs__(self, sites, rup, dists, imt, stddev_types,
                                 large_dist=False):
        """
        Weighted-combination workhorse behind ``get_mean_and_stddevs``.

        Args:
            sites: OQ sites context (arrays already flattened to 1D by the
                caller).
            rup: OQ rupture context.
            dists: OQ distances context (arrays already flattened to 1D).
            imt: OQ intensity measure type.
            stddev_types: List of requested stddev types.
            large_dist (bool): If True, use WEIGHTS_LARGE_DISTANCE instead
                of WEIGHTS.

        Returns:
            tuple: (weighted mean array, list of 2 * len(stddev_types)
            stddev arrays — the first half includes the point-source
            sigma inflation, the second half does not).
        """
        # ---------------------------------------------------------------------
        # Sort out which set of weights to use
        # ---------------------------------------------------------------------
        if large_dist is False:
            wts = self.WEIGHTS
        else:
            wts = self.WEIGHTS_LARGE_DISTANCE
        # ---------------------------------------------------------------------
        # This is the array to hold the weighted combination of the GMPEs
        # ---------------------------------------------------------------------
        lnmu = np.zeros_like(sites.vs30)
        # ---------------------------------------------------------------------
        # Hold on to the individual means and stddevs so we can compute the
        # combined stddev
        # ---------------------------------------------------------------------
        lnmu_list = []
        lnsd_list = []
        for i, gmpe in enumerate(self.GMPES):
            # -----------------------------------------------------------------
            # Loop over GMPE list
            # -----------------------------------------------------------------
            set_sites_depth_parameters(sites, gmpe)
            # -----------------------------------------------------------------
            # Select the IMT; if the GMPE does not define PGV, evaluate the
            # Abrahamson & Bhasin (2020) reference SA period instead and
            # convert to PGV after the fact.
            # -----------------------------------------------------------------
            gmpe_imts = [imt.__name__ for imt in
                         list(gmpe.DEFINED_FOR_INTENSITY_MEASURE_TYPES)]
            if not isinstance(gmpe, MultiGMPE) and \
                    (imt.string == "PGV") and ("PGV" not in gmpe_imts):
                ab2020 = AbrahamsonBhasin2020(rup.mag)
                timt = SA(ab2020.getTref())
            else:
                timt = imt
            # -----------------------------------------------------------------
            # Grab GMPE_LIMITS in gmpe instance for later as the multigmpe
            # nests downward.
            # -----------------------------------------------------------------
            if hasattr(self, 'GMPE_LIMITS'):
                # Remember that GMPE_LIMITS is only present if it is getting
                # loaded from a config... we could change this eventually.
                gmpe.GMPE_LIMITS = self.GMPE_LIMITS
            # -----------------------------------------------------------------
            # Apply GMPE_LIMITS if applicable (currently only vs30 clipping)
            # -----------------------------------------------------------------
            if hasattr(gmpe, 'GMPE_LIMITS'):
                gmpes_with_limits = list(gmpe.GMPE_LIMITS.keys())
                gmpe_class_str = str(gmpe).replace('[', '').replace(']', '')
                if gmpe_class_str in gmpes_with_limits:
                    limit_dict = gmpe.GMPE_LIMITS[gmpe_class_str]
                    for k, v in limit_dict.items():
                        if k == 'vs30':
                            vs30min = float(v[0])
                            vs30max = float(v[1])
                            sites.vs30 = np.clip(sites.vs30, vs30min, vs30max)
                            # Depth proxies depend on vs30, so recompute them.
                            Sites._addDepthParameters(sites)
            # -----------------------------------------------------------------
            # Evaluate
            # -----------------------------------------------------------------
            if not isinstance(gmpe, MultiGMPE):
                ctx = stuff_context(sites, rup, dists)
                lmean, lsd = gmpe.get_mean_and_stddevs(ctx, ctx, ctx, timt,
                                                       stddev_types)
            else:
                lmean, lsd = gmpe.get_mean_and_stddevs(sites, rup, dists, timt,
                                                       stddev_types)
            if not isinstance(gmpe, MultiGMPE):
                # -------------------------------------------------------------
                # We may need to inflate the standard deviations to account for
                # the point-source to finite rupture conversion.
                # -------------------------------------------------------------
                lsd_new = self.__inflatePSSigma__(gmpe, lmean, lsd, sites, rup,
                                                  dists, timt, stddev_types)
                for sd in lsd:
                    lsd_new.append(sd)
                lsd = lsd_new
                # -------------------------------------------------------------
                # If IMT is PGV and PGV is not given by the GMPE, then
                # convert from the appropriate PSA
                # -------------------------------------------------------------
                if (imt.string == "PGV") and ("PGV" not in gmpe_imts):
                    lmean, lsd = ab2020.getPGVandSTDDEVS(
                        lmean, lsd, stddev_types, ctx.rrup, ctx.vs30)
                # -------------------------------------------------------------
                # Apply the default site amplification if the GMPE itself has
                # no site term.
                # -------------------------------------------------------------
                if self.HAS_SITE[i] is False:
                    lamps = self.__get_site_factors__(
                        sites, rup, dists, timt, default=True)
                    lmean = lmean + lamps
                # -------------------------------------------------------------
                # Convertions due to component definition
                # -------------------------------------------------------------
                imc_in = gmpe.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT
                imc_out = self.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT
                if imc_in != imc_out:
                    bk17 = BooreKishida2017(imc_in, imc_out)
                    lmean = bk17.convertAmps(imt, lmean, dists.rrup, rup.mag)
                    #
                    # The extra sigma from the component conversion appears to
                    # apply to the total sigma, so the question arises as to
                    # how to apportion it between the intra- and inter-event
                    # sigma. Here we assume it all enters as intra-event sigma.
                    #
                    for j, stddev_type in enumerate(stddev_types):
                        if stddev_type == const.StdDev.INTER_EVENT:
                            continue
                        lsd[j] = bk17.convertSigmas(imt, lsd[j])
            # End: if GMPE is not MultiGMPE
            #
            # At this point lsd will have 2 * len(stddev_types) entries, the
            # first group will have the point-source to finite rupture
            # inflation (if any), and the second set will not; in cases where
            # a finite rupture is used, the two sets will be identical
            #
            # -----------------------------------------------------------------
            # Compute weighted mean and collect the elements to compute sd
            # -----------------------------------------------------------------
            lnmu = lnmu + wts[i] * lmean
            lnmu_list.append(lmean)
            lnsd_list = lnsd_list + lsd
        # -----------------------------------------------------------------
        # The mean is a weighted sum of random variables, so the stddev
        # is the weighted sum of of their covariances (effectively). See:
        # https://en.wikipedia.org/wiki/Variance#Weighted_sum_of_variables
        # for an explanation. Also see:
        # http://usgs.github.io/shakemap/manual4_0/tg_processing.html#ground-motion-prediction
        # for a discussion on the way this is implemented here.
        # -------------------------------------------------------------- # noqa
        nwts = len(wts)
        npwts = np.array(wts).reshape((1, -1))
        nsites = len(lnmu)
        # Find the correlation coefficients among the gmpes; if there are
        # fewer than 10 points, just use an approximation (noting that the
        # correlation among GMPEs tends to be quite high).
        if nsites < 10:
            cc = np.full((nwts, nwts), 0.95)
            np.fill_diagonal(cc, 1.0)
        else:
            # corrcoef can emit divide-by-zero warnings for constant rows;
            # suppress them and patch the resulting NaNs to full correlation.
            np.seterr(divide='ignore', invalid='ignore')
            cc = np.reshape(np.corrcoef(lnmu_list), (nwts, nwts))
            np.seterr(divide='warn', invalid='warn')
            cc[np.isnan(cc)] = 1.0
        # Multiply the correlation coefficients by the weights matrix
        # (this is cheaper than multiplying all of elements of each
        # stddev array by their weights since we have to multiply
        # everything by the correlation coefficient matrix anyway))
        cc = ((npwts * npwts.T) * cc).reshape((nwts, nwts, 1))
        nstds = len(stddev_types)
        lnsd_new = []
        for i in range(nstds * 2):
            sdlist = []
            for j in range(nwts):
                sdlist.append(
                    lnsd_list[j * nstds * 2 + i].reshape((1, 1, -1)))
            sdstack = np.hstack(sdlist)
            wcov = (sdstack * np.transpose(sdstack, axes=(1, 0, 2))) * cc
            # This sums the weighted covariance as each point in the output
            lnsd_new.append(np.sqrt(wcov.sum((0, 1))))
        return lnmu, lnsd_new
    @classmethod
    def __from_config__(cls, conf, filter_imt=None):
        """
        Construct a MultiGMPE from a config file.

        Args:
            conf (dict): Dictionary of config options.
            filter_imt (IMT): An optional IMT to filter/reweight the GMPE
                list.

        Returns:
            MultiGMPE object.
        """
        IMC = getattr(const.IMC, conf['interp']['component'])
        selected_gmpe = conf['modeling']['gmpe']
        logging.debug('selected_gmpe: %s' % selected_gmpe)
        logging.debug('IMC: %s' % IMC)
        # ---------------------------------------------------------------------
        # Allow for selected_gmpe to be found in either conf['gmpe_sets'] or
        # conf['gmpe_modules'], if it is a GMPE set, then all entries must be
        # either a GMPE or a GMPE set (cannot have a GMPE set that is a mix of
        # GMPEs and GMPE sets).
        # ---------------------------------------------------------------------
        if selected_gmpe in conf['gmpe_sets'].keys():
            selected_gmpe_sets = conf['gmpe_sets'][selected_gmpe]['gmpes']
            gmpe_set_weights = \
                [float(w) for w in conf['gmpe_sets'][selected_gmpe]['weights']]
            logging.debug('selected_gmpe_sets: %s' % selected_gmpe_sets)
            logging.debug('gmpe_set_weights: %s' % gmpe_set_weights)
            # -----------------------------------------------------------------
            # If it is a GMPE set, does it contain GMPEs or GMPE sets?
            # -----------------------------------------------------------------
            set_of_gmpes = all([s in conf['gmpe_modules'] for s in
                                selected_gmpe_sets])
            set_of_sets = all([s in conf['gmpe_sets'] for s in
                               selected_gmpe_sets])
            if set_of_sets is True:
                # A set of sets: build a MultiGMPE per sub-set, then combine.
                mgmpes = []
                for s in selected_gmpe_sets:
                    mgmpes.append(cls.__multigmpe_from_gmpe_set__(
                        conf, s, filter_imt=filter_imt))
                out = MultiGMPE.__from_list__(mgmpes, gmpe_set_weights, imc=IMC)
            elif set_of_gmpes is True:
                out = cls.__multigmpe_from_gmpe_set__(
                    conf,
                    selected_gmpe,
                    filter_imt=filter_imt)
            else:
                raise TypeError("%s must consist exclusively of keys in "
                                "conf['gmpe_modules'] or conf['gmpe_sets']"
                                % selected_gmpe)
        elif selected_gmpe in conf['gmpe_modules'].keys():
            modinfo = conf['gmpe_modules'][selected_gmpe]
            # A single GMPE wrapped as a one-element MultiGMPE.
            # mod = import_module(modinfo[1])
            # tmpclass = getattr(mod, modinfo[0])
            # out = MultiGMPE.__from_list__([tmpclass()], [1.0], imc=IMC)
            out = MultiGMPE.__from_list__(
                [get_gmpe_from_name(modinfo[0], conf)], [1.0], imc=IMC)
        else:
            raise TypeError("conf['modeling']['gmpe'] must be a key in "
                            "conf['gmpe_modules'] or conf['gmpe_sets']")
        out.DESCRIPTION = selected_gmpe
        # ---------------------------------------------------------------------
        # Deal with GMPE limits
        # ---------------------------------------------------------------------
        gmpe_lims = conf['gmpe_limits']
        # We need to replace the short name in the dictionary key with module
        # name here since the conf is not available within the MultiGMPE class.
        mods = conf['gmpe_modules']
        mod_keys = mods.keys()
        new_gmpe_lims = {}
        for k, v in gmpe_lims.items():
            if k in mod_keys:
                new_gmpe_lims[mods[k][0]] = v
            else:
                new_gmpe_lims[k] = v
        out.GMPE_LIMITS = new_gmpe_lims
        return out
def __multigmpe_from_gmpe_set__(conf, set_name, filter_imt=None):
"""
Private method for constructing a MultiGMPE from a set_name.
Args:
conf (ConfigObj): A ShakeMap config object.
filter_imt (IMT): An optional IMT to filter/reweight the GMPE list.
set_name (str): Set name; must correspond to a key in
conf['set_name'].
Returns:
MultiGMPE.
"""
IMC = getattr(const.IMC, conf['interp']['component'])
selected_gmpes = conf['gmpe_sets'][set_name]['gmpes']
selected_gmpe_weights = \
[float(w) for w in conf['gmpe_sets'][set_name]['weights']]
# Check for large distance GMPEs
if 'weights_large_dist' in conf['gmpe_sets'][set_name].keys():
if not conf['gmpe_sets'][set_name]['weights_large_dist']:
selected_weights_large_dist = None
else:
selected_weights_large_dist = \
[float(w) for w in
conf['gmpe_sets'][set_name]['weights_large_dist']]
else:
selected_weights_large_dist = None
if 'dist_cutoff' in conf['gmpe_sets'][set_name].keys():
if np.isnan(conf['gmpe_sets'][set_name]['dist_cutoff']):
selected_dist_cutoff = None
else:
selected_dist_cutoff = \
float(conf['gmpe_sets'][set_name]['dist_cutoff'])
else:
selected_dist_cutoff = None
if 'site_gmpes' in conf['gmpe_sets'][set_name].keys():
if not conf['gmpe_sets'][set_name]['site_gmpes']:
selected_site_gmpes = None
else:
selected_site_gmpes = \
conf['gmpe_sets'][set_name]['site_gmpes']
else:
selected_site_gmpes = None
if 'weights_site_gmpes' in conf['gmpe_sets'][set_name].keys():
if not conf['gmpe_sets'][set_name]['weights_site_gmpes']:
selected_weights_site_gmpes = None
else:
selected_weights_site_gmpes = \
conf['gmpe_sets'][set_name]['weights_site_gmpes']
else:
selected_weights_site_gmpes = None
# ---------------------------------------------------------------------
# Import GMPE modules and initialize classes into list
# ---------------------------------------------------------------------
gmpes = []
for g in selected_gmpes:
# This is the old school way of importing the modules; I'm
# leaving it in here temporarily just for documentation.
# mod = import_module(conf['gmpe_modules'][g][1])
# tmpclass = getattr(mod, conf['gmpe_modules'][g][0])
# gmpes.append(tmpclass())
gmpe_name = conf['gmpe_modules'][g][0]
gmpes.append(get_gmpe_from_name(gmpe_name, conf))
# ---------------------------------------------------------------------
# Filter out GMPEs not applicable to this period
# ---------------------------------------------------------------------
if filter_imt is not None:
filtered_gmpes, filtered_wts = filter_gmpe_list(
gmpes, selected_gmpe_weights, filter_imt)
else:
filtered_gmpes, filtered_wts = gmpes, selected_gmpe_weights
# ---------------------------------------------------------------------
# Import site GMPEs
# ---------------------------------------------------------------------
if selected_site_gmpes is not None:
if isinstance(selected_site_gmpes, str):
selected_site_gmpes = [selected_site_gmpes]
site_gmpes = []
for g in selected_site_gmpes:
# This is the old school way of importing the modules; I'm
# leaving it in here temporarily just for documentation.
# mod = import_module(conf['gmpe_modules'][g][1])
# tmpclass = getattr(mod, conf['gmpe_modules'][g][0])
# site_gmpes.append(tmpclass())
gmpe_name = conf['gmpe_modules'][g][0]
site_gmpes.append(get_gmpe_from_name(gmpe_name, conf))
else:
site_gmpes = None
# ---------------------------------------------------------------------
# Filter out site GMPEs not applicable to this period
# ---------------------------------------------------------------------
if site_gmpes is not None:
if filter_imt is not None:
filtered_site_gmpes, filtered_site_wts = filter_gmpe_list(
site_gmpes, selected_weights_site_gmpes, filter_imt)
else:
filtered_site_gmpes = copy.copy(site_gmpes)
filtered_site_wts = copy.copy(selected_weights_site_gmpes)
else:
filtered_site_gmpes = None
filtered_site_wts = None
# ---------------------------------------------------------------------
# Construct MultiGMPE
# ---------------------------------------------------------------------
logging.debug(' filtered_gmpes: %s' % filtered_gmpes)
logging.debug(' filtered_wts: %s' % filtered_wts)
mgmpe = MultiGMPE.__from_list__(
filtered_gmpes, filtered_wts,
default_gmpes_for_site=filtered_site_gmpes,
default_gmpes_for_site_weights=filtered_site_wts,
imc=IMC)
# ---------------------------------------------------------------------
# Append large-distance info if specified
# ---------------------------------------------------------------------
if selected_dist_cutoff is not None:
if filter_imt is not None:
filtered_gmpes_ld, filtered_wts_ld = filter_gmpe_list(
gmpes, selected_weights_large_dist, filter_imt)
else:
filtered_wts_ld = copy.copy(selected_weights_large_dist)
mgmpe.CUTOFF_DISTANCE = copy.copy(selected_dist_cutoff)
mgmpe.WEIGHTS_LARGE_DISTANCE = copy.copy(filtered_wts_ld)
mgmpe.DESCRIPTION = set_name
return mgmpe
@classmethod
def __from_list__(cls, gmpes, weights,
imc=const.IMC.GREATER_OF_TWO_HORIZONTAL,
default_gmpes_for_site=None,
default_gmpes_for_site_weights=None,
reference_vs30=760):
"""
Construct a MultiGMPE instance from lists of GMPEs and weights.
Args:
gmpes (list): List of OpenQuake
`GMPE <http://docs.openquake.org/oq-hazardlib/master/gsim/index.html#built-in-gsims>`__
instances.
weights (list): List of weights; must sum to 1.0.
imc: Requested intensity measure component. Must be one listed
`here <http://docs.openquake.org/oq-hazardlib/master/const.html?highlight=imc#openquake.hazardlib.const.IMC>`__.
The amplitudes returned by the GMPEs will be converted to this
IMT. Default is 'GREATER_OF_TWO_HORIZONTAL', which is used by
ShakeMap. See discussion in
`this section <http://usgs.github.io/shakemap/tg_choice_of_parameters.html#use-of-peak-values-rather-than-mean>`__
of the ShakeMap manual.
default_gmpes_for_site (list):
Optional list of OpenQuake GMPE instance to use as a site term
for any of the GMPEs that do not have a site term.
Notes:
* We do not check for consistency in the reference rock
defintion, so the user nees to be aware of this issue and
holds responsibiilty for ensuring compatibility.
* We check whether or not a GMPE has a site term by c
hecking the REQUIRES_SITES_PARAMETERS slot for vs30.
default_gmpes_for_site_weights: Weights for default_gmpes_for_site.
Must sum to one and be same length as default_gmpes_for_site.
If None, then weights are set to be equal.
reference_vs30:
Reference rock Vs30 in m/s. We do not check that this matches
the reference rock in the GMPEs so this is the responsibility
of the user.
""" # noqa
# ---------------------------------------------------------------------
# Check that GMPE weights sum to 1.0:
# ---------------------------------------------------------------------
if np.abs(np.sum(weights) - 1.0) > 1e-7:
raise Exception('Weights must sum to one.')
# ---------------------------------------------------------------------
# Check that length of GMPE weights equals length of gmpe list
# ---------------------------------------------------------------------
if len(weights) != len(gmpes):
raise Exception(
'Length of weights must match length of GMPE list.')
# ---------------------------------------------------------------------
# Check that gmpes is a list of OQ GMPE instances
# ---------------------------------------------------------------------
for g in gmpes:
if not isinstance(g, GMPE):
raise Exception("\"%s\" is a %s not a GMPE instance."
% (g, type(g)))
self = cls()
self.GMPES = gmpes
self.WEIGHTS = weights
# ---------------------------------------------------------------------
# Combine the intensity measure types. This is problematic:
# - Logically, we should only include the intersection of the sets
# of imts for the different GMPEs.
# - In practice, this is not feasible because most GMPEs in CEUS and
# subduction zones do not have PGV.
# - So instead we will use the union of the imts and then convert
# to get the missing imts later in get_mean_and_stddevs.
# ---------------------------------------------------------------------
imts = [set(g.DEFINED_FOR_INTENSITY_MEASURE_TYPES) for g in gmpes]
self.DEFINED_FOR_INTENSITY_MEASURE_TYPES = set.union(*imts)
# ---------------------------------------------------------------------
# For VirtualIPE class, we also want to know if ALL of the GMPEs are
# defined for PGV, in which case we will convert from PGV to MI,
# otherwise use PGA or Sa.
# ---------------------------------------------------------------------
haspgv = [PGV in set(g.DEFINED_FOR_INTENSITY_MEASURE_TYPES)
for g in gmpes]
self.ALL_GMPES_HAVE_PGV = all(haspgv)
# ---------------------------------------------------------------------
# Store intensity measure types for conversion in get_mean_and_stddevs.
# ---------------------------------------------------------------------
self.IMCs = [g.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT for g in gmpes]
# ---------------------------------------------------------------------
# Store the component
# ---------------------------------------------------------------------
self.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = imc
# ---------------------------------------------------------------------
# Intersection of GMPE standard deviation types
# ---------------------------------------------------------------------
stdlist = [set(g.DEFINED_FOR_STANDARD_DEVIATION_TYPES) for g in gmpes]
self.DEFINED_FOR_STANDARD_DEVIATION_TYPES = \
set.intersection(*stdlist)
# ---------------------------------------------------------------------
# Need union of site parameters, but it is complicated by the
# different depth parameter flavors.
# ---------------------------------------------------------------------
sitepars = [set(g.REQUIRES_SITES_PARAMETERS) for g in gmpes]
self.REQUIRES_SITES_PARAMETERS = set.union(*sitepars)
# ---------------------------------------------------------------------
# Construct a list of whether or not each GMPE has a site term
# ---------------------------------------------------------------------
self.HAS_SITE = ['vs30' in g.REQUIRES_SITES_PARAMETERS for g in gmpes]
# ---------------------------------------------------------------------
# Checks and sort out defaults
# ---------------------------------------------------------------------
# things to check if default_gmpes_for_site is provided
if default_gmpes_for_site is not None:
# check that default_gmpe_for_site are OQ GMPEs or None
for g in default_gmpes_for_site:
if not isinstance(g, GMPE):
raise Exception("\"%s\" is not a GMPE instance." % g)
# apply default weights if necessary
if default_gmpes_for_site_weights is None:
n = len(default_gmpes_for_site)
default_gmpes_for_site_weights = [1 / n] * n
# Things to check if one or more GMPE does not have a site term
if not all(self.HAS_SITE):
# Raise an exception if no default site is provided
if default_gmpes_for_site is None:
raise Exception('Must provide default_gmpes_for_site if one or'
' more GMPE does not have site term.')
# If weights are unspecified, use equal weight
if default_gmpes_for_site_weights is None:
default_gmpes_for_site_weights = \
[1 / len(default_gmpes_for_site)] * \
len(default_gmpes_for_site)
# check that length of default_gmpe_for_site matches length of
# default_gmpe_for_site_weights
if len(default_gmpes_for_site_weights) != \
len(default_gmpes_for_site):
raise Exception('Length of default_gmpes_for_site_weights '
'must match length of default_gmpes_for_site '
'list.')
# check weights sum to one if needed
if not all(self.HAS_SITE):
if np.sum(default_gmpes_for_site_weights) != 1.0:
raise Exception('default_gmpes_for_site_weights must sum'
' to one.')
# Note: if ALL of the GMPEs do not have a site term (requiring Vs30),
# then REQUIRES_SITES_PARAMETERS for the MultiGMPE will not
# include Vs30 even though it will be needed to compute the
# default site term. So if the site checks have passed to this
# point, we should add Vs30 to the set of required site pars:
self.REQUIRES_SITES_PARAMETERS = set.union(
set(self.REQUIRES_SITES_PARAMETERS), set(['vs30']))
self.DEFAULT_GMPES_FOR_SITE = default_gmpes_for_site
self.DEFAULT_GMPES_FOR_SITE_WEIGHTS = default_gmpes_for_site_weights
self.REFERENCE_VS30 = reference_vs30
# ---------------------------------------------------------------------
# Union of rupture parameters
# ---------------------------------------------------------------------
ruppars = [set(g.REQUIRES_RUPTURE_PARAMETERS) for g in gmpes]
self.REQUIRES_RUPTURE_PARAMETERS = set.union(*ruppars)
# ---------------------------------------------------------------------
# Union of distance parameters
# ---------------------------------------------------------------------
distpars = [set(g.REQUIRES_DISTANCES) for g in gmpes]
self.REQUIRES_DISTANCES = set.union(*distpars)
return self
    def __get_site_factors__(self, sites, rup, dists, imt, default=False):
        """
        Method for computing site amplification factors from the default GMPE
        to be applied to GMPEs which do not have a site term.
        **NOTE** Amps are calculated in natural log units and so the ln(amp)
        is returned.
        Args:
            sites (SitesContext): Instance of SitesContext.
            rup (RuptureContext): Instance of RuptureContext.
            dists (DistancesContext): Instance of DistancesContext.
            imt: An instance openquake.hazardlib.imt.
            default (bool): Boolean of whether or not to return the
                amplification factors for the gmpes or default_gmpes_for_site.
                This argument is primarily only intended to be used internally
                for when we just need to access the default amplifications to
                apply to those GMPEs that do not have site terms.
        Returns:
            Site amplifications in natural log units.
        """
        # ---------------------------------------------------------------------
        # Make reference sites context
        # ---------------------------------------------------------------------
        # Deep copy so that overwriting vs30 with the reference rock value
        # does not modify the caller's sites context
        ref_sites = copy.deepcopy(sites)
        ref_sites.vs30 = np.full_like(sites.vs30, self.REFERENCE_VS30)
        # TODO: Should we reset the Sites depth parameters here? Probably.
        # ---------------------------------------------------------------------
        # If default True, construct new MultiGMPE with default GMPE/weights
        # ---------------------------------------------------------------------
        if default is True:
            tmp = MultiGMPE.__from_list__(
                self.DEFAULT_GMPES_FOR_SITE,
                self.DEFAULT_GMPES_FOR_SITE_WEIGHTS,
                self.DEFINED_FOR_INTENSITY_MEASURE_COMPONENT)
        # ---------------------------------------------------------------------
        # If default False, just use self
        # ---------------------------------------------------------------------
        else:
            tmp = self
        # Evaluate the GMPE at the actual site conditions and again at the
        # reference rock conditions; the returned standard deviations are
        # not used (lsd is simply overwritten by the second call)
        lmean, lsd = tmp.get_mean_and_stddevs(
            sites, rup, dists, imt,
            list(tmp.DEFINED_FOR_STANDARD_DEVIATION_TYPES))
        lmean_ref, lsd = tmp.get_mean_and_stddevs(
            ref_sites, rup, dists, imt,
            list(tmp.DEFINED_FOR_STANDARD_DEVIATION_TYPES))
        # The ln(amp) factor is the difference of the two log means
        lamps = lmean - lmean_ref
        return lamps
def __describe__(self):
"""
Construct a dictionary that describes the MultiGMPE.
Note: For simplicity, this method ignores issues related to
GMPEs used for the site term and changes in the GMPE with
distance. For this level of detail, please see the config files.
Returns:
A dictionary representation of the MultiGMPE.
"""
gmpe_dict = {
'gmpes': [],
'weights': [],
'name': self.DESCRIPTION
}
for i in range(len(self.GMPES)):
gmpe_dict['weights'].append(self.WEIGHTS[i])
if isinstance(self.GMPES[i], MultiGMPE):
gmpe_dict['gmpes'].append(
self.GMPES[i].__describe__()
)
else:
gmpe_dict['gmpes'].append(str(self.GMPES[i]))
return gmpe_dict
    def __inflatePSSigma__(self, gmpe, lmean, lsd, sites, rup, dists, imt,
                           stddev_types):
        """
        If the point-source to finite-fault factors are used, we need to
        inflate the intra-event and total standard deviations. We do this
        by standard propagation of error techniques: taking the (numerical)
        derivative of the GMPE (as a function of distance) squared times the
        additional variance from the conversion, added
        to the variance of the GMPE (then taking the square root). We do
        this separately for each of Rrup and Rjb and sum the results.
        If Rrup and Rjb are calculated from a finite rupture model, their
        variance arrays will be "None" and lsd will remain unchanged.
        Otherwise the error inflation will be applied. Normally one or the
        other of Rrup/Rjb will not be used and so that term will be zero; in
        some cases both may be used and both may result in non-zero
        derivatives.
        Args:
            gmpe:
                The GMPE to use for the calculations. Must be a base GMPE and
                not a GMPE set, otherwise no action is taken.
            lmean:
                The mean values returned by the "normal" evaluation of the
                GMPE.
            lsd:
                The standard deviations returned by the "normal" evaluation
                of the GMPE.
            sites:
                The sites context required by the GMPE.
            rup:
                The rupture context required by the GMPE.
            dists:
                The distance context required by the GMPE.
            imt:
                The intensity measure type being evaluated.
            stddev_types:
                The list of stddev types found in lsd.
        Returns:
            list: A list of arrays of inflated standard deviations
            corresponding to the elements of lsd.
        """
        new_sd = []
        delta_distance = 0.01
        # One extra-variance term per distance flavor: [rrup, rjb]
        delta_var = [0, 0]
        for i, dtype in enumerate(('rrup', 'rjb')):
            # Skip dtype if the gmpe does not require it
            if dtype not in gmpe.REQUIRES_DISTANCES:
                continue
            # Skip dtype if it has not been subject to a point-source to
            # finite rupture conversion
            dvar = getattr(dists, dtype + '_var', None)
            if dvar is None:
                continue
            # Add a small amount to the rupture distance (rrup or rjb)
            # and re-evaluate the GMPE
            # NOTE(review): rup_dist is presumably a numpy array held by
            # `dists`, so the += below perturbs the distances context in
            # place; the perturbation is undone at the end of this loop
            rup_dist = getattr(dists, dtype)
            rup_dist += delta_distance
            ctx = stuff_context(sites, rup, dists)
            tmean, tsd = gmpe.get_mean_and_stddevs(ctx, ctx, ctx, imt,
                                                   stddev_types)
            # Find the derivative w.r.t. the rupture distance
            dm_dr = (lmean - tmean) / delta_distance
            # The additional variance is (dm/dr)^2 * dvar
            delta_var[i] = dm_dr**2 * dvar
            # Put the rupture distance back to what it was
            rup_dist -= delta_distance
        for i, stdtype in enumerate(stddev_types):
            # Inter-event sigma is not affected by the conversion; copy it
            # through unchanged
            if stdtype == const.StdDev.INTER_EVENT:
                new_sd.append(lsd[i].copy())
                continue
            # Inflate intra-event/total sigma by the extra variance terms
            new_sd.append(np.sqrt(lsd[i]**2 + delta_var[0] + delta_var[1]))
        return new_sd
def filter_gmpe_list(gmpes, wts, imt):
    """
    Remove GMPEs from the GMPE list that are not applicable to a specific
    IMT, rescaling the remaining weights to sum to one.
    Args:
        gmpes (list): List of GMPE instances.
        wts (list): List of floats indicating the weight of the GMPEs.
        imt (IMT): OQ IMT to filter GMPE list for.
    Returns:
        tuple: List of GMPE instances and list of weights.
    """
    # Equal weights if none were supplied
    if wts is None:
        wts = [1 / len(gmpes)] * len(gmpes)
    per_max = [np.max(get_gmpe_sa_periods(g)) for g in gmpes]
    per_min = [np.min(get_gmpe_sa_periods(g)) for g in gmpes]
    sgmpe = []
    swts = []
    if imt == PGA():
        # Keep only GMPEs explicitly defined for PGA
        for g, w in zip(gmpes, wts):
            if PGA in g.DEFINED_FOR_INTENSITY_MEASURE_TYPES:
                sgmpe.append(g)
                swts.append(w)
    elif imt == PGV():
        # Keep GMPEs defined for PGV, or whose SA period range spans 1 s
        for g, w, pmax, pmin in zip(gmpes, wts, per_max, per_min):
            if (PGV in g.DEFINED_FOR_INTENSITY_MEASURE_TYPES) or \
                    (pmax >= 1.0 and pmin <= 1.0):
                sgmpe.append(g)
                swts.append(w)
    else:
        # Spectral acceleration: keep GMPEs whose period range spans the
        # requested period
        per = imt.period
        for g, w, pmax, pmin in zip(gmpes, wts, per_max, per_min):
            if pmax >= per and pmin <= per:
                sgmpe.append(g)
                swts.append(w)
    if not sgmpe:
        raise KeyError('No applicable GMPEs from GMPE list for %s' % str(imt))
    # Scale weights to sum to one
    swts = np.array(swts)
    swts = swts / np.sum(swts)
    return sgmpe, swts
def get_gmpe_sa_periods(gmpe):
    """
    Extract the spectral-acceleration periods defined by a GMPE.
    Args:
        gmpe (GMPE): A GMPE instance.
    Returns:
        list: List of periods.
    """
    # NGAEast exposes its period array directly rather than through a
    # coefficient table
    if gmpe == '[NGAEast]':
        return gmpe.per_array
    sa_coeffs = get_gmpe_coef_table(gmpe).sa_coeffs
    return [sa_imt.period for sa_imt in sa_coeffs.keys()]
def get_gmpe_coef_table(gmpe):
    """
    Find the (or "a") GMPE coefficient table.
    Notes:
        * The reason for the complexity here is that there can be multiple
          coefficient tables, and some of them may not have the sa_coeffs
          attribute, which is the main reason for getting the table.
        * We are also assuming that if there are more than one coefficient
          table, the range of periods will be the same across all of the
          tables.
    Args:
        gmpe (GMPE): An OQ GMPE instance.
    Returns:
        The associated coefficient table.
    """
    # Scan every attribute whose name contains 'COEFFS' and return the
    # first one that carries an sa_coeffs attribute
    for attr_name in gmpe.__dir__():
        if 'COEFFS' not in attr_name:
            continue
        candidate = getattr(gmpe, attr_name)
        if "sa_coeffs" in candidate.__dir__():
            return candidate
    raise Exception("GMPE %s does not contain sa_coeffs attribute." % gmpe)
| 43.381701 | 177 | 0.512772 |
cc85897df9b4dc83905feac43c000bb197ad7880 | 30,170 | py | Python | pybamm/expression_tree/binary_operators.py | gyouhoc/PyBaMM | 6852e0e518157e6802ce83a2549562e7d0ed4b9f | [
"BSD-3-Clause"
] | null | null | null | pybamm/expression_tree/binary_operators.py | gyouhoc/PyBaMM | 6852e0e518157e6802ce83a2549562e7d0ed4b9f | [
"BSD-3-Clause"
] | null | null | null | pybamm/expression_tree/binary_operators.py | gyouhoc/PyBaMM | 6852e0e518157e6802ce83a2549562e7d0ed4b9f | [
"BSD-3-Clause"
] | null | null | null | #
# Binary operator classes
#
import pybamm
import numpy as np
import numbers
from scipy.sparse import issparse, csr_matrix
def is_scalar_zero(expr):
    """
    Utility function to test if an expression evaluates to a constant scalar zero
    """
    # Non-constant expressions can never be a known scalar zero
    if not expr.is_constant():
        return False
    value = expr.evaluate_ignoring_errors(t=None)
    return isinstance(value, numbers.Number) and value == 0
def is_matrix_zero(expr):
    """
    Utility function to test if an expression evaluates to a constant matrix zero
    """
    # Non-constant expressions can never be a known matrix zero
    if not expr.is_constant():
        return False
    value = expr.evaluate_ignoring_errors(t=None)
    # Sparse result: zero iff it stores no nonzero entries
    if issparse(value):
        return value.count_nonzero() == 0
    # Dense result: must be an ndarray whose entries are all zero
    return isinstance(value, np.ndarray) and np.all(value == 0)
def is_scalar_one(expr):
    """
    Utility function to test if an expression evaluates to a constant scalar one
    """
    # Non-constant expressions can never be a known scalar one
    if not expr.is_constant():
        return False
    value = expr.evaluate_ignoring_errors(t=None)
    return isinstance(value, numbers.Number) and value == 1
def zeros_of_shape(shape):
    """
    Utility function to create a scalar zero, or a vector or matrix of zeros of
    the correct shape
    """
    # Empty shape tuple means a scalar
    if shape == ():
        return pybamm.Scalar(0)
    # 1-D shape, or a 2-D shape with a single column, means a column vector
    if len(shape) == 1 or shape[1] == 1:
        return pybamm.Vector(np.zeros(shape))
    # Anything else is a general matrix; use a sparse all-zero matrix
    return pybamm.Matrix(csr_matrix(shape))
class BinaryOperator(pybamm.Symbol):
    """A node in the expression tree representing a binary operator (e.g. `+`, `*`)
    Derived classes will specify the particular operator
    **Extends**: :class:`Symbol`
    Parameters
    ----------
    name : str
        name of the node
    left : :class:`Symbol` or :class:`Number`
        lhs child node (converted to :class:`Scalar` if Number)
    right : :class:`Symbol` or :class:`Number`
        rhs child node (converted to :class:`Scalar` if Number)
    """
    def __init__(self, name, left, right):
        # Convert numbers to Scalars and broadcast children where needed
        left, right = self.format(left, right)
        domain = self.get_children_domains(left.domain, right.domain)
        auxiliary_domains = self.get_children_auxiliary_domains([left, right])
        super().__init__(
            name,
            children=[left, right],
            domain=domain,
            auxiliary_domains=auxiliary_domains,
        )
        # Keep references to the children as stored on the base Symbol
        # (self.children), not to the raw arguments
        self.left = self.children[0]
        self.right = self.children[1]
    def format(self, left, right):
        "Format children left and right into compatible form"
        # Turn numbers into scalars
        if isinstance(left, numbers.Number):
            left = pybamm.Scalar(left)
        if isinstance(right, numbers.Number):
            right = pybamm.Scalar(right)
        # Check both left and right are pybamm Symbols
        if not (isinstance(left, pybamm.Symbol) and isinstance(right, pybamm.Symbol)):
            raise NotImplementedError(
                """'{}' not implemented for symbols of type {} and {}""".format(
                    self.__class__.__name__, type(left), type(right)
                )
            )
        # Do some broadcasting in special cases, to avoid having to do this manually
        # (a child whose domain matches the other child's *secondary* auxiliary
        # domain is broadcast onto the other child's primary domain)
        if left.domain != [] and right.domain != []:
            if (
                left.domain != right.domain
                and "secondary" in right.auxiliary_domains
                and left.domain == right.auxiliary_domains["secondary"]
            ):
                left = pybamm.PrimaryBroadcast(left, right.domain)
            if (
                right.domain != left.domain
                and "secondary" in left.auxiliary_domains
                and right.domain == left.auxiliary_domains["secondary"]
            ):
                right = pybamm.PrimaryBroadcast(right, left.domain)
        return left, right
    def __str__(self):
        """ See :meth:`pybamm.Symbol.__str__()`. """
        return "{!s} {} {!s}".format(self.left, self.name, self.right)
    def get_children_domains(self, ldomain, rdomain):
        "Combine domains from children in appropriate way"
        # Domains must either match or one of them must be empty
        if ldomain == rdomain:
            return ldomain
        elif ldomain == []:
            return rdomain
        elif rdomain == []:
            return ldomain
        else:
            raise pybamm.DomainError(
                """
                children must have same (or empty) domains, but left.domain is '{}'
                and right.domain is '{}'
                """.format(
                    ldomain, rdomain
                )
            )
    def new_copy(self):
        """ See :meth:`pybamm.Symbol.new_copy()`. """
        # process children
        new_left = self.left.new_copy()
        new_right = self.right.new_copy()
        # make new symbol, ensure domain(s) remain the same
        out = self._binary_new_copy(new_left, new_right)
        out.copy_domains(self)
        return out
    def _binary_new_copy(self, left, right):
        "Default behaviour for new_copy"
        return self.__class__(left, right)
    def evaluate(self, t=None, y=None, y_dot=None, inputs=None, known_evals=None):
        """ See :meth:`pybamm.Symbol.evaluate()`. """
        if known_evals is not None:
            # Use (and update) the cache of known evaluations, keyed by
            # symbol id, so shared subtrees are only evaluated once
            id = self.id
            try:
                return known_evals[id], known_evals
            except KeyError:
                left, known_evals = self.left.evaluate(t, y, y_dot, inputs, known_evals)
                right, known_evals = self.right.evaluate(
                    t, y, y_dot, inputs, known_evals
                )
                value = self._binary_evaluate(left, right)
                known_evals[id] = value
                return value, known_evals
        else:
            # No cache: evaluate both children then apply the operator
            left = self.left.evaluate(t, y, y_dot, inputs)
            right = self.right.evaluate(t, y, y_dot, inputs)
            return self._binary_evaluate(left, right)
    def _evaluate_for_shape(self):
        """ See :meth:`pybamm.Symbol.evaluate_for_shape()`. """
        left = self.children[0].evaluate_for_shape()
        right = self.children[1].evaluate_for_shape()
        return self._binary_evaluate(left, right)
    def _binary_jac(self, left_jac, right_jac):
        """ Calculate the jacobian of a binary operator. """
        # Subclasses must implement their own jacobian rule
        raise NotImplementedError
    def _binary_simplify(self, new_left, new_right):
        """ Simplify a binary operator. Default behaviour: unchanged"""
        return self._binary_new_copy(new_left, new_right)
    def _binary_evaluate(self, left, right):
        """ Perform binary operation on nodes 'left' and 'right'. """
        # Subclasses must implement the actual numeric operation
        raise NotImplementedError
    def evaluates_on_edges(self):
        """ See :meth:`pybamm.Symbol.evaluates_on_edges()`. """
        return self.left.evaluates_on_edges() or self.right.evaluates_on_edges()
class Power(BinaryOperator):
    """A node in the expression tree representing a `**` power operator
    **Extends:** :class:`BinaryOperator`
    """
    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("**", left, right)
    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # apply chain rule and power rule
        base, exponent = self.orphans
        # derivative if variable is in the base
        diff = exponent * (base ** (exponent - 1)) * base.diff(variable)
        # derivative if variable is in the exponent (rare, check separately to avoid
        # unnecessarily big tree)
        if any(variable.id == x.id for x in exponent.pre_order()):
            diff += (base ** exponent) * pybamm.log(base) * exponent.diff(variable)
        return diff
    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        # apply chain rule and power rule
        left, right = self.orphans
        if left.evaluates_to_number() and right.evaluates_to_number():
            # both operands constant: jacobian is zero
            return pybamm.Scalar(0)
        elif right.evaluates_to_number():
            return (right * left ** (right - 1)) * left_jac
        elif left.evaluates_to_number():
            return (left ** right * pybamm.log(left)) * right_jac
        else:
            # general case: both operands depend on the state
            return (left ** (right - 1)) * (
                right * left_jac + left * pybamm.log(left) * right_jac
            )
    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        # don't raise RuntimeWarning for NaNs
        with np.errstate(invalid="ignore"):
            return left ** right
    def _binary_simplify(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_simplify()`. """
        # anything to the power of zero is one
        # (checked before the zero-base rule, so 0 ** 0 simplifies to 1)
        if is_scalar_zero(right):
            return pybamm.Scalar(1)
        # zero to the power of anything is zero
        if is_scalar_zero(left):
            return pybamm.Scalar(0)
        # anything to the power of one is itself
        if is_scalar_one(right):
            return left
        return self.__class__(left, right)
class Addition(BinaryOperator):
    """A node in the expression tree representing an addition operator
    **Extends:** :class:`BinaryOperator`
    """
    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("+", left, right)
    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # derivative distributes over addition
        return self.left.diff(variable) + self.right.diff(variable)
    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        return left_jac + right_jac
    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        return left + right
    def _binary_simplify(self, left, right):
        """
        See :meth:`pybamm.BinaryOperator._binary_simplify()`.
        Note
        ----
        We check for scalars first, then matrices. This is because
        (Zero Matrix) + (Zero Scalar)
        should return (Zero Matrix), not (Zero Scalar).
        """
        # anything added by a scalar zero returns the other child
        if is_scalar_zero(left):
            return right
        if is_scalar_zero(right):
            return left
        # Check matrices after checking scalars
        if is_matrix_zero(left):
            if isinstance(right, pybamm.Scalar):
                # scalar + zero matrix broadcasts the scalar to a full array
                return pybamm.Array(right.value * np.ones(left.shape_for_testing))
            else:
                return right
        if is_matrix_zero(right):
            if isinstance(left, pybamm.Scalar):
                return pybamm.Array(left.value * np.ones(right.shape_for_testing))
            else:
                return left
        # no trivial simplification: delegate to the generic routine
        return pybamm.simplify_addition_subtraction(self.__class__, left, right)
class Subtraction(BinaryOperator):
    """A node in the expression tree representing a subtraction operator
    **Extends:** :class:`BinaryOperator`
    """
    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("-", left, right)
    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # derivative distributes over subtraction
        return self.left.diff(variable) - self.right.diff(variable)
    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        return left_jac - right_jac
    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        return left - right
    def _binary_simplify(self, left, right):
        """
        See :meth:`pybamm.BinaryOperator._binary_simplify()`.
        Note
        ----
        We check for scalars first, then matrices. This is because
        (Zero Matrix) - (Zero Scalar)
        should return (Zero Matrix), not -(Zero Scalar).
        """
        # anything added by a scalar zero returns the other child
        # (negated when it is the left child that is zero)
        if is_scalar_zero(left):
            return -right
        if is_scalar_zero(right):
            return left
        # Check matrices after checking scalars
        if is_matrix_zero(left):
            if isinstance(right, pybamm.Scalar):
                # zero matrix - scalar broadcasts the negated scalar
                return pybamm.Array(-right.value * np.ones(left.shape_for_testing))
            else:
                return -right
        if is_matrix_zero(right):
            if isinstance(left, pybamm.Scalar):
                return pybamm.Array(left.value * np.ones(right.shape_for_testing))
            else:
                return left
        # no trivial simplification: delegate to the generic routine
        return pybamm.simplify_addition_subtraction(self.__class__, left, right)
class Multiplication(BinaryOperator):
    """
    A node in the expression tree representing a multiplication operator
    (Hadamard product). Overloads cases where the "*" operator would usually return a
    matrix multiplication (e.g. scipy.sparse.coo.coo_matrix)
    **Extends:** :class:`BinaryOperator`
    """
    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("*", left, right)
    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # apply product rule
        left, right = self.orphans
        return left.diff(variable) * right + left * right.diff(variable)
    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        # apply product rule
        left, right = self.orphans
        if left.evaluates_to_number() and right.evaluates_to_number():
            # both operands constant: jacobian is zero
            return pybamm.Scalar(0)
        elif left.evaluates_to_number():
            return left * right_jac
        elif right.evaluates_to_number():
            return right * left_jac
        else:
            return right * left_jac + left * right_jac
    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        if issparse(left):
            # elementwise sparse product; wrap in csr_matrix so the result
            # has a consistent sparse format
            return csr_matrix(left.multiply(right))
        elif issparse(right):
            # Hadamard product is commutative, so we can switch right and left
            return csr_matrix(right.multiply(left))
        else:
            return left * right
    def _binary_simplify(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_simplify()`. """
        # simplify multiply by scalar zero, being careful about shape
        if is_scalar_zero(left):
            return zeros_of_shape(right.shape_for_testing)
        if is_scalar_zero(right):
            return zeros_of_shape(left.shape_for_testing)
        # if one of the children is a zero matrix, we have to be careful about shapes
        if is_matrix_zero(left) or is_matrix_zero(right):
            shape = (left * right).shape
            return zeros_of_shape(shape)
        # anything multiplied by a scalar one returns itself
        if is_scalar_one(left):
            return right
        if is_scalar_one(right):
            return left
        # no trivial simplification: delegate to the generic routine
        return pybamm.simplify_multiplication_division(self.__class__, left, right)
class MatrixMultiplication(BinaryOperator):
    """A node in the expression tree representing a matrix multiplication operator
    **Extends:** :class:`BinaryOperator`
    """
    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("@", left, right)
    def diff(self, variable):
        """ See :meth:`pybamm.Symbol.diff()`. """
        # We shouldn't need this
        raise NotImplementedError(
            "diff not implemented for symbol of type 'MatrixMultiplication'"
        )
    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        # We only need the case where left is an array and right
        # is a (slice of a) state vector, e.g. for discretised spatial
        # operators of the form D @ u (also catch cases of (-D) @ u)
        left, right = self.orphans
        if isinstance(left, pybamm.Array) or (
            isinstance(left, pybamm.Negate) and isinstance(left.child, pybamm.Array)
        ):
            # materialise the constant operator as a sparse matrix; the
            # jacobian of D @ u is then simply D @ (du/dy)
            left = pybamm.Matrix(csr_matrix(left.evaluate()))
            return left @ right_jac
        else:
            raise NotImplementedError(
                """jac of 'MatrixMultiplication' is only
             implemented for left of type 'pybamm.Array',
             not {}""".format(
                    left.__class__
                )
            )
    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        return left @ right
    def _binary_simplify(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_simplify()`. """
        # a zero-matrix operand makes the whole product a zero of the
        # product's shape
        if is_matrix_zero(left) or is_matrix_zero(right):
            shape = (left @ right).shape
            return zeros_of_shape(shape)
        return pybamm.simplify_multiplication_division(self.__class__, left, right)
class Division(BinaryOperator):
    """A node in the expression tree representing a division operator
    **Extends:** :class:`BinaryOperator`
    """
    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("/", left, right)
    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # apply quotient rule
        top, bottom = self.orphans
        return (top.diff(variable) * bottom - top * bottom.diff(variable)) / bottom ** 2
    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        # apply quotient rule
        left, right = self.orphans
        if left.evaluates_to_number() and right.evaluates_to_number():
            # both operands constant: jacobian is zero
            return pybamm.Scalar(0)
        elif left.evaluates_to_number():
            return -left / right ** 2 * right_jac
        elif right.evaluates_to_number():
            return left_jac / right
        else:
            return (right * left_jac - left * right_jac) / right ** 2
    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        if issparse(left):
            # sparse numerator: divide elementwise via multiply by reciprocal
            return csr_matrix(left.multiply(1 / right))
        else:
            if isinstance(right, numbers.Number) and right == 0:
                # don't raise RuntimeWarning for NaNs
                # (left * inf keeps the sign of left; 0 * inf gives NaN)
                with np.errstate(invalid="ignore"):
                    return left * np.inf
            else:
                return left / right
    def _binary_simplify(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_simplify()`. """
        # zero divided by zero returns nan scalar
        if is_scalar_zero(left) and is_scalar_zero(right):
            return pybamm.Scalar(np.nan)
        # zero divided by anything returns zero (being careful about shape)
        if is_scalar_zero(left):
            return zeros_of_shape(right.shape_for_testing)
        # matrix zero divided by anything returns matrix zero (i.e. itself)
        if is_matrix_zero(left):
            return left
        # anything divided by zero returns inf
        if is_scalar_zero(right):
            if left.shape_for_testing == ():
                return pybamm.Scalar(np.inf)
            else:
                return pybamm.Array(np.inf * np.ones(left.shape_for_testing))
        # anything divided by one is itself
        if is_scalar_one(right):
            return left
        # no trivial simplification: delegate to the generic routine
        return pybamm.simplify_multiplication_division(self.__class__, left, right)
class Inner(BinaryOperator):
    """
    A node in the expression tree which represents the inner (or dot) product. This
    operator should be used to take the inner product of two mathematical vectors
    (as opposed to the computational vectors arrived at post-discretisation) of the
    form v = v_x e_x + v_y e_y + v_z e_z where v_x, v_y, v_z are scalars
    and e_x, e_y, e_z are x-y-z-directional unit vectors. For v and w mathematical
    vectors, inner product returns v_x * w_x + v_y * w_y + v_z * w_z. In addition,
    for some spatial discretisations mathematical vector quantities (such as
    i = grad(phi) ) are evaluated on a different part of the grid to mathematical
    scalars (e.g. for finite volume mathematical scalars are evaluated on the nodes but
    mathematical vectors are evaluated on cell edges). Therefore, inner also transfers
    the inner product of the vector onto the scalar part of the grid if required
    by a particular discretisation.
    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("inner product", left, right)

    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # apply product rule: d(u.v) = u'.v + u.v'
        left, right = self.orphans
        return left.diff(variable) * right + left * right.diff(variable)

    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        # apply product rule, with shortcuts when either child is constant
        left, right = self.orphans
        if left.evaluates_to_number() and right.evaluates_to_number():
            return pybamm.Scalar(0)
        elif left.evaluates_to_number():
            return left * right_jac
        elif right.evaluates_to_number():
            return right * left_jac
        else:
            return right * left_jac + left * right_jac

    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        if issparse(left):
            # elementwise (Hadamard) product preserving sparsity
            return left.multiply(right)
        elif issparse(right):
            # Hadamard product is commutative, so we can switch right and left
            return right.multiply(left)
        else:
            return left * right

    def _binary_simplify(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_simplify()`. """
        # simplify multiply by scalar zero, being careful about shape
        if is_scalar_zero(left):
            return zeros_of_shape(right.shape_for_testing)
        if is_scalar_zero(right):
            return zeros_of_shape(left.shape_for_testing)
        # if one of the children is a zero matrix, we have to be careful about shapes
        if is_matrix_zero(left) or is_matrix_zero(right):
            shape = (left * right).shape
            return zeros_of_shape(shape)
        # anything multiplied by a scalar one returns itself
        if is_scalar_one(left):
            return right
        if is_scalar_one(right):
            return left
        return pybamm.simplify_multiplication_division(self.__class__, left, right)

    def evaluates_on_edges(self):
        """ See :meth:`pybamm.Symbol.evaluates_on_edges()`. """
        # inner products live on the scalar (node) part of the grid
        return False
def inner(left, right):
    """
    Return inner product of two symbols. See :class:`pybamm.Inner`.
    """
    return pybamm.Inner(left, right)
class Heaviside(BinaryOperator):
    """A node in the expression tree representing a heaviside step function.
    Adding this operation to the rhs or algebraic equations in a model can often cause a
    discontinuity in the solution. For the specific cases listed below, this will be
    automatically handled by the solver. In the general case, you can explicitly tell
    the solver of discontinuities by adding a :class:`Event` object with
    :class:`EventType` DISCONTINUITY to the model's list of events.
    In the case where the Heaviside function is of the form `pybamm.t < x`, `pybamm.t <=
    x`, `x < pybamm.t`, or `x <= pybamm.t`, where `x` is any constant equation, this
    DISCONTINUITY event will automatically be added by the solver.
    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, name, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        # `name` distinguishes the "<" and "<=" subclasses
        super().__init__(name, left, right)

    def diff(self, variable):
        """ See :meth:`pybamm.Symbol.diff()`. """
        # step function: derivative is zero almost everywhere
        # Heaviside should always be multiplied by something else so hopefully don't
        # need to worry about shape
        return pybamm.Scalar(0)

    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        # Heaviside should always be multiplied by something else so hopefully don't
        # need to worry about shape
        return pybamm.Scalar(0)
class EqualHeaviside(Heaviside):
    """A heaviside step including equality: evaluates to 1 wherever left <= right."""

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("<=", left, right)

    def __str__(self):
        """ See :meth:`pybamm.Symbol.__str__()`. """
        return "%s <= %s" % (self.left, self.right)

    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        # suppress the RuntimeWarning numpy emits when comparing NaNs
        with np.errstate(invalid="ignore"):
            return left <= right
class NotEqualHeaviside(Heaviside):
    """A heaviside step excluding equality: evaluates to 0 wherever left == right."""

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("<", left, right)

    def __str__(self):
        """ See :meth:`pybamm.Symbol.__str__()`. """
        return "%s < %s" % (self.left, self.right)

    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        # suppress the RuntimeWarning numpy emits when comparing NaNs
        with np.errstate(invalid="ignore"):
            return left < right
class Minimum(BinaryOperator):
    """Returns the smaller of two objects (element-wise minimum)."""

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("minimum", left, right)

    def __str__(self):
        """ See :meth:`pybamm.Symbol.__str__()`. """
        return "minimum({!s}, {!s})".format(self.left, self.right)

    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # derivative follows whichever branch is currently the smaller one
        left, right = self.orphans
        return (left <= right) * left.diff(variable) + (left > right) * right.diff(
            variable
        )

    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        left, right = self.orphans
        return (left <= right) * left_jac + (left > right) * right_jac

    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        # don't raise RuntimeWarning for NaNs
        return np.minimum(left, right)
class Maximum(BinaryOperator):
    """Returns the larger of two objects (element-wise maximum).

    (The original docstring said "smaller" -- a copy-paste from Minimum.)
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("maximum", left, right)

    def __str__(self):
        """ See :meth:`pybamm.Symbol.__str__()`. """
        return "maximum({!s}, {!s})".format(self.left, self.right)

    def _diff(self, variable):
        """ See :meth:`pybamm.Symbol._diff()`. """
        # derivative follows whichever branch is currently the larger one
        left, right = self.orphans
        return (left >= right) * left.diff(variable) + (left < right) * right.diff(
            variable
        )

    def _binary_jac(self, left_jac, right_jac):
        """ See :meth:`pybamm.BinaryOperator._binary_jac()`. """
        left, right = self.orphans
        return (left >= right) * left_jac + (left < right) * right_jac

    def _binary_evaluate(self, left, right):
        """ See :meth:`pybamm.BinaryOperator._binary_evaluate()`. """
        # don't raise RuntimeWarning for NaNs
        return np.maximum(left, right)
def minimum(left, right):
    """
    Returns the smaller of two objects. Not to be confused with :meth:`pybamm.min`,
    which returns min function of child.
    """
    # fold to a constant immediately when both children are constant
    return pybamm.simplify_if_constant(Minimum(left, right), keep_domains=True)
def maximum(left, right):
    """
    Returns the larger of two objects. Not to be confused with :meth:`pybamm.max`,
    which returns max function of child.
    """
    # fold to a constant immediately when both children are constant
    return pybamm.simplify_if_constant(Maximum(left, right), keep_domains=True)
def source(left, right, boundary=False):
    """A convenience function for creating (part of) an expression tree representing
    a source term. This is necessary for spatial methods where the mass matrix
    is not the identity (e.g. finite element formulation with piecewise linear
    basis functions). The left child is the symbol representing the source term
    and the right child is the symbol of the equation variable (currently, the
    finite element formulation in PyBaMM assumes all functions are constructed
    using the same basis, and the matrix here is constructed accounting for the
    boundary conditions of the right child). The method returns the matrix-vector
    product of the mass matrix (adjusted to account for any Dirichlet boundary
    conditions imposed on the right symbol) and the discretised left symbol.
    Parameters
    ----------
    left : :class:`Symbol`
        The left child node, which represents the expression for the source term.
    right : :class:`Symbol`
        The right child node. This is the symbol whose boundary conditions are
        accounted for in the construction of the mass matrix.
    boundary : bool, optional
        If True, then the mass matrix is assembled over the boundary,
        corresponding to a source term which only acts on the boundary of the
        domain. If False (default), the matrix is assembled over the entire domain,
        corresponding to a source term in the bulk.
    """
    # Broadcast if left is number
    if isinstance(left, numbers.Number):
        left = pybamm.PrimaryBroadcast(left, "current collector")
    if left.domain != ["current collector"] or right.domain != ["current collector"]:
        raise pybamm.DomainError(
            """'source' only implemented in the 'current collector' domain,
            but symbols have domains {} and {}""".format(
                left.domain, right.domain
            )
        )
    if boundary:
        # mass matrix assembled over the boundary only
        return pybamm.BoundaryMass(right) @ left
    else:
        return pybamm.Mass(right) @ left
| 36.262019 | 88 | 0.619191 |
55b8e504c8c767b8e745972852b1dd6ae7400e97 | 64 | py | Python | snippets/python/gui/fuzzy_filter.py | c6401/Snippets | a88d97005658eeda99f1a2766e3d069a64e142cb | [
"MIT"
] | null | null | null | snippets/python/gui/fuzzy_filter.py | c6401/Snippets | a88d97005658eeda99f1a2766e3d069a64e142cb | [
"MIT"
] | null | null | null | snippets/python/gui/fuzzy_filter.py | c6401/Snippets | a88d97005658eeda99f1a2766e3d069a64e142cb | [
"MIT"
] | null | null | null | 'rofi -dmenu -filter "???" << EOF\n' + '\n'.join(opts)+ '\nEOF'
| 32 | 63 | 0.5 |
63972d724d98d5b19632f5f775282dfc2b9f4f47 | 6,762 | py | Python | subcmds/status.py | phamvandai/git-repo | d92076d930af11bb9a3025a6b2f12ca139c0436f | [
"Apache-2.0"
] | 22 | 2017-05-02T07:48:13.000Z | 2021-12-23T10:22:23.000Z | subcmds/status.py | phamvandai/git-repo | d92076d930af11bb9a3025a6b2f12ca139c0436f | [
"Apache-2.0"
] | null | null | null | subcmds/status.py | phamvandai/git-repo | d92076d930af11bb9a3025a6b2f12ca139c0436f | [
"Apache-2.0"
] | 18 | 2017-07-07T06:31:44.000Z | 2021-12-06T08:11:17.000Z | # -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from command import PagedCommand
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import glob
import itertools
import os
from color import Coloring
import platform_utils
class Status(PagedCommand):
    common = True
    helpSummary = "Show the working tree status"
    helpUsage = """
%prog [<project>...]
"""
    helpDescription = """
'%prog' compares the working tree to the staging area (aka index),
and the most recent commit on this branch (HEAD), in each project
specified. A summary is displayed, one line per file where there
is a difference between these three states.
The -j/--jobs option can be used to run multiple status queries
in parallel.
The -o/--orphans option can be used to show objects that are in
the working directory, but not associated with a repo project.
This includes unmanaged top-level files and directories, but also
includes deeper items. For example, if dir/subdir/proj1 and
dir/subdir/proj2 are repo projects, dir/subdir/proj3 will be shown
if it is not known to repo.
# Status Display
The status display is organized into three columns of information,
for example if the file 'subcmds/status.py' is modified in the
project 'repo' on branch 'devwork':
 project repo/ branch devwork
 -m subcmds/status.py
The first column explains how the staging area (index) differs from
the last commit (HEAD). Its values are always displayed in upper
case and have the following meanings:
 -: no difference
 A: added (not in HEAD, in index )
 M: modified ( in HEAD, in index, different content )
 D: deleted ( in HEAD, not in index )
 R: renamed (not in HEAD, in index, path changed )
 C: copied (not in HEAD, in index, copied from another)
 T: mode changed ( in HEAD, in index, same content )
 U: unmerged; conflict resolution required
The second column explains how the working directory differs from
the index. Its values are always displayed in lower case and have
the following meanings:
 -: new / unknown (not in index, in work tree )
 m: modified ( in index, in work tree, modified )
 d: deleted ( in index, not in work tree )
"""

    def _Options(self, p):
        """Register command-line options on the optparse parser `p`."""
        p.add_option('-j', '--jobs',
                     dest='jobs', action='store', type='int', default=2,
                     help="number of projects to check simultaneously")
        p.add_option('-o', '--orphans',
                     dest='orphans', action='store_true',
                     help="include objects in working directory outside of repo projects")
        p.add_option('-q', '--quiet', action='store_true',
                     help="only print the name of modified projects")

    def _StatusHelper(self, project, clean_counter, sem, quiet):
        """Obtains the status for a specific project.
        Obtains the status for a project, redirecting the output to
        the specified object. It will release the semaphore
        when done.
        Args:
          project: Project to get status of.
          clean_counter: Counter for clean projects.
          sem: Semaphore, will call release() when complete.
          output: Where to output the status.
        """
        try:
            state = project.PrintWorkTreeStatus(quiet=quiet)
            if state == 'CLEAN':
                # advance the shared clean-project counter
                next(clean_counter)
        finally:
            # always free the worker slot, even if the status query raised
            sem.release()

    def _FindOrphans(self, dirs, proj_dirs, proj_dirs_parents, outstring):
        """find 'dirs' that are present in 'proj_dirs_parents' but not in 'proj_dirs'"""
        status_header = ' --\t'
        for item in dirs:
            if not platform_utils.isdir(item):
                # a plain file outside any project: report directly
                outstring.append(''.join([status_header, item]))
                continue
            if item in proj_dirs:
                # directory is itself a project root: managed, skip
                continue
            if item in proj_dirs_parents:
                # directory contains project(s) deeper down: recurse into it
                self._FindOrphans(glob.glob('%s/.*' % item) +
                                  glob.glob('%s/*' % item),
                                  proj_dirs, proj_dirs_parents, outstring)
                continue
            # unmanaged directory: report with a trailing '/'
            outstring.append(''.join([status_header, item, '/']))

    def Execute(self, opt, args):
        """Print working-tree status for the selected projects, optionally
        followed by a listing of orphan (unmanaged) files/directories."""
        all_projects = self.GetProjects(args)
        counter = itertools.count()
        if opt.jobs == 1:
            # serial path: no threads needed
            for project in all_projects:
                state = project.PrintWorkTreeStatus(quiet=opt.quiet)
                if state == 'CLEAN':
                    next(counter)
        else:
            # bounded parallelism: the semaphore limits live workers to opt.jobs
            sem = _threading.Semaphore(opt.jobs)
            threads = []
            for project in all_projects:
                sem.acquire()
                t = _threading.Thread(target=self._StatusHelper,
                                      args=(project, counter, sem, opt.quiet))
                threads.append(t)
                t.daemon = True
                t.start()
            for t in threads:
                t.join()
        if not opt.quiet and len(all_projects) == next(counter):
            print('nothing to commit (working directory clean)')
        if opt.orphans:
            # build the set of project paths (and all their ancestors) so
            # anything outside that set can be reported as an orphan
            proj_dirs = set()
            proj_dirs_parents = set()
            for project in self.GetProjects(None, missing_ok=True):
                proj_dirs.add(project.relpath)
                (head, _tail) = os.path.split(project.relpath)
                while head != "":
                    proj_dirs_parents.add(head)
                    (head, _tail) = os.path.split(head)
            proj_dirs.add('.repo')

            class StatusColoring(Coloring):
                def __init__(self, config):
                    Coloring.__init__(self, config, 'status')
                    self.project = self.printer('header', attr = 'bold')
                    self.untracked = self.printer('untracked', fg = 'red')

            orig_path = os.getcwd()
            try:
                # orphan search is relative to the manifest top directory
                os.chdir(self.manifest.topdir)
                outstring = []
                self._FindOrphans(glob.glob('.*') +
                                  glob.glob('*'),
                                  proj_dirs, proj_dirs_parents, outstring)
                if outstring:
                    output = StatusColoring(self.manifest.globalConfig)
                    output.project('Objects not within a project (orphans)')
                    output.nl()
                    for entry in outstring:
                        output.untracked(entry)
                        output.nl()
                else:
                    print('No orphan files or directories')
            finally:
                # Restore CWD.
                os.chdir(orig_path)
| 34.151515 | 86 | 0.641674 |
ae2d3228d336c0c73103f25fd269a5a528c159ad | 1,249 | py | Python | ShockGraphAPI/serverProd.py | DEKHTIARJonathan/ShapeLearnerAPI | 6b8824da28cc5f481b550c8c7912c86097e20536 | [
"MIT"
] | null | null | null | ShockGraphAPI/serverProd.py | DEKHTIARJonathan/ShapeLearnerAPI | 6b8824da28cc5f481b550c8c7912c86097e20536 | [
"MIT"
] | null | null | null | ShockGraphAPI/serverProd.py | DEKHTIARJonathan/ShapeLearnerAPI | 6b8824da28cc5f481b550c8c7912c86097e20536 | [
"MIT"
] | null | null | null | ################################ Import Libraries ################################
import os.path
import sys
sys.path.append('../Config/')
dllsPath = os.path.dirname(os.path.realpath(__file__))+'\dlls'
os.environ['PATH'] = dllsPath + ';' + os.environ['PATH']
from loadConf import loadDBConf, loadAPIConf
import api
#######################################################################################
#######################################################################################
#######################################################################################
# Load DB and API settings from the shared Config module.
configDB = loadDBConf()
configAPI = loadAPIConf()
# Connection settings for the production database.
prodDBServer = {'ip':configDB['prodDB']['ip'], 'port':configDB['prodDB']['port'], 'dbUser':configDB['prodDB']['dbUser'], 'dbPass':configDB['prodDB']['dbPass'], 'dbName':configDB['prodDB']['dbName']}
# Endpoints for the ShockGraph production API server and the job server.
shockGraphServerAPI = {'ip':configAPI['shockGraphProdServer']['ip'], 'port':configAPI['shockGraphProdServer']['port'], 'local':configAPI['shockGraphProdServer']['local']}
jobServerAPI = {'ip':configAPI['jobServer']['ip'], 'port':configAPI['jobServer']['port'], 'local':configAPI['jobServer']['local']}
# NOTE(review): rebinding `api` shadows the imported `api` module; the class
# `api.API` is unreachable after this line. Works, but a distinct name would be clearer.
api = api.API(jobServerAPI, shockGraphServerAPI['port'], shockGraphServerAPI['local'], prodDBServer)
api.start()
5fea06c0c727b839cdd1b66d6a546c679acb53db | 2,501 | py | Python | symposion/teams/migrations/0001_initial.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 154 | 2015-01-17T02:29:24.000Z | 2022-03-20T20:37:24.000Z | symposion/teams/migrations/0001_initial.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 316 | 2015-01-10T04:01:50.000Z | 2020-09-30T20:18:08.000Z | symposion/teams/migrations/0001_initial.py | azkarmoulana/pycon | 931388e6f640c35b892bb4b2d12581ba7ec8cf4e | [
"BSD-3-Clause"
] | 89 | 2015-01-10T05:25:21.000Z | 2022-02-27T03:28:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Membership and Team models
    # and wires Membership to Team and to the configured AUTH_USER_MODEL.
    # Do not edit by hand beyond comments; Django compares this file to the DB state.

    dependencies = [
        ('auth', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Membership',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('state', models.CharField(max_length=20, choices=[(b'applied', b'applied'), (b'invited', b'invited'), (b'declined', b'declined'), (b'rejected', b'rejected'), (b'member', b'member'), (b'manager', b'manager')])),
                ('message', models.TextField(blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(unique=True)),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('access', models.CharField(max_length=20, choices=[(b'open', b'open'), (b'application', b'by application'), (b'invitation', b'by invitation')])),
                ('created', models.DateTimeField(default=datetime.datetime.now, editable=False)),
                ('manager_permissions', models.ManyToManyField(related_name='manager_teams', to='auth.Permission', blank=True)),
                ('permissions', models.ManyToManyField(related_name='member_teams', to='auth.Permission', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='membership',
            name='team',
            field=models.ForeignKey(related_name='memberships', to='teams.Team'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='membership',
            name='user',
            field=models.ForeignKey(related_name='memberships', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
        # a user may belong to a given team at most once
        migrations.AlterUniqueTogether(
            name='membership',
            unique_together=set([('user', 'team')]),
        ),
    ]
| 41 | 227 | 0.577769 |
2111c25f0ebf19cf926c91b790bec6180c2a916a | 8,628 | py | Python | Dining-Concierge/DataProcessing/5_get_yelp_restaurant.py | MikeYan01/Serverless-Apps | 22c353997ddba66f3e574f1f35f14560521fd62e | [
"Apache-2.0"
] | null | null | null | Dining-Concierge/DataProcessing/5_get_yelp_restaurant.py | MikeYan01/Serverless-Apps | 22c353997ddba66f3e574f1f35f14560521fd62e | [
"Apache-2.0"
] | null | null | null | Dining-Concierge/DataProcessing/5_get_yelp_restaurant.py | MikeYan01/Serverless-Apps | 22c353997ddba66f3e574f1f35f14560521fd62e | [
"Apache-2.0"
] | null | null | null | import requests
import csv
import time
import boto3
import pandas as pd
import os
from datetime import datetime
from decimal import Decimal
# constants #
# AWS config
AWS_DB_REGION = 'us-east-1'
AWS_TABLE_NAME = 'Yelp_Restaurants'
AWS_PRIMARY_KEY = 'RestaurantID'
# local csv config
CSV_FILE = 'Yelp_Restaurants.csv'
CSV_HEAD = [AWS_PRIMARY_KEY, 'Name', 'Cuisine', 'Rating', 'NumberOfReviews',
'Address', 'ZipCode', 'Latitude', 'Longitude', 'IsClosed',
'InsertTime']
# Yelp API config
YELP_API_KEY = 'XXX'
YELP_CLIENT_ID = 'XXX'
YELP_ENDPOINT = 'https://api.yelp.com/v3/businesses/search'
YELP_ENDPOINT_ID = 'https://api.yelp.com/v3/businesses/' + YELP_CLIENT_ID
YELP_REQ_HEADERS = {'Authorization': 'bearer %s' % YELP_API_KEY}
YELP_REQ_PARAMETERS = {
'term': 'food',
'limit': 50,
'radius': 10000,
'location': 'Manhattan',
'sort_by': 'distance'}
YELP_REQ_CUISINES = ['italian', 'chinese', 'mexican', 'american', 'japanese',
'pizza', 'healthy', 'brunch', 'korean', 'thai',
'vietnamese', 'indian', 'seafood', 'dessert']
YELP_REQ_AREAS = ['Central Harlem, Manhattan',
'Washington Heights, Manhattan',
'East Harlem, Manhattan',
'Upper West Side, Manhattan',
'Upper East Side, Manhattan',
'Midtown West, Manhattan',
'Midtown East, Manhattan',
'Chelsea, Manhattan',
'Murray Hill, Manhattan',
'Gramercy, Manhattan',
'Greenwich, Manhattan',
'East Village, Manhattan',
'Soho, Manhattan',
'Lower East Side, Manhattan',
'Tribeca, Manhattan',
'Chinatown, Manhattan',
'Financial District, Manhattan']
AREA_ZIP = {'Central Harlem, Manhattan': [10026, 10027, 10030, 10037, 10039],
'Washington Heights, Manhattan': [10031, 10032, 10033, 10034, 10040],
'East Harlem, Manhattan': [10029, 10035],
'Upper West Side, Manhattan': [10023, 10024, 10025],
'Upper East Side, Manhattan': [10021, 10028, 10044, 10065, 10075, 10128],
'Midtown West, Manhattan': [10019, 10020],
'Midtown East, Manhattan': [10022],
'Chelsea, Manhattan': [10001, 10011, 10018, 10036],
'Murray Hill, Manhattan': [10017],
'Gramercy, Manhattan': [10010, 10016],
'Greenwich, Manhattan': [10012, 10014],
'East Village, Manhattan': [10003, 10009],
'Soho, Manhattan': [10013],
'Lower East Side, Manhattan': [10002],
'Tribeca, Manhattan': [10282, 10007],
'Chinatown, Manhattan': [10038],
'Financial District, Manhattan': [10006, 10280, 10005, 10004]}
# Functions #
# check data
def valid(input):
    """Return the value unchanged, or the placeholder 'N/A' when it is empty.

    "Empty" means the string form of the value has zero length (e.g. '' from a
    missing Yelp field). Numeric zero is NOT empty, since str(0) == '0'.
    """
    return input if len(str(input)) > 0 else 'N/A'
# write to CSV file
def writeCSV(data):
    """Append `data` (a list of dicts keyed by CSV_HEAD) to CSV_FILE.

    BUG FIX: the original wrote the header row on *every* call, so the file
    accumulated one duplicate header per area (formatCSV() then had to strip
    them back out). Now the header is written only when the file is new or
    empty; existing consumers are unaffected since formatCSV() tolerates both.
    """
    # header only for a fresh/empty file
    write_header = not os.path.exists(CSV_FILE) or os.path.getsize(CSV_FILE) == 0
    with open(CSV_FILE, 'a+', newline='', encoding='utf-8') as f:
        f_csv = csv.DictWriter(f, CSV_HEAD)
        if write_header:
            f_csv.writeheader()
        f_csv.writerows(data)
def getDataFromYelp():
    """Download restaurant data from the Yelp Fusion search API for every
    (area, cuisine) pair and append the rows to CSV_FILE.

    Network-bound: one HTTP request per area/cuisine combination. Progress and
    timing are printed per area. The output CSV is recreated from scratch.
    """
    # start from a clean output file
    if os.path.exists(CSV_FILE):
        os.remove(CSV_FILE)
    print ('=========== start requesting data from Yelp ===========')
    start = time.time()
    area_time = start
    area_idx = 1
    total_item = 0
    # get data
    for area in YELP_REQ_AREAS:
        # iterate each area
        YELP_REQ_PARAMETERS['location'] = area
        area_time = time.time()
        area_restaurants = []
        area_item = 0
        # iterate each cuisine
        for cuisine in YELP_REQ_CUISINES:
            YELP_REQ_PARAMETERS['term'] = cuisine
            response = requests.get(url=YELP_ENDPOINT,
                                    params=YELP_REQ_PARAMETERS,
                                    headers=YELP_REQ_HEADERS)
            # BUG FIX: the original `except 'businesses' not in response.json():`
            # is not a valid exception clause (a bool is not an exception class)
            # and the lookup was re-executed unconditionally afterwards, so any
            # error response crashed the scrape. Catch the real exceptions and
            # skip this cuisine instead.
            try:
                business_data = response.json()['businesses']
            except (KeyError, ValueError):
                print ('Yelp API Request/Return Error')
                continue
            # process request
            for data in business_data:
                time_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
                item = {CSV_HEAD[0]: valid(data['id']),
                        CSV_HEAD[1]: valid(data['name']),
                        CSV_HEAD[2]: valid(cuisine),
                        CSV_HEAD[3]: valid(Decimal(data['rating'])),
                        CSV_HEAD[4]: valid(Decimal(data['review_count'])),
                        CSV_HEAD[5]: valid(data['location']['address1']),
                        CSV_HEAD[6]: valid(data['location']['zip_code']),
                        CSV_HEAD[7]: valid(str(data['coordinates']['latitude'])),
                        CSV_HEAD[8]: valid(str(data['coordinates']['longitude'])),
                        CSV_HEAD[9]: valid(str(data['is_closed'])),
                        CSV_HEAD[10]: valid(time_string)}
                # write restaurant data to local area restaurants list
                area_restaurants.append(item)
                area_item += 1
                total_item += 1
        # finish area restaurants data and write to CSV
        writeCSV(area_restaurants)
        print ('(%d/%d) "%s" downloaded, item count: %d, total item: %d, time spent: %ds, total time: %ds' % (
            area_idx, len(YELP_REQ_AREAS), area, area_item, total_item,
            int(time.time() - area_time), int(time.time() - start)))
        area_idx += 1
    print ('=========== requesting data from Yelp done ===========')
# format CSV data: remove duplicates and sort
def formatCSV():
    """Clean CSV_FILE in place: drop repeated header rows (writeCSV appends a
    header on every call), de-duplicate restaurants by ID, and sort by
    cuisine then zip code so uploadToDynamoDB can report per-cuisine progress."""
    print ('=========== start formatting data ===========')
    f = pd.read_csv(CSV_FILE)
    print ('Items before format:', len(f))
    # drop rows whose ID column actually contains the header text
    f = f[~f[CSV_HEAD[0]].str.contains(CSV_HEAD[0])]
    f.drop_duplicates(subset=[AWS_PRIMARY_KEY], keep='first', inplace=True)
    f.sort_values([CSV_HEAD[2], CSV_HEAD[6]], inplace=True)
    print ('Items after format:', len(f))
    f.to_csv(CSV_FILE, index=False)
    print ('=========== formatting data done ===========')
# upload to DynamoDB
def uploadToDynamoDB():
    """Upload every row of CSV_FILE to the DynamoDB table AWS_TABLE_NAME.

    Assumes formatCSV() has already run, so rows are sorted by cuisine and
    progress can be reported once per contiguous cuisine group.
    """
    print ('=========== start uploading data to DynamoDB ===========')
    # init csv file and AWS
    yelp_csv = pd.read_csv(CSV_FILE)
    dynamodb = boto3.resource('dynamodb', region_name=AWS_DB_REGION)
    table = dynamodb.Table(AWS_TABLE_NAME)
    # init counting var
    cuisine_last = str(yelp_csv[CSV_HEAD[2]][0])
    cuisine_type = 1
    cuisine_count = 0
    total_count = 0
    start_time = time.time()
    point_time = start_time
    # upload
    for i in range(len(yelp_csv)):
        cuisine_curr = str(yelp_csv[CSV_HEAD[2]][i])
        cuisine_count += 1
        total_count += 1
        item = {CSV_HEAD[0]: str(yelp_csv[CSV_HEAD[0]][i]),
                CSV_HEAD[1]: str(yelp_csv[CSV_HEAD[1]][i]),
                CSV_HEAD[2]: str(yelp_csv[CSV_HEAD[2]][i]),
                CSV_HEAD[3]: Decimal(yelp_csv[CSV_HEAD[3]][i].astype(Decimal)),
                CSV_HEAD[4]: Decimal(yelp_csv[CSV_HEAD[4]][i].astype(Decimal)),
                CSV_HEAD[5]: str(yelp_csv[CSV_HEAD[5]][i]),
                CSV_HEAD[6]: str(yelp_csv[CSV_HEAD[6]][i]),
                CSV_HEAD[7]: str(yelp_csv[CSV_HEAD[7]][i]),
                CSV_HEAD[8]: str(yelp_csv[CSV_HEAD[8]][i]),
                CSV_HEAD[9]: str(yelp_csv[CSV_HEAD[9]][i].astype(str)),
                CSV_HEAD[10]: str(yelp_csv[CSV_HEAD[10]][i])}
        table.put_item(Item=item)
        # finish uploading a cuisine type
        if cuisine_curr != cuisine_last:
            now = time.time()
            print ('(%d/%d) cuisine: "%s" uploaded, time spent: %ds, total time: %ds, current item: %d, total item: %d' % (
                cuisine_type, len(YELP_REQ_CUISINES), cuisine_last,
                int(now - point_time), int(now - start_time),
                cuisine_count, total_count))
            point_time = now
            cuisine_last = cuisine_curr
            cuisine_count = 0
            cuisine_type += 1
    # BUG FIX: `now` was only assigned inside the cuisine-change branch, so the
    # final summary raised NameError when the CSV held a single cuisine, and
    # otherwise printed a stale timestamp. Take a fresh timestamp here.
    now = time.time()
    # finish uploading last cuisine type
    print ('(%d/%d) cuisine: "%s" uploaded, time spent: %ds, total time: %ds, current item: %d, total item: %d' % (
        cuisine_type, len(YELP_REQ_CUISINES), cuisine_curr,
        int(now - point_time), int(now - start_time),
        cuisine_count, total_count))
    print ('=========== uploading data to DynamoDB done ===========')
# Process #
# Pipeline: scrape Yelp -> clean/sort the CSV -> push rows to DynamoDB.
getDataFromYelp()
formatCSV()
uploadToDynamoDB()
| 38.008811 | 123 | 0.562007 |
07e656fb5d18cd346bc7f3c4ba19c4e09e871f3e | 10,478 | py | Python | scripts/ur5_teleop.py | sdsonawani/irl_robots | f393fd4ad37e3016062b9f719407d2b56b2159b6 | [
"MIT"
] | null | null | null | scripts/ur5_teleop.py | sdsonawani/irl_robots | f393fd4ad37e3016062b9f719407d2b56b2159b6 | [
"MIT"
] | null | null | null | scripts/ur5_teleop.py | sdsonawani/irl_robots | f393fd4ad37e3016062b9f719407d2b56b2159b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import datetime
import numpy as np
import rospy
import subprocess
import sys
import time
from irl_robots.msg import ur5Control
from irl_robots.msg import ur5Joints
from irl_robots.msg import ur5Tool
from sensor_msgs.msg import Joy
import kdl_parser_py.urdf
import PyKDL as kdl
# ***** NOTE *****
# I modified the URDF to introduce a yaw offset to the world_joint of 0.7854.
# This aligns the UR5 with the orientation of the physical robot in our lab.
VERBOSE = False
# Joy message axes mappings for Dualshock 4 controller
LEFT_X = 0
LEFT_Y = 1
RIGHT_X = 3
RIGHT_Y = 4
DPAD_X = 6
DPAD_Y = 7
LEFT_TRIGGER = 2
RIGHT_TRIGGER = 5
# Buttons
X_BUTTON = 0
CIRCLE_BUTTON = 1
TRIANGLE_BUTTON = 2
SQUARE_BUTTON = 3
L1_BUTTON = 4
R1_BUTTON = 5
L2_BUTTON = 6
R2_BUTTON = 7
START = 9
SELECT = 8
L3_BUTTON = 11
R3_BUTTON = 12
PS_BUTTON = 10
# Axis ranges
STICK_MAX_VALUE = 1.0
STICK_MIN_VALUE = -1.0
TRIGGER_MAX_VALUE = 1.0
TRIGGER_MIN_VALUE = -1.0
# Gamepad thumbstick deadzone
STICK_DEADZONE = 0.15
CONTROL_FREQUENCY = 30
NUM_JOINTS = 6
MAX_CARTESIAN_ACCELERATION = 8.0
MIN_CARTESIAN_ACCELERATION = -8.0
MAX_CARTESIAN_VELOCITY = 5.0
MAX_ANGULAR_ACCELERATION = 13.0
MIN_ANGULAR_ACCELERATION = -8.0
MAX_ANGULAR_VELOCITY = 7.0
MAX_RADIUS_ACCELERATION = 8.0
MIN_RADIUS_ACCELERATION = -5.0
MAX_RADIUS_VELOCITY = 2.5
# CONTROL_MODE = 0 # X/Y Grid
CONTROL_MODE = 1 # Radial
ONE_HANDED = True
class UR5Teleop:
    def __init__(self):
        '''Initialize ros publisher, ros subscriber'''
        # Latest ur5Joints message and derived end-effector position.
        self.joint_state = None
        self.tool_state = None
        # Persistent speedj command template, re-published every control tick.
        self.joint_command = ur5Control()
        self.joint_command.acceleration = 5.0
        self.joint_command.blend = 0
        self.joint_command.command = "speedj"
        self.joint_command.gain = 0
        self.joint_command.jointcontrol = True
        self.joint_command.lookahead = 0
        self.joint_command.time = 1.0 / CONTROL_FREQUENCY
        self.joint_command.velocity = 0
        # Desired control velocity in x, y, z.
        self.desired_angular_velocity = 0.0
        self.desired_radius_velocity = 0.0
        self.desired_cartesian_velocity = [0.0, 0.0, 0.0]
        self.desired_wrist1_velocity = 0.0
        self.desired_wrist2_velocity = 0.0
        self.desired_wrist3_velocity = 0.0
        # Velocities actually being commanded (ramped toward the desired ones).
        self.current_angular_velocity = 0.0
        self.current_radius_velocity = 0.0
        self.current_cartesian_velocity = [0.0, 0.0, 0.0]
        # KDL kinematic chain world -> ee_link for FK/IK.
        # NOTE(review): hard-coded absolute URDF path breaks on other machines;
        # consider a rospack / parameter-server lookup instead.
        self.ur5_kdl_tree = kdl_parser_py.urdf.treeFromFile("/home/local/ASUAD/jacampb1/Downloads/ur_description/urdf/ur5_robot.urdf")[1]
        self.ur5_transform = self.ur5_kdl_tree.getChain("world", "ee_link")
        self.kdl_fk = kdl.ChainFkSolverPos_recursive(self.ur5_transform)
        self.kdl_ik = kdl.ChainIkSolverPos_LMA(self.ur5_transform)
        self.kdl_initial = kdl.JntArray(NUM_JOINTS)
        self.kdl_joints = kdl.JntArray(NUM_JOINTS)
        self.kdl_effector = kdl.Frame()
        # subscribed Topic
        self.gamepad_sub = rospy.Subscriber("/joy", Joy, self.gamepad_callback, queue_size = 1)
        self.ur5_joint_sub = rospy.Subscriber("/ur5/joints", ur5Joints, self.ur5_joint_callback, queue_size = 1)
        # self.ur5_tool_sub = rospy.Subscriber("/ur5/tool", ur5Tool, self.ur5_tool_callback, queue_size = 1)
        self.ur5_command_pub = rospy.Publisher("/ur5/control", ur5Control, queue_size = 1)
        # time.sleep(3)
        # self.go_to_initial()
    def go_to_initial(self):
        """Stream the hard-coded home pose until the arm is within 1e-2 rad of it.

        Blocks until convergence or ROS shutdown.
        """
        self.joint_command.values = [0.57988, -0.99270, 2.03353, -0.96921, 1.40923, 1.5766]
        # NOTE(review): `rate` is created but never used -- the loop publishes
        # unthrottled; presumably rate.sleep() was intended inside the loop.
        rate = rospy.Rate(CONTROL_FREQUENCY)
        while not rospy.is_shutdown() and not np.allclose(self.joint_command.values, self.joint_state.positions, atol=1e-02):
            self.ur5_command_pub.publish(self.joint_command)
    def ur5_joint_callback(self, message):
        """ROS subscriber callback: cache the most recent ur5Joints message."""
        self.joint_state = message
# def ur5_tool_callback(self, message):
# self.tool_state = message
    def publish_joint_command(self):
        """Compute and publish the next joint command from the desired velocities.

        Refreshes the tool pose via forward kinematics, integrates the desired
        end-effector velocities over one control period (with acceleration
        ramping in mode 1), solves inverse kinematics for the displaced pose,
        and publishes the resulting joint command.
        """
        if(self.joint_state is not None):
            # Refresh the end-effector position from the latest joints (FK).
            for idx in range(NUM_JOINTS):
                self.kdl_joints[idx] = self.joint_state.positions[idx]
            self.kdl_fk.JntToCart(self.kdl_joints, self.kdl_effector)
            self.tool_state = self.kdl_effector.p
        if(self.tool_state is not None and self.joint_state is not None):
            if(CONTROL_MODE == 0):
                # Cartesian mode: integrate desired x/y/z velocity directly.
                for idx in range(3):
                    self.tool_state[idx] += self.desired_cartesian_velocity[idx] / CONTROL_FREQUENCY
            if(CONTROL_MODE == 1):
                # Cylindrical mode: ramp current velocities toward the desired
                # ones, limited by per-tick acceleration bounds.
                # (MIN_* limits are presumably negative — TODO confirm.)
                if(self.desired_angular_velocity - self.current_angular_velocity > MAX_ANGULAR_ACCELERATION / CONTROL_FREQUENCY):
                    self.current_angular_velocity += MAX_ANGULAR_ACCELERATION / CONTROL_FREQUENCY
                elif(self.desired_angular_velocity - self.current_angular_velocity < MIN_ANGULAR_ACCELERATION / CONTROL_FREQUENCY):
                    self.current_angular_velocity += MIN_ANGULAR_ACCELERATION / CONTROL_FREQUENCY
                else:
                    self.current_angular_velocity = self.desired_angular_velocity
                if(self.desired_radius_velocity - self.current_radius_velocity > MAX_RADIUS_ACCELERATION / CONTROL_FREQUENCY):
                    self.current_radius_velocity += MAX_RADIUS_ACCELERATION / CONTROL_FREQUENCY
                elif(self.desired_radius_velocity - self.current_radius_velocity < MIN_RADIUS_ACCELERATION / CONTROL_FREQUENCY):
                    self.current_radius_velocity += MIN_RADIUS_ACCELERATION / CONTROL_FREQUENCY
                else:
                    self.current_radius_velocity = self.desired_radius_velocity
                if(self.desired_cartesian_velocity[2] - self.current_cartesian_velocity[2] > MAX_CARTESIAN_ACCELERATION / CONTROL_FREQUENCY):
                    self.current_cartesian_velocity[2] += MAX_CARTESIAN_ACCELERATION / CONTROL_FREQUENCY
                elif(self.desired_cartesian_velocity[2] - self.current_cartesian_velocity[2] < MIN_CARTESIAN_ACCELERATION / CONTROL_FREQUENCY):
                    self.current_cartesian_velocity[2] += MIN_CARTESIAN_ACCELERATION / CONTROL_FREQUENCY
                else:
                    self.current_cartesian_velocity[2] = self.desired_cartesian_velocity[2]
                # Finish doing this and add min acceleration. So then we can have separate max/min.
                # Calculate current radius of arm.
                radius = np.hypot(self.tool_state[0], self.tool_state[1])
                # Calculate current angle from origin
                current_angle = np.arctan2(self.tool_state[1], self.tool_state[0])
                # Calculate angle to desired point
                desired_angle = current_angle + ((self.current_angular_velocity / CONTROL_FREQUENCY) / radius)
                # Calculate desired radius to desired point
                desired_radius = radius + (self.current_radius_velocity / CONTROL_FREQUENCY)
                # Compute desired point
                self.tool_state[0] = desired_radius * np.cos(desired_angle)
                self.tool_state[1] = desired_radius * np.sin(desired_angle)
                self.tool_state[2] += self.current_cartesian_velocity[2] / CONTROL_FREQUENCY
            # Solve IK for the displaced pose, seeding with the current joints.
            self.kdl_effector.p = self.tool_state
            for idx in range(NUM_JOINTS):
                self.kdl_initial[idx] = self.joint_state.positions[idx]
            self.kdl_ik.CartToJnt(self.kdl_initial, self.kdl_effector, self.kdl_joints)
            print(self.kdl_joints)
            for idx in range(NUM_JOINTS):
                self.joint_command.values[idx] = self.kdl_joints[idx]
            # Wrist joints are commanded directly, overriding the IK solution.
            if(not np.allclose(self.desired_wrist1_velocity, 0.0)):
                self.joint_command.values[3] = self.joint_state.positions[3] + (self.desired_wrist1_velocity / CONTROL_FREQUENCY)
            # NOTE(review): unlike wrist1/wrist3, the guard below is commented
            # out, so wrist2 is always overwritten — confirm this is intended.
            # if(not np.allclose(self.desired_wrist2_velocity, 0.0)):
            self.joint_command.values[4] = self.joint_state.positions[4] + (self.desired_wrist2_velocity / CONTROL_FREQUENCY)
            if(not np.allclose(self.desired_wrist3_velocity, 0.0)):
                self.joint_command.values[5] = self.joint_state.positions[5] + (self.desired_wrist3_velocity / CONTROL_FREQUENCY)
            self.ur5_command_pub.publish(self.joint_command)
    def gamepad_callback(self, message):
        """Joy message callback: map gamepad axes/buttons to desired velocities."""
        # Lazily initialise the tool pose via FK the first time a gamepad
        # message arrives before any joint command has been computed.
        if(self.tool_state is None and self.joint_state is not None):
            for idx in range(NUM_JOINTS):
                self.kdl_joints[idx] = self.joint_state.positions[idx]
            self.kdl_fk.JntToCart(self.kdl_joints, self.kdl_effector)
            self.tool_state = self.kdl_effector.p
        if(CONTROL_MODE == 0):
            # Cartesian mode: left stick -> x/y, L1/L2 -> +/- z,
            # right stick -> wrist 1 and wrist 2.
            self.desired_cartesian_velocity[0] = (message.axes[LEFT_X] / STICK_MAX_VALUE) * MAX_CARTESIAN_VELOCITY
            self.desired_cartesian_velocity[1] = -(message.axes[LEFT_Y] / STICK_MAX_VALUE) * MAX_CARTESIAN_VELOCITY
            self.desired_cartesian_velocity[2] = (message.buttons[L1_BUTTON] * MAX_CARTESIAN_VELOCITY) - (message.buttons[L2_BUTTON] * MAX_CARTESIAN_VELOCITY)
            self.desired_wrist1_velocity = (message.axes[RIGHT_Y] / STICK_MAX_VALUE) * MAX_CARTESIAN_VELOCITY
            self.desired_wrist2_velocity = (message.axes[RIGHT_X] / STICK_MAX_VALUE) * MAX_CARTESIAN_VELOCITY
        elif(CONTROL_MODE == 1):
            # Cylindrical mode: left stick -> angular/radial velocity,
            # L1/L2 -> z, right stick -> wrists 1/2, R1/R2 -> wrist 3.
            self.desired_angular_velocity = -(message.axes[LEFT_X] / STICK_MAX_VALUE) * MAX_ANGULAR_VELOCITY
            self.desired_radius_velocity = (message.axes[LEFT_Y] / STICK_MAX_VALUE) * MAX_RADIUS_VELOCITY
            self.desired_cartesian_velocity[2] = (message.buttons[L1_BUTTON] * MAX_CARTESIAN_VELOCITY) - (message.buttons[L2_BUTTON] * MAX_CARTESIAN_VELOCITY)
            self.desired_wrist1_velocity = -(message.axes[RIGHT_Y] / STICK_MAX_VALUE) * MAX_ANGULAR_VELOCITY
            self.desired_wrist2_velocity = (message.axes[RIGHT_X] / STICK_MAX_VALUE) * MAX_ANGULAR_VELOCITY
            self.desired_wrist3_velocity = (message.buttons[R1_BUTTON] * MAX_ANGULAR_VELOCITY) - (message.buttons[R2_BUTTON] * MAX_ANGULAR_VELOCITY)
def main(args):
    """Start the teleop ROS node and run the control loop until shutdown."""
    rospy.init_node("ur5_teleop_node")
    teleop = UR5Teleop()
    loop_rate = rospy.Rate(CONTROL_FREQUENCY)  # 10hz
    while not rospy.is_shutdown():
        teleop.publish_joint_command()
        loop_rate.sleep()

if __name__ == '__main__':
    main(sys.argv)
| 42.942623 | 158 | 0.688681 |
d3ee9a0a6fab02dfa5dc3a34b5192f44dbfe114a | 2,725 | py | Python | app/views.py | HannesHolst1/pydoku | 628abfa35e230007c9b14e550096b47ec489e0fe | [
"MIT"
] | null | null | null | app/views.py | HannesHolst1/pydoku | 628abfa35e230007c9b14e550096b47ec489e0fe | [
"MIT"
] | null | null | null | app/views.py | HannesHolst1/pydoku | 628abfa35e230007c9b14e550096b47ec489e0fe | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
# Flask modules
# from os import replace
from flask import render_template, request, send_file, redirect, session
from flask.helpers import url_for
from jinja2 import TemplateNotFound
from flask_session import Session
# App modules
from app import app
# Backend
from backend import main
import io
import os.path
import jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy
from app import session_data
# Register jsonpickle's numpy handlers and attach server-side sessions to the app.
jsonpickle_numpy.register_handlers()
Session(app)
# App main route + generic routing
@app.route('/', defaults={'path': 'index.html'})
@app.route('/<path>')
def index(path):
    """Render the template named by the URL path (index.html by default).

    Unknown templates fall through to the 404 page.
    """
    try:
        # Serve the file (if exists) from app/templates/FILE.html
        return render_template( path )
    except TemplateNotFound:
        return render_template('page-404.html'), 404
@app.route('/', methods = ['POST'])
def upload_file():
    """Solve an uploaded sudoku image and render the result page.

    The problem and output images are jsonpickle-encoded into the
    server-side session for the /problem/ and /output/ routes to serve.
    """
    if request.method == 'POST':
        session_data.clean() # awesome old session data handling
        f = request.files['file']
        # Buffer the upload in memory; the solver reads from this stream.
        in_memory_file = io.BytesIO()
        f.save(in_memory_file)
        puzzle = main.Sudoku()
        puzzle.set_problem(in_memory_file=in_memory_file)
        puzzle.solve()
        obj_buffer = jsonpickle.encode(puzzle.get_problem())
        session['problem'] = obj_buffer
        obj_buffer = jsonpickle.encode(puzzle.get_output())
        session['output'] = obj_buffer
        return render_template('index.html', processed=True, solved_sudoku=puzzle.solved, status=puzzle.status)
    else:
        # Unreachable in practice: this route only accepts POST.
        return redirect(url_for('index'))
@app.route('/demo/<number>')
def provide_demo(number):
    """Solve one of the bundled demo puzzles and render the result page.

    The demo file path is derived from app config by substituting the
    demo number into DEMOFILE_MASK.
    """
    session_data.clean() # awesome old session data handling
    demofile = app.config['DEMOFILE_MASK']
    demofile = demofile.replace('*', str(number))
    demofile = app.config['DEMOFILE_PATH'] + demofile
    if not os.path.exists(demofile):
        return render_template('index.html', processed=False, solved_sudoku=False, status='Could not open demo {}'.format(number))
    puzzle = main.Sudoku()
    puzzle.set_problem(filename=demofile)
    puzzle.solve()
    # Stash both images in the session, same as upload_file does.
    obj_buffer = jsonpickle.encode(puzzle.get_problem())
    session['problem'] = obj_buffer
    obj_buffer = jsonpickle.encode(puzzle.get_output())
    session['output'] = obj_buffer
    return render_template('index.html', processed=True, solved_sudoku=puzzle.solved, status=puzzle.status)
@app.route('/problem/')
def display_problem():
    """Stream the session-cached problem image as JPEG (uncached)."""
    image_buffer = jsonpickle.decode(session.get('problem'))
    return send_file(image_buffer, mimetype='image/jpeg', cache_timeout=0)
@app.route('/output/')
def display_output():
    """Stream the session-cached solved-puzzle image as JPEG (uncached)."""
    image_buffer = jsonpickle.decode(session.get('output'))
    return send_file(image_buffer, mimetype='image/jpeg', cache_timeout=0)
a7b9348a109378aeb03adba98354313b13c2b684 | 246 | py | Python | run.py | alcinos/auto_yolo | 78727596f937b38d4de47dd9f0a7cc8c6104323f | [
"MIT"
] | 54 | 2018-12-10T21:08:42.000Z | 2022-02-18T02:44:19.000Z | run.py | alcinos/auto_yolo | 78727596f937b38d4de47dd9f0a7cc8c6104323f | [
"MIT"
] | 8 | 2019-04-02T10:31:13.000Z | 2022-03-31T13:44:25.000Z | run.py | alcinos/auto_yolo | 78727596f937b38d4de47dd9f0a7cc8c6104323f | [
"MIT"
] | 16 | 2019-04-26T11:45:08.000Z | 2022-02-09T07:59:25.000Z | from dps import cfg
from dps.utils import Config
from auto_yolo.envs import run_experiment
if __name__ == "__main__":
_config = Config()
with _config:
cfg.update_from_command_line()
run_experiment("local_run", _config, "")
| 20.5 | 44 | 0.715447 |
fd63dc6c7379cac0b70ca439f69729c0ed1181e6 | 13,954 | py | Python | contrib/devtools/github-merge.py | paycheckcash/paycheckcash | 383a1e1c42264f9c1e298867c5fbe5e795206e00 | [
"MIT"
] | 1 | 2019-05-18T19:32:50.000Z | 2019-05-18T19:32:50.000Z | contrib/devtools/github-merge.py | paycheckcash/paycheckcash | 383a1e1c42264f9c1e298867c5fbe5e795206e00 | [
"MIT"
] | null | null | null | contrib/devtools/github-merge.py | paycheckcash/paycheckcash | 383a1e1c42264f9c1e298867c5fbe5e795206e00 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2017 PaycheckCash Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
from __future__ import division,print_function,unicode_literals
import os
from sys import stdin,stdout,stderr
import argparse
import hashlib
import subprocess
import sys
import json
import codecs
try:
from urllib.request import Request,urlopen
except:
from urllib2 import Request,urlopen
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')
# OS specific configuration for terminal attributes
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%h %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
    # ANSI escapes: reset, and bold cyan for the PR header line.
    ATTR_RESET = '\033[0m'
    ATTR_PR = '\033[1;36m'
    # git pretty-format string with color placeholders.
    COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
    '''
    Get named configuration option from git repository.
    '''
    command = [GIT, 'config', '--get', option]
    try:
        value = subprocess.check_output(command)
    except subprocess.CalledProcessError:
        return default
    return value.rstrip().decode('utf-8')
def retrieve_pr_info(repo,pull):
    '''
    Retrieve pull request information from github.
    Return None if no title can be found, or an error happens.
    '''
    url = "https://api.github.com/repos/" + repo + "/pulls/" + pull
    try:
        response = urlopen(Request(url))
        utf8_reader = codecs.getreader('utf-8')
        return json.load(utf8_reader(response))
    except Exception as e:
        print('Warning: unable to retrieve pull information from github: %s' % e)
        return None
def ask_prompt(text):
    """Show *text* on stderr and return one stripped line read from stdin."""
    print(text, end=" ", file=stderr)
    stderr.flush()
    answer = stdin.readline().rstrip()
    print("", file=stderr)
    return answer
def get_symlink_files():
    """Return the paths of all symlinks in the HEAD tree (git mode 120000)."""
    entries = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines())
    symlinks = []
    for entry in entries:
        text = entry.decode('utf-8')
        mode = int(text.split(" ")[0], 8)
        if (mode & 0o170000) == 0o120000:
            symlinks.append(text.split("\t")[1])
    return symlinks
def tree_sha512sum(commit='HEAD'):
    """Return a SHA512 hex digest over every blob (path and content) in *commit*'s tree.

    Used to verify that the tree content did not change between the merge
    step and the sign-off step.
    """
    # request metadata for entire tree, recursively
    files = []
    blob_by_name = {}
    for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
        name_sep = line.index(b'\t')
        metadata = line[:name_sep].split() # perms, 'blob', blobid
        assert(metadata[1] == b'blob')
        name = line[name_sep+1:]
        files.append(name)
        blob_by_name[name] = metadata[2]
    files.sort()
    # open connection to git-cat-file in batch mode to request data for all blobs
    # this is much faster than launching it per file
    p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    overall = hashlib.sha512()
    for f in files:
        blob = blob_by_name[f]
        # request blob
        p.stdin.write(blob + b'\n')
        p.stdin.flush()
        # read header: blob, "blob", size
        reply = p.stdout.readline().split()
        assert(reply[0] == blob and reply[1] == b'blob')
        size = int(reply[2])
        # hash the blob data in 64 KiB chunks
        intern = hashlib.sha512()
        ptr = 0
        while ptr < size:
            bs = min(65536, size - ptr)
            piece = p.stdout.read(bs)
            if len(piece) == bs:
                intern.update(piece)
            else:
                raise IOError('Premature EOF reading git cat-file output')
            ptr += bs
        dig = intern.hexdigest()
        assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
        # update overall hash with file hash
        overall.update(dig.encode("utf-8"))
        overall.update(" ".encode("utf-8"))
        overall.update(f)
        overall.update("\n".encode("utf-8"))
    p.stdin.close()
    if p.wait():
        raise IOError('Non-zero return value executing git cat-file')
    return overall.hexdigest()
def print_merge_details(pull, title, branch, base_branch, head_branch):
    """Print a colored PR summary line plus the commit graph being merged."""
    print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
    subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
def parse_arguments():
    """Build and evaluate the command-line parser for this tool."""
    epilog = '''
        In addition, you can set the following git configuration variables:
        githubmerge.repository (mandatory),
        user.signingkey (mandatory),
        githubmerge.host (default: git@github.com),
        githubmerge.branch (no default),
        githubmerge.testcmd (default: none).
    '''
    arg_parser = argparse.ArgumentParser(
        description='Utility to merge, sign and push github pull requests',
        epilog=epilog)
    arg_parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
                            help='Pull request ID to merge')
    arg_parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?', default=None,
                            help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
    return arg_parser.parse_args()
def main():
    """Fetch, locally merge, verify, sign and optionally push a github PR.

    Reads settings from git config and the command line, reconstructs the
    merge locally, compares it against github's merge, verifies the tree
    hash is stable across the interactive review, then GPG-signs the merge
    commit and optionally pushes it.
    """
    # Extract settings from git repo
    repo = git_config_get('githubmerge.repository')
    host = git_config_get('githubmerge.host','git@github.com')
    opt_branch = git_config_get('githubmerge.branch',None)
    testcmd = git_config_get('githubmerge.testcmd')
    signingkey = git_config_get('user.signingkey')
    if repo is None:
        print("ERROR: No repository configured. Use this command to set:", file=stderr)
        print("git config githubmerge.repository <owner>/<repo>", file=stderr)
        sys.exit(1)
    if signingkey is None:
        print("ERROR: No GPG signing key set. Set one using:",file=stderr)
        print("git config --global user.signingkey <key>",file=stderr)
        sys.exit(1)
    host_repo = host+":"+repo # shortcut for push/pull target
    # Extract settings from command line
    args = parse_arguments()
    pull = str(args.pull[0])
    # Receive pull information from github
    info = retrieve_pr_info(repo,pull)
    if info is None:
        sys.exit(1)
    title = info['title'].strip()
    body = info['body'].strip()
    # precedence order for destination branch argument:
    #   - command line argument
    #   - githubmerge.branch setting
    #   - base branch for pull (as retrieved from github)
    #   - 'master'
    branch = args.branch or opt_branch or info['base']['ref'] or 'master'
    # Initialize source branches
    head_branch = 'pull/'+pull+'/head'
    base_branch = 'pull/'+pull+'/base'
    merge_branch = 'pull/'+pull+'/merge'
    local_merge_branch = 'pull/'+pull+'/local-merge'
    devnull = open(os.devnull, 'w', encoding="utf8")
    try:
        subprocess.check_call([GIT,'checkout','-q',branch])
    except subprocess.CalledProcessError:
        print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
        sys.exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*',
                               '+refs/heads/'+branch+':refs/heads/'+base_branch])
    except subprocess.CalledProcessError:
        print("ERROR: Cannot find pull request #%s or branch %s on %s." % (pull,branch,host_repo), file=stderr)
        sys.exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError:
        print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
        sys.exit(3)
    try:
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError:
        print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
        sys.exit(3)
    subprocess.check_call([GIT,'checkout','-q',base_branch])
    subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
    subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])
    try:
        # Go up to the repository's root.
        toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
        os.chdir(toplevel)
        # Create unsigned merge commit.
        if title:
            firstline = 'Merge #%s: %s' % (pull,title)
        else:
            firstline = 'Merge #%s' % (pull,)
        message = firstline + '\n\n'
        message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
        message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n'
        try:
            subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
        except subprocess.CalledProcessError:
            print("ERROR: Cannot be merged cleanly.",file=stderr)
            subprocess.check_call([GIT,'merge','--abort'])
            sys.exit(4)
        logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
        if logmsg.rstrip() != firstline.rstrip():
            print("ERROR: Creating merge failed (already merged?).",file=stderr)
            sys.exit(4)
        # Refuse trees containing symlinks.
        symlink_files = get_symlink_files()
        for f in symlink_files:
            print("ERROR: File %s was a symlink" % f)
        if len(symlink_files) > 0:
            sys.exit(4)
        # Put tree SHA512 into the message
        try:
            first_sha512 = tree_sha512sum()
            message += '\n\nTree-SHA512: ' + first_sha512
        except subprocess.CalledProcessError:
            print("ERROR: Unable to compute tree hash")
            sys.exit(4)
        try:
            subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')])
        except subprocess.CalledProcessError:
            print("ERROR: Cannot update message.", file=stderr)
            sys.exit(4)
        print_merge_details(pull, title, branch, base_branch, head_branch)
        print()
        # Run test command if configured.
        if testcmd:
            if subprocess.call(testcmd,shell=True):
                print("ERROR: Running %s failed." % testcmd,file=stderr)
                sys.exit(5)
            # Show the created merge.
            diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
            subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
            if diff:
                print("WARNING: merge differs from github!",file=stderr)
                reply = ask_prompt("Type 'ignore' to continue.")
                if reply.lower() == 'ignore':
                    print("Difference with github ignored.",file=stderr)
                else:
                    sys.exit(6)
        else:
            # Verify the result manually.
            print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
            print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
            print("Type 'exit' when done.",file=stderr)
            if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
                os.putenv('debian_chroot',pull)
            subprocess.call([BASH,'-i'])
        # Ensure the interactive step did not alter the tree.
        second_sha512 = tree_sha512sum()
        if first_sha512 != second_sha512:
            print("ERROR: Tree hash changed unexpectedly",file=stderr)
            sys.exit(8)
        # Sign the merge commit.
        print_merge_details(pull, title, branch, base_branch, head_branch)
        while True:
            reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
            if reply == 's':
                try:
                    subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
                    break
                except subprocess.CalledProcessError:
                    print("Error while signing, asking again.",file=stderr)
            elif reply == 'x':
                print("Not signing off on merge, exiting.",file=stderr)
                sys.exit(1)
        # Put the result in branch.
        subprocess.check_call([GIT,'checkout','-q',branch])
        subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
    finally:
        # Clean up temporary branches.
        subprocess.call([GIT,'checkout','-q',branch])
        subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)
    # Push the result.
    while True:
        reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
        if reply == 'push':
            subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
            break
        elif reply == 'x':
            sys.exit(1)

if __name__ == '__main__':
    main()
| 42.03012 | 159 | 0.618532 |
8ea2473f15846fb01b592fd96ea7cd59517451f6 | 16,286 | py | Python | unused/model_2.py | jackyjsy/SCGAN | c2b5db470cdeab608073f51fd9e8053402cc5d8a | [
"MIT"
] | 5 | 2022-01-08T02:28:04.000Z | 2022-03-09T04:55:12.000Z | unused/model_2.py | jackyjsy/SCGAN | c2b5db470cdeab608073f51fd9e8053402cc5d8a | [
"MIT"
] | 1 | 2022-01-03T16:22:43.000Z | 2022-01-08T02:22:56.000Z | unused/model_2.py | jackyjsy/SCGAN | c2b5db470cdeab608073f51fd9e8053402cc5d8a | [
"MIT"
] | 1 | 2021-12-16T22:53:04.000Z | 2021-12-16T22:53:04.000Z | # ResNet generator and discriminator
import torch
from torch import nn
import torch.nn.functional as F
from spectral_normalization import SpectralNorm
import numpy as np
# Number of image channels generated/consumed by the models (RGB).
channels = 3
class ResidualBlock(nn.Module):
    """Residual block: x + F(x), where F is conv-IN-ReLU-conv-IN.

    Both convolutions are 3x3, stride 1, bias-free; instance norms are
    affine. Spatial size and (for dim_in == dim_out) channel count are
    preserved, so the skip connection is a plain identity addition.
    """

    def __init__(self, dim_in, dim_out):
        super(ResidualBlock, self).__init__()
        body = [
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True),
        ]
        self.main = nn.Sequential(*body)

    def forward(self, x):
        return x + self.main(x)
class ResBlockGenerator(nn.Module):
    """Generator residual block with optional 2x nearest-neighbour upsampling.

    With stride != 1 both the main branch and the shortcut upsample by 2x;
    with stride == 1 the spatial size is preserved.

    Fixes over the original implementation:
    * the main branch previously upsampled unconditionally, so stride == 1
      produced a shape mismatch at the residual addition;
    * the stride == 1 shortcut was an identity, which also broke whenever
      in_channels != out_channels (now a 1x1 conv matches the channels);
    * uses the non-deprecated ``nn.init.xavier_uniform_``.
    The stride != 1 configuration — the only one used elsewhere in this
    file — is unchanged.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlockGenerator, self).__init__()

        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
        nn.init.xavier_uniform_(self.conv1.weight.data, 1.)
        nn.init.xavier_uniform_(self.conv2.weight.data, 1.)

        # Main branch: IN-ReLU-(upsample)-conv-IN-ReLU-conv.
        main_layers = [
            nn.InstanceNorm2d(in_channels, affine=True),
            nn.ReLU(),
        ]
        if stride != 1:
            main_layers.append(nn.Upsample(scale_factor=2))
        main_layers += [
            self.conv1,
            nn.InstanceNorm2d(out_channels, affine=True),
            nn.ReLU(),
            self.conv2,
        ]
        self.model = nn.Sequential(*main_layers)

        # Shortcut: 1x1 conv matches channels; upsample mirrors the main branch.
        self.conv_sc = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0)
        nn.init.xavier_uniform_(self.conv_sc.weight.data, 1.)
        if stride != 1:
            self.bypass = nn.Sequential(
                nn.Upsample(scale_factor=2),
                self.conv_sc
            )
        elif in_channels != out_channels:
            self.bypass = self.conv_sc
        else:
            self.bypass = nn.Sequential()

    def forward(self, x):
        return self.model(x) + self.bypass(x)
class ResBlockDiscriminator(nn.Module):
    """Discriminator residual block with spectrally-normalized 3x3 convs.

    With stride != 1 both branches downsample via average pooling and the
    shortcut uses a spectrally-normalized 1x1 conv; with stride == 1 the
    shortcut is an identity.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlockDiscriminator, self).__init__()

        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
        nn.init.xavier_uniform(self.conv1.weight.data, 1.)
        nn.init.xavier_uniform(self.conv2.weight.data, 1.)

        main_layers = [
            nn.ReLU(),
            SpectralNorm(self.conv1),
            nn.ReLU(),
            SpectralNorm(self.conv2),
        ]
        if stride != 1:
            main_layers.append(nn.AvgPool2d(2, stride=stride, padding=0))
        self.model = nn.Sequential(*main_layers)

        self.bypass = nn.Sequential()
        if stride != 1:
            self.bypass_conv = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0)
            nn.init.xavier_uniform(self.bypass_conv.weight.data, np.sqrt(2))
            self.bypass = nn.Sequential(
                SpectralNorm(self.bypass_conv),
                nn.AvgPool2d(2, stride=stride, padding=0),
            )

    def forward(self, x):
        return self.model(x) + self.bypass(x)
# special ResBlock just for the first layer of the discriminator
class FirstResBlockDiscriminator(nn.Module):
    """First discriminator block: convolves the raw image, downsamples by 2.

    Unlike ResBlockDiscriminator there is no leading ReLU (the input is a
    raw image), and the shortcut pools *before* its 1x1 convolution.
    Note: the ``stride`` argument is accepted for interface symmetry but
    not used — the block always pools by 2.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(FirstResBlockDiscriminator, self).__init__()

        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
        self.bypass_conv = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0)
        for conv, gain in ((self.conv1, 1.), (self.conv2, 1.), (self.bypass_conv, np.sqrt(2))):
            nn.init.xavier_uniform(conv.weight.data, gain)

        # we don't want to apply ReLU activation to raw image before convolution transformation.
        self.model = nn.Sequential(
            SpectralNorm(self.conv1),
            nn.ReLU(),
            SpectralNorm(self.conv2),
            nn.AvgPool2d(2),
        )
        self.bypass = nn.Sequential(
            nn.AvgPool2d(2),
            SpectralNorm(self.bypass_conv),
        )

    def forward(self, x):
        return self.model(x) + self.bypass(x)
# Base channel widths for the generator and discriminator; deeper stages
# use integer multiples of these values.
GEN_SIZE=64
DISC_SIZE=64
class Generator(nn.Module):
    """Conditional ResNet generator: latent z + domain code c -> Tanh image.

    The latent is projected to a 4x4 feature map and upsampled by three
    residual blocks; c is then broadcast spatially, concatenated, and two
    further upsampling blocks produce the output (five 2x upsamples total,
    so a 4x4 seed yields a 128x128 image).
    """

    def __init__(self, z_dim, c_dim):
        super(Generator, self).__init__()
        self.z_dim = z_dim
        self.dense = nn.Linear(self.z_dim, 4 * 4 * GEN_SIZE*16)
        self.final = nn.Conv2d(GEN_SIZE, channels, 3, stride=1, padding=1)
        nn.init.xavier_uniform(self.dense.weight.data, 1.)
        nn.init.xavier_uniform(self.final.weight.data, 1.)
        # Fuses the concatenated (features + condition) back to GEN_SIZE*4.
        conv_intermediate = nn.Conv2d(GEN_SIZE*4 + c_dim, GEN_SIZE*4, 3, 1, padding=1)
        nn.init.xavier_uniform(conv_intermediate.weight.data, 1.)
        # Stage 1: 4x4 -> 32x32, without the condition.
        self.model1 = nn.Sequential(
            ResBlockGenerator(GEN_SIZE*16, GEN_SIZE*16, stride=2),
            ResBlockGenerator(GEN_SIZE*16, GEN_SIZE*8, stride=2),
            ResBlockGenerator(GEN_SIZE*8, GEN_SIZE*4, stride=2),
            nn.BatchNorm2d(GEN_SIZE*4),
            nn.ReLU()
            )
        # Stage 2: condition-aware upsampling to the final image.
        self.model2 = nn.Sequential(
            conv_intermediate,
            ResBlockGenerator(GEN_SIZE*4, GEN_SIZE*2, stride=2),
            ResBlockGenerator(GEN_SIZE*2, GEN_SIZE, stride=2),
            nn.BatchNorm2d(GEN_SIZE),
            nn.ReLU(),
            self.final,
            nn.Tanh())

    def forward(self, z, c):
        h = self.model1(self.dense(z).view(-1, GEN_SIZE*16, 4, 4))
        # replicate spatially and concatenate domain information
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), h.size(2), h.size(3))
        hc = torch.cat([h, c], dim=1)
        return self.model2(hc)
class Generator_SC(nn.Module):
    """Conditional generator with an extra segmentation/spatial input s.

    Like Generator, but a small conv encoder (model_s) embeds s and its
    features are concatenated alongside the broadcast condition c before
    the second upsampling stage.

    NOTE(review): ``dense`` outputs 4*4*GEN_SIZE*2 features but is reshaped
    to (-1, GEN_SIZE*16, 4, 4) in forward(), which silently divides the
    batch dimension by 8 — this looks like a latent-dimension bug; confirm
    against how this class is instantiated.
    """

    def __init__(self, z_dim, c_dim, s_dim):
        super(Generator_SC, self).__init__()
        self.z_dim = z_dim
        self.dense = nn.Linear(self.z_dim, 4 * 4 * GEN_SIZE*2)
        self.final = nn.Conv2d(GEN_SIZE, channels, 3, stride=1, padding=1)
        nn.init.xavier_uniform(self.dense.weight.data, 1.)
        nn.init.xavier_uniform(self.final.weight.data, 1.)
        # Fuses features + condition + s-embedding back to GEN_SIZE*4.
        conv_intermediate = nn.Conv2d(GEN_SIZE*4 + c_dim + GEN_SIZE*2, GEN_SIZE*4, 3, 1, padding=1)
        nn.init.xavier_uniform(conv_intermediate.weight.data, 1.)
        # Two stride-2 convs embedding the spatial input s.
        conv_s1 = nn.Conv2d(s_dim, GEN_SIZE, kernel_size=4, stride=2, padding=1)
        conv_s2 = nn.Conv2d(GEN_SIZE, GEN_SIZE*2, kernel_size=4, stride=2, padding=1)
        nn.init.xavier_uniform(conv_s1.weight.data, 1.)
        nn.init.xavier_uniform(conv_s2.weight.data, 1.)
        self.model1 = nn.Sequential(
            ResBlockGenerator(GEN_SIZE*16, GEN_SIZE*16, stride=2),
            ResBlockGenerator(GEN_SIZE*16, GEN_SIZE*8, stride=2),
            ResBlockGenerator(GEN_SIZE*8, GEN_SIZE*4, stride=2),
            # nn.BatchNorm2d(GEN_SIZE*4),
            nn.InstanceNorm2d(GEN_SIZE*4, affine=True),
            nn.ReLU()
            )
        self.model2 = nn.Sequential(
            conv_intermediate,
            ResBlockGenerator(GEN_SIZE*4, GEN_SIZE*2, stride=2),
            ResBlockGenerator(GEN_SIZE*2, GEN_SIZE, stride=2),
            nn.InstanceNorm2d(GEN_SIZE, affine=True),
            nn.ReLU(),
            self.final,
            nn.Tanh())
        self.model_s = nn.Sequential(
            conv_s1,
            # nn.BatchNorm2d(GEN_SIZE),
            nn.InstanceNorm2d(GEN_SIZE, affine=True),
            nn.ReLU(),
            conv_s2,
            # nn.BatchNorm2d(GEN_SIZE*2),
            nn.InstanceNorm2d(GEN_SIZE*2, affine=True),
            nn.ReLU()
            )

    def forward(self, z, c, s):
        h = self.model1(self.dense(z).view(-1, GEN_SIZE*16, 4, 4))
        s = self.model_s(s)
        # replicate spatially and concatenate domain information
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), h.size(2), h.size(3))
        hcs = torch.cat([h, c, s], dim=1)
        return self.model2(hcs)
class Generator_SC_2(nn.Module):
    """Variant generator that seeds from the segmentation encoding itself.

    The spatial input s is encoded by three stride-2 convs; the latent z is
    projected and reshaped to the *same* spatial size as that encoding,
    concatenated with it, fused by model1, then decoded with the broadcast
    condition c by model2.

    NOTE(review): dense outputs 16*16*GEN_SIZE*2 features and forward()
    reshapes them to the encoded s's spatial size, which implies s is
    expected to be 128x128 (128 / 2^3 = 16) — confirm against callers.
    """

    def __init__(self, z_dim, c_dim, s_dim):
        super(Generator_SC_2, self).__init__()
        self.z_dim = z_dim
        self.dense = nn.Linear(self.z_dim, 16 * 16 * GEN_SIZE*2)
        self.final = nn.Conv2d(GEN_SIZE, channels, 3, stride=1, padding=1)
        # Three stride-2 convs encoding s (spatial size / 8).
        conv_s1 = nn.Conv2d(s_dim, GEN_SIZE, kernel_size=4, stride=2, padding=1)
        conv_s2 = nn.Conv2d(GEN_SIZE, GEN_SIZE*2, kernel_size=4, stride=2, padding=1)
        conv_s3 = nn.Conv2d(GEN_SIZE*2, GEN_SIZE*4, kernel_size=4, stride=2, padding=1)
        conv_intermediate = nn.Conv2d(GEN_SIZE*4 + c_dim, GEN_SIZE*4, 3, 1, padding=1)
        # Fuses the concatenated (s-features + reshaped z) channels.
        conv_1 = nn.Conv2d(GEN_SIZE*4+GEN_SIZE*2, GEN_SIZE*8, kernel_size=3, stride=1, padding=1)
        nn.init.xavier_uniform(conv_intermediate.weight.data, 1.)
        nn.init.xavier_uniform(self.dense.weight.data, 1.)
        nn.init.xavier_uniform(self.final.weight.data, 1.)
        nn.init.xavier_uniform(conv_s1.weight.data, 1.)
        nn.init.xavier_uniform(conv_s2.weight.data, 1.)
        nn.init.xavier_uniform(conv_s3.weight.data, 1.)
        nn.init.xavier_uniform(conv_1.weight.data, 1.)
        self.model1 = nn.Sequential(
            conv_1,
            ResBlockGenerator(GEN_SIZE*8, GEN_SIZE*4, stride=2),
            nn.InstanceNorm2d(GEN_SIZE*4, affine=True),
            nn.ReLU(),
        )
        self.model2 = nn.Sequential(
            conv_intermediate,
            ResBlockGenerator(GEN_SIZE*4, GEN_SIZE*2, stride=2),
            ResBlockGenerator(GEN_SIZE*2, GEN_SIZE, stride=2),
            nn.InstanceNorm2d(GEN_SIZE, affine=True),
            nn.ReLU(),
            self.final,
            nn.Tanh())
        self.model_s = nn.Sequential(
            conv_s1,
            nn.InstanceNorm2d(GEN_SIZE, affine=True),
            nn.ReLU(),
            conv_s2,
            nn.InstanceNorm2d(GEN_SIZE*2, affine=True),
            nn.ReLU(),
            conv_s3,
            nn.InstanceNorm2d(GEN_SIZE*4, affine=True),
            nn.ReLU()
            )

    def forward(self, z, c, s):
        # Encode s, then reshape the projected latent to match it spatially.
        s = self.model_s(s)
        z = self.dense(z).view(s.size(0), -1, s.size(2), s.size(3))
        h = torch.cat([s, z], dim=1)
        h = self.model1(h)
        # Broadcast the domain code c and concatenate before decoding.
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), h.size(2), h.size(3))
        h = torch.cat([h, c], dim=1)
        return self.model2(h)
class Discriminator(nn.Module):
    """ResNet discriminator with an auxiliary domain classifier head.

    A shared feature extractor (five stride-2 residual blocks, then a
    stride-1 block, ReLU and AvgPool2d(4) — consistent with 128x128 inputs
    reduced to 4x4) feeds two heads: a spectrally-normalized real/fake
    logit and a plain linear per-domain classifier.
    """

    def __init__(self, c_dim):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            FirstResBlockDiscriminator(channels, DISC_SIZE, stride=2),
            ResBlockDiscriminator(DISC_SIZE, DISC_SIZE*2, stride=2),
            ResBlockDiscriminator(DISC_SIZE*2, DISC_SIZE*4, stride=2),
            ResBlockDiscriminator(DISC_SIZE*4, DISC_SIZE*8, stride=2),
            ResBlockDiscriminator(DISC_SIZE*8, DISC_SIZE*16, stride=2),
            ResBlockDiscriminator(DISC_SIZE*16, DISC_SIZE*16),
            nn.ReLU(),
            nn.AvgPool2d(4),
        )
        # Real/fake head, wrapped in spectral normalization after init.
        self.fc = nn.Linear(DISC_SIZE*16, 1)
        nn.init.xavier_uniform(self.fc.weight.data, 1.)
        self.fc = SpectralNorm(self.fc)
        # Auxiliary classifier head (no spectral norm applied here).
        self.classify = nn.Linear(DISC_SIZE*16, c_dim)
        nn.init.xavier_uniform(self.classify.weight.data, 1.)

    def forward(self, x):
        h = self.model(x).view(-1,DISC_SIZE*16)
        return self.fc(h), self.classify(h)
class Generator_CNN(nn.Module):
    # Network Architecture is exactly same as in infoGAN (https://arxiv.org/abs/1606.03657)
    # Architecture : FC1024_BR-FC7x7x128_BR-(64)4dc2s_BR-(1)4dc2s_S
    """DCGAN-style conditional generator producing 3x128x128 Tanh images.

    The latent is lifted to a 1024x4x4 map, upsampled twice, then the
    one-hot condition c is broadcast spatially and concatenated before
    three further transposed convolutions.
    """

    def __init__(self, z_dim, c_dim):
        super(Generator_CNN, self).__init__()
        self.input_height = 128
        self.input_width = 128
        self.input_dim = z_dim
        self.output_dim = 3

        self.fc = nn.Sequential(
            nn.Linear(self.input_dim, 1024 * 4 * 4),
            nn.BatchNorm1d(1024 * 4 * 4),
            nn.ReLU(),
        )
        self.deconv1 = nn.Sequential(
            nn.ConvTranspose2d(1024, 512, 4, 2, 1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.ConvTranspose2d(512, 256, 4, 2, 1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        self.deconv2 = nn.Sequential(
            nn.ConvTranspose2d(256 + c_dim, 128, 4, 2, 1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, 4, 2, 1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
            nn.Tanh(),
        )

    def forward(self, input, c):
        feat = self.fc(input).view(-1, 1024, 4, 4)
        feat = self.deconv1(feat)
        # Broadcast the condition over the spatial grid and concatenate.
        cond = c.unsqueeze(2).unsqueeze(3).expand(
            c.size(0), c.size(1), feat.size(2), feat.size(3))
        return self.deconv2(torch.cat([feat, cond], dim=1))
class Discriminator_CNN(nn.Module):
"""Discriminator. PatchGAN."""
def __init__(self, c_dim=5):
super(Discriminator_CNN, self).__init__()
image_size=128
conv_dim=64
repeat_num=6
layers = []
layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01, inplace=True))
curr_dim = conv_dim
for i in range(1, repeat_num):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01, inplace=True))
curr_dim = curr_dim * 2
k_size = int(image_size / np.power(2, repeat_num))
self.main = nn.Sequential(*layers)
self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=k_size, bias=False)
def forward(self, x):
h = self.main(x)
out_real = self.conv1(h)
out_aux = self.conv2(h)
return out_real.squeeze(), out_aux.squeeze()
class Segmentor(nn.Module):
"""Segmentor."""
def __init__(self, conv_dim=64, repeat_num=4):
super(Segmentor, self).__init__()
layers = []
layers.append(nn.Conv2d(3, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
layers.append(nn.ReLU(inplace=True))
# Down-Sampling
curr_dim = conv_dim
for i in range(2):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim * 2
# Bottleneck
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
# Up-Sampling
for i in range(2):
layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim // 2
layers.append(nn.Conv2d(curr_dim, 7, kernel_size=7, stride=1, padding=3, bias=False))
# layers.append(nn.LogSoftmax())
# layers.append(nn.Softmax2d())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x) | 38.32 | 116 | 0.588358 |
641d2097aa0caafa416d02fad016f21cfaef1a9a | 1,443 | py | Python | bench/test_hpack.py | steamraven/hpack | c76d07a6b07a3473bde21b972353be3863a9b68f | [
"MIT"
] | 55 | 2015-07-07T15:33:23.000Z | 2022-01-23T18:30:21.000Z | bench/test_hpack.py | steamraven/hpack | c76d07a6b07a3473bde21b972353be3863a9b68f | [
"MIT"
] | 69 | 2015-06-29T17:25:39.000Z | 2021-11-20T18:22:12.000Z | bench/test_hpack.py | steamraven/hpack | c76d07a6b07a3473bde21b972353be3863a9b68f | [
"MIT"
] | 28 | 2015-11-12T13:35:53.000Z | 2021-04-29T12:05:59.000Z | from hpack.hpack import (
encode_integer,
decode_integer
)
class TestHpackEncodingIntegersBenchmarks:
def test_encode_small_integer_large_prefix(self, benchmark):
benchmark(encode_integer, integer=120, prefix_bits=7)
def test_encode_small_integer_small_prefix(self, benchmark):
benchmark(encode_integer, integer=120, prefix_bits=1)
def test_encode_large_integer_large_prefix(self, benchmark):
benchmark(encode_integer, integer=120000, prefix_bits=7)
def test_encode_large_integer_small_prefix(self, benchmark):
benchmark(encode_integer, integer=120000, prefix_bits=1)
class TestHpackDecodingIntegersBenchmarks:
def test_decode_small_integer_large_prefix(self, benchmark):
data = bytes(encode_integer(integer=120, prefix_bits=7))
benchmark(decode_integer, data=data, prefix_bits=7)
def test_decode_small_integer_small_prefix(self, benchmark):
data = bytes(encode_integer(integer=120, prefix_bits=1))
benchmark(decode_integer, data=data, prefix_bits=1)
def test_decode_large_integer_large_prefix(self, benchmark):
data = bytes(encode_integer(integer=120000, prefix_bits=7))
benchmark(decode_integer, data=data, prefix_bits=7)
def test_decode_large_integer_small_prefix(self, benchmark):
data = bytes(encode_integer(integer=120000, prefix_bits=1))
benchmark(decode_integer, data=data, prefix_bits=1)
| 39 | 67 | 0.765073 |
6f62727ae39fc1cc3aad284a60b706569de775f6 | 2,436 | py | Python | tempson/RestrictedPython/MutatingWalker.py | tempson-py/tempson | dc25378a4466766da986613bc3a1f8f51519bad1 | [
"MIT"
] | 1 | 2016-12-27T13:23:18.000Z | 2016-12-27T13:23:18.000Z | tempson/RestrictedPython/MutatingWalker.py | tempson-py/tempson | dc25378a4466766da986613bc3a1f8f51519bad1 | [
"MIT"
] | null | null | null | tempson/RestrictedPython/MutatingWalker.py | tempson-py/tempson | dc25378a4466766da986613bc3a1f8f51519bad1 | [
"MIT"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
__version__='$Revision: 1.6 $'[11:-2]
from SelectCompiler import ast
ListType = type([])
TupleType = type(())
SequenceTypes = (ListType, TupleType)
class MutatingWalker:
def __init__(self, visitor):
self.visitor = visitor
self._cache = {}
def defaultVisitNode(self, node, walker=None, exclude=None):
for name, child in node.__dict__.items():
if exclude is not None and name in exclude:
continue
v = self.dispatchObject(child)
if v is not child:
# Replace the node.
node.__dict__[name] = v
return node
def visitSequence(self, seq):
res = seq
for idx in range(len(seq)):
child = seq[idx]
v = self.dispatchObject(child)
if v is not child:
# Change the sequence.
if type(res) is ListType:
res[idx : idx + 1] = [v]
else:
res = res[:idx] + (v,) + res[idx + 1:]
return res
def dispatchObject(self, ob):
'''
Expected to return either ob or something that will take
its place.
'''
if isinstance(ob, ast.Node):
return self.dispatchNode(ob)
elif type(ob) in SequenceTypes:
return self.visitSequence(ob)
else:
return ob
def dispatchNode(self, node):
klass = node.__class__
meth = self._cache.get(klass, None)
if meth is None:
className = klass.__name__
meth = getattr(self.visitor, 'visit' + className,
self.defaultVisitNode)
self._cache[klass] = meth
return meth(node, self)
def walk(tree, visitor):
return MutatingWalker(visitor).dispatchNode(tree)
| 32.48 | 78 | 0.548851 |
76b927788ce9fa127f2c87178b6fc389f5acbaf2 | 5,511 | py | Python | contrib/seeds/makeseeds.py | nscoincommunity/XDNA | 07d57142f30ee39bbe1ccc6149e7a017ea59fad4 | [
"MIT"
] | null | null | null | contrib/seeds/makeseeds.py | nscoincommunity/XDNA | 07d57142f30ee39bbe1ccc6149e7a017ea59fad4 | [
"MIT"
] | null | null | null | contrib/seeds/makeseeds.py | nscoincommunity/XDNA | 07d57142f30ee39bbe1ccc6149e7a017ea59fad4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 1
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/NSCOINCore:1.0.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.040698 | 186 | 0.566685 |
cb43b86e9dfc13d150745365cf3fa01b5ee89b06 | 1,586 | py | Python | package_settings.py | bloomsburyai/cape-api-helpers | 76881b2ec208451e03bd4f2f22c77fb4de184bde | [
"Apache-2.0"
] | 6 | 2018-07-26T21:47:45.000Z | 2018-08-23T12:27:44.000Z | package_settings.py | bloomsburyai/cape-document-manager | 4f1450c1da7fc6ee5c02f3fd4fb3c92aa3983782 | [
"Apache-2.0"
] | null | null | null | package_settings.py | bloomsburyai/cape-document-manager | 4f1450c1da7fc6ee5c02f3fd4fb3c92aa3983782 | [
"Apache-2.0"
] | 9 | 2018-09-27T14:03:34.000Z | 2020-12-14T20:16:04.000Z | # Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from urllib.parse import urlparse
from setuptools import find_packages
_THIS_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__)))
_README_FILEPATH = os.path.join(_THIS_FOLDER, 'README.md')
if os.path.isfile(_README_FILEPATH):
with open(_README_FILEPATH) as file_pointer:
DESCRIPTION = file_pointer.read()
else:
DESCRIPTION = ''
# setup tools renames the folders so this does not work : os.path.split(THIS_FOLDER)[1]
_origin = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']).strip().decode()
NAME = os.path.split(urlparse(_origin).path)[1]
VERSION = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode()
PACKAGES = find_packages(exclude=['tests'])
_VERSION_FILEPATH = os.path.join(_THIS_FOLDER, PACKAGES[0], 'version.py')
with open(_VERSION_FILEPATH, 'w') as version_file:
version_file.write(f"""
VERSION = ""\"{VERSION}""\"
NAME = ""\"{NAME}""\"
DESCRIPTION = ""\"{DESCRIPTION}""\"
""")
| 37.761905 | 99 | 0.733922 |
24b1f9ec5f919bdb1ef1f683673501508c9b17c1 | 553 | py | Python | benchmarks/query_benchmarks/query_dates/benchmark.py | deepakdinesh1123/actions | 859455c8582f6e3fc4d65b7266163f4276d04127 | [
"MIT"
] | null | null | null | benchmarks/query_benchmarks/query_dates/benchmark.py | deepakdinesh1123/actions | 859455c8582f6e3fc4d65b7266163f4276d04127 | [
"MIT"
] | null | null | null | benchmarks/query_benchmarks/query_dates/benchmark.py | deepakdinesh1123/actions | 859455c8582f6e3fc4d65b7266163f4276d04127 | [
"MIT"
] | null | null | null | from ...utils import bench_setup
from .models import Book
class QueryDates:
def setup(self):
bench_setup(migrate=True)
def time_query_dates(self):
list(Book.objects.dates("created_date", "year", "ASC"))
list(Book.objects.dates("created_date", "year", "DESC"))
list(Book.objects.dates("created_date", "month", "ASC"))
list(Book.objects.dates("created_date", "month", "DESC"))
list(Book.objects.dates("created_date", "day", "ASC"))
list(Book.objects.dates("created_date", "day", "DESC"))
| 34.5625 | 65 | 0.641953 |
cce2522a6d83678ba73113a4b244e8f8fa785cc4 | 4,520 | py | Python | src/python/pants/backend/project_info/dependees_test.py | gshuflin/pants | cf483ead6d4d4a4cc4fc4ae18e3b5b633509d933 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/project_info/dependees_test.py | gshuflin/pants | cf483ead6d4d4a4cc4fc4ae18e3b5b633509d933 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/project_info/dependees_test.py | gshuflin/pants | cf483ead6d4d4a4cc4fc4ae18e3b5b633509d933 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
from typing import List
from pants.backend.project_info.dependees import DependeesGoal
from pants.backend.project_info.dependees import DependeesOutputFormat as OutputFormat
from pants.backend.project_info.dependees import rules as dependee_rules
from pants.engine.target import Dependencies, Target
from pants.testutil.test_base import TestBase
class MockTarget(Target):
alias = "tgt"
core_fields = (Dependencies,)
class DependeesTest(TestBase):
@classmethod
def target_types(cls):
return [MockTarget]
@classmethod
def rules(cls):
return (*super().rules(), *dependee_rules())
def setUp(self) -> None:
super().setUp()
self.add_to_build_file("base", "tgt()")
self.add_to_build_file("intermediate", "tgt(dependencies=['base'])")
self.add_to_build_file("leaf", "tgt(dependencies=['intermediate'])")
def assert_dependees(
self,
*,
targets: List[str],
expected: List[str],
transitive: bool = False,
closed: bool = False,
output_format: OutputFormat = OutputFormat.text,
) -> None:
args = [f"--output-format={output_format.value}"]
if transitive:
args.append("--transitive")
if closed:
args.append("--closed")
result = self.run_goal_rule(DependeesGoal, args=[*args, *targets])
assert result.stdout.splitlines() == expected
def test_no_targets(self) -> None:
self.assert_dependees(targets=[], expected=[])
self.assert_dependees(targets=[], output_format=OutputFormat.json, expected=["{}"])
def test_normal(self) -> None:
self.assert_dependees(targets=["base"], expected=["intermediate"])
self.assert_dependees(
targets=["base"],
output_format=OutputFormat.json,
expected=dedent(
"""\
{
"base": [
"intermediate"
]
}"""
).splitlines(),
)
def test_no_dependees(self) -> None:
self.assert_dependees(targets=["leaf"], expected=[])
self.assert_dependees(
targets=["leaf"],
output_format=OutputFormat.json,
expected=dedent(
"""\
{
"leaf": []
}"""
).splitlines(),
)
def test_closed(self) -> None:
self.assert_dependees(targets=["leaf"], closed=True, expected=["leaf"])
self.assert_dependees(
targets=["leaf"],
closed=True,
output_format=OutputFormat.json,
expected=dedent(
"""\
{
"leaf": [
"leaf"
]
}"""
).splitlines(),
)
def test_transitive(self) -> None:
self.assert_dependees(targets=["base"], transitive=True, expected=["intermediate", "leaf"])
self.assert_dependees(
targets=["base"],
transitive=True,
output_format=OutputFormat.json,
expected=dedent(
"""\
{
"base": [
"intermediate",
"leaf"
]
}"""
).splitlines(),
)
def test_multiple_specified_targets(self) -> None:
# This tests that --output-format=text will deduplicate and that --output-format=json will
# preserve which dependee belongs to which specified target.
self.assert_dependees(
targets=["base", "intermediate"],
transitive=True,
# NB: `intermediate` is not included because it's a root and we have `--no-closed`.
expected=["leaf"],
)
self.assert_dependees(
targets=["base", "intermediate"],
transitive=True,
output_format=OutputFormat.json,
expected=dedent(
"""\
{
"base": [
"intermediate",
"leaf"
],
"intermediate": [
"leaf"
]
}"""
).splitlines(),
)
| 31.830986 | 99 | 0.520133 |
db17471feb5f603ad33c546888b1f9c07fb23763 | 357 | py | Python | PythonExercicios/ex017.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | PythonExercicios/ex017.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | PythonExercicios/ex017.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | #Faça um programa que leia a comprimento do cateto oposto e do cateto adjacente de um
# triângulo retângulo, calcule a mostre o compriemnto da hipotenusa
from math import hypot
co = float(input('Qual o comprimento do cateto oposto:'))
ca = float(input('Qual o comprimento do cateto adjacente:'))
print('A hipotenusa vai medir {:.2f}'.format(hypot(co, ca)))
| 51 | 85 | 0.761905 |
91bd0cf1c11ad4c777a050fa4d831bf14ad0b72e | 1,149 | py | Python | Coinbase/coinbase-netdud.py | MadDud/coinbase-netdud | 731784b4287b8ace8ea009475edfc6a1ab5191c6 | [
"MIT"
] | null | null | null | Coinbase/coinbase-netdud.py | MadDud/coinbase-netdud | 731784b4287b8ace8ea009475edfc6a1ab5191c6 | [
"MIT"
] | null | null | null | Coinbase/coinbase-netdud.py | MadDud/coinbase-netdud | 731784b4287b8ace8ea009475edfc6a1ab5191c6 | [
"MIT"
] | null | null | null | import sys
import configparser
print("Coinbase netdud - v1.0")
print("---")
def getCoinbaseData(path):
import urllib2, json
import hmac
import hashlib
import base64
import time
import json
import configparser
# read config
config = configparser.ConfigParser()
config.read('coinbase-netdud.conf')
apiKey = str(config.get('default','apiKey'))
apiSecret = str(config.get('default','apiSecret'))
# main
url = "https://api.coinbase.com%s" % path
headers = {}
timestamp = int(time.time())
headers['CB-ACCESS-KEY'] = apiKey
headers['CB-ACCESS-TIMESTAMP'] = timestamp
headers['CB-VERSION'] = '2018-02-01'
toSign = str(timestamp) + 'GET' + path
signature = hmac.new(apiSecret, toSign, hashlib.sha256).hexdigest()
headers['CB-ACCESS-SIGN'] = signature
try:
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request, timeout=2.5)
data = json.loads(response.read())
print json.dumps(data,indent=3)
except Exception as e:
print "ERROR: %s" % str(e)
getCoinbaseData('/v2/'+str(sys.argv[1]))
| 24.446809 | 71 | 0.641427 |
75b0d9b2ed11a401d21346a64812a9f3b8729764 | 6,861 | py | Python | test/test_input_format.py | DaniFdezAlvarez/shexer | 4f4dffc95bcad038cbc1bd85b58e5558c7c0a6d1 | [
"Apache-2.0"
] | 16 | 2019-03-18T21:32:36.000Z | 2022-03-28T17:53:57.000Z | test/test_input_format.py | DaniFdezAlvarez/shexer | 4f4dffc95bcad038cbc1bd85b58e5558c7c0a6d1 | [
"Apache-2.0"
] | 67 | 2019-02-27T12:58:55.000Z | 2022-03-28T20:38:48.000Z | test/test_input_format.py | DaniFdezAlvarez/shexer | 4f4dffc95bcad038cbc1bd85b58e5558c7c0a6d1 | [
"Apache-2.0"
] | 2 | 2019-03-09T00:30:28.000Z | 2020-01-09T18:04:14.000Z | import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, G1_JSON_LD, G1_NT, G1_TSVO_SPO, G1_XML, G1_N3, \
default_namespaces, G1_TTL_WITH_BASE, G1_TTL_WITH_USELESS_BNODE
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import NT, TSV_SPO, RDF_XML, JSON_LD, N3, TURTLE, TURTLE_ITER
_BASE_DIR = BASE_FILES + "general" + pth.sep
class TestInputFormat(unittest.TestCase):
def test_ttl(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_nt(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_NT,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=NT,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_n3(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_N3,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=N3,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_tsv_spo(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_TSVO_SPO,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TSV_SPO,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_xml(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_XML,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=RDF_XML,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_json_ld(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_JSON_LD,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=JSON_LD,
disable_comments=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_ttl_iter(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE_ITER,
disable_comments=True,
infer_numeric_types_for_untyped_literals=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_ttl_iter_with_base(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_TTL_WITH_BASE,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE_ITER,
disable_comments=True,
infer_numeric_types_for_untyped_literals=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
def test_ttl_iter_with_bnode(self):
shaper = Shaper(target_classes=["http://xmlns.com/foaf/0.1/Person",
"http://xmlns.com/foaf/0.1/Document"],
graph_file_input=G1_TTL_WITH_USELESS_BNODE,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE_ITER,
disable_comments=True,
infer_numeric_types_for_untyped_literals=True)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "g1_all_classes_no_comments.shex",
str_target=str_result))
| 53.601563 | 110 | 0.555458 |
1545246e6b800eb7e51969735a523edce9dc68eb | 18,092 | py | Python | src/garage/torch/algos/vpg.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | src/garage/torch/algos/vpg.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | src/garage/torch/algos/vpg.py | bainro/garage | c5afbb19524792d9bbad9b9741f45e1d48ddca3d | [
"MIT"
] | null | null | null | """Vanilla Policy Gradient (REINFORCE)."""
import collections
import copy
from dowel import tabular
import numpy as np
import torch
import torch.nn.functional as F
from garage import log_performance, TrajectoryBatch
from garage.misc import tensor_utils as tu
from garage.np.algos import BatchPolopt
from garage.torch.algos import (compute_advantages, filter_valids, pad_to_last)
from garage.torch.optimizers import OptimizerWrapper
class VPG(BatchPolopt):
"""Vanilla Policy Gradient (REINFORCE).
VPG, also known as Reinforce, trains stochastic policy in an on-policy way.
Args:
env_spec (garage.envs.EnvSpec): Environment specification.
policy (garage.torch.policies.base.Policy): Policy.
value_function (garage.torch.value_functions.ValueFunction): The value
function.
policy_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer
for policy.
vf_optimizer (garage.torch.optimizer.OptimizerWrapper): Optimizer for
value function.
max_path_length (int): Maximum length of a single rollout.
num_train_per_epoch (int): Number of train_once calls per epoch.
discount (float): Discount.
gae_lambda (float): Lambda used for generalized advantage
estimation.
center_adv (bool): Whether to rescale the advantages
so that they have mean 0 and standard deviation 1.
positive_adv (bool): Whether to shift the advantages
so that they are always positive. When used in
conjunction with center_adv the advantages will be
standardized before shifting.
policy_ent_coeff (float): The coefficient of the policy entropy.
Setting it to zero would mean no entropy regularization.
use_softplus_entropy (bool): Whether to estimate the softmax
distribution of the entropy to prevent the entropy from being
negative.
stop_entropy_gradient (bool): Whether to stop the entropy gradient.
entropy_method (str): A string from: 'max', 'regularized',
'no_entropy'. The type of entropy method to use. 'max' adds the
dense entropy to the reward for each time step. 'regularized' adds
the mean entropy to the surrogate objective. See
https://arxiv.org/abs/1805.00909 for more details.
"""
def __init__(
self,
env_spec,
policy,
value_function,
policy_optimizer=None,
vf_optimizer=None,
max_path_length=500,
num_train_per_epoch=1,
discount=0.99,
gae_lambda=1,
center_adv=True,
positive_adv=False,
policy_ent_coeff=0.0,
use_softplus_entropy=False,
stop_entropy_gradient=False,
entropy_method='no_entropy',
):
self._value_function = value_function
self._gae_lambda = gae_lambda
self._center_adv = center_adv
self._positive_adv = positive_adv
self._policy_ent_coeff = policy_ent_coeff
self._use_softplus_entropy = use_softplus_entropy
self._stop_entropy_gradient = stop_entropy_gradient
self._entropy_method = entropy_method
self._maximum_entropy = (entropy_method == 'max')
self._entropy_regularzied = (entropy_method == 'regularized')
self._check_entropy_configuration(entropy_method, center_adv,
stop_entropy_gradient,
policy_ent_coeff)
self._episode_reward_mean = collections.deque(maxlen=100)
if policy_optimizer:
self._policy_optimizer = policy_optimizer
else:
self._policy_optimizer = OptimizerWrapper(torch.optim.Adam, policy)
if vf_optimizer:
self._vf_optimizer = vf_optimizer
else:
self._vf_optimizer = OptimizerWrapper(torch.optim.Adam,
value_function)
super().__init__(env_spec=env_spec,
policy=policy,
baseline=value_function,
discount=discount,
max_path_length=max_path_length,
n_samples=num_train_per_epoch)
self._old_policy = copy.deepcopy(self.policy)
@staticmethod
def _check_entropy_configuration(entropy_method, center_adv,
stop_entropy_gradient, policy_ent_coeff):
if entropy_method not in ('max', 'regularized', 'no_entropy'):
raise ValueError('Invalid entropy_method')
if entropy_method == 'max':
if center_adv:
raise ValueError('center_adv should be False when '
'entropy_method is max')
if not stop_entropy_gradient:
raise ValueError('stop_gradient should be True when '
'entropy_method is max')
if entropy_method == 'no_entropy':
if policy_ent_coeff != 0.0:
raise ValueError('policy_ent_coeff should be zero '
'when there is no entropy method')
    def train_once(self, itr, paths):
        """Train the algorithm once.
        Args:
            itr (int): Iteration number.
            paths (list[dict]): A list of collected paths.
        Returns:
            numpy.float64: Calculated mean value of undiscounted returns.
        """
        obs, actions, rewards, returns, valids, baselines = \
            self.process_samples(itr, paths)
        if self._maximum_entropy:
            # Max-entropy RL: fold the scaled policy entropy into the rewards.
            policy_entropies = self._compute_policy_entropy(obs)
            rewards += self._policy_ent_coeff * policy_entropies
        # Flatten the padded (N, P, ...) tensors down to valid timesteps only.
        obs_flat = torch.cat(filter_valids(obs, valids))
        actions_flat = torch.cat(filter_valids(actions, valids))
        rewards_flat = torch.cat(filter_valids(rewards, valids))
        returns_flat = torch.cat(filter_valids(returns, valids))
        advs_flat = self._compute_advantage(rewards, valids, baselines)
        # Snapshot losses and KL before the update, for logging the deltas.
        with torch.no_grad():
            policy_loss_before = self._compute_loss_with_adv(
                obs_flat, actions_flat, rewards_flat, advs_flat)
            vf_loss_before = self._value_function.compute_loss(
                obs_flat, returns_flat)
            kl_before = self._compute_kl_constraint(obs)
        self._train(obs_flat, actions_flat, rewards_flat, returns_flat,
                    advs_flat)
        # Re-evaluate the same quantities after the update.
        with torch.no_grad():
            policy_loss_after = self._compute_loss_with_adv(
                obs_flat, actions_flat, rewards_flat, advs_flat)
            vf_loss_after = self._value_function.compute_loss(
                obs_flat, returns_flat)
            kl_after = self._compute_kl_constraint(obs)
            policy_entropy = self._compute_policy_entropy(obs)
        with tabular.prefix(self.policy.name):
            tabular.record('/LossBefore', policy_loss_before.item())
            tabular.record('/LossAfter', policy_loss_after.item())
            tabular.record('/dLoss',
                           (policy_loss_before - policy_loss_after).item())
            tabular.record('/KLBefore', kl_before.item())
            tabular.record('/KL', kl_after.item())
            tabular.record('/Entropy', policy_entropy.mean().item())
        with tabular.prefix(self._value_function.name):
            tabular.record('/LossBefore', vf_loss_before.item())
            tabular.record('/LossAfter', vf_loss_after.item())
            tabular.record('/dLoss',
                           vf_loss_before.item() - vf_loss_after.item())
        # Refresh the KL reference policy with the just-updated weights.
        self._old_policy.load_state_dict(self.policy.state_dict())
        undiscounted_returns = log_performance(
            itr,
            TrajectoryBatch.from_trajectory_list(self.env_spec, paths),
            discount=self.discount)
        return np.mean(undiscounted_returns)
def _train(self, obs, actions, rewards, returns, advs):
r"""Train the policy and value function with minibatch.
Args:
obs (torch.Tensor): Observation from the environment with shape
:math:`(N, O*)`.
actions (torch.Tensor): Actions fed to the environment with shape
:math:`(N, A*)`.
rewards (torch.Tensor): Acquired rewards with shape :math:`(N, )`.
returns (torch.Tensor): Acquired returns with shape :math:`(N, )`.
advs (torch.Tensor): Advantage value at each step with shape
:math:`(N, )`.
"""
for dataset in self._policy_optimizer.get_minibatch(
obs, actions, rewards, advs):
self._train_policy(*dataset)
for dataset in self._vf_optimizer.get_minibatch(obs, returns):
self._train_value_function(*dataset)
    def _train_policy(self, obs, actions, rewards, advantages):
        r"""Train the policy.
        Args:
            obs (torch.Tensor): Observation from the environment
                with shape :math:`(N, O*)`.
            actions (torch.Tensor): Actions fed to the environment
                with shape :math:`(N, A*)`.
            rewards (torch.Tensor): Acquired rewards
                with shape :math:`(N, )`.
            advantages (torch.Tensor): Advantage value at each step
                with shape :math:`(N, )`.
        Returns:
            torch.Tensor: Calculated mean scalar value of policy loss (float).
        """
        # Standard gradient step: clear stale gradients, differentiate the
        # surrogate loss, then let the wrapped optimizer apply the update.
        self._policy_optimizer.zero_grad()
        loss = self._compute_loss_with_adv(obs, actions, rewards, advantages)
        loss.backward()
        self._policy_optimizer.step()
        return loss
    def _train_value_function(self, obs, returns):
        r"""Train the value function.
        Args:
            obs (torch.Tensor): Observation from the environment
                with shape :math:`(N, O*)`.
            returns (torch.Tensor): Acquired returns
                with shape :math:`(N, )`.
        Returns:
            torch.Tensor: Calculated mean scalar value of value function loss
                (float).
        """
        # Same zero_grad / backward / step sequence as the policy update,
        # but against the value function's own regression loss.
        self._vf_optimizer.zero_grad()
        loss = self._value_function.compute_loss(obs, returns)
        loss.backward()
        self._vf_optimizer.step()
        return loss
def _compute_loss(self, obs, actions, rewards, valids, baselines):
r"""Compute mean value of loss.
Notes: P is the maximum path length (self.max_path_length)
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N, P, O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N, P, A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N, P)`.
valids (list[int]): Numbers of valid steps in each paths
baselines (torch.Tensor): Value function estimation at each step
with shape :math:`(N, P)`.
Returns:
torch.Tensor: Calculated negative mean scalar value of
objective (float).
"""
obs_flat = torch.cat(filter_valids(obs, valids))
actions_flat = torch.cat(filter_valids(actions, valids))
rewards_flat = torch.cat(filter_valids(rewards, valids))
advantages_flat = self._compute_advantage(rewards, valids, baselines)
return self._compute_loss_with_adv(obs_flat, actions_flat,
rewards_flat, advantages_flat)
def _compute_loss_with_adv(self, obs, actions, rewards, advantages):
r"""Compute mean value of loss.
Args:
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated negative mean scalar value of objective.
"""
objectives = self._compute_objective(advantages, obs, actions, rewards)
if self._entropy_regularzied:
policy_entropies = self._compute_policy_entropy(obs)
objectives += self._policy_ent_coeff * policy_entropies
return -objectives.mean()
    def _compute_advantage(self, rewards, valids, baselines):
        r"""Compute flattened (optionally normalized) advantage values.
        Notes: P is the maximum path length (self.max_path_length)
        Args:
            rewards (torch.Tensor): Acquired rewards
                with shape :math:`(N, P)`.
            valids (list[int]): Numbers of valid steps in each paths
            baselines (torch.Tensor): Value function estimation at each step
                with shape :math:`(N, P)`.
        Returns:
            torch.Tensor: Calculated advantage values given rewards and
                baselines with shape :math:`(N \dot [T], )`.
        """
        advantages = compute_advantages(self.discount, self._gae_lambda,
                                        self.max_path_length, baselines,
                                        rewards)
        # Drop padded timesteps before any normalization.
        advantage_flat = torch.cat(filter_valids(advantages, valids))
        if self._center_adv:
            means = advantage_flat.mean()
            variance = advantage_flat.var()
            # NOTE(review): this divides by the *variance*, not the standard
            # deviation -- conventional advantage normalization divides by
            # std() + eps. Looks like a bug; confirm upstream before changing.
            advantage_flat = (advantage_flat - means) / (variance + 1e-8)
        if self._positive_adv:
            # Shift so the smallest advantage becomes exactly zero.
            advantage_flat -= advantage_flat.min()
        return advantage_flat
    def _compute_kl_constraint(self, obs):
        r"""Compute KL divergence.
        Compute the KL divergence between the old policy distribution and
        current policy distribution.
        Notes: P is the maximum path length (self.max_path_length)
        Args:
            obs (torch.Tensor): Observation from the environment
                with shape :math:`(N, P, O*)`.
        Returns:
            torch.Tensor: Calculated mean scalar value of KL divergence
                (float).
        """
        with torch.no_grad():
            # The snapshot policy is a constant reference: no graph is built.
            old_dist = self._old_policy(obs)
        # Evaluated OUTSIDE no_grad so gradients can flow through the
        # current policy's distribution parameters.
        new_dist = self.policy(obs)
        kl_constraint = torch.distributions.kl.kl_divergence(
            old_dist, new_dist)
        return kl_constraint.mean()
    def _compute_policy_entropy(self, obs):
        r"""Compute entropy value of probability distribution.
        Notes: P is the maximum path length (self.max_path_length)
        Args:
            obs (torch.Tensor): Observation from the environment
                with shape :math:`(N, P, O*)`.
        Returns:
            torch.Tensor: Calculated entropy values given observation
                with shape :math:`(N, P)`.
        """
        if self._stop_entropy_gradient:
            # Compute the entropy without recording autograd history, so the
            # entropy bonus cannot contribute gradients to the policy.
            with torch.no_grad():
                policy_entropy = self.policy.entropy(obs)
        else:
            policy_entropy = self.policy.entropy(obs)
        # This prevents entropy from becoming negative for small policy std
        if self._use_softplus_entropy:
            policy_entropy = F.softplus(policy_entropy)
        return policy_entropy
def _compute_objective(self, advantages, obs, actions, rewards):
r"""Compute objective value.
Args:
advantages (torch.Tensor): Advantage value at each step
with shape :math:`(N \dot [T], )`.
obs (torch.Tensor): Observation from the environment
with shape :math:`(N \dot [T], O*)`.
actions (torch.Tensor): Actions fed to the environment
with shape :math:`(N \dot [T], A*)`.
rewards (torch.Tensor): Acquired rewards
with shape :math:`(N \dot [T], )`.
Returns:
torch.Tensor: Calculated objective values
with shape :math:`(N \dot [T], )`.
"""
del rewards
log_likelihoods = self.policy.log_likelihood(obs, actions)
return log_likelihoods * advantages
    def process_samples(self, itr, paths):
        r"""Process sample data based on the collected paths.
        Notes: P is the maximum path length (self.max_path_length)
        Args:
            itr (int): Iteration number.
            paths (list[dict]): A list of collected paths
        Returns:
            torch.Tensor: The observations of the environment
                with shape :math:`(N, P, O*)`.
            torch.Tensor: The actions fed to the environment
                with shape :math:`(N, P, A*)`.
            torch.Tensor: The acquired rewards with shape :math:`(N, P)`.
            list[int]: Numbers of valid steps in each paths.
            torch.Tensor: Value function estimation at each step
                with shape :math:`(N, P)`.
        """
        # Per-path episode lengths; everything below is padded to
        # self.max_path_length so paths can be stacked into single tensors.
        valids = torch.Tensor([len(path['actions']) for path in paths]).int()
        obs = torch.stack([
            pad_to_last(path['observations'],
                        total_length=self.max_path_length,
                        axis=0) for path in paths
        ])
        actions = torch.stack([
            pad_to_last(path['actions'],
                        total_length=self.max_path_length,
                        axis=0) for path in paths
        ])
        rewards = torch.stack([
            pad_to_last(path['rewards'], total_length=self.max_path_length)
            for path in paths
        ])
        # Discounted reward-to-go per timestep; .copy() yields a contiguous
        # array torch can consume.
        returns = torch.stack([
            pad_to_last(tu.discount_cumsum(path['rewards'],
                                           self.discount).copy(),
                        total_length=self.max_path_length) for path in paths
        ])
        # Baseline estimates are produced without building an autograd graph.
        with torch.no_grad():
            baselines = self._value_function(obs)
        return obs, actions, rewards, returns, valids, baselines
| 39.075594 | 79 | 0.597557 |
ef246265f69b06db5df91cec2e6e0423aa371da1 | 958 | py | Python | connect.py | Mozilla-GitHub-Standards/1b5242f34a4e5d2d3d71c27b0d7d100be5a497cf08caad050cbbccb52c91dc98 | cc37cb87dc504a74defe6e5bb89a05b9bff01159 | [
"MIT"
] | 41 | 2018-05-26T15:47:31.000Z | 2020-05-12T08:57:14.000Z | connect.py | Mozilla-GitHub-Standards/1b5242f34a4e5d2d3d71c27b0d7d100be5a497cf08caad050cbbccb52c91dc98 | cc37cb87dc504a74defe6e5bb89a05b9bff01159 | [
"MIT"
] | 11 | 2018-05-22T16:08:43.000Z | 2020-06-22T23:20:18.000Z | connect.py | Mozilla-GitHub-Standards/1b5242f34a4e5d2d3d71c27b0d7d100be5a497cf08caad050cbbccb52c91dc98 | cc37cb87dc504a74defe6e5bb89a05b9bff01159 | [
"MIT"
] | 17 | 2018-05-22T15:48:46.000Z | 2020-08-07T12:43:20.000Z | import machine
import network
import time
import config
def start_ftp():
    """Start the board's built-in FTP server (MicroPython ``network.ftp``)."""
    print('Starting FTP...')
    network.ftp.start()
def start_ntp():
    """Sync the RTC against pool.ntp.org, blocking until the time is set."""
    print('Syncing to NTP...')
    rtc = machine.RTC()
    rtc.ntp_sync(server='pool.ntp.org')
    if not rtc.synced():
        print(' waiting for time sync...', end='')
        time.sleep(0.5)
        # Poll twice a second, printing a dot per attempt, until synced.
        while not rtc.synced():
            print('.', end='')
            time.sleep(0.5)
        print('')
    print('Time:', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
def connect_to_ap():
    """Bring up the WiFi station interface and join the configured AP.

    Blocks until the connection succeeds, then prints the interface
    configuration. Credentials come from ``config.SSID`` / ``config.PASSWORD``.
    """
    wlan = network.WLAN(network.STA_IF)
    if not wlan.active():
        wlan.active(True)
    if not wlan.isconnected():
        print('Connecting....')
        wlan.connect(config.SSID, config.PASSWORD)
        # Poll once a second, printing a dot per attempt.
        while not wlan.isconnected():
            time.sleep(1)
            print('.', end='')
        print('')
    print('ifconfig =', wlan.ifconfig())
0927fe4b160058e23553510aa7e07adb4ab54a92 | 2,335 | py | Python | src/bounds.py | parthbhope/NC_Concolic_Testing | d2622ba3f7fd667b6534bda09d29f1c95c59799f | [
"BSD-3-Clause"
] | 1 | 2021-11-22T10:38:37.000Z | 2021-11-22T10:38:37.000Z | src/bounds.py | parthbhope/NC_Concolic_Testing | d2622ba3f7fd667b6534bda09d29f1c95c59799f | [
"BSD-3-Clause"
] | null | null | null | src/bounds.py | parthbhope/NC_Concolic_Testing | d2622ba3f7fd667b6534bda09d29f1c95c59799f | [
"BSD-3-Clause"
] | 1 | 2020-11-25T18:40:39.000Z | 2020-11-25T18:40:39.000Z | from utils import *
from engine import _InputsStatBasedInitializable, Input
# ---
class UniformBounds (Bounds):
  """
  Uniform bounds on inputs: one [LB, UB] interval shared by every
  input component.
  """
  def __init__ (self, LB = 0.0, UB = 1.0, **kwds):
    super().__init__(**kwds)
    self.LB, self.UB = LB, UB
  @property
  def low (self):
    return np.array([self.LB])
  @property
  def up (self):
    return np.array([self.UB])
  def __getitem__ (self, _idx: Tuple[int, ...]) -> Tuple[float, float]:
    # Same scalar bounds whatever the component index.
    return self.LB, self.UB
class StatBasedInputBounds (Bounds, _InputsStatBasedInitializable):
  """
  Stat-based bounds for generating inputs.
  Analyzes given training samples to compute per-component bounds for
  inputs.
  - `looseness` is a factor that widens the range by some amount (0.1%
  by default).
  - `hard_bounds` is an optional object of type :class:`Bounds`, that
  is used to restrict the bounds after they have been widened as
  above.
  """
  def __init__(self, looseness: float = .001, hard_bounds: Bounds = None, **kwds):
    assert hard_bounds is None or isinstance (hard_bounds, Bounds)
    self.looseness = looseness
    self.hard_bounds = hard_bounds
    super ().__init__(**kwds)
  def inputs_stat_initialize (self,
                              train_data: raw_datat = None,
                              test_data: raw_datat = None):
    # Bounds are per-component min/max over the training data, widened by
    # `looseness` times the peak-to-peak range, then clamped component-wise
    # to any hard bounds.
    if isinstance (self.hard_bounds, _InputsStatBasedInitializable):
      # Forward the call to hard_bounds, in case it needs the data too.
      self.hard_bounds.inputs_stat_initialize (train_data, test_data)
    np1 ('Initializing stat-based input bounds with {} training samples... '
         .format (len (train_data.data)))
    ptp = np.ptp (train_data.data, axis = 0)
    self._up = np.amax (train_data.data, axis = 0) + self.looseness * ptp
    self._low = np.amin (train_data.data, axis = 0) - self.looseness * ptp
    if self.hard_bounds is not None:
      # In-place clamp of the widened bounds against the hard bounds.
      np.minimum (self._up, self.hard_bounds.up, out = self._up)
      np.maximum (self._low, self.hard_bounds.low, out = self._low)
    c1 ('done')
  @property
  def low (self) -> np.array(float):
    return self._low
  @property
  def up (self) -> np.array(float):
    return self._up
  def __getitem__ (self, idx: Tuple[int, ...]) -> Tuple[float, float]:
    return self._low[idx], self._up[idx]
c0c685efe7171272999f77b19c8b9a90f4b50cf1 | 865 | py | Python | main/pywitch_manager.py | ouriquegustavo/twitch_tower_defense | 20e1b41e3f1c363856515eda1c2a9288ce42e442 | [
"MIT"
] | null | null | null | main/pywitch_manager.py | ouriquegustavo/twitch_tower_defense | 20e1b41e3f1c363856515eda1c2a9288ce42e442 | [
"MIT"
] | null | null | null | main/pywitch_manager.py | ouriquegustavo/twitch_tower_defense | 20e1b41e3f1c363856515eda1c2a9288ce42e442 | [
"MIT"
] | null | null | null | import string
import random
import main.token as tk
from pywitch import (
validate_token,
PyWitchHeat
)
# Alphabet of characters considered valid for generated identifiers.
valid_char = string.ascii_letters + string.digits
def random_string(length):
    """Return a random string of `length` ASCII letters and digits.

    Self-contained: draws from ``string.ascii_letters + string.digits``
    directly (same alphabet as the module-level ``valid_char``) instead of
    depending on a mutable module global, and uses ``random.choices`` to
    sample all characters in one call rather than building an intermediate
    list of per-character draws.
    """
    return ''.join(random.choices(string.ascii_letters + string.digits,
                                  k=length))
# Module-level store holding the most recent click (heat) event's fields;
# heat_callback mutates this dict in place.
event_data_click = {}
def heat_callback(data):
    """PyWitchHeat event hook: tag the event and publish it module-wide.

    Mutates `data` in place by adding a random 16-char 'event_id', then
    merges all of its fields into the shared `event_data_click` dict.
    """
    event_id = random_string(16)
    data['event_id'] = event_id
    event_data_click.update(data)
class PyWitchManager():
    """Thin wrapper around PyWitch: validates the token, runs heat events."""
    def __init__(self):
        self.token = tk.token
        self.channel = 'gleenus'
        self.users = {}
        # Shared module-level sink that heat_callback fills in.
        self.data_click = event_data_click
    def start(self):
        """Validate the token, then start listening for heat (click) events."""
        validation_result = validate_token(self.token)
        self.validation, self.helix_headers = validation_result
        self.heat = PyWitchHeat(self.channel, self.token, heat_callback,
                                self.users)
        self.heat.start()
# self.data = event_data
| 24.714286 | 84 | 0.672832 |
3778f9c7413157591ace37eb7cb70c9926feb828 | 2,537 | py | Python | fima/viz/brainregions.py | gpiantoni/fima | 52bba27409f99dc22f3495e3adc907201f69387e | [
"MIT"
] | null | null | null | fima/viz/brainregions.py | gpiantoni/fima | 52bba27409f99dc22f3495e3adc907201f69387e | [
"MIT"
] | null | null | null | fima/viz/brainregions.py | gpiantoni/fima | 52bba27409f99dc22f3495e3adc907201f69387e | [
"MIT"
] | null | null | null | from bidso.utils import read_tsv
import plotly.graph_objects as go
from numpy import sign
from ..names import name
from ..read import load
from .surf import AXIS
def plot_brain_regions(parameters, ieeg_file, region_type):
    """
    Build a plotly 3-D figure of the pial surface colored by atlas region,
    with electrodes overlaid as markers colored by their annotated region.
    region_type can be one of:
        'aparc.a2009s',
        'aparc.DKTatlas',
        'BA_exvivo',
        'BA_exvivo.thresh',
    """
    brainregions_file = name(parameters, 'brainregions', ieeg_file)
    electrodes = read_tsv(brainregions_file)
    pial = load('pial', parameters, ieeg_file)
    annot = load(region_type, parameters, ieeg_file)
    # Per-electrode hover label ("chan = region") and marker color, looked
    # up from the annotation's region color table.
    colors = []
    labels = []
    for elec in electrodes:
        region = elec[region_type]
        labels.append(f'{elec["chan"]} = {region}')
        colors.append(annot['regions']['colors'][region])
    # to normalize plotly
    n_regions = len(annot['regions']['names'])
    # +1 when most electrodes lie at x > 0 (right side), else -1; used below
    # to place the camera on the implanted hemisphere.
    right_or_left = sign((electrodes['x'] > 0).sum() / electrodes.shape[0] - .5)
    traces = [
        go.Mesh3d(
            x=pial.vert[:, 0],
            y=pial.vert[:, 1],
            z=pial.vert[:, 2],
            i=pial.tri[:, 0],
            j=pial.tri[:, 1],
            k=pial.tri[:, 2],
            # Region index per vertex, scaled to [0, 1] for the colorscale.
            intensity=annot['regions']['values'] / n_regions,
            colorscale=annot['regions']['colorscale'],
            hoverinfo='skip',
            showscale=False,
            flatshading=False,
            lighting=dict(
                ambient=0.18,
                diffuse=1,
                fresnel=0.1,
                specular=1,
                roughness=0.1,
            ),
            lightposition=dict(
                x=0,
                y=0,
                z=-1,
            ),
        ),
        go.Scatter3d(
            x=electrodes['x'],
            y=electrodes['y'],
            z=electrodes['z'],
            text=labels,
            mode='markers',
            hoverinfo='text',
            marker=dict(
                size=5,
                color=colors,
            ),
        )
    ]
    fig = go.Figure(
        data=traces,
        layout=go.Layout(
            scene=dict(
                xaxis=AXIS,
                yaxis=AXIS,
                zaxis=AXIS,
                camera=dict(
                    eye=dict(
                        x=right_or_left,
                        y=0,
                        z=0.5,
                    ),
                    projection=dict(
                        type='orthographic',
                    ),
                ),
            ),
        ),
    )
    return fig
f4352a2926460204703a84016eda5c3575c219d1 | 901 | py | Python | share/qt/clean_mac_info_plist.py | Chrisuk4/PotatoCoin | 85cf10945a6f5a4714c584b033089518033912bc | [
"MIT"
] | null | null | null | share/qt/clean_mac_info_plist.py | Chrisuk4/PotatoCoin | 85cf10945a6f5a4714c584b033089518033912bc | [
"MIT"
] | null | null | null | share/qt/clean_mac_info_plist.py | Chrisuk4/PotatoCoin | 85cf10945a6f5a4714c584b033089518033912bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Potatocoin-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Potatocoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
| 30.033333 | 109 | 0.72697 |
57feba5ef8eb22b5b9a84b7ebb247935b05379a2 | 3,798 | py | Python | wagtail/actions/move_page.py | fairhopeweb/wagtail | fe658f021f2cd43bde4a16d1c4d9fa292ec547f3 | [
"BSD-3-Clause"
] | 1 | 2022-02-09T05:25:30.000Z | 2022-02-09T05:25:30.000Z | wagtail/actions/move_page.py | fairhopeweb/wagtail | fe658f021f2cd43bde4a16d1c4d9fa292ec547f3 | [
"BSD-3-Clause"
] | null | null | null | wagtail/actions/move_page.py | fairhopeweb/wagtail | fe658f021f2cd43bde4a16d1c4d9fa292ec547f3 | [
"BSD-3-Clause"
] | null | null | null | import logging
from django.core.exceptions import PermissionDenied
from django.db import transaction
from treebeard.mp_tree import MP_MoveHandler
from wagtail.log_actions import log
from wagtail.signals import post_page_move, pre_page_move
logger = logging.getLogger("wagtail")
class MovePagePermissionError(PermissionDenied):
"""
Raised when the page move cannot be performed due to insufficient permissions.
"""
pass
class MovePageAction:
def __init__(self, page, target, pos=None, user=None):
self.page = page
self.target = target
self.pos = pos
self.user = user
def check(self, skip_permission_checks=False):
if self.user and not skip_permission_checks:
if not self.page.permissions_for_user(self.user).can_move_to(self.target):
raise MovePagePermissionError(
"You do not have permission to move the page to the target specified."
)
def _move_page(self, page, target, pos=None):
from wagtail.models import Page
# Determine old and new parents
parent_before = page.get_parent()
if pos in ("first-child", "last-child", "sorted-child"):
parent_after = target
else:
parent_after = target.get_parent()
# Determine old and new url_paths
# Fetching new object to avoid affecting `page`
old_page = Page.objects.get(id=page.id)
old_url_path = old_page.url_path
new_url_path = old_page.set_url_path(parent=parent_after)
url_path_changed = old_url_path != new_url_path
# Emit pre_page_move signal
pre_page_move.send(
sender=page.specific_class or page.__class__,
instance=page,
parent_page_before=parent_before,
parent_page_after=parent_after,
url_path_before=old_url_path,
url_path_after=new_url_path,
)
# Only commit when all descendants are properly updated
with transaction.atomic():
# Allow treebeard to update `path` values
MP_MoveHandler(page, target, pos).process()
# Treebeard's move method doesn't actually update the in-memory instance,
# so we need to work with a freshly loaded one now
new_page = Page.objects.get(id=page.id)
new_page.url_path = new_url_path
new_page.save()
# Update descendant paths if url_path has changed
if url_path_changed:
new_page._update_descendant_url_paths(old_url_path, new_url_path)
# Emit post_page_move signal
post_page_move.send(
sender=page.specific_class or page.__class__,
instance=new_page,
parent_page_before=parent_before,
parent_page_after=parent_after,
url_path_before=old_url_path,
url_path_after=new_url_path,
)
# Log
log(
instance=page,
action="wagtail.move" if url_path_changed else "wagtail.reorder",
user=self.user,
data={
"source": {
"id": parent_before.id,
"title": parent_before.specific_deferred.get_admin_display_title(),
},
"destination": {
"id": parent_after.id,
"title": parent_after.specific_deferred.get_admin_display_title(),
},
},
)
logger.info('Page moved: "%s" id=%d path=%s', page.title, page.id, new_url_path)
def execute(self, skip_permission_checks=False):
self.check(skip_permission_checks=skip_permission_checks)
return self._move_page(self.page, self.target, pos=self.pos)
| 34.844037 | 90 | 0.626119 |
3b39d5db2cdc92a19384acac4750ba2cb6b5adbb | 491 | py | Python | fundamentals/oop/modularizing/parent.py | ZhouSusan/CodingDojoPython | 8d89c9a94a3be18e79fbf24e25348eae8c96a338 | [
"MIT"
] | null | null | null | fundamentals/oop/modularizing/parent.py | ZhouSusan/CodingDojoPython | 8d89c9a94a3be18e79fbf24e25348eae8c96a338 | [
"MIT"
] | null | null | null | fundamentals/oop/modularizing/parent.py | ZhouSusan/CodingDojoPython | 8d89c9a94a3be18e79fbf24e25348eae8c96a338 | [
"MIT"
] | null | null | null | local_val = "magical unicorns"
def square(x):
return x*x
class User:
def __init__(self, name):
self.name = name
def say_hello(self):
return "hello"
print(__name__)
if __name__ == "__main__":
print("the file is being executed directly")
else:
print("The file is being executed because it is imported by another file. The file is called: ", __name__)
print(square(5))
user = User("Anna")
print(user.name)
print(user.say_hello()) | 22.318182 | 114 | 0.643585 |
89f6418782bc3895531a7c9f051e066ea340a587 | 4,871 | py | Python | src/google/appengine/api/mail_stub_service_pb2.py | phil-lopreiato/appengine-python-standard | 5e2c400a24d299bb86e98f755a6ef510b4e1e0df | [
"Apache-2.0"
] | 2 | 2022-01-09T20:26:08.000Z | 2022-02-02T22:05:55.000Z | src/google/appengine/api/mail_stub_service_pb2.py | SOFTWARESOLUTONS-PVT-LIMITED/appengine-python-standard | 530a54b0fc0eb74d9dc29b19b7c4cdfab0556ebc | [
"Apache-2.0"
] | null | null | null | src/google/appengine/api/mail_stub_service_pb2.py | SOFTWARESOLUTONS-PVT-LIMITED/appengine-python-standard | 530a54b0fc0eb74d9dc29b19b7c4cdfab0556ebc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated protocol buffer code."""
# NOTE: This module is emitted by protoc; do not edit it by hand --
# regenerate from google/appengine/api/mail_stub_service.proto instead.
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.appengine.api import api_base_pb2 as google_dot_appengine_dot_api_dot_api__base__pb2
from google.appengine.api import mail_service_pb2 as google_dot_appengine_dot_api_dot_mail__service__pb2
# The serialized FileDescriptorProto for mail_stub_service.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,google/appengine/api/mail_stub_service.proto\x12\x10google.appengine\x1a#google/appengine/api/api_base.proto\x1a\'google/appengine/api/mail_service.proto\"N\n\x17GetSentMessagesResponse\x12\x33\n\x0csent_message\x18\x01 \x03(\x0b\x32\x1d.google.appengine.MailMessage\"5\n\x19\x43learSentMessagesResponse\x12\x18\n\x10messages_cleared\x18\x01 \x01(\x05\"/\n\x16GetLogMailBodyResponse\x12\x15\n\rlog_mail_body\x18\x01 \x02(\x08\".\n\x15SetLogMailBodyRequest\x12\x15\n\rlog_mail_body\x18\x01 \x02(\x08\"1\n\x17GetLogMailLevelResponse\x12\x16\n\x0elog_mail_level\x18\x01 \x02(\t\"0\n\x16SetLogMailLevelRequest\x12\x16\n\x0elog_mail_level\x18\x01 \x02(\tB2\n\x1d\x63om.google.appengine.api.mailB\x11MailStubServicePb')
_GETSENTMESSAGESRESPONSE = DESCRIPTOR.message_types_by_name['GetSentMessagesResponse']
_CLEARSENTMESSAGESRESPONSE = DESCRIPTOR.message_types_by_name['ClearSentMessagesResponse']
_GETLOGMAILBODYRESPONSE = DESCRIPTOR.message_types_by_name['GetLogMailBodyResponse']
_SETLOGMAILBODYREQUEST = DESCRIPTOR.message_types_by_name['SetLogMailBodyRequest']
_GETLOGMAILLEVELRESPONSE = DESCRIPTOR.message_types_by_name['GetLogMailLevelResponse']
_SETLOGMAILLEVELREQUEST = DESCRIPTOR.message_types_by_name['SetLogMailLevelRequest']
# One concrete Message class per descriptor, registered with the default
# symbol database so they can be looked up by full name.
GetSentMessagesResponse = _reflection.GeneratedProtocolMessageType('GetSentMessagesResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETSENTMESSAGESRESPONSE,
  '__module__' : 'google.appengine.api.mail_stub_service_pb2'
  })
_sym_db.RegisterMessage(GetSentMessagesResponse)
ClearSentMessagesResponse = _reflection.GeneratedProtocolMessageType('ClearSentMessagesResponse', (_message.Message,), {
  'DESCRIPTOR' : _CLEARSENTMESSAGESRESPONSE,
  '__module__' : 'google.appengine.api.mail_stub_service_pb2'
  })
_sym_db.RegisterMessage(ClearSentMessagesResponse)
GetLogMailBodyResponse = _reflection.GeneratedProtocolMessageType('GetLogMailBodyResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETLOGMAILBODYRESPONSE,
  '__module__' : 'google.appengine.api.mail_stub_service_pb2'
  })
_sym_db.RegisterMessage(GetLogMailBodyResponse)
SetLogMailBodyRequest = _reflection.GeneratedProtocolMessageType('SetLogMailBodyRequest', (_message.Message,), {
  'DESCRIPTOR' : _SETLOGMAILBODYREQUEST,
  '__module__' : 'google.appengine.api.mail_stub_service_pb2'
  })
_sym_db.RegisterMessage(SetLogMailBodyRequest)
GetLogMailLevelResponse = _reflection.GeneratedProtocolMessageType('GetLogMailLevelResponse', (_message.Message,), {
  'DESCRIPTOR' : _GETLOGMAILLEVELRESPONSE,
  '__module__' : 'google.appengine.api.mail_stub_service_pb2'
  })
_sym_db.RegisterMessage(GetLogMailLevelResponse)
SetLogMailLevelRequest = _reflection.GeneratedProtocolMessageType('SetLogMailLevelRequest', (_message.Message,), {
  'DESCRIPTOR' : _SETLOGMAILLEVELREQUEST,
  '__module__' : 'google.appengine.api.mail_stub_service_pb2'
  })
_sym_db.RegisterMessage(SetLogMailLevelRequest)
# When the pure-Python descriptor implementation is in use, attach the
# serialized options and byte offsets of each message in the file payload.
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n\035com.google.appengine.api.mailB\021MailStubServicePb'
  _GETSENTMESSAGESRESPONSE._serialized_start=144
  _GETSENTMESSAGESRESPONSE._serialized_end=222
  _CLEARSENTMESSAGESRESPONSE._serialized_start=224
  _CLEARSENTMESSAGESRESPONSE._serialized_end=277
  _GETLOGMAILBODYRESPONSE._serialized_start=279
  _GETLOGMAILBODYRESPONSE._serialized_end=326
  _SETLOGMAILBODYREQUEST._serialized_start=328
  _SETLOGMAILBODYREQUEST._serialized_end=374
  _GETLOGMAILLEVELRESPONSE._serialized_start=376
  _GETLOGMAILLEVELRESPONSE._serialized_end=425
  _SETLOGMAILLEVELREQUEST._serialized_start=427
  _SETLOGMAILLEVELREQUEST._serialized_end=475
9616d5cf65e2031107551b75d25f459087ae5676 | 1,333 | py | Python | Toolz/sqlmap/plugins/dbms/hsqldb/enumeration.py | thezakman/CTF-Toolz | b369246ea6766165cce0852e537fb6a0c970869b | [
"Unlicense"
] | 71 | 2019-02-02T11:38:46.000Z | 2022-03-31T14:08:27.000Z | tools/sqlmap/plugins/dbms/hsqldb/enumeration.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | null | null | null | tools/sqlmap/plugins/dbms/hsqldb/enumeration.py | sravani-m/Web-Application-Security-Framework | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | [
"MIT"
] | 15 | 2019-08-07T06:32:04.000Z | 2022-03-09T12:48:20.000Z | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from plugins.generic.enumeration import Enumeration as GenericEnumeration
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.common import unArrayizeValue
from lib.core.enums import DBMS
from lib.core.settings import HSQLDB_DEFAULT_SCHEMA
from lib.request import inject
class Enumeration(GenericEnumeration):
    """HSQLDB-specific enumeration: overrides what HSQLDB cannot provide."""
    def __init__(self):
        GenericEnumeration.__init__(self)
    def getBanner(self):
        # Skip entirely unless the user asked for the banner; the fetched
        # value is cached on kb.data.banner across calls.
        if not conf.getBanner:
            return None
        if kb.data.banner is None:
            infoMsg = "fetching banner"
            logger.info(infoMsg)
            query = queries[DBMS.HSQLDB].banner.query
            banner = inject.getValue(query, safeCharEncode=True)
            kb.data.banner = unArrayizeValue(banner)
        return kb.data.banner
    def getPrivileges(self, *args):
        # Not supported on HSQLDB; warn and return an empty mapping.
        logger.warn("on HSQLDB it is not possible to enumerate the user privileges")
        return {}
    def getHostname(self):
        # Not supported on HSQLDB; warn (implicitly returns None).
        logger.warn("on HSQLDB it is not possible to enumerate the hostname")
    def getCurrentDb(self):
        # HSQLDB has a single, fixed default schema.
        return HSQLDB_DEFAULT_SCHEMA
| 28.361702 | 89 | 0.700675 |
0ebfa003b359992aa37a1a57df58587f678b5c24 | 66,826 | py | Python | emma/core/watch.py | djangowebstudio/emma | afbdaa5c02b4164687356755fddba307eb682ef4 | [
"BSD-3-Clause"
] | null | null | null | emma/core/watch.py | djangowebstudio/emma | afbdaa5c02b4164687356755fddba307eb682ef4 | [
"BSD-3-Clause"
] | null | null | null | emma/core/watch.py | djangowebstudio/emma | afbdaa5c02b4164687356755fddba307eb682ef4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
#**************************************************************************************************
# Geert Dekkers Web Studio 2008, 2009, Django Web Studio 2010, info@djangowebstudio.nl
#
# watch.py for EMMA
#**************************************************************************************************
"""
EMMA (Easy Media Management Application)
Administer your image sharing webapp through a fileserver share on your macosx 10.5 client or server.
Watches over the content - updates, inserts, deletes, and more. Sees to it that the database
accurately reflects the content directory tree at settings.APP_CONTENT_ROOT.
Watch works in conjunction with fix and generatekeywords.
1. Fixes spaces in filenames and directories
2. Converts PDF, EPS, AI, PNG, PSD to JPG
3. Resizes JPG's for use as thumbs and minithumbs
4. Converts avi, mpg, mov, wmv and more to flv
5. From paired .fla / .swf files, moves .swf to gallery.
--------------
DEPLOYING EMMA
--------------
1. EMMA is a set of django apps. It needs to be deployed to a project to work (at all!). So the first
step is to start a project and add stub files for watch, generatekeywords, fix and converter. You'll
also need to add a urls.py. (sorry, no installer yet!)
2. In settings, change APP_CONTENT_ROOT to reflect the living quarters of your content. Or
move your content to where APP_CONTENT_ROOT points to. Configure your template & static paths.
Override templates & statics locally if you wish.
3. From within the django project root, run script/load -f, and leave it running. (you will need admin
permissions for this).
Refer to fix's log at /Library/Logs/[project name]/fix.log to check the progress. Wait for files to
be processed before going on to the next step.
Fix will fix filenames, renaming as needed to comply with workflow policy. And does heaps
of other useful stuff. Please refer to the programmer's notes in Fix's header.
Set the interval in app settings. IMPORTANT NOTE: Before running, the fixture will set
images_imagecount.count to a five-digit number, e.g. 10000.
4. Run "script/load -l". Your site will now be filled. This could take quite long, depending on your content.
Make sure script/load is running at all times. While running, watch.py will watch over your content
as described above. It is set to rescan settings.APP_ROOT_CONTENT at an interval as set in settings.py.
This may be freely altered, but restart using script/load -r to apply changes.
"""
from __future__ import nested_scopes
import os, sys, time, subprocess
from time import strftime
from django.core.management import setup_environ
import settings
setup_environ(settings)
from emma.interface.models import Image, Keyword, Metadata
import metadata
import converter
import datetime
import utes
import logging
#--------------------------------------------------------------------------------------------------
# Logging
# A directory will be created the first time watch is run.
#--------------------------------------------------------------------------------------------------
# Root logger writes everything (DEBUG and up) to <APP_LOGS_ROOT>/watch.log.
# filemode='w' truncates the log on every start, so only the current run is kept.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename=os.path.join(settings.APP_LOGS_ROOT, 'watch.log'),
                    filemode='w')
#--------------------------------------------------------------------------------------------------
# Configuration
#--------------------------------------------------------------------------------------------------
# Gallery output directories. The trailing slash matters: filenames are
# concatenated directly onto these prefixes throughout this module.
gallery_images = settings.GALLERY_ROOT + '/images/'
gallery_thumbs = settings.GALLERY_ROOT + '/thumbs/'
gallery_minithumbs = settings.GALLERY_ROOT + '/miniThumbs/'
gallery_albums = settings.GALLERY_ROOT + '/albums/'
# Get a value for the tmp variable (scratch dir for document conversion).
# A falsy value means no tmp dir is created; it is still passed to
# convertDocument -- NOTE(review): confirm convertDocument accepts False.
tmp = getattr(settings, 'APP_CONTENT_TMP', False)
#--------------------------------------------------------------------------------------------------
class Watch(object):
def convertImages(self,item,file_type='', mime_type='', rotate=''):
""" Calls the appropriate functions in Converter
This function does no actual conversion itself."""
c = converter.Convert()
m = metadata.Metadata()
# assign the full path to a var
current_path = item
# get the filename
fname = os.path.split(item)[1]
# sort files by extension and initiate the appropriate converter function
# Set default values for the return variables
image_id = ''
image_path = ''
image_name = ''
image_category = ''
image_pages = 0
# Go through our files, looking at their extensions to route them to the appropriate converters
# At the moment, fix.py is handling files without extensions by extracting file format
# information from the metadata and then appending the appropriate extension.
# Todo: sort extensions using os.path.split. This will be less costly and improve code readability.
# Todo: Look into magic file definition -- API? Speed issues?
# NOTE: We're going to extract the file type using exiftool for the moment. This is quite costly, but
# as we're getting a whole heap of metadata anyway, this one bit of extra data won't slow us down much.
if file_type:
if mime_type and not mime_type == 'image/vnd.fpx':# Exclude Windows Thumbs.db if it happens to appear under another name
print 'file: %s, file_type: %s, mime_type: %s' % (current_path, file_type, mime_type)
logging.info('Converting fname: %s, file_type: %s, mime_type: %s' % (fname, file_type, mime_type))
mime_type = mime_type.lower() # Just to make sure...
if mime_type.find('video') == 0:
print 'foutput: %s' % gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv')
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs, 'small', 148, 'jpg')
image_category = 'video'
elif mime_type.find('audio') == 0:
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.pcopy(gallery_thumbs + 'sound-thumb.jpg', gallery_thumbs + fname.replace(os.path.splitext(fname)[1]) + '.jpg')
image_category = 'audio'
elif mime_type.find('image') == 0:
image_path = c.resize (current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif mime_type == 'application/pdf':
image_path, image_pages = c.convertPDF (current_path, gallery_images)
image_category = 'illustration'
elif mime_type == 'application/vnd.adobe.illustrator':
image_path, image_pages = c.convertPDF (current_path, gallery_images)
image_category = 'illustration'
elif mime_type == 'application/vnd.adobe.photoshop':
image_path = c.resize (current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'illustration'
elif mime_type == 'application/postscript':
newpath = gallery_images + fname.replace(os.path.splitext(fname)[1], '.jpg') # convertToBitmap needs to know the extension
try:
image_path = c.convertToBitmap (current_path, newpath )
c.resize(newpath, newpath, settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH) # resize the image immediately afterwards
except Exception, inst:
logging.warning("%(fname)s %(inst)s" % {'fname': fname, 'inst':inst})
image_category = 'illustration'
elif mime_type == 'application/photoshop':
image_path = c.resize (current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'illustration'
elif mime_type == 'application/msword':
# We might consider storing the pdf instead of the word doc.
try:
document_path = c.convertDocument(current_path, tmp)
if document_path:
image_path, image_pages = c.convertPDF(document_path, gallery_images)
image_category = 'document'
else:
return None
except Exception, inst:
logging.warning('Tried converting %s but gave up with error %s' % (fname,inst))
else:
logging.warning('This mime type %s is not supported right now, skipping %s' % (mime_type, item))
return None
else:
file_type = file_type.lower()
print 'No mime type; file: %s, file_type: %s' % (current_path, file_type)
logging.info('Converting items using file_type %s' % file_type)
if file_type == "eps":
newpath = gallery_images + fname.replace('.eps', '.jpg') # convertToBitmap needs to know the extension
try:
image_path = c.convertToBitmap (current_path, newpath )
c.resize(newpath, newpath, settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH) # resize the image immediately afterwards
except Exception, inst:
logging.warning("%(fname)s %(inst)s" % { 'fname': fname, 'inst':inst})
image_category = 'illustration'
elif file_type == "pdf":
image_path, image_pages = c.convertPDF (current_path, gallery_images)
image_category = 'illustration'
elif file_type == "jpeg":
image_path = c.resize (current_path, gallery_images + fname, settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif file_type == "gif":
image_path = c.resize (current_path, gallery_images + fname.replace('.gif', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'illustration'
elif file_type == "psd":
image_path = c.resize (current_path, gallery_images + fname.replace('.psd', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'illustration'
elif file_type == "png":
image_path = c.resize (current_path, gallery_images + fname.replace('.png', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif file_type == "tiff":
image_path = c.resize (current_path, gallery_images + fname.replace('.tif', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif file_type == "au":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'cropped')
image_category = 'audio'
elif file_type == "mp3":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'cropped')
image_category = 'audio'
elif file_type == "aiff":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'cropped')
image_category = 'audio'
elif file_type == "m4v":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'small')
image_category = 'video'
elif file_type == "mp4":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'small')
image_category = 'video'
elif file_type == "mov":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'small')
image_category = 'video'
elif file_type == "mpg":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'small')
image_category = 'video'
elif file_type == "avi":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'small')
image_category = 'video'
elif file_type == "wmv":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(os.path.splitext(fname)[1], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(file_type, '.flv'),'small')
image_category = 'video'
elif file_type == "flv":
image_path = c.ffmpeg(current_path, gallery_images + fname, 'large')
c.ffmpeg(current_path, gallery_thumbs + fname,'small')
image_category = 'video'
elif file_type == "fla":
image_path = gallery_images + fname
#c.pcopy(current_path, gallery_thumbs + fname.replace('.fla', '.swf')) Moving and copying is now being done in fix.
image_category = 'flash'
elif file_type == "swf":
image_path = c.pcopy(current_path, gallery_images + fname)
c.pcopy(current_path, gallery_thumbs + fname) # The only swf files you should be seeing here are standalone files (as opposed to paired fla/swf)
image_category = 'flash'
# Looking for one of txt, doc, htm, xml
else:
logging.warning( "%(file)s doesn't seem to belong to our favourite formats. We'll try to treat it as a text doc, otherwise leave it." % {'file':current_path})
try:
document_path = c.convertDocument(current_path, tmp)
if document_path:
image_path, image_pages = c.convertPDF(document_path, gallery_images)
image_category = 'document'
else:
return None
except Exception, inst:
logging.warning('Tried converting %s but gave up with error %s' % (fname,inst))
elif fname[(len(fname)-4):(len(fname)-3)] == ".":
print 'No file type, no mime type; file: %s' % (current_path)
image_id = fname[0:(len(fname)-4)]
if fname[(len(fname)-4):len(fname)] == ".eps":
newpath = gallery_images + fname.replace('.eps', '.jpg') # convertToBitmap needs to know the extension
try:
image_path = c.convertToBitmap (current_path, newpath )
c.resize(newpath, newpath, settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH) # resize the image immediately afterwards
except Exception, inst:
logging.warning("%(fname)s %(inst)s" % { 'fname': fname, 'inst':inst})
image_category = 'illustration'
elif fname[(len(fname)-4):len(fname)] == ".pdf":
image_path, image_pages = c.convertPDF (current_path, gallery_images)
image_category = 'illustration'
elif fname[(len(fname)-4):len(fname)] == ".jpg":
image_path = c.resize (current_path, gallery_images + fname, settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif fname[(len(fname)-4):len(fname)] == ".gif":
image_path = c.resize (current_path, gallery_images + fname.replace('.gif', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'illustration'
elif fname[(len(fname)-4):len(fname)] == ".psd":
image_path = c.resize (current_path, gallery_images + fname.replace('.psd', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'illustration'
elif fname[(len(fname)-4):len(fname)] == ".png":
image_path = c.resize (current_path, gallery_images + fname.replace('.png', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif fname[(len(fname)-4):len(fname)] == ".tif":
image_path = c.resize (current_path, gallery_images + fname.replace('.tif', '.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif fname[(len(fname)-4):len(fname)] == ".mp4":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'small')
image_category = 'video'
elif fname[(len(fname)-4):len(fname)] == ".m4v":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'small')
image_category = 'video'
elif fname[(len(fname)-4):len(fname)] == ".mov":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'small')
image_category = 'video'
elif fname[(len(fname)-4):len(fname)] == ".mpg":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'small')
image_category = 'video'
elif fname[(len(fname)-4):len(fname)] == ".avi":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'small')
image_category = 'video'
elif fname[(len(fname)-4):len(fname)] == ".wmv":
image_path = c.ffmpeg(current_path, gallery_images + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'large')
c.ffmpeg(current_path, gallery_thumbs + fname.replace(fname[(len(fname)-4):len(fname)], '.flv'),'small')
image_category = 'video'
elif fname[(len(fname)-4):len(fname)] == ".flv":
image_path = c.ffmpeg(current_path, gallery_images + fname, 'large')
c.ffmpeg(current_path, gallery_thumbs + fname,'small')
image_category = 'video'
elif fname[(len(fname)-4):len(fname)] == ".fla":
image_path = gallery_images + fname
#c.pcopy(current_path, gallery_thumbs + fname.replace('.fla', '.swf')) Moving and copying is now being done in fix.
image_category = 'flash'
elif fname[(len(fname)-4):len(fname)] == ".swf":
image_path = c.pcopy(current_path, gallery_images + fname)
c.pcopy(current_path, gallery_thumbs + fname) # The only swf files you should be seeing here are standalone files (as opposed to paired fla/swf)
image_category = 'flash'
# Looking for one of txt, doc, htm, xml
else:
logging.warning( "%(file)s doesn't seem to belong to our favourite formats. We'll try to treat it as a text doc, otherwise leave it." % {'file':current_path})
try:
document_path = c.convertDocument(current_path, tmp)
if document_path:
image_path, image_pages = c.convertPDF(document_path, gallery_images)
image_category = 'document'
else:
return None
except Exception, inst:
logging.warning('Tried converting %s but gave up with error %s' % (fname,inst))
elif fname[(len(fname)-3):(len(fname)-2)] == ".":
if fname[(len(fname)-3):len(fname)] == ".ai":
try:
image_path = c.convertPDF (current_path, gallery_images)[0] # Just get the first item of the tuple. Note that we're using convertPDF, which retrns a tuple.
image_category = 'illustration'
except:
pass
else:
logging.warning( "%(file)s doesn't seem to belong to our favourite formats. We'll try to treat it as a text doc, otherwise leave it" % {'file':current_path})
image_path, image_pages = c.convertPDF(c.convertDocument(current_path), gallery_images)
image_category = 'document'
elif fname[(len(fname)-5):(len(fname)-4)] == ".":
if fname[(len(fname)-5):len(fname)] == ".jpeg":
image_path = c.resize (current_path, gallery_images + fname.replace('.jpeg','.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
elif fname[(len(fname)-5):len(fname)] == ".tiff":
image_path = c.resize (current_path, gallery_images + fname.replace('.tiff','.jpg'), settings.GALLERY_IMAGE_WIDTH, settings.GALLERY_IMAGE_WIDTH)
image_category = 'photo'
else:
logging.warning( "%(file)s doesn't seem to belong to our favourite formats. We'll try to treat it as a text doc, otherwise leave it" % {'file':current_path})
try:
document_path = c.convertDocument(current_path, tmp)
if document_path:
image_path, image_pages = c.convertPDF(document_path, gallery_images)
image_category = 'document'
else:
return None
except Exception, inst:
logging.warning('Tried converting %s but gave up with error %s' % (fname,inst))
else:
print 'Everything else failed for %s' % current_path
image_id = fname
image_path = gallery_images + fname + ".jpg"
image_category = 'photo'
logging.warning( "%(file)s doesn't seem to belong to our favourite formats. We're not doing anything with it at the moment." % {'file':current_path})
try:
image_name = os.path.basename(image_path)
except Exception, inst:
logging.warning( "Error while generating image_name variable %(inst)s" % {'inst':inst})
image_name = fname
#create interface images from the resultant image ------------------------------------
if image_name[len(image_name)-4:len(image_name)] == '.jpg':
try:
c.resize(image_path, gallery_thumbs + image_name, settings.GALLERY_THUMBS_WIDTH, settings.GALLERY_THUMBS_WIDTH )
except:
logging.warning( " tried building thumbs from %(image)s , but it didn't work out." % {'image': current_path})
# Copy and resize the image to an absolute square for the album cover and miniThumbs
try:
source = os.path.join(settings.GALLERY_ROOT,'images',image_name)
target = os.path.join(settings.GALLERY_ROOT,'albums',image_name)
c.resize_with_sips(source, target, settings.GALLERY_THUMBS_WIDTH, settings.GALLERY_THUMBS_WIDTH)
c.resize(target, gallery_minithumbs + image_name, settings.GALLERY_MINITHUMBS_WIDTH, settings.GALLERY_MINITHUMBS_WIDTH )
except Exception, inst:
return logging.error('An error occurred processing the album image %s' % inst)
return image_id, image_path, image_name, fname, image_category, image_pages
def extractImage_LNID(self,filename):
"""Get what appears to be the name of the file minus extension.
We use this as image identifier all through the application.
Accommodates for dotted filenames (i.e. Marketing codes used as filenames)"""
f = os.path.split(filename)[1]
return f.replace('.' + f.split('.').pop(),'')
def renderItem(self,filename):
d = metadata.Metadata()
description = d.exif('b -description', filename).split(':')[0].replace("\n"," ").lower()
for i in wordlist:
description = description.replace(i,'')
return description
wordlist = ['fotobureau', 'let op'] # This list is to moved to some sensible location when it gets too big.
def renderKeywordsFromDescription(description):
results = description.split(':')[0].replace("\n"," ").lower()
for i in wordlist:
results = results.replace(i,'')
return results
def update_obj(obj, image_LNID, **kwargs):
"""Shorthand for repetitive object updates"""
try:
o = obj.objects.get(image_LNID=image_LNID)
for key in kwargs:
o.key = kwargs[key]
o.save()
logging.info( "Saved %s" % image_LNID)
except Exception, inst:
logging.error( "Error saving %s %s " % (obj, image_LNID))
def watch_directories (self, paths, func, delay=1.0):
# Create gallery folders if they don't already exist
makeDirs = utes.Utes()
makeDirs._mkdir(gallery_images)
makeDirs._mkdir(gallery_thumbs)
makeDirs._mkdir(gallery_minithumbs)
makeDirs._mkdir(gallery_albums)
if tmp: makeDirs._mkdir(tmp)
# So, once we've done all that, start watching...
"""(paths:[str], func:callable, delay:float)
Continuously monitors the paths and their subdirectories
for changes. If any files or directories are modified,
the callable 'func' is called with a list of the modified paths of both
files and directories. 'func' can return a Boolean value
for rescanning; if it returns True, the directory tree will be
rescanned without calling func() for any found changes.
(This is so func() can write changes into the tree and prevent itself
from being immediately called again.)
"""
# Basic principle: all_files is a dictionary mapping paths to
# modification times. We repeatedly crawl through the directory
# tree rooted at 'path', doing a stat() on each file and comparing
# the modification time.
all_files = {}
def f (self, dirname, files):
# Traversal function for directories
for filename in files:
if not filename == '.DS_Store' or not filename == 'Thumbs.db':
path = os.path.join(dirname, filename)
try:
t = os.stat(path)
except os.error:
# If a file has been deleted between os.path.walk()
# scanning the directory and now, we'll get an
# os.error here. Just ignore it -- we'll report
# the deletion on the next pass through the main loop.
continue
mtime = remaining_files.get(path)
if mtime is not None:
# Record this file as having been seen
del remaining_files[path]
# File's mtime has been changed since we last looked at it.
if t.st_mtime > mtime:
appendix = path, datetime.datetime.fromtimestamp(t.st_mtime), datetime.datetime.fromtimestamp(t.st_ctime)
changed_list.append(appendix)
else:
# No recorded modification time, so it must be
# a brand new file.
#today = datetime.datetime.now()
appendix = path, datetime.datetime.fromtimestamp(t.st_mtime), datetime.datetime.fromtimestamp(t.st_ctime)
changed_list.append(appendix)
# Record current mtime of file.
all_files[path] = t.st_mtime
# Main loop
rescan = False
while True:
changed_list = []
remaining_files = all_files.copy()
all_files = {}
for path in paths:
os.path.walk(path, f, None)
removed_list = remaining_files.keys()
if rescan:
rescan = False
elif changed_list or removed_list:
rescan = func(changed_list, removed_list)
time.sleep(delay)
def __init__(self):
def f (changed_files, removed_files):
c = converter.Convert()
m = metadata.Metadata()
u = utes.Utes()
for item, item_mtime, item_ctime in changed_files:
# Only files WITH extensions!
if item[(len(item)-5):(len(item)-4)] == "." or item[(len(item)-4):(len(item)-3)] == "." or item[(len(item)-3):(len(item)-2)] == ".":
createdate = datetime.datetime.fromtimestamp(float(m.stat(item)['st_birthtime']))
# createdate = item_ctime
modifydate = item_mtime
# Query the database first, and THEN call convertImages
# Do a query on image_LNID, image_real_path and date_modified;
# if all three are OK you wouldn't want to call convertImages at all.
#
# Note that you can only call convertImages if date_modified has changed for
# the image corresponding to a particular image_LNID. A change in image_LNID or
# image_real_path alone will NOT get you in this loop at all!
# Init vars to hold metadata info
description = ''
keywords = ''
subject = ''
creator = ''
creator_tool = ''
caption = ''
caption_writer = ''
instructions = ''
credit = ''
source = ''
location = ''
city = ''
provincestate = ''
country = ''
headline = ''
datetimeoriginal = item_ctime
softdate = ''
copyright = ''
profile = ''
title = ''
author = ''
album = ''
orientation = 0
group_status = ''
file_type = ''
mime_type= ''
managedfromfilepath = ''
documentname = ''
# other vars
mdObj_album = ''
m_album = ''
image_LNID = self.extractImage_LNID(item) # Extract the image_LNID from the filename
image_real_path = item.replace(settings.APP_CONTENT_ROOT + "/",'') # Get the image path minus the path-to-content, no leading slash
if image_LNID != '':
# We want to get an exact match on image_LNID, image path, and date modified. If any one of the three properties has
# changed, we need to update. However, the trigger for watch is always date modified. So if that doesn't change, nothing is changed.
# Excluding things like Thumbs.db. See the appropriate settings entry.
if 'APP_WATCH_EXCLUDES' in dir(settings) and settings.APP_WATCH_EXCLUDES:
if True in u.excludes(os.path.basename(item), settings.APP_WATCH_EXCLUDES):
logging.info('%s is in the excludes list, skipping...' % item)
continue
try:
imageObj = Image.objects.get(image_LNID__exact=image_LNID, image_real_path__exact=image_real_path, date_modified=modifydate)
except Image.DoesNotExist: # Because one of the three above tests failed, we're going to process the image again
# Do the conversions, get the info for the item if the image_LNID - date_modified combination doesn't exist
logging.info("Starting sequence -----------------------------------------------------")
logging.info("Doing image conversion and picking up info for %(item)s" % {'item':item})
try:
item_dict = m.exifAll(item)
# Write the dict to variables - we'll be using these over and over...
description = item_dict['description'].strip()
keywords = item_dict['keywords'].lower().strip()
copyright = item_dict['copyright']
location = item_dict['location'].strip()
subject = item_dict['title'].strip()
creator = item_dict['creator'].strip()
try:
author = item_dict['author'].strip() if item_dict.has_key('author') else creator
except Exception, inst:
logging.warning("item_dict['author']" % inst)
creator_tool = item_dict['creatortool'].strip()
caption_writer = item_dict['captionwriter'].strip()
instructions = item_dict['instructions'].strip()
credit = item_dict['credit'].strip()
profile = item_dict['colorspacedata']
source = item_dict['source'].strip()
city = item_dict['city'].strip()
provincestate = item_dict['province-state'].strip()
country = item_dict['country'].strip()
datetimeoriginal = item_dict['datetimeoriginal']
album = item_dict['album']
softdate = ''
# Get the group_status from the headline
if item_dict['headline']:
hl = item_dict['headline'].strip()
if not hl == '-':
if hl.lower() == 'leader' or hl.lower() == 'follower':
group_status = hl.lower()
file_type = item_dict['filetype']
mime_type = item_dict['mimetype']
managedfromfilepath = item_dict['managedfromfilepath']
documentname = subject if subject else item_dict['documentname']
orientation = item_dict['orientation']
results = self.convertImages(item, file_type, mime_type) # image conversions based on file type
except Exception, inst:
logging.error("Error executing exifAll with item %s %s, doing convertImages without metadata" % (item, inst))
results = self.convertImages(item) # image conversions based on file extension
if results and results[1] != '': # Test for a value for convertImages, i.e. image_path
try:
imageObj = Image.objects.get(image_LNID=image_LNID) # If we get a match here, we're updating existing file data
imageObj.image_path=results[1]
imageObj.image_name=results[2]
imageObj.image_real_name=results[3]
imageObj.image_real_path=item.replace(settings.APP_CONTENT_ROOT + "/",'')
imageObj.group_status=group_status
imageObj.date_created=createdate
imageObj.date_modified=modifydate
imageObj.date_entered=datetime.datetime.now()
imageObj.image_category=results[4]
imageObj.image_pages=results[5]
try:
imageObj.save()
logging.info( "Image updated successfully %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error("Image update failed %s" % inst)
continue
try: # Keyword for uupdate existing data
obj = Keyword.objects.get(image_LNID=image_LNID)
obj.image_name = results[2]
obj.keywords = keywords
obj.cright = copyright
obj.profile = profile
obj.save()
logging.info( "Keyword updated successfully %(image)s" % {'image':image_LNID})
except Keyword.DoesNotExist:
try:
obj = Keyword(image=imageObj,
image_LNID=image_LNID,
keywords=keywords,
image_name=results[2],
cright=copyright,
profile=profile,
image_path=item.replace(settings.APP_CONTENT_ROOT + "/",''))
obj.save()
logging.info( "new Keyword saved from existing data %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error( "Keyword error saving existing data %(inst)s" % {'inst':inst})
try: #Metadata update for existing data
mdObj = Metadata.objects.get(image_LNID=image_LNID)
mdObj.description = description
mdObj.keywords = keywords
mdObj.subject = subject
mdObj.creator = creator
mdObj.creator_tool = creator_tool
mdObj.caption_writer = caption_writer
mdObj.instructions = instructions
mdObj.credit = credit
mdObj.source = source
mdObj.location = location
mdObj.city = city
mdObj.provincestate = provincestate
mdObj.country = country
mdObj.headline = group_status
mdObj.datetimeoriginal = datetimeoriginal
mdObj.softdate = softdate
mdObj.copyright = copyright
mdObj.profile = profile
mdObj.title = title
mdObj.author = author
mdObj.album = album
mdObj.orientation = orientation
mdObj.file_type = file_type
mdObj.mime_type = mime_type
mdObj.document = managedfromfilepath
mdObj.documentname = documentname
try:
mdObj.save()
logging.info( "Metadata updated successfully from existing data %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error("Metadata update error from existing data %(d)s %(inst)s" % {'d': datetimeoriginal, 'inst': inst})
except Metadata.DoesNotExist:
try:
mdObj = Metadata(
image=imageObj,
image_LNID=image_LNID,
keyword=obj,
description=description,
keywords=keywords,
subject=subject,
creator=creator,
creator_tool=creator_tool,
caption_writer=caption_writer,
instructions=instructions,
credit=credit,
source=source,
location=location,
city=city,
provincestate=provincestate,
country=country,
headline=group_status,
datetimeoriginal=datetimeoriginal,
softdate=softdate,
copyright=copyright,
profile=profile,
title=title,
author=author,
album=album,
orientation=orientation,
file_type=file_type,
mime_type=mime_type,
document=managedfromfilepath,
documentname=documentname)
mdObj.save()
logging.info( "new Metadata saved from existing data %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error( "Metadata save error form existing data (1) %(inst)s" % {'inst':inst})
try: # Album update for existing data (case: content manager adds the file to an Album through the host's file system)
logging.info("Checking for the existence of album data...")
if group_status:
if documentname:
a = Album.objects.filter(image=imageObj) # Check if the image is already in an Album
if not a:
logging.info("This item %s doesn't seem to be part of an Album" % item)
new_album = Album.objects.filter(album_name=documentname)
if new_album:
albumObj = Album.objects.filter(album_name=documentname)[0]
logging.info("At least one Album with this documentname already exists, adding the item %s to it..." % item)
albumObj.image.add(imageObj)
mdObj.album = albumObj.album_identifier
mdObj.save()
else:
logging.info("Contructing a new Album for %s" % item)
album_identifier = ''.join(['album-',strftime("%Y%m%d%H%M%S")]) # Build an album_identifier string
albumObj, created = Album.objects.get_or_create(album_identifier=album_identifier, album_name=documentname)
albumObj.save()
albumObj.image.add(imageObj)
mdObj.album=albumObj.album_identifier
mdObj.save()
logging.info("Album %s constructed for item %s" % (albumObj.album_identifier, item)) if created else logging.info("Existing album %s updated with %s" % (albumObj.album_identifier, item))
else: logging.info("This item is already part of an album")
else: logging.warning("No documentname value, checking for album data aborted")
else: logging.warning("No group_status value, checking for album data aborted")
except Exception, inst:
logging.error("Error trying to construct an Album from item %s %s" % (image_LNID, inst))
except Image.DoesNotExist: # No matching image_LNID, so we must be dealing with a completely new file
imageObj = Image(
image_LNID=image_LNID,
image_path=results[1],
image_name=results[2],
image_real_name=results[3],
image_real_path=item.replace(settings.APP_CONTENT_ROOT + "/",''),
group_status=group_status,
date_created=createdate,
date_modified=modifydate,
date_entered=datetime.datetime.now(),
image_category=results[4],
image_pages=results[5] )
try:
imageObj.save()
logging.info( "new Image saved %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error( "error saving new Image %(inst)s" % {'inst':inst})
continue
try: # Album update for new image (case: content manager adds the file to an Album through the host's file system)
logging.info("Checking for the existence of album data for new item...")
if group_status:
if documentname:
a = Album.objects.filter(image=imageObj) # Check if the image is already in an Album
if not a:
logging.info("The item %s isn't part of an album" % item)
new_album = Album.objects.filter(album_name=documentname) # now get an album with the same documentname
if new_album:
albumObj = Album.objects.filter(album_name=documentname)[0]
logging.info("At least one Album with this documentname already exists, adding the item %s to it..." % item)
albumObj.image.add(imageObj)
mdObj_album = albumObj.album_identifier # save this to a var in order to assign to metadata obj later
else:
logging.info("Contructing a new Album for %s" % item)
album_identifier = ''.join(['album-',strftime("%Y%m%d%H%M%S")]) # Build an album_identifier string
albumObj, created = Album.objects.get_or_create(album_identifier=album_identifier, album_name=documentname)
albumObj.save()
albumObj.image.add(imageObj)
mdObj_album=albumObj.album_identifier # save this to a var in order to assign to metadata obj later
logging.info("Album %s constructed for item %s" % (albumObj.album_identifier, item)) if created else logging.info("Existing album %s updated with %s" % (albumObj.album_identifier, item))
else: logging.info("Is this NEW item already part of an album?")
else: logging.warning("No documentname value, checking for album data aborted")
else: logging.warning("No group_status value, checking for album data aborted")
except Exception, inst:
logging.error("Error trying to construct an Album for new item %s %s" % (image_LNID, inst))
try: # Is this file known to Keyword?
obj = Keyword.objects.get(image_LNID=image_LNID)
obj.image_name = results[2]
obj.keywords = keywords
obj.cright = copyright
obj.profile = profile
obj.save()
logging.info( "Keyword saved %(image)s" % {'image':image_LNID})
except Keyword.DoesNotExist:
try:
obj = Keyword(image=imageObj,
image_LNID=image_LNID,
keywords=keywords,
image_name=results[2],
cright=copyright,
profile=profile,
image_path=item.replace(settings.APP_CONTENT_ROOT + "/",''))
obj.save()
logging.info( "new Keyword saved %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error( "Keyword edit error %(inst)s" % {'inst':inst})
try: # Is this file known to Metadata?
mdObj = Metadata.objects.get(image_LNID=image_LNID)
mdObj.description = description
mdObj.keywords = keywords
mdObj.subject = subject
mdObj.creator = creator
mdObj.creator_tool = creator_tool
mdObj.caption_writer = caption_writer
mdObj.instructions = instructions
mdObj.credit = credit
mdObj.source = source
mdObj.location = location
mdObj.city = city
mdObj.provincestate = provincestate
mdObj.country = country
mdObj.headline = group_status
mdObj.datetimeoriginal = datetimeoriginal
mdObj.softdate = softdate
mdObj.copyright = copyright
mdObj.profile = profile
mdObj.title = title
mdObj.author = author
mdObj.album = mdObj_album if mdObj_album else album if album else ''
mdObj.orientation = orientation
mdObj.file_type = file_type
mdObj.mime_type = mime_type
mdObj.document = managedfromfilepath
mdObj.documentname = documentname
try:
mdObj.save()
logging.info( "Metadata saved %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error("Metadata edit error %(d)s %(inst)s" % {'d': datetimeoriginal, 'inst': inst})
except Metadata.DoesNotExist:
m_album = mdObj_album if mdObj_album else album if album else '' # get album info if it exists
try:
mdObj = Metadata(
image=imageObj,
image_LNID=image_LNID,
keyword=obj,
description=description,
keywords=keywords,
subject=subject,
creator=creator,
creator_tool=creator_tool,
caption_writer=caption_writer,
instructions=instructions,
credit=credit,
source=source,
location=location,
city=city,
provincestate=provincestate,
country=country,
headline=group_status,
datetimeoriginal=datetimeoriginal,
softdate=softdate,
copyright=copyright,
profile=profile,
title=title,
author=author,
album=m_album,
orientation=orientation,
file_type=file_type,
mime_type=mime_type,
document=managedfromfilepath,
documentname=documentname)
mdObj.save()
logging.info( "new Metadata saved %(image)s" % {'image':image_LNID})
except Exception, inst:
logging.error( "Metadata edit error (2) %(inst)s" % {'inst':inst})
for item in removed_files:
if item[(len(item)-4):(len(item)-3)] == "." or item[(len(item)-3):(len(item)-2)] == ".":
# Deletes image data, and also the generated files...
image_LNID = self.extractImage_LNID(item)
try:
Image.objects.get(image_LNID__exact=image_LNID).delete()
logging.info( "Image deleted %(item)s" % {'item':item})
try:
os.remove(gallery_images + image_LNID + ".jpg")
logging.info( "removed from %(path)s" % { 'path':gallery_images})
except Exception, inst:
logging.warning( "Error removing %(path)s %(inst)s" % { 'path': gallery_images + image_LNID + ".jpg", 'inst' :inst})
try:
os.remove(gallery_thumbs + image_LNID + ".jpg")
logging.info( "removed from %(path)s" % { 'path':gallery_thumbs})
except Exception, inst:
logging.warning( "Error removing %(path)s %(inst)s" % { 'path': gallery_thumbs + image_LNID + ".jpg", 'inst' :inst})
try:
os.remove(gallery_minithumbs + image_LNID + ".jpg")
logging.info( "removed from %(path)s" % { 'path':gallery_minithumbs})
except Exception, inst:
logging.warning( "Error removing %(path)s %(inst)s" % { 'path': gallery_minithumbs + image_LNID + ".jpg", 'inst' :inst})
except Exception, inst: logging.warning( "Image delete %(image)s %(inst)s" % {'image':image_LNID, 'inst': inst } )
try:
Keyword.objects.get(image_LNID__exact=image_LNID).delete()
logging.info( "Keyword deleted %(item)s" % {'item':item})
except Exception, inst: logging.warning( "Keyword delete %(image)s %(inst)s" % {'image':image_LNID, 'inst': inst } )
try:
Metadata.objects.get(image_LNID__exact=image_LNID).delete()
logging.info( "Metadata deleted %(item)s" % {'item':item})
except Exception, inst: logging.warning( "Metadata delete %(image)s %(inst)s" % {'image':image_LNID, 'inst': inst } )
# correct permissions
u = utes.Utes()
u.chmodRecursive(os.walk(settings.APP_CONTENT_ROOT), 0667)
logging.info('Permissions %s set.' % settings.APP_CONTENT_ROOT)
logging.info( 'WATCHING %(path)s ***************************************************************************' % {'path':settings.APP_CONTENT_ROOT})
self.watch_directories([settings.APP_CONTENT_ROOT], f, settings.APP_WATCH_DELAY) | 62.747418 | 242 | 0.456753 |
a863a0d8638b53c1d9b5d862a94c45b77724648a | 1,465 | py | Python | build/android/pylib/linker/setup.py | Fusion-Rom/android_external_chromium_org | d8b126911c6ea9753e9f526bee5654419e1d0ebd | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-16T03:57:28.000Z | 2021-01-23T15:29:45.000Z | build/android/pylib/linker/setup.py | Fusion-Rom/android_external_chromium_org | d8b126911c6ea9753e9f526bee5654419e1d0ebd | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2018-02-10T21:00:08.000Z | 2018-03-20T05:09:50.000Z | build/android/pylib/linker/setup.py | Fusion-Rom/android_external_chromium_org | d8b126911c6ea9753e9f526bee5654419e1d0ebd | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2015-04-17T13:19:09.000Z | 2021-10-21T12:55:15.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for linker tests."""
import os
import sys
from pylib import constants
from pylib.linker import test_case
from pylib.linker import test_runner
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
import unittest_util # pylint: disable=F0401
def Setup(options, _devices):
  """Creates a list of test cases and a runner factory.

  Args:
    options: command-line options object; reads ``test_filter``, ``tool``,
      ``push_deps`` and ``cleanup_test_files``.
    _devices: unused (kept for interface compatibility with other setups).

  Returns:
    A tuple of (TestRunnerFactory, tests).
  """
  test_cases = [
      test_case.LinkerLibraryAddressTest,
      test_case.LinkerSharedRelroTest,
      test_case.LinkerRandomizationTest ]
  # Instantiate each test case in both regular and low-memory-device modes.
  low_memory_modes = [False, True]
  all_tests = [t(is_low_memory=m) for t in test_cases for m in low_memory_modes]
  if options.test_filter:
    # Keep only the tests whose qualified names match the user's filter.
    all_test_names = [ test.qualified_name for test in all_tests ]
    filtered_test_names = unittest_util.FilterTestNames(all_test_names,
                                                        options.test_filter)
    all_tests = [t for t in all_tests \
                 if t.qualified_name in filtered_test_names]
  def TestRunnerFactory(device, _shard_index):
    # Shard index is unused here; one runner is built per device.
    return test_runner.LinkerTestRunner(
        device, options.tool, options.push_deps,
        options.cleanup_test_files)
  return (TestRunnerFactory, all_tests)
| 31.847826 | 80 | 0.692833 |
8ae5eb819488adb5a33face232da83125c78c3db | 18,646 | py | Python | ibis/backends/base/sql/alchemy/registry.py | jayceslesar/ibis | 3c950a225883d8077b20497cdc3a9a0fb35a46c9 | [
"Apache-2.0"
] | 1 | 2022-03-22T10:39:37.000Z | 2022-03-22T10:39:37.000Z | ibis/backends/base/sql/alchemy/registry.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | null | null | null | ibis/backends/base/sql/alchemy/registry.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | null | null | null | import functools
import operator
from typing import Any, Dict
import sqlalchemy as sa
import sqlalchemy.sql as sql
import ibis
import ibis.common.exceptions as com
import ibis.expr.analysis as L
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.window as W
from ibis.backends.base.sql.alchemy.database import AlchemyTable
from ibis.backends.base.sql.alchemy.geospatial import geospatial_supported
def variance_reduction(func_name):
    """Return a translator for a variance-style reduction.

    Maps the ibis ``how`` argument ('sample'/'pop') onto the SQL
    ``<func_name>_samp`` / ``<func_name>_pop`` function names.
    """
    how_to_suffix = {'sample': 'samp', 'pop': 'pop'}

    def compile_variance(t, expr):
        arg, how, where = expr.op().args

        # Booleans cannot be aggregated directly; widen to int32 first.
        if arg.type().equals(dt.boolean):
            arg = arg.cast('int32')

        sa_func = getattr(
            sa.func, '{}_{}'.format(func_name, how_to_suffix.get(how, 'samp'))
        )

        if where is not None:
            # Emulate a filtered aggregate by NULL-ing out excluded rows.
            arg = where.ifelse(arg, None)

        return sa_func(t.translate(arg))

    return compile_variance
def infix_op(infix_sym):
    """Return a translator that joins two translated operands with the
    SQL infix operator ``infix_sym``."""

    def formatter(t, expr):
        lhs, rhs = expr.op().args
        return t.translate(lhs).op(infix_sym)(t.translate(rhs))

    return formatter
def fixed_arity(sa_func, arity):
    """Return a translator applying ``sa_func`` to exactly ``arity`` args.

    ``sa_func`` may be given by name, in which case it is looked up on
    ``sa.func`` once, up front.
    """
    if isinstance(sa_func, str):
        sa_func = getattr(sa.func, sa_func)

    def formatter(t, expr):
        op_args = expr.op().args
        if len(op_args) != arity:
            raise com.IbisError('incorrect number of args')
        return _varargs_call(sa_func, t, op_args)

    return formatter
def _varargs_call(sa_func, t, args):
trans_args = []
for raw_arg in args:
arg = t.translate(raw_arg)
try:
arg = arg.scalar_subquery()
except AttributeError:
pass
trans_args.append(arg)
return sa_func(*trans_args)
def varargs(sa_func):
    """Return a translator that forwards every element of ``expr.op().arg``
    to ``sa_func`` (for variadic SQL functions such as COALESCE)."""
    def formatter(t, expr):
        op_arg = expr.op().arg
        return _varargs_call(sa_func, t, op_arg)
    return formatter
def get_sqla_table(ctx, table):
    """Resolve an ibis table expression to its SQLAlchemy counterpart.

    Prefers a table already registered in ``ctx`` (searching parent
    contexts); otherwise unwraps an ``AlchemyTable`` or compiles the
    expression.
    """
    if ctx.has_ref(table, parent_contexts=True):
        ctx_level = ctx
        sa_table = ctx_level.get_ref(table)
        # Walk up the context chain until some level actually holds the
        # reference; the root context is its own parent, which terminates
        # the loop.
        while sa_table is None and ctx_level.parent is not ctx_level:
            ctx_level = ctx_level.parent
            sa_table = ctx_level.get_ref(table)
    else:
        op = table.op()
        if isinstance(op, AlchemyTable):
            # Physical table: use the wrapped SQLAlchemy table directly.
            sa_table = op.sqla_table
        else:
            sa_table = ctx.get_compiled_expr(table)
    return sa_table
def get_col_or_deferred_col(sa_table, colname):
    """Look up ``colname`` on ``sa_table`` or build a deferred column.

    When selecting a column out of a join (a join expression cached during
    join traversal), the SQLAlchemy ``Join`` object is not selectable, so
    the column may not be resolvable yet.  At this point the name is known
    to be unambiguous, so a bare ``sa.column(colname)`` is safe: the query
    builder later assembles roughly
    ``sa.select([sa.column(colname)]).select_from(table_set)`` where
    ``table_set`` is ``sa_table``.
    """
    try:
        return sa_table.exported_columns[colname]
    except KeyError:
        # Deferred column; resolved once attached to a FROM clause.
        return sa.column(colname)
def _table_column(t, expr):
    """Translate a column reference, formatting it as a subquery when the
    column's table does not belong to the current SELECT context."""
    op = expr.op()
    ctx = t.context
    table = op.table

    sa_table = get_sqla_table(ctx, table)

    out_expr = get_col_or_deferred_col(sa_table, op.name)

    # If the column does not originate from the table set in the current SELECT
    # context, we should format as a subquery
    if t.permit_subquery and ctx.is_foreign_expr(table):
        try:
            subq = sa_table.subquery()
        except AttributeError:
            # NOTE(review): fallback for objects without .subquery()
            # (presumably already subquery-like) — confirm.
            subq = sa_table
        return sa.select(subq.c[out_expr.name])

    return out_expr
def _table_array_view(t, expr):
ctx = t.context
table = ctx.get_compiled_expr(expr.op().table)
return table
def _exists_subquery(t, expr):
    """Translate (NOT) EXISTS as a correlated subquery over the foreign
    table, filtered by the subquery predicates."""
    # Imported here to avoid a circular import with the query builder.
    from ibis.backends.base.sql.alchemy.query_builder import AlchemyCompiler

    op = expr.op()
    ctx = t.context

    # Build: SELECT 1 FROM foreign_table WHERE <predicates>
    filtered = op.foreign_table.filter(op.predicates).projection(
        [ir.literal(1).name(ir.core.unnamed)]
    )

    sub_ctx = ctx.subcontext()
    clause = AlchemyCompiler.to_sql(filtered, sub_ctx, exists=True)

    # The same translator handles both EXISTS and NOT EXISTS.
    if isinstance(op, ops.NotExistsSubquery):
        clause = sa.not_(clause)

    return clause
def _cast(t, expr):
    """Translate an ibis cast to ``sa.cast``."""
    op = expr.op()
    arg, target_type = op.args
    sa_arg = t.translate(arg)
    sa_type = t.get_sqla_type(target_type)

    # Category values are already integer codes, so casting one to int32 is
    # a no-op at the SQL level.
    if isinstance(arg, ir.CategoryValue) and target_type == 'int32':
        return sa_arg
    else:
        return sa.cast(sa_arg, sa_type)
def _contains(t, expr):
op = expr.op()
left, right = (t.translate(arg) for arg in op.args)
return left.in_(right)
def _not_contains(t, expr):
return sa.not_(_contains(t, expr))
def reduction(sa_func):
    """Return a translator for a simple aggregate with an optional filter.

    A ``where`` predicate is emulated by replacing excluded rows with NULL
    before aggregating, since plain SQL aggregates skip NULLs.
    """

    def formatter(t, expr):
        op = expr.op()
        target = op.arg if op.where is None else op.where.ifelse(op.arg, ibis.NA)
        return sa_func(t.translate(target))

    return formatter
def _group_concat(t, expr):
    """Translate GROUP_CONCAT with an optional filter; filtered-out rows
    are replaced with NULL so the aggregate skips them."""
    op = expr.op()
    sep = t.translate(op.sep)
    target = op.arg if op.where is None else op.where.ifelse(op.arg, ibis.NA)
    return sa.func.group_concat(t.translate(target), sep)
def _alias(t, expr):
# just compile the underlying argument because the naming is handled
# by the translator for the top level expression
op = expr.op()
return t.translate(op.arg)
def _literal(t, expr):
    """Translate a literal; set literals become a list of SQLAlchemy
    literals, everything else a single one."""
    value = expr.op().value
    if isinstance(expr.type(), dt.Set):
        return [sa.literal(element) for element in value]
    return sa.literal(value)
def _value_list(t, expr):
return [t.translate(x) for x in expr.op().values]
def _is_null(t, expr):
    """Translate IS NULL."""
    return t.translate(expr.op().args[0]).is_(sa.null())


def _not_null(t, expr):
    """Translate IS NOT NULL."""
    return t.translate(expr.op().args[0]).isnot(sa.null())
def _round(t, expr):
    """Translate ROUND, with or without an explicit digit count."""
    arg, digits = expr.op().args
    sa_args = [t.translate(arg)]
    if digits is not None:
        sa_args.append(t.translate(digits))
    return sa.func.round(*sa_args)
def _floor_divide(t, expr):
    """Translate floor division as floor(numerator / denominator)."""
    numerator, denominator = (t.translate(arg) for arg in expr.op().args)
    return sa.func.floor(numerator / denominator)
def _count_distinct(t, expr):
    """Translate COUNT(DISTINCT ...) with an optional filter; filtered-out
    rows become NULL and are therefore not counted."""
    arg, where = expr.op().args
    target = arg if where is None else where.ifelse(arg, None)
    return sa.func.count(t.translate(target).distinct())
def _simple_case(t, expr):
    """Translate ``CASE <base> WHEN ...`` by rewriting each case as an
    equality test against the base expression."""
    op = expr.op()
    conditions = [op.base == case for case in op.cases]
    return _translate_case(t, conditions, op.results, op.default)


def _searched_case(t, expr):
    """Translate ``CASE WHEN <boolean> ...``."""
    op = expr.op()
    return _translate_case(t, op.cases, op.results, op.default)


def _translate_case(t, cases, results, default):
    """Build the SQLAlchemy CASE construct shared by both case forms."""
    whens = [
        (t.translate(case), t.translate(result))
        for case, result in zip(cases, results)
    ]
    return sa.case(whens, else_=t.translate(default))
def _negate(t, expr):
    """Translate negation: logical NOT for boolean expressions, arithmetic
    minus otherwise."""
    (arg,) = map(t.translate, expr.op().args)
    if isinstance(expr, ir.BooleanValue):
        return sa.not_(arg)
    return -arg
def unary(sa_func):
    """Shortcut for a one-argument ``fixed_arity`` translator."""
    return fixed_arity(sa_func, 1)
def _string_like(method_name, t, expr):
op = expr.op()
method = getattr(t.translate(op.arg), method_name)
return method(t.translate(op.pattern), escape=op.escape)
def _startswith(t, expr):
arg, start = expr.op().args
return t.translate(arg).startswith(t.translate(start))
def _endswith(t, expr):
arg, start = expr.op().args
return t.translate(arg).endswith(t.translate(start))
# Cumulative ops have no direct SQLAlchemy equivalent; each one maps to the
# plain reduction that is then evaluated over a cumulative window (see
# _cumulative_to_window).
_cumulative_to_reduction = {
    ops.CumulativeSum: ops.Sum,
    ops.CumulativeMin: ops.Min,
    ops.CumulativeMax: ops.Max,
    ops.CumulativeMean: ops.Mean,
    ops.CumulativeAny: ops.Any,
    ops.CumulativeAll: ops.All,
}
def _cumulative_to_window(translator, expr, window):
    """Rewrite a cumulative op as its plain reduction evaluated over a
    cumulative window, preserving the original grouping and ordering."""
    win = W.cumulative_window()
    win = win.group_by(window._group_by).order_by(window._order_by)

    op = expr.op()

    # Map e.g. CumulativeSum -> Sum and rebuild the expression with the
    # same arguments.
    klass = _cumulative_to_reduction[type(op)]
    new_op = klass(*op.args)
    new_expr = new_op.to_expr()
    if expr.has_name():
        new_expr = new_expr.name(expr.get_name())

    # Apply any translator-specific rewrite registered for the reduction.
    if type(new_op) in translator._rewrites:
        new_expr = translator._rewrites[type(new_op)](new_expr)

    return L.windowize_function(new_expr, win)
def _window(t, expr):
    """Translate an ibis windowed (OVER) expression to SQLAlchemy.

    Handles rewriting of cumulative ops, implicit ORDER BY requirements of
    ranking functions, frame-clause construction, and conversion from SQL's
    1-based ranks to the 0-based ranks ibis exposes.
    """
    op = expr.op()

    arg, window = op.args
    reduction = t.translate(arg)

    window_op = arg.op()

    # These ranking functions are meaningless without an ordering.
    _require_order_by = (
        ops.DenseRank,
        ops.MinRank,
        ops.NTile,
        ops.PercentRank,
    )

    if isinstance(window_op, ops.CumulativeOp):
        # Cumulative ops are re-expressed as plain reductions over a
        # cumulative window and translated from scratch.
        arg = _cumulative_to_window(t, arg, window)
        return t.translate(arg)

    if window.max_lookback is not None:
        raise NotImplementedError(
            'Rows with max lookback is not implemented '
            'for SQLAlchemy-based backends.'
        )

    # Some analytic functions need to have the expression of interest in
    # the ORDER BY part of the window clause
    if isinstance(window_op, _require_order_by) and not window._order_by:
        order_by = t.translate(window_op.args[0])
    else:
        order_by = list(map(t.translate, window._order_by))

    partition_by = list(map(t.translate, window._group_by))

    # These functions take no ROWS/RANGE frame clause.
    frame_clause_not_allowed = (
        ops.Lag,
        ops.Lead,
        ops.DenseRank,
        ops.MinRank,
        ops.NTile,
        ops.PercentRank,
        ops.RowNumber,
    )

    # SQLAlchemy's Over construct spells the RANGE frame keyword 'range_'.
    how = {'range': 'range_'}.get(window.how, window.how)
    preceding = window.preceding
    additional_params = (
        {}
        if isinstance(window_op, frame_clause_not_allowed)
        else {
            how: (
                # SQLAlchemy expects negative offsets for PRECEDING.
                -preceding if preceding is not None else preceding,
                window.following,
            )
        }
    )
    result = reduction.over(
        partition_by=partition_by, order_by=order_by, **additional_params
    )

    if isinstance(
        window_op, (ops.RowNumber, ops.DenseRank, ops.MinRank, ops.NTile)
    ):
        # SQL ranking functions are 1-based; ibis uses 0-based ranks.
        return result - 1
    else:
        return result
def _lag(t, expr):
arg, offset, default = expr.op().args
if default is not None:
raise NotImplementedError()
sa_arg = t.translate(arg)
sa_offset = t.translate(offset) if offset is not None else 1
return sa.func.lag(sa_arg, sa_offset)
def _lead(t, expr):
arg, offset, default = expr.op().args
if default is not None:
raise NotImplementedError()
sa_arg = t.translate(arg)
sa_offset = t.translate(offset) if offset is not None else 1
return sa.func.lead(sa_arg, sa_offset)
def _ntile(t, expr):
    """Translate ``ntile``; only the bucket count is passed to SQL."""
    op = expr.op()
    args = op.args

    # NOTE(review): the translated first operand is discarded — ntile only
    # takes the bucket count. Confirm the extra translate call has no
    # required side effects before simplifying.
    arg, buckets = map(t.translate, args)
    return sa.func.ntile(buckets)
def _sort_key(t, expr):
    """Translate a sort key to asc()/desc(); needed for window functions
    that carry an ORDER BY."""
    by, ascending = expr.op().args
    direction = sa.asc if ascending else sa.desc
    return direction(t.translate(by))
def _string_join(t, expr):
    """Translate a string join as concat_ws(sep, *elements)."""
    sep, elements = expr.op().args
    sa_elements = [t.translate(element) for element in elements]
    return sa.func.concat_ws(t.translate(sep), *sa_elements)
# Default mapping from ibis operation types to their SQLAlchemy translation
# functions.
sqlalchemy_operation_registry: Dict[Any, Any] = {
    ops.Alias: _alias,
    ops.And: fixed_arity(sql.and_, 2),
    ops.Or: fixed_arity(sql.or_, 2),
    ops.Not: unary(sa.not_),
    ops.Abs: unary(sa.func.abs),
    ops.Cast: _cast,
    ops.Coalesce: varargs(sa.func.coalesce),
    ops.NullIf: fixed_arity(sa.func.nullif, 2),
    ops.Contains: _contains,
    ops.NotContains: _not_contains,
    # reductions (optionally filtered via a `where` predicate)
    ops.Count: reduction(sa.func.count),
    ops.Sum: reduction(sa.func.sum),
    ops.Mean: reduction(sa.func.avg),
    ops.Min: reduction(sa.func.min),
    ops.Max: reduction(sa.func.max),
    ops.CountDistinct: _count_distinct,
    ops.GroupConcat: _group_concat,
    ops.Between: fixed_arity(sa.between, 3),
    ops.IsNull: _is_null,
    ops.NotNull: _not_null,
    ops.Negate: _negate,
    ops.Round: _round,
    ops.TypeOf: unary(sa.func.typeof),
    ops.Literal: _literal,
    ops.ValueList: _value_list,
    ops.NullLiteral: lambda *_: sa.null(),
    ops.SimpleCase: _simple_case,
    ops.SearchedCase: _searched_case,
    ops.TableColumn: _table_column,
    ops.TableArrayView: _table_array_view,
    ops.ExistsSubquery: _exists_subquery,
    ops.NotExistsSubquery: _exists_subquery,
    # miscellaneous varargs
    ops.Least: varargs(sa.func.least),
    ops.Greatest: varargs(sa.func.greatest),
    # string
    ops.LPad: fixed_arity(sa.func.lpad, 3),
    ops.RPad: fixed_arity(sa.func.rpad, 3),
    ops.Strip: unary(sa.func.trim),
    ops.LStrip: unary(sa.func.ltrim),
    ops.RStrip: unary(sa.func.rtrim),
    ops.Repeat: fixed_arity(sa.func.repeat, 2),
    ops.Reverse: unary(sa.func.reverse),
    ops.StrRight: fixed_arity(sa.func.right, 2),
    ops.Lowercase: unary(sa.func.lower),
    ops.Uppercase: unary(sa.func.upper),
    ops.StringAscii: unary(sa.func.ascii),
    ops.StringLength: unary(sa.func.length),
    ops.StringJoin: _string_join,
    ops.StringReplace: fixed_arity(sa.func.replace, 3),
    ops.StringSQLLike: functools.partial(_string_like, "like"),
    ops.StringSQLILike: functools.partial(_string_like, "ilike"),
    ops.StartsWith: _startswith,
    ops.EndsWith: _endswith,
    ops.StringConcat: varargs(sa.func.concat),
    # math
    ops.Ln: unary(sa.func.ln),
    ops.Exp: unary(sa.func.exp),
    ops.Sign: unary(sa.func.sign),
    ops.Sqrt: unary(sa.func.sqrt),
    ops.Ceil: unary(sa.func.ceil),
    ops.Floor: unary(sa.func.floor),
    ops.Power: fixed_arity(sa.func.pow, 2),
    ops.FloorDivide: _floor_divide,
    # other
    ops.SortKey: _sort_key,
    ops.Date: unary(lambda arg: sa.cast(arg, sa.DATE)),
    ops.DateFromYMD: fixed_arity(sa.func.date, 3),
    ops.TimeFromHMS: fixed_arity(sa.func.time, 3),
    ops.TimestampFromYMDHMS: lambda t, expr: sa.func.make_timestamp(
        *map(t.translate, expr.op().args[:6])  # ignore timezone
    ),
}
# TODO: unit tests for each of these
# Binary operations; each entry is registered generically via
# fixed_arity(op, 2) in the loop at the bottom of this module.
_binary_ops = {
    # Binary arithmetic
    ops.Add: operator.add,
    ops.Subtract: operator.sub,
    ops.Multiply: operator.mul,
    # XXX `ops.Divide` is overwritten in `translator.py` with a custom
    # function `_true_divide`, but for some reason both are required
    ops.Divide: operator.truediv,
    ops.Modulus: operator.mod,
    # Comparisons
    ops.Equals: operator.eq,
    ops.NotEquals: operator.ne,
    ops.Less: operator.lt,
    ops.LessEqual: operator.le,
    ops.Greater: operator.gt,
    ops.GreaterEqual: operator.ge,
    ops.IdenticalTo: lambda x, y: x.op('IS NOT DISTINCT FROM')(y),
    # Boolean comparisons
    # TODO
}
# Translators for analytic/window operations.
sqlalchemy_window_functions_registry = {
    ops.Lag: _lag,
    ops.Lead: _lead,
    ops.NTile: _ntile,
    ops.FirstValue: unary(sa.func.first_value),
    ops.LastValue: unary(sa.func.last_value),
    ops.RowNumber: fixed_arity(lambda: sa.func.row_number(), 0),
    # Ranking functions ignore their ibis argument; it is only used to
    # establish the implicit ORDER BY (see _window).
    ops.DenseRank: unary(lambda arg: sa.func.dense_rank()),
    ops.MinRank: unary(lambda arg: sa.func.rank()),
    ops.PercentRank: unary(lambda arg: sa.func.percent_rank()),
    ops.WindowOp: _window,
    ops.CumulativeOp: _window,
    ops.CumulativeMax: unary(sa.func.max),
    ops.CumulativeMin: unary(sa.func.min),
    ops.CumulativeSum: unary(sa.func.sum),
    ops.CumulativeMean: unary(sa.func.avg),
}
# Register geospatial translators only when geospatial support is available.
if geospatial_supported:
    _geospatial_functions = {
        ops.GeoArea: unary(sa.func.ST_Area),
        ops.GeoAsBinary: unary(sa.func.ST_AsBinary),
        ops.GeoAsEWKB: unary(sa.func.ST_AsEWKB),
        ops.GeoAsEWKT: unary(sa.func.ST_AsEWKT),
        ops.GeoAsText: unary(sa.func.ST_AsText),
        ops.GeoAzimuth: fixed_arity(sa.func.ST_Azimuth, 2),
        ops.GeoBuffer: fixed_arity(sa.func.ST_Buffer, 2),
        ops.GeoCentroid: unary(sa.func.ST_Centroid),
        ops.GeoContains: fixed_arity(sa.func.ST_Contains, 2),
        ops.GeoContainsProperly: fixed_arity(sa.func.ST_Contains, 2),
        ops.GeoCovers: fixed_arity(sa.func.ST_Covers, 2),
        ops.GeoCoveredBy: fixed_arity(sa.func.ST_CoveredBy, 2),
        ops.GeoCrosses: fixed_arity(sa.func.ST_Crosses, 2),
        ops.GeoDFullyWithin: fixed_arity(sa.func.ST_DFullyWithin, 3),
        ops.GeoDifference: fixed_arity(sa.func.ST_Difference, 2),
        ops.GeoDisjoint: fixed_arity(sa.func.ST_Disjoint, 2),
        ops.GeoDistance: fixed_arity(sa.func.ST_Distance, 2),
        ops.GeoDWithin: fixed_arity(sa.func.ST_DWithin, 3),
        ops.GeoEndPoint: unary(sa.func.ST_EndPoint),
        ops.GeoEnvelope: unary(sa.func.ST_Envelope),
        ops.GeoEquals: fixed_arity(sa.func.ST_Equals, 2),
        ops.GeoGeometryN: fixed_arity(sa.func.ST_GeometryN, 2),
        ops.GeoGeometryType: unary(sa.func.ST_GeometryType),
        ops.GeoIntersection: fixed_arity(sa.func.ST_Intersection, 2),
        ops.GeoIntersects: fixed_arity(sa.func.ST_Intersects, 2),
        ops.GeoIsValid: unary(sa.func.ST_IsValid),
        ops.GeoLineLocatePoint: fixed_arity(sa.func.ST_LineLocatePoint, 2),
        ops.GeoLineMerge: unary(sa.func.ST_LineMerge),
        ops.GeoLineSubstring: fixed_arity(sa.func.ST_LineSubstring, 3),
        ops.GeoLength: unary(sa.func.ST_Length),
        ops.GeoNPoints: unary(sa.func.ST_NPoints),
        ops.GeoOrderingEquals: fixed_arity(sa.func.ST_OrderingEquals, 2),
        ops.GeoOverlaps: fixed_arity(sa.func.ST_Overlaps, 2),
        ops.GeoPerimeter: unary(sa.func.ST_Perimeter),
        ops.GeoSimplify: fixed_arity(sa.func.ST_Simplify, 3),
        ops.GeoSRID: unary(sa.func.ST_SRID),
        ops.GeoSetSRID: fixed_arity(sa.func.ST_SetSRID, 2),
        ops.GeoStartPoint: unary(sa.func.ST_StartPoint),
        ops.GeoTouches: fixed_arity(sa.func.ST_Touches, 2),
        ops.GeoTransform: fixed_arity(sa.func.ST_Transform, 2),
        ops.GeoUnaryUnion: unary(sa.func.ST_Union),
        ops.GeoUnion: fixed_arity(sa.func.ST_Union, 2),
        ops.GeoWithin: fixed_arity(sa.func.ST_Within, 2),
        ops.GeoX: unary(sa.func.ST_X),
        ops.GeoY: unary(sa.func.ST_Y),
        # Missing Geospatial ops:
        #   ST_AsGML
        #   ST_AsGeoJSON
        #   ST_AsKML
        #   ST_AsRaster
        #   ST_AsSVG
        #   ST_AsTWKB
        #   ST_Distance_Sphere
        #   ST_Dump
        #   ST_DumpPoints
        #   ST_GeogFromText
        #   ST_GeomFromEWKB
        #   ST_GeomFromEWKT
        #   ST_GeomFromText
    }
else:
    _geospatial_functions = {}
# Register every binary operator with a generic two-argument translator.
for _k, _v in _binary_ops.items():
    sqlalchemy_operation_registry[_k] = fixed_arity(_v, 2)
| 29.088924 | 79 | 0.658694 |
46bbfe2d9ee2f8e4cd5c12c3fdb821188de09fa4 | 646 | py | Python | python/py-strings/designer-door-mat.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 41 | 2018-05-11T07:54:34.000Z | 2022-03-29T19:02:32.000Z | python/py-strings/designer-door-mat.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 2 | 2021-09-13T10:03:26.000Z | 2021-10-04T10:21:05.000Z | python/py-strings/designer-door-mat.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 21 | 2019-01-23T19:06:59.000Z | 2021-12-23T16:03:47.000Z | # Python > Strings > Designer Door Mat
# Print a designer door mat.
#
# https://www.hackerrank.com/challenges/designer-door-mat/problem
#
"""
---------.|.---------
------.|..|..|.------
---.|..|..|..|..|.---
-------WELCOME-------
---.|..|..|..|..|.---
------.|..|..|.------
---------.|.---------
"""
N, M = map(int, input().split())
# Top half: rows with 1, 3, 5, ... ".|." cells, each centered in a field of
# '-' characters of width M; the bottom half mirrors the top around WELCOME.
top_half = [('.|.' * width).center(M, '-') for width in range(1, N, 2)]
print('\n'.join(top_half + ['WELCOME'.center(M, '-')] + top_half[::-1]))
a8b3694a45b068aeb02eea4effd7cfd86c919b74 | 2,531 | py | Python | ipywidgets/widgets/widget_selectioncontainer.py | pbugnion/ipywidgets | fd18aa95fe4d7ca71c27ba99b8f7b90242fd3edc | [
"BSD-3-Clause"
] | null | null | null | ipywidgets/widgets/widget_selectioncontainer.py | pbugnion/ipywidgets | fd18aa95fe4d7ca71c27ba99b8f7b90242fd3edc | [
"BSD-3-Clause"
] | 3 | 2020-01-18T12:26:26.000Z | 2020-01-20T13:17:32.000Z | ipywidgets/widgets/widget_selectioncontainer.py | pbugnion/ipywidgets | fd18aa95fe4d7ca71c27ba99b8f7b90242fd3edc | [
"BSD-3-Clause"
] | 1 | 2021-01-28T05:58:42.000Z | 2021-01-28T05:58:42.000Z | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""SelectionContainer class.
Represents a multipage container that can be used to group other widgets into
pages.
"""
from .widget_box import Box
from .widget import register
from .widget_core import CoreWidget
from traitlets import Unicode, Dict, CInt, TraitError, validate
class _SelectionContainer(Box, CoreWidget):
    """Base class used to display multiple child widgets."""

    # Page titles keyed by the page index; JSON object keys are strings, so
    # indices are stored as str(int) (see set_title/get_title).
    _titles = Dict(help="Titles of the pages").tag(sync=True)
    selected_index = CInt(
        help="""The index of the selected page. This is either an integer selecting a particular sub-widget, or None to have no widgets selected.""",
        allow_none=True
    ).tag(sync=True)

    @validate('selected_index')
    def _validated_index(self, proposal):
        # Accept None (no selection) or any in-range page index; reject
        # everything else before it reaches the frontend.
        if proposal.value is None or 0 <= proposal.value < len(self.children):
            return proposal.value
        else:
            raise TraitError('Invalid selection: index out of bounds')

    # Public methods
    def set_title(self, index, title):
        """Sets the title of a container page.

        Parameters
        ----------
        index : int
            Index of the container page
        title : unicode
            New title
        """
        # JSON dictionaries have string keys, so we convert index to a string
        index = str(int(index))
        self._titles[index] = title
        # Explicitly push the mutated _titles dict to the frontend.
        self.send_state('_titles')

    def get_title(self, index):
        """Gets the title of a container page, or None if unset.

        Parameters
        ----------
        index : int
            Index of the container page
        """
        # JSON dictionaries have string keys, so we convert index to a string
        index = str(int(index))
        if index in self._titles:
            return self._titles[index]
        else:
            return None

    def _repr_keys(self):
        # We also need to include _titles in repr for reproducibility
        yield from super()._repr_keys()
        if self._titles:
            yield '_titles'
@register
class Accordion(_SelectionContainer):
    """Displays children each on a separate accordion page."""
    _view_name = Unicode('AccordionView').tag(sync=True)
    _model_name = Unicode('AccordionModel').tag(sync=True)
@register
class Tab(_SelectionContainer):
    """Displays children each on a separate tab page."""
    _view_name = Unicode('TabView').tag(sync=True)
    _model_name = Unicode('TabModel').tag(sync=True)
| 31.246914 | 149 | 0.649151 |
ff4d6b8de026b40844df66fda40d6295234b35ba | 8,676 | py | Python | cupyx/scipy/ndimage/morphology.py | xf3227/cupy | 989ef4cd8889ea5309af288beba6814584969930 | [
"MIT"
] | 1 | 2020-03-30T20:56:52.000Z | 2020-03-30T20:56:52.000Z | cupyx/scipy/ndimage/morphology.py | xf3227/cupy | 989ef4cd8889ea5309af288beba6814584969930 | [
"MIT"
] | null | null | null | cupyx/scipy/ndimage/morphology.py | xf3227/cupy | 989ef4cd8889ea5309af288beba6814584969930 | [
"MIT"
] | null | null | null | import numpy
import cupy
import warnings
from . import filters
def grey_erosion(input, size=None, footprint=None, structure=None, output=None,
                 mode='reflect', cval=0.0, origin=0):
    """Calculates a greyscale erosion.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element
            used for the greyscale erosion. Optional if ``footprint`` or
            ``structure`` is provided.
        footprint (array of ints): Positions of non-infinite elements of a
            flat structuring element used for greyscale erosion. Non-zero
            values give the set of neighbors of the center over which
            minimum is chosen.
        structure (array of ints): Structuring element used for the
            greyscale erosion. ``structure`` may be a non-flat structuring
            element.
        output (cupy.ndarray, dtype or None): The array in which to place
            the output.
        mode (str): The array borders are handled according to the given
            mode (``'reflect'``, ``'constant'``, ``'nearest'``,
            ``'mirror'``, ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls
            the placement of the filter, relative to the center of the
            current element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.

    Returns:
        cupy.ndarray: The result of greyscale erosion.

    .. seealso:: :func:`scipy.ndimage.grey_erosion`
    """
    if all(param is None for param in (size, footprint, structure)):
        raise ValueError('size, footprint or structure must be specified')

    # Greyscale erosion is a minimum filter taken over the structuring
    # element.
    return filters._min_or_max_filter(input, size, footprint, structure,
                                      output, mode, cval, origin, 'min')
def grey_dilation(input, size=None, footprint=None, structure=None,
                  output=None, mode='reflect', cval=0.0, origin=0):
    """Calculates a greyscale dilation.
    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element used
            for the greyscale dilation. Optional if ```footprint``` or
            ```structure``` is provided.
        footprint (array of ints): Positions of non-infinite elements of a flat
            structuring element used for greyscale dilation. Non-zero values
            give the set of neighbors of the center over which maximum is
            chosen.
        structure (array of ints): Structuring element used for the greyscale
            dilation. ```structure``` may be a non-flat structuring element.
        output (cupy.ndarray, dtype or None): The array in which to place the
            output.
        mode (str): The array borders are handled according to the given mode
            (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
            ``'wrap'``). Default is ``'reflect'``.
        cval (scalar): Value to fill past edges of input if mode is
            ``constant``. Default is ``0.0``.
        origin (scalar or tuple of scalar): The origin parameter controls the
            placement of the filter, relative to the center of the current
            element of the input. Default of 0 is equivalent to
            ``(0,)*input.ndim``.
    Returns:
        cupy.ndarray: The result of greyscale dilation.
    .. seealso:: :func:`scipy.ndimage.grey_dilation`
    """
    if size is None and footprint is None and structure is None:
        raise ValueError('size, footprint or structure must be specified')
    # Dilation is implemented as a maximum filter over a *mirrored*
    # structuring element (every axis reversed), mirroring what SciPy's
    # grey_dilation does internally.
    if structure is not None:
        structure = cupy.array(structure)
        structure = structure[tuple([slice(None, None, -1)] * structure.ndim)]
    if footprint is not None:
        footprint = cupy.array(footprint)
        footprint = footprint[tuple([slice(None, None, -1)] * footprint.ndim)]
    # Negate the origin so the mirrored element lines up with the requested
    # placement; even-sized axes need an extra shift of one because the
    # mirror of an even-length window is offset by one sample.
    origin = filters._fix_sequence_arg(origin, input.ndim, 'origin', int)
    for i in range(len(origin)):
        origin[i] = -origin[i]
        if footprint is not None:
            sz = footprint.shape[i]
        elif structure is not None:
            sz = structure.shape[i]
        elif numpy.isscalar(size):
            sz = size
        else:
            sz = size[i]
        if sz % 2 == 0:
            origin[i] -= 1
    return filters._min_or_max_filter(input, size, footprint, structure,
                                      output, mode, cval, origin, 'max')
def grey_closing(input, size=None, footprint=None, structure=None,
                 output=None, mode='reflect', cval=0.0, origin=0):
    """Compute a multi-dimensional greyscale closing.

    A greyscale closing is a dilation followed by an erosion with the same
    structuring element.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element.
            Optional if ``footprint`` or ``structure`` is provided.
        footprint (array of ints): Non-zero values give the set of neighbors
            of the center considered by the closing.
        structure (array of ints): Structuring element; may be non-flat.
        output (cupy.ndarray, dtype or None): Array in which to place the
            output.
        mode (str): Border handling mode (``'reflect'``, ``'constant'``,
            ``'nearest'``, ``'mirror'``, ``'wrap'``). Default ``'reflect'``.
        cval (scalar): Fill value past the edges when ``mode='constant'``.
            Default ``0.0``.
        origin (scalar or tuple of scalar): Placement of the filter relative
            to the center of the current element. ``0`` means centered.

    Returns:
        cupy.ndarray: The result of greyscale closing.

    .. seealso:: :func:`scipy.ndimage.grey_closing`
    """
    # `footprint` wins over `size`; warn so the caller knows `size` is unused.
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set", UserWarning,
                      stacklevel=2)
    dilated = grey_dilation(input, size, footprint, structure, None, mode,
                            cval, origin)
    return grey_erosion(dilated, size, footprint, structure, output, mode,
                        cval, origin)
def grey_opening(input, size=None, footprint=None, structure=None,
                 output=None, mode='reflect', cval=0.0, origin=0):
    """Compute a multi-dimensional greyscale opening.

    A greyscale opening is an erosion followed by a dilation with the same
    structuring element.

    Args:
        input (cupy.ndarray): The input array.
        size (tuple of ints): Shape of a flat and full structuring element.
            Optional if ``footprint`` or ``structure`` is provided.
        footprint (array of ints): Non-zero values give the set of neighbors
            of the center considered by the opening.
        structure (array of ints): Structuring element; may be non-flat.
        output (cupy.ndarray, dtype or None): Array in which to place the
            output.
        mode (str): Border handling mode (``'reflect'``, ``'constant'``,
            ``'nearest'``, ``'mirror'``, ``'wrap'``). Default ``'reflect'``.
        cval (scalar): Fill value past the edges when ``mode='constant'``.
            Default ``0.0``.
        origin (scalar or tuple of scalar): Placement of the filter relative
            to the center of the current element. ``0`` means centered.

    Returns:
        cupy.ndarray: The result of greyscale opening.

    .. seealso:: :func:`scipy.ndimage.grey_opening`
    """
    # `footprint` wins over `size`; warn so the caller knows `size` is unused.
    if size is not None and footprint is not None:
        warnings.warn("ignoring size because footprint is set", UserWarning,
                      stacklevel=2)
    eroded = grey_erosion(input, size, footprint, structure, None, mode,
                          cval, origin)
    return grey_dilation(eroded, size, footprint, structure, output, mode,
                         cval, origin)
| 45.663158 | 79 | 0.613762 |
6e314540161412df074c0489870b8bc32c7b60a4 | 7,943 | py | Python | src/rocommand/test/TestRosrsSync.py | A-Mazurek/ro-manager | e49b6025b89594e036fdb2b56c8b871717b3b620 | [
"MIT-0",
"MIT"
] | 11 | 2015-01-19T04:21:58.000Z | 2019-02-21T11:54:45.000Z | src/rocommand/test/TestRosrsSync.py | A-Mazurek/ro-manager | e49b6025b89594e036fdb2b56c8b871717b3b620 | [
"MIT-0",
"MIT"
] | 1 | 2016-10-18T14:35:36.000Z | 2016-10-25T19:12:05.000Z | src/rocommand/test/TestRosrsSync.py | A-Mazurek/ro-manager | e49b6025b89594e036fdb2b56c8b871717b3b620 | [
"MIT-0",
"MIT"
] | 7 | 2015-03-04T17:22:00.000Z | 2022-03-14T15:55:23.000Z | '''
Created on 15-09-2011
@author: piotrhol
'''
__author__ = "piotrhol"
__copyright__ = "PNSC (@@check)"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import urlparse
from compiler.ast import Assert
if __name__ == "__main__":
# Add main project directory and ro manager directories at start of python path
sys.path.insert(0, "../..")
sys.path.insert(0, "..")
import ro_utils
import logging
import os.path
import rdflib
import time
from MiscUtils import TestUtils
from rocommand import ro_annotation
from rocommand.test import TestROSupport
from rocommand.test.TestConfig import ro_test_config
from rocommand.ro_metadata import ro_metadata
from rocommand.ro_remote_metadata import ro_remote_metadata, createRO, deleteRO, sendZipRO
from rocommand import ro_rosrs_sync
from rocommand.ro_namespaces import ROTERMS
from rocommand.ROSRS_Session import ROSRS_Session
# Local ro_config for testing
ro_config = {
"annotationTypes": ro_annotation.annotationTypes,
"annotationPrefixes": ro_annotation.annotationPrefixes
}
# Logging object
log = logging.getLogger(__name__)
# Base directory for RO tests in this module
testbase = os.path.dirname(os.path.abspath(__file__))
class TestRosrsSync(TestROSupport.TestROSupport):
    """Integration tests for pushing a local research object to a ROSRS
    service via ro_rosrs_sync (requires a live ROSRS endpoint configured
    in ro_test_config)."""
    def setUp(self):
        # Open one ROSRS session per test; closed again in tearDown.
        super(TestRosrsSync, self).setUp()
        self.rosrs = ROSRS_Session(ro_test_config.ROSRS_URI,
            accesskey=ro_test_config.ROSRS_ACCESS_TOKEN)
        return
    def tearDown(self):
        super(TestRosrsSync, self).tearDown()
        self.rosrs.close()
        return
    # Setup local config for local tests
    def setupConfig(self):
        return self.setupTestBaseConfig(testbase)
    def testNull(self):
        assert True, 'Null test failed'
    def testPushZip(self):
        """Upload a zipped RO and poll the resulting async job until done."""
        httpsession = ROSRS_Session(ro_test_config.ROSRS_URI,
            accesskey=ro_test_config.ROSRS_ACCESS_TOKEN)
        (status, reason, headers, data) = sendZipRO(httpsession, ro_test_config.ROSRS_URI, "ro1", open("zips/pushro-6.zip", 'rb').read())
        # Poll the job URL (from the Location header) until it leaves RUNNING.
        status = "RUNNING"
        while (status == "RUNNING"):
            time.sleep(1)
            (status, target_id, processed_resources, submitted_resources) = ro_utils.parse_job(httpsession, headers['location'])
        self.assertEqual("DONE", status)
        deleteRO(httpsession,target_id)
        self.assertEqual(reason, "Created")
    def testPush(self):
        """Build a local RO with annotations, push it to ROSRS, and check
        every sync action reported by pushResearchObject; then push again
        and check that only updates/skips occur."""
        rodir = self.createTestRo(testbase, "data/ro-test-1", "RO test push", "ro-testRoPush")
        localRo = ro_metadata(ro_config, rodir)
        localRo.addAggregatedResources(rodir, recurse=True)
        # localRo.aggregateResourceExt("http://www.example.org")
        roresource = "subdir1/subdir1-file.txt"
        # Add anotations for file
        ann1 = localRo.addSimpleAnnotation(roresource, "type", "Test file")
        ann2 = localRo.addSimpleAnnotation(roresource, "description", "File in test research object")
        ann3 = localRo.addSimpleAnnotation(roresource, "rdf:type", ROTERMS.resource)
        annotationsCnt = 0
        # Start from a clean remote RO; the extra external resource below
        # should be *deaggregated* by the sync because it is not local.
        deleteRO(self.rosrs, urlparse.urljoin(self.rosrs.baseuri(), "TestPushRO/"))
        (_,_,rouri,_) = createRO(self.rosrs, "TestPushRO")
        remoteRo = ro_remote_metadata(ro_test_config, self.rosrs, rouri)
        remoteRo.aggregateResourceExt("http://www.anotherexample.org")
        # Every internal resource we expect the push to aggregate remotely.
        resourcesInt = (
            [ rdflib.URIRef("README-ro-test-1")
            , rdflib.URIRef("minim.rdf")
            , rdflib.URIRef("subdir1/subdir1-file.txt")
            , rdflib.URIRef("subdir2/subdir2-file.txt")
            , rdflib.URIRef("filename%20with%20spaces.txt")
            , rdflib.URIRef("filename%23with%23hashes.txt")
            , rdflib.URIRef(ann1)
            , rdflib.URIRef(ann2)
            , rdflib.URIRef(ann3)
            ])
        resourcesIntCnt = 0
        for (action, resuri) in ro_rosrs_sync.pushResearchObject(localRo, remoteRo):
            log.debug("The following object has been pushed: %s (%s)"%(resuri, action))
            # this assumes that the above is the only external resource
            if action == ro_rosrs_sync.ACTION_AGGREGATE_EXTERNAL:
                self.assertEqual(resuri, rdflib.URIRef("http://www.example.org"), "The external resource is pushed")
                self.assertTrue(localRo.isAggregatedResource(resuri), "Resource that is pushed is aggregated locally")
            elif action == ro_rosrs_sync.ACTION_AGGREGATE_INTERNAL:
                self.assertTrue(localRo.getComponentUriRel(resuri) in resourcesInt, "Resource that is pushed is aggregated locally")
                resourcesIntCnt += 1
            elif action == ro_rosrs_sync.ACTION_DELETE:
                self.assertFalse(localRo.isAggregatedResource(resuri), "Resource that is deaggregated in ROSRS is not aggregated locally")
                self.assertEqual(resuri, rdflib.URIRef("http://www.anotherexample.org"), "The external resource is deaggregated (%s)"%resuri)
            elif action == ro_rosrs_sync.ACTION_AGGREGATE_ANNOTATION:
                self.assertTrue(localRo.isAnnotationNode(resuri), "Annotation that is pushed is aggregated locally (%s)"%(resuri))
                annotationsCnt += 1
            elif action == ro_rosrs_sync.ACTION_DELETE_ANNOTATION:
                self.assertFalse(localRo.isAnnotationNode(resuri), "Annotation that is deaggregated in ROSRS is not aggregated locally")
                pass
            elif action == ro_rosrs_sync.ACTION_ERROR:
                log.warn("Error, not necessarily a fail: %s"%resuri)
            else:
                self.fail("Unexpected action %s"%action)
        self.assertEqual(len(resourcesInt), resourcesIntCnt, "All internal resources were aggregated (should be %d was %d)"%(len(resourcesInt), resourcesIntCnt))
        # 3 annotations + manifest which in RO manager also annotates the RO
        self.assertEqual(4, annotationsCnt, "All annotations were aggregated (should be %d was %d)"%(4, annotationsCnt))
        # Second push: everything is already remote, so only annotation
        # updates/overwrites (or skips) are acceptable.
        for (action, resuri) in ro_rosrs_sync.pushResearchObject(localRo, remoteRo):
            if action == ro_rosrs_sync.ACTION_UPDATE_ANNOTATION:
                self.assertTrue(localRo.isAnnotationNode(resuri), "Annotations that is updated is aggregated locally (%s)"%(resuri))
            elif action == ro_rosrs_sync.ACTION_UPDATE_OVERWRITE:
                # see https://jira.man.poznan.pl/jira/browse/WFE-671
                self.assertTrue(resuri in [rdflib.URIRef(ann1), rdflib.URIRef(ann2), rdflib.URIRef(ann3)], "Annotation bodies can be uploaded twice")
            elif action == ro_rosrs_sync.ACTION_ERROR:
                log.warn("Error, not necessarily a fail: %s"%resuri)
            elif not action == ro_rosrs_sync.ACTION_SKIP:
                self.fail("Nothing else should be pushed again (%s, %s)"%(action, resuri))
        # Clean up
        remoteRo.delete()
        self.deleteTestRo(rodir)
        return
def getTestSuite(select="unit"):
    """
    Assemble a test suite for this module.

    select is one of the following:
        "unit"      return suite of unit tests only
        "component" return suite of unit and component tests
        "all"       return suite of unit, component and integration tests
        "pending"   return suite of pending tests
        name        a single named test to be run
    """
    testdict = {
        "unit": ["testUnits", "testNull"],
        "component": ["testComponents", "testPush", "testPushZip"],
        "integration": ["testIntegration"],
        "pending": ["testPending"],
    }
    return TestUtils.getTestSuite(TestRosrsSync, testdict, select=select)
if __name__ == "__main__":
TestUtils.runTests("TestRosrsSync.log", getTestSuite, sys.argv)
| 43.168478 | 161 | 0.652902 |
3fd97cbc6ded4e76580b87be6465ca0166617435 | 2,050 | py | Python | djangoDemo2/urls.py | zxuu/Django | fbb9320487ca180733902cffdea7b37cc2115a94 | [
"Apache-2.0"
] | null | null | null | djangoDemo2/urls.py | zxuu/Django | fbb9320487ca180733902cffdea7b37cc2115a94 | [
"Apache-2.0"
] | null | null | null | djangoDemo2/urls.py | zxuu/Django | fbb9320487ca180733902cffdea7b37cc2115a94 | [
"Apache-2.0"
] | null | null | null | """djangoDemo2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from service import views
from django.conf.urls import url
from djangoDemo2 import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    # URL through which the mobile client reaches the server-side user API
    url(r'^users/$', views.user_api),
    # `username` is the parameter of views.another_user_api
    url(r'^users/(?P<username>[A-Za-z0-9]+)/$', views.another_user_api),
    # API used by the Android client
    url(r'^android_user/$', views.android_user_api),
    # Upload an image
    url(r'^uploadImage/$', views.uploadImages),
    # Upload a video album
    url(r'^uploadVideo/$', views.uploadVideo),
    url(r'^uploadFile/$', views.upLoadFile),
    # Register a new account
    url(r'^register/$',views.register),
    # Sign in
    url(r'^sign/$',views.sign),
    # Post a comment
    url(r'^putComment/$',views.putComment),
    # Follow a user
    url(r'^putFollow/$',views.putFollow),
    # Like (thumbs up)
    url(r'^putDianZan/$',views.putDianZan),
    # Fetch my video albums
    url(r'^getVideos/$', views.getVideos),
    # Fetch my photo albums
    url(r'^getImages/$', views.getImages),
    # Fetch the users related to a video
    url(r'^getUser/$', views.getUser),
    # Fetch the photo album belonging to a video album
    url(r'^getVideoImages/$', views.getVideoImages),
    # Fetch the people I follow
    url(r'^getFollows/$', views.getFollows),
    # Fetch the comments of a video
    url(r'^getComments/$', views.getComments),
    url(r'^uploading/$', views.uploadImg),
    url(r'^showImg/$', views.showImg),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 32.03125 | 77 | 0.677073 |
c15b5b794ee97b011e2243a6710cc27518217dd8 | 811 | py | Python | setup.py | zhanglabtools/MSTD | 0a63b0b206f21997e872fb6734a6f28a888dc786 | [
"MIT"
] | 4 | 2018-08-16T12:13:15.000Z | 2020-05-31T18:34:29.000Z | setup.py | zhanglabtools/MSTD | 0a63b0b206f21997e872fb6734a6f28a888dc786 | [
"MIT"
] | null | null | null | setup.py | zhanglabtools/MSTD | 0a63b0b206f21997e872fb6734a6f28a888dc786 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 15:11:48 2017
@author: Yusen Ye
"""
import os
import sys
import shutil
from subprocess import call
from warnings import warn
from setuptools import setup
# Package definition: sources live under src/, data files ship with the
# MSTDlib package.
setup(name='MSTD',
      version='2.0',
      package_dir={'': 'src'},
      packages=['MSTDlib'],
      package_data={
          # Non-Python data files bundled into the installed MSTD package.
          'MSTD': ['data/*','*.txt','data/Celltypes_blood_17_location/*'],
      },
      include_package_data=True
      #py_modules=['MSTDlib_test_v2'],
      )
#get location of setup.py
# setup_dir = os.path.dirname(os.path.realpath(__file__))
#Copy test data
# data_dir = os.path.expanduser('~/.MSTD/data')
# if os.path.isdir(data_dir):
#     shutil.rmtree(data_dir)
# shutil.copytree(setup_dir + '/data/', data_dir)
59bdbf330cb4c3de98f848d26bf17b1883d77d9a | 11,051 | py | Python | catfishq/cat_fastq.py | rfetherston/catfishq | 58377e730f4fe6b08ae066324a30bdf44939f15f | [
"MIT"
] | null | null | null | catfishq/cat_fastq.py | rfetherston/catfishq | 58377e730f4fe6b08ae066324a30bdf44939f15f | [
"MIT"
] | null | null | null | catfishq/cat_fastq.py | rfetherston/catfishq | 58377e730f4fe6b08ae066324a30bdf44939f15f | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
import logging
import math
import os
import re
import pysam
import sys
from pathlib import Path
from datetime import datetime, timedelta
from . import __version__
# Precomputed phred-score -> error-probability table: LOOKUP[q] == 10**(-q/10).
# A comprehension replaces the manual append loop (same values, same order).
LOOKUP = [pow(10, -0.1 * q) for q in range(100)]
def _compute_mean_qscore(scores):
    """Return the phred score of the *mean error probability* of ``scores``.

    Each phred score is converted to its error probability via the
    module-level ``LOOKUP`` table; the probabilities are averaged and
    converted back to the phred scale.

    :param scores: Iterable of integer phred scores.
    :returns: 0.0 for an empty input, otherwise the mean-probability
        phred score as a float.
    """
    if not scores:
        return 0.0
    mean_prob = sum(LOOKUP[q] for q in scores) / len(scores)
    return -10.0 * math.log10(mean_prob)
def parse_args(argv):
    """
    Commandline parser
    :param argv: Command line arguments
    :type argv: List
    :returns: argparse.Namespace with the parsed options
    """
    usage = "Cat long lists of FASTQ files"
    parser = argparse.ArgumentParser(
        description=usage, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--log",
        dest="log",
        choices=[
            "DEBUG",
            "INFO",
            "WARNING",
            "ERROR",
            "CRITICAL",
            "debug",
            "info",
            "warning",
            "error",
            "critical",
        ],
        default="INFO",
        help="Print debug information",
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="OUT",
        type=str,
        default="/dev/stdout",
        help="Output file. (default: stdout)",
    )
    # Read-level filters (length / quality / sequencing-time window).
    parser.add_argument(
        "-l", "--min-length", dest="MIN_LEN", type=int, default=0, help="Minimum read length"
    )
    parser.add_argument(
        "-q", "--min-qscore", dest="MIN_QSCORE", type=int, default=0, help="Minimum q-score"
    )
    parser.add_argument(
        "--max-sequencing-time", dest="MAX_SEQ_TIME", type=int, default=None, help="Only output reads that where sequenced at or up to the given time (minutes)."
    )
    parser.add_argument(
        "--min-sequencing-time", dest="MIN_SEQ_TIME", type=int, default=None, help="Only output reads that where sequenced at or after the given time (minutes)."
    )
    parser.add_argument(
        "--start-time", dest="START_TIME", type=str, default=None, help="Starttime of the run as guppy time stamp (only required with --sequencing-time). If 'min' is given as argument the minimal time is detected automatically."
    )
    parser.add_argument(
        "--filter-id", dest="FILTER_ID", type=str, default=None, help="Only print reads with IDs present in file."
    )
    parser.add_argument(
        "--print-start-time", dest="PRINT_START_TIME", action="store_true", help="Print the minimal start_time of all fastq files"
    )
    # Output-size limits (stop early once reached).
    parser.add_argument(
        "-n", "--max_n", dest="MAX_N", type=int, default=0, help="Stop after <max_n> reads"
    )
    parser.add_argument(
        "-b", "--max_mbp", dest="MAX_BP", type=int, default=0, help="Stop after <max_bp> mega base pairs"
    )
    parser.add_argument(
        "-r",
        "--recursive",
        dest="RECURSIVE",
        action="store_true",
        help="Search folders recursively",
    )
    parser.add_argument(
        "-d",
        "--dedup",
        dest="DEDUP",
        action="store_true",
        help="Remove duplicated reads.",
    )
    parser.add_argument(
        "FASTQ",
        nargs="+",
        type=str,
        help="FASTQ files or folders containing FASTQ files",
    )
    parser.add_argument(
        "-v",
        '--version',
        action='version',
        version='catfish ' + __version__,
        help="Print version",
    )
    args = parser.parse_args(argv)
    return args
def find_file_in_folder(
    folder,
    recursive=True,
    patterns=(
        "*.fastq",
        "*.fastq.gz",
        "*.fasta",
        "*.fasta.gz",
        "*.fa",
        "*.fa.gz",
        "*.fq",
        "*.fq.gz",
    ),
):
    """Find sequence files under ``folder``.

    :param folder: Directory to search (a plain file is returned as-is).
    :param recursive: Search subdirectories too.
    :param patterns: Glob patterns to match (tuple default avoids the
        mutable-default-argument pitfall; callers may still pass a list).
    :returns: List of matching paths. Always a list -- the original
        returned a bare string for the file case, which is inconsistent
        with every other return path and with callers that iterate the
        result.
    """
    if os.path.isfile(folder):
        # A single file trivially "matches"; wrap it so the return type is
        # consistently a list.
        return [folder]
    files = []
    glob = Path(folder).glob
    if recursive:
        glob = Path(folder).rglob
    for pattern in patterns:
        for file in glob(pattern):
            files.append(file)
    if len(files) == 0:
        # Bug fix: the old message referenced the loop variable `pattern`
        # after the loop (only the last pattern, NameError for empty
        # patterns); report all patterns instead.
        logging.warning(
            "Could not find any files matching {} in {}".format(list(patterns), folder)
        )
    return files
def check_seq_time(comment, max_start_time, min_start_time):
    """Return True when a read's start time lies inside the given window.

    The read's start time is parsed from the ``start_time=...`` field of
    its FASTQ ``comment``. A bound of ``None`` means "no bound on that
    side"; with both bounds ``None`` the comment is not even parsed and
    the read always passes.

    :param comment: FASTQ comment string containing ``start_time=<ISO8601>Z``.
    :param max_start_time: Upper bound (inclusive) as datetime, or None.
    :param min_start_time: Lower bound (inclusive) as datetime, or None.
    :returns: bool
    """
    if max_start_time is None and min_start_time is None:
        return True
    match = re.search(r'start_time=([^ ]+)', comment, re.M | re.I)
    read_start = datetime.strptime(match.group(1), '%Y-%m-%dT%H:%M:%SZ')
    within_max = max_start_time is None or read_start <= max_start_time
    within_min = min_start_time is None or read_start >= min_start_time
    return within_max and within_min
def compare_start_time(comment, min_start_time):
    """Return the earlier of ``min_start_time`` and the read's start time.

    The read's start time is parsed from the ``start_time=...`` field of
    the FASTQ ``comment``. A ``min_start_time`` of 0 acts as the "not yet
    set" sentinel, so the parsed time wins unconditionally.

    :param comment: FASTQ comment string containing ``start_time=<ISO8601>Z``.
    :param min_start_time: Current minimum (datetime) or the sentinel 0.
    :returns: datetime -- the smaller of the two times.
    """
    match = re.search(r'start_time=([^ ]+)', comment, re.M | re.I)
    read_start = datetime.strptime(match.group(1), '%Y-%m-%dT%H:%M:%SZ')
    if min_start_time == 0:
        return read_start
    # min() returns its first argument on ties, matching the original
    # `min_start_time <= start_time` branch.
    return min(min_start_time, read_start)
def parse_fastqs(filename, min_len=0, min_qscore=0, max_start_time=None, min_start_time=None):
    """Yield reads from ``filename`` that pass the configured filters.

    Filters are applied in order: minimum length, minimum mean q-score,
    then sequencing-time window. A passing read's comment is prefixed
    with ``CO:Z:`` before it is yielded.

    :param filename: FASTQ(.gz) file readable by pysam.
    :param min_len: Minimum read length (0 disables the check).
    :param min_qscore: Minimum mean q-score (0 disables the check).
    :param max_start_time: Latest allowed read start time, or None.
    :param min_start_time: Earliest allowed read start time, or None.
    """
    def _passes(read):
        # Same short-circuit order as the original continue-chain.
        if min_len and len(read.sequence) < min_len:
            return False
        if min_qscore and _compute_mean_qscore(read.get_quality_array()) < min_qscore:
            return False
        return check_seq_time(read.comment, max_start_time, min_start_time)

    with pysam.FastxFile(filename) as fx:
        for read in fx:
            if not _passes(read):
                continue
            if read.comment:
                read.comment = "CO:Z:{}".format(read.comment)
            yield read
def get_file_names(path, recursive):
    """Resolve ``path`` to a list of FASTQ file names.

    A plain file resolves to itself, a directory is searched for FASTQ
    files (optionally recursively), and a missing path yields an empty
    list after logging a warning.

    :param path: File or directory path.
    :param recursive: Whether to search directories recursively.
    :returns: List of file names (possibly empty).
    """
    if not os.path.exists(path):
        logging.warning("Could not find {}".format(path))
        return []
    if os.path.isdir(path):
        logging.debug("Searching {} for FASTQ files".format(path))
        return find_file_in_folder(path, recursive=recursive)
    return [path]
def get_start_time(paths, recursive=False):
    """
    Return the earliest read start time across all given FASTQ files.

    Scans every read of every file and keeps the minimum of the
    ``start_time=...`` timestamps found in the read comments.

    :param paths: Input FASTQ files or folders containing FASTQ files
    :param recursive: Search folders recursively
    :return: Earliest start time as a datetime (or the sentinel 0 when
        no reads were found)
    """
    # Fix: removed the unused locals `start` and `max_start_time` that the
    # original assigned but never read.
    min_start_time = 0
    for path in paths:
        for filename in get_file_names(path, recursive):
            with pysam.FastxFile(filename) as fh:
                for entry in fh:
                    min_start_time = compare_start_time(entry.comment, min_start_time)
    return min_start_time
def format_fq(paths, out_filename, min_len=0, min_qscore=0, max_n=0, max_bp=0, recursive=False, dedup=False, max_seq_time=0, min_seq_time=0, start_time=0, filter_read_ids_file=None):
    """
    Concatenate FASTQ files, applying the configured read filters.

    :param paths: Input FASTQ files or folders containing FASTQ files
    :param out_filename: Output FASTQ file
    :param min_len: Minimum read length (0 disables)
    :param min_qscore: Minimum mean q-score (0 disables)
    :param max_n: Stop after this many reads (0 disables)
    :param max_bp: Stop after this many base pairs (0 disables)
    :param recursive: Search folders recursively
    :param dedup: Skip reads whose ID was already written
    :param max_seq_time: Upper window bound in minutes after start_time
    :param min_seq_time: Lower window bound in minutes after start_time
    :param start_time: Run start as guppy timestamp, or "min" to detect it
    :param filter_read_ids_file: File with one read ID per line; only
        those reads are written (empty file keeps all, as before)
    :return: None
    """
    max_start_time = None
    min_start_time = None

    keep_ids = None
    if filter_read_ids_file:
        with open(filter_read_ids_file, "r") as fh:
            keep_ids = {line.strip() for line in fh}
        logging.info("Found {} read ids.".format(len(keep_ids)))

    if start_time:
        if start_time == "min":
            # Bug fix: the original computed `auto_start_time` here but then
            # used the still-None `start`, so `start + timedelta(...)` raised
            # TypeError whenever --start-time min was combined with a
            # sequencing-time window. Use the detected time as the base.
            start = get_start_time(paths, recursive)
        else:
            start = datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%SZ')
        if max_seq_time:
            max_start_time = start + timedelta(minutes=max_seq_time)
        if min_seq_time:
            min_start_time = start + timedelta(minutes=min_seq_time)

    read_ids = set()
    n = 0
    n_bp = 0
    with open(out_filename, mode="w") as fout:
        for path in paths:
            filenames = get_file_names(path, recursive)
            logging.debug("Found {} files".format(len(filenames)))
            for filename in filenames:
                for entry in parse_fastqs(
                    filename, min_len=min_len, min_qscore=min_qscore, max_start_time=max_start_time, min_start_time=min_start_time
                ):
                    if dedup and entry.name in read_ids:
                        continue
                    # Truthiness check kept on purpose: an *empty* ID file
                    # keeps all reads, exactly like the original.
                    if keep_ids and entry.name not in keep_ids:
                        continue
                    fout.write(str(entry) + "\n")
                    if dedup:
                        read_ids.add(entry.name)
                    n += 1
                    n_bp += len(entry.sequence)
                    if (max_n and n >= max_n) or (max_bp and n_bp > max_bp):
                        return
def main(argv=sys.argv[1:]):
    """
    Basic command line interface to telemap.
    :param argv: Command line arguments
    :type argv: list
    :return: None
    :rtype: NoneType
    """
    # NOTE(review): the default `sys.argv[1:]` is evaluated once at import
    # time; fine for a CLI entry point, but confirm if main() is ever called
    # programmatically after sys.argv changes.
    args = parse_args(argv=argv)
    numeric_level = getattr(logging, args.log.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError("Invalid log level: %s" % args.log.upper())
    logging.basicConfig(level=numeric_level, format="%(message)s")
    # --print-start-time short-circuits: report the earliest read time only.
    if(args.PRINT_START_TIME):
        min_start_time=get_start_time(args.FASTQ,args.RECURSIVE)
        print(min_start_time.strftime('%Y-%m-%dT%H:%M:%SZ'))
    else:
        format_fq(
            args.FASTQ,
            args.OUT,
            min_len=args.MIN_LEN,
            min_qscore=args.MIN_QSCORE,
            max_n=args.MAX_N,
            # CLI takes mega base pairs; format_fq expects base pairs.
            max_bp=args.MAX_BP * 1000 * 1000,
            recursive=args.RECURSIVE,
            dedup=args.DEDUP,
            max_seq_time=args.MAX_SEQ_TIME,
            min_seq_time=args.MIN_SEQ_TIME,
            start_time=args.START_TIME,
            filter_read_ids_file=args.FILTER_ID
        )
if __name__ == "__main__":
main()
| 29.706989 | 228 | 0.59696 |
a3d6e4ae3b08c773a5436fba9d2ee727319ab6be | 401 | py | Python | products/migrations/0010_product_video_link.py | siva-namala/django3.1 | 8666a536d07d60b61798c7bbd40737da4e6d7cde | [
"MIT"
] | null | null | null | products/migrations/0010_product_video_link.py | siva-namala/django3.1 | 8666a536d07d60b61798c7bbd40737da4e6d7cde | [
"MIT"
] | null | null | null | products/migrations/0010_product_video_link.py | siva-namala/django3.1 | 8666a536d07d60b61798c7bbd40737da4e6d7cde | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-19 20:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0009_product_can_backorder'),
]
operations = [
migrations.AddField(
model_name='product',
name='video_link',
field=models.TextField(blank=True, null=True),
),
]
| 21.105263 | 58 | 0.605985 |
dcd9d9d1fb75b7fd91eac03c44c20729da0085da | 3,602 | py | Python | bots.py | phiwei/tensor_bot | 0cfe81d4b31a50e520dbacc650950dc6e370d075 | [
"MIT"
] | 1 | 2019-08-13T14:36:34.000Z | 2019-08-13T14:36:34.000Z | bots.py | phiwei/tensor_bot | 0cfe81d4b31a50e520dbacc650950dc6e370d075 | [
"MIT"
] | 1 | 2020-06-18T13:30:37.000Z | 2020-06-18T13:30:37.000Z | bots.py | phiwei/tensor_bot | 0cfe81d4b31a50e520dbacc650950dc6e370d075 | [
"MIT"
] | null | null | null | from tensorflow.keras.callbacks import Callback
from twython import Twython
from telegram import Bot
class BaseBot(Callback):
    """
    Base class for tf/keras bot callback.
    Sends live updates about model training; subclasses implement the
    actual message delivery in ``_send_message``.
    """
    def __init__(self, freq=1, init_message=None):
        """
        :param freq: (int) or None. If int, an update message is sent
            every ``freq`` epochs. If None, only a final message is sent
            once training is concluded.
        :param init_message: (str) or None. If set, this message is sent
            when training begins (e.g. the hyperparameter configuration).
        """
        self.freq = freq
        self.init_message = init_message
        self.epoch = None
        self.logs = dict()

    def on_train_begin(self, logs={}):
        """Send the optional initial message when training starts."""
        if self.init_message is not None:
            self._send_message(self.init_message)

    def on_train_end(self, logs={}):
        """Always send a final summary message when training finishes."""
        self._send_message(self._generate_message())

    def on_epoch_end(self, epoch, logs={}):
        """Record the epoch state and send an update every ``freq`` epochs
        (no periodic messages at all when ``freq`` is None)."""
        self.epoch = epoch
        self.logs = logs
        if self.freq is not None and epoch % self.freq == 0:
            self._send_message(self._generate_message())

    def _send_message(self, message):
        """
        Sub classes need to define how
        they send out messages.
        """
        raise NotImplementedError('Implement in subclass!')

    def _generate_message(self):
        """
        Build the update text from the current epoch and log dict.

        ``logs`` is the tf/keras dict of monitored losses/metrics, e.g.
        ``{"loss": 0.5, "val_loss": 0.6, "lr": 0.001}``. The learning
        rate is rendered in scientific notation, everything else with
        four decimals. Override for a different message/format.

        :return: (str)
        """
        parts = ['epoch: ' + str(self.epoch)]
        for key in self.logs:
            if key == 'lr':
                rendered = '{:.2E}'.format(self.logs[key])
            else:
                rendered = '{0:.4f}'.format(self.logs[key])
            parts.append(key + ': ' + rendered)
        return '\n'.join(parts) + '\n'
class TelegramBot(BaseBot):
    """
    Telegram flavour of BaseBot: delivers training-progress messages to
    a Telegram chat.
    """
    def __init__(self, token, chat_id, freq=1, init_message=None):
        """
        For details on how to obtain Telegram API tokens, please refer
        to the readme.

        :param token: (str) Telegram bot API token.
        :param chat_id: (str) Target chat identifier.
        :param freq: (int) Epoch interval between updates.
        :param init_message: (str) Optional message sent at train start.
        """
        self.bot = Bot(token=token)
        self.chat_id = chat_id
        super().__init__(freq=freq, init_message=init_message)

    def _send_message(self, message):
        """
        Deliver ``message`` to the configured chat.

        :param message: (str)
        """
        self.bot.send_message(chat_id=self.chat_id, text=message)
| 27.922481 | 59 | 0.538034 |
78e311a1db2b0199cd7028d0c1efeff6c25cd0e0 | 489 | py | Python | tests/models/test_watch.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 13 | 2020-07-03T01:20:54.000Z | 2021-11-22T06:06:21.000Z | tests/models/test_watch.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 660 | 2020-06-26T02:52:18.000Z | 2022-03-31T14:14:02.000Z | tests/models/test_watch.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 3 | 2020-07-12T06:18:39.000Z | 2021-06-22T06:54:47.000Z | from tests.fixtures import Factories
class TestModelWatch:
    """Tests for the Watch model's dict serialization."""
    def test_watch(self, factories: Factories):
        # Build a watch linked to a fresh guild and user via the factory
        # fixtures, then check to_dict() round-trips exactly those fields.
        guild = factories.guild.create()
        user = factories.user.create()
        watch = factories.watch.create(
            note="note",
            user_xid=user.xid,
            guild_xid=guild.xid,
        )
        assert watch.to_dict() == {
            "guild_xid": watch.guild_xid,
            "user_xid": watch.user_xid,
            "note": watch.note,
        }
| 25.736842 | 47 | 0.560327 |
747517866f2403427c95cecc9ed3b159b508e29a | 1,928 | py | Python | utils/scripts/heroes_ru_names.py | fennr/Samuro-HotsBot | 81e7a65a08d50602442094e52d6d2e405c98ac1a | [
"Apache-2.0"
] | 1 | 2022-03-26T11:28:00.000Z | 2022-03-26T11:28:00.000Z | utils/scripts/heroes_ru_names.py | fennr/Samuro-HotsBot | 81e7a65a08d50602442094e52d6d2e405c98ac1a | [
"Apache-2.0"
] | null | null | null | utils/scripts/heroes_ru_names.py | fennr/Samuro-HotsBot | 81e7a65a08d50602442094e52d6d2e405c98ac1a | [
"Apache-2.0"
] | null | null | null | import json
import os
import requests
from bs4 import BeautifulSoup
from pprint import pprint
def create_nick_list(filename):
    """Parse a "HeroId: nick1,nick2,..." file into a list of dicts.

    Each non-blank line yields ``{"cHeroId": <id>, "nick": [<Nick>, ...]}``
    with every nickname capitalized. Spaces and the trailing newline are
    stripped before parsing.

    :param filename: Path to a UTF-8 text file, one hero per line.
    :returns: list of dicts.
    """
    nick_list = []
    with open(filename, 'r', encoding='utf-8') as heroes_txt:
        for line in heroes_txt:
            line = line.replace(' ', '').replace('\n', '')
            if not line:
                # Bug fix: the old `len(line) > 0` check ran *before* the
                # newline was stripped, so a blank line ("\n") slipped
                # through and crashed the two-value unpack below.
                continue
            cHeroId, nick = line.split(sep=':', maxsplit=1)
            nicks = [word.capitalize() for word in nick.split(',')]
            nick_list.append(dict(cHeroId=cHeroId, nick=nicks))
    return nick_list
def create_tier_dict():
    """Scrape the Icy Veins general tier list into {hero_name: tier_letter}.

    Returns:
        dict: mapping of hero name to a tier letter 'S'..'D'.
    """
    tier_dict = {
        1: 'S',
        2: 'A',
        3: 'B',
        4: 'C',
        5: 'D',
    }
    # Heroes whose on-page spelling differs from the name we want to store.
    bug_names = {
        'Deckard Cain': 'Deckard',
        'Lúcio': 'Lucio'
    }
    response = requests.get('https://www.icy-veins.com/heroes/heroes-of-the-storm-general-tier-list')
    soup = BeautifulSoup(response.text, 'html.parser')
    tiers_table_html = soup.find_all('div', attrs={'class': 'htl'})
    tier_hero_list = []
    # One 'htl' div per tier, listed in S..D order; `position` indexes into
    # tier_dict. NOTE: assumes exactly 5 tier sections on the page -- a 6th
    # would raise KeyError.
    for position, hero_tier in enumerate(tiers_table_html, start=1):
        hero_list = hero_tier.find_all('span', attrs={'class': 'hp_50x50'})  # htl_ban_true
        for heroes in hero_list:
            next_element = heroes.find_next_sibling("span")
            # Use .get() instead of .setdefault(): setdefault would insert
            # every scraped hero into bug_names as a side effect.
            name = bug_names.get(next_element.text, next_element.text)
            if name == '':  # extra sibling span for Varian due to the role-icon span
                next_element = next_element.find_next_sibling("span")
                name = bug_names.get(next_element.text, next_element.text)
            tier_hero_list.append([name, tier_dict[position]])
    return dict(tier_hero_list)
if __name__ == '__main__':
    # NOTE(review): create_heroes_ru_data() is not defined in this module
    # (only create_nick_list and create_tier_dict are) -- running this file
    # as a script raises NameError. Confirm the intended entry point.
    create_heroes_ru_data()
| 32.133333 | 101 | 0.592842 |
3025974f81646ad0484b93994de5fc3d069f8c3e | 12,645 | py | Python | src/onevision/nn/layer/plugin.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/nn/layer/plugin.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | src/onevision/nn/layer/plugin.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import annotations
from typing import Any
from typing import Optional
from typing import Sequence
from typing import Union
import torch
import torch.nn.functional as F
from torch import nn as nn
from torch import Tensor
from onevision.factory import PLUGIN_LAYERS
from onevision.type import Int2T
from onevision.type import ListOrTupleAnyT
from onevision.type import Padding4T
__all__ = [
"Concat",
"ConcatPadding",
"ContextBlock",
"Flatten",
"Focus",
"Mean",
"Scale",
"Sum",
]
# MARK: - Modules
@PLUGIN_LAYERS.register(name="concat")
class Concat(nn.Module):
    """Concatenate a list (or tuple) of tensors along one dimension.

    Args:
        dim (str, ellipsis, None):
            Dimension to concatenate along. Default: `1`.
    """

    # MARK: Magic Functions

    def __init__(self, dim: Union[str, ellipsis, None] = 1):
        super().__init__()
        self.dim = dim

    # MARK: Forward Pass

    def forward(self, x: ListOrTupleAnyT[Tensor]) -> Tensor:
        """Return the tensors in `x` joined along `self.dim`.

        Args:
            x (ListOrTupleAnyT[Tensor]):
                Tensors to concatenate; all must match in every dimension
                except `self.dim`.

        Returns:
            (Tensor):
                The concatenated tensor.
        """
        return torch.cat(x, dim=self.dim)
@PLUGIN_LAYERS.register(name="concat_padding")
class ConcatPadding(nn.Module):
    """Concatenate two tensors whose [H, W] (resolution) may differ.

    The smaller tensor `y` is zero-padded so its [H, W] matches the larger
    tensor `x` before concatenation -- hence the name `ConcatPadding`.

    Args:
        dim (str, ellipsis, None):
            Dimension to concatenate along. Default: `1`.
    """

    # MARK: Magic Functions

    def __init__(self, dim: Union[str, ellipsis, None] = 1):
        super().__init__()
        self.dim = dim

    # MARK: Forward Pass

    def forward(self, x: Tensor, y: Tensor) -> Tensor:
        """Pad `y` up to `x`'s spatial size, then concatenate.

        Args:
            x (Tensor):
                The tensor with the larger [H, W].
            y (Tensor):
                The tensor with the smaller [H, W].

        Returns:
            (Tensor):
                The concatenated tensor.
        """
        height_gap = x.size(2) - y.size(2)
        width_gap = x.size(3) - y.size(3)
        # Split each gap between the two sides, giving the extra pixel (for
        # odd gaps) to the right/bottom.
        padded = F.pad(y, (width_gap // 2, width_gap - width_gap // 2,
                           height_gap // 2, height_gap - height_gap // 2))
        return torch.cat((x, padded), dim=self.dim)
@PLUGIN_LAYERS.register(name="context_block")
class ContextBlock(nn.Module):
    """ContextBlock module in GCNet. See 'GCNet: Non-local Networks Meet
    Squeeze-Excitation Networks and Beyond'
    (https://arxiv.org/abs/1904.11492) for details.

    Fixes over the previous revision:
    * `self.pooling_type` is now stored (``spatial_pool`` read it but
      ``__init__`` never assigned it, raising AttributeError).
    * ``reset_parameters`` is now defined (it was called but missing,
      raising AttributeError at construction).
    * ``forward`` no longer multiplies the *input* tensor in place.

    Args:
        in_channels (int):
            Channels of the input feature map.
        ratio (float):
            Ratio of channels of transform bottleneck
        pooling_type (str):
            Pooling method for context modeling. One of: [`att`, `avg`].
            `att` stands for attention pooling and `avg` stands for average
            pooling. Default: `att`.
        fusion_types (Sequence[str]):
            Fusion method for feature fusion, One of: [`channel_add`,
            `channel_mul`]. `channel_add` stands for channelwise addition
            and `channel_mul` stands for multiplication.
            Default: (`channel_add`,).
    """

    _abbr_ = "context_block"

    # MARK: Magic Functions

    def __init__(
        self,
        in_channels : int,
        ratio       : float,
        pooling_type: str = "att",
        fusion_types: Sequence[str] = ("channel_add", ),
        *args, **kwargs
    ):
        super().__init__()
        if pooling_type not in ["avg", "att"]:
            raise ValueError("`pooling_type` must be one of ['avg', 'att'].")
        if not isinstance(fusion_types, (list, tuple)):
            raise ValueError("`fusion_types` must be a list or tuple.")
        valid_fusion_types = ["channel_add", "channel_mul"]
        if not all([f in valid_fusion_types for f in fusion_types]):
            raise ValueError(
                "`fusion_types` may only contain %s." % valid_fusion_types
            )
        if len(fusion_types) <= 0:
            raise ValueError("At least one fusion should be used.")

        # Stored because `spatial_pool` dispatches on it at forward time.
        self.pooling_type = pooling_type

        planes = int(in_channels * ratio)
        if pooling_type == "att":
            self.conv_mask = nn.Conv2d(
                in_channels=in_channels, out_channels=1, kernel_size=(1, 1)
            )
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
        if "channel_add" in fusion_types:
            self.channel_add_conv = nn.Sequential(
                nn.Conv2d(
                    in_channels=in_channels, out_channels=planes,
                    kernel_size=(1, 1)
                ),
                nn.LayerNorm(normalized_shape=[planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(
                    in_channels=planes, out_channels=in_channels,
                    kernel_size=(1, 1)
                )
            )
        else:
            self.channel_add_conv = None
        if "channel_mul" in fusion_types:
            self.channel_mul_conv = nn.Sequential(
                nn.Conv2d(
                    in_channels=in_channels, out_channels=planes,
                    kernel_size=(1, 1)
                ),
                nn.LayerNorm(normalized_shape=[planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(
                    in_channels=planes, out_channels=in_channels,
                    kernel_size=(1, 1)
                )
            )
        else:
            self.channel_mul_conv = None
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights following the GCNet reference scheme.

        The attention mask conv gets Kaiming init; the *last* conv of each
        transform branch is zero-initialized so the block starts near-neutral
        (channel_add adds 0; channel_mul scales uniformly by sigmoid(0)=0.5).
        """
        if self.pooling_type == "att":
            nn.init.kaiming_normal_(self.conv_mask.weight, mode="fan_in")
            if self.conv_mask.bias is not None:
                nn.init.zeros_(self.conv_mask.bias)
        for transform in (self.channel_add_conv, self.channel_mul_conv):
            if transform is not None:
                last_conv = transform[-1]
                nn.init.zeros_(last_conv.weight)
                if last_conv.bias is not None:
                    nn.init.zeros_(last_conv.bias)

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        """Fuse the pooled global context back into `x`.

        Note: multiplication is out-of-place; the previous in-place `*=`
        silently mutated the caller's input tensor.
        """
        # [N, C, 1, 1]
        context = self.spatial_pool(x=x)

        yhat = x
        if self.channel_mul_conv is not None:
            # [N, C, 1, 1]
            channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
            yhat = yhat * channel_mul_term
        if self.channel_add_conv is not None:
            # [N, C, 1, 1]
            channel_add_term = self.channel_add_conv(context)
            yhat = yhat + channel_add_term

        return yhat

    def spatial_pool(self, x: Tensor) -> Tensor:
        """Pool `x` into a [N, C, 1, 1] global context descriptor."""
        batch, channel, height, width = x.size()
        if self.pooling_type == "att":
            input_x = x
            # [N, C, H * W]
            input_x = input_x.view(batch, channel, height * width)
            # [N, 1, C, H * W]
            input_x = input_x.unsqueeze(1)
            # [N, 1, H, W]
            context_mask = self.conv_mask(x)
            # [N, 1, H * W]
            context_mask = context_mask.view(batch, 1, height * width)
            # [N, 1, H * W]
            context_mask = self.softmax(context_mask)
            # [N, 1, H * W, 1]
            context_mask = context_mask.unsqueeze(-1)
            # [N, 1, C, 1]
            context = torch.matmul(input_x, context_mask)
            # [N, C, 1, 1]
            context = context.view(batch, channel, 1, 1)
        else:
            # [N, C, 1, 1]
            context = self.avg_pool(x)

        return context
# noinspection PyMethodMayBeStatic
@PLUGIN_LAYERS.register(name="flatten")
class Flatten(nn.Module):
    """Collapse all trailing dimensions of the input into one.

    Commonly placed after `nn.AdaptiveAvgPool2d(1)` to drop the last two
    (spatial) dimensions.

    Attributes:
        channels (int):
            Target size of the flattened dimension; `-1` infers it from the
            input. Default: `-1`.
    """

    # MARK: Magic Functions

    def __init__(self, channels: int = -1):
        super().__init__()
        self.channels = channels

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        """Return `x` reshaped to [batch, self.channels]."""
        batch_size = x.shape[0]
        return x.view(batch_size, self.channels)
@PLUGIN_LAYERS.register(name="focus")
class Focus(nn.Module):
    """Fold spatial (w, h) information into channel space.

    Each 2x2 spatial neighborhood is split into 4 sub-grids which are
    stacked along the channel axis, so input(b, c, w, h) becomes
    (b, 4c, w/2, h/2) before the convolution.

    Args:
        in_channels (int):
            Number of channels in the input image.
        out_channels (int):
            Number of channels produced by the convolution.
        kernel_size (Int2T):
            Size of the convolving kernel. Default: `1`.
        stride (Int2T):
            Stride of the convolution. Default: `1`.
        padding (str, int, Int2T, optional):
            Zero-padding added to both sides of the input. Default: `None`.
        groups (int):
            Default: `1`.
        apply_act (bool):
            Should use activation layer. Default: `True`.
    """

    # MARK: Magic Functions

    def __init__(
        self,
        in_channels : int,
        out_channels: int,
        kernel_size : Int2T = (1, 1),
        stride      : Int2T = (1, 1),
        padding     : Optional[Padding4T] = None,
        dilation    : Int2T = (1, 1),
        groups      : int = 1,
        bias        : bool = True,
        padding_mode: str = None,
        device      : Any = None,
        dtype       : Any = None,
        apply_act   : bool = True,
    ):
        super().__init__()
        from onevision.nn.layer import ConvBnMish
        # The conv consumes 4x the input channels because forward() stacks
        # the four 2x2 sub-grids along the channel axis.
        self.conv = ConvBnMish(
            in_channels  = in_channels * 4,
            out_channels = out_channels,
            kernel_size  = kernel_size,
            stride       = stride,
            padding      = padding,
            dilation     = dilation,
            groups       = groups,
            bias         = bias,
            padding_mode = padding_mode,
            device       = device,
            dtype        = dtype,
            apply_act    = apply_act,
        )

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        """Apply the space-to-channel fold followed by the convolution.

        Args:
            x (Tensor):
                Input image of shape (b, c, w, h).

        Returns:
            (Tensor):
                Output of shape (b, out_channels, w/2, h/2).
        """
        top_left     = x[..., ::2, ::2]
        bottom_left  = x[..., 1::2, ::2]
        top_right    = x[..., ::2, 1::2]
        bottom_right = x[..., 1::2, 1::2]
        folded = torch.cat([top_left, bottom_left, top_right, bottom_right], 1)
        return self.conv(folded)
# noinspection PyMethodMayBeStatic
@PLUGIN_LAYERS.register(name="mean")
class Mean(nn.Module):
    """Reduce the input by taking its mean over the configured dimensions.

    Attributes:
        dim:
            Dimension(s) to reduce over. Default: `None`.
        keepdim (bool):
            Keep reduced dimensions with size 1. Default: `False`.
    """

    # MARK: Magic Functions

    def __init__(
        self,
        dim    : Sequence[Union[str, ellipsis, None]] = None,
        keepdim: bool = False,
    ):
        super().__init__()
        self.dim = dim
        self.keepdim = keepdim

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        """Return the mean of `x` over `self.dim`."""
        return x.mean(dim=self.dim, keepdim=self.keepdim)
@PLUGIN_LAYERS.register(name="scale")
class Scale(nn.Module):
    """Multiply the input by a single learnable scalar.

    The scale parameter has shape (1,) and broadcasts against input of any
    shape.

    Attributes:
        scale (float):
            Initial value of scale factor. Default: `1.0`.
    """

    # MARK: Magic Functions

    def __init__(self, scale: float = 1.0):
        super().__init__()
        # Registered as a Parameter so it is trained with the model.
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        """Return `x` scaled elementwise by the learned factor."""
        return x * self.scale
@PLUGIN_LAYERS.register(name="sum")
class Sum(nn.Module):
    """Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070.

    Args:
        n (int):
            Number of inputs to sum.
        weight (bool):
            Learn per-input weights for inputs 1..n-1. Default: `False`.
    """

    # MARK: Magic Functions

    def __init__(self, n: int, weight: bool = False):
        super().__init__()
        self.weight = weight  # whether learned weights are applied
        self.iter = range(n - 1)  # indices of the inputs added onto x[0]
        if weight:
            # One learnable weight per additional input.
            self.w = nn.Parameter(
                -torch.arange(1.0, n) / 2, requires_grad=True
            )

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        """Return x[0] plus the (optionally weighted) remaining inputs."""
        total = x[0]
        if self.weight:
            scaled = torch.sigmoid(self.w) * 2
            for idx in self.iter:
                total = total + x[idx + 1] * scaled[idx]
        else:
            for idx in self.iter:
                total = total + x[idx + 1]
        return total
| 29.135945 | 80 | 0.515461 |
0607e25df109ed6debcc04ab06798afc7e42ab31 | 1,966 | py | Python | library/hash_map/bucket.py | jdkandersson/algorithms-data-structures | 423b98afa4b3ad0548c3a724c47556714f0a61e2 | [
"Apache-2.0"
] | null | null | null | library/hash_map/bucket.py | jdkandersson/algorithms-data-structures | 423b98afa4b3ad0548c3a724c47556714f0a61e2 | [
"Apache-2.0"
] | null | null | null | library/hash_map/bucket.py | jdkandersson/algorithms-data-structures | 423b98afa4b3ad0548c3a724c47556714f0a61e2 | [
"Apache-2.0"
] | null | null | null | """Bucket implemented using linked list."""
from library import linked_list
class Bucket:
    """Hash-map bucket backed by a linked list of (key, value) pairs."""

    def __init__(self):
        """Construct an empty bucket."""
        self._list = linked_list.LinkedList()

    def insert(self, key, value):
        """
        Add or update key value pair to the bucket.

        Any existing entry for the key is removed first, so the bucket never
        holds duplicate keys. (Previously repeated inserts stacked stale
        entries: memory grew per update and delete() removed only the newest.)

        Args:
            key: The key identifying the value.
            value: The data to store.
        """
        try:
            existing = self.get(key)
        except KeyError:
            pass
        else:
            self._list.delete((key, existing))
        self._list.add_first((key, value))

    def get(self, key):
        """
        Get the value identified by the key.

        Raises KeyError if the key doesn't exist.

        Args:
            key: The key for the value to retrieve.

        Returns:
            The value for the key.
        """
        for element in self._list:
            element_key, element_value = element
            if element_key == key:
                return element_value
        raise KeyError

    def exists(self, key):
        """
        Check whether the key is in the bucket.

        Args:
            key: The key to check for.

        Returns:
            Whether the key is in the bucket.
        """
        for element in self._list:
            element_key, _ = element
            if element_key == key:
                return True
        return False

    def delete(self, key):
        """
        Delete the key from the bucket.

        Raises KeyError if the key doesn't exist.

        Args:
            key: The key to delete.
        """
        # get() raises KeyError for a missing key; the (key, value) pair is
        # needed because the linked list deletes by whole-element match.
        value = self.get(key)
        self._list.delete((key, value))

    def __iter__(self):
        """Iterate over each element in the bucket."""
        return self._list.__iter__()

    def clear(self):
        """Remove all elements from the bucket."""
        self._list.clear()

    def is_empty(self):
        """
        Check whether the bucket is empty.

        Returns:
            Whether the bucket is empty.
        """
        return self._list.is_empty()
d8ab25ab35bf60f25d25776acf3b8287adada94e | 667 | py | Python | auth-portal/auth-portal/manage.py | Stashchen/auth-playground | f9fb3238382d943d2497a99b743b0c069105b715 | [
"MIT"
] | 1 | 2020-12-11T16:56:21.000Z | 2020-12-11T16:56:21.000Z | auth-portal/auth-portal/manage.py | Stashchen/auth-playground | f9fb3238382d943d2497a99b743b0c069105b715 | [
"MIT"
] | 3 | 2020-12-14T15:06:02.000Z | 2021-01-04T19:34:41.000Z | auth-portal/auth-portal/manage.py | Stashchen/auth-playground | f9fb3238382d943d2497a99b743b0c069105b715 | [
"MIT"
] | 3 | 2020-11-19T21:55:30.000Z | 2020-12-01T14:03:22.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at this project's settings and dispatch."""
    # NOTE(review): 'auth-portal' contains a hyphen, which is not importable
    # as a Python package name -- confirm this matches the settings package.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'auth-portal.settings')
    try:
        # Imported lazily so a missing Django produces the helpful hint below.
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| 29 | 75 | 0.67916 |
ac39adfcf129bb92dfb9cfe31773936eae31cef0 | 6,967 | py | Python | examples/ConfigActuation/tests/test_config_actuation.py | laroque/volttron | 3ea851b718aa87d7f2c824298cf8a1a4c5920460 | [
"Apache-2.0"
] | 2 | 2019-10-03T17:00:34.000Z | 2019-10-03T17:00:38.000Z | examples/ConfigActuation/tests/test_config_actuation.py | laroque/volttron | 3ea851b718aa87d7f2c824298cf8a1a4c5920460 | [
"Apache-2.0"
] | 2 | 2018-08-29T13:45:17.000Z | 2018-09-06T12:34:23.000Z | examples/ConfigActuation/tests/test_config_actuation.py | cbs-iiith/volttron | a676d4af19a808581dde172ab08820087854e157 | [
"Apache-2.0"
] | 1 | 2019-04-04T17:13:46.000Z | 2019-04-04T17:13:46.000Z | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""
Pytest test cases for testing actuator agent using rpc calls.
"""
from datetime import datetime, timedelta
import json
import gevent
import gevent.subprocess as subprocess
import pytest
from gevent.subprocess import Popen
from mock import MagicMock
from volttron.platform import get_services_core, get_examples
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.messaging import topics
from volttron.platform.agent.known_identities import PLATFORM_DRIVER, CONFIGURATION_STORE
# RPC method names exposed by the actuator agent.
REQUEST_CANCEL_SCHEDULE = 'request_cancel_schedule'
REQUEST_NEW_SCHEDULE = 'request_new_schedule'
# Well-known VIP identities used in these tests.
PLATFORM_ACTUATOR = 'platform.actuator'
TEST_AGENT = 'test-agent'
# Scheduling priority and result strings used by the actuator API.
PRIORITY_LOW = 'LOW'
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'
@pytest.fixture(scope="module")
def publish_agent(request, volttron_instance1):
    """
    Fixture used for setting up the environment.
    1. Creates fake driver configs
    2. Starts the master driver agent with the created fake driver agents
    3. Starts the actuator agent
    4. Creates an instance Agent class for publishing and returns it
    :param request: pytest request object
    :param volttron_instance1: instance of volttron in which test cases are run
    :return: an instance of fake agent used for publishing
    """
    # Reset master driver config store. Each external `volttron-ctl` call
    # below must exit 0 before the next step runs.
    cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all']
    process = Popen(cmd, env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0
    # Add master driver configuration files to config store.
    cmd = ['volttron-ctl', 'config', 'store',PLATFORM_DRIVER,
           'fake.csv', 'fake_unit_testing.csv', '--csv']
    process = Popen(cmd, env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0
    # Register the fake driver's device configuration.
    config_name = "devices/fakedriver"
    cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER,
           config_name, 'fake_unit_testing.config', '--json']
    process = Popen(cmd, env=volttron_instance1.env,
                    cwd='scripts/scalability-testing',
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    result = process.wait()
    print(result)
    assert result == 0
    # Start the master driver agent which would intern start the fake driver
    # using the configs created above
    master_uuid = volttron_instance1.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    print("agent id: ", master_uuid)
    gevent.sleep(2) # wait for the agent to start and start the devices
    # Start the actuator agent through which publish agent should communicate
    # to fake device. Start the master driver agent which would intern start
    # the fake driver using the configs created above
    actuator_uuid = volttron_instance1.install_agent(
        agent_dir=get_services_core("ActuatorAgent"),
        config_file=get_services_core("ActuatorAgent/tests/actuator.config"),
        start=True)
    print("agent id: ", actuator_uuid)
    gevent.sleep(2)
    # Install the example ConfigActuation agent under test.
    example_uuid = volttron_instance1.install_agent(
        agent_dir=get_examples("ConfigActuation"),
        config_file={},
        vip_identity="config_actuation")
    gevent.sleep(2)
    # 3: Start a fake agent to publish to message bus
    publish_agent = volttron_instance1.build_agent(identity=TEST_AGENT)
    # 4: add a tear down method to stop the agents and the fake agent
    # that published to message bus
    def stop_agent():
        print("In teardown method of module")
        volttron_instance1.stop_agent(actuator_uuid)
        volttron_instance1.stop_agent(master_uuid)
        volttron_instance1.stop_agent(example_uuid)
        volttron_instance1.remove_agent(actuator_uuid)
        volttron_instance1.remove_agent(master_uuid)
        volttron_instance1.remove_agent(example_uuid)
        publish_agent.core.stop()
    request.addfinalizer(stop_agent)
    return publish_agent
# Fix: the previous decorator passed the reason positionally
# (`skipif("True", "4.1 need to fix")`), which pytest treats as a second
# *condition* string and fails to evaluate; `reason` must be a keyword.
@pytest.mark.skipif(True, reason="4.1 need to fix")
def test_thing(publish_agent):
    """Write a point through the config store and verify the device applies it.

    Reads the fake driver's initial value, pushes a JSON config via the
    ConfigActuation example agent's config-store entry, then reads the point
    again to confirm the actuation happened.
    """
    value = publish_agent.vip.rpc.call(PLATFORM_ACTUATOR,
                                       "get_point",
                                       "fakedriver/SampleWritableFloat1").get()
    assert value == 10.0
    publish_agent.vip.rpc.call(CONFIGURATION_STORE,
                               "manage_store",
                               "config_actuation",
                               "fakedriver",
                               json.dumps({"SampleWritableFloat1": 42.0}),
                               "json").get()
    value = publish_agent.vip.rpc.call(PLATFORM_ACTUATOR,
                                       "get_point",
                                       "fakedriver/SampleWritableFloat1").get()
    assert value == 42.0
cd67e28964982135877181a5a0102f667cd8156c | 11,878 | py | Python | eoxserver/services/ows/wcs/v20/geteocoverageset.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | eoxserver/services/ows/wcs/v20/geteocoverageset.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | eoxserver/services/ows/wcs/v20/geteocoverageset.py | constantinius/eoxserver_combined | 68f261133fed65a4e8a6ddba82b0d2845171e4bf | [
"OML"
] | null | null | null | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import sys
import os
import tempfile
import logging
from itertools import chain
import mimetypes
from django.db.models import Q
from django.http import HttpResponse
try:
from django.http import StreamingHttpResponse
except:
StreamingHttpResponse = HttpResponse
from eoxserver.core import Component, implements, ExtensionPoint
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.decoders import xml, kvp, typelist, enum
from eoxserver.resources.coverages import models
from eoxserver.services.ows.interfaces import (
ServiceHandlerInterface, GetServiceHandlerInterface,
PostServiceHandlerInterface
)
from eoxserver.services.ows.wcs.v20.util import (
nsmap, parse_subset_kvp, parse_subset_xml
)
from eoxserver.services.ows.wcs.v20.parameters import WCS20CoverageRenderParams
from eoxserver.services.ows.common.config import WCSEOConfigReader
from eoxserver.services.ows.wcs.interfaces import (
WCSCoverageRendererInterface, PackageWriterInterface
)
from eoxserver.services.subset import Subsets, Trim
from eoxserver.services.exceptions import (
NoSuchDatasetSeriesOrCoverageException, InvalidRequestException,
InvalidSubsettingException
)
logger = logging.getLogger(__name__)
class WCS20GetEOCoverageSetHandler(Component):
    """OWS handler for the WCS 2.0 EO extension GetEOCoverageSet operation.

    Resolves the requested EO IDs to coverages (directly or via nested
    collections), applies spatio-temporal subsetting, renders each coverage,
    and streams them back as a single package (e.g. zip/tar) chosen by the
    requested format.

    NOTE: this module uses Python 2 syntax (``except ValueError, e`` below).
    """
    implements(ServiceHandlerInterface)
    implements(GetServiceHandlerInterface)
    implements(PostServiceHandlerInterface)
    coverage_renderers = ExtensionPoint(WCSCoverageRendererInterface)
    package_writers = ExtensionPoint(PackageWriterInterface)
    service = "WCS"
    versions = ("2.0.0", "2.0.1")
    request = "GetEOCoverageSet"
    index = 21
    def get_decoder(self, request):
        # KVP decoding for GET, XML decoding for POST. Other HTTP methods
        # fall through and return None.
        if request.method == "GET":
            return WCS20GetEOCoverageSetKVPDecoder(request.GET)
        elif request.method == "POST":
            return WCS20GetEOCoverageSetXMLDecoder(request.body)
    def get_params(self, coverage, decoder, request):
        # Bundle one coverage with the request-wide subsets for rendering.
        return WCS20CoverageRenderParams(
            coverage, Subsets(decoder.subsets), http_request=request
        )
    def get_renderer(self, params):
        # First renderer claiming support wins.
        for renderer in self.coverage_renderers:
            if renderer.supports(params):
                return renderer
        # NOTE(review): the '%s' placeholder is never filled in -- the error
        # message is emitted literally with '%s' in it.
        raise InvalidRequestException(
            "Could not find renderer for coverage '%s'."
        )
    # NOTE(review): "pacakge" is a typo for "package", but the name is used
    # consistently (see handle() below), so renaming would touch callers.
    def get_pacakge_writer(self, format, params):
        for writer in self.package_writers:
            if writer.supports(format, params):
                return writer
        raise InvalidRequestException(
            "Format '%s' is not supported." % format, locator="format"
        )
    @property
    def constraints(self):
        # Paging limits read from the EOxServer configuration.
        reader = WCSEOConfigReader(get_eoxserver_config())
        return {
            "CountDefault": reader.paging_count_default
        }
    def handle(self, request):
        """Decode the request, collect matching coverages, package and stream."""
        decoder = self.get_decoder(request)
        eo_ids = decoder.eo_ids
        format, format_params = decoder.format
        writer = self.get_pacakge_writer(format, format_params)
        containment = decoder.containment
        # Clamp the requested count to the configured default, if any.
        count_default = self.constraints["CountDefault"]
        count = decoder.count
        if count_default is not None:
            count = min(count, count_default)
        try:
            subsets = Subsets(
                decoder.subsets,
                crs="http://www.opengis.net/def/crs/EPSG/0/4326",
                allowed_types=Trim
            )
        except ValueError, e:
            raise InvalidSubsettingException(str(e))
        # NOTE(review): bare `raise` with no active exception raises
        # "TypeError: exceptions must derive from BaseException" (py2) /
        # RuntimeError -- an explicit exception was presumably intended here.
        if len(eo_ids) == 0:
            raise
        # fetch a list of all requested EOObjects
        available_ids = models.EOObject.objects.filter(
            identifier__in=eo_ids
        ).values_list("identifier", flat=True)
        # match the requested EOIDs against the available ones. If any are
        # requested, that are not available, raise and exit.
        failed = [eo_id for eo_id in eo_ids if eo_id not in available_ids]
        if failed:
            raise NoSuchDatasetSeriesOrCoverageException(failed)
        collections_qs = subsets.filter(models.Collection.objects.filter(
            identifier__in=eo_ids
        ), containment="overlaps")
        # create a set of all indirectly referenced containers by iterating
        # recursively. The containment is set to "overlaps", to also include
        # collections that might have been excluded with "contains" but would
        # have matching coverages inserted.
        def recursive_lookup(super_collection, collection_set):
            # Children of super_collection not yet visited (mutates
            # collection_set in place).
            sub_collections = models.Collection.objects.filter(
                collections__in=[super_collection.pk]
            ).exclude(
                pk__in=map(lambda c: c.pk, collection_set)
            )
            sub_collections = subsets.filter(sub_collections, "overlaps")
            # Add all to the set
            collection_set |= set(sub_collections)
            for sub_collection in sub_collections:
                recursive_lookup(sub_collection, collection_set)
        collection_set = set(collections_qs)
        for collection in set(collection_set):
            recursive_lookup(collection, collection_set)
        collection_pks = map(lambda c: c.pk, collection_set)
        # Get all either directly referenced coverages or coverages that are
        # within referenced containers. Full subsetting is applied here.
        coverages_qs = models.Coverage.objects.filter(
            Q(identifier__in=eo_ids) | Q(collections__in=collection_pks)
        )
        coverages_qs = subsets.filter(coverages_qs, containment=containment)
        # save a reference before limits are applied to obtain the full number
        # of matched coverages.
        coverages_no_limit_qs = coverages_qs
        # compute how many (if any) coverages can be retrieved. This depends on
        # the "count" parameter and default setting. Also, if we already
        # exceeded the count, limit the number of dataset series aswell
        """
        if inc_dss_section:
            num_collections = len(collection_set)
        else:
            num_collections = 0
        if num_collections < count and inc_cov_section:
            coverages_qs = coverages_qs.order_by("identifier")[:count - num_collections]
        elif num_collections == count or not inc_cov_section:
            coverages_qs = []
        else:
            coverages_qs = []
        collection_set = sorted(collection_set, key=lambda c: c.identifier)[:count]
        """
        # get a number of coverages that *would* have been included, but are not
        # because of the count parameter
        # count_all_coverages = coverages_no_limit_qs.count()
        # TODO: if containment is "within" we need to check all collections
        # again
        if containment == "within":
            collection_set = filter(lambda c: subsets.matches(c), collection_set)
        coverages = []
        dataset_series = []
        # finally iterate over everything that has been retrieved and get
        # a list of dataset series and coverages to be encoded into the response
        for eo_object in chain(coverages_qs, collection_set):
            if issubclass(eo_object.real_type, models.Coverage):
                coverages.append(eo_object.cast())
        # Reserve a temp file for the package; the fd is closed immediately
        # because the writer reopens the path itself.
        fd, pkg_filename = tempfile.mkstemp()
        tmp = os.fdopen(fd)
        tmp.close()
        package = writer.create_package(pkg_filename, format, format_params)
        for coverage in coverages:
            params = self.get_params(coverage, decoder, request)
            renderer = self.get_renderer(params)
            result_set = renderer.render(params)
            # Locations are "<coverage id>/<file>", so filename collisions
            # only need to be avoided within one coverage.
            all_filenames = set()
            for result_item in result_set:
                if not result_item.filename:
                    ext = mimetypes.guess_extension(result_item.content_type)
                    filename = coverage.identifier + ext
                else:
                    filename = result_item.filename
                if filename in all_filenames:
                    continue # TODO: create new filename
                all_filenames.add(filename)
                location = "%s/%s" % (coverage.identifier, filename)
                writer.add_to_package(
                    package, result_item.data_file, result_item.size, location
                )
        mime_type = writer.get_mime_type(package, format, format_params)
        ext = writer.get_file_extension(package, format, format_params)
        writer.cleanup(package)
        # Stream the finished package; tempfile_iterator deletes it afterwards.
        response = StreamingHttpResponse(
            tempfile_iterator(pkg_filename), mime_type
        )
        response["Content-Disposition"] = 'inline; filename="ows%s"' % ext
        response["Content-Length"] = str(os.path.getsize(pkg_filename))
        return response
return response
def tempfile_iterator(filename, chunksize=2048, delete=True):
    """Yield the contents of `filename` in binary chunks, then delete it.

    Args:
        filename: path of the (temporary) file to stream.
        chunksize: number of bytes read per yielded chunk.
        delete: remove the file once the generator is finalized.

    The file is opened in binary mode: the streamed payload is a binary
    package (zip/tar), which text mode would corrupt or fail to decode.
    Deletion happens in a ``finally`` block so the temp file is cleaned up
    even when the consumer stops iterating early (e.g. client disconnect),
    which previously leaked the file.
    """
    try:
        with open(filename, "rb") as file_obj:
            while True:
                data = file_obj.read(chunksize)
                if not data:
                    break
                yield data
    finally:
        if delete:
            os.remove(filename)
def pos_int(value):
    """Parse `value` as a non-negative integer; raise ValueError otherwise."""
    parsed = int(value)
    if parsed < 0:
        raise ValueError("Negative values are not allowed.")
    return parsed
# Valid values for the GetEOCoverageSet "containment" parameter.
# NOTE(review): the second argument's meaning comes from the project's
# eoxserver.core.decoders.enum helper -- presumably a strictness/required
# flag; confirm against that module.
containment_enum = enum(
    ("overlaps", "contains"), False
)
def parse_format(string):
    """Split a format spec like "mime/type; k=v; ..." into (mime, params).

    Returns a (mime_type, params_dict) tuple; parameters after the first
    ';' are parsed as "key=value" pairs (whitespace-stripped keys split on
    the first '=').
    """
    pieces = string.split(";")
    options = {}
    for piece in pieces[1:]:
        key, value = piece.strip().split("=", 1)
        options[key] = value
    return pieces[0], options
class WCS20GetEOCoverageSetKVPDecoder(kvp.Decoder):
    """KVP (GET query-string) decoder for GetEOCoverageSet parameters.

    NOTE: `sys.maxint` is Python 2 only (this module uses Python 2 syntax
    elsewhere); under Python 3 this would be `sys.maxsize`.
    """
    eo_ids = kvp.Parameter("eoid", type=typelist(str, ","), num=1, locator="eoid")
    subsets = kvp.Parameter("subset", type=parse_subset_kvp, num="*")
    containment = kvp.Parameter(type=containment_enum, num="?")
    count = kvp.Parameter(type=pos_int, num="?", default=sys.maxint)
    format = kvp.Parameter(num=1, type=parse_format)
class WCS20GetEOCoverageSetXMLDecoder(xml.Decoder):
    """XML (POST body) decoder for GetEOCoverageSet parameters.

    Mirrors the KVP decoder above; XPath expressions are resolved against
    the namespaces in `nsmap`.
    """
    eo_ids = xml.Parameter("/wcseo:EOID/text()", num="+", locator="eoid")
    subsets = xml.Parameter("/wcs:DimensionTrim", type=parse_subset_xml, num="*")
    containment = xml.Parameter("/wcseo:containment/text()", type=containment_enum, locator="containment")
    count = xml.Parameter("/@count", type=pos_int, num="?", default=sys.maxint, locator="count")
    format = xml.Parameter("/wcs:format/text()", type=parse_format, num=1, locator="format")
    namespaces = nsmap
| 37.352201 | 106 | 0.657518 |
91c24c9b867550f512db9f99f7473338352d5e9d | 12,495 | py | Python | tests/todo.py | tomschr/ics.py | f6cb12281bf78e210d51c6ff5bd324d8f2c01aef | [
"Apache-2.0"
] | null | null | null | tests/todo.py | tomschr/ics.py | f6cb12281bf78e210d51c6ff5bd324d8f2c01aef | [
"Apache-2.0"
] | null | null | null | tests/todo.py | tomschr/ics.py | f6cb12281bf78e210d51c6ff5bd324d8f2c01aef | [
"Apache-2.0"
] | null | null | null | import unittest
from datetime import datetime, datetime as dt, timedelta, timezone
from dateutil.tz import UTC as dateutil_tzutc
from ics.alarm.display import DisplayAlarm
from ics.grammar.parse import Container
from ics.icalendar import Calendar
from ics.todo import Todo
from .fixture import cal27, cal28, cal29, cal30, cal31
datetime_tzutc = timezone.utc
CRLF = "\r\n"
class TestTodo(unittest.TestCase):
    """Unit tests for ics.todo.Todo: construction, validation of the
    begin/due/duration triple, rich comparisons, parsing and serialization.
    """

    maxDiff = None

    def test_init(self):
        """A bare Todo gets a uid and dtstamp; everything else stays unset."""
        t = Todo()
        self.assertIsNotNone(t.uid)
        self.assertIsNotNone(t.dtstamp)
        self.assertIsNone(t.completed)
        self.assertIsNone(t.created)
        self.assertIsNone(t.description)
        self.assertIsNone(t.begin)
        self.assertIsNone(t.location)
        self.assertIsNone(t.percent)
        self.assertIsNone(t.priority)
        self.assertIsNone(t.name)
        self.assertIsNone(t.url)
        self.assertIsNone(t.status)
        self.assertEqual(t.extra, Container(name='VTODO'))

    def test_init_non_exclusive_arguments(self):
        """Non-interacting keyword arguments are stored verbatim."""
        # attributes percent, priority, begin, due, and duration
        # aren't tested here
        dtstamp = datetime(2018, 2, 18, 12, 19, tzinfo=datetime_tzutc)
        completed = dtstamp + timedelta(days=1)
        created = dtstamp + timedelta(seconds=1)
        alarms = [DisplayAlarm]
        t = Todo(
            uid='uid',
            dtstamp=dtstamp,
            completed=completed,
            created=created,
            description='description',
            location='location',
            name='name',
            url='url',
            alarms=alarms)
        self.assertEqual(t.uid, 'uid')
        self.assertEqual(t.dtstamp, dtstamp)
        self.assertEqual(t.completed, completed)
        self.assertEqual(t.created, created)
        self.assertEqual(t.description, 'description')
        self.assertEqual(t.location, 'location')
        self.assertEqual(t.name, 'name')
        self.assertEqual(t.url, 'url')
        self.assertEqual(t.alarms, alarms)

    def test_percent(self):
        """percent accepts 0-100 inclusive and rejects values outside."""
        t1 = Todo(percent=0)
        self.assertEqual(t1.percent, 0)
        t2 = Todo(percent=100)
        self.assertEqual(t2.percent, 100)
        with self.assertRaises(ValueError):
            Todo(percent=-1)
        with self.assertRaises(ValueError):
            Todo(percent=101)

    def test_priority(self):
        """priority accepts 0-9 inclusive and rejects values outside."""
        t1 = Todo(priority=0)
        self.assertEqual(t1.priority, 0)
        t2 = Todo(priority=9)
        self.assertEqual(t2.priority, 9)
        with self.assertRaises(ValueError):
            Todo(priority=-1)
        with self.assertRaises(ValueError):
            Todo(priority=10)

    def test_begin(self):
        """begin is stored as given and may not be moved after due."""
        begin = datetime(2018, 2, 18, 12, 19, tzinfo=datetime_tzutc)
        t = Todo(begin=begin)
        self.assertEqual(t.begin, begin)
        # begin after due
        t = Todo(due=datetime.fromtimestamp(1))
        with self.assertRaises(ValueError):
            t.begin = datetime.fromtimestamp(2)

    def test_duration(self):
        """duration accepts dicts, tuples and timedeltas, or is derived."""
        begin = datetime(2018, 2, 18, 12, 19, tzinfo=datetime_tzutc)
        t1 = Todo(begin=begin, duration={'hours': 1})
        self.assertEqual(t1.duration, timedelta(hours=1))
        t2 = Todo(begin=begin, duration=(1,))
        self.assertEqual(t2.duration, timedelta(days=1))
        t3 = Todo(begin=begin, duration=timedelta(minutes=1))
        self.assertEqual(t3.duration, timedelta(minutes=1))
        # Calculate duration from begin and due values
        t4 = Todo(begin=begin, due=begin + timedelta(1))
        self.assertEqual(t4.duration, timedelta(1))

    def test_due(self):
        """due is stored, rejected before begin, or derived from duration."""
        begin = datetime(2018, 2, 18, 12, 19, tzinfo=datetime_tzutc)
        due = begin + timedelta(1)
        t1 = Todo(due=due)
        self.assertEqual(t1.due, begin + timedelta(1))
        due = begin - timedelta(1)
        with self.assertRaises(ValueError):
            Todo(begin=begin, due=due)
        # Calculate due from begin and duration value
        t2 = Todo(begin=begin, duration=(1,))
        self.assertEqual(t2.due, begin + timedelta(1))

    def test_invalid_time_attributes(self):
        """due and duration are mutually exclusive; duration needs begin."""
        # due and duration must not be set at the same time
        with self.assertRaises(ValueError):
            Todo(begin=datetime.now(), due=datetime.now() + timedelta(1), duration=timedelta(1))
        # duration requires begin
        with self.assertRaises(ValueError):
            Todo(duration=timedelta(1))

    def test_repr(self):
        """repr reflects which of name/begin/due/duration are present."""
        begin = datetime(2018, 2, 18, 12, 19, tzinfo=datetime_tzutc)
        t1 = Todo()
        self.assertEqual(repr(t1), '<floating Todo>')
        t2 = Todo(name='foo')
        self.assertEqual(repr(t2), "<floating Todo 'foo'>")
        t3 = Todo(name='foo', begin=begin)
        self.assertEqual(repr(t3), "<Todo 'foo' begin: 2018-02-18 12:19:00+00:00>")
        t4 = Todo(name='foo', due=begin)
        self.assertEqual(repr(t4), "<Todo 'foo' fixed due: 2018-02-18 12:19:00+00:00>")
        t4 = Todo(name='foo', begin=begin, due=begin + timedelta(1))
        self.assertEqual(repr(t4),
                         "<Todo 'foo' begin: 2018-02-18 12:19:00+00:00 fixed due: 2018-02-19 12:19:00+00:00 duration: 1 day, 0:00:00>")

    def test_todo_lt(self):
        """__lt__ orders by name, then by due time; other types raise."""
        t1 = Todo()
        t2 = Todo(name='a')
        t3 = Todo(name='b')
        t4 = Todo(due=datetime.fromtimestamp(10))
        t5 = Todo(due=datetime.fromtimestamp(20))
        # Check comparison by name
        self.assertFalse(t1 < t1)
        self.assertTrue(t1 < t2)
        self.assertFalse(t2 < t1)
        self.assertTrue(t2 < t3)
        self.assertFalse(t3 < t2)
        # Check comparison by due time
        self.assertTrue(t4 < t5)
        self.assertFalse(t4 < t4)
        self.assertFalse(t5 < t4)
        # Check invalid call
        with self.assertRaises(TypeError):
            t4 < t4.due  # fixed: was `t4 > t4.due`, which exercised __gt__
        with self.assertRaises(TypeError):
            t2 < 1

    def test_todo_le(self):
        """__le__ orders by name, then by due time; other types raise."""
        t1 = Todo()
        t2 = Todo(name='a')
        t3 = Todo(name='b')
        t4 = Todo(due=datetime.fromtimestamp(10))
        t5 = Todo(due=datetime.fromtimestamp(20))
        # Check comparison by name
        self.assertTrue(t1 <= t1)
        self.assertTrue(t1 <= t2)
        self.assertFalse(t2 <= t1)
        self.assertTrue(t2 <= t3)
        self.assertTrue(t2 <= t2)
        self.assertFalse(t3 <= t2)
        # Check comparison by due time
        self.assertTrue(t4 <= t5)
        self.assertTrue(t4 <= t4)
        self.assertFalse(t5 <= t4)
        # Check invalid call
        with self.assertRaises(TypeError):
            t4 <= t4.due  # fixed: was `t4 > t4.due`, which exercised __gt__
        with self.assertRaises(TypeError):
            t2 <= 1

    def test_todo_gt(self):
        """__gt__ orders by name, then by due time; other types raise."""
        t1 = Todo()
        t2 = Todo(name='a')
        t3 = Todo(name='b')
        t4 = Todo(due=datetime.fromtimestamp(10))
        t5 = Todo(due=datetime.fromtimestamp(20))
        # Check comparison by name
        self.assertFalse(t1 > t1)
        self.assertFalse(t1 > t2)
        self.assertTrue(t2 > t1)
        self.assertFalse(t2 > t3)
        self.assertFalse(t2 > t2)
        self.assertTrue(t3 > t2)
        # Check comparison by due time
        self.assertFalse(t4 > t5)
        self.assertFalse(t4 > t4)
        self.assertTrue(t5 > t4)
        # Check invalid call
        with self.assertRaises(TypeError):
            t4 > t4.due
        with self.assertRaises(TypeError):
            t2 > 1

    def test_todo_ge(self):
        """__ge__ orders by name, then by due time; other types raise."""
        t1 = Todo()
        t2 = Todo(name='a')
        t3 = Todo(name='b')
        t4 = Todo(due=datetime.fromtimestamp(10))
        t5 = Todo(due=datetime.fromtimestamp(20))
        # Check comparison by name
        self.assertTrue(t1 >= t1)
        # fixed: the two assertions below used `<=`, duplicating test_todo_le
        # instead of testing __ge__ (expectations mirror t1 < t2 / t2 > t1
        # from the lt/gt tests above)
        self.assertFalse(t1 >= t2)
        self.assertTrue(t2 >= t1)
        self.assertFalse(t2 >= t3)
        self.assertTrue(t2 >= t2)
        self.assertTrue(t3 >= t2)
        # Check comparison by due time
        self.assertFalse(t4 >= t5)
        self.assertTrue(t4 >= t4)
        self.assertTrue(t5 >= t4)
        # Check invalid call
        with self.assertRaises(TypeError):
            t4 >= t4.due  # fixed: was `t4 > t4.due`, which exercised __gt__
        with self.assertRaises(TypeError):
            t2 >= 1

    def test_todo_eq(self):
        """Equality is identity-like: two distinct Todos are not equal."""
        t1 = Todo()
        t2 = Todo()
        self.assertTrue(t1 == t1)
        self.assertFalse(t1 == t2)

    def test_todo_ne(self):
        """Inequality is the negation of equality."""
        t1 = Todo()
        t2 = Todo()
        self.assertFalse(t1 != t1)
        self.assertTrue(t1 != t2)

    def test_extract(self):
        """All VTODO properties are parsed from the cal27 fixture."""
        c = Calendar(cal27)
        t = next(iter(c.todos))
        self.assertEqual(t.dtstamp, dt(2018, 2, 18, 15, 47, 00, tzinfo=dateutil_tzutc))
        self.assertEqual(t.uid, 'Uid')
        self.assertEqual(t.completed, dt(2018, 4, 18, 15, 00, 00, tzinfo=dateutil_tzutc))
        self.assertEqual(t.created, dt(2018, 2, 18, 15, 48, 00, tzinfo=dateutil_tzutc))
        self.assertEqual(t.description, 'Lorem ipsum dolor sit amet.')
        self.assertEqual(t.begin, dt(2018, 2, 18, 16, 48, 00, tzinfo=dateutil_tzutc))
        self.assertEqual(t.location, 'Earth')
        self.assertEqual(t.percent, 0)
        self.assertEqual(t.priority, 0)
        self.assertEqual(t.name, 'Name')
        self.assertEqual(t.url, 'https://www.example.com/cal.php/todo.ics')
        self.assertEqual(t.duration, timedelta(minutes=10))
        self.assertEqual(len(t.alarms), 1)

    def test_extract_due(self):
        """DUE is parsed from the cal28 fixture."""
        c = Calendar(cal28)
        t = next(iter(c.todos))
        self.assertEqual(t.due, dt(2018, 2, 18, 16, 48, 00, tzinfo=dateutil_tzutc))

    def test_extract_due_error_duration(self):
        """A VTODO with both DUE and DURATION is rejected."""
        with self.assertRaises(ValueError):
            Calendar(cal29)

    def test_extract_duration_error_due(self):
        """A VTODO with both DURATION and DUE is rejected."""
        with self.assertRaises(ValueError):
            Calendar(cal30)

    def test_output(self):
        """A parsed Todo serializes back to the expected CRLF-joined text."""
        c = Calendar(cal27)
        t = next(iter(c.todos))
        test_str = CRLF.join(("BEGIN:VTODO",
                              "SEQUENCE:0",
                              "BEGIN:VALARM",
                              "ACTION:DISPLAY",
                              "DESCRIPTION:Event reminder",
                              "TRIGGER:PT1H",
                              "END:VALARM",
                              "COMPLETED:20180418T150000Z",
                              "CREATED:20180218T154800Z",
                              "DESCRIPTION:Lorem ipsum dolor sit amet.",
                              "DTSTAMP:20180218T154700Z",
                              "DURATION:PT10M",
                              "LOCATION:Earth",
                              "PERCENT-COMPLETE:0",
                              "PRIORITY:0",
                              "DTSTART:20180218T164800Z",
                              "SUMMARY:Name",
                              "UID:Uid",
                              "URL:https://www.example.com/cal.php/todo.ics",
                              "END:VTODO"))
        self.assertEqual(str(t), test_str)

    def test_output_due(self):
        """A Todo built in code serializes its DUE property."""
        dtstamp = datetime(2018, 2, 19, 21, 00, tzinfo=datetime_tzutc)
        due = datetime(2018, 2, 20, 1, 00, tzinfo=datetime_tzutc)
        t = Todo(dtstamp=dtstamp, uid='Uid', due=due)
        test_str = CRLF.join(("BEGIN:VTODO",
                              "DTSTAMP:20180219T210000Z",
                              "DUE:20180220T010000Z",
                              "UID:Uid",
                              "END:VTODO"))
        self.assertEqual(str(t), test_str)

    def test_unescape_texts(self):
        """Escaped characters in text fields are unescaped on parse."""
        c = Calendar(cal31)
        t = next(iter(c.todos))
        self.assertEqual(t.name, "Hello, \n World; This is a backslash : \\ and another new \n line")
        self.assertEqual(t.location, "In, every text field")
        self.assertEqual(t.description, "Yes, all of them;")

    def test_escape_output(self):
        """Special characters in text fields are escaped on serialization."""
        dtstamp = datetime(2018, 2, 19, 21, 00, tzinfo=datetime_tzutc)
        t = Todo(dtstamp=dtstamp, uid='Uid')
        t.name = "Hello, with \\ special; chars and \n newlines"
        t.location = "Here; too"
        t.description = "Every\nwhere ! Yes, yes !"
        test_str = CRLF.join(("BEGIN:VTODO",
                              "DESCRIPTION:Every\\nwhere ! Yes\\, yes !",
                              "DTSTAMP:20180219T210000Z",
                              "LOCATION:Here\\; too",
                              "SUMMARY:Hello\\, with \\\\ special\\; chars and \\n newlines",
                              "UID:Uid",
                              "END:VTODO"))
        self.assertEqual(str(t), test_str)
| 34.902235 | 135 | 0.561745 |
b4290eafff2e2a63f5a179f526de7d361345b652 | 730 | py | Python | matbench/tests/util.py | sparks-baird/matbench | 4424609454286e32fff2bcc724379b2a316c5a76 | [
"MIT"
] | 15 | 2021-11-01T09:02:19.000Z | 2022-03-19T10:59:41.000Z | matbench/tests/util.py | sparks-baird/matbench | 4424609454286e32fff2bcc724379b2a316c5a76 | [
"MIT"
] | 62 | 2021-09-20T14:09:59.000Z | 2022-03-30T19:03:22.000Z | matbench/tests/util.py | ardunn/matbench | 7d11a2d63766339ec00e610e2255be29b81544d3 | [
"MIT"
] | 4 | 2021-03-22T10:37:42.000Z | 2021-07-20T14:11:28.000Z | import os
import random
import numpy as np
from matbench.constants import CLF_KEY, REG_KEY
MB_TEST_RANDOM_SEED = 1001
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
FULL_TEST = os.environ.get("MB_FULL_TESTS", False)
def model_random(training_outputs, test_inputs, response_type):
    """Produce deterministic pseudo-random predictions for a benchmark fold.

    Args:
        training_outputs: training targets; for regression they bound the
            sampled predictions.
        test_inputs: test samples; only their count is used.
        response_type: CLF_KEY for classification (random booleans) or
            REG_KEY for regression (uniform samples between the min and
            max training targets). Any other value returns None.

    Fixes: removed the dead ``random.Random`` assignment that was
    immediately overwritten, renamed the ambiguous local ``l``, and passed
    the bounds to ``uniform`` in the documented (low, high) order.
    """
    rng = np.random.RandomState(MB_TEST_RANDOM_SEED)
    n_samples = len(test_inputs)
    if response_type == CLF_KEY:
        return rng.choice([True, False], size=n_samples)
    # Regression: simply sample from a uniform distribution bounded by the
    # min and max training targets.
    if response_type == REG_KEY:
        return rng.uniform(min(training_outputs), max(training_outputs),
                           size=n_samples)
| 26.071429 | 78 | 0.731507 |
19d2c0b0d1fe52339ad167a2e6dc5ea2767dfa77 | 2,395 | py | Python | tests/unit/test_validation_rule_hostname_validity.py | sh8121att/airship-drydock | def51688e49c40d398c6adff7c4ff9cec9a38a3b | [
"Apache-2.0"
] | 14 | 2017-03-07T17:00:22.000Z | 2021-04-02T14:15:04.000Z | tests/unit/test_validation_rule_hostname_validity.py | sh8121att/airship-drydock | def51688e49c40d398c6adff7c4ff9cec9a38a3b | [
"Apache-2.0"
] | 82 | 2017-02-16T16:54:18.000Z | 2018-06-04T13:40:32.000Z | tests/unit/test_validation_rule_hostname_validity.py | att-comdev/drydock | 506e06623a5f1c11c0d34f2089851cc8381f06ae | [
"Apache-2.0"
] | 16 | 2017-02-14T19:47:00.000Z | 2018-04-26T10:13:05.000Z | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Validation Rule Hostname Validity"""
import logging
from drydock_provisioner.orchestrator.orchestrator import Orchestrator
from drydock_provisioner.orchestrator.validations.hostname_validity import HostnameValidity
LOG = logging.getLogger(__name__)
class TestHostnameValidity(object):
    """Exercise the HostnameValidity validator on valid and invalid designs."""

    def test_hostname(self, mocker, deckhand_ingester, drydock_state,
                      input_files):
        """A valid design yields exactly one non-error 'Hostname' message."""
        design_ref = "file://%s" % str(input_files.join("validation.yaml"))
        orchestrator = Orchestrator(
            state_manager=drydock_state, ingester=deckhand_ingester)
        status, site_design = Orchestrator.get_effective_site(
            orchestrator, design_ref)
        messages = HostnameValidity().execute(
            site_design, orchestrator=orchestrator)
        first = messages[0].to_dict()
        assert 'Hostname' in first.get('message')
        assert first.get('error') is False
        assert len(messages) == 1

    def test_invalid_hostname(self, mocker, deckhand_ingester, drydock_state,
                              input_files):
        """An invalid design yields one error message naming the bad host."""
        design_ref = "file://%s" % str(
            input_files.join("invalid_validation.yaml"))
        orchestrator = Orchestrator(
            state_manager=drydock_state, ingester=deckhand_ingester)
        status, site_design = Orchestrator.get_effective_site(
            orchestrator, design_ref)
        messages = HostnameValidity().execute(
            site_design, orchestrator=orchestrator)
        for entry in messages:
            details = entry.to_dict()
            LOG.debug(details)
            assert details.get('error')
            assert len(details.get('documents')) > 0
            assert "bad__name" in details.get('message')
        assert len(messages) == 1
| 36.846154 | 91 | 0.695198 |
e6bb8013a280a024cd95ca1b8f5df3bd8a1b608c | 372 | py | Python | microbees/helper.py | microBeesTech/pythonSDK | 9537c864e3d6d81f8a926fffbc8d0ce6ff2ede68 | [
"MIT"
] | null | null | null | microbees/helper.py | microBeesTech/pythonSDK | 9537c864e3d6d81f8a926fffbc8d0ce6ff2ede68 | [
"MIT"
] | 1 | 2021-03-16T11:12:02.000Z | 2021-03-16T11:12:02.000Z | microbees/helper.py | microBeesTech/pythonSDK | 9537c864e3d6d81f8a926fffbc8d0ce6ff2ede68 | [
"MIT"
] | null | null | null | def urljoin(*parts):
# first strip extra forward slashes (except http:// and the likes) and create list
part_list = []
for part in parts:
p = str(part)
if p.endswith('//'):
p = p[0:-1]
else:
p = p.strip('/')
part_list.append(p)
# join everything together
url = '/'.join(part_list)
return url
| 26.571429 | 86 | 0.532258 |
898eef9db36c771da278aa47cb129f28023917eb | 725 | py | Python | tests/sources/test_law_status_database.py | openoereb/pyramid_oereb | d70c7fb15803b9454954e9f1d23727224bfd10bc | [
"BSD-2-Clause"
] | 4 | 2019-03-28T09:58:34.000Z | 2020-04-29T15:08:44.000Z | tests/sources/test_law_status_database.py | openoereb/pyramid_oereb | d70c7fb15803b9454954e9f1d23727224bfd10bc | [
"BSD-2-Clause"
] | 452 | 2019-02-05T10:10:43.000Z | 2022-03-31T08:27:38.000Z | tests/sources/test_law_status_database.py | openoereb/pyramid_oereb | d70c7fb15803b9454954e9f1d23727224bfd10bc | [
"BSD-2-Clause"
] | 20 | 2019-02-01T09:04:48.000Z | 2021-12-23T09:23:07.000Z | # -*- coding: utf-8 -*-
import pytest
from pyramid_oereb.lib.config import Config
from pyramid_oereb.lib.adapter import DatabaseAdapter
from pyramid_oereb.standard.sources.law_status import DatabaseSource
from pyramid_oereb.standard.models.main import LawStatus
from tests.mockrequest import MockParameter
@pytest.mark.run(order=2)
def test_init():
    """The DatabaseSource wires up a DatabaseAdapter and the LawStatus model."""
    params = Config.get_law_status_config().get('source').get('params')
    source = DatabaseSource(**params)
    assert isinstance(source._adapter_, DatabaseAdapter)
    assert source._model_ == LawStatus
source = DatabaseSource(**Config.get_law_status_config().get('source').get('params'))
source.read(MockParameter())
assert isinstance(source.records, list)
| 31.521739 | 89 | 0.775172 |
0e671d969608739e5b0b09e7d778cb5a3e4b374f | 4,522 | py | Python | lib-opencc-android/src/main/jni/OpenCC/deps/pybind11-2.5.0/tests/test_copy_move.py | huxiaomao/android-opencc | a251591316323151a97d977c39c85e0571c60971 | [
"MIT"
] | 5,895 | 2015-01-01T12:28:18.000Z | 2022-03-31T07:50:46.000Z | lib-opencc-android/src/main/jni/OpenCC/deps/pybind11-2.5.0/tests/test_copy_move.py | huxiaomao/android-opencc | a251591316323151a97d977c39c85e0571c60971 | [
"MIT"
] | 514 | 2015-02-05T14:56:54.000Z | 2021-06-25T09:29:52.000Z | lib-opencc-android/src/main/jni/OpenCC/deps/pybind11-2.5.0/tests/test_copy_move.py | huxiaomao/android-opencc | a251591316323151a97d977c39c85e0571c60971 | [
"MIT"
] | 888 | 2015-01-01T11:17:44.000Z | 2022-03-31T06:44:44.000Z | import pytest
from pybind11_tests import copy_move_policies as m
def test_lacking_copy_ctor():
    """Returning a non-copyable type by value raises RuntimeError."""
    with pytest.raises(RuntimeError, match="is non-copyable!"):
        m.lacking_copy_ctor.get_one()
def test_lacking_move_ctor():
    """Returning a type that is neither movable nor copyable raises."""
    with pytest.raises(RuntimeError, match="is neither movable nor copyable!"):
        m.lacking_move_ctor.get_one()
def test_move_and_copy_casts():
    """Cast some values in C++ via custom type casters and count the number of moves/copies."""
    # The cstats counters are cumulative per C++ type across this call.
    cstats = m.move_and_copy_cstats()
    c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
    # The type move constructions/assignments below each get incremented: the move assignment comes
    # from the type_caster load; the move construction happens when extracting that via a cast or
    # loading into an argument.
    assert m.move_and_copy_casts(3) == 18
    # MoveOnlyInt must never be copied.
    assert c_m.copy_assignments + c_m.copy_constructions == 0
    assert c_m.move_assignments == 2
    # ">=" lower bounds: the test tolerates extra compiler-generated moves.
    assert c_m.move_constructions >= 2
    assert c_mc.alive() == 0
    assert c_mc.copy_assignments + c_mc.copy_constructions == 0
    assert c_mc.move_assignments == 2
    assert c_mc.move_constructions >= 2
    assert c_c.alive() == 0
    assert c_c.copy_assignments == 2
    assert c_c.copy_constructions >= 2
    # Every temporary created above must have been destroyed by now.
    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
def test_move_and_copy_loads():
    """Call some functions that load arguments via custom type casters and count the number of
    moves/copies."""
    # The cstats counters are cumulative per C++ type across all calls below,
    # so the totals at the end sum every annotated per-call count.
    cstats = m.move_and_copy_cstats()
    c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
    assert m.move_only(10) == 10  # 1 move, c_m
    assert m.move_or_copy(11) == 11  # 1 move, c_mc
    assert m.copy_only(12) == 12  # 1 copy, c_c
    assert m.move_pair((13, 14)) == 27  # 1 c_m move, 1 c_mc move
    assert m.move_tuple((15, 16, 17)) == 48  # 2 c_m moves, 1 c_mc move
    assert m.copy_tuple((18, 19)) == 37  # 2 c_c copies
    # Direct constructions: 2 c_m moves, 2 c_mc moves, 1 c_c copy
    # Extra moves/copies when moving pairs/tuples: 3 c_m, 3 c_mc, 2 c_c
    assert m.move_copy_nested((1, ((2, 3, (4,)), 5))) == 15
    # Cumulative totals for all calls above; move-capable types never copy.
    assert c_m.copy_assignments + c_m.copy_constructions == 0
    assert c_m.move_assignments == 6
    assert c_m.move_constructions == 9
    assert c_mc.copy_assignments + c_mc.copy_constructions == 0
    assert c_mc.move_assignments == 5
    assert c_mc.move_constructions == 8
    assert c_c.copy_assignments == 4
    assert c_c.copy_constructions == 6
    # Every temporary created above must have been destroyed by now.
    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
@pytest.mark.skipif(not m.has_optional, reason='no <optional>')
def test_move_and_copy_load_optional():
    """Tests move/copy loads of std::optional arguments"""
    # The cstats counters are cumulative per C++ type across all calls below.
    cstats = m.move_and_copy_cstats()
    c_m, c_mc, c_c = cstats["MoveOnlyInt"], cstats["MoveOrCopyInt"], cstats["CopyOnlyInt"]
    # The extra move/copy constructions below come from the std::optional move (which has to move
    # its arguments):
    assert m.move_optional(10) == 10  # c_m: 1 move assign, 2 move construct
    assert m.move_or_copy_optional(11) == 11  # c_mc: 1 move assign, 2 move construct
    assert m.copy_optional(12) == 12  # c_c: 1 copy assign, 2 copy construct
    # 1 move assign + move construct moves each of c_m, c_mc, 1 c_c copy
    # +1 move/copy construct each from moving the tuple
    # +1 move/copy construct each from moving the optional (which moves the tuple again)
    assert m.move_optional_tuple((3, 4, 5)) == 12
    # Cumulative totals for all calls above; move-capable types never copy.
    assert c_m.copy_assignments + c_m.copy_constructions == 0
    assert c_m.move_assignments == 2
    assert c_m.move_constructions == 5
    assert c_mc.copy_assignments + c_mc.copy_constructions == 0
    assert c_mc.move_assignments == 2
    assert c_mc.move_constructions == 5
    assert c_c.copy_assignments == 2
    assert c_c.copy_constructions == 5
    # Every temporary created above must have been destroyed by now.
    assert c_m.alive() + c_mc.alive() + c_c.alive() == 0
def test_private_op_new():
    """An object with a private `operator new` cannot be returned by value,
    but returning it by reference still works."""
    with pytest.raises(RuntimeError, match="is neither movable nor copyable"):
        m.private_op_new_value()
    assert m.private_op_new_reference().value == 1
def test_move_fallback():
    """#389: rvp::move should fall-through to copy on non-movable objects."""
    assert m.get_moveissue2(2).value == 2
    assert m.get_moveissue1(1).value == 1
| 40.017699 | 99 | 0.697258 |
49109fb3d8a6eda13d9b53579d6d8ee76b10b73e | 1,232 | py | Python | tests/test_patterns.py | AnastasiaLobanova/climetlab | 4382525da25c66cf10ac0e5cbf6d6d244e7aa097 | [
"Apache-2.0"
] | 1 | 2021-04-14T13:15:46.000Z | 2021-04-14T13:15:46.000Z | tests/test_patterns.py | AnastasiaLobanova/climetlab | 4382525da25c66cf10ac0e5cbf6d6d244e7aa097 | [
"Apache-2.0"
] | null | null | null | tests/test_patterns.py | AnastasiaLobanova/climetlab | 4382525da25c66cf10ac0e5cbf6d6d244e7aa097 | [
"Apache-2.0"
] | null | null | null | # (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
from climetlab.utils.patterns import Pattern
def test_patterns():
p = Pattern("{date:date(%Y%m%d)}-{param}-{level:int}-{level:int(%03d)}")
assert p.names == ["date", "level", "param"], p.names
assert (
p.substitute(dict(date="2000-01-01", param="2t", level=12))
== "20000101-2t-12-012"
)
p = Pattern("{variable:enum(2t,tp)}.{type:enum(rt,hc)}.{date:date(%Y%m%d)}.grib")
assert (
p.substitute(dict(date="2000-01-01", variable="tp", type="rt"))
== "tp.rt.20000101.grib"
)
assert p.substitute(dict(date="2000-01-01", variable=["tp", "2t"], type="rt")) == [
"tp.rt.20000101.grib",
"2t.rt.20000101.grib",
]
if __name__ == "__main__":
for k, f in sorted(globals().items()):
if k.startswith("test_") and callable(f):
print(k)
f()
| 30.04878 | 87 | 0.62013 |
54e206174b1acd17a3c752af3821ea8d4d29bcf3 | 3,834 | py | Python | pymc3/step_methods/elliptical_slice.py | sanmitraghosh/pymc3 | 284e31c0dcc9bf60523f5d2f30328b29ab751f4d | [
"Apache-2.0"
] | 2 | 2020-10-31T18:01:32.000Z | 2020-11-01T07:22:48.000Z | pymc3/step_methods/elliptical_slice.py | sanmitraghosh/pymc3 | 284e31c0dcc9bf60523f5d2f30328b29ab751f4d | [
"Apache-2.0"
] | null | null | null | pymc3/step_methods/elliptical_slice.py | sanmitraghosh/pymc3 | 284e31c0dcc9bf60523f5d2f30328b29ab751f4d | [
"Apache-2.0"
] | 1 | 2019-01-02T09:02:18.000Z | 2019-01-02T09:02:18.000Z | import numpy as np
import numpy.random as nr
import theano.tensor as tt
from .arraystep import ArrayStep, Competence
from ..model import modelcontext
from ..theanof import inputvars
from ..distributions import draw_values
__all__ = ['EllipticalSlice']
def get_chol(cov, chol):
    """Return the Cholesky factor of the prior covariance.

    Exactly one of ``cov`` and ``chol`` must be supplied.  If the
    covariance matrix is given, its (symbolic) Cholesky decomposition is
    computed; if the factor itself is given, it is returned unchanged.

    Parameters
    ----------
    cov : array, optional
        Covariance matrix of the multivariate Gaussian prior.
    chol : array, optional
        Cholesky decomposition of the covariance matrix of the
        multivariate Gaussian prior.
    """
    # Both set or both unset is ambiguous input.
    if (cov is None) == (chol is None):
        raise ValueError('Must pass exactly one of cov or chol')
    if cov is None:
        return chol
    return tt.slinalg.cholesky(cov)
class EllipticalSlice(ArrayStep):
    """Multivariate elliptical slice sampler step.

    Elliptical slice sampling (ESS) [1]_ is a variant of slice sampling
    that allows sampling from distributions with multivariate Gaussian
    prior and arbitrary likelihood. It is generally about as fast as
    regular slice sampling, mixes well even when the prior covariance
    might otherwise induce a strong dependence between samples, and
    does not depend on any tuning parameters.

    The Gaussian prior is assumed to have zero mean.

    Parameters
    ----------
    vars : list
        List of variables for sampler.
    prior_cov : array, optional
        Covariance matrix of the multivariate Gaussian prior.
    prior_chol : array, optional
        Cholesky decomposition of the covariance matrix of the
        multivariate Gaussian prior.
    model : PyMC Model
        Optional model for sampling step. Defaults to None (taken from
        context).

    References
    ----------
    .. [1] I. Murray, R. P. Adams, and D. J. C. MacKay. "Elliptical Slice
       Sampling", The Proceedings of the 13th International Conference on
       Artificial Intelligence and Statistics (AISTATS), JMLR W&CP
       9:541-548, 2010.
    """
    default_blocked = True

    def __init__(self, vars=None, prior_cov=None, prior_chol=None, model=None,
                 **kwargs):
        self.model = modelcontext(model)
        # Exactly one of prior_cov/prior_chol must be given; get_chol
        # validates this and returns the (symbolic) Cholesky factor.
        chol = get_chol(prior_cov, prior_chol)
        self.prior_chol = tt.as_tensor_variable(chol)
        if vars is None:
            vars = self.model.cont_vars
        vars = inputvars(vars)
        super().__init__(vars, [self.model.fastlogp], **kwargs)

    def astep(self, q0, logp):
        """Perform one elliptical-slice update.

        q0 : current state
        logp : log probability function
        """
        # Draw from the normal prior by multiplying the Cholesky decomposition
        # of the covariance with draws from a standard normal
        chol = draw_values([self.prior_chol])[0]
        nu = np.dot(chol, nr.randn(chol.shape[0]))
        # Slice threshold: logp(q0) - Exp(1) is log(u * L(q0)) for
        # u ~ Uniform(0, 1), i.e. a log-likelihood level below the
        # current point.
        y = logp(q0) - nr.standard_exponential()
        # Draw initial proposal and propose a candidate point
        theta = nr.uniform(0, 2 * np.pi)
        theta_max = theta
        theta_min = theta - 2 * np.pi
        # Candidate lies on the ellipse through q0 and the prior draw nu.
        q_new = q0 * np.cos(theta) + nu * np.sin(theta)
        while logp(q_new) <= y:
            # Shrink the bracket and propose a new point
            # (the bracket always keeps theta=0, i.e. q0, inside, so the
            # loop terminates).
            if theta < 0:
                theta_min = theta
            else:
                theta_max = theta
            theta = nr.uniform(theta_min, theta_max)
            q_new = q0 * np.cos(theta) + nu * np.sin(theta)
        return q_new

    @staticmethod
    def competence(var, has_grad):
        # Because it requires a specific type of prior, this step method
        # should only be assigned explicitly.
        return Competence.INCOMPATIBLE
| 32.769231 | 78 | 0.651017 |
86f26c93c643613c8a7b02a9e7c2f034959b1071 | 9,925 | py | Python | mavlink_api/px4/dev/gps_setpoint/px4_mavros_to_GPS.py | hddxds/scripts_from_gi | afb8977c001b860335f9062464e600d9115ea56e | [
"Apache-2.0"
] | null | null | null | mavlink_api/px4/dev/gps_setpoint/px4_mavros_to_GPS.py | hddxds/scripts_from_gi | afb8977c001b860335f9062464e600d9115ea56e | [
"Apache-2.0"
] | null | null | null | mavlink_api/px4/dev/gps_setpoint/px4_mavros_to_GPS.py | hddxds/scripts_from_gi | afb8977c001b860335f9062464e600d9115ea56e | [
"Apache-2.0"
] | null | null | null | import rospy
from mavros_msgs.msg import GlobalPositionTarget, State
from mavros_msgs.srv import CommandBool,SetMode, CommandHome
from geometry_msgs.msg import PoseStamped, Twist
from sensor_msgs.msg import Imu, NavSatFix
import time
import math
import pyquaternion as q
#global variables
global_imu = Imu()
global_gps = NavSatFix()
local_pose = PoseStamped()
current_state = State()
remote_control = False
ever_entered_offboard = False
# for keeping the number of successes of finishing one job
num_of_success = 0
# initial_gps
num_gps_received = 0
home_gps = NavSatFix()
home_lat = 0.0
home_long = 0.0
home_alt = 0.0
# get and set ever entered offboard variable
def get_ever_entered_offboard():
    """Return True once the vehicle has been in OFFBOARD mode at least once
    (the flag is latched by state_callback)."""
    global ever_entered_offboard
    return ever_entered_offboard
def set_ever_entered_offboard(val):
    """Latch the module-level 'has ever entered OFFBOARD mode' flag."""
    global ever_entered_offboard
    ever_entered_offboard = val
# set and get remote control
def set_remote_control(val):
    """Record whether the pilot has taken back manual (remote) control."""
    global remote_control
    remote_control = val
def get_remote_control():
    """Return True if the pilot has taken back manual (remote) control."""
    global remote_control
    return remote_control
def imu_callback(data):
    """ROS subscriber callback: cache the latest IMU sample."""
    global global_imu
    global_imu = data
def gps_callback(data):
    """ROS subscriber callback: cache the latest GPS fix and, at startup,
    average the first ten fixes to establish the home position, then call
    set_home() exactly once.
    """
    global global_gps, home_lat, home_long, home_alt, num_gps_received
    global_gps = data
    if num_gps_received < 10:
        # accumulate the first ten fixes
        home_lat = home_lat + data.latitude
        home_long = home_long + data.longitude
        home_alt = home_alt + data.altitude
        num_gps_received = num_gps_received + 1
    elif num_gps_received == 10:
        # turn the accumulated sums into averages exactly once, then latch
        # by bumping the counter past 10
        home_lat = home_lat / 10.0
        home_long = home_long / 10.0
        home_alt = home_alt / 10.0
        num_gps_received = num_gps_received + 1
        print "Home gps set to:", home_lat, home_long, home_alt
        set_home(using_current_position = True)
def state_callback(data):
    """ROS subscriber callback tracking the mavros flight mode.

    Before OFFBOARD has ever been entered, this only latches the
    ever-entered flag.  After that, leaving OFFBOARD is interpreted as the
    pilot taking over, so the remote-control flag is raised.
    """
    # Bug fix: without this declaration the assignment below created a
    # function-local variable and never updated the module-level
    # current_state.
    global current_state
    if get_ever_entered_offboard():
        if data.mode != "OFFBOARD":
            print("Received mode: ",data.mode)
            current_state = data
            set_remote_control(True)
    else:
        if data.mode == "OFFBOARD":
            set_ever_entered_offboard(True)
def local_pose_callback(data):
    """ROS subscriber callback: cache the latest local-frame pose."""
    global local_pose
    local_pose = data
def set_pose(x=0, y=0, z=2, use_current_heading=True):
    """Build a PoseStamped setpoint at (x, y, z) in the local frame.

    When use_current_heading is True the orientation is copied from the
    latest IMU sample; otherwise the message's default orientation is kept
    unchanged.
    """
    pose = PoseStamped()
    pose.header.stamp = rospy.Time.now()
    pose.pose.position.x = x
    pose.pose.position.y = y
    pose.pose.position.z = z
    if (use_current_heading):
        pose.pose.orientation = global_imu.orientation
    else:
        print("Not using current heading!")
    return pose
def set_gps(lat, long, alt):
    """Build a timestamped GlobalPositionTarget from lat/long/alt values."""
    target = GlobalPositionTarget()
    target.header.stamp = rospy.Time.now()
    target.latitude = float(lat)
    target.longitude = float(long)
    target.altitude = float(alt)
    return target
def set_yaw(yaw):
global global_imu
gps = GlobalPositionTarget()
gps.header.stamp = rospy.Time.now()
imu_q = q.Quaternion(global_imu.orientation.w, global_imu.orientation.x, global_imu.orientation.y, global_imu.orientation.z)
yaw_q = q.Quaternion(axis=[0,0,1], angle=yaw)
new_q = yaw_q * imu_q
print "imu_q", imu_q
print "yaw_q", yaw_q
print "new_q", yaw_q
gps.latitude = global_gps.latitude
gps.longitude = global_gps.longitude
gps.altitude = global_gps.altitude
gps.yaw = new_q[-1]
return gps
# only linear speed
def set_speed(x=0.2, y=0.2, z=0.2):
    """Build a Twist carrying only the linear velocity components."""
    command = Twist()
    command.linear.x = x
    command.linear.y = y
    command.linear.z = z
    return command
def set_home(using_current_position, lat=-1, long=-1, alt=-1):
global global_gps, home_lat, home_long, home_alt
if using_current_position:
result = setHomeService(True, home_lat, home_long, home_alt)
if result:
print "Set home succeed!"
else:
print "Set home failed!"
else:
assert(lat > 0 and long > 0 and alt > 0)
result = setHomeService(True, lat, long, alt)
if result:
print "Set home succeed!"
else:
print "Set home failed!"
# 1e5 -> accuracy in 1 meter
# 1e6 -> accuracy in 0.1 meter
def gps_distance(gps1, gps2):
    """Weighted pseudo-distance between two GPS fixes.

    The absolute latitude and longitude deltas are scaled by 1e5 (roughly
    one-metre resolution) with weight 0.5 each, then the absolute altitude
    delta is added.
    """
    lat_term = 0.5 * 1e5 * math.fabs(gps1.latitude - gps2.latitude)
    long_term = 0.5 * 1e5 * math.fabs(gps1.longitude - gps2.longitude)
    alt_term = math.fabs(gps1.altitude - gps2.altitude)
    return lat_term + long_term + alt_term
def pose_distance(p1, p2):
    """Euclidean distance between the positions of two PoseStamped messages."""
    pos1 = p1.pose.position
    pos2 = p2.pose.position
    dx = math.fabs(pos1.x - pos2.x)
    dy = math.fabs(pos1.y - pos2.y)
    dz = math.fabs(pos1.z - pos2.z)
    return math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
def readCSV(path):
file = open(path).readlines()
tasks = []
for line in file:
tasks.append(line.split('\n')[0].split(','))
print "There are ", len(tasks), "tasks in total!"
for idx, task in enumerate(tasks):
print "Task", idx, ":", task
return tasks
def do_task(task_list, id): # id is the line num of csv file.
    """Execute one step of mission task ``task_list[id]``.

    Meant to be called repeatedly from the control loop: each call
    publishes the task's setpoint once and returns True only after the
    vehicle has stayed within tolerance for enough consecutive calls
    (tracked in the module-level num_of_success counter); otherwise False.
    Supported task kinds: takeoff, gps, yaw, xyz, land, rtl.
    """
    global num_of_success, global_gps, home_lat, home_long, home_alt
    if task_list[id][0] == "takeoff":
        # climb to the requested altitude in the local frame
        pose = set_pose(z=float(task_list[id][1]) )
        #height_pub.publish(pose)
        local_pos_pub.publish(pose)
        if pose_distance(local_pose, pose) < 0.2:
            # require 50 consecutive in-tolerance callbacks before success
            num_of_success = num_of_success + 1
            if num_of_success >= 50:
                print "Taking off succeed!"
                num_of_success = 0
                return True
            else:
                return False
        else:
            return False
    if task_list[id][0] == "gps":
        #assert(float(task_list[id][3])>0)
        # altitude in the CSV is relative to the averaged home altitude
        gps = set_gps(task_list[id][1], task_list[id][2], home_alt + float(task_list[id][3]) )
        global_gps_pub.publish(gps)
        print "gps distance:", gps_distance(global_gps, gps)
        if gps_distance(global_gps, gps) < 0.5:
            num_of_success = num_of_success + 1
            if num_of_success >= 50:
                print "reached gps task", task_list[id]
                num_of_success = 0
                return True
            else:
                return False
        else:
            return False
    if task_list[id][0] == "yaw":
        gps_yaw = set_yaw(float(task_list[id][1]) )
        global_gps_pub.publish(gps_yaw)
        # no feedback on heading: simply republish for 5000 calls
        num_of_success = num_of_success + 1
        if num_of_success >= 5000:
            print "reached yaw", task_list[id]
            num_of_success = 0
            return True
        else:
            return False
    if task_list[id][0] == "xyz":
        # local-frame position setpoint
        pose = set_pose(x=float(task_list[id][1]), y=float(task_list[id][2]), z=float(task_list[id][3]) )
        local_pos_pub.publish(pose)
        print "pose_distance:", pose_distance(local_pose, pose)
        if pose_distance(local_pose, pose) < 0.3:
            num_of_success = num_of_success + 1
            if num_of_success >= 50:
                print "task", task_list[id], "succeed!"
                num_of_success = 0
                return True
            else:
                return False
        else:
            return False
    if task_list[id][0] == "land":
        # switch the autopilot into its landing mode; never reports success
        isModeChanged = flightModeService(custom_mode='AUTO.LAND')
        return False
    # not recommanded for now as the drone will fly 30 meters above ground
    # which is dangerous
    if task_list[id][0] == "rtl":
        isModeChanged = flightModeService(custom_mode='AUTO.RTL')
        return False
if __name__ == '__main__':
    # for arming and changing mode
    armService = rospy.ServiceProxy('/mavros/cmd/arming', CommandBool)
    flightModeService = rospy.ServiceProxy('/mavros/set_mode', SetMode)
    setHomeService = rospy.ServiceProxy('/mavros/cmd/set_home', CommandHome)
    # for fetching current IMU data, GPS data and current mode
    imu_sub = rospy.Subscriber("/mavros/imu/data", Imu, imu_callback)
    gps_sub = rospy.Subscriber("/mavros/global_position/global", NavSatFix, gps_callback)
    state_sub = rospy.Subscriber("mavros/state", State, state_callback)
    local_pose_sub = rospy.Subscriber("mavros/local_position/pose", PoseStamped, local_pose_callback)
    # for setting target position in local and global frame, speed
    local_pos_pub = rospy.Publisher('mavros/setpoint_position/local', PoseStamped, queue_size=10)
    global_gps_pub = rospy.Publisher('mavros/setpoint_position/global', GlobalPositionTarget, queue_size=10)
    speed_pub = rospy.Publisher("mavros/setpoint_velocity/cmd_vel_unstamped", Twist, queue_size=10)
    height_pub = rospy.Publisher("mavros/setpoint_attitude/attitude", PoseStamped, queue_size=10)
    rospy.init_node("offboard_node")
    rate = rospy.Rate(25)
    # Stream setpoints before switching to OFFBOARD (PX4 requires a steady
    # setpoint stream or it will reject/leave offboard mode).
    pose = set_pose(z=2)
    print("Initializing ...")
    for i in range(100):
        local_pos_pub.publish(pose)
        rate.sleep()
    print("Initializing finished!")
    #speed = set_speed()
    #speed_pub.publish(speed)
    #print("Setting speed to: ", speed.linear.x, speed.linear.y, speed.linear.z)
    #arm vehicle
    armService(True)
    #try to enter offboard mode
    Enter_offboard_result = flightModeService(custom_mode = 'OFFBOARD')
    # NOTE(review): this relies on the SetMode response being falsy on
    # failure — confirm against the mavros SetMode service definition.
    while not Enter_offboard_result:
        print 'enter offboard failed, retrying!.'
        Enter_offboard_result = flightModeService(custom_mode = 'OFFBOARD')
    while not get_ever_entered_offboard():
        print('Not in offboard mode. Waiting.')
        rate.sleep()
    # NOTE(review): hard-coded absolute path to the task file.
    tasks = readCSV('/home/gishr/software/codes/scripts/mavlink_api/px4/dev/gps_setpoint/tasks')
    task_id = 0
    set_home(False, 39.903358670410626,116.38344507942071, 50)
    # Advance to the next task only when the current one reports completion.
    while not get_remote_control():
        if not do_task(tasks, task_id):
            # NOTE(review): 'continue' skips rate.sleep(), so the loop
            # busy-spins while a task is in progress — confirm intended.
            continue
        else:
            task_id = task_id + 1
        rate.sleep()
    print('Exited main loop!')
| 28.93586 | 128 | 0.647154 |
f41274805d9bd4ea5d956f95eeef3f661d5bf6e3 | 7,170 | py | Python | Sample-Scripts-and-Notebooks/Official/Scripts/evaluate.py | SillyKeith/Project-Santa-Cruz-Preview | 28ccfc5d5b92f3caf61158f7b15849bb08903976 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | Sample-Scripts-and-Notebooks/Official/Scripts/evaluate.py | SillyKeith/Project-Santa-Cruz-Preview | 28ccfc5d5b92f3caf61158f7b15849bb08903976 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | Sample-Scripts-and-Notebooks/Official/Scripts/evaluate.py | SillyKeith/Project-Santa-Cruz-Preview | 28ccfc5d5b92f3caf61158f7b15849bb08903976 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | """
Script to evaluate a model.
To use this script, first train a model, then export whichever checkpoint you want
to use for evaluation using the scripts/export_frozen_graph.py script.
"""
from PIL import Image
import argparse
import cv2
import importlib.util
import numpy as np
import os
import sys
import tensorflow as tf
def detect_on_image(imgfpath: str, detection_graph, min_threshold: float):
    """
    Detects and displays bounding boxes on a single image.

    Runs the frozen detection graph on the image at `imgfpath`, draws the
    detections above `min_threshold`, and blocks until a key is pressed in
    the OpenCV window.  Relies on the module-level `vis_util` and
    `category_index` that are set up in __main__.
    """
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess: # pylint: disable=no-member
            # Read in the image and convert from BGR to RGB
            # NOTE(review): input is forced to 250x250 — confirm this matches
            # the exported model's expected input size.
            image_np = cv2.resize(cv2.imread(imgfpath), (250, 250))[:,:,::-1] # pylint: disable=no-member
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Extract image tensor
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Extract detection boxes
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Extract detection scores
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            # Extract detection classes
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            # Extract number of detections
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            boxes, scores, classes, num_detections = sess.run([boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded})
            print("BOXES (shaped {}):\n{}".format(boxes.shape, boxes))
            print("SCORES (shaped {}):\n{}".format(scores.shape, scores))
            print("CLASSES (shaped {}):\n{}".format(classes.shape, classes))
            print("NDETECTIONS (shaped {}):\n{}".format(num_detections.shape, num_detections))
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=3,
                min_score_thresh=min_threshold
            )
            # Convert back to BGR for OpenCV display; waitKey(0) blocks.
            cv2.imshow("Image", image_np[:,:,::-1]) # pylint: disable=no-member
            cv2.waitKey(0) # pylint: disable=no-member
def detect_on_all_images_in_directory(imgdpath: str, detection_graph, min_threshold: float):
    """Run detect_on_image on every entry in `imgdpath`, one at a time.

    Each image blocks until a key is pressed in its display window.
    """
    for entry in os.listdir(imgdpath):
        detect_on_image(os.path.join(imgdpath, entry), detection_graph, min_threshold)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("graph", type=str, help="Path to the frozen graph.")
    parser.add_argument("pbtxt", type=str, help=".pbtxt file for the labels in the dataset.")
    parser.add_argument("modelpath", type=str, help="Path to the root of the Tensorflow Object Detection API repository.")
    parser.add_argument("nclasses", type=int, help="Number of classes the detector was trained on.")
    parser.add_argument("image", type=str, help="Path to the image or image directory.")
    parser.add_argument("--min-threshold", '-m', type=float, default=0.30, help="Threshold IOU needed to display a bounding box.")
    args = parser.parse_args()
    # Sanity check the args
    graphpath = args.graph
    if not os.path.isfile(graphpath):
        print("Given a path to a frozen graph, but it does not exist: {}".format(graphpath))
        exit(1)
    pbtxtfpath = args.pbtxt
    if not os.path.isfile(pbtxtfpath):
        print(".pbtxt file is not a file. Given: {}".format(pbtxtfpath))
        exit(2)
    modeldpath = args.modelpath
    if not os.path.isdir(modeldpath):
        print("Model path is not a directory. Given: {}".format(modeldpath))
        exit(3)
    if args.nclasses <= 0:
        print("Number of classes given is: {}, but must be greater than zero.".format(args.nclasses))
        exit(4)
    # Abspath everything so we don't have to keep doing it
    graphpath = os.path.abspath(graphpath)
    modeldpath = os.path.abspath(modeldpath)
    pbtxtfpath = os.path.abspath(pbtxtfpath)
    # Set up GPU growth (from TF's website)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized
            print(e)
    # Adjust python path so the Object Detection API is importable.
    research = os.path.join(modeldpath, "research")
    if 'PYTHONPATH' in os.environ:
        os.environ['PYTHONPATH'] += ":" + research
    else:
        os.environ['PYTHONPATH'] = research
    sys.path.insert(0, research)
    # Load label_map_util and visualization_utils directly from their files
    # instead of importing the whole object_detection package.
    obj_detection_utils = os.path.join(modeldpath, "research", "object_detection", "utils")
    spec = importlib.util.spec_from_file_location("label_map_util", os.path.join(obj_detection_utils, "label_map_util.py"))
    label_map_util = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(label_map_util)
    spec = importlib.util.spec_from_file_location("visualization_utils", os.path.join(obj_detection_utils, "visualization_utils.py"))
    vis_util = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(vis_util)
    # Read the frozen graph
    detection_graph = tf.Graph()
    with detection_graph.as_default(): # pylint: disable=not-context-manager
        od_graph_def = tf.GraphDef() # pylint: disable=no-member
        with tf.io.gfile.GFile(graphpath, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    # Load the categories for visualization in the bounding box labels
    label_map = label_map_util.load_labelmap(pbtxtfpath)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=args.nclasses, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    # Detect on each image in the directory, or on just the single image
    if os.path.isfile(args.image):
        detect_on_image(args.image, detection_graph, args.min_threshold)
    elif os.path.isdir(args.image):
        detect_on_all_images_in_directory(args.image, detection_graph, args.min_threshold)
    else:
        print("Given something I don't understand for image. Should be a file or a directory. Given: {}".format(args.image))
        exit(5)
| 44.259259 | 148 | 0.673222 |
a419affe456e7577bb4d137bed0011a7dca371dc | 12,298 | py | Python | python/subwayflow/a1.py | StevenRCE0/MassTransportUtilizer | 6b3ac1cd447d736094cf16e8a482924b16076705 | [
"MIT"
] | null | null | null | python/subwayflow/a1.py | StevenRCE0/MassTransportUtilizer | 6b3ac1cd447d736094cf16e8a482924b16076705 | [
"MIT"
] | null | null | null | python/subwayflow/a1.py | StevenRCE0/MassTransportUtilizer | 6b3ac1cd447d736094cf16e8a482924b16076705 | [
"MIT"
] | null | null | null |
import os, json, csv, calendar
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing as mp
# import shortestpass
import graph
import pymysql
from pymysql.converters import escape_string
from functools import partial
# Stations where passengers can transfer between lines (subset of allstations).
transfer_stations = ['Sta89', 'Sta127', 'Sta41', 'Sta134', 'Sta3', 'Sta15', 'Sta140', 'Sta75', 'Sta90', 'Sta47', 'Sta23', 'Sta56', 'Sta115', 'Sta63', 'Sta114', 'Sta135', 'Sta87']
# Every valid station id in the network; trips touching unknown stations are skipped.
allstations = ['Sta65', 'Sta49', 'Sta149', 'Sta74', 'Sta128', 'Sta34', 'Sta106', 'Sta110', 'Sta97', 'Sta80', 'Sta89', 'Sta64', 'Sta150', 'Sta154', 'Sta107', 'Sta83', 'Sta108', 'Sta159', 'Sta1', 'Sta129', 'Sta9', 'Sta163', 'Sta53', 'Sta79', 'Sta18', 'Sta47', 'Sta123', 'Sta127', 'Sta81', 'Sta27', 'Sta48', 'Sta151', 'Sta68', 'Sta52', 'Sta76', 'Sta57', 'Sta71', 'Sta139', 'Sta105', 'Sta51', 'Sta24', 'Sta143', 'Sta156', 'Sta61', 'Sta50', 'Sta119', 'Sta66', 'Sta12', 'Sta161', 'Sta21', 'Sta133', 'Sta22', 'Sta138', 'Sta41', 'Sta30', 'Sta67', 'Sta144', 'Sta29', 'Sta126', 'Sta40', 'Sta131', 'Sta39', 'Sta100', 'Sta167', 'Sta113', 'Sta141', 'Sta142', 'Sta158', 'Sta44', 'Sta117', 'Sta147', 'Sta42', 'Sta35', 'Sta109', 'Sta33', 'Sta112', 'Sta153', 'Sta125', 'Sta121', 'Sta11', 'Sta157', 'Sta114', 'Sta168', 'Sta135', 'Sta134', 'Sta85', 'Sta2', 'Sta4', 'Sta103', 'Sta145', 'Sta88', 'Sta87', 'Sta94', 'Sta160', 'Sta7', 'Sta6', 'Sta8', 'Sta75', 'Sta102', 'Sta84', 'Sta59', 'Sta19', 'Sta62', 'Sta165', 'Sta38', 'Sta58', 'Sta43', 'Sta10', 'Sta96', 'Sta132', 'Sta37', 'Sta16', 'Sta69', 'Sta54', 'Sta77', 'Sta122', 'Sta36', 'Sta28', 'Sta124', 'Sta166', 'Sta99', 'Sta45', 'Sta152', 'Sta164', 'Sta82', 'Sta111', 'Sta140', 'Sta13', 'Sta70', 'Sta55', 'Sta20', 'Sta23', 'Sta56', 'Sta118', 'Sta115', 'Sta162', 'Sta15', 'Sta86', 'Sta46', 'Sta3','Sta63', 'Sta25', 'Sta146', 'Sta130', 'Sta120', 'Sta136', 'Sta137', 'Sta101', 'Sta31', 'Sta17', 'Sta26', 'Sta90', 'Sta95', 'Sta72', 'Sta93', 'Sta92', 'Sta116', 'Sta32', 'Sta91', 'Sta60', 'Sta148', 'Sta73']
class DB():
    """MySQL connection helper.

    Opens a connection to the configured RDS instance and creates the four
    per-day result tables (list1..list4 for the given month/day) if they do
    not already exist.

    NOTE(review): credentials are hard-coded in source and table names are
    interpolated with f-strings — acceptable only because month/day are
    internally generated integers, but worth tightening.
    """
    def __init__(self, DB, month, day):
        DB_USER = 'maker0'
        DB_PASS = 'Maker0000'
        DB_HOST = 'rm-bp11labi01950io698o.mysql.rds.aliyuncs.com'
        DB_PORT = 3306
        DATABASE = DB
        try:
            self.connect_info = pymysql.connect(user=DB_USER, passwd=DB_PASS, host=DB_HOST, port=DB_PORT, db=DATABASE) #1
            self.cursor = self.connect_info.cursor()
            # (Author's sample query, kept for reference:)
            # sql = """select * from trips"""
            # read_sql_query takes two arguments: the SQL statement and the connection
            # df = pd.read_sql_query(sql,con=self.connect_info)
            # print the query result
            # print('连接成功')
            table_name_1 = f'list1_{month}_{day}'
            table_name_2 = f'list2_{month}_{day}'
            table_name_3 = f'list3_{month}_{day}'
            table_name_4 = f'list4_{month}_{day}'
            # print(table_name)
            # self.cursor.execute(f"DROP TABLE IF EXISTS {table_name}")
            # Create the result tables.
            sql1 = f"""CREATE TABLE {table_name_1} (
            station CHAR(20) NOT NULL,
            in_flow FLOAT,
            out_flow FLOAT,
            in_flow_plus FLOAT,
            out_flow_plus FLOAT,
            time_start CHAR(20) NOT NULL )"""
            sql2 = f"""CREATE TABLE {table_name_2} (
            station_in CHAR(20) NOT NULL,
            station_out CHAR(20) NOT NULL,
            flow FLOAT,
            flow_plus FLOAT,
            time_start CHAR(20) NOT NULL )"""
            sql3 = f"""CREATE TABLE {table_name_3} (
            station_1 CHAR(20) NOT NULL,
            station_2 CHAR(20) NOT NULL,
            flow FLOAT,
            time_start CHAR(20) NOT NULL )"""
            sql4 = f'''CREATE TABLE {table_name_4} (
            linename CHAR(20) NOT NULL,
            flow FLOAT,
            time_start CHAR(20) NOT NULL )'''
            # Deliberate best-effort creation: if a table already exists the
            # CREATE fails and is ignored.
            try: self.cursor.execute(sql1)
            except: pass
            try: self.cursor.execute(sql2)
            except: pass
            try: self.cursor.execute(sql3)
            except: pass
            try: self.cursor.execute(sql4)
            except: pass
        except pymysql.Error as e:
            print("数据库连接失败")
            raise e
def write_list_to_json(data, json_file_name, json_file_save_path):
    """Serialize `data` as JSON into <json_file_save_path>/<json_file_name>.

    Fixes two defects in the original: the first parameter shadowed the
    built-in `list`, and os.chdir() permanently changed the process-wide
    working directory as a hidden side effect.
    """
    target = os.path.join(json_file_save_path, json_file_name)
    with open(target, 'w', encoding='utf-8') as f:
        json.dump(data, f)
def main():
    """Drive the per-half-hour flow aggregation over the trip data.

    NOTE(review): the trailing `break` statements limit the run to the first
    month in the list and its first day — apparently left over from
    debugging.
    """
    # b = DB('library_flow')
    trips = pd.DataFrame(pd.read_csv('./data/new_trips.csv'))
    stations = pd.DataFrame(pd.read_csv('./data/station.csv', encoding='gbk'))
    year = 2020
    flow = {}
    for month in [7,1,2,3,4,5,6,7,8,12]:
        # All trips for this month.
        res = calendar.monthrange(year,month)
        df = pd.DataFrame(trips.loc[trips['inmonth'] == month])
        # NOTE(review): range(1, res[1]) excludes the month's last day —
        # likely an off-by-one (moot while the breaks below are in place).
        for day in range(1, res[1]):
            # All trips for this day.
            df_day = df.loc[df['inday'] == day]
            aaa = []
            # Slice the day into half-hour windows from 06:00 to 22:30.
            for hour in range(6,23):
                df_day_hour = df_day.loc[df_day['inhour']==hour]
                for minute in [0,30]:
                    df_day_hour_minute = df_day_hour.loc[(df_day_hour['inminute']<30+minute) & (df_day_hour['inminute']>=0+minute)]
                    aaa.append([df_day_hour_minute, month, day, hour, minute])
                    # print(f'{month}月{day}日 {hour}:{minute}')
                    # partial_work = partial(job, month=month, day=day, hour=hour, minute=minute)
                    # re = pool.map(partial_work, aaa)
            # Process all half-hour slices of the day in parallel.
            pool = mp.Pool(processes=8) # worker pool (comment originally said 3; the code uses 8)
            re = pool.map(job, aaa)
            print(re)
            # job(df_day_hour_minute, hour, minute)
            # break
            # break
            break
        break
def job(df):
    """Aggregate one half-hour slice of trips and write results to MySQL.

    `df` is a 5-element list: [trip DataFrame, month, day, hour, minute].
    Builds four accumulators over the slice:
      temp  — per-station [in, out] counts (later extended with pass-through)
      temp2 — per-(origin, destination) OD counts
      temp3 — per-directed-segment flow (weighted by path probability)
      temp4 — per-line flow
    and inserts them into list1..list4_{month}_{day}.  Returns the last
    station's in_flow value (used only as a progress indicator).
    """
    month = df[1]
    day = df[2]
    hour = df[3]
    minute = df[4]
    df = df[0]
    list1_1,list1_2,list2_1,list2_2,list3,list4 = [],[],[],[],[],[]
    temp, temp2, temp3, temp4 = {}, {}, {}, {}
    transfer_stations_temp, transfer_stations_temp2 = {}, {}
    for index, row in df.iterrows():
        # print(index)
        # Columns 2 and 4 hold the entry and exit station ids.
        stain, staout = row[2], row[4]
        if stain in allstations and staout in allstations:
            pass
        else:
            continue
        # 1. Per-station in/out counts (same-station trips are dropped).
        if stain == staout :
            continue
        if stain in temp.keys():
            temp[stain][0] += 1
        else:
            temp[stain] = [1, 0]
        if staout in temp.keys():
            temp[staout][1] += 1
        else:
            temp[staout] = [0, 1]
        # print(temp)
        # 2. OD-pair counts.
        if (stain, staout) in temp2.keys():
            temp2[(stain,staout)] += 1
        else:
            temp2[(stain,staout)] = 1
        # 3/4. Route the trip through the network graph; getPassInfo returns
        # candidate paths, each ending with a probability/weight factor.
        # print(stain,staout)
        try:
            graph_object = graph.Dfs()
            big_list = graph_object.getPassInfo(stain, staout)
            if big_list[0][-1] > 0:
                # print(big_list[0][-1])
                pass
            else:
                print(big_list)
                continue
            # small_list[0] = shortestpass.main(stain, staout)
        except:
            continue
        # print(small_list[0])
        for small_list in big_list:
            # Walk every station along this candidate path.
            for i in range(len(small_list[0])):
                # NOTE(review): `i != len(small_list[0])` is always True, so
                # this only excludes i == 0 — the first segment is never
                # counted in temp3; confirm intended.
                if i!=0 and i!=len(small_list[0]):
                    if small_list[0][i] in transfer_stations:
                        # 1_2: weighted pass-through count at transfer stations.
                        if small_list[0][i] in transfer_stations_temp.keys():
                            transfer_stations_temp[small_list[0][i]] += 1*small_list[-1]
                        else:
                            transfer_stations_temp[small_list[0][i]] = 1*small_list[-1]
                    # 3: weighted flow on each directed segment of the path.
                    if i != len(small_list[0])-1:
                        if (small_list[0][i], small_list[0][i+1]) in temp3.keys():
                            temp3[(small_list[0][i], small_list[0][i+1])] += 1*small_list[-1]
                        else:
                            temp3[(small_list[0][i], small_list[0][i+1])] = 1*small_list[-1]
        # 2_2: weighted OD counts including transfer-split sub-OD pairs
        # (small_list[2] lists the transfer OD legs; empty means direct).
        for small_list in big_list:
            ods = small_list[2]
            if ods==[]:
                if (stain, staout) in transfer_stations_temp2.keys():
                    transfer_stations_temp2[(stain, staout)] += 1*small_list[-1]
                else:
                    transfer_stations_temp2[stain, staout] = 1*small_list[-1]
            else:
                for od in ods:
                    if od in transfer_stations_temp2.keys():
                        transfer_stations_temp2[od] += 1*small_list[-1]
                    else:
                        transfer_stations_temp2[od] = 1*small_list[-1]
        # 4_1: per-line ridership (small_list[4] lists lines used by the path).
        for small_list in big_list:
            lines = small_list[4]
            rate = small_list[-1]
            # print(lines, rate)
            for line in lines:
                if line in temp4.keys():
                    temp4[line] += 1*rate
                else:
                    temp4[line] = 1*rate
    db = DB('library_flow', month, day)
    # db.cursor.execute("""INSERT INTO list1 VALUES ('Mac', 'Mohan', 20, 'M', 2000)""")
    # 1_1
    a = 0
    # for key in temp.keys():
    #     list1_1.append({'station':key,'in':temp[key][0], 'out':temp[key][1]})
    #     a += temp[key][0]
    # 1_2: duplicate [in, out] into [in, out, in_plus, out_plus]; the *_plus
    # slots then absorb the transfer pass-through flow.
    for key in temp.keys():
        temp[key].extend(temp[key])
    for key in transfer_stations_temp.keys():
        if key in temp.keys():
            # temp[key][0] + transfer_stations_temp[key]
            # temp[key][1] + transfer_stations_temp[key]
            temp[key][2] = (temp[key][0] + transfer_stations_temp[key])
            temp[key][3] = (temp[key][1] + transfer_stations_temp[key])
        else:
            # temp[key] = [transfer_stations_temp[key], transfer_stations_temp[key]]
            temp[key]=[transfer_stations_temp[key], transfer_stations_temp[key], transfer_stations_temp[key], transfer_stations_temp[key]]
    for key in temp.keys():
        # if len(temp[key])< 4:
        #     print(temp[key])
        list1_2.append({'station':key,'in_flow':temp[key][0], 'out_flow':temp[key][1], 'in_flow_plus':temp[key][2], 'out_flow_plus':temp[key][3]})
        # NOTE(review): `time` is bound inside this loop but reused by the
        # later INSERT loops — if `temp` is empty those raise NameError.
        key = escape_string(key)
        time = escape_string(f'{hour}:{minute}')
        a = temp[key][0]
        b = temp[key][1]
        c = temp[key][2]
        d = temp[key][3]
        db.cursor.execute( f"""INSERT INTO list1_{month}_{day} VALUES ('{key}', {a},{b}, {c}, {d},'{time}')""")
        # db.cursor.execute( f"""INSERT INTO list1_{month}_{day} VALUES ('%s',%f,%f,%f,%f,'%s')""" % (key,8.9,9.8,5.7,7.99,'gfxzgfh'))
    # 2_1: promote scalar OD counts to [flow, flow_plus] pairs.
    for key in temp2.keys():
        # list2_1.append({'stain':key[0], 'staout': key[1], 'flow':temp2[key]})
        temp2[key] = [temp2[key], temp2[key]]
    # 2_2
    for key in transfer_stations_temp2.keys():
        try:
            # NOTE(review): temp2[key] is a list here, so list + number
            # raises TypeError and the except branch always runs when the
            # key exists — confirm the intended flow_plus computation.
            temp2[key][1] = temp2[key] + transfer_stations_temp2[key]
        except:
            temp2[key] = [transfer_stations_temp2[key], transfer_stations_temp2[key]]
    for key in temp2.keys():
        list2_2.append({'stain':key[0], 'staout': key[1], 'flow':temp2[key]})
        db.cursor.execute( f"""INSERT INTO list2_{month}_{day} VALUES ('{key[0]}', '{key[1]}',{temp2[key][0]}, {temp2[key][1]},'{time}')""")
    # 3: per-segment flow rows.
    for key in temp3.keys():
        list3.append({'x1':key[0], 'x2': key[1], 'flow':temp3[key]})
        db.cursor.execute( f"""INSERT INTO list3_{month}_{day} VALUES ('{key[0]}', '{key[1]}',{temp3[key]},'{time}')""")
    # 4: per-line flow rows.
    for key in temp4.keys():
        list4.append({'line':temp4[key]})
        db.cursor.execute( f"""INSERT INTO list4_{month}_{day} VALUES ('{key}', {temp4[key]},'{time}')""")
    db.connect_info.commit()
    db.connect_info.close()
    # k = row[3].split(' ')[1]
    # write_list_to_json(list1_1, f'{k}list1_1.json', './json/')
    # write_list_to_json(list1_2, 'list1_2.json', './')
    # write_list_to_json(list2_1, 'list2_1.json', './')
    # write_list_to_json(list2_2, 'list2_2.json', './')
    # write_list_to_json(list3, 'list3.json', './')
    # print(a)
    return a
# Script entry point.
if __name__=='__main__':
    main()
a5cee59124162e21c8e7dae5e191b44646f3189d | 10,611 | py | Python | securesystemslib/util.py | mnm678/securesystemslib | caf029ea61339dc5fa9beedf230ca1d026129169 | [
"MIT"
] | null | null | null | securesystemslib/util.py | mnm678/securesystemslib | caf029ea61339dc5fa9beedf230ca1d026129169 | [
"MIT"
] | null | null | null | securesystemslib/util.py | mnm678/securesystemslib | caf029ea61339dc5fa9beedf230ca1d026129169 | [
"MIT"
] | null | null | null | """
<Program Name>
util.py
<Author>
Konstantin Andrianov
<Started>
March 24, 2012. Derived from original util.py written by Geremy Condra.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Provides utility services. This module supplies utility functions such as:
get_file_details() that computes the length and hash of a file, import_json
that tries to import a working json module, load_json_* functions, etc.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import gzip
import shutil
import logging
import tempfile
import warnings
import securesystemslib.exceptions
import securesystemslib.settings
import securesystemslib.hash
import securesystemslib.formats
import six
logger = logging.getLogger(__name__)
def get_file_details(filepath, hash_algorithms=None):
  """
  <Purpose>
    To get file's length and hash information.  The hash is computed using
    the sha256 algorithm by default.  This function is used in the
    signerlib.py and updater.py modules.

  <Arguments>
    filepath:
      Absolute file path of a file.

    hash_algorithms:
      A list of hash algorithm names; defaults to ['sha256'].  The default
      is created freshly per call — the original used a shared mutable
      default list, which is a Python anti-pattern.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If hash of the file does not
    match HASHDICT_SCHEMA.

    securesystemslib.exceptions.Error: If 'filepath' does not exist.

  <Returns>
    A tuple (length, hashes) describing 'filepath'.
  """
  # Avoid the mutable-default-argument pitfall.
  if hash_algorithms is None:
    hash_algorithms = ['sha256']

  # Making sure that the format of 'filepath' is a path string.
  # 'securesystemslib.exceptions.FormatError' is raised on incorrect format.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
  securesystemslib.formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)

  # The returned file hashes of 'filepath'.
  file_hashes = {}

  # Does the path exists?  (Error message typo 'doest' fixed.)
  if not os.path.exists(filepath):
    raise securesystemslib.exceptions.Error('Path ' + repr(filepath) + ' does'
        ' not exist.')

  filepath = os.path.abspath(filepath)

  # Obtaining length of the file.
  file_length = os.path.getsize(filepath)

  # Obtaining hash of the file for each requested algorithm.
  for algorithm in hash_algorithms:
    digest_object = securesystemslib.hash.digest_filename(filepath, algorithm)
    file_hashes.update({algorithm: digest_object.hexdigest()})

  # Performing a format check to ensure 'file_hashes' corresponds to
  # HASHDICT_SCHEMA.  Raise FormatError on mismatch.
  securesystemslib.formats.HASHDICT_SCHEMA.check_match(file_hashes)

  return file_length, file_hashes
def persist_temp_file(temp_file, persist_path):
  """
  <Purpose>
    Copy the contents of the file-like object 'temp_file' into a regular
    file created at 'persist_path', force it to disk, and close
    'temp_file' (which removes it for tempfile-backed objects).

  <Arguments>
    temp_file:
      File object to persist, typically a file object returned by one of
      the interfaces in the tempfile module of the standard library.

    persist_path:
      File path to create the persistent file in.

  <Exceptions>
    None.

  <Return>
    None.
  """
  # Make everything buffered in 'temp_file' visible, then rewind so the
  # copy starts from the beginning.
  temp_file.flush()
  temp_file.seek(0)

  with open(persist_path, 'wb') as dst:
    shutil.copyfileobj(temp_file, dst)
    # Push the data out of Python's buffers and the OS page cache.
    # os.fsync() should follow flush().
    dst.flush()
    os.fsync(dst.fileno())

  temp_file.close()
def ensure_parent_dir(filename):
  """
  <Purpose>
    To ensure existence of the parent directory of 'filename'.  If the
    parent directory of 'filename' does not exist, create it.

    Example: If 'filename' is '/a/b/c/d.txt', and only the directory '/a/b/'
    exists, then directory '/a/b/c/d/' will be created.

  <Arguments>
    filename:
      A path string.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If 'filename' is improperly
    formatted.

  <Side Effects>
    A directory is created whenever the parent directory of 'filename' does
    not exist.

  <Return>
    None.
  """
  # Ensure 'filename' corresponds to 'PATH_SCHEMA'.
  # Raise 'securesystemslib.exceptions.FormatError' on a mismatch.
  securesystemslib.formats.PATH_SCHEMA.check_match(filename)

  # Split 'filename' into head and tail, check if head exists.
  directory = os.path.split(filename)[0]

  if directory and not os.path.exists(directory):
    try:
      # mode = 'rwx------'. 448 (decimal) is 700 in octal.
      os.makedirs(directory, 448)

    except OSError:
      # Another process may have created the directory between the
      # os.path.exists() check and the os.makedirs() call (TOCTOU race).
      # Only propagate the error if the directory still does not exist.
      if not os.path.isdir(directory):
        raise
def file_in_confined_directories(filepath, confined_directories):
  """
  <Purpose>
    Check whether the directory containing 'filepath' appears in the
    list/tuple of 'confined_directories'.

  <Arguments>
    filepath:
      A string representing the path of a file.  Example path strings such
      as 'a/b/c' and 'a/b/c.txt' are treated as files, not directories.

    confined_directories:
      A list, or a tuple, of directory strings.

  <Exceptions>
    securesystemslib.exceptions.FormatError: On incorrect format of the
    input.

  <Return>
    Boolean.  True if a confined directory is the empty string or matches
    the file's parent directory; False otherwise.
  """
  # Raise 'securesystemslib.exceptions.FormatError' on malformed input.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
  securesystemslib.formats.PATHS_SCHEMA.check_match(confined_directories)

  for confined_directory in confined_directories:
    # The empty string (arbitrarily chosen) confines the client to all
    # directories and subdirectories, so any 'filepath' is acceptable.
    if confined_directory == '':
      return True

    # Compare normalized forms (accounts for up-level references, etc.);
    # only the file's immediate parent directory may match — a confined
    # path does not include its subdirectories.
    if os.path.dirname(os.path.normpath(filepath)) == os.path.normpath(confined_directory):
      return True

  return False
_json_module = None

def import_json():
  """
  <Purpose>
    Return the standard 'json' module, importing it on first use and caching
    it in the module-level '_json_module'.  (A simplejson fallback existed
    historically; this interface is kept intact for backwards
    compatibility.)

  <Arguments>
    None.

  <Exceptions>
    ImportError: on failure to import the json module.

  <Side Effects>
    None.

  <Return>
    json module
  """
  global _json_module

  if _json_module is None:
    try:
      _json_module = __import__('json')

    # The 'json' module ships with every supported Python installation, so
    # this should not occur in practice.
    except ImportError: #pragma: no cover
      raise ImportError('Could not import the json module')

  return _json_module

json = import_json()
def load_json_string(data):
  """
  <Purpose>
    Deserialize the JSON string 'data' into a Python object.

  <Arguments>
    data:
      A JSON string.

  <Exceptions>
    securesystemslib.exceptions.Error, if 'data' cannot be deserialized to
    a Python object.

  <Side Effects>
    None.

  <Returns>
    The deserialized object.  For example, a dictionary.
  """
  try:
    return json.loads(data)

  # json.loads raises TypeError for non-string input and ValueError for
  # malformed JSON; both are translated into the library's Error type.
  except TypeError:
    raise securesystemslib.exceptions.Error('Invalid JSON string: ' + repr(data))

  except ValueError:
    raise securesystemslib.exceptions.Error('Cannot deserialize to a Python object: ' + repr(data))
def load_json_file(filepath):
  """
  <Purpose>
    Deserialize a JSON object from a file containing the object.

  <Arguments>
    filepath:
      Absolute path of JSON file.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If 'filepath' is improperly
    formatted.

    securesystemslib.exceptions.Error: If 'filepath' cannot be deserialized
    to a Python object.

    IOError in case of runtime IO exceptions.

  <Side Effects>
    None.

  <Return>
    Deserialized object.  For example, a dictionary.
  """
  # Making sure that the format of 'filepath' is a path string.
  # securesystemslib.exceptions.FormatError is raised on incorrect format.
  securesystemslib.formats.PATH_SCHEMA.check_match(filepath)

  # The file is mostly likely gzipped.
  if filepath.endswith('.gz'):
    logger.debug('gzip.open(' + str(filepath) + ')')
    # Close the gzip handle deterministically (the original leaked it).
    with gzip.open(filepath) as gzip_object:
      fileobject = six.StringIO(gzip_object.read().decode('utf-8'))

  else:
    logger.debug('open(' + str(filepath) + ')')
    fileobject = open(filepath)

  # The original closed 'fileobject' in both the 'else' and 'finally'
  # clauses; a single 'finally' close is sufficient and always runs.
  try:
    return json.load(fileobject)

  except (ValueError, TypeError):
    raise securesystemslib.exceptions.Error('Cannot deserialize to a'
        ' Python object: ' + repr(filepath))

  finally:
    fileobject.close()
def digests_are_equal(digest1, digest2):
  """
  <Purpose>
    While protecting against timing attacks, compare the hexadecimal
    arguments and determine if they are equal.

  <Arguments>
    digest1:
      The first hexadecimal string value to compare.

    digest2:
      The second hexadecimal string value to compare.

  <Exceptions>
    securesystemslib.exceptions.FormatError: If the arguments are improperly
    formatted.

  <Side Effects>
    None.

  <Return>
    Return True if 'digest1' is equal to 'digest2', False otherwise.
  """
  # Ensure both arguments are hexadecimal strings.
  # Raise 'securesystemslib.exceptions.FormatError' on a mismatch.
  securesystemslib.formats.HEX_SCHEMA.check_match(digest1)
  securesystemslib.formats.HEX_SCHEMA.check_match(digest2)

  if len(digest1) != len(digest2):
    return False

  # Deliberately scan the full strings even after a mismatch is found, so
  # the comparison time does not leak the position of the first difference.
  matched = True
  for left, right in zip(digest1, digest2):
    if left != right:
      matched = False

  return matched
| 26.863291 | 80 | 0.722741 |
eab04f7f36938f95ecde7a2fe7d3088148cd7823 | 2,766 | py | Python | mean_absolute-square_error.py | luis-alarcon/mean_absolute-square_error | 47e5291b38af3f6157a1e49ecbfd0f1f45e3ad30 | [
"MIT"
] | null | null | null | mean_absolute-square_error.py | luis-alarcon/mean_absolute-square_error | 47e5291b38af3f6157a1e49ecbfd0f1f45e3ad30 | [
"MIT"
] | null | null | null | mean_absolute-square_error.py | luis-alarcon/mean_absolute-square_error | 47e5291b38af3f6157a1e49ecbfd0f1f45e3ad30 | [
"MIT"
] | null | null | null | #import packages
import sys
import numpy as np
import pandas as pd
# Formula for Absolute trick and Square Trick
def trick(cho,w1,w2):
    """Interactively apply one gradient 'trick' to the line y = w1*x + w2.

    cho == 1: absolute trick; cho == 2: square trick.  Reads the point and
    the learning rate from stdin and prints the updated line formula.

    NOTE(review): the absolute-trick branch ignores the point's y value and
    always moves the line upward — this assumes the point lies above the
    line; confirm intended.
    """
    #p = input("Input the point: (sepate the x and y with commas): ")
    point = input("\nInput the point: (sepate the x and y with commas): ")
    point = point.split(",")
    print("\nPoint: ")
    print(point)
    learning_rate = float(input("\nInput Learning Rate: "))
    if cho ==1:
        p = float(point[0])
        # Absolute trick: w1 += p*alpha, w2 += alpha.
        w_1 = w1 + p*learning_rate
        w_2 = w2+learning_rate
        print("\nAbsolute Trick Formula: ")
        print("\ny = "+str(w_1)+"x + ("+str(w_2)+")")
    elif cho ==2:
        p = float(point[0])
        q = float(point[1])
        # Square trick: scale the update by the residual (q - qi).
        qi = w1*p+w2
        w_1 = w1+p*(q-qi)*learning_rate
        w_2 = w2+(q-qi)*learning_rate
        print("\nAbsolute Square Formula: ")
        print("\ny = "+str(w_1)+"x + ("+str(w_2)+")")
# Formula for Mean Absolute Error and Mean Square Error
def mean_error(cho,w1,w2):
    """Interactively compute MAE (cho == 3) or MSE (cho == 4).

    Reads `num_points` (x, y) pairs from stdin, predicts yi = w1*x + w2 via
    line_point(), and prints the chosen error metric.
    """
    num_points = int(input("\nHow many points are you going to analyse: "))
    l_points = []
    for i in range(num_points):
        point = input("\nInput the point: (sepate the x and y with commas): ")
        point = point.split(",")
        for i in range(len(point)):
            point[i] = float(point[i])
        l_points.append(point)
    # DataFrame for all times
    points = pd.DataFrame(l_points, columns = ["x","y"])
    #points = pd.to_numeric(points, errors='coerce')
    if cho ==3:
        # NOTE(review): line_point() mutates and returns the same DataFrame,
        # so `line_points` aliases `points`; the "y" column is still usable
        # only because line_point's drop() call is a no-op.
        line_points = line_point(points,w1,w2)
        y_yi = abs(points["y"]-line_points["yi"])
        sum_y_yi = y_yi.sum()
        mean_abs_error = sum_y_yi/num_points
        print("\nMean Absolute Error: "+str(mean_abs_error))
    elif cho ==4:
        line_points = line_point(points,w1,w2)
        y_yi = (points["y"]-line_points["yi"])**2
        sum_y_yi = y_yi.sum()
        mean_abs_error = sum_y_yi/num_points
        print("\nMean Square Error: "+str(mean_abs_error))
def line_point(l_p, w_1, w_2):
    """Evaluate the line y = w_1*x + w_2 at every x in *l_p*.

    Args:
        l_p: pandas DataFrame with an "x" column (a "y" column may also be
            present and is carried through untouched).
        w_1: slope of the line.
        w_2: intercept of the line.

    Returns:
        A copy of *l_p* with an extra "yi" column holding w_1*x + w_2.

    Fixes over the previous version: the ``l_p.drop(['y'], axis=1)`` call
    discarded its result (a no-op), the caller's DataFrame was mutated in
    place, and the element-wise ``l_p["x"][i]`` loop assumed a contiguous
    0..n-1 index.
    """
    result = l_p.copy()
    # Vectorized over the whole column; works for any index, unlike the
    # old positional loop.
    result["yi"] = w_1 * result["x"] + w_2
    return result
# Entry point for the interactive script.
def main():
    """Prompt for the line's weights, show the menu, and dispatch.

    Choices 1/2 go to trick(), choices 3/4 go to mean_error().  Both
    helpers print their own output and return None, which is printed as
    well (same visible behavior as before).
    """
    slope = float(input("\nInsert W1: "))
    intercept = float(input("Insert W2: "))
    print("\nthe line formula is:")
    print(f"y = {slope}x + {intercept}")
    print("\n Choose formulas")
    print("1: Absolute Trick")
    print("2: Square Trick")
    print("3: Mean Absolute Error")
    print("4: Mean Square Error")
    choice = int(input("choice: "))
    if choice in (1, 2):
        print(trick(choice, slope, intercept))
    elif choice in (3, 4):
        print(mean_error(choice, slope, intercept))
# calling main script
if __name__ == "__main__":
    main()
| 32.162791 | 78 | 0.587129 |
fbb96f8f0e0d7ad17bfc083476724bd5d325eb0e | 144 | py | Python | slac_services/services/epics.py | ChristopherMayes/lume-orchestration-demo | fbbf663911f45626c29ed7569569aef7090538e4 | [
"BSD-3-Clause-LBNL"
] | null | null | null | slac_services/services/epics.py | ChristopherMayes/lume-orchestration-demo | fbbf663911f45626c29ed7569569aef7090538e4 | [
"BSD-3-Clause-LBNL"
] | null | null | null | slac_services/services/epics.py | ChristopherMayes/lume-orchestration-demo | fbbf663911f45626c29ed7569569aef7090538e4 | [
"BSD-3-Clause-LBNL"
] | 1 | 2022-03-21T17:09:55.000Z | 2022-03-21T17:09:55.000Z | from pydantic import BaseSettings
class EPICSSettings():
...
class EPICSContextService():
...
def __init__(self):
... | 10.285714 | 33 | 0.604167 |
93d240105789f0171033515e6f3cf150cf17212d | 4,179 | py | Python | src/sentry/south_migrations/0005_auto.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 20 | 2016-10-01T04:29:24.000Z | 2020-10-09T07:23:34.000Z | src/sentry/south_migrations/0005_auto.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 8 | 2019-12-28T23:49:55.000Z | 2022-03-02T04:34:18.000Z | src/sentry/south_migrations/0005_auto.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 7 | 2016-10-27T05:12:45.000Z | 2021-05-01T14:29:53.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'GroupedMessage', fields ['status']
db.create_index('sentry_groupedmessage', ['status'])
def backwards(self, orm):
# Removing index on 'GroupedMessage', fields ['status']
db.delete_index('sentry_groupedmessage', ['status'])
models = {
'sentry.filtervalue': {
'Meta': {'unique_together': "(('key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['sentry']
| 67.403226 | 178 | 0.571668 |
501712ad19c97f74c685c6212c6f379793fa2af6 | 2,553 | py | Python | Lib/test/test_str.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/test/test_str.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | Lib/test/test_str.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
] | null | null | null | import unittest
from test import test_support, string_tests
class StrTest(
string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUserStringTest,
string_tests.MixinStrUnicodeTest,
):
type2test = str
# We don't need to propagate to str
def fixtype(self, obj):
return obj
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
self.assertRaises(OverflowError, '%c'.__mod__, 0x1234)
def test_conversion(self):
# Make sure __str__() behaves properly
class Foo0:
def __unicode__(self):
return u"foo"
class Foo1:
def __str__(self):
return "foo"
class Foo2(object):
def __str__(self):
return "foo"
class Foo3(object):
def __str__(self):
return u"foo"
class Foo4(unicode):
def __str__(self):
return u"foo"
class Foo5(str):
def __str__(self):
return u"foo"
class Foo6(str):
def __str__(self):
return "foos"
def __unicode__(self):
return u"foou"
class Foo7(unicode):
def __str__(self):
return "foos"
def __unicode__(self):
return u"foou"
class Foo8(str):
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
class Foo9(str):
def __str__(self):
return "string"
def __unicode__(self):
return "not unicode"
self.assert_(str(Foo0()).startswith("<")) # this is different from __unicode__
self.assertEqual(str(Foo1()), "foo")
self.assertEqual(str(Foo2()), "foo")
self.assertEqual(str(Foo3()), "foo")
self.assertEqual(str(Foo4("bar")), "foo")
self.assertEqual(str(Foo5("bar")), "foo")
self.assertEqual(str(Foo6("bar")), "foos")
self.assertEqual(str(Foo7("bar")), "foos")
self.assertEqual(str(Foo8("foo")), "foofoo")
self.assertEqual(str(Foo9("foo")), "string")
self.assertEqual(unicode(Foo9("foo")), u"not unicode")
def test_main():
    """Entry point used by CPython's regression-test framework."""
    test_support.run_unittest(StrTest)
if __name__ == "__main__":
    test_main()
| 28.366667 | 87 | 0.533882 |
1781250ca39ce5912db18071f0bdac9c1d9dba17 | 20 | py | Python | test/rospy/__init__.py | osrf/uctf | f7d597b9532995b1509cc29aed4ac7115c2b0cb8 | [
"Apache-2.0"
] | 28 | 2016-08-23T12:39:01.000Z | 2020-11-12T17:14:47.000Z | test/rospy/__init__.py | osrf/uctf | f7d597b9532995b1509cc29aed4ac7115c2b0cb8 | [
"Apache-2.0"
] | 80 | 2016-08-17T15:00:08.000Z | 2021-01-19T21:19:04.000Z | test/rospy/__init__.py | osrf/uctf | f7d597b9532995b1509cc29aed4ac7115c2b0cb8 | [
"Apache-2.0"
] | 13 | 2016-10-13T09:41:52.000Z | 2020-11-13T21:24:58.000Z | ServiceProxy = None
| 10 | 19 | 0.8 |
f55c2925a32f171aed7d6a1de333bc930f262517 | 56,998 | py | Python | keras/layers/preprocessing/image_preprocessing_test.py | ivallesp/keras | 1a35ff2788b5e6880ceb8af82e1a8d5f72d0f76f | [
"Apache-2.0"
] | null | null | null | keras/layers/preprocessing/image_preprocessing_test.py | ivallesp/keras | 1a35ff2788b5e6880ceb8af82e1a8d5f72d0f76f | [
"Apache-2.0"
] | null | null | null | keras/layers/preprocessing/image_preprocessing_test.py | ivallesp/keras | 1a35ff2788b5e6880ceb8af82e1a8d5f72d0f76f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image preprocessing layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from absl.testing import parameterized
import numpy as np
from tensorflow.python.distribute.mirrored_strategy import MirroredStrategy
from keras import keras_parameterized
from keras import testing_utils
from keras.engine import sequential
from keras.layers.preprocessing import image_preprocessing
from keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import gen_stateless_random_ops_v2
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class ResizingTest(keras_parameterized.TestCase):
  """Tests for the `Resizing` image preprocessing layer."""
  def _run_test(self, kwargs, expected_height, expected_width):
    """Runs layer_test on Resizing with a fixed (2, 5, 8, 3) input batch."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs.update({'height': expected_height, 'width': expected_width})
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.Resizing,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))
  @parameterized.named_parameters(
      ('down_sample_bilinear_2_by_2', {'interpolation': 'bilinear'}, 2, 2),
      ('down_sample_bilinear_3_by_2', {'interpolation': 'bilinear'}, 3, 2),
      ('down_sample_nearest_2_by_2', {'interpolation': 'nearest'}, 2, 2),
      ('down_sample_nearest_3_by_2', {'interpolation': 'nearest'}, 3, 2),
      ('down_sample_area_2_by_2', {'interpolation': 'area'}, 2, 2),
      ('down_sample_area_3_by_2', {'interpolation': 'area'}, 3, 2))
  def test_down_sampling(self, kwargs, expected_height, expected_width):
    """Output shape is correct when shrinking the image."""
    with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
      self._run_test(kwargs, expected_height, expected_width)
  @parameterized.named_parameters(
      ('up_sample_bilinear_10_by_12', {'interpolation': 'bilinear'}, 10, 12),
      ('up_sample_bilinear_12_by_12', {'interpolation': 'bilinear'}, 12, 12),
      ('up_sample_nearest_10_by_12', {'interpolation': 'nearest'}, 10, 12),
      ('up_sample_nearest_12_by_12', {'interpolation': 'nearest'}, 12, 12),
      ('up_sample_area_10_by_12', {'interpolation': 'area'}, 10, 12),
      ('up_sample_area_12_by_12', {'interpolation': 'area'}, 12, 12))
  def test_up_sampling(self, kwargs, expected_height, expected_width):
    """Output shape is correct when enlarging the image."""
    with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
      self._run_test(kwargs, expected_height, expected_width)
  def test_down_sampling_numeric(self):
    """4x4 -> 2x2 nearest-neighbor picks the expected pixel values."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(dtype)
        layer = image_preprocessing.Resizing(
            height=2, width=2, interpolation='nearest')
        output_image = layer(input_image)
        # pyformat: disable
        expected_output = np.asarray([
            [5, 7],
            [13, 15]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 2, 2, 1))
        self.assertAllEqual(expected_output, output_image)
  def test_up_sampling_numeric(self):
    """2x2 -> 4x4 nearest-neighbor replicates each pixel 2x2."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(dtype)
        layer = image_preprocessing.Resizing(
            height=4, width=4, interpolation='nearest')
        output_image = layer(input_image)
        # pyformat: disable
        expected_output = np.asarray([
            [0, 0, 1, 1],
            [0, 0, 1, 1],
            [2, 2, 3, 3],
            [2, 2, 3, 3]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 4, 4, 1))
        self.assertAllEqual(expected_output, output_image)
  @parameterized.named_parameters(
      ('reshape_bilinear_10_by_4', {'interpolation': 'bilinear'}, 10, 4))
  def test_reshaping(self, kwargs, expected_height, expected_width):
    """Resizing to a different aspect ratio yields the requested shape."""
    with CustomObjectScope({'Resizing': image_preprocessing.Resizing}):
      self._run_test(kwargs, expected_height, expected_width)
  def test_invalid_interpolation(self):
    """Unknown interpolation names are rejected at construction time."""
    with self.assertRaises(NotImplementedError):
      image_preprocessing.Resizing(5, 5, 'invalid_interpolation')
  def test_config_with_custom_name(self):
    """The layer name round-trips through get_config/from_config."""
    layer = image_preprocessing.Resizing(5, 5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.Resizing.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
def get_numpy_center_crop(images, expected_height, expected_width):
  """NumPy reference implementation of a center crop.

  Crops a batch of images of shape (batch, height, width, channels) down to
  (batch, expected_height, expected_width, channels), keeping the window
  centered (biased toward the top/left when the margin is odd).
  """
  # int(x / 2) rather than // so negative margins truncate toward zero,
  # matching the original arithmetic exactly.
  top = int((images.shape[1] - expected_height) / 2)
  left = int((images.shape[2] - expected_width) / 2)
  return images[:, top:top + expected_height, left:left + expected_width, :]
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CenterCropTest(keras_parameterized.TestCase):
  """Tests for the `CenterCrop` layer against the NumPy reference crop."""
  def _run_test(self, expected_height, expected_width):
    """Compares CenterCrop output to get_numpy_center_crop on a fixed batch."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'height': expected_height, 'width': expected_width}
    input_images = np.random.random(
        (num_samples, orig_height, orig_width, channels)).astype(np.float32)
    expected_output = get_numpy_center_crop(
        input_images, expected_height, expected_width)
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.CenterCrop,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          input_data=input_images,
          expected_output=expected_output,
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))
  @parameterized.named_parameters(
      ('center_crop_3_by_4', 3, 4),
      ('center_crop_3_by_2', 3, 2))
  def test_center_crop_aligned(self, expected_height, expected_width):
    """Crop sizes whose margins split evenly on both sides."""
    with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
      self._run_test(expected_height, expected_width)
  @parameterized.named_parameters(
      ('center_crop_4_by_5', 4, 5),
      ('center_crop_4_by_3', 4, 3))
  def test_center_crop_mis_aligned(self, expected_height, expected_width):
    """Crop sizes with odd margins in both dimensions."""
    with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
      self._run_test(expected_height, expected_width)
  @parameterized.named_parameters(
      ('center_crop_4_by_6', 4, 6),
      ('center_crop_3_by_2', 3, 2))
  def test_center_crop_half_mis_aligned(self, expected_height, expected_width):
    """Crop sizes with an odd margin in one dimension only."""
    with CustomObjectScope({'CenterCrop': image_preprocessing.CenterCrop}):
      self._run_test(expected_height, expected_width)
  @parameterized.named_parameters(
      ('center_crop_5_by_12', 5, 12),
      ('center_crop_10_by_8', 10, 8),
      ('center_crop_10_by_12', 10, 12))
  def test_invalid_center_crop(self, expected_height, expected_width):
    """Requesting a crop larger than the input fails the layer's assertion."""
    with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                r'assertion failed'):
      self._run_test(expected_height, expected_width)
  def test_config_with_custom_name(self):
    """The layer name round-trips through get_config/from_config."""
    layer = image_preprocessing.CenterCrop(5, 5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.CenterCrop.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomCropTest(keras_parameterized.TestCase):
  """Tests for the `RandomCrop` layer (training and inference paths)."""
  def _run_test(self, expected_height, expected_width):
    """Runs layer_test on RandomCrop with a fixed (2, 5, 8, 3) input batch."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'height': expected_height, 'width': expected_width}
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.RandomCrop,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, expected_height, expected_width,
                                 channels))
  @parameterized.named_parameters(
      ('random_crop_5_by_12', 5, 12),
      ('random_crop_10_by_8', 10, 8),
      ('random_crop_10_by_12', 10, 12))
  def test_invalid_random_crop(self, expected_height, expected_width):
    """Crops larger than the input raise InvalidArgumentError."""
    with self.assertRaises(tf.errors.InvalidArgumentError):
      with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
        self._run_test(expected_height, expected_width)
  def test_training_with_mock(self):
    """With a mocked random offset, the crop window is fully deterministic."""
    if tf.test.is_built_with_rocm():
      # TODO(rocm):
      # re-enable this test once ROCm adds support for
      # the StatefulUniformFullInt Op (on the GPU)
      self.skipTest('Feature not supported on ROCm')
    np.random.seed(1337)
    height, width = 3, 4
    height_offset = np.random.randint(low=0, high=3)
    width_offset = np.random.randint(low=0, high=5)
    # Patches the layer's offset sampling so the expected slice is known.
    mock_offset = [0, height_offset, width_offset, 0]
    with tf.compat.v1.test.mock.patch.object(
        stateless_random_ops, 'stateless_random_uniform',
        return_value=mock_offset):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomCrop(height, width)
        inp = np.random.random((12, 5, 8, 3))
        actual_output = layer(inp, training=1)
        expected_output = inp[:, height_offset:(height_offset + height),
                              width_offset:(width_offset + width), :]
        self.assertAllClose(expected_output, actual_output)
  @parameterized.named_parameters(
      ('random_crop_4_by_6', 4, 6),
      ('random_crop_3_by_2', 3, 2))
  def test_random_crop_output_shape(self, expected_height, expected_width):
    """Output shape matches the requested crop size."""
    if tf.test.is_built_with_rocm():
      # TODO(rocm):
      # re-enable this test once ROCm adds support for
      # the StatefulUniformFullInt Op (on the GPU)
      self.skipTest('Feature not supported on ROCm')
    with CustomObjectScope({'RandomCrop': image_preprocessing.RandomCrop}):
      self._run_test(expected_height, expected_width)
  def test_random_crop_full_height(self):
    self._run_test(5, 2)
  def test_random_crop_full_width(self):
    self._run_test(3, 8)
  def test_random_crop_full(self):
    """Crop size equal to the input is an identity at inference."""
    np.random.seed(1337)
    height, width = 8, 16
    inp = np.random.random((12, 8, 16, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomCrop(height, width)
      actual_output = layer(inp, training=0)
      self.assertAllClose(inp, actual_output)
  def test_predicting_with_mock_longer_height(self):
    """Inference path: resize to match width, then center-crop the height."""
    np.random.seed(1337)
    height, width = 3, 3
    inp = np.random.random((12, 10, 6, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomCrop(height, width)
      actual_output = layer(inp, training=0)
      resized_inp = tf.image.resize(
          inp, size=[5, 3])
      expected_output = resized_inp[:, 1:4, :, :]
      self.assertAllClose(expected_output, actual_output)
  def test_predicting_with_mock_longer_width(self):
    """Inference path: resize to match height, then center-crop the width."""
    np.random.seed(1337)
    height, width = 4, 6
    inp = np.random.random((12, 8, 16, 3))
    with testing_utils.use_gpu():
      layer = image_preprocessing.RandomCrop(height, width)
      actual_output = layer(inp, training=0)
      resized_inp = tf.image.resize(inp, size=[4, 8])
      expected_output = resized_inp[:, :, 1:7, :]
      self.assertAllClose(expected_output, actual_output)
  def test_config_with_custom_name(self):
    """The layer name round-trips through get_config/from_config."""
    layer = image_preprocessing.RandomCrop(5, 5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomCrop.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
class RescalingTest(keras_parameterized.TestCase):
  """Tests for the `Rescaling` layer (y = x * scale + offset)."""
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_rescaling_base(self):
    """Rescaling preserves the input shape."""
    kwargs = {'scale': 1./127.5, 'offset': -1.}
    testing_utils.layer_test(
        image_preprocessing.Rescaling,
        kwargs=kwargs,
        input_shape=(2, 5, 6, 3),
        expected_output_shape=(None, 5, 6, 3))
  @testing_utils.run_v2_only
  def test_rescaling_correctness_float(self):
    """Float input: output equals input * scale + offset."""
    layer = image_preprocessing.Rescaling(scale=1./127.5, offset=-1.)
    inputs = tf.random.uniform((2, 4, 5, 3))
    outputs = layer(inputs)
    self.assertAllClose(outputs.numpy(), inputs.numpy() * (1./127.5) - 1)
  @testing_utils.run_v2_only
  def test_rescaling_correctness_int(self):
    """Int input is cast to float32 before rescaling."""
    layer = image_preprocessing.Rescaling(scale=1./127.5, offset=-1)
    inputs = tf.random.uniform((2, 4, 5, 3), 0, 100, dtype='int32')
    outputs = layer(inputs)
    self.assertEqual(outputs.dtype.name, 'float32')
    self.assertAllClose(outputs.numpy(), inputs.numpy() * (1./127.5) - 1)
  def test_config_with_custom_name(self):
    """The layer name round-trips through get_config/from_config."""
    layer = image_preprocessing.Rescaling(0.5, name='rescaling')
    config = layer.get_config()
    layer_1 = image_preprocessing.Rescaling.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomFlipTest(keras_parameterized.TestCase):
  """Tests for the `RandomFlip` layer with a mocked coin-flip source."""
  def _run_test(self, mode, expected_output=None, mock_random=None):
    """Runs RandomFlip with random_uniform patched to `mock_random`.

    A per-image value of 1 in `mock_random` makes that image flip; the
    default mocks every image as flipped and derives `expected_output`
    by flipping the input along the axes implied by `mode`.
    """
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    if mock_random is None:
      mock_random = [1 for _ in range(num_samples)]
      mock_random = np.reshape(mock_random, [2, 1, 1, 1])
    inp = np.random.random((num_samples, orig_height, orig_width, channels))
    if expected_output is None:
      expected_output = inp
      if mode == 'horizontal' or mode == 'horizontal_and_vertical':
        expected_output = np.flip(expected_output, axis=2)
      if mode == 'vertical' or mode == 'horizontal_and_vertical':
        expected_output = np.flip(expected_output, axis=1)
    with tf.compat.v1.test.mock.patch.object(
        random_ops, 'random_uniform', return_value=mock_random):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomFlip(mode)
        actual_output = layer(inp, training=1)
        self.assertAllClose(expected_output, actual_output)
  @parameterized.named_parameters(
      ('random_flip_horizontal', 'horizontal'),
      ('random_flip_vertical', 'vertical'),
      ('random_flip_both', 'horizontal_and_vertical'))
  def test_random_flip(self, mode):
    with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
      self._run_test(mode)
  def test_random_flip_horizontal_half(self):
    """Only the first image (mock value 1) gets flipped horizontally."""
    with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
      np.random.seed(1337)
      mock_random = [1, 0]
      mock_random = np.reshape(mock_random, [2, 1, 1, 1])
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images.copy()
      expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=1)
      self._run_test('horizontal', expected_output, mock_random)
  def test_random_flip_vertical_half(self):
    """Only the first image (mock value 1) gets flipped vertically."""
    with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
      np.random.seed(1337)
      mock_random = [1, 0]
      mock_random = np.reshape(mock_random, [2, 1, 1, 1])
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images.copy()
      expected_output[0, :, :, :] = np.flip(input_images[0, :, :, :], axis=0)
      self._run_test('vertical', expected_output, mock_random)
  def test_random_flip_inference(self):
    """training=0 is an identity pass-through."""
    with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomFlip()
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)
  def test_random_flip_default(self):
    """The default mode flips both horizontally and vertically."""
    with CustomObjectScope({'RandomFlip': image_preprocessing.RandomFlip}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = np.flip(np.flip(input_images, axis=1), axis=2)
      mock_random = [1, 1]
      mock_random = np.reshape(mock_random, [2, 1, 1, 1])
      with tf.compat.v1.test.mock.patch.object(
          random_ops, 'random_uniform', return_value=mock_random):
        with self.cached_session(use_gpu=True):
          layer = image_preprocessing.RandomFlip()
          actual_output = layer(input_images, training=1)
          self.assertAllClose(expected_output, actual_output)
  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """The layer name round-trips through get_config/from_config."""
    layer = image_preprocessing.RandomFlip(name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomFlip.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomContrastTest(keras_parameterized.TestCase):
  """Tests for the `RandomContrast` layer with a mocked contrast factor."""
  def _run_test(self,
                lower,
                upper,
                expected_output=None,
                mock_random=None):
    """Runs RandomContrast with random_uniform patched to `mock_random`.

    The expected result scales each pixel's deviation from the per-image,
    per-channel mean by the mocked contrast factor.
    """
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    if mock_random is None:
      mock_random = 0.2
    inp = np.random.random((num_samples, orig_height, orig_width, channels))
    if expected_output is None:
      # reduce mean on height.
      inp_mean = np.mean(inp, axis=1, keepdims=True)
      # reduce mean on width.
      inp_mean = np.mean(inp_mean, axis=2, keepdims=True)
      expected_output = (inp - inp_mean) * mock_random + inp_mean
    with tf.compat.v1.test.mock.patch.object(
        random_ops, 'random_uniform', return_value=mock_random):
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomContrast((lower, upper))
        actual_output = layer(inp, training=True)
        self.assertAllClose(expected_output, actual_output)
  @parameterized.named_parameters(
      ('random_contrast_2_by_5', 0.2, 0.5),
      ('random_contrast_2_by_13', 0.2, 1.3),
      ('random_contrast_5_by_2', 0.5, 0.2))
  def test_random_contrast(self, lower, upper):
    with CustomObjectScope(
        {'RandomContrast': image_preprocessing.RandomContrast}):
      self._run_test(lower, upper)
  @parameterized.named_parameters(
      ('random_contrast_amplitude_2', 0.2),
      ('random_contrast_amplitude_5', 0.5))
  def test_random_contrast_amplitude(self, amplitude):
    """A single scalar factor is accepted and the layer runs."""
    with CustomObjectScope(
        {'RandomContrast': image_preprocessing.RandomContrast}):
      input_images = np.random.random((2, 5, 8, 3))
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomContrast(amplitude)
        layer(input_images)
  def test_random_contrast_inference(self):
    """training=False is an identity pass-through."""
    with CustomObjectScope(
        {'RandomContrast': image_preprocessing.RandomContrast}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomContrast((0.1, 0.2))
        actual_output = layer(input_images, training=False)
        self.assertAllClose(expected_output, actual_output)
  def test_random_contrast_int_dtype(self):
    """Integer inputs are accepted without error."""
    with CustomObjectScope(
        {'RandomContrast': image_preprocessing.RandomContrast}):
      input_images = np.random.randint(low=0, high=255, size=(2, 5, 8, 3))
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomContrast((0.1, 0.2))
        layer(input_images)
  def test_random_contrast_invalid_bounds(self):
    """Factors outside the valid range raise ValueError."""
    with self.assertRaises(ValueError):
      image_preprocessing.RandomContrast((-0.1, .5))
    with self.assertRaises(ValueError):
      image_preprocessing.RandomContrast((1.1, .5))
    with self.assertRaises(ValueError):
      image_preprocessing.RandomContrast((0.1, -0.2))
  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """The layer name round-trips through get_config/from_config."""
    layer = image_preprocessing.RandomContrast((.5, .6), name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomContrast.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomTranslationTest(keras_parameterized.TestCase):
def _run_test(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
with testing_utils.use_gpu():
testing_utils.layer_test(
image_preprocessing.RandomTranslation,
kwargs=kwargs,
input_shape=(num_samples, orig_height, orig_width, channels),
expected_output_shape=(None, orig_height, orig_width, channels))
@parameterized.named_parameters(
('random_translate_4_by_6', .4, .6), ('random_translate_3_by_2', .3, .2),
('random_translate_tuple_factor', (-.5, .4), (.2, .3)))
def test_random_translation(self, height_factor, width_factor):
self._run_test(height_factor, width_factor)
def test_random_translation_up_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0.)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[20, 21, 22, 23, 24]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_up_numeric_constant(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by -.2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(-.2, -.2), width_factor=0., fill_mode='constant')
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
[0, 0, 0, 0, 0]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_translation_down_numeric_reflect(self):
for dtype in (np.int64, np.float32):
with testing_utils.use_gpu():
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
# Shifting by .2 * 5 = 1 pixel.
layer = image_preprocessing.RandomTranslation(
height_factor=(.2, .2), width_factor=0.)
output_image = layer(input_image)
# pyformat: disable
expected_output = np.asarray([
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]
]).astype(dtype)
# pyformat: enable
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
  def test_random_translation_asymmetric_size_numeric_reflect(self):
    """Large downward shift on a non-square (8x2) image with 'reflect' fill."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 16), (1, 8, 2, 1)).astype(dtype)
        # Shifting by .5 * 8 = 4 pixels.
        layer = image_preprocessing.RandomTranslation(
            height_factor=(.5, .5), width_factor=0.)
        output_image = layer(input_image)
        # The top half is the reflection (reversed rows) of the input's
        # first four rows; the bottom half is those rows in order.
        # pyformat: disable
        expected_output = np.asarray([
            [6, 7],
            [4, 5],
            [2, 3],
            [0, 1],
            [0, 1],
            [2, 3],
            [4, 5],
            [6, 7],
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 8, 2, 1))
        self.assertAllEqual(expected_output, output_image)
  def test_random_translation_down_numeric_constant(self):
    """Downward shift by 1 pixel with 'constant' fill (zeros at the border)."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
        # Shifting by .2 * 5 = 1 pixel.
        layer = image_preprocessing.RandomTranslation(
            height_factor=(.2, .2), width_factor=0., fill_mode='constant')
        output_image = layer(input_image)
        # The vacated top row is filled with the constant value 0.
        # pyformat: disable
        expected_output = np.asarray([
            [0, 0, 0, 0, 0],
            [0, 1, 2, 3, 4],
            [5, 6, 7, 8, 9],
            [10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 5, 5, 1))
        self.assertAllEqual(expected_output, output_image)
  def test_random_translation_left_numeric_reflect(self):
    """Leftward shift by 1 pixel with the default 'reflect' fill."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
        # Shifting by -.2 * 5 = 1 pixel.
        layer = image_preprocessing.RandomTranslation(
            height_factor=0., width_factor=(-.2, -.2))
        output_image = layer(input_image)
        # Columns move left one step; the vacated last column repeats the
        # final input column (reflect fill).
        # pyformat: disable
        expected_output = np.asarray([
            [1, 2, 3, 4, 4],
            [6, 7, 8, 9, 9],
            [11, 12, 13, 14, 14],
            [16, 17, 18, 19, 19],
            [21, 22, 23, 24, 24]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 5, 5, 1))
        self.assertAllEqual(expected_output, output_image)
  def test_random_translation_left_numeric_constant(self):
    """Leftward shift by 1 pixel with 'constant' fill (zeros at the border)."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
        # Shifting by -.2 * 5 = 1 pixel.
        layer = image_preprocessing.RandomTranslation(
            height_factor=0., width_factor=(-.2, -.2), fill_mode='constant')
        output_image = layer(input_image)
        # The vacated last column is filled with the constant value 0.
        # pyformat: disable
        expected_output = np.asarray([
            [1, 2, 3, 4, 0],
            [6, 7, 8, 9, 0],
            [11, 12, 13, 14, 0],
            [16, 17, 18, 19, 0],
            [21, 22, 23, 24, 0]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 5, 5, 1))
        self.assertAllEqual(expected_output, output_image)
def test_random_translation_inference(self):
with CustomObjectScope(
{'RandomTranslation': image_preprocessing.RandomTranslation}):
input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
expected_output = input_images
with testing_utils.use_gpu():
layer = image_preprocessing.RandomTranslation(.5, .5)
actual_output = layer(input_images, training=0)
self.assertAllClose(expected_output, actual_output)
@testing_utils.run_v2_only
def test_config_with_custom_name(self):
layer = image_preprocessing.RandomTranslation(.5, .6, name='image_preproc')
config = layer.get_config()
layer_1 = image_preprocessing.RandomTranslation.from_config(config)
self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomTransformTest(keras_parameterized.TestCase):
  """Numeric checks for the low-level `transform` op under each fill mode.

  All tests translate a fixed 5x3 ramp image (values 0..14) by one pixel in
  each of the four directions and compare against a hand-computed result.
  """

  def _run_random_transform_with_mock(self,
                                      transform_matrix,
                                      expected_output,
                                      mode,
                                      fill_value=0.0,
                                      interpolation='bilinear'):
    """Apply `transform_matrix` to the fixed ramp image and compare output."""
    inp = np.arange(15).reshape((1, 5, 3, 1)).astype(np.float32)
    with self.cached_session(use_gpu=True):
      output = image_preprocessing.transform(
          inp,
          transform_matrix,
          fill_mode=mode,
          fill_value=fill_value,
          interpolation=interpolation)
      self.assertAllClose(expected_output, output)

  def test_random_translation_reflect(self):
    """'reflect' fill mirrors the image at the borders."""
    # reflected output is (dcba|abcd|dcba)
    # Test down shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 1., 2.],
         [0., 1., 2.],
         [3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'reflect')
    # Test up shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11.],
         [12., 13., 14.],
         [12., 13., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'reflect')
    # Test left shift by 1.
    # reflected output is (dcba|abcd|dcba)
    # pyformat: disable
    expected_output = np.asarray(
        [[1., 2., 2.],
         [4., 5., 5.],
         [7., 8., 8.],
         [10., 11., 11.],
         [13., 14., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'reflect')
    # Test right shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 0., 1.],
         [3., 3., 4],
         [6., 6., 7.],
         [9., 9., 10.],
         [12., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'reflect')

  def test_random_translation_wrap(self):
    """'wrap' fill tiles the image periodically at the borders."""
    # warpped output is (abcd|abcd|abcd)
    # Test down shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[12., 13., 14.],
         [0., 1., 2.],
         [3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'wrap')
    # Test up shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11.],
         [12., 13., 14.],
         [0., 1., 2.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'wrap')
    # Test left shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[1., 2., 0.],
         [4., 5., 3.],
         [7., 8., 6.],
         [10., 11., 9.],
         [13., 14., 12.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'wrap')
    # Test right shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[2., 0., 1.],
         [5., 3., 4],
         [8., 6., 7.],
         [11., 9., 10.],
         [14., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'wrap')

  def test_random_translation_nearest(self):
    """'nearest' fill extends the edge pixels at the borders."""
    # nearest output is (aaaa|abcd|dddd)
    # Test down shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 1., 2.],
         [0., 1., 2.],
         [3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'nearest')
    # Test up shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11.],
         [12., 13., 14.],
         [12., 13., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'nearest')
    # Test left shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[1., 2., 2.],
         [4., 5., 5.],
         [7., 8., 8.],
         [10., 11., 11.],
         [13., 14., 14.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'nearest')
    # Test right shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 0., 1.],
         [3., 3., 4],
         [6., 6., 7.],
         [9., 9., 10.],
         [12., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'nearest')

  def test_random_translation_constant_0(self):
    """'constant' fill with the default fill_value=0.0."""
    # constant output is (0000|abcd|0000)
    # Test down shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 0., 0.],
         [0., 1., 2.],
         [3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'constant')
    # Test up shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11.],
         [12., 13., 14.],
         [0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'constant')
    # Test left shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[1., 2., 0.],
         [4., 5., 0.],
         [7., 8., 0.],
         [10., 11., 0.],
         [13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'constant')
    # Test right shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 0., 1.],
         [0., 3., 4],
         [0., 6., 7.],
         [0., 9., 10.],
         [0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(transform_matrix, expected_output,
                                         'constant')

  def test_random_translation_constant_1(self):
    """'constant' fill with an explicit fill_value=1.0."""
    # The explicit horizon gates the newer transform kernel that supports a
    # non-zero fill_value.
    with tf.compat.forward_compatibility_horizon(2020, 8, 6):
      # constant output is (1111|abcd|1111)
      # Test down shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[1., 1., 1.],
           [0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8],
           [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, 'constant', fill_value=1.0)
      # Test up shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[3., 4., 5.],
           [6., 7., 8],
           [9., 10., 11.],
           [12., 13., 14.],
           [1., 1., 1.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, 'constant', fill_value=1.0)
      # Test left shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[1., 2., 1.],
           [4., 5., 1.],
           [7., 8., 1.],
           [10., 11., 1.],
           [13., 14., 1.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, 'constant', fill_value=1.0)
      # Test right shift by 1.
      # pyformat: disable
      expected_output = np.asarray(
          [[1., 0., 1.],
           [1., 3., 4],
           [1., 6., 7.],
           [1., 9., 10.],
           [1., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
      # pyformat: enable
      transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
      self._run_random_transform_with_mock(
          transform_matrix, expected_output, 'constant', fill_value=1.0)

  def test_random_translation_nearest_interpolation(self):
    """'constant' fill combined with 'nearest' (not bilinear) interpolation."""
    # constant fill: out-of-bounds pixels become 0 (was mislabeled "nearest
    # output" in an earlier revision).
    # Test down shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 0., 0.],
         [0., 1., 2.],
         [3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., -1., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
    # Test up shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[3., 4., 5.],
         [6., 7., 8],
         [9., 10., 11.],
         [12., 13., 14.],
         [0., 0., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 0., 0., 1., 1., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
    # Test left shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[1., 2., 0.],
         [4., 5., 0.],
         [7., 8., 0.],
         [10., 11., 0.],
         [13., 14., 0.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., 1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
    # Test right shift by 1.
    # pyformat: disable
    expected_output = np.asarray(
        [[0., 0., 1.],
         [0., 3., 4],
         [0., 6., 7.],
         [0., 9., 10.],
         [0., 12., 13.]]).reshape((1, 5, 3, 1)).astype(np.float32)
    # pyformat: enable
    transform_matrix = np.asarray([[1., 0., -1., 0., 1., 0., 0., 0.]])
    self._run_random_transform_with_mock(
        transform_matrix, expected_output,
        mode='constant', interpolation='nearest')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomRotationTest(keras_parameterized.TestCase):
  """Shape, inference, distribution-strategy and config tests for
  RandomRotation."""

  def _run_test(self, factor):
    """Build a RandomRotation layer and check the output shape is preserved."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'factor': factor}
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.RandomRotation,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, orig_height, orig_width, channels))

  @parameterized.named_parameters(('random_rotate_4', .4),
                                  ('random_rotate_3', .3),
                                  ('random_rotate_tuple_factor', (-.5, .4)))
  def test_random_rotation(self, factor):
    self._run_test(factor)

  def test_random_rotation_inference(self):
    """With training=0 the layer must pass inputs through unchanged."""
    # Bug fix: the scope key was 'RandomTranslation' (copy-paste from the
    # translation test) but must match the registered class name so that the
    # custom-object scope actually resolves RandomRotation.
    with CustomObjectScope(
        {'RandomRotation': image_preprocessing.RandomRotation}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomRotation(.5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  def test_distribution_strategy(self):
    """Tests that RandomRotation can be created within distribution strategies.

    And that replicas got the same random result.
    """
    input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
    with testing_utils.use_gpu():
      strat = MirroredStrategy(devices=['cpu', 'gpu'])
      with strat.scope():
        layer = image_preprocessing.RandomRotation(.5)
        output = strat.run(lambda: layer(input_images, training=True))
      values = output.values
      self.assertAllEqual(2, len(values))
      # Both replicas must see the same random rotation.
      self.assertAllClose(values[0], values[1], rtol=1e-5)

  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """A custom layer name must survive a get_config/from_config round trip."""
    layer = image_preprocessing.RandomRotation(.5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomRotation.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomZoomTest(keras_parameterized.TestCase):
  """Shape and numeric correctness tests for the RandomZoom layer."""

  def _run_test(self, height_factor, width_factor):
    """Build a RandomZoom layer and check the output shape is preserved."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    kwargs = {'height_factor': height_factor, 'width_factor': width_factor}
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          image_preprocessing.RandomZoom,
          kwargs=kwargs,
          input_shape=(num_samples, orig_height, orig_width, channels),
          expected_output_shape=(None, orig_height, orig_width, channels))

  # Negative factors zoom in.
  @parameterized.named_parameters(
      ('random_zoom_4_by_6', -.4, -.6), ('random_zoom_2_by_3', -.2, -.3),
      ('random_zoom_tuple_factor', (-.4, -.5), (-.2, -.3)))
  def test_random_zoom_in(self, height_factor, width_factor):
    self._run_test(height_factor, width_factor)

  # Positive factors zoom out.
  @parameterized.named_parameters(
      ('random_zoom_4_by_6', .4, .6), ('random_zoom_2_by_3', .2, .3),
      ('random_zoom_tuple_factor', (.4, .5), (.2, .3)))
  def test_random_zoom_out(self, height_factor, width_factor):
    self._run_test(height_factor, width_factor)

  def test_random_zoom_in_numeric(self):
    """50% zoom-in with nearest interpolation on a fixed 5x5 ramp image."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
        layer = image_preprocessing.RandomZoom((-.5, -.5), (-.5, -.5),
                                               interpolation='nearest')
        output_image = layer(np.expand_dims(input_image, axis=0))
        # The central region is magnified; pixels repeat under nearest
        # interpolation.
        # pyformat: disable
        expected_output = np.asarray([
            [6, 7, 7, 8, 8],
            [11, 12, 12, 13, 13],
            [11, 12, 12, 13, 13],
            [16, 17, 17, 18, 18],
            [16, 17, 17, 18, 18]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 5, 5, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_random_zoom_out_numeric(self):
    """Zoom-out with constant fill: the image shrinks inside a zero border."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
        layer = image_preprocessing.RandomZoom((.5, .5), (.8, .8),
                                               fill_mode='constant',
                                               interpolation='nearest')
        output_image = layer(np.expand_dims(input_image, axis=0))
        # pyformat: disable
        expected_output = np.asarray([
            [0, 0, 0, 0, 0],
            [0, 5, 7, 9, 0],
            [0, 10, 12, 14, 0],
            [0, 20, 22, 24, 0],
            [0, 0, 0, 0, 0]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 5, 5, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_random_zoom_out_numeric_preserve_aspect_ratio(self):
    """Omitting width_factor zooms both axes by the height factor."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
        layer = image_preprocessing.RandomZoom((.5, .5),
                                               fill_mode='constant',
                                               interpolation='nearest')
        output_image = layer(np.expand_dims(input_image, axis=0))
        # pyformat: disable
        expected_output = np.asarray([
            [0, 0, 0, 0, 0],
            [0, 6, 7, 9, 0],
            [0, 11, 12, 14, 0],
            [0, 21, 22, 24, 0],
            [0, 0, 0, 0, 0]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 5, 5, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_random_zoom_inference(self):
    """With training=0 the layer must pass inputs through unchanged."""
    with CustomObjectScope(
        {'RandomZoom': image_preprocessing.RandomZoom}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomZoom(.5, .5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """A custom layer name must survive a get_config/from_config round trip."""
    layer = image_preprocessing.RandomZoom(.5, .6, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomZoom.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomHeightTest(keras_parameterized.TestCase):
  """Tests for RandomHeight: only the height axis changes."""

  def _run_test(self, factor):
    """Check batch, width and channel dims are preserved under the factor."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    with testing_utils.use_gpu():
      img = np.random.random((num_samples, orig_height, orig_width, channels))
      layer = image_preprocessing.RandomHeight(factor)
      img_out = layer(img, training=True)
      self.assertEqual(img_out.shape[0], 2)
      self.assertEqual(img_out.shape[2], 8)
      self.assertEqual(img_out.shape[3], 3)

  @parameterized.named_parameters(('random_height_4_by_6', (.4, .6)),
                                  ('random_height_3_by_2', (-.3, .2)),
                                  ('random_height_3', .3))
  def test_random_height_basic(self, factor):
    self._run_test(factor)

  def test_valid_random_height(self):
    """With the RNG mocked to 0, factor .4 yields minval=-.4 => 5*.6=3 rows."""
    # need (maxval - minval) * rnd + minval = 0.6
    mock_factor = 0
    # Patch both the stateful and stateless uniform kernels so the adjustment
    # is deterministic regardless of which RNG path the layer takes.
    with tf.compat.v1.test.mock.patch.object(
        gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
      with tf.compat.v1.test.mock.patch.object(
          gen_stateless_random_ops_v2, 'stateless_random_uniform_v2',
          return_value=mock_factor):
        with testing_utils.use_gpu():
          img = np.random.random((12, 5, 8, 3))
          layer = image_preprocessing.RandomHeight(.4)
          img_out = layer(img, training=True)
          self.assertEqual(img_out.shape[1], 3)

  def test_random_height_longer_numeric(self):
    """Doubling the height bilinearly interpolates between input rows."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 6), (2, 3, 1)).astype(dtype)
        layer = image_preprocessing.RandomHeight(factor=(1., 1.))
        # Return type of RandomHeight() is float32 if `interpolation` is not
        # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired
        # dtype.
        output_image = tf.cast(layer(np.expand_dims(input_image, axis=0)),
                               dtype=dtype)
        # pyformat: disable
        expected_output = np.asarray([
            [0, 1, 2],
            [0.75, 1.75, 2.75],
            [2.25, 3.25, 4.25],
            [3, 4, 5]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 4, 3, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_random_height_shorter_numeric(self):
    """Halving the height with nearest interpolation keeps alternate rows."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 8), (4, 2, 1)).astype(dtype)
        layer = image_preprocessing.RandomHeight(
            factor=(-.5, -.5), interpolation='nearest')
        output_image = layer(np.expand_dims(input_image, axis=0))
        # pyformat: disable
        expected_output = np.asarray([
            [2, 3],
            [6, 7]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 2, 2, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_random_height_invalid_factor(self):
    # A lower bound below -1 would imply a negative output height.
    with self.assertRaises(ValueError):
      image_preprocessing.RandomHeight((-1.5, .4))

  def test_random_height_inference(self):
    """With training=0 the layer must pass inputs through unchanged."""
    with CustomObjectScope({'RandomHeight': image_preprocessing.RandomHeight}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomHeight(.5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """A custom layer name must survive a get_config/from_config round trip."""
    layer = image_preprocessing.RandomHeight(.5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomHeight.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class RandomWidthTest(keras_parameterized.TestCase):
  """Tests for RandomWidth: only the width axis changes."""

  def _run_test(self, factor):
    """Check batch, height and channel dims are preserved under the factor."""
    np.random.seed(1337)
    num_samples = 2
    orig_height = 5
    orig_width = 8
    channels = 3
    with testing_utils.use_gpu():
      img = np.random.random((num_samples, orig_height, orig_width, channels))
      layer = image_preprocessing.RandomWidth(factor)
      img_out = layer(img, training=True)
      self.assertEqual(img_out.shape[0], 2)
      self.assertEqual(img_out.shape[1], 5)
      self.assertEqual(img_out.shape[3], 3)

  @parameterized.named_parameters(('random_width_4_by_6', (.4, .6)),
                                  ('random_width_3_by_2', (-.3, .2)),
                                  ('random_width_3', .3))
  def test_random_width_basic(self, factor):
    self._run_test(factor)

  def test_valid_random_width(self):
    """With the RNG mocked to 0, factor .4 yields minval=-.4 => 5*.6=3 cols."""
    # need (maxval - minval) * rnd + minval = 0.6
    mock_factor = 0
    # Patch both the stateful and stateless uniform kernels so the adjustment
    # is deterministic regardless of which RNG path the layer takes.
    with tf.compat.v1.test.mock.patch.object(
        gen_stateful_random_ops, 'stateful_uniform', return_value=mock_factor):
      with tf.compat.v1.test.mock.patch.object(
          gen_stateless_random_ops_v2, 'stateless_random_uniform_v2',
          return_value=mock_factor):
        with testing_utils.use_gpu():
          img = np.random.random((12, 8, 5, 3))
          layer = image_preprocessing.RandomWidth(.4)
          img_out = layer(img, training=True)
          self.assertEqual(img_out.shape[2], 3)

  def test_random_width_longer_numeric(self):
    """Doubling the width bilinearly interpolates between input columns."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 6), (3, 2, 1)).astype(dtype)
        layer = image_preprocessing.RandomWidth(factor=(1., 1.))
        # Return type of RandomWidth() is float32 if `interpolation` is not
        # set to `ResizeMethod.NEAREST_NEIGHBOR`; cast `layer` to desired
        # dtype.
        output_image = tf.cast(layer(np.expand_dims(input_image, axis=0)),
                               dtype=dtype)
        # pyformat: disable
        expected_output = np.asarray([
            [0, 0.25, 0.75, 1],
            [2, 2.25, 2.75, 3],
            [4, 4.25, 4.75, 5]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 3, 4, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_random_width_shorter_numeric(self):
    """Halving the width with nearest interpolation keeps alternate columns."""
    for dtype in (np.int64, np.float32):
      with testing_utils.use_gpu():
        input_image = np.reshape(np.arange(0, 8), (2, 4, 1)).astype(dtype)
        layer = image_preprocessing.RandomWidth(
            factor=(-.5, -.5), interpolation='nearest')
        output_image = layer(np.expand_dims(input_image, axis=0))
        # pyformat: disable
        expected_output = np.asarray([
            [1, 3],
            [5, 7]
        ]).astype(dtype)
        # pyformat: enable
        expected_output = np.reshape(expected_output, (1, 2, 2, 1))
        self.assertAllEqual(expected_output, output_image)

  def test_random_width_invalid_factor(self):
    # A lower bound below -1 would imply a negative output width.
    with self.assertRaises(ValueError):
      image_preprocessing.RandomWidth((-1.5, .4))

  def test_random_width_inference(self):
    """With training=0 the layer must pass inputs through unchanged."""
    with CustomObjectScope({'RandomWidth': image_preprocessing.RandomWidth}):
      input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
      expected_output = input_images
      with testing_utils.use_gpu():
        layer = image_preprocessing.RandomWidth(.5)
        actual_output = layer(input_images, training=0)
        self.assertAllClose(expected_output, actual_output)

  @testing_utils.run_v2_only
  def test_config_with_custom_name(self):
    """A custom layer name must survive a get_config/from_config round trip."""
    layer = image_preprocessing.RandomWidth(.5, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomWidth.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class LearningPhaseTest(keras_parameterized.TestCase):
  """Preprocessing layers default to training=True when called directly."""

  def test_plain_call(self):
    """Direct calls transform by default; training=False is the identity."""
    layer = image_preprocessing.RandomWidth(.5, seed=123)
    shape = (12, 12, 3)
    img = np.random.random((12,) + shape)
    out = layer(img)  # Default to training=True
    self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
    out = layer(img, training=True)
    self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
    out = layer(img, training=False)
    self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)

  def test_call_in_container(self):
    """The same default applies when the layers are wrapped in Sequential."""
    layer1 = image_preprocessing.RandomWidth(.5, seed=123)
    layer2 = image_preprocessing.RandomHeight(.5, seed=123)
    seq = sequential.Sequential([layer1, layer2])
    shape = (12, 12, 3)
    img = np.random.random((12,) + shape)
    out = seq(img)  # Default to training=True
    self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
    out = seq(img, training=True)
    self.assertNotEqual(tuple(int(i) for i in out.shape[1:]), shape)
    out = seq(img, training=False)
    self.assertEqual(tuple(int(i) for i in out.shape[1:]), shape)
if __name__ == '__main__':
  # Dispatch to the TensorFlow test runner when executed as a script.
  tf.test.main()
| 39.830887 | 80 | 0.626934 |
7fb840f6448d7a2ae35298a5d7426cd3cb7e6ab2 | 297 | py | Python | setup.py | simran212530/Probability_Gauss_Module | 26525ca72d7d39e9ff3e3f1e7cd62b3760c7d26a | [
"MIT"
] | null | null | null | setup.py | simran212530/Probability_Gauss_Module | 26525ca72d7d39e9ff3e3f1e7cd62b3760c7d26a | [
"MIT"
] | null | null | null | setup.py | simran212530/Probability_Gauss_Module | 26525ca72d7d39e9ff3e3f1e7cd62b3760c7d26a | [
"MIT"
] | null | null | null | from setuptools import setup
# Package metadata for the prob_gauss_binomial distribution package.
setup(name='prob_gauss_binomial',
      version='0.2',
      description='Gaussian and Binomial distributions',
      packages=['prob_gauss_binomial'],
      author = 'Gursimran Kaur',
      author_email = 'gursimran18336@iiitd.ac.in',
      zip_safe=False)
| 29.7 | 57 | 0.670034 |
e3fb0cc7b04438d839cff01298ef5ff420e0594f | 751 | py | Python | ReadFromCSV.py | nicetone/Python | 85fd9cc7e00f3bbeed5355ce392128ff22de459b | [
"MIT"
] | 28,321 | 2015-01-03T15:36:19.000Z | 2022-03-31T10:11:54.000Z | ReadFromCSV.py | nicetone/Python | 85fd9cc7e00f3bbeed5355ce392128ff22de459b | [
"MIT"
] | 544 | 2015-01-19T14:28:09.000Z | 2022-03-29T04:05:21.000Z | ReadFromCSV.py | nicetone/Python | 85fd9cc7e00f3bbeed5355ce392128ff22de459b | [
"MIT"
] | 13,366 | 2015-01-03T15:45:14.000Z | 2022-03-31T11:50:26.000Z | __author__ = 'vamsi'
import pandas as pd  # pandas library to read csv file
from matplotlib import pyplot as plt  # matplotlib library to visualise the data
from matplotlib import style

style.use("ggplot")

# Read the sales data and plot product price against sales id.
df = pd.read_csv("..\SalesData.csv")  # Reading the csv file
# `.as_matrix()` was deprecated and removed in pandas 1.0; `.to_numpy()` is
# the supported replacement and returns the same ndarray.
x = df["SalesID"].to_numpy()  # extracting the column with name SalesID
y = df["ProductPrice"].to_numpy()  # extracting the column with name ProductPrice
plt.xlabel("SalesID")  # assigning X-axis label
plt.ylabel("ProductPrice")  # assigning Y-axis label
plt.title("Sales Analysis")  # assigning Title to the graph
plt.plot(x, y)  # Plot X and Y axis
plt.show()  # Show the graph
| 39.526316 | 98 | 0.721704 |
08d499fcd5018c57971c188547b8b81d42da152f | 444 | py | Python | gunicorn.py | pbillerot/drumservice | 84a702989aef9b385e3f6fa743bbb607371e7db6 | [
"MIT"
] | null | null | null | gunicorn.py | pbillerot/drumservice | 84a702989aef9b385e3f6fa743bbb607371e7db6 | [
"MIT"
] | 1 | 2021-03-20T08:01:31.000Z | 2021-03-20T08:01:31.000Z | gunicorn.py | pbillerot/drumservice | 84a702989aef9b385e3f6fa743bbb607371e7db6 | [
"MIT"
] | null | null | null | command = '/var/www/__APP_NAME__/venv/bin/gunicorn'
pythonpath = '/var/www/__APP_NAME__'
workers = 3
user = '__USER_NAME__'
bind = '127.0.0.1:8000'
pid = '/run/gunicorn/__APP_NAME__-pid'
errorlog = '/var/log/__APP_NAME__/error.log'
accesslog = '/var/log/__APP_NAME__/access.log'
access_log_format = '%({X-Real-IP}i)s %({X-Forwarded-For}i)s %(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
loglevel = 'warning'
capture_output = True
| 37 | 121 | 0.671171 |
fe1c476ed2205a964d4d25570269683b25ed35eb | 30,471 | py | Python | pandas/tests/generic/test_generic.py | GabrielUlisses/pandas | 6430d5324ae2b602b314a7851e9c1f4c5313cceb | [
"BSD-3-Clause"
] | 1 | 2020-10-29T17:32:26.000Z | 2020-10-29T17:32:26.000Z | pandas/tests/generic/test_generic.py | GabrielUlisses/pandas | 6430d5324ae2b602b314a7851e9c1f4c5313cceb | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/generic/test_generic.py | GabrielUlisses/pandas | 6430d5324ae2b602b314a7851e9c1f4c5313cceb | [
"BSD-3-Clause"
] | null | null | null | from copy import copy, deepcopy
import numpy as np
import pytest
from pandas.compat.numpy import np_version_under1p17
from pandas.core.dtypes.common import is_scalar
import pandas as pd
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
import pandas.core.common as com
# ----------------------------------------------------------------------
# Generic types test cases
class Generic:
    @property
    def _ndim(self):
        # Number of axes of the type under test (1 for Series, 2 for
        # DataFrame), taken from the pandas class itself.
        return self._typ._AXIS_LEN
    def _axes(self):
        """ return the axes for my object typ """
        # e.g. ["index"] for Series, ["index", "columns"] for DataFrame.
        return self._typ._AXIS_ORDERS
    def _construct(self, shape, value=None, dtype=None, **kwargs):
        """
        construct an object for the given shape

        if value is specified use that if its a scalar
        if value is an array, repeat it as needed

        The special scalar value "empty" constructs an empty (all-dropped)
        object of float64 dtype.
        """
        # An int shape means a square/cube of that size along every axis.
        if isinstance(shape, int):
            shape = tuple([shape] * self._ndim)
        if value is not None:
            if is_scalar(value):
                if value == "empty":
                    arr = None
                    dtype = np.float64

                    # remove the info axis
                    kwargs.pop(self._typ._info_axis_name, None)
                else:
                    arr = np.empty(shape, dtype=dtype)
                    arr.fill(value)
            else:
                # Tile the given array so it exactly fills `shape`.
                fshape = np.prod(shape)
                arr = value.ravel()
                new_shape = fshape / arr.shape[0]
                if fshape % arr.shape[0] != 0:
                    raise Exception("invalid value passed in _construct")

                arr = np.repeat(arr, new_shape).reshape(shape)
        else:
            arr = np.random.randn(*shape)

        return self._typ(arr, dtype=dtype, **kwargs)
    def _compare(self, result, expected):
        # Delegate equality checking to the per-type comparator
        # (e.g. tm.assert_series_equal / tm.assert_frame_equal).
        self._comparator(result, expected)
    def test_rename(self):
        """Renaming one axis with a callable, dict or Series relabels it."""
        # single axis
        idx = list("ABCD")
        # relabeling values passed into self.rename
        args = [
            str.lower,
            {x: x.lower() for x in idx},
            Series({x: x.lower() for x in idx}),
        ]

        for axis in self._axes():
            kwargs = {axis: idx}
            obj = self._construct(4, **kwargs)

            for arg in args:
                # rename a single axis
                result = obj.rename(**{axis: arg})
                expected = obj.copy()
                setattr(expected, axis, list("abcd"))
                self._compare(result, expected)

        # multiple axes at once
    def test_get_numeric_data(self):
        """_get_numeric_data / _get_bool_data select the matching columns."""
        n = 4
        kwargs = {
            self._typ._get_axis_name(i): list(range(n)) for i in range(self._ndim)
        }

        # get the numeric data
        o = self._construct(n, **kwargs)
        result = o._get_numeric_data()
        self._compare(result, o)

        # non-inclusion
        result = o._get_bool_data()
        expected = self._construct(n, value="empty", **kwargs)
        self._compare(result, expected)

        # get the bool data
        arr = np.array([True, True, False, True])
        o = self._construct(n, value=arr, **kwargs)
        result = o._get_numeric_data()
        self._compare(result, o)

        # _get_numeric_data is includes _get_bool_data, so can't test for
        # non-inclusion
def test_nonzero(self):
    """bool() and the boolean operators on an NDFrame must raise ValueError (GH 4633)."""
    msg = f"The truth value of a {self._typ.__name__} is ambiguous"

    def _assert_ambiguous(obj):
        # truthiness is ambiguous whatever the underlying data is
        with pytest.raises(ValueError, match=msg):
            bool(obj == 0)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 1)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

    _assert_ambiguous(self._construct(shape=4))
    _assert_ambiguous(self._construct(shape=4, value=1))
    _assert_ambiguous(self._construct(shape=4, value=np.nan))

    # empty
    with pytest.raises(ValueError, match=msg):
        bool(self._construct(shape=0))

    # invalid behaviors
    obj1 = self._construct(shape=4, value=1)
    obj2 = self._construct(shape=4, value=1)

    with pytest.raises(ValueError, match=msg):
        if obj1:
            pass
    with pytest.raises(ValueError, match=msg):
        obj1 and obj2
    with pytest.raises(ValueError, match=msg):
        obj1 or obj2
    with pytest.raises(ValueError, match=msg):
        not obj1
def test_downcast(self):
    """Block-manager downcast() round-trips both integer and float data unchanged."""
    # test close downcasting
    for kwargs in ({"value": 9, "dtype": np.int64}, {"value": 9.5}):
        orig = self._construct(shape=4, **kwargs)
        result = orig.copy()
        result._mgr = orig._mgr.downcast()
        self._compare(result, orig)
def test_constructor_compound_dtypes(self):
    """Compound (structured) dtypes raise NotImplementedError in the constructor (GH 5191)."""

    def construct_with(dtype):
        return self._construct(shape=3, value=1, dtype=dtype)

    msg = (
        "compound dtypes are not implemented "
        f"in the {self._typ.__name__} constructor"
    )
    with pytest.raises(NotImplementedError, match=msg):
        construct_with([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])

    # these work (though results may be unexpected)
    construct_with("int64")
    construct_with("float64")
    construct_with("M8[ns]")
def check_metadata(self, x, y=None):
    """Assert every ``_metadata`` attribute of ``x`` matches ``y`` (or is unset when y is None)."""
    for attr in x._metadata:
        value = getattr(x, attr, None)
        if y is None:
            assert value is None
        else:
            assert value == getattr(y, attr, None)
def test_metadata_propagation(self):
    """Ops with self/scalars preserve ``_metadata``; ops between different objects drop it."""
    # check that the metadata matches up on the resulting ops

    o = self._construct(shape=3)
    o.name = "foo"
    o2 = self._construct(shape=3)
    o2.name = "bar"

    # ----------
    # preserving
    # ----------

    # simple ops with scalars
    for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
        result = getattr(o, op)(1)
        self.check_metadata(o, result)

    # ops with like
    for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
        result = getattr(o, op)(o)
        self.check_metadata(o, result)

    # simple boolean
    for op in ["__eq__", "__le__", "__ge__"]:
        v1 = getattr(o, op)(o)
        self.check_metadata(o, v1)
        self.check_metadata(o, v1 & v1)
        self.check_metadata(o, v1 | v1)

    # combine_first
    result = o.combine_first(o2)
    self.check_metadata(o, result)

    # ---------------------------
    # non-preserving (by default)
    # ---------------------------

    # add non-like
    result = o + o2
    # called with a single argument: metadata must be unset on the result
    self.check_metadata(result)

    # simple boolean
    for op in ["__eq__", "__le__", "__ge__"]:

        # this is a name matching op
        v1 = getattr(o, op)(o)
        v2 = getattr(o, op)(o2)
        self.check_metadata(v2)
        self.check_metadata(v1 & v2)
        self.check_metadata(v1 | v2)
def test_head_tail(self, index):
    """head()/tail() match positional slicing for default, 0, over-length and negative n (GH 5370)."""
    # GH5370

    o = self._construct(shape=len(index))

    axis = o._get_axis_name(0)
    setattr(o, axis, index)

    o.head()

    self._compare(o.head(), o.iloc[:5])
    self._compare(o.tail(), o.iloc[-5:])

    # 0-len
    self._compare(o.head(0), o.iloc[0:0])
    self._compare(o.tail(0), o.iloc[0:0])

    # bounded
    self._compare(o.head(len(o) + 1), o)
    self._compare(o.tail(len(o) + 1), o)

    # neg index
    self._compare(o.head(-3), o.head(len(index) - 3))
    self._compare(o.tail(-3), o.tail(len(index) - 3))
def test_sample(self):
    """sample(): random_state reproducibility, n/frac validation and weights handling."""
    # Fixes issue: 2419

    o = self._construct(shape=10)

    ###
    # Check behavior of random_state argument
    ###

    # Check for stability when receives seed or random state -- run 10
    # times.
    for test in range(10):
        seed = np.random.randint(0, 100)
        self._compare(
            o.sample(n=4, random_state=seed), o.sample(n=4, random_state=seed)
        )

        self._compare(
            o.sample(frac=0.7, random_state=seed),
            o.sample(frac=0.7, random_state=seed),
        )

        self._compare(
            o.sample(n=4, random_state=np.random.RandomState(test)),
            o.sample(n=4, random_state=np.random.RandomState(test)),
        )

        self._compare(
            o.sample(frac=0.7, random_state=np.random.RandomState(test)),
            o.sample(frac=0.7, random_state=np.random.RandomState(test)),
        )

        self._compare(
            o.sample(
                frac=2, replace=True, random_state=np.random.RandomState(test)
            ),
            o.sample(
                frac=2, replace=True, random_state=np.random.RandomState(test)
            ),
        )

        os1, os2 = [], []
        for _ in range(2):
            np.random.seed(test)
            os1.append(o.sample(n=4))
            os2.append(o.sample(frac=0.7))
        self._compare(*os1)
        self._compare(*os2)

    # Check for error when random_state argument invalid.
    with pytest.raises(ValueError):
        o.sample(random_state="astring!")

    ###
    # Check behavior of `frac` and `N`
    ###

    # Giving both frac and N throws error
    with pytest.raises(ValueError):
        o.sample(n=3, frac=0.3)

    # Check that raises right error for negative lengths
    with pytest.raises(ValueError):
        o.sample(n=-3)
    with pytest.raises(ValueError):
        o.sample(frac=-0.3)

    # Make sure float values of `n` give error
    with pytest.raises(ValueError):
        o.sample(n=3.2)

    # Check lengths are right
    # BUG FIX: the closing parenthesis was misplaced -- ``len(o.sample(n=4) == 4)``
    # took the length of an elementwise comparison (always truthy), so the
    # sample size was never actually checked.
    assert len(o.sample(n=4)) == 4
    assert len(o.sample(frac=0.34)) == 3
    assert len(o.sample(frac=0.36)) == 4

    ###
    # Check weights
    ###

    # Weight length must be right
    with pytest.raises(ValueError):
        o.sample(n=3, weights=[0, 1])

    with pytest.raises(ValueError):
        bad_weights = [0.5] * 11
        o.sample(n=3, weights=bad_weights)

    with pytest.raises(ValueError):
        bad_weight_series = Series([0, 0, 0.2])
        o.sample(n=4, weights=bad_weight_series)

    # Check won't accept negative weights
    with pytest.raises(ValueError):
        bad_weights = [-0.1] * 10
        o.sample(n=3, weights=bad_weights)

    # Check inf and -inf throw errors:

    with pytest.raises(ValueError):
        weights_with_inf = [0.1] * 10
        weights_with_inf[0] = np.inf
        o.sample(n=3, weights=weights_with_inf)

    with pytest.raises(ValueError):
        weights_with_ninf = [0.1] * 10
        weights_with_ninf[0] = -np.inf
        o.sample(n=3, weights=weights_with_ninf)

    # All zeros raises errors

    zero_weights = [0] * 10
    with pytest.raises(ValueError):
        o.sample(n=3, weights=zero_weights)

    # All missing weights

    nan_weights = [np.nan] * 10
    with pytest.raises(ValueError):
        o.sample(n=3, weights=nan_weights)

    # Check np.nan are replaced by zeros.
    weights_with_nan = [np.nan] * 10
    weights_with_nan[5] = 0.5
    self._compare(o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])

    # Check None are also replaced by zeros.
    weights_with_None = [None] * 10
    weights_with_None[5] = 0.5
    self._compare(o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
def test_sample_upsampling_without_replacement(self):
    """frac > 1 without replace=True must raise (GH 27451)."""
    # GH27451
    frame = pd.DataFrame({"A": list("abc")})
    msg = (
        "Replace has to be set to `True` when "
        "upsampling the population `frac` > 1."
    )
    with pytest.raises(ValueError, match=msg):
        frame.sample(frac=2, replace=False)
def test_sample_is_copy(self):
# GH-27357, GH-30784: ensure the result of sample is an actual copy and
# doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
df = pd.DataFrame(np.random.randn(10, 3), columns=["a", "b", "c"])
df2 = df.sample(3)
with tm.assert_produces_warning(None):
df2["d"] = 1
def test_size_compat(self):
    """``size`` equals the product of the shape (GH 8846)."""
    obj = self._construct(shape=10)
    assert obj.size == np.prod(obj.shape)
    assert obj.size == 10 ** len(obj.axes)
def test_split_compat(self):
    """np.array_split works on NDFrames (xref GH 8846)."""
    obj = self._construct(shape=10)
    assert len(np.array_split(obj, 5)) == 5
    assert len(np.array_split(obj, 2)) == 2
# See gh-12301
def test_stat_unexpected_keyword(self):
    """Reductions reject unknown keyword arguments with TypeError (gh-12301)."""
    obj = self._construct(5)
    starwars = "Star Wars"
    errmsg = "unexpected keyword"

    with pytest.raises(TypeError, match=errmsg):
        obj.max(epic=starwars)  # stat_function
    with pytest.raises(TypeError, match=errmsg):
        obj.var(epic=starwars)  # stat_function_ddof
    with pytest.raises(TypeError, match=errmsg):
        obj.sum(epic=starwars)  # cum_function
    with pytest.raises(TypeError, match=errmsg):
        obj.any(epic=starwars)  # logical_function
@pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
def test_api_compat(self, func):
    """Bound reduction methods expose a proper __name__/__qualname__ (GH 12021)."""
    # GH 12021
    # compat for __name__, __qualname__
    obj = self._construct(5)
    bound = getattr(obj, func)
    assert bound.__name__ == func
    assert bound.__qualname__.endswith(func)
def test_stat_non_defaults_args(self):
    """Reductions reject the numpy-compat ``out`` parameter with ValueError."""
    obj = self._construct(5)
    out = np.array([0])
    errmsg = "the 'out' parameter is not supported"

    with pytest.raises(ValueError, match=errmsg):
        obj.max(out=out)  # stat_function
    with pytest.raises(ValueError, match=errmsg):
        obj.var(out=out)  # stat_function_ddof
    with pytest.raises(ValueError, match=errmsg):
        obj.sum(out=out)  # cum_function
    with pytest.raises(ValueError, match=errmsg):
        obj.any(out=out)  # logical_function
def test_truncate_out_of_bounds(self):
    """truncate() bounds outside the index are clipped, not an error (GH 11382)."""
    # GH11382
    # (size, an `after` beyond the end, an `after` exactly at the end)
    for size, beyond_end, at_end in ((int(2e3), 3e3, 2e3), (int(2e6), 3e6, 2e6)):
        shape = [size] + [1] * (self._ndim - 1)
        obj = self._construct(shape, dtype="int8", value=1)
        self._compare(obj.truncate(), obj)
        self._compare(obj.truncate(before=0, after=beyond_end), obj)
        self._compare(obj.truncate(before=-1, after=at_end), obj)
@pytest.mark.parametrize(
    "func",
    [copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("shape", [0, 1, 2])
def test_copy_and_deepcopy(self, shape, func):
    """copy.copy/deepcopy and .copy(deep=...) all return a distinct, equal object (GH 15444)."""
    # GH 15444
    obj = self._construct(shape)
    obj_copy = func(obj)
    assert obj_copy is not obj
    self._compare(obj_copy, obj)
@pytest.mark.parametrize(
    "periods,fill_method,limit,exp",
    [
        (1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
        (1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
        (1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
        (1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
        (-1, "ffill", None, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, 0, np.nan]),
        (-1, "ffill", 1, [np.nan, np.nan, -0.5, -0.5, -0.6, 0, np.nan, np.nan]),
        (-1, "bfill", None, [0, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
        (-1, "bfill", 1, [np.nan, 0, -0.5, -0.5, -0.6, np.nan, np.nan, np.nan]),
    ],
)
def test_pct_change(self, periods, fill_method, limit, exp):
    """pct_change() honors periods sign, fill_method and limit around NaNs."""
    vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
    obj = self._typ(vals)
    func = getattr(obj, "pct_change")
    res = func(periods=periods, fill_method=fill_method, limit=limit)
    # compare against the right container type for the boxed class
    if type(obj) is DataFrame:
        tm.assert_frame_equal(res, DataFrame(exp))
    else:
        tm.assert_series_equal(res, Series(exp))
class TestNDFrame:
    # tests that don't fit elsewhere

    def test_sample(self):
        # BUG FIX: the first parameter was misspelled ``sel``; it only worked
        # because pytest passes the instance positionally.
        """DataFrame/Series-specific sample(): weight columns, axis handling, weight alignment."""
        # Fixes issue: 2419
        # additional specific object based tests

        # A few dataframe test with degenerate weights.
        easy_weight_list = [0] * 10
        easy_weight_list[5] = 1

        df = pd.DataFrame(
            {
                "col1": range(10, 20),
                "col2": range(20, 30),
                "colString": ["a"] * 10,
                "easyweights": easy_weight_list,
            }
        )
        sample1 = df.sample(n=1, weights="easyweights")
        tm.assert_frame_equal(sample1, df.iloc[5:6])

        # Ensure proper error if string given as weight for Series or
        # DataFrame with axis = 1.
        s = Series(range(10))
        with pytest.raises(ValueError):
            s.sample(n=3, weights="weight_column")

        with pytest.raises(ValueError):
            df.sample(n=1, weights="weight_column", axis=1)

        # Check weighting key error
        with pytest.raises(
            KeyError, match="'String passed to weights not a valid column'"
        ):
            df.sample(n=3, weights="not_a_real_column_name")

        # Check that re-normalizes weights that don't sum to one.
        weights_less_than_1 = [0] * 10
        weights_less_than_1[0] = 0.5
        tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])

        ###
        # Test axis argument
        ###

        # Test axis argument
        df = pd.DataFrame({"col1": range(10), "col2": ["a"] * 10})
        second_column_weight = [0, 1]
        tm.assert_frame_equal(
            df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
        )

        # Different axis arg types
        tm.assert_frame_equal(
            df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
        )

        weight = [0] * 10
        weight[5] = 0.5
        tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
        tm.assert_frame_equal(
            df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
        )

        # Check out of range axis values
        with pytest.raises(ValueError):
            df.sample(n=1, axis=2)

        with pytest.raises(ValueError):
            df.sample(n=1, axis="not_a_name")

        with pytest.raises(ValueError):
            s = Series(range(10))
            s.sample(n=1, axis=1)

        # Test weight length compared to correct axis
        with pytest.raises(ValueError):
            df.sample(n=1, axis=1, weights=[0.5] * 10)

        # Check weights with axis = 1
        easy_weight_list = [0] * 3
        easy_weight_list[2] = 1

        df = pd.DataFrame(
            {"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
        )
        sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
        tm.assert_frame_equal(sample1, df[["colString"]])

        # Test default axes
        tm.assert_frame_equal(
            df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
        )

        # Test that function aligns weights with frame
        df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
        s = Series([1, 0, 0], index=[3, 5, 9])
        tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))

        # Weights have index values to be dropped because not in
        # sampled DataFrame
        s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
        tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))

        # Weights have empty values to be filled with zeros
        s3 = Series([0.01, 0], index=[3, 5])
        tm.assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))

        # No overlap in weight and sampled DataFrame indices
        s4 = Series([1, 0], index=[1, 2])
        with pytest.raises(ValueError):
            df.sample(1, weights=s4)
@pytest.mark.parametrize(
    "func_str,arg",
    [
        ("np.array", [2, 3, 1, 0]),
        pytest.param(
            "np.random.MT19937",
            3,
            marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"),
        ),
        pytest.param(
            "np.random.PCG64",
            11,
            marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"),
        ),
    ],
)
def test_sample_random_state(self, func_str, arg):
    """sample() accepts arrays and NumPy BitGenerators as random_state (GH 32503)."""
    # GH32503
    df = pd.DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
    # eval builds the random_state object named by the parametrized string
    result = df.sample(n=3, random_state=eval(func_str)(arg))
    expected = df.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
    tm.assert_frame_equal(result, expected)
def test_squeeze(self):
    """squeeze() drops length-1 axes; no-op otherwise; axis= limits which axes collapse."""
    # noop
    for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
        tm.assert_series_equal(s.squeeze(), s)
    for df in [tm.makeTimeDataFrame()]:
        tm.assert_frame_equal(df.squeeze(), df)

    # squeezing
    df = tm.makeTimeDataFrame().reindex(columns=["A"])
    tm.assert_series_equal(df.squeeze(), df["A"])

    # don't fail with 0 length dimensions GH11229 & GH8999
    empty_series = Series([], name="five", dtype=np.float64)
    empty_frame = DataFrame([empty_series])
    tm.assert_series_equal(empty_series, empty_series.squeeze())
    tm.assert_series_equal(empty_series, empty_frame.squeeze())

    # axis argument
    df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
    assert df.shape == (1, 1)
    tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
    tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
    tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
    tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
    # both axes length 1 -> squeezes to a scalar
    assert df.squeeze() == df.iloc[0, 0]
    msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        df.squeeze(axis=2)
    msg = "No axis named x for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        df.squeeze(axis="x")

    df = tm.makeTimeDataFrame(3)
    tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
    """np.squeeze dispatches to NDFrame.squeeze."""
    ser = tm.makeFloatSeries()
    tm.assert_series_equal(np.squeeze(ser), ser)

    single_col = tm.makeTimeDataFrame().reindex(columns=["A"])
    tm.assert_series_equal(np.squeeze(single_col), single_col["A"])
def test_transpose(self):
    """transpose() is a no-op for Series and an involution for DataFrame."""
    for ser in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
        # calls implementation in pandas/core/base.py
        tm.assert_series_equal(ser.transpose(), ser)
    frame = tm.makeTimeDataFrame()
    tm.assert_frame_equal(frame.transpose().transpose(), frame)
def test_numpy_transpose(self):
    """np.transpose dispatches to NDFrame; the numpy ``axes`` kwarg is rejected."""
    msg = "the 'axes' parameter is not supported"

    s = tm.makeFloatSeries()
    tm.assert_series_equal(np.transpose(s), s)

    with pytest.raises(ValueError, match=msg):
        np.transpose(s, axes=1)

    df = tm.makeTimeDataFrame()
    tm.assert_frame_equal(np.transpose(np.transpose(df)), df)

    with pytest.raises(ValueError, match=msg):
        np.transpose(df, axes=1)
def test_take(self):
    """take() selects rows by positional (possibly negative) indices."""
    indices = [1, 5, -2, 6, 3, -1]

    for ser in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
        result = ser.take(indices)
        expected = Series(
            data=ser.values.take(indices), index=ser.index.take(indices), dtype=ser.dtype
        )
        tm.assert_series_equal(result, expected)

    frame = tm.makeTimeDataFrame()
    result = frame.take(indices)
    expected = DataFrame(
        data=frame.values.take(indices, axis=0),
        index=frame.index.take(indices),
        columns=frame.columns,
    )
    tm.assert_frame_equal(result, expected)
def test_take_invalid_kwargs(self):
    """take() rejects unknown kwargs and the numpy-compat ``out``/``mode`` parameters."""
    indices = [-3, 2, 0, 1]
    s = tm.makeFloatSeries()
    df = tm.makeTimeDataFrame()

    for obj in (s, df):
        msg = r"take\(\) got an unexpected keyword argument 'foo'"
        with pytest.raises(TypeError, match=msg):
            obj.take(indices, foo=2)

        msg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            obj.take(indices, out=indices)

        msg = "the 'mode' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            obj.take(indices, mode="clip")
@pytest.mark.parametrize("is_copy", [True, False])
def test_depr_take_kwarg_is_copy(self, is_copy):
    """Passing ``is_copy`` to take() warns FutureWarning with the exact message (GH 27357)."""
    # GH 27357
    df = DataFrame({"A": [1, 2, 3]})
    msg = (
        "is_copy is deprecated and will be removed in a future version. "
        "'take' always returns a copy, so there is no need to specify this."
    )
    with tm.assert_produces_warning(FutureWarning) as w:
        df.take([0, 1], is_copy=is_copy)

    assert w[0].message.args[0] == msg

    s = Series([1, 2, 3])
    with tm.assert_produces_warning(FutureWarning):
        s.take([0, 1], is_copy=is_copy)
def test_equals(self):
    """equals() is True for identical data (incl. NaNs), False for dtype/index/column diffs."""
    # Add object dtype column with nans
    index = np.random.random(10)
    df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
    df1["text"] = "the sky is so blue. we could use more chocolate.".split()
    df1["start"] = date_range("2000-1-1", periods=10, freq="T")
    df1["end"] = date_range("2000-1-1", periods=10, freq="D")
    df1["diff"] = df1["end"] - df1["start"]
    df1["bool"] = np.arange(10) % 3 == 0
    # inject NaNs so equals() must treat NaN == NaN as equal
    df1.loc[::2] = np.nan
    df2 = df1.copy()
    assert df1["text"].equals(df2["text"])
    assert df1["start"].equals(df2["start"])
    assert df1["end"].equals(df2["end"])
    assert df1["diff"].equals(df2["diff"])
    assert df1["bool"].equals(df2["bool"])
    assert df1.equals(df2)
    # comparing against a non-NDFrame is simply unequal, not an error
    assert not df1.equals(object)

    # different dtype
    different = df1.copy()
    different["floats"] = different["floats"].astype("float32")
    assert not df1.equals(different)

    # different index
    different_index = -index
    different = df2.set_index(different_index)
    assert not df1.equals(different)

    # different columns
    different = df2.copy()
    different.columns = df2.columns[::-1]
    assert not df1.equals(different)

    # DatetimeIndex
    index = pd.date_range("2000-1-1", periods=10, freq="T")
    df1 = df1.set_index(index)
    df2 = df1.copy()
    assert df1.equals(df2)

    # MultiIndex
    df3 = df1.set_index(["text"], append=True)
    df2 = df1.set_index(["text"], append=True)
    assert df3.equals(df2)

    df2 = df1.set_index(["floats"], append=True)
    assert not df3.equals(df2)

    # NaN in index
    df3 = df1.set_index(["floats"], append=True)
    df2 = df1.set_index(["floats"], append=True)
    assert df3.equals(df2)
def test_pipe(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected = DataFrame({"A": [1, 4, 9]})
tm.assert_frame_equal(result, expected)
result = df.A.pipe(f, 2)
tm.assert_series_equal(result, expected.A)
def test_pipe_tuple(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
result = df.pipe((f, "y"), 0)
tm.assert_frame_equal(result, df)
result = df.A.pipe((f, "y"), 0)
tm.assert_series_equal(result, df.A)
def test_pipe_tuple_error(self):
    """Supplying the tuple-designated keyword explicitly as well must raise."""
    df = DataFrame({"A": [1, 2, 3]})

    def second(x, y):
        return y

    with pytest.raises(ValueError):
        df.pipe((second, "y"), x=1, y=0)
    with pytest.raises(ValueError):
        df.A.pipe((second, "y"), x=1, y=0)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_axis_classmethods(self, box):
    """Instance-level axis resolution agrees with the classmethod results."""
    obj = box(dtype=object)
    values = box._AXIS_TO_AXIS_NUMBER.keys()
    for v in values:
        assert obj._get_axis_number(v) == box._get_axis_number(v)
        assert obj._get_axis_name(v) == box._get_axis_name(v)
        assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_axis_names_deprecated(self, box):
    """Accessing ``_AXIS_NAMES`` warns FutureWarning (GH 33637)."""
    # GH33637
    obj = box(dtype=object)
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        obj._AXIS_NAMES
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_axis_numbers_deprecated(self, box):
    """Accessing ``_AXIS_NUMBERS`` warns FutureWarning (GH 33637)."""
    # GH33637
    obj = box(dtype=object)
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        obj._AXIS_NUMBERS
@pytest.mark.parametrize("as_frame", [True, False])
def test_flags_identity(self, as_frame):
    """.flags is cached per object and not shared with copies."""
    obj = Series([1, 2])
    if as_frame:
        obj = obj.to_frame()

    # repeated access yields the very same Flags object ...
    assert obj.flags is obj.flags
    # ... but a copy gets its own
    duplicate = obj.copy()
    assert duplicate.flags is not obj.flags
| 33.856667 | 88 | 0.557382 |
86ef13773855653873828dcc4647225bf877730f | 1,394 | py | Python | addons/oejia_wx/models/res_partner.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | addons/oejia_wx/models/res_partner.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | addons/oejia_wx/models/res_partner.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | 1 | 2021-05-05T07:59:08.000Z | 2021-05-05T07:59:08.000Z | # coding=utf-8
import logging
from openerp import models, fields, api
_logger = logging.getLogger(__name__)
class res_partner(models.Model):
    """res.partner extended with links to WeChat and WeChat-Work (corp) users."""

    _inherit = 'res.partner'

    # linked enterprise-WeChat (corp) account
    wxcorp_user_id = fields.Many2one('wx.corpuser', '关联企业号用户')
    # linked personal WeChat account
    wx_user_id = fields.Many2one('wx.user', '关联微信用户')

    def send_corp_msg(self, msg):
        """Send ``msg`` to this partner's corp user through the WeChat-Work client.

        ``msg['mtype']`` selects the payload: "text", "card", "image" or
        "voice"; media types are uploaded first to obtain a media_id.
        """
        from ..rpc import corp_client
        entry = corp_client.corpenv(self.env)
        mtype = msg["mtype"]
        # CONSISTENCY FIX: mtype values are mutually exclusive, so the whole
        # dispatch is one if/elif chain (the "card" branch was a separate
        # ``if``, breaking the chain the image/voice branches hung off).
        if mtype == "text":
            entry.client.message.send_text(entry.current_agent, self.wxcorp_user_id.userid, msg["content"])
        elif mtype == "card":
            entry.client.message.send_text_card(entry.current_agent, self.wxcorp_user_id.userid, msg['title'], msg['description'], msg['url'], btntxt=msg.get("btntxt", "详情"))
        elif mtype == 'image':
            ret = entry.client.media.upload(mtype, msg['media_data'])
            entry.client.message.send_image(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])
        elif mtype == 'voice':
            ret = entry.client.media.upload(mtype, msg['media_data'])
            entry.client.message.send_voice(entry.current_agent, self.wxcorp_user_id.userid, ret['media_id'])

    def get_corp_key(self):
        """Return the corp user's ``userid``, or None when no corp user is linked."""
        if self.wxcorp_user_id:
            return self.wxcorp_user_id.userid

    def get_wx_key(self):
        """Return the WeChat user's ``openid``, or None when no WeChat user is linked."""
        if self.wx_user_id:
            return self.wx_user_id.openid
f649728f39998745d4a24fcfe28d7039baaea7ad | 4,315 | py | Python | VSR/Models/Dcscn.py | johnnylili/VideoSuperResolution | 3f7142167b521ae739e7e0414c3c1cb3a82d9041 | [
"MIT"
] | null | null | null | VSR/Models/Dcscn.py | johnnylili/VideoSuperResolution | 3f7142167b521ae739e7e0414c3c1cb3a82d9041 | [
"MIT"
] | null | null | null | VSR/Models/Dcscn.py | johnnylili/VideoSuperResolution | 3f7142167b521ae739e7e0414c3c1cb3a82d9041 | [
"MIT"
] | 1 | 2020-02-25T16:12:05.000Z | 2020-02-25T16:12:05.000Z | """
Copyright: Intel Corp. 2018
Author: Wenyi Tang
Email: wenyi.tang@intel.com
Created Date: May 23rd 2018
Updated Date: June 15th 2018
Implementing Fast and Accurate Image Super Resolution by
Deep CNN with Skip Connection and Network in Network
See https://arxiv.org/abs/1707.05425
"""
from ..Framework.SuperResolution import SuperResolution
from ..Util.Utility import *
import tensorflow as tf
class DCSCN(SuperResolution):
    """DCSCN super-resolution network (https://arxiv.org/abs/1707.05425).

    A stack of 3x3 feature-extraction convs with decaying widths whose
    outputs are all concatenated (skip connections), a two-branch
    Network-in-Network bottleneck, sub-pixel upsampling, and a small
    reconstruction head whose output is added to a bicubic-upscaled input.
    """

    def __init__(self,
                 layers,
                 reconstruction_layers,
                 filters,
                 min_filters,
                 nin_filter,
                 reconst_filter,
                 filters_decay_gamma,
                 drop_out,
                 name='dcscn',
                 **kwargs):
        # layers: depth of the feature-extraction stack
        self.layers = layers
        # reconstruction_layers: number of convs after the sub-pixel shuffle
        self.reconstruction_layers = reconstruction_layers
        # filters / min_filters: first/last extraction widths; intermediate
        # widths follow a power-law decay controlled by filters_decay_gamma
        self.filters = filters
        self.min_filters = min_filters
        # nin_filter: (branch-a width, branch-b width) of the NIN block
        self.nin_filter = nin_filter
        self.reconst_filter = reconst_filter
        self.filters_decay_gamma = filters_decay_gamma
        # drop_out: dropout keep-probability applied only while training
        self.drop_out = drop_out
        self.name = name
        super(DCSCN, self).__init__(**kwargs)

    def build_graph(self):
        """Build the forward graph; appends the SR estimate to ``self.outputs``.

        NOTE(review): source indentation was lost -- the whole body is assumed
        to live inside the model's variable scope; confirm against upstream.
        """
        with tf.variable_scope(self.name):
            super(DCSCN, self).build_graph()
            # bicubic upscale of the input acts as a global residual branch
            shape_enlarged = tf.shape(self.inputs_preproc[-1])[1:3]
            shape_enlarged = shape_enlarged * self.scale
            bic = tf.image.resize_bicubic(self.inputs_preproc[-1], shape_enlarged)
            x = [self.inputs_preproc[-1]]
            # keep-probability is forced to 1.0 outside of training
            drop_out = tf.cond(self.training_phase, lambda: self.drop_out, lambda: 1.0)
            for i in range(self.layers):
                if self.min_filters != 0 and i > 0:
                    # power-law decay from `filters` down to `min_filters`
                    x1 = i / float(self.layers - 1)
                    y1 = pow(x1, 1.0 / self.filters_decay_gamma)
                    output_feature_num = int((self.filters - self.min_filters) * (1 - y1) + self.min_filters)
                # NOTE(review): output_feature_num is unset on the first
                # iteration (and whenever min_filters == 0) -- an initializer
                # such as ``output_feature_num = self.filters`` appears to be
                # missing from this extraction; confirm against upstream.
                nn = self.conv2d(x[-1], output_feature_num, 3, activation='relu', use_batchnorm=True,
                                 kernel_initializer='he_normal', kernel_regularizer='l2')
                x.append(tf.nn.dropout(nn, drop_out))
            # skip connections: concatenate every intermediate feature map
            concat_x = tf.concat(x, axis=-1)
            with tf.variable_scope('NIN'):
                # branch A: 1x1 conv; branch B: 1x1 conv followed by 3x3 conv
                a1 = self.conv2d(concat_x, self.nin_filter[0], 1, activation='relu', use_batchnorm=True,
                                 kernel_initializer='he_normal', kernel_regularizer='l2')
                b1 = self.conv2d(concat_x, self.nin_filter[1], 1, activation='relu', use_batchnorm=True,
                                 kernel_initializer='he_normal', kernel_regularizer='l2')
                b2 = self.conv2d(b1, self.nin_filter[1], 3, activation='relu', use_batchnorm=True,
                                 kernel_initializer='he_normal', kernel_regularizer='l2')
                concat_nin = tf.concat([a1, b2], axis=-1)
            # project to scale_x * scale_y channels, then sub-pixel shuffle up
            ps = self.conv2d(concat_nin, self.scale[0] * self.scale[1], 3, kernel_initializer='he_normal',
                             kernel_regularizer='l2')
            ps = pixel_shift(ps, self.scale, 1)
            with tf.variable_scope('Reconstruction'):
                for i in range(self.reconstruction_layers - 1):
                    ps = self.conv2d(ps, self.reconst_filter, 3, activation='relu', kernel_initializer='he_normal',
                                     kernel_regularizer='l2')
                    ps = tf.nn.dropout(ps, drop_out)
                outputs = self.conv2d(ps, 1, 3, kernel_initializer='he_normal', kernel_regularizer='l2')
            # residual: network predicts the correction over the bicubic image
            self.outputs.append(outputs + bic)

    def build_loss(self):
        """Attach the MSE training loss plus PSNR/SSIM evaluation metrics."""
        with tf.name_scope('loss'):
            mse, loss = super(DCSCN, self).build_loss()
            self.train_metric['loss'] = loss
            self.metrics['mse'] = mse
            self.metrics['psnr'] = tf.reduce_mean(tf.image.psnr(self.label[-1], self.outputs[-1], max_val=255))
            self.metrics['ssim'] = tf.reduce_mean(tf.image.ssim(self.label[-1], self.outputs[-1], max_val=255))

    def build_summary(self):
        """Register TensorBoard scalar summaries for the tracked metrics."""
        tf.summary.scalar('training_loss', self.train_metric['loss'])
        tf.summary.scalar('mse', self.metrics['mse'])
        tf.summary.scalar('psnr', self.metrics['psnr'])
        tf.summary.scalar('ssim', self.metrics['ssim'])
b19ce9596494ea4bc56a8bacd20bd28ffb4eae96 | 4,881 | py | Python | app/biowl/dsl/parser.py | mainulhossain/biowl | 039adc96539fae25843b1fc36074a4e5e55830ec | [
"MIT"
] | null | null | null | app/biowl/dsl/parser.py | mainulhossain/biowl | 039adc96539fae25843b1fc36074a4e5e55830ec | [
"MIT"
] | null | null | null | app/biowl/dsl/parser.py | mainulhossain/biowl | 039adc96539fae25843b1fc36074a4e5e55830ec | [
"MIT"
] | 1 | 2020-01-05T10:47:21.000Z | 2020-01-05T10:47:21.000Z | from sys import stdin, stdout, stderr, argv, exit
import os
import json
import sys
import code
from pyparsing import *
from .grammar import PythonGrammar
from .context import Context
from .interpreter import Interpreter
from .pygen import CodeGenerator
class PhenoWLParser(object):
    '''
    The parser for PhenoWL DSL.
    '''

    def __init__(self, grammar=None):
        # BUG FIX: the old default instantiated ``PhenoWLGrammar``, a name
        # never imported or defined in this module (only PythonGrammar is
        # imported), so omitting ``grammar`` raised NameError.
        self.grammar = grammar if grammar else PythonGrammar()
        self.tokens = ParseResults()
        self.err = []

    def error(self, *args):
        """Record a comma-joined, stringified error message in ``self.err``."""
        self.err.append("{0}".format(', '.join(map(str, args))))

    def parse(self, text):
        """Parse a whole program string; return the tokens, or None after recording the error."""
        try:
            self.tokens = self.grammar.program.ignore(pythonStyleComment).parseString(text, parseAll=True)
            return self.tokens
        except Exception as err:
            # ParseException is an Exception subclass and both former handlers
            # were identical, so one handler preserves the old behavior.
            print(err)
            self.error(err)

    def parse_subgrammar(self, subgrammer, text):
        """Parse ``text`` with a caller-supplied grammar fragment instead of the full program."""
        try:
            self.tokens = subgrammer.ignore(pythonStyleComment).parseString(text, parseAll=True)
            return self.tokens
        except Exception as err:
            print(err)
            self.error(err)

    def parse_file(self, filename):
        """Parse a program read from ``filename``.

        NOTE: unlike parse()/parse_subgrammar(), a syntax error here exits the
        process with status 3 -- kept as-is for CLI backward compatibility.
        """
        try:
            self.tokens = self.grammar.program.ignore(pythonStyleComment).parseFile(filename, parseAll=True)
            return self.tokens
        except ParseException as err:
            print(err)
            exit(3)
        except Exception as err:
            print(err)
            self.error(err)
if __name__ == "__main__":
    # Ad-hoc smoke test: parse a PhenoWL program (a file from argv, or the
    # embedded sample below), print the tokens, then interpret it -- timing
    # the whole run with the project Timer context manager.
    from ..timer import Timer

    with Timer() as t:
        p = PhenoWLParser(PythonGrammar())
        if len(sys.argv) > 1:
            tokens = p.parse_file(sys.argv[1])
        else:
            # Scratchpad of sample PhenoWL programs; most lines are commented
            # out inside the DSL source itself (the DSL uses '#' comments).
            test_program_example = """
#shippi.RegisterImage('127.0.0.1', 'phenodoop', 'sr-hadoop', '/home/phenodoop/phenowl/storage/images', '/home/phenodoop/phenowl/storage/output')
# GetFolders('/')
# CreateFolder('/images/img')
# x = 10
# y = 10
# z = 30
# for k in range(1,10):
# p =30
# q = 40
# if x <= 20:
# r = 40
# s = 50
# if y >= 10:
# t = 60
# s = 70
# print(z)
# if p < q:
# print(p + 5)
# task sparktest(s, u, p):
# GetTools()
# sparktest('server', 'user', 'password')
# parallel:
# x = 10
# q = x
# print(q)
# with:
# y = 20
# p = y
# print(p)
# task ('http://sr-p2irc-big8.usask.ca:8080', '7483fa940d53add053903042c39f853a'):
# ws = GetHistoryIDs()
# print(len(ws))
# l = len(ws)
# if l > 0:
# print(ws[0])
# w = GetHistory(ws[0])
# r = Upload(w['id'], '/home/phenodoop/phenowl/storage/texts/test.txt')
# print(r)
#print(w)
#print(len(w))
#print(w)
#print(w['name'])
#result = SearchEntrez("Myb AND txid3702[ORGN] AND 0:6000[SLEN]", "nucleotide")
#print(result)
# s = 10
# t = "15" + "16"
# print(t)
# task ('http://sr-p2irc-big8.usask.ca:8080', '7483fa940d53add053903042c39f853a'):
# history_id = CreateHistory('Galaxy Pipeline')
# dataset_id = FtpToHistory('ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR034/SRR034608/SRR034608.fastq.gz', history_id)
# tool_id = ToolNameToID('FASTQ Groomer')
# ref_dataset_id = HttpToHistory('http://rice.plantbiology.msu.edu/pub/data/Eukaryotic_Projects/o_sativa/annotation_dbs/pseudomolecules/version_6.1/all.dir/all.cDNA.gz', history_id)
# params = "name:" + ref_dataset_id
# r = RunTool(history_id, tool_id, params)
#
# output = r['name']
# print(output)
#x[0] = 5*4
#z = x[0]
#y = 50 + z
# a = {3: {'t':'ss'}, 4:11}
# y = a[3]
# x = []
# x[0] = 20
# y = 5 + (x[0])
# print(y)
# f = FastQC('fastq\SRR034608.fastq.gz')
# print(f)
# parallel:
# print(10)
# with:
# print(11)
ref_dataset_name = 100
#params = [ref_dataset_name, 50]
# { 'name': ref_dataset_name }
#params = [{ 'name': ref_dataset_name }, { 'name': [20 * 30] }]
#params = { 'name': ref_dataset_name }
params = {"fastq_R1": {"values":[{"src":"hda", "id":"9ac6c47a8515c831"}]}, "fastq_R2":{"values":[{"src":"hda","id":"6dbc21d257b88b00"}]}}
print(params = 'xt')
"""
            tokens = p.parse(test_program_example)
            #tokens = p.grammar.assignstmt.ignore(pythonStyleComment).parseString(test_program_example)
        tokens.pprint()
        #print(tokens.asXML())
        # NOTE(review): ``PhenoWLInterpreter`` is not defined here -- this
        # module imports ``Interpreter`` from .interpreter; as written this
        # line raises NameError.  Confirm the intended class name.
        integrator = PhenoWLInterpreter()
        # integrator = PhenoWLCodeGenerator()
        integrator.context.load_library("libraries")
        integrator.run(tokens)
        print(integrator.context.library)
        print(integrator.context.out)
        print(integrator.context.err)
        #print(integrator.code)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.