hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e18519800d7fdfc3c09ad4f19d1624b4549b0c48 | 6,855 | py | Python | util/convex.py | alters-mit/magnebot | 97641717ad7964a67e24d11379b7aa04693b38d2 | [
"MIT"
] | 3 | 2021-04-21T20:45:59.000Z | 2021-11-18T22:05:57.000Z | util/convex.py | alters-mit/magnebot | 97641717ad7964a67e24d11379b7aa04693b38d2 | [
"MIT"
] | 7 | 2021-03-01T00:00:41.000Z | 2022-01-12T16:19:17.000Z | util/convex.py | alters-mit/magnebot | 97641717ad7964a67e24d11379b7aa04693b38d2 | [
"MIT"
] | 3 | 2021-02-26T02:13:08.000Z | 2021-07-10T06:50:28.000Z | from typing import List, Dict
from json import dumps, loads, JSONEncoder
import numpy as np
from tqdm import tqdm
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.librarian import ModelLibrarian
from tdw.output_data import Bounds, Raycast
from magnebot.paths import CONVEX_SIDES_PATH
class Encoder(JSONEncoder):
    """
    JSON encoder that pretty-prints dicts (one key per line, indented by
    ``self.indent``) while keeping lists and tuples on a single line.

    Source: https://stackoverflow.com/questions/16264515/json-dumps-custom-formatting
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Depth-tracking state mutated while recursing through nested dicts.
        self.current_indent = 0
        self.current_indent_str = ""
    def encode(self, o):
        # Sequences are rendered inline, regardless of nesting depth.
        if isinstance(o, (list, tuple)):
            rendered = ", ".join(dumps(entry) for entry in o)
            return "[" + rendered + "]"
        if isinstance(o, dict):
            self.current_indent += self.indent
            self.current_indent_str = " " * self.current_indent
            entries = [
                self.current_indent_str + dumps(key) + ": " + self.encode(value)
                for key, value in o.items()
            ]
            self.current_indent -= self.indent
            self.current_indent_str = " " * self.current_indent
            return "{\n" + ",\n".join(entries) + "\n" + self.current_indent_str + "}"
        # Scalars fall through to the default compact representation.
        return dumps(o)
class Convex(Controller):
    """
    For every object in the full model library, determine which sides of the bounds are convex.
    This data will be used by `Magnebot.grasp()` when choosing which sides to target.
    """
    def __init__(self, port: int = 1071, launch_build: bool = True):
        """
        :param port: Network port used to talk to the TDW build.
        :param launch_build: If True, launch the TDW build automatically.
        """
        # Create the cache file on first run; otherwise resume from the
        # previously recorded results (lets an interrupted run continue).
        if not CONVEX_SIDES_PATH.exists():
            CONVEX_SIDES_PATH.write_text("{}")
            self.concave: Dict[str, List[int]] = dict()
        else:
            self.concave: Dict[str, List[int]] = loads(CONVEX_SIDES_PATH.read_text(encoding="utf-8"))
        # NOTE(review): despite its name, `self.concave` maps model name -> indices
        # of its *convex* sides (`run()` stores `good_sides` in it); consider renaming.
        super().__init__(port=port, launch_build=launch_build, check_version=False)
        self.model_librarian = ModelLibrarian("models_full.json")
        # Progress bar covering every record in the full model library.
        self.pbar = tqdm(total=len(self.model_librarian.records))
    def run(self) -> None:
        """
        For every model in the model library, get the bounds. Raycast from one side of the object to the other.
        If the raycast hit, and the hit point is closer to the farther side than the nearer side, the side is concave.
        Write all convex sides per object to disk.
        """
        self.communicate({"$type": "create_empty_environment"})
        for record in self.model_librarian.records:
            # Ignore bad models or models that we already checked.
            if record.name in self.concave or record.do_not_use:
                self.pbar.update(1)
                continue
            self.pbar.set_description(record.name)
            object_id = self.get_unique_id()
            scale = TDWUtils.get_unit_scale(record)
            # Create the object. Scale to unit size. Make the object kinematic so that it won't fall over.
            # Get the bounds. Raycast directly above the object.
            resp = self.communicate([self.get_add_object(model_name=record.name,
                                                         library="models_full.json",
                                                         object_id=object_id),
                                     {"$type": "set_kinematic_state",
                                      "id": object_id,
                                      "is_kinematic": True,
                                      "use_gravity": False},
                                     {"$type": "scale_object",
                                      "id": object_id,
                                      "scale_factor": {"x": scale, "y": scale, "z": scale}},
                                     {"$type": "send_bounds"}])
            bounds = Bounds(resp[0])
            # Convert the bounds sides to a dictionary.
            sides = {"left": np.array(bounds.get_left(0)),
                     "right": np.array(bounds.get_right(0)),
                     "front": np.array(bounds.get_front(0)),
                     "back": np.array(bounds.get_back(0)),
                     "top": np.array(bounds.get_top(0)),
                     "bottom": np.array(bounds.get_bottom(0))}
            # The origin points of each ray per direction.
            # Each ray starts 4 meters beyond the corresponding bounds side.
            # NOTE(review): lateral coordinates are hard-coded to 0, which
            # assumes the object sits at the scene origin — confirm.
            ray_origins = {"left": {"x": sides["left"][0] - 4, "y": sides["left"][1], "z": 0},
                           "right": {"x": sides["right"][0] + 4, "y": sides["right"][1], "z": 0},
                           "front": {"x": 0, "y": sides["front"][1], "z": sides["front"][2] + 4},
                           "back": {"x": 0, "y": sides["back"][1], "z": sides["back"][2] - 4},
                           "top": {"x": 0, "y": sides["top"][1] + 4, "z": 0},
                           "bottom": {"x": 0, "y": sides["bottom"][1] - 4, "z": 0}}
            # The destination of each ray (the opposite side of the bounds).
            ray_destinations = {"left": "right",
                                "right": "left",
                                "front": "back",
                                "back": "front",
                                "top": "bottom",
                                "bottom": "top"}
            # Get a raycast per side.
            good_sides: List[int] = list()
            # `sides` and `ray_origins` were built with the same keys in the
            # same order, so `side` and `ray` are always equal here.
            for i, side, ray in zip(range(len(sides)), sides.keys(), ray_origins):
                resp = self.communicate({"$type": "send_raycast",
                                         "origin": ray_origins[ray],
                                         "destination": ray_origins[ray_destinations[ray]]})
                raycast = Raycast(resp[0])
                # Ignore raycasts that didn't hit the object.
                if not raycast.get_hit() or not raycast.get_hit_object():
                    continue
                side_origin: np.array = sides[side]
                side_destination: np.array = sides[ray_destinations[side]]
                point: np.array = np.array(raycast.get_point())
                # Ignore raycasts that hit a significant concavity.
                # If the hit point is >5cm closer to the far side than to this
                # side, the surface dips inward here, i.e. the side is concave.
                if np.linalg.norm(side_origin - point) - np.linalg.norm(side_destination - point) > 0.05:
                    continue
                good_sides.append(i)
            # Destroy the object and remove it from memory.
            self.communicate([{"$type": "destroy_object",
                               "id": object_id},
                              {"$type": "unload_asset_bundles"}])
            # Record the results.
            # Rewrite the whole cache file after each model (checkpointing).
            self.concave[record.name] = good_sides
            CONVEX_SIDES_PATH.write_text(dumps(self.concave, indent=2, cls=Encoder))
            self.pbar.update(1)
        self.communicate({"$type": "terminate"})
if __name__ == "__main__":
    # Entry point: scan the whole model library and write the results to disk.
    Convex().run()
| 50.036496 | 118 | 0.521663 |
dcad28f0c97a3b29769b7ffcddc06adc04ca89f6 | 2,037 | py | Python | src/move_source_.py | Jwolter0/OBS-Studio-Python-Scripting-Cheatsheet-obspython-Examples-of-API | 6d6396f2da410bc29f9665dbc40a21fc390aaa57 | [
"MIT"
] | null | null | null | src/move_source_.py | Jwolter0/OBS-Studio-Python-Scripting-Cheatsheet-obspython-Examples-of-API | 6d6396f2da410bc29f9665dbc40a21fc390aaa57 | [
"MIT"
] | null | null | null | src/move_source_.py | Jwolter0/OBS-Studio-Python-Scripting-Cheatsheet-obspython-Examples-of-API | 6d6396f2da410bc29f9665dbc40a21fc390aaa57 | [
"MIT"
] | null | null | null | import obspython as obs
class Example:
    """Demo helper: creates a private OBS text source and nudges it around."""
    def __init__(self):
        # Position vector cached on the instance and reused between moves.
        self.location = obs.vec2()
    def create_text_source(self):
        """Add a private "test_py" text source to the current scene."""
        scene_source = obs.obs_frontend_get_current_scene()
        scene_handle = obs.obs_scene_from_source(scene_source)
        text_settings = obs.obs_data_create()
        obs.obs_data_set_string(
            text_settings, "text", "The quick brown fox jumps over the lazy dog"
        )
        new_source = obs.obs_source_create_private("text_gdiplus", "test_py", text_settings)
        obs.obs_scene_add(scene_handle, new_source)
        # Drop every reference acquired above.
        # NOTE(review): the scene *source* returned by
        # obs_frontend_get_current_scene() is never released here (same as the
        # original code) — possible reference leak; confirm against OBS docs.
        obs.obs_scene_release(scene_handle)
        obs.obs_data_release(text_settings)
        obs.obs_source_release(new_source)
    def move_text_source(self):
        """Shift the "test_py" source by +10/+10 pixels, if it exists."""
        scene_source = obs.obs_frontend_get_current_scene()
        text_source = obs.obs_get_source_by_name("test_py")
        scene_handle = obs.obs_scene_from_source(scene_source)
        item = obs.obs_scene_find_source(scene_handle, "test_py")
        if item:
            step_x, step_y = 10, 10
            print("old values", self.location.x)
            obs.obs_sceneitem_get_pos(
                item, self.location
            )  # update to last position if its changed from OBS
            self.location.x += step_x
            self.location.y += step_y
            print("new values", self.location.x)
            obs.obs_sceneitem_set_pos(item, self.location)
        obs.obs_scene_release(scene_handle)
        obs.obs_source_release(text_source)
# Shared module-level instance; created at script load, used by the callbacks.
eg = Example()  # class created, obs part starts
def add_pressed(props, prop):
    # OBS button callback: add the "test_py" text source to the current scene.
    eg.create_text_source()
def move_pressed(props, prop):
    # OBS button callback: move the "test_py" source by +10/+10 pixels.
    eg.move_text_source()
def script_description():
    """Text shown by OBS in the Scripts dialog for this script."""
    description = "add text source to current scene"
    return description
def script_properties():  # ui
    # Build the script's settings panel: two buttons wired to the callbacks.
    props = obs.obs_properties_create()
    obs.obs_properties_add_button(props, "button", "Add text source", add_pressed)
    obs.obs_properties_add_button(
        props, "button2", "Move source +10 pixels", move_pressed
    )
    return props
| 30.863636 | 84 | 0.636721 |
f0db2ce834c5e198ea4c2c76bd392e87fa38dc39 | 250 | py | Python | accounting/accounting/doctype/party/party.py | pateljannat/accounting-app | 5e41746b4566b98cb4e72c3625c71d8310c138ba | [
"MIT"
] | null | null | null | accounting/accounting/doctype/party/party.py | pateljannat/accounting-app | 5e41746b4566b98cb4e72c3625c71d8310c138ba | [
"MIT"
] | null | null | null | accounting/accounting/doctype/party/party.py | pateljannat/accounting-app | 5e41746b4566b98cb4e72c3625c71d8310c138ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Party(Document):
    """Frappe DocType controller for Party; no custom behavior yet."""
    pass
| 22.727273 | 49 | 0.768 |
618a35481004a0f823c2d5fa74c8d6b459244819 | 1,503 | py | Python | Python/number-of-islands-ii.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | Python/number-of-islands-ii.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | Python/number-of-islands-ii.py | black-shadows/LeetCode-Solutions | b1692583f7b710943ffb19b392b8bf64845b5d7a | [
"Fair",
"Unlicense"
] | null | null | null | # Time: O(klog*k) ~= O(k), k is the length of the positions
# Space: O(k)
class Solution(object):
    def numIslands2(self, m, n, positions):
        """
        Return the number of islands after each cell in ``positions`` is
        turned into land on an initially all-water m x n grid.

        Uses union-find (disjoint sets) with path compression: each new land
        cell starts as its own island; merging it with an already-land
        neighbor decrements the island count.

        :type m: int
        :type n: int
        :type positions: List[List[int]]
        :rtype: List[int]
        """
        def node_id(node, n):
            # Flatten a (row, col) coordinate into a single integer key.
            return node[0] * n + node[1]

        def find_set(x):
            if parent[x] != x:
                parent[x] = find_set(parent[x])  # path compression.
            return parent[x]

        def union_set(x, y):
            x_root, y_root = find_set(x), find_set(y)
            parent[min(x_root, y_root)] = max(x_root, y_root)

        numbers = []
        number = 0
        directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
        # Maps flattened cell id -> parent id; doubles as the set of cells
        # that are already land.  (Renamed from "set", which shadowed the
        # builtin of the same name.)
        parent = {}
        for position in positions:
            node = (position[0], position[1])
            node_key = node_id(node, n)
            if node_key in parent:
                # Duplicate position: the grid is unchanged, so the island
                # count stays the same.  (The original code reset the cell's
                # parent pointer and counted it as a new island — a bug.)
                numbers.append(number)
                continue
            parent[node_key] = node_key
            number += 1
            for d in directions:
                neighbor = (position[0] + d[0], position[1] + d[1])
                if 0 <= neighbor[0] < m and 0 <= neighbor[1] < n and \
                   node_id(neighbor, n) in parent:
                    if find_set(node_key) != find_set(node_id(neighbor, n)):
                        # Merge different islands, amortised time: O(log*k) ~= O(1)
                        union_set(node_key, node_id(neighbor, n))
                        number -= 1
            numbers.append(number)
        return numbers
| 33.4 | 84 | 0.457086 |
ca558b7f3998c223920114334151f5dbe94fefcb | 3,287 | py | Python | manyssh/input.py | linkdd/manyssh | 1f95d9abbf3215d115c627fad41cabcba02f5e28 | [
"MIT"
] | 3 | 2015-02-08T23:50:53.000Z | 2015-02-09T09:14:34.000Z | manyssh/input.py | linkdd/manyssh | 1f95d9abbf3215d115c627fad41cabcba02f5e28 | [
"MIT"
] | 5 | 2015-02-09T09:20:20.000Z | 2015-02-09T09:41:29.000Z | manyssh/input.py | linkdd/manyssh | 1f95d9abbf3215d115c627fad41cabcba02f5e28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from gi.repository import Gdk, Gtk
class Input(Gtk.Entry):
    """
    ManySSH command line.

    Behaves like a modal (vi-style) prompt:

     - INSERT mode: key events are forwarded to every activated terminal.
     - COMMAND mode: pressing Enter dispatches the entry's text to the
       matching ``cmd_*`` method on this object.

    Ctrl+Escape switches to COMMAND mode; Ctrl+Insert back to INSERT mode.
    """
    def __init__(self, win, *args, **kwargs):
        """
        :param win: ManySSH window
        :type win: manyssh.win.Window
        """
        super(Input, self).__init__(*args, **kwargs)
        self.win = win
        self.ctx = win.status.get_context_id('ManySSH command line')
        # Shortcut pushing a message onto the window's status bar.  NOTE:
        # this is a plain callable (lambda), not the Gtk.Statusbar itself.
        self.status = lambda *args, **kwargs: self.win.status.push(
            self.ctx,
            *args,
            **kwargs
        )
        self.status('-- INSERT --')
        self.insertmode = True
        # Forward both key press and release so terminals see key-ups too.
        for ev in ['key-press-event', 'key-release-event']:
            self.connect(ev, self.sendevent, ev)
        self.connect('activate', self.parse_command)
    def sendevent(self, sender, event, eventname):
        """
        If in INSERT mode, redirect events to terminals.
        :param sender: Event emitter
        :type sender: Gtk.Widget
        :param event: Keyboard event
        :type event: Gdk.EventKey
        :param eventname: Event's name
        :type eventname: str
        :returns: True if event was handled, False otherwise
        """
        modifier = Gdk.ModifierType.CONTROL_MASK
        if self.insertmode:
            if event.keyval == Gdk.KEY_Escape and event.state & modifier:
                # Ctrl+Escape: leave INSERT mode.
                self.status('-- COMMAND --')
                self.insertmode = False
                return True
            else:
                # Replay the event on every activated terminal, then take
                # the focus back so the user keeps typing here.
                for term in self.win.terms:
                    if term.activator.get_active():
                        term.grab_focus()
                        term.emit(eventname, event.copy())
                self.grab_focus()
                return True
        elif event.keyval == Gdk.KEY_Insert and event.state & modifier:
            # Ctrl+Insert: back to INSERT mode with a cleared entry.
            self.status('-- INSERT --')
            self.set_text('')
            self.insertmode = True
            return True
        return False
    def parse_command(self, sender):
        """
        Parse command line.

        Entry text ``foo`` is dispatched to ``self.cmd_foo()`` if such a
        method exists; otherwise an error is shown on the status bar.

        :param sender: Event emitter
        :type sender: Gtk.Widget
        """
        command = self.get_text()
        method = 'cmd_{0}'.format(command)
        if not hasattr(self, method):
            # Bug fix: ``self.status`` is a lambda and has no ``push``
            # attribute; call it directly (it already supplies the context).
            self.status(
                '-- COMMAND -- Error: Unknown command: {0}'.format(command)
            )
        else:
            handler = getattr(self, method)
            handler()
            self.status('-- COMMAND --')
        self.set_text('')
    def cmd_p(self):
        """ Previous page command. """
        self.win.terms.prev_page()
    def cmd_n(self):
        """ Next page command. """
        self.win.terms.next_page()
    def cmd_r(self):
        """ Refresh page command. """
        # Bug fix: the terminals container lives on the window
        # (``self.win.terms``), not on the entry itself.
        self.win.terms.current.refresh(self)
    def cmd_ra(self):
        """ Refresh all pages command. """
        for term in self.win.terms:
            term.refresh(self)
    def cmd_t(self):
        """ Toggle page command. """
        self.win.terms.current.toggle_active(self)
    def cmd_ta(self):
        """ Toggle all pages command. """
        for term in self.win.terms:
            term.toggle_active(self)
    def cmd_q(self):
        """ Exit command. """
        Gtk.main_quit()
| 25.284615 | 75 | 0.524186 |
b50acaf8414641f8cef7af44b51132f7690988a3 | 38,733 | py | Python | tools/linter_lib/custom_check.py | adi611/zulip | e1f42c1ac54f74176fa69c360791f4cca3c7c87d | [
"Apache-2.0"
] | null | null | null | tools/linter_lib/custom_check.py | adi611/zulip | e1f42c1ac54f74176fa69c360791f4cca3c7c87d | [
"Apache-2.0"
] | null | null | null | tools/linter_lib/custom_check.py | adi611/zulip | e1f42c1ac54f74176fa69c360791f4cca3c7c87d | [
"Apache-2.0"
] | null | null | null | from typing import List
from zulint.custom_rules import Rule, RuleList
# Rule help:
# By default, a rule applies to all files within the extension for which it is specified (e.g. all .py files)
# There are three operators we can use to manually include or exclude files from linting for a rule:
# 'exclude': 'set([<path>, ...])' - if <path> is a filename, excludes that file.
# if <path> is a directory, excludes all files directly below the directory <path>.
# 'exclude_line': 'set([(<path>, <line>), ...])' - excludes all lines matching <line> in the file <path> from linting.
# 'include_only': 'set([<path>, ...])' - includes only those files where <path> is a substring of the filepath.
# Files allowed to keep using "subject" instead of "topic" in backend code;
# used as the `exclude` set of the python_rules "subject" rule below.
FILES_WITH_LEGACY_SUBJECT = {
    # This basically requires a big DB migration:
    "zerver/lib/topic.py",
    # This is for backward compatibility.
    "zerver/tests/test_legacy_subject.py",
    # Other migration-related changes require extreme care.
    "zerver/lib/fix_unreads.py",
    "zerver/tests/test_migrations.py",
    # These use subject in the email sense, and will
    # probably always be exempt:
    "zerver/lib/email_mirror.py",
    "zerver/lib/send_email.py",
    "zerver/tests/test_new_users.py",
    "zerver/tests/test_email_mirror.py",
    "zerver/tests/test_email_notifications.py",
    # This uses subject in authentication protocols sense:
    "zerver/tests/test_auth_backends.py",
    # These are tied more to our API than our DB model.
    "zerver/openapi/python_examples.py",
    "zerver/tests/test_openapi.py",
    # This has lots of query data embedded, so it's hard
    # to fix everything until we migrate the DB to "topic".
    "zerver/tests/test_message_fetch.py",
}
# Rules constraining shebang lines; shared by several RuleLists below.
shebang_rules: List["Rule"] = [
    {
        "pattern": "^#!",
        "description": "zerver library code shouldn't have a shebang line.",
        "include_only": {"zerver/"},
    },
    # /bin/sh and /usr/bin/env are the only two binaries
    # that NixOS provides at a fixed path (outside a
    # buildFHSUserEnv sandbox).
    {
        "pattern": "^#!(?! *(?:/usr/bin/env|/bin/sh)(?: |$))",
        "description": "Use `#!/usr/bin/env foo` instead of `#!/path/foo`"
        " for interpreters other than sh.",
    },
    {
        "pattern": "^#!/usr/bin/env python$",
        "description": "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/env python`.",
    },
]
# Shared by whitespace_rules (as its first element) and, via the
# whitespace_rules[0:1] slice, by bash_rules.
trailing_whitespace_rule: "Rule" = {
    "pattern": r"\s+$",
    "strip": "\n",
    "exclude": {"tools/ci/success-http-headers.template.txt"},
    "description": "Fix trailing whitespace",
}
whitespace_rules: List["Rule"] = [
    # This linter should be first since bash_rules depends on it.
    trailing_whitespace_rule,
    {
        "pattern": "http://zulip.readthedocs.io",
        "description": "Use HTTPS when linking to ReadTheDocs",
    },
    {
        "pattern": "\t",
        "strip": "\n",
        "description": "Fix tab-based whitespace",
    },
]
comma_whitespace_rule: List["Rule"] = [
    {
        "pattern": ", {2,}[^#/ ]",
        "exclude": {"zerver/tests", "frontend_tests/node_tests", "corporate/tests"},
        "description": "Remove multiple whitespaces after ','",
        "good_lines": ["foo(1, 2, 3)", "foo = bar # some inline comment"],
        "bad_lines": ["foo(1, 2, 3)", "foo(1, 2, 3)"],
    },
]
# Variant of whitespace_rules for Markdown: drops the generic trailing-space
# rule and replaces it with one that tolerates Markdown's two-space line break.
markdown_whitespace_rules: List["Rule"] = [
    *(rule for rule in whitespace_rules if rule["pattern"] != r"\s+$"),
    # Two spaces trailing a line with other content is okay--it's a Markdown line break.
    # This rule finds one space trailing a non-space, three or more trailing spaces, and
    # spaces on an empty line.
    {
        "pattern": r"((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)",
        "strip": "\n",
        "description": "Fix trailing whitespace",
    },
    {
        "pattern": "^#+[A-Za-z0-9]",
        "strip": "\n",
        "description": "Missing space after # in heading",
        "exclude_line": {
            ("docs/subsystems/hotspots.md", "#hotspot_new_hotspot_name_icon {"),
        },
        "good_lines": ["### some heading", "# another heading"],
        "bad_lines": ["###some heading", "#another heading"],
    },
]
# Custom lint rules applied to hand-written JavaScript and TypeScript files.
js_rules = RuleList(
    langs=["js", "ts"],
    rules=[
        {
            "pattern": "subject|SUBJECT",
            "exclude": {"static/js/util.js", "frontend_tests/"},
            "exclude_pattern": "emails",
            "description": "avoid subject in JS code",
            "good_lines": ["topic_name"],
            "bad_lines": ['subject="foo"', " MAX_SUBJECT_LEN"],
        },
        {
            "pattern": "msgid|MSGID",
            "description": 'Avoid using "msgid" as a variable name; use "message_id" instead.',
        },
        {
            "pattern": r"\$t\(.+\).*\+",
            "description": "Do not concatenate i18n strings",
        },
        {"pattern": r"\+.*\$t\(.+\)", "description": "Do not concatenate i18n strings"},
        {
            "pattern": "[.]html[(]",
            "exclude_pattern": r"""\.html\(("|'|render_|html|message\.content|util\.clean_user_content_links|rendered_|$|\)|error_html|widget_elem|\$error|\$\("<p>"\))""",
            "exclude": {
                "static/js/portico",
                "static/js/lightbox.js",
                "static/js/ui_report.ts",
                "static/js/dialog_widget.js",
                "frontend_tests/",
            },
            "description": "Setting HTML content with jQuery .html() can lead to XSS security bugs. Consider .text() or using rendered_foo as a variable name if content comes from handlebars and thus is already sanitized.",
        },
        {
            "pattern": "[\"']json/",
            "description": "Relative URL for JSON route not supported by i18n",
        },
        {
            "pattern": r"""[.]text\(["'][a-zA-Z]""",
            "description": "Strings passed to $().text should be wrapped in $t() for internationalization",
            "exclude": {"frontend_tests/node_tests/"},
        },
        {
            "pattern": r"""compose_error\(["']""",
            "description": "Argument to compose_error should be a literal string translated "
            "by $t_html()",
        },
        {
            "pattern": r"ui.report_success\(",
            "description": "Deprecated function, use ui_report.success.",
        },
        {
            "pattern": r"""report.success\(["']""",
            "description": "Argument to ui_report.success should be a literal string translated "
            "by $t_html()",
        },
        {
            "pattern": r"ui.report_error\(",
            "description": "Deprecated function, use ui_report.error.",
        },
        {
            "pattern": r"""report.error\(["'][^'"]""",
            "description": "Argument to ui_report.error should be a literal string translated "
            "by $t_html()",
            "good_lines": ['ui_report.error("")', 'ui_report.error(_("text"))'],
            "bad_lines": ['ui_report.error("test")'],
        },
        {
            "pattern": r"""report.client_error\(["'][^'"]""",
            "description": "Argument to ui_report.client_error should be a literal string translated "
            "by $t_html()",
            "good_lines": ['ui_report.client_error("")', 'ui_report.client_error(_("text"))'],
            "bad_lines": ['ui_report.client_error("test")'],
        },
        {
            "pattern": r"\$\(document\)\.ready\(",
            "description": "`Use $(f) rather than `$(document).ready(f)`",
            "good_lines": ["$(function () {foo();}"],
            "bad_lines": ["$(document).ready(function () {foo();}"],
        },
        {
            "pattern": "[$][.](get|post|patch|delete|ajax)[(]",
            "description": "Use channel module for AJAX calls",
            "exclude": {
                # Internal modules can do direct network calls
                "static/js/blueslip.ts",
                "static/js/channel.js",
                # External modules that don't include channel.js
                "static/js/stats/",
                "static/js/portico/",
                "static/js/billing/",
            },
            "good_lines": ["channel.get(...)"],
            "bad_lines": ["$.get()", "$.post()", "$.ajax()"],
        },
        {
            "pattern": "style ?=",
            "exclude_pattern": r"(const |\S)style ?=",
            "description": "Avoid using the `style=` attribute; we prefer styling in CSS files",
            "exclude": {
                "frontend_tests/node_tests/copy_and_paste.js",
            },
            "good_lines": ["#my-style {color: blue;}", "const style =", 'some_style = "test"'],
            "bad_lines": ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"'],
        },
        {
            "pattern": r"assert\(",
            "description": "Use 'assert.ok' instead of 'assert'. We avoid the use of 'assert' as it can easily be confused with 'assert.equal'.",
            "good_lines": ["assert.ok(...)"],
            "bad_lines": ["assert(...)"],
        },
        *whitespace_rules,
    ],
)
python_rules = RuleList(
langs=["py"],
rules=[
{
"pattern": "subject|SUBJECT",
"exclude_pattern": "subject to the|email|outbox",
"description": "avoid subject as a var",
"good_lines": ["topic_name"],
"bad_lines": ['subject="foo"', " MAX_SUBJECT_LEN"],
"exclude": FILES_WITH_LEGACY_SUBJECT,
"include_only": {
"zerver/data_import/",
"zerver/lib/",
"zerver/tests/",
"zerver/views/",
},
},
{
"pattern": "msgid|MSGID",
"exclude": {"tools/check-capitalization"},
"description": 'Avoid using "msgid" as a variable name; use "message_id" instead.',
},
{
"pattern": "^(?!#)@login_required",
"description": "@login_required is unsupported; use @zulip_login_required",
"good_lines": ["@zulip_login_required", "# foo @login_required"],
"bad_lines": ["@login_required", " @login_required"],
},
{
"pattern": "^user_profile[.]save[(][)]",
"description": "Always pass update_fields when saving user_profile objects",
"exclude_line": {
(
"zerver/lib/actions.py",
"user_profile.save() # Can't use update_fields because of how the foreign key works.",
),
},
"exclude": {"zerver/tests", "zerver/lib/create_user.py"},
"good_lines": ['user_profile.save(update_fields=["pointer"])'],
"bad_lines": ["user_profile.save()"],
},
{
"pattern": "self: Any",
"description": "you can omit Any annotation for self",
"good_lines": ["def foo (self):"],
"bad_lines": ["def foo(self: Any):"],
},
{
"pattern": "assertEquals[(]",
"description": "Use assertEqual, not assertEquals (which is deprecated).",
"good_lines": ["assertEqual(1, 2)"],
"bad_lines": ["assertEquals(1, 2)"],
},
{
"pattern": "assertEqual[(]len[(][^ ]*[)],",
"description": "Use the assert_length helper instead of assertEqual(len(..), ..).",
"good_lines": ["assert_length(data, 2)"],
"bad_lines": ["assertEqual(len(data), 2)"],
},
{
"pattern": "assertTrue[(]len[(][^ ]*[)]",
"description": "Use assert_length or assertGreater helper instead of assertTrue(len(..) ..).",
"good_lines": ["assert_length(data, 2)", "assertGreater(len(data), 2)"],
"bad_lines": [
"assertTrue(len(data) == 2)",
"assertTrue(len(data) >= 2)",
"assertTrue(len(data) > 2)",
],
},
{
"pattern": r"#\s*type:\s*ignore(?!\[[^][]+\] +# +\S)",
"exclude": {"tools/tests", "zerver/lib/test_runner.py", "zerver/tests"},
"description": '"type: ignore" should always end with "# type: ignore[code] # explanation for why"',
"good_lines": ["foo = bar # type: ignore[code] # explanation"],
"bad_lines": [
"foo = bar # type: ignore",
"foo = bar # type: ignore[code]",
"foo = bar # type: ignore # explanation",
],
},
{
"pattern": r"\bsudo\b",
"include_only": {"scripts/"},
"exclude": {"scripts/lib/setup_venv.py"},
"exclude_line": {
("scripts/lib/zulip_tools.py", 'args = ["sudo", *sudo_args, "--", *args]'),
},
"description": "Most scripts are intended to run on systems without sudo.",
"good_lines": ['subprocess.check_call(["ls"])'],
"bad_lines": ['subprocess.check_call(["sudo", "ls"])'],
},
{
"pattern": "django.utils.translation",
"include_only": {"test/", "zerver/views/development/"},
"exclude": {"zerver/views/development/dev_login.py"},
"description": "Test strings should not be tagged for translation",
"good_lines": [""],
"bad_lines": ["django.utils.translation"],
},
{
"pattern": "userid",
"description": "We prefer user_id over userid.",
"good_lines": ["id = alice.user_id"],
"bad_lines": ["id = alice.userid"],
},
# To avoid JsonableError(_variable) and JsonableError(_(variable))
{
"pattern": r"\WJsonableError\(_\(?\w.+\)",
"exclude": {"zerver/tests", "zerver/views/development/"},
"description": "Argument to JsonableError should be a literal string enclosed by _()",
},
{
"pattern": r"""\WJsonableError\(["'].+\)""",
"exclude": {"zerver/tests", "zerver/views/development/"},
"description": "Argument to JsonableError should be a literal string enclosed by _()",
},
{
"pattern": r"""([a-zA-Z0-9_]+)=REQ\(['"]\1['"]""",
"description": "REQ's first argument already defaults to parameter name",
},
{
"pattern": r"self\.client\.(get|post|patch|put|delete)",
"description": """Do not call self.client directly for put/patch/post/get.
See WRAPPER_COMMENT in test_helpers.py for details.
""",
},
# Directly fetching Message objects in e.g. views code is often a security bug.
{
"pattern": "[^r]Message.objects.get",
"exclude": {
"zerver/tests",
"zerver/lib/onboarding.py",
"zilencer/management/commands/add_mock_conversation.py",
"zerver/worker/queue_processors.py",
"zerver/management/commands/export.py",
"zerver/lib/export.py",
},
"description": "Please use access_message() to fetch Message objects",
},
{
"pattern": "Stream.objects.get",
"include_only": {"zerver/views/"},
"description": "Please use access_stream_by_*() to fetch Stream objects",
},
{
"pattern": "get_stream[(]",
"include_only": {"zerver/views/", "zerver/lib/actions.py"},
"exclude_line": {
# This one in check_message is kinda terrible, since it's
# how most instances are written, but better to exclude something than nothing
("zerver/lib/actions.py", "stream = get_stream(stream_name, realm)"),
("zerver/lib/actions.py", 'return get_stream("signups", realm)'),
},
"description": "Please use access_stream_by_*() to fetch Stream objects",
},
{
"pattern": "datetime[.](now|utcnow)",
"include_only": {"zerver/", "analytics/"},
"description": "Don't use datetime in backend code.\n"
"See https://zulip.readthedocs.io/en/latest/contributing/code-style.html#naive-datetime-objects",
},
{
"pattern": "from os.path",
"description": "Don't use from when importing from the standard library",
},
{
"pattern": "import os.path",
"description": "Use import os instead of import os.path",
},
{
"pattern": r"(logging|logger)\.warn\W",
"description": "Logger.warn is a deprecated alias for Logger.warning; Use 'warning' instead of 'warn'.",
"good_lines": ["logging.warning('I am a warning.')", "logger.warning('warning')"],
"bad_lines": ["logging.warn('I am a warning.')", "logger.warn('warning')"],
},
{
"pattern": r"\.pk",
"exclude_pattern": "[.]_meta[.]pk",
"description": "Use `id` instead of `pk`.",
"good_lines": ["if my_django_model.id == 42", "self.user_profile._meta.pk"],
"bad_lines": ["if my_django_model.pk == 42"],
},
{
"pattern": r"^\s*#\s*type:",
"description": "Comment-style function type annotation. Use Python3 style annotations instead.",
},
{
"pattern": r"\S\s*#\s*type:(?!\s*ignore)",
"exclude": {
"scripts/lib/hash_reqs.py",
"scripts/lib/setup_venv.py",
"scripts/lib/zulip_tools.py",
"tools/lib/provision.py",
},
"description": "Comment-style variable type annotation. Use Python 3.6 style annotations instead.",
"good_lines": ["a: List[int] = []"],
"bad_lines": ["a = [] # type: List[int]"],
},
{
"pattern": r": *(?!Optional)[^ ].*= models[.].*null=True",
"include_only": {"zerver/models.py"},
"description": "Model variable with null=true not annotated as Optional.",
"good_lines": [
"desc: Optional[Text] = models.TextField(null=True)",
"stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)",
"desc: Text = models.TextField()",
"stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)",
],
"bad_lines": [
"desc: Text = models.CharField(null=True)",
"stream: Stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE)",
],
},
{
"pattern": r": *Optional.*= models[.].*\)",
"exclude_pattern": "null=True",
"include_only": {"zerver/models.py"},
"description": "Model variable annotated with Optional but variable does not have null=true.",
"good_lines": [
"desc: Optional[Text] = models.TextField(null=True)",
"stream: Optional[Stream] = models.ForeignKey(Stream, null=True, on_delete=CASCADE)",
"desc: Text = models.TextField()",
"stream: Stream = models.ForeignKey(Stream, on_delete=CASCADE)",
],
"bad_lines": [
"desc: Optional[Text] = models.TextField()",
"stream: Optional[Stream] = models.ForeignKey(Stream, on_delete=CASCADE)",
],
},
{
"pattern": r"exit[(][1-9]\d*[)]",
"include_only": {"/management/commands/"},
"description": "Raise CommandError to exit with failure in management commands",
"exclude": {"zerver/management/commands/process_queue.py"},
},
{
"pattern": ".is_realm_admin =",
"description": "Use do_change_user_role function rather than setting UserProfile's is_realm_admin attribute directly.",
"exclude": {
"zerver/migrations/0248_userprofile_role_start.py",
"zerver/tests/test_users.py",
},
},
{
"pattern": ".is_guest =",
"description": "Use do_change_user_role function rather than setting UserProfile's is_guest attribute directly.",
"exclude": {
"zerver/migrations/0248_userprofile_role_start.py",
"zerver/tests/test_users.py",
},
},
{
"pattern": "\\.(called(_once|_with|_once_with)?|not_called|has_calls|not_called)[(]",
"description": 'A mock function is missing a leading "assert_"',
},
{
"pattern": "@transaction.atomic\\(\\)",
"description": "Use @transaction.atomic as function decorator for consistency.",
},
*whitespace_rules,
],
max_length=110,
shebang_rules=shebang_rules,
)
# Lint rules applied to shell scripts.
bash_rules = RuleList(
    langs=["bash"],
    rules=[
        {
            "pattern": "#!.*sh [-xe]",
            "description": "Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches"
            " to set -x|set -e",
        },
        {
            "pattern": "sudo",
            "description": "Most scripts are intended to work on systems without sudo",
            "include_only": {"scripts/"},
            "exclude": {
                "scripts/lib/install",
                "scripts/setup/configure-rabbitmq",
            },
        },
        # Only the trailing-whitespace rule (first element) applies to bash.
        *whitespace_rules[0:1],
    ],
    shebang_rules=shebang_rules,
)
# Lint rules applied to CSS files (whitespace checks only).
css_rules = RuleList(
    langs=["css"],
    rules=[
        *whitespace_rules,
    ],
)
# Spelling/style rules for English prose; reused by the HTML rules below.
prose_style_rules: List["Rule"] = [
    {
        "pattern": r'^[^{].*?[^\/\#\-"]([jJ]avascript)',  # exclude usage in hrefs/divs/custom-markdown
        "exclude": {"docs/documentation/api.md", "templates/corporate/policies/privacy.md"},
        "description": "javascript should be spelled JavaScript",
    },
    {
        "pattern": r"""[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]""",  # exclude usage in hrefs/divs
        "description": "github should be spelled GitHub",
    },
    {
        "pattern": "[oO]rganisation",  # exclude usage in hrefs/divs
        "description": "Organization is spelled with a z",
        "exclude_line": {("docs/translating/french.md", "- organization - **organisation**")},
    },
    {"pattern": "!!! warning", "description": "!!! warning is invalid; it's spelled '!!! warn'"},
    {"pattern": "Terms of service", "description": "The S in Terms of Service is capitalized"},
    {
        "pattern": "[^-_p]botserver(?!rc)|bot server",
        "description": "Use Botserver instead of botserver or bot server.",
    },
    *comma_whitespace_rule,
]
# Rules shared by all HTML-ish template formats (Jinja2 and Handlebars both
# extend this list).  Each rule's good_lines/bad_lines are self-test fixtures:
# the linter asserts that good_lines do NOT match and bad_lines DO match.
html_rules: List["Rule"] = [
    *whitespace_rules,
    *prose_style_rules,
    {
        "pattern": "subject|SUBJECT",
        "exclude": {
            "templates/zerver/email.html",
            "zerver/tests/fixtures/email",
            "templates/zerver/for-business.html",
            "templates/corporate/support_request.html",
            "templates/corporate/support_request_thanks.html",
            "templates/zerver/emails/support_request.html",
        },
        "exclude_pattern": "email subject",
        "description": "avoid subject in templates",
        "good_lines": ["topic_name"],
        "bad_lines": ['subject="foo"', " MAX_SUBJECT_LEN"],
    },
    {
        "pattern": r'placeholder="[^{#](?:(?!\.com).)+$',
        "description": "`placeholder` value should be translatable.",
        "exclude_line": {
            ("templates/zerver/register.html", 'placeholder="acme"'),
            ("templates/zerver/register.html", 'placeholder="Acme or Ακμή"'),
        },
        "exclude": {
            "templates/analytics/support.html",
            # We have URL prefix and Pygments language name as placeholders
            # in the below template which we don't want to be translatable.
            "static/templates/settings/playground_settings_admin.hbs",
        },
        "good_lines": [
            '<input class="stream-list-filter" type="text" placeholder="{{ _(\'Filter streams\') }}" />'
        ],
        "bad_lines": ['<input placeholder="foo">'],
    },
    {
        "pattern": "={",
        "description": "Likely missing quoting in HTML attribute",
        "good_lines": ['<a href="{{variable}}">'],
        "bad_lines": ["<a href={{variable}}>"],
    },
    {
        "pattern": " '}}",
        "description": "Likely misplaced quoting in translation tags",
        "good_lines": ["{{t 'translatable string' }}"],
        "bad_lines": ["{{t 'translatable string '}}"],
    },
    {
        "pattern": "placeholder='[^{]",
        "description": "`placeholder` value should be translatable.",
        "good_lines": [
            '<input class="stream-list-filter" type="text" placeholder="{{ _(\'Filter streams\') }}" />'
        ],
        "bad_lines": ["<input placeholder='foo'>"],
    },
    {
        "pattern": "aria-label='[^{]",
        "description": "`aria-label` value should be translatable.",
        "good_lines": [
            '<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'
        ],
        "bad_lines": ["<button aria-label='foo'></button>"],
    },
    {
        "pattern": 'aria-label="[^{]',
        "description": "`aria-label` value should be translatable.",
        "good_lines": [
            '<button type="button" class="close close-alert-word-status" aria-label="{{t \'Close\' }}">'
        ],
        "bad_lines": ['<button aria-label="foo"></button>'],
    },
    {
        "pattern": 'script src="http',
        "description": "Don't directly load dependencies from CDNs.  See docs/subsystems/html-css.md",
        "exclude": {
            "templates/corporate/billing.html",
            "templates/zerver/hello.html",
            "templates/corporate/upgrade.html",
            "templates/corporate/event_status.html",
        },
        "bad_lines": [
            '<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>'
        ],
    },
    {
        "pattern": "title='[^{]",
        "description": "`title` value should be translatable.",
        "good_lines": ['<link rel="author" title="{{ _(\'About these documents\') }}" />'],
        "bad_lines": ["<p title='foo'></p>"],
    },
    {
        "pattern": r'title="[^{\:]',
        "exclude": {
            "templates/zerver/emails",
            "templates/analytics/realm_details.html",
            "templates/analytics/support.html",
        },
        "description": "`title` value should be translatable.",
    },
    {
        "pattern": r"""\Walt=["'][^{"']""",
        "description": "alt argument should be enclosed by _() or it should be an empty string.",
        "exclude_line": {
            (
                # Emoji should not be tagged for translation.
                "static/templates/keyboard_shortcuts.hbs",
                '<img alt=":thumbs_up:"',
            ),
        },
        # Fixed fixture typo: 'alg=""' -> 'alt=""'; an empty alt string is
        # the second allowed form this rule's description describes.
        "good_lines": ['<img src="{{source_url}}" alt="{{ _(name) }}" />', '<img alt="" />'],
        "bad_lines": ['<img alt="Foo Image" />'],
    },
    {
        "pattern": r"""\Walt=["']{{ ?["']""",
        "description": "alt argument should be enclosed by _().",
        "good_lines": ['<img src="{{source_url}}" alt="{{ _(name) }}" />'],
        "bad_lines": ['<img alt="{{ " />'],
    },
    {
        "pattern": r"\bon\w+ ?=",
        "description": "Don't use inline event handlers (onclick=, etc. attributes) in HTML. Instead,"
        "attach a jQuery event handler ($('#foo').on('click', function () {...})) when "
        "the DOM is ready (inside a $(function () {...}) block).",
        "exclude": {
            "templates/zerver/development/dev_login.html",
            "templates/corporate/upgrade.html",
        },
        "good_lines": ["($('#foo').on('click', function () {}"],
        "bad_lines": [
            "<button id='foo' onclick='myFunction()'>Foo</button>",
            "<input onchange='myFunction()'>",
        ],
    },
    {
        "pattern": "style ?=",
        "description": "Avoid using the `style=` attribute; we prefer styling in CSS files",
        "exclude_pattern": r'.*style ?=["'
        + "'"
        + "](display: ?none|background: {{|color: {{|background-color: {{).*",
        "exclude": {
            # 5xx page doesn't have external CSS
            "static/html/5xx.html",
            # exclude_pattern above handles color, but have other issues:
            "static/templates/draft.hbs",
            "static/templates/stream_settings/browse_streams_list_item.hbs",
            "static/templates/single_message.hbs",
            # Old-style email templates need to use inline style
            # attributes; it should be possible to clean these up
            # when we convert these templates to use premailer.
            "templates/zerver/emails/email_base_messages.html",
            # Email log templates; should clean up.
            "templates/zerver/email.html",
            "templates/zerver/development/email_log.html",
            # Social backend logos are dynamically loaded
            "templates/zerver/accounts_home.html",
            "templates/zerver/login.html",
            # Needs the width cleaned up; display: none is fine
            "static/templates/dialog_change_password.hbs",
            # background image property is dynamically generated
            "static/templates/user_profile_modal.hbs",
            "static/templates/pm_list_item.hbs",
            # Inline styling for an svg; could be moved to CSS files?
            "templates/zerver/landing_nav.html",
            "templates/zerver/billing_nav.html",
            "templates/zerver/features.html",
            "templates/zerver/portico-header.html",
            "templates/corporate/billing.html",
            "templates/corporate/upgrade.html",
            # Miscellaneous violations to be cleaned up
            "static/templates/user_info_popover_title.hbs",
            "static/templates/confirm_dialog/confirm_subscription_invites_warning.hbs",
            "templates/zerver/reset_confirm.html",
            "templates/zerver/config_error.html",
            "templates/zerver/dev_env_email_access_details.html",
            "templates/zerver/confirm_continue_registration.html",
            "templates/zerver/register.html",
            "templates/zerver/accounts_send_confirm.html",
            "templates/zerver/integrations/index.html",
            "templates/zerver/documentation_main.html",
            "templates/analytics/realm_summary_table.html",
            "templates/corporate/zephyr.html",
            "templates/corporate/zephyr-mirror.html",
        },
        "good_lines": ["#my-style {color: blue;}", 'style="display: none"', "style='display: none"],
        "bad_lines": ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"'],
    },
]
# Handlebars templates: all generic HTML rules plus translation-tag hygiene
# ({{t ...}} spacing/quoting) and a ban on inline <script>.
handlebars_rules = RuleList(
    langs=["hbs"],
    rules=[
        *html_rules,
        {
            "pattern": "[<]script",
            "description": "Do not use inline <script> tags here; put JavaScript in static/js instead.",
        },
        {
            "pattern": "{{ t (\"|')",
            "description": 'There should be no spaces before the "t" in a translation tag.',
        },
        {
            "pattern": r"{{t '.*' }}[\.\?!]",
            "description": "Period should be part of the translatable string.",
        },
        {
            "pattern": r'{{t ".*" }}[\.\?!]',
            "description": "Period should be part of the translatable string.",
        },
        {
            "pattern": r"{{/tr}}[\.\?!]",
            "description": "Period should be part of the translatable string.",
        },
        {
            "pattern": "{{t (\"|') ",
            "description": "Translatable strings should not have leading spaces.",
        },
        {
            "pattern": "{{t '[^']+ ' }}",
            "description": "Translatable strings should not have trailing spaces.",
        },
        {
            "pattern": '{{t "[^"]+ " }}',
            "description": "Translatable strings should not have trailing spaces.",
        },
        {
            "pattern": r'"{{t "',
            "description": "Invalid quoting for HTML element with translated string.",
        },
    ],
)
# Jinja2 templates (server-rendered .html): generic HTML rules plus
# translation punctuation rules and a production-safety check on entry points.
jinja2_rules = RuleList(
    langs=["html"],
    rules=[
        *html_rules,
        {
            "pattern": r"{% endtrans %}[\.\?!]",
            "description": "Period should be part of the translatable string.",
        },
        {
            "pattern": r"{{ _(.+) }}[\.\?!]",
            "description": "Period should be part of the translatable string.",
        },
        {
            "pattern": r'{% set entrypoint = "dev-',
            "exclude": {"templates/zerver/development/"},
            "description": "Development entry points (dev-) must not be imported in production.",
        },
    ],
)
# JSON files: hand-picked whitespace rules (see inline note) plus a
# formatting check requiring a space after colons.
json_rules = RuleList(
    langs=["json"],
    rules=[
        # Here, we don't use `whitespace_rules`, because the tab-based
        # whitespace rule flags a lot of third-party JSON fixtures
        # under zerver/webhooks that we want preserved verbatim. So
        # we just include the trailing whitespace rule and a modified
        # version of the tab-based whitespace rule (we can't just use
        # exclude in whitespace_rules, since we only want to ignore
        # JSON files with tab-based whitespace, not webhook code).
        trailing_whitespace_rule,
        {
            "pattern": "\t",
            "strip": "\n",
            "exclude": {"zerver/webhooks/"},
            "description": "Fix tab-based whitespace",
        },
        {
            "pattern": r'":["\[\{]',
            "exclude": {"zerver/webhooks/", "zerver/tests/fixtures/"},
            "description": "Require space after : in JSON",
        },
    ],
)
# Markdown files exempt from the max-line-length check; each entry notes why.
markdown_docs_length_exclude = {
    # Has some example Vagrant output that's very long
    "docs/development/setup-vagrant.md",
    # Have wide output in code blocks
    "docs/subsystems/logging.md",
    "docs/subsystems/schema-migrations.md",
    # Have curl commands with JSON that would be messy to wrap
    "zerver/webhooks/helloworld/doc.md",
    "zerver/webhooks/trello/doc.md",
    # Has a very long configuration line
    "templates/zerver/integrations/perforce.md",
    # Has some example code that could perhaps be wrapped
    "templates/zerver/api/incoming-webhooks-walkthrough.md",
    "templates/zerver/api/get-messages.md",
    # This macro has a long indented URL
    "templates/zerver/help/include/git-webhook-url-with-branches-indented.md",
    # These two are the same file and have some too-long lines for GitHub badges
    "README.md",
    "docs/overview/readme.md",
}
# General Markdown rules (docs/, README, webhook docs); the user-facing help
# center gets the stricter help_markdown_rules variant below.
markdown_rules = RuleList(
    langs=["md"],
    rules=[
        *markdown_whitespace_rules,
        *prose_style_rules,
        {
            "pattern": r"\[(?P<url>[^\]]+)\]\((?P=url)\)",
            "description": "Linkified Markdown URLs should use cleaner <http://example.com> syntax.",
        },
        {
            "pattern": "https://zulip.readthedocs.io/en/latest/[a-zA-Z0-9]",
            "exclude": {
                "docs/overview/contributing.md",
                "docs/overview/readme.md",
                "docs/README.md",
                "docs/subsystems/email.md",
            },
            "exclude_line": {
                (
                    "docs/overview/changelog.md",
                    "[latest-changelog]: https://zulip.readthedocs.io/en/latest/overview/changelog.html",
                ),
            },
            "include_only": {"docs/"},
            "description": "Use relative links (../foo/bar.html) to other documents in docs/",
        },
        {
            "pattern": "su zulip -c [^']",
            "include_only": {"docs/"},
            "description": "Always quote arguments using `su zulip -c '` to avoid confusion about how su works.",
        },
        {
            "pattern": r"\][(][^#h]",
            "exclude_pattern": "mailto:",
            "include_only": {"README.md", "CONTRIBUTING.md"},
            "description": "Use absolute links from docs served by GitHub",
        },
        {
            "pattern": r"\.(py|js)#L\d+",
            "include_only": {"docs/"},
            "description": "Don't link directly to line numbers",
        },
    ],
    max_length=120,
    length_exclude=markdown_docs_length_exclude,
    exclude_files_in="templates/zerver/help/",
)
# Help-center Markdown: everything in markdown_rules plus user-facing
# terminology checks (e.g. "organization", never "realm").
help_markdown_rules = RuleList(
    langs=["md"],
    rules=[
        *markdown_rules.rules,
        {
            "pattern": "[a-z][.][A-Z]",
            "description": "Likely missing space after end of sentence",
            "include_only": {"templates/zerver/help/"},
            "exclude_pattern": "Rocket.Chat",
        },
        {
            "pattern": r"\b[rR]ealm[s]?\b",
            "include_only": {"templates/zerver/help/"},
            "exclude": {"templates/zerver/help/change-organization-url.md"},
            "good_lines": ["Organization", "deactivate_realm", "realm_filter"],
            "bad_lines": ["Users are in a realm", "Realm is the best model"],
            "description": "Realms are referred to as Organizations in user-facing docs.",
            # Keycloak uses the term realm as well.
            # Additionally, we allow -realm- as that appears in /api/ doc URLs.
            "exclude_pattern": "(-realm-|[kK]eycloak)",
        },
    ],
    length_exclude=markdown_docs_length_exclude,
)
# Puppet manifests: enforce the profile/base layering convention in both
# the zulip and zulip_ops module trees.
puppet_rules = RuleList(
    langs=["pp"],
    rules=[
        {
            "pattern": r"(include\s+|\$)zulip::(profile|base)\b",
            "exclude": {
                "puppet/zulip/manifests/profile/",
                "puppet/zulip_ops/manifests/",
                "puppet/zulip/manifests/dockervoyager.pp",
            },
            "description": "Abstraction layering violation; only profiles should reference profiles or zulip::base",
        },
        {
            "pattern": r"(include\s+|\$)zulip_ops::(profile|base)\b",
            "exclude": {
                "puppet/zulip/manifests/",
                "puppet/zulip_ops/manifests/profile/",
            },
            "description": "Abstraction layering violation; only profiles should reference profiles or zulip_ops::base",
        },
    ],
)
# Catch-all for plain-text-ish formats: whitespace rules only.
txt_rules = RuleList(
    langs=["txt", "text", "yaml", "rst", "yml"],
    rules=whitespace_rules,
)
# All non-Python RuleLists, consumed by the main linter driver.
non_py_rules = [
    handlebars_rules,
    jinja2_rules,
    css_rules,
    js_rules,
    json_rules,
    markdown_rules,
    help_markdown_rules,
    bash_rules,
    txt_rules,
    puppet_rules,
]
| 40.771579 | 224 | 0.534996 |
ed0e06b3805a10757c529bbee79b14c1e7fcf162 | 1,049 | py | Python | azure/mgmt/network/v2015_06_15/models/express_route_circuit_peering_paged.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2015_06_15/models/express_route_circuit_peering_paged.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2015_06_15/models/express_route_circuit_peering_paged.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ExpressRouteCircuitPeeringPaged(Paged):
    """
    A paging container for iterating over a list of :class:`ExpressRouteCircuitPeering <azure.mgmt.network.v2015_06_15.models.ExpressRouteCircuitPeering>` objects.
    """
    # msrest deserialization map: 'nextLink' carries the continuation URL and
    # 'value' carries the current page of ExpressRouteCircuitPeering items.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[ExpressRouteCircuitPeering]'}
    }
    def __init__(self, *args, **kwargs):
        # All paging behavior is inherited from msrest's Paged base class.
        super(ExpressRouteCircuitPeeringPaged, self).__init__(*args, **kwargs)
| 37.464286 | 162 | 0.593899 |
480dfd617c179948490e6a5f635bf419df290ab0 | 3,970 | py | Python | xknx/devices/sensor.py | onkelbeh/xknx | b7c7427b77b1a709aef8e25b39bbbb62ace6f708 | [
"MIT"
] | null | null | null | xknx/devices/sensor.py | onkelbeh/xknx | b7c7427b77b1a709aef8e25b39bbbb62ace6f708 | [
"MIT"
] | null | null | null | xknx/devices/sensor.py | onkelbeh/xknx | b7c7427b77b1a709aef8e25b39bbbb62ace6f708 | [
"MIT"
] | null | null | null | """
Module for managing a sensor via KNX.
It provides functionality for
* reading the current state from KNX bus.
* watching for state updates from KNX bus.
"""
from typing import TYPE_CHECKING, Any, Iterator, Optional, Union
from xknx.remote_value import RemoteValueControl, RemoteValueSensor
from .device import Device, DeviceCallbackType
if TYPE_CHECKING:
from xknx.remote_value import RemoteValue
from xknx.telegram import Telegram
from xknx.telegram.address import GroupAddressableType
from xknx.xknx import XKNX
class Sensor(Device):
    """A KNX sensor device backed by a single state group address."""

    def __init__(
        self,
        xknx: "XKNX",
        name: str,
        group_address_state: Optional["GroupAddressableType"] = None,
        sync_state: bool = True,
        always_callback: bool = False,
        value_type: Optional[str] = None,
        device_updated_cb: Optional[DeviceCallbackType] = None,
    ):
        """Initialize Sensor class."""
        # pylint: disable=too-many-arguments
        super().__init__(xknx, name, device_updated_cb)
        # These value types represent step/start-stop commands rather than
        # plain readings, so they need the RemoteValueControl wrapper.
        control_value_types = (
            "stepwise_dimming",
            "stepwise_blinds",
            "startstop_dimming",
            "startstop_blinds",
        )
        remote_value_cls = (
            RemoteValueControl
            if value_type in control_value_types
            else RemoteValueSensor
        )
        self.sensor_value: Union[RemoteValueControl, RemoteValueSensor] = remote_value_cls(
            xknx,
            group_address_state=group_address_state,
            sync_state=sync_state,
            value_type=value_type,
            device_name=self.name,
            after_update_cb=self.after_update,
        )
        self.always_callback = always_callback

    def _iter_remote_values(self) -> Iterator["RemoteValue"]:
        """Yield every RemoteValue owned by this device."""
        yield self.sensor_value

    @classmethod
    def from_config(cls, xknx: "XKNX", name: str, config: Any) -> "Sensor":
        """Initialize object from configuration structure."""
        return cls(
            xknx,
            name,
            group_address_state=config.get("group_address_state"),
            sync_state=config.get("sync_state", True),
            always_callback=config.get("always_callback", False),
            value_type=config.get("value_type"),
        )

    async def process_group_write(self, telegram: "Telegram") -> None:
        """Process incoming and outgoing GROUP WRITE telegram."""
        await self.sensor_value.process(telegram, always_callback=self.always_callback)

    async def process_group_response(self, telegram: "Telegram") -> None:
        """Process incoming GroupValueResponse telegrams."""
        await self.sensor_value.process(telegram)

    def unit_of_measurement(self) -> Optional[str]:
        """Return the unit of measurement."""
        return self.sensor_value.unit_of_measurement

    def ha_device_class(self) -> Optional[str]:
        """Return the home assistant device class as string."""
        return self.sensor_value.ha_device_class

    def resolve_state(self) -> Optional[Any]:
        """Return the current state of the sensor as a human readable string."""
        return self.sensor_value.value

    def __str__(self) -> str:
        """Return object as readable string."""
        return (
            f'<Sensor name="{self.name}" '
            f'sensor="{self.sensor_value.group_addr_str()}" '
            f'value="{self.resolve_state()}" '
            f'unit="{self.unit_of_measurement()}"/>'
        )
a1094d37ed79737b1c039137eaf78f9c0af3c56b | 7,613 | py | Python | models/utils/lrs_scheduler.py | AutuanLiu/PyTorch-ML | 884c7723843d9ffb4da09d95eb97886b2cc38f28 | [
"MIT"
] | 9 | 2018-11-05T05:16:06.000Z | 2021-02-17T16:52:29.000Z | models/utils/lrs_scheduler.py | AutuanLiu/PyTorch-DNN | 884c7723843d9ffb4da09d95eb97886b2cc38f28 | [
"MIT"
] | null | null | null | models/utils/lrs_scheduler.py | AutuanLiu/PyTorch-DNN | 884c7723843d9ffb4da09d95eb97886b2cc38f28 | [
"MIT"
] | 1 | 2019-06-19T18:53:14.000Z | 2019-06-19T18:53:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
Description : lrs_scheduler
Reference:
1. https://towardsdatascience.com/transfer-learning-using-pytorch-4c3475f4495
2. https://discuss.pytorch.org/t/solved-learning-rate-decay/6825/5
3. https://discuss.pytorch.org/t/adaptive-learning-rate/320/34
4. https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py
5. https://github.com/bckenstler/CLR
6. https://github.com/fastai/fastai/blob/master/fastai/sgdr.py
7. https://github.com/NVIDIA/nvvl/blob/master/examples/pytorch_superres/model/clr.py
Email : autuanliu@163.com
Date:2018/3/22
"""
from .utils_imports import *
class WarmRestart(lr_scheduler.CosineAnnealingLR):
"""This class implements Stochastic Gradient Descent with Warm Restarts(SGDR): https://arxiv.org/abs/1608.03983.
Set the learning rate of each parameter group using a cosine annealing schedule, When last_epoch=-1, sets initial lr as lr.
This can't support scheduler.step(epoch). please keep epoch=None.
"""
def __init__(self, optimizer, T_max=10, T_mult=2, eta_min=0, last_epoch=-1):
"""implements SGDR
Parameters:
----------
T_max : int
Maximum number of epochs.
T_mult : int
Multiplicative factor of T_max.
eta_min : int
Minimum learning rate. Default: 0.
last_epoch : int
The index of last epoch. Default: -1.
"""
self.T_mult = T_mult
super().__init__(optimizer, T_max, eta_min, last_epoch)
def get_lr(self):
if self.last_epoch == self.T_max:
self.last_epoch = 0
self.T_max *= self.T_mult
return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 for base_lr in self.base_lrs]
def cyclical_lr(step_sz, min_lr=0.001, max_lr=1, mode='triangular', scale_func=None, scale_md='cycles', gamma=1.):
    """Build a cyclical learning rate (CLR) policy for lr_scheduler.LambdaLR.

    Implements CLR (https://arxiv.org/abs/1506.01186): the returned callable
    maps an iteration count to a learning rate cycling between ``min_lr`` and
    ``max_lr``.  The wrapped optimizer's initial lr must be 1, because
    LambdaLR multiplies it by this function's result.

    Parameters
    ----------
    step_sz : int
        Half-cycle length in iterations; typically (2~10) * (len(dataset) / minibatch).
    min_lr : float
        Lower boundary of the cycle (the effective base learning rate).
    max_lr : float
        Upper boundary of the cycle; (max_lr - min_lr) is the amplitude.
    mode : str, optional
        One of {'triangular', 'triangular2', 'exp_range'}; ignored when
        ``scale_func`` is given.
        'triangular'  - constant amplitude,
        'triangular2' - amplitude halved every cycle,
        'exp_range'   - amplitude scaled by gamma**iteration.
    scale_func : callable, optional
        Custom scaling policy with 0 <= scale_func(x) <= 1 for all x >= 0.
    scale_md : str, optional
        {'cycles', 'iterations'}: whether ``scale_func`` is evaluated on the
        cycle number or on the iteration number.  Only used with ``scale_func``.
    gamma : float, optional
        Decay constant for the 'exp_range' mode.

    Returns
    -------
    callable
        ``iteration -> learning rate``, suitable for lr_scheduler.LambdaLR.

    Raises
    ------
    ValueError
        If ``mode`` (or, at call time, the scale mode) is not recognised.

    Examples
    --------
    >>> optimizer = optim.Adam(model.parameters(), lr=1.)
    >>> clr = cyclical_lr(2 * len(train_loader), min_lr=0.001, max_lr=0.005)
    >>> scheduler = lr_scheduler.LambdaLR(optimizer, [clr])
    """
    if scale_func is None:  # identity test, not equality (was: scale_func == None)
        if mode == 'triangular':
            scale_fn = lambda x: 1.
            scale_mode = 'cycles'
        elif mode == 'triangular2':
            scale_fn = lambda x: 1 / (2.**(x - 1))
            scale_mode = 'cycles'
        elif mode == 'exp_range':
            scale_fn = lambda x: gamma**(x)
            scale_mode = 'iterations'
        else:
            raise ValueError(f'The {mode} is not valid value!')
    else:
        scale_fn = scale_func
        scale_mode = scale_md

    def rel_val(iteration, stepsize, mode):
        # Position inside the current triangular cycle: `cycle` is the cycle
        # index, `x` the normalized distance from the peak (x == 0 at peak).
        cycle = math.floor(1 + iteration / (2 * stepsize))
        x = abs(iteration / stepsize - 2 * cycle + 1)
        if mode == 'cycles':
            return max(0, (1 - x)) * scale_fn(cycle)
        elif mode == 'iterations':
            return max(0, (1 - x)) * scale_fn(iteration)
        else:
            raise ValueError(f'The {scale_mode} is not valid value!')

    return lambda iters: min_lr + (max_lr - min_lr) * rel_val(iters, step_sz, scale_mode)
def clr_reset(scheduler, thr):
    """Restart a learning rate schedule once it reaches iteration ``thr``.

    Parameters
    ----------
    scheduler : instance of optim.lr_scheduler
        The scheduler whose ``last_epoch`` counter is inspected.
    thr : int
        The iteration count at which the schedule restarts.

    Returns
    -------
    The (possibly reset) scheduler, so it can be re-assigned in place.

    Examples
    --------
    >>> # some other operations(note the order of operations)
    >>> scheduler.step()
    >>> scheduler = clr_reset(scheduler, 1000)
    >>> optimizer.step()
    """
    reached_reset_point = scheduler.last_epoch == thr
    if reached_reset_point:
        # -1 makes the scheduler's next .step() behave like the very first.
        scheduler.last_epoch = -1
    return scheduler
def warm_restart(scheduler, T_mult=2):
    """Apply an SGDR-style warm restart to a cosine annealing scheduler.

    Parameters
    ----------
    scheduler : instance of optim.lr_scheduler
        A scheduler exposing ``last_epoch`` and ``T_max``.
    T_mult : int
        Cycle-length multiplier applied at each restart; default 2
        (SGDR, https://arxiv.org/abs/1608.03983).

    Returns
    -------
    The (possibly restarted) scheduler, so it can be re-assigned in place.

    Examples
    --------
    >>> # some other operations(note the order of operations)
    >>> scheduler.step()
    >>> scheduler = warm_restart(scheduler, T_mult=2)
    >>> optimizer.step()
    """
    cycle_complete = scheduler.last_epoch == scheduler.T_max
    if cycle_complete:
        # Restart the schedule and stretch the next cycle by T_mult.
        scheduler.last_epoch = -1
        scheduler.T_max *= T_mult
    return scheduler
| 42.294444 | 182 | 0.651254 |
e4422612fb79992ce00a4364d3f6a8a386f2c7c2 | 388 | py | Python | py/tests/problems/dynamic/fib_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 1 | 2020-06-26T13:28:43.000Z | 2020-06-26T13:28:43.000Z | py/tests/problems/dynamic/fib_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 7 | 2021-11-18T19:46:08.000Z | 2022-03-12T01:03:01.000Z | py/tests/problems/dynamic/fib_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | null | null | null | import unittest
from dcp.problems.dynamic.fib import fib1
class Test_Fibonacci(unittest.TestCase):
    """Checks for the fib1 implementation."""

    def test_case1(self):
        self.assertEqual(fib1(10), 55)
from dcp.problems.dynamic.fib import fib2
# Renamed from the duplicate ``Test_Fibonacci``: this second definition used
# to shadow the fib1 suite above, so unittest discovery silently dropped
# those tests.  Distinct names let both suites run.
class Test_Fibonacci2(unittest.TestCase):
    """Checks for the fib2 implementation."""

    def setUp(self):
        pass

    def test_case1(self):
        assert fib2(10) == 55
| 16.166667 | 41 | 0.639175 |
81a7f651b42a3acf9247fc7ea9db00cd7bf7c4cc | 5,890 | py | Python | flexget/plugins/sites/iptorrents.py | davidcollom/Flexget | cd763e04afdf6da8f1673dd567a42d55d4cb3b6c | [
"MIT"
] | 1 | 2021-03-24T11:54:01.000Z | 2021-03-24T11:54:01.000Z | flexget/plugins/sites/iptorrents.py | davidcollom/Flexget | cd763e04afdf6da8f1673dd567a42d55d4cb3b6c | [
"MIT"
] | null | null | null | flexget/plugins/sites/iptorrents.py | davidcollom/Flexget | cd763e04afdf6da8f1673dd567a42d55d4cb3b6c | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote_plus
import re
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode
from flexget.utils.tools import parse_filesize
log = logging.getLogger('iptorrents')
# Mapping of human-readable category names (used in task configuration) to
# the numeric category ids iptorrents.com expects in its search filter URL.
CATEGORIES = {
    # Movies
    'Movie-all': 72,
    'Movie-3D': 87,
    'Movie-480p': 77,
    'Movie-BD-R': 89,
    'Movie-BD-Rip': 90,
    'Movie-DVD-R': 6,
    'Movie-HD-Bluray': 48,
    'Movie-Kids': 54,
    'Movie-MP4': 62,
    'Movie-Non-English': 38,
    'Movie-Packs': 68,
    'Movie-XviD': 17,
    # TV
    'TV-all': 73,
    'TV-Sports': 55,
    'TV-480p': 78,
    'TV-MP4': 66,
    'TV-Non-English': 82,
    'TV-Packs': 65,
    'TV-Packs-Non-English': 83,
    'TV-SD-x264': 79,
    'TV-x264': 5,
    'TV-XVID': 4,
    'TV-Web-DL': 22
}
# All generated search and download links are relative to this host.
BASE_URL = 'https://iptorrents.com'
class UrlRewriteIPTorrents(object):
    """
    IpTorrents urlrewriter and search plugin.

    iptorrents:
      rss_key: xxxxxxxxx  (required)
      uid: xxxxxxxx  (required)
      password: xxxxxxxx  (required)
      category: HD

    Category is any combination of: all, Movie-3D, Movie-480p, Movie-3D,
    Movie-480p, Movie-BD-R, Movie-BD-Rip, Movie-DVD-R,
    Movie-HD-Bluray, Movie-Kids, Movie-MP4,
    Movie-Non-English, Movie-Packs, Movie-XviD,
    TV-all, TV-Sports, TV-480p, TV-MP4, TV-Non-English, TV-Packs,
    TV-Packs-Non-English, TV-SD-x264, TV-x264, TV-XVID, TV-Web-DL
    """

    # Task configuration schema: rss_key/uid/password come from the user's
    # iptorrents account and are required for both rewriting and searching.
    schema = {
        'type': 'object',
        'properties': {
            'rss_key': {'type': 'string'},
            'uid': {'oneOf': [
                {'type': 'integer'},
                {'type': 'string'}
            ]},
            'password': {'type': 'string'},
            'category': one_or_more({
                'oneOf': [
                    {'type': 'integer'},
                    {'type': 'string', 'enum': list(CATEGORIES)},
                ]}),
        },
        'required': ['rss_key', 'uid', 'password'],
        'additionalProperties': False
    }

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True for iptorrents URLs that are not yet direct download links."""
        url = entry['url']
        if url.startswith(BASE_URL + '/download.php/'):
            # Already a direct .torrent download link; nothing to rewrite.
            return False
        if url.startswith(BASE_URL + '/'):
            return True
        return False

    # urlrewriter API
    def url_rewrite(self, task, entry):
        """Rewrite a search (/t?...) URL on the entry into a direct download URL."""
        if 'url' not in entry:
            log.error("Didn't actually get a URL...")
            # Nothing to rewrite; bail out instead of crashing on the
            # entry['url'] access below (previously raised KeyError).
            return
        log.debug("Got the URL: %s" % entry['url'])
        if entry['url'].startswith(BASE_URL + '/t?'):
            # use search
            results = self.search(task, entry)
            if not results:
                raise UrlRewritingError("No search results found")
            # TODO: Search doesn't enforce close match to title, be more picky
            entry['url'] = results[0]['url']

    @plugin.internet(log)
    def search(self, task, entry, config=None):
        """
        Search for name from iptorrents.

        Returns a set of Entry objects carrying the download URL, title,
        seed/leech counts, search_sort and content size of each result.
        """
        categories = config.get('category', 'all')
        # Make sure categories is a list
        if not isinstance(categories, list):
            categories = [categories]
        # If there are any text categories, turn them into their id number
        categories = [c if isinstance(c, int) else CATEGORIES[c]
                      for c in categories]
        filter_url = '&'.join((str(c) + '=') for c in categories)

        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            query = normalize_unicode(search_string)
            query = quote_plus(query.encode('utf8'))
            url = "{base_url}/t?{filter}&q={query}&qf=".format(base_url=BASE_URL, filter=filter_url, query=query)
            log.debug('searching with url: %s' % url)
            req = requests.get(url, cookies={'uid': str(config['uid']), 'pass': config['password']})

            # req.content is bytes on Python 3; testing a str against it
            # raises TypeError, so check the decoded body (req.text) instead.
            if '/u/' + str(config.get('uid')) not in req.text:
                raise plugin.PluginError("Invalid cookies (user not logged in)...")

            soup = get_soup(req.content, parser="html.parser")
            torrents = soup.find('table', {'id': 'torrents'})

            for torrent in torrents.findAll('a', href=re.compile(r'\.torrent$')):
                # A fresh Entry per result; deliberately not reusing the
                # `entry` parameter (the original shadowed it here).
                result = Entry()
                result['url'] = "{base}{link}?torrent_pass={key}".format(
                    base=BASE_URL, link=torrent['href'], key=config.get('rss_key'))
                result['title'] = torrent.findPrevious("a", attrs={'class': 't_title'}).text

                seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text
                leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text
                result['torrent_seeds'] = int(seeders)
                result['torrent_leeches'] = int(leechers)
                result['search_sort'] = torrent_availability(result['torrent_seeds'],
                                                             result['torrent_leeches'])

                size = torrent.findNext(text=re.compile(r'^([\.\d]+) ([GMK]?)B$'))
                size = re.search(r'^([\.\d]+) ([GMK]?)B$', size)
                result['content_size'] = parse_filesize(size.group(0))

                entries.add(result)

        return entries
@event('plugin.register')
def register_plugin():
    # Register as both a urlrewriter (rewrites /t? search URLs) and a search
    # plugin, using plugin API version 2.
    plugin.register(UrlRewriteIPTorrents, 'iptorrents', groups=['urlrewriter', 'search'], api_ver=2)
| 34.244186 | 113 | 0.561969 |
88af56cc4326032792ce9c95fb7cf1f549ae91b6 | 3,521 | py | Python | ipdb/views.py | ojarva/switch-management-django | 63900a5ffb6bfee4c96b9949bd7caa32d0cb217e | [
"MIT",
"Unlicense"
] | 1 | 2021-01-27T16:53:41.000Z | 2021-01-27T16:53:41.000Z | ipdb/views.py | ojarva/switch-management-django | 63900a5ffb6bfee4c96b9949bd7caa32d0cb217e | [
"MIT",
"Unlicense"
] | null | null | null | ipdb/views.py | ojarva/switch-management-django | 63900a5ffb6bfee4c96b9949bd7caa32d0cb217e | [
"MIT",
"Unlicense"
] | 3 | 2016-06-01T07:08:52.000Z | 2021-05-18T05:45:51.000Z | """
IP database views
"""
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt
import datetime
import json
import pickle
from switch.models import MacDB, IPDB
# port mac timestamp
# port -> number,mode,vlan
def get_macdb(request):
    """Return the active MAC database as a pretty-printed JSON response.

    The payload maps each MAC address to the list of switch-port records it
    was seen on, plus an "info" summary with the entry count and the most
    recent sighting timestamp (floored at one year ago).
    """
    ret_dict = {"macdb": {}}
    macdb = MacDB.objects.select_related(depth=2).filter(archived=False)
    # Baseline for the most-recent-sighting search: one year back from now.
    latest_entry = datetime.datetime.now() - datetime.timedelta(days=365)
    for item in macdb:
        # setdefault replaces the explicit "if mac not in dict" dance.
        records = ret_dict["macdb"].setdefault(item.mac, [])
        latest_entry = max(latest_entry, item.timestamp)
        records.append({'switch': item.port.switch.switch_ip,
                        'port_number': item.port.number,
                        'port_mode': item.port.mode,
                        'vlan': item.port.vlan,
                        'last_seen': str(item.timestamp),
                        'first_seen': str(item.first_seen)})
    ret_dict["info"] = {"mac_count": len(ret_dict["macdb"]), "latest_entry": str(latest_entry)}
    return HttpResponse(json.dumps(ret_dict, sort_keys=True, indent=4), content_type="text/plain")
@csrf_exempt
def update_mac_ip(request):
    """Sync the IPDB table from a POSTed ARP table and return a JSON report.

    Expects an "arptables" POST field holding a JSON list of
    {"ip": ..., "mac": ...} entries. For every entry, conflicting rows are
    archived and the matching (ip, mac) row is refreshed or created; rows
    not seen for over 350 seconds are then archived.
    """
    start_time = datetime.datetime.now()
    ret_dict = {"new_entries_added": 0, "old_entries_updated": 0, "new_entries": []}
    if request.POST.get("arptables"):
        table = json.loads(request.POST.get("arptables"))
        for entry in table:
            # 1. Another mac in same IP -> archived
            # NOTE(review): these "duplicate_*_archived" counters are overwritten
            # on every loop pass, so the response only reflects the last entry.
            ret_dict["duplicate_mac_entries_archived"] = IPDB.objects.filter(ip=entry.get("ip")).exclude(mac=entry.get("mac")).filter(archived=False).update(archived=True)
            # 2. MAC in another IP -> archived
            ret_dict["duplicate_ip_entries_archived"] = IPDB.objects.filter(mac=entry.get("mac")).exclude(ip=entry.get("ip")).filter(archived=False).update(archived=True)
            obj = IPDB.objects.filter(mac=entry.get("mac")).filter(ip=entry.get("ip")).filter(archived=False)
            if len(obj) > 0:
                # Existing active row: refresh its last_seen below.
                ret_dict["old_entries_updated"] += 1
                obj = obj[0]
            else:
                # No active row for this (ip, mac) pair: create one.
                obj = IPDB(ip=entry.get("ip"), mac=entry.get("mac"), archived=False)
                ret_dict["new_entries"].append(entry.get("mac"))
                ret_dict["new_entries_added"] += 1
            obj.last_seen = datetime.datetime.now()
            obj.save()
        ret_dict["status"] = "Tables updated"
        ret_dict["success"] = True
    else:
        ret_dict["status"] = "No table specified, no actions taken"
        ret_dict["success"] = False
    # 3. Cleanup: if not seen for > 350 seconds, archive
    ret_dict["too_old_entries_archived"] = IPDB.objects.filter(archived=False).filter(last_seen__lte=datetime.datetime.now()-datetime.timedelta(seconds=350)).update(archived=True)
    # Counters
    ret_dict["number_of_active_entries"] = IPDB.objects.filter(archived=False).count()
    ret_dict["number_of_archived_entries"] = IPDB.objects.filter(archived=True).count()
    ret_dict["number_of_entries"] = ret_dict["number_of_active_entries"] + ret_dict["number_of_archived_entries"]
    end_time = datetime.datetime.now()
    ret_dict["start_time"] = str(start_time)
    ret_dict["end_time"] = str(end_time)
    ret_dict["duration"] = str(end_time-start_time)
    return HttpResponse(json.dumps(ret_dict))
| 50.3 | 238 | 0.685885 |
b8cdf88c51916ace90279cc86bd0f952e4326f31 | 166 | py | Python | testing/redis_smoke_tests.py | rallen10/ergo_particle_gym | 5bb8073d880ab1da60ee333d892ea8a4720f3396 | [
"FSFULLR",
"FSFUL"
] | null | null | null | testing/redis_smoke_tests.py | rallen10/ergo_particle_gym | 5bb8073d880ab1da60ee333d892ea8a4720f3396 | [
"FSFULLR",
"FSFUL"
] | null | null | null | testing/redis_smoke_tests.py | rallen10/ergo_particle_gym | 5bb8073d880ab1da60ee333d892ea8a4720f3396 | [
"FSFULLR",
"FSFUL"
] | 3 | 2019-12-08T08:36:23.000Z | 2021-11-07T17:35:53.000Z | # #Setup connection
# from redis import Redis
# from rq import Queue
# q = Queue(connection=Redis('10.0.0.32'))
# #Create a task
# results = q.enqueue('smoke_tests') | 27.666667 | 42 | 0.698795 |
30ded230fb1fd5c22332638e838d55c1f7cc8482 | 1,463 | py | Python | ooobuild/lo/ucb/package_content_provider.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/ucb/package_content_provider.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/ucb/package_content_provider.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.ucb
from .x_content_provider import XContentProvider as XContentProvider_d4150cc0
class PackageContentProvider(XContentProvider_d4150cc0):
    """
    Service Class

    The Package Content Provider (PCP) implements a ContentProvider for the UniversalContentBroker (UCB).

    It provides access to packages ( zip / jar archive files ) containing folders and streams.

    See Also:
        `API PackageContentProvider <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1ucb_1_1PackageContentProvider.html>`_
    """
    # UNO namespace metadata for this auto-generated service wrapper
    # (presumably consumed by the library's introspection helpers — TODO confirm).
    __ooo_ns__: str = 'com.sun.star.ucb'
    __ooo_full_ns__: str = 'com.sun.star.ucb.PackageContentProvider'
    __ooo_type_name__: str = 'service'

__all__ = ['PackageContentProvider']
43b7013f1f6c3d9b640231a16d32c48abbe67303 | 5,313 | py | Python | opacus/validators/module_validator.py | bogdan-kulynych/opacus | e2d13003a179f64920835bc585f3729b8148279f | [
"Apache-2.0"
] | null | null | null | opacus/validators/module_validator.py | bogdan-kulynych/opacus | e2d13003a179f64920835bc585f3729b8148279f | [
"Apache-2.0"
] | null | null | null | opacus/validators/module_validator.py | bogdan-kulynych/opacus | e2d13003a179f64920835bc585f3729b8148279f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import List
import torch.nn as nn
from opacus.grad_sample.grad_sample_module import GradSampleModule
from opacus.utils.module_utils import clone_module, get_submodule
from opacus.validators.errors import (
IllegalModuleConfigurationError,
UnsupportedModuleError,
)
logger = logging.getLogger(__name__)
class ModuleValidator:
    """
    Encapsulates all the validation logic required by Opacus.

    Also works as a namespace to hold registered validators and fixers.
    """

    # Registries mapping module type -> callable, populated elsewhere
    # via registration decorators.
    VALIDATORS = {}
    FIXERS = {}

    @classmethod
    def validate(
        cls, module: nn.Module, *, strict: bool = False
    ) -> List[UnsupportedModuleError]:
        """
        Validate module and sub_modules by running registered custom validators.

        Returns or raises exceptions depending on ``strict`` flag.

        Args:
            module: The root module to validate.
            strict: Boolean to indicate whether to raise errors or return
                the list of errors.

        Raises:
            UnsupportedModuleError in case of validation failures.
        """
        errors = []
        # 1. validate that module is in training mode
        if not module.training:
            errors.append(
                IllegalModuleConfigurationError("Model needs to be in training mode")
            )
        # 2. validate that all trainable modules are supported by GradSampleModule.
        errors.extend(GradSampleModule.validate(module=module, strict=False))
        # 3. perform module specific validations.
        # TODO: use module name here - it's useful part of error message
        for _, sub_module in module.named_modules():
            if type(sub_module) in ModuleValidator.VALIDATORS:
                sub_module_validator = ModuleValidator.VALIDATORS[type(sub_module)]
                errors.extend(sub_module_validator(sub_module))
        # raise/return as needed
        if strict and len(errors) > 0:
            raise UnsupportedModuleError(errors)
        else:
            return errors

    @classmethod
    def is_valid(cls, module: nn.Module) -> bool:
        """
        Check if module and sub_modules are valid by running registered custom validators.

        Args:
            module: The root module to validate.

        Returns:
            bool
        """
        return len(cls.validate(module, strict=False)) == 0

    @classmethod
    def fix(cls, module: nn.Module) -> nn.Module:
        """
        Make the module and sub_modules DP compatible by running registered custom fixers.

        Args:
            module: The root module to be made compatible.

        Returns:
            Fixed module.
        """
        module = clone_module(module)
        # iterate over all sub_modules
        # Need to get sub_module names first as we will be changing
        # inside the loop.
        sub_module_names = [name for name, _ in module.named_modules()]
        for sub_module_name in sub_module_names:
            # get sub_module
            sub_module = get_submodule(module, sub_module_name)
            # if sub_module has a registered fixer
            if type(sub_module) in ModuleValidator.FIXERS:
                # get a replacement for sub_module
                sub_module_fixer = ModuleValidator.FIXERS[type(sub_module)]
                new_sub_module = sub_module_fixer(sub_module)
                # get module after replacement.
                module = cls._replace_sub_module(
                    root=module,
                    sub_module_name=sub_module_name,
                    new_sub_module=new_sub_module,
                )
                # log it
                logger.info(
                    f"Replaced sub_module {sub_module_name} : {sub_module}"
                    f" with {new_sub_module}"
                )
        # return fixed module
        return module

    @classmethod
    def _replace_sub_module(
        cls,
        *,
        root: nn.Module,
        sub_module_name: str,
        new_sub_module: nn.Module,
    ) -> nn.Module:
        """Replace ``sub_module_name`` inside ``root`` with ``new_sub_module``.

        Returns the resulting root: the replacement itself when the target is
        the root (empty name), otherwise ``root`` mutated in place.
        """
        sub_module_path = sub_module_name.split(".")
        if (
            len(sub_module_path) == 1 and sub_module_path[0] == ""
        ):  # root is the only sub_module of root
            return new_sub_module
        else:  # replace root's descendant
            sub_module_parent = root
            for name in sub_module_path[:-1]:  # descend down to sub_module
                sub_module_parent = sub_module_parent._modules[name]
            sub_module_parent._modules[sub_module_path[-1]] = new_sub_module
            return root

    # Backward-compatible alias for the previous (misspelled) private name.
    _repalce_sub_module = _replace_sub_module

    @classmethod
    def fix_and_validate(cls, module: nn.Module) -> nn.Module:
        """
        Fix the module and sub_modules first, and then run validation.

        Args:
            module: The root module to be fixed and validated

        Returns:
            Fixed module.

        Raises:
            UnsupportedModuleError in case of validation failures.
        """
        # 1. replace any fixable modules
        fixed_module = cls.fix(module)
        # 2. perform module specific validations.
        cls.validate(fixed_module, strict=True)
        # return fixed module
        return fixed_module
| 34.277419 | 90 | 0.615095 |
801cc9c3a1ff6050e2d259e85e998d4d1674a1d1 | 764 | py | Python | N_tree/problems/E_print_left_view_of_binary_tree.py | Princeton21/DSA | 0f2321b284fc54f4ddf73733cc1a8d05e549aeea | [
"MIT"
] | 58 | 2021-01-06T10:05:51.000Z | 2022-02-10T05:15:19.000Z | N_tree/problems/E_print_left_view_of_binary_tree.py | Princeton21/DSA | 0f2321b284fc54f4ddf73733cc1a8d05e549aeea | [
"MIT"
] | 5 | 2021-02-22T04:14:24.000Z | 2021-12-26T09:19:17.000Z | N_tree/problems/E_print_left_view_of_binary_tree.py | Princeton21/DSA | 0f2321b284fc54f4ddf73733cc1a8d05e549aeea | [
"MIT"
] | 27 | 2021-02-09T13:58:33.000Z | 2022-03-06T03:48:08.000Z | class Node:
    def __init__(self, data):
        """Create a binary-tree node holding *data*; both children start empty."""
        self.data = data
        self.left = None
        self.right = None
def leftViewUtil(root, level, max_level):
    """Print the first node encountered at each new depth (pre-order, left-first).

    max_level is a single-element list used as a mutable cell tracking the
    deepest level printed so far.
    """
    if root is None:
        return
    if level > max_level[0]:
        # First node seen at this depth: it belongs to the left view.
        print("% d\t" % (root.data))
        max_level[0] = level
    for child in (root.left, root.right):
        leftViewUtil(child, level + 1, max_level)
def leftView(root):
    """Print the left view of the binary tree rooted at *root*."""
    max_level = [0]  # mutable cell: deepest level printed so far
    leftViewUtil(root, 1, max_level)
if __name__ == "__main__":
    # Disabled benchmark/demo code, kept as a bare (no-op) string literal.
    """
    from timeit import timeit
    root = Node(12)
    root.left = Node(10)
    root.right = Node(20)
    root.right.left = Node(25)
    root.right.right = Node(40)
    print(timeit(lambda: leftView(root), number=10000))  # 0.2834473659968353
    """
4e054197e3744573a7b57584298c00372547d049 | 5,644 | py | Python | libcloud/utils/py3.py | vanclevstik/libcloud | 2d58fa9ed4defec7f44ce0b83aede701dbc806e1 | [
"Apache-2.0"
] | null | null | null | libcloud/utils/py3.py | vanclevstik/libcloud | 2d58fa9ed4defec7f44ce0b83aede701dbc806e1 | [
"Apache-2.0"
] | 1 | 2015-08-05T10:28:40.000Z | 2015-08-05T15:20:33.000Z | libcloud/utils/py3.py | ferewuz/libcloud | 2d58fa9ed4defec7f44ce0b83aede701dbc806e1 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Libcloud Python 2.x and 3.x compatibility layer
# Some methods below are taken from Django PYK3 port which is licensed under 3
# clause BSD license
# https://bitbucket.org/loewis/django-3k
from __future__ import absolute_import
import sys
import types
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
# Interpreter-version flags used throughout this compatibility layer.
# Chained comparisons replace the assign-False-then-conditionally-set pattern.
PY2 = (2, 0) <= sys.version_info < (3, 0)
PY25 = (2, 5) <= sys.version_info < (2, 6)
PY26 = (2, 6) <= sys.version_info < (2, 7)
PY27 = (2, 7) <= sys.version_info < (2, 8)
PY3 = sys.version_info >= (3, 0)
PY32 = (3, 2) <= sys.version_info < (3, 3)
if PY3:
import http.client as httplib
from io import StringIO
import urllib
import urllib as urllib2
# pylint: disable=no-name-in-module
import urllib.parse as urlparse
import xmlrpc.client as xmlrpclib
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from urllib.parse import urlencode as urlencode
from os.path import relpath
from imp import reload
from builtins import bytes
from builtins import next
    # Python 3: query-string parsing lives on urllib.parse.
    parse_qs = urlparse.parse_qs
    parse_qsl = urlparse.parse_qsl
    basestring = str

    def method_type(callable, instance, klass):
        # Bind *callable* as a method; Py3 MethodType requires an instance,
        # so fall back to binding onto a fresh klass() when none is given.
        return types.MethodType(callable, instance or klass())
def b(s):
if isinstance(s, str):
return s.encode('utf-8')
elif isinstance(s, bytes):
return s
else:
raise TypeError("Invalid argument %r for b()" % (s,))
    def ensure_string(s):
        # Decode bytes to text; text passes through unchanged.
        if isinstance(s, str):
            return s
        elif isinstance(s, bytes):
            return s.decode('utf-8')
        else:
            raise TypeError("Invalid argument %r for ensure_string()" % (s,))

    def byte(n):
        # assume n is a Latin-1 string of length 1
        return ord(n)

    u = str  # text type alias (the Py2 branch binds ``u`` analogously)

    def bchr(s):
        """Take an integer and make a 1-character byte string."""
        return bytes([s])

    def dictvalues(d):
        # Materialize values (Py3 .values() is a lazy view object).
        return list(d.values())

    def tostring(node):
        # encoding='unicode' makes ElementTree return str instead of bytes.
        return ET.tostring(node, encoding='unicode')
def hexadigits(s):
# s needs to be a byte string.
return [format(x, "x") for x in s]
else:
import httplib # NOQA
from StringIO import StringIO # NOQA
import urllib # NOQA
import urllib2 # NOQA
import urlparse # NOQA
import xmlrpclib # NOQA
from urllib import quote as _urlquote # NOQA
from urllib import unquote as urlunquote # NOQA
from urllib import urlencode as urlencode # NOQA
from __builtin__ import reload # NOQA
    if PY25:
        # Python 2.5: query-string helpers still live in cgi.
        import cgi

        parse_qs = cgi.parse_qs
        parse_qsl = cgi.parse_qsl
    else:
        parse_qs = urlparse.parse_qs
        parse_qsl = urlparse.parse_qsl

    if not PY25:
        from os.path import relpath  # NOQA

    # Save the real value of unicode because urlquote needs it to tell the
    # difference between a unicode string and a byte string.
    _real_unicode = unicode
    basestring = unicode = str

    method_type = types.MethodType

    # On Py2 these coercions are all identity-ish: str IS the byte type.
    b = bytes = ensure_string = str
    def byte(n):
        # Indexing a Py2 byte string already yields a 1-char str: identity.
        return n

    u = unicode  # text type alias

    def bchr(s):
        """Take an integer and make a 1-character byte string."""
        return chr(s)

    def next(i):
        # Py2 iterators expose .next() instead of __next__().
        return i.next()

    def dictvalues(d):
        # Py2 .values() already returns a list.
        return d.values()

    tostring = ET.tostring  # Py2 ElementTree serializes straight to str

    def urlquote(s, safe='/'):
        if isinstance(s, _real_unicode):
            # Pretend to be py3 by encoding the URI automatically.
            s = s.encode('utf8')
        return _urlquote(s, safe)

    def hexadigits(s):
        # s needs to be a string.
        return [x.encode("hex") for x in s]
    if PY25:
        # os.path.relpath only exists from Py2.6; backport it for 2.5.
        import posixpath

        # Taken from http://jimmyg.org/work/code/barenecessities/index.html
        # (MIT license)
        # pylint: disable=function-redefined
        def relpath(path, start=posixpath.curdir):  # NOQA
            """Return a relative version of a path"""
            if not path:
                raise ValueError("no path specified")
            start_list = posixpath.abspath(start).split(posixpath.sep)
            path_list = posixpath.abspath(path).split(posixpath.sep)
            # Work out how much of the filepath is shared by start and path.
            i = len(posixpath.commonprefix([start_list, path_list]))
            rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
            if not rel_list:
                return posixpath.curdir
            return posixpath.join(*rel_list)
# Py2.5/2.6 need the unittest2 backport; 2.7+ ships the needed features.
if PY27 or PY3:
    unittest2_required = False
else:
    unittest2_required = True
| 27.940594 | 78 | 0.653437 |
074adff4c43d36ce6a4e86783bdb968e32aff98e | 5,032 | py | Python | utility.py | Cadyshack/AIPND-Project-2-Create-Your-Own-Image-Classifier | 939d03f6a04d5983c409a707edbb1d773b146ca0 | [
"MIT"
] | null | null | null | utility.py | Cadyshack/AIPND-Project-2-Create-Your-Own-Image-Classifier | 939d03f6a04d5983c409a707edbb1d773b146ca0 | [
"MIT"
] | null | null | null | utility.py | Cadyshack/AIPND-Project-2-Create-Your-Own-Image-Classifier | 939d03f6a04d5983c409a707edbb1d773b146ca0 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torchvision import datasets, transforms
from PIL import Image
import json
def load_data(image_dir):
    """
    Retrieve images from path given to image folder which will be used to train the model

    image_dir:
        relative path to the folder of images that are to be
        classified by the classifier function (string).
        The Image folder is expected to have three sub folders:
        - train
        - valid
        - test

    Returns a dict with the three ImageFolder datasets and the three
    corresponding DataLoaders (batch size 32, shuffled).
    """
    train_dir = image_dir + '/train'
    valid_dir = image_dir + '/valid'
    test_dir = image_dir + '/test'

    # ImageNet normalization, shared by every pipeline below.
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    # Training pipeline adds random augmentation.
    train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                           transforms.RandomResizedCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize])

    # Validation and test share one deterministic resize/center-crop pipeline
    # (previously duplicated verbatim).
    eval_transforms = transforms.Compose([transforms.Resize(255),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          normalize])

    # Load the datasets with ImageFolder
    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=eval_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=eval_transforms)

    # Using the image datasets and the transforms, define the dataloaders
    trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=32, shuffle=True)
    testloader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=True)

    image_dict = { "train_data": train_data,
                   "valid_data": valid_data,
                   "test_data": test_data,
                   "trainloader": trainloader,
                   "validloader": validloader,
                   "testloader": testloader
                  }
    return image_dict
def process_image(image):
    """
    Scales, crops, and normalizes a PIL image for a PyTorch model.

    Parameters:
        image:
            path to the image to be processed

    Returns:
        a torch.Tensor of shape (3, 224, 224)
    """
    # Process a PIL image for use in a PyTorch model
    img = Image.open(image)
    # Resize the image and get the new dimensions to use in crop below
    img.thumbnail((256,256))
    width, height = img.size
    left = (width - 224)/2
    top = (height - 224)/2
    right = (left + 224)
    bottom = (top + 224)
    # Crop the center of the image to 224 x 224 dimension
    img = img.crop((left, top, right, bottom))
    np_image = np.array(img)
    np_image = np_image.astype('float32') / 255.0
    # Normalize images in specific way needed for network
    # NOTE(review): the float64 mean/std upcast the array to float64; callers
    # appear to call .float() afterwards (see predict) — TODO confirm.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (np_image - mean) / std
    # Transpose image array to fit PyTorch expected shape with colour channel as first dimension
    np_image = np_image.transpose((2,0,1))
    image_tensor = torch.from_numpy(np_image)
    return image_tensor
def cat_to_name(file):
    """
    Loads a json file with mapping from category to flower name

    Parameters:
        file:
            name of .json mapping file

    Returns:
        a python dictionary with mapping of categories to flower names
    """
    # Return directly instead of binding a local that shadows this function's name.
    with open(file, 'r') as f:
        return json.load(f)
def predict(image_path, model, topk, device):
    """
    Predict the class (or classes) of an image using a trained deep learning model.

    Parameters:
        image_path:
            path to image to predict
        model:
            CNN model to use to make prediction
        topk:
            The number of results you wish to be printed
        device:
            Either "cpu" or "cuda".

    Returns:
        The (values, indices) pair from torch.topk over the class probabilities.
    """
    # Preprocess, add a batch dimension, and move to the target device/dtype.
    image = process_image(image_path)
    image = image.unsqueeze(0)
    image = image.float()
    image = image.to(device)
    model.eval()
    model.to(device)
    with torch.no_grad():
        log_ps = model.forward(image)
    # The model outputs log-probabilities; exponentiate to recover probabilities.
    ps = torch.exp(log_ps)
    return ps.topk(topk, dim=1)
07201b67beeaf697ef36f79b77817d9b58e7ce68 | 1,022 | py | Python | data_processing/ddh5_Plotting/ddh5_freq_sweep_plotting.py | PITT-HATLAB/data_processing | ad49bb921e0fc90b90f0b696e2cbb662019f5f40 | [
"MIT"
] | null | null | null | data_processing/ddh5_Plotting/ddh5_freq_sweep_plotting.py | PITT-HATLAB/data_processing | ad49bb921e0fc90b90f0b696e2cbb662019f5f40 | [
"MIT"
] | null | null | null | data_processing/ddh5_Plotting/ddh5_freq_sweep_plotting.py | PITT-HATLAB/data_processing | ad49bb921e0fc90b90f0b696e2cbb662019f5f40 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 1 14:02:03 2021
@author: Hatlab_3
for getting many files out of a particular directory with Wolfie's directory structure'
"""
import easygui
import os
from plottr.apps.autoplot import autoplotDDH5, script, main
from plottr.data.datadict_storage import all_datadicts_from_hdf5
cwd = r'E:\Data\Cooldown_20210104\pitchfork_freq_sweeps\2_kappa'
def find_all_ddh5(cwd):
    """Collect all .ddh5 files up to two directory levels below *cwd*.

    Each entry of cwd is expected to be a directory containing either .ddh5
    files directly or one more level of sub-directories holding them.
    Returns a list of absolute-ish file paths.
    """
    filepaths = []
    for path in os.listdir(cwd):
        subdir = os.path.join(cwd, path)  # os.path.join: portable separators
        for sub in os.listdir(subdir):
            print(sub)  # progress output, preserved from the original
            if sub.split('.')[-1] == 'ddh5':
                filepaths.append(os.path.join(subdir, sub))
            else:
                inner = os.path.join(subdir, sub)
                for subsub in os.listdir(inner):
                    if subsub.split('.')[-1] == 'ddh5':
                        filepaths.append(os.path.join(inner, subsub))
    return filepaths
# Open a plottr autoplot window for every .ddh5 file discovered under cwd.
res = find_all_ddh5(cwd)
for filename in res:
    main(filename, 'data')
63507e6d9d794ca159e3efee3d2cf66f41c91ef9 | 8,398 | py | Python | khmer/__init__.py | Dmarch28/khmer | 86ce40a6619fc6f6e9c4ce18ce1e89de93ba2f83 | [
"CNRI-Python"
] | null | null | null | khmer/__init__.py | Dmarch28/khmer | 86ce40a6619fc6f6e9c4ce18ce1e89de93ba2f83 | [
"CNRI-Python"
] | 4 | 2021-03-19T08:45:22.000Z | 2022-02-18T21:25:42.000Z | khmer/__init__.py | Dmarch28/khmer | 86ce40a6619fc6f6e9c4ce18ce1e89de93ba2f83 | [
"CNRI-Python"
] | 1 | 2021-03-16T12:01:37.000Z | 2021-03-16T12:01:37.000Z | # This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2010-2015, Michigan State University.
# Copyright (C) 2015-2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=too-few-public-methods,no-init,missing-docstring
"""This is khmer; please see http://khmer.readthedocs.io/."""
from collections import namedtuple
from math import log
import json
from khmer._khmer import Read
from khmer._khmer import forward_hash
# tests/test_{functions,countgraph,counting_single}.py
from khmer._khmer import forward_hash_no_rc # tests/test_functions.py
from khmer._khmer import reverse_hash # tests/test_functions.py
# tests/counting_single.py
from khmer._khmer import hash_murmur3 # tests/test_functions.py
from khmer._khmer import hash_no_rc_murmur3 # tests/test_functions.py
from khmer._khmer import reverse_complement
from khmer._khmer import get_version_cpp as __version_cpp__
# tests/test_version.py
from khmer._khmer import ReadParser # sandbox/to-casava-1.8-fastq.py
# tests/test_read_parsers.py,scripts/{filter-abund-single,load-graph}.py
# scripts/{abundance-dist-single,load-into-counting}.py
from khmer._khmer import FILETYPES
from khmer._oxli.graphs import (Counttable, QFCounttable,BufferedQFCounttable, Nodetable,
CyclicCounttable,
SmallCounttable, Countgraph, SmallCountgraph,
Nodegraph)
from khmer._oxli.labeling import GraphLabels
from khmer._oxli.legacy_partitioning import SubsetPartition, PrePartitionInfo
from khmer._oxli.parsing import FastxParser
from khmer._oxli.readaligner import ReadAligner
from khmer._oxli.utils import get_n_primes_near_x, is_prime
import sys
from struct import pack, unpack
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# Buckets stored per byte for each sketch/table type (used for size estimates).
_buckets_per_byte = {
    # calculated by hand from settings in third-part/cqf/gqf.h
    'qfcounttable': 1 / 1.26,
    'countgraph': 1,
    'counttable': 1,
    'smallcountgraph': 2,
    'nodegraph': 8,
}
def extract_nodegraph_info(filename):
    """Open the given nodegraph file and return a tuple of information.

    Returns: the k-mer size, the table size (rounded to the nearest hundred),
    the number of tables, the version of the table format, the type of table
    flag, and the occupied count.

    Raises ValueError if the file cannot be read/parsed or the "OXLI"
    signature is missing.

    Keyword argument:
    filename -- the name of the nodegraph file to inspect
    """
    # Local import: module level only brings in pack/unpack, not struct.error.
    from struct import calcsize, unpack, error as StructError

    try:
        with open(filename, 'rb') as nodegraph:
            signature, = unpack('4s', nodegraph.read(4))
            version, = unpack('B', nodegraph.read(1))
            ht_type, = unpack('B', nodegraph.read(1))
            ksize, = unpack('I', nodegraph.read(calcsize('I')))
            n_tables, = unpack('B', nodegraph.read(calcsize('B')))
            occupied, = unpack('Q', nodegraph.read(calcsize('Q')))
            table_size, = unpack('Q', nodegraph.read(calcsize('Q')))
    # Narrowed from a bare `except:` so unrelated errors are not swallowed
    # and the original cause is chained.
    except (OSError, StructError) as exc:
        raise ValueError("Node graph '{}' is corrupt ".format(filename)) from exc

    # Checked outside the try so the specific message is not masked by the
    # generic "corrupt" one (the old bare except swallowed it).
    if signature != b"OXLI":
        raise ValueError("Node graph '{}' is missing file type "
                         "signature".format(filename) + str(signature))

    return ksize, round(table_size, -2), n_tables, version, ht_type, occupied
def extract_countgraph_info(filename):
    """Open the given countgraph file and return a tuple of information.

    Return: a CgInfo namedtuple of the k-mer size, number of tables, table
    size (rounded to the nearest hundred), bigcount flag (None for smallcount
    files), table-format version, table type flag, and occupied count.

    Raises ValueError if the file cannot be read/parsed or the "OXLI"
    signature is missing.

    Keyword argument:
    filename -- the name of the countgraph file to inspect
    """
    # Local import: module level only brings in pack/unpack, not struct.error.
    from struct import calcsize, unpack, error as StructError

    CgInfo = namedtuple("CgInfo", ['ksize', 'n_tables', 'table_size',
                                   'use_bigcount', 'version', 'ht_type',
                                   'n_occupied'])
    try:
        with open(filename, 'rb') as countgraph:
            signature, = unpack('4s', countgraph.read(4))
            version, = unpack('B', countgraph.read(1))
            ht_type, = unpack('B', countgraph.read(1))
            # Smallcount files omit the bigcount byte entirely.
            if ht_type != FILETYPES['SMALLCOUNT']:
                use_bigcount, = unpack('B', countgraph.read(1))
            else:
                use_bigcount = None
            ksize, = unpack('I', countgraph.read(calcsize('I')))
            n_tables, = unpack('B', countgraph.read(1))
            occupied, = unpack('Q', countgraph.read(calcsize('Q')))
            table_size, = unpack('Q', countgraph.read(calcsize('Q')))
    # Narrowed from a bare `except:` so unrelated errors are not swallowed
    # and the original cause is chained.
    except (OSError, StructError) as exc:
        raise ValueError("Count graph file '{}' is corrupt ".format(filename)) from exc

    # Checked outside the try so the specific message is not masked by the
    # generic "corrupt" one (the old bare except swallowed it).
    if signature != b'OXLI':
        raise ValueError("Count graph file '{}' is missing file type "
                         "signature. ".format(filename) + str(signature))

    return CgInfo(ksize, n_tables, round(table_size, -2), use_bigcount,
                  version, ht_type, occupied)
def calc_expected_collisions(graph, force=False, max_false_pos=.2):
    """Do a quick & dirty expected collision rate calculation on a graph.

    Estimates the false-positive rate from the table occupancy; when it
    exceeds *max_false_pos* a warning is printed to stderr and, unless
    *force* is set, the process exits with status 1.

    Keyword argument:
    graph: the countgraph or nodegraph object to inspect
    """
    table_sizes = graph.hashsizes()
    num_tables = float(len(table_sizes))
    occupancy = float(graph.n_occupied())
    # Per-table FP probability, raised to the number of tables.
    fp_one = occupancy / min(table_sizes)
    fp_all = fp_one ** num_tables

    if fp_all > max_false_pos:
        warning_lines = [
            "**",
            "** ERROR: the graph structure is too small for ",
            "** this data set. Increase data structure size",
            "** with --max_memory_usage/-M.",
            "**",
            "** Do not use these results!!",
            "**",
            "** (estimated false positive rate of %.3f; max recommended %.3f)"
            % (fp_all, max_false_pos),
            "**",
        ]
        for line in warning_lines:
            print(line, file=sys.stderr)
        if not force:
            sys.exit(1)

    return fp_all
from khmer._oxli.assembly import (LinearAssembler, SimpleLabeledAssembler,
JunctionCountAssembler)
from khmer._oxli.hashset import HashSet
from khmer._oxli.hllcounter import HLLCounter
from khmer._oxli.labeling import GraphLabels
| 37.491071 | 89 | 0.671946 |
0a63ee815c81e780f3db874597b7906f63973fc0 | 1,558 | py | Python | py34env/Scripts/enhancer.py | EKiefer/edge-starter | cc1bbac3fb7191b16eeca03b2a596d232b4ece7f | [
"MIT"
] | null | null | null | py34env/Scripts/enhancer.py | EKiefer/edge-starter | cc1bbac3fb7191b16eeca03b2a596d232b4ece7f | [
"MIT"
] | null | null | null | py34env/Scripts/enhancer.py | EKiefer/edge-starter | cc1bbac3fb7191b16eeca03b2a596d232b4ece7f | [
"MIT"
] | null | null | null | #!c:\users\ekiefer\projects\django\my_edge\py34env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
    """A frame showing an image plus a slider driving a PIL enhancer.

    Dragging the slider re-applies ``enhancer`` (e.g. ImageEnhance.Contrast)
    to the source image, using the slider position as enhancement factor.
    """

    def __init__(self, master, image, name, enhancer, lo, hi):
        """Build the widget.

        master   -- parent Tk widget
        image    -- source PIL image (left untouched; enhanced copies shown)
        name     -- label text shown next to the slider
        enhancer -- ImageEnhance class to apply
        lo, hi   -- slider range
        """
        Frame.__init__(self, master)

        # set up the image
        self.tkim = ImageTk.PhotoImage(image.mode, image.size)
        self.enhancer = enhancer(image)
        self.update("1.0")  # normalize: factor 1.0 shows the original image

        # image window
        Label(self, image=self.tkim).pack()

        # scale
        s = Scale(self, label=name, orient=HORIZONTAL,
                  from_=lo, to=hi, resolution=0.01,
                  command=self.update)
        s.set(self.value)
        s.pack()

    def update(self, value):
        # Tk passes the scale position as a string; parse it with float()
        # instead of eval() -- same numeric result, without evaluating
        # arbitrary expressions.
        self.value = float(value)
        self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
# Usage: enhancer.py <imagefile> -- opens four windows, one per enhancer,
# each with its own slider.
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))  # shrink in place so the previews stay small
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()  # blocks until the windows are closed
| 25.966667 | 80 | 0.65276 |
be949f877b35b00d9729eb61c88a04277376e5a8 | 1,238 | py | Python | src/pykeen/triples/stats.py | Rodrigo-A-Pereira/pykeen | 76c5acb707faa524b5951b0d1d85ab1afe806462 | [
"MIT"
] | 1 | 2021-03-24T13:25:54.000Z | 2021-03-24T13:25:54.000Z | src/pykeen/triples/stats.py | Rodrigo-A-Pereira/pykeen | 76c5acb707faa524b5951b0d1d85ab1afe806462 | [
"MIT"
] | null | null | null | src/pykeen/triples/stats.py | Rodrigo-A-Pereira/pykeen | 76c5acb707faa524b5951b0d1d85ab1afe806462 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Compute statistics of a KG to be able to interpret the performance of KGE models."""
from collections import Counter
from typing import Mapping
import numpy as np
def compute_number_tails_per_head_relation_tuples(triples: np.ndarray) -> Mapping[str, int]:
    """Compute number of tails per head-relation pairs."""
    return _count_two_columns(triples, slice(0, 1), slice(1, 2))


def compute_number_heads_per_tail_relation_tuples(triples: np.ndarray) -> Mapping[str, int]:
    """Compute number of heads per relation-tail pairs."""
    return _count_two_columns(triples, slice(1, 2), slice(2, 3))


def _count_two_columns(triples: np.ndarray, c1_slice: slice, c2_slice: slice) -> Mapping[str, int]:
    """Count how often each (column-1, column-2) label pair occurs."""
    pairs = np.concatenate([triples[:, c1_slice], triples[:, c2_slice]], axis=-1)
    tally = Counter(tuple(row) for row in pairs.tolist())
    return _get_sorted_dict_from_counter(tally)


def _get_sorted_dict_from_counter(counter: Counter) -> Mapping[str, int]:
    """Flatten a pair counter into 'label1 label2' keys, most common first."""
    result = {}
    for (c1_label, c2_label), count in counter.most_common():
        result[f'{c1_label} {c2_label}'] = count
    return result
b354cc57d2c4afd6b0567f412e20ff314f7a9e97 | 2,790 | py | Python | dataset.py | mtyhon/ckconv | 056ec93c039e8bcda89f07ff9fdece3e7373b0bf | [
"MIT"
] | null | null | null | dataset.py | mtyhon/ckconv | 056ec93c039e8bcda89f07ff9fdece3e7373b0bf | [
"MIT"
] | null | null | null | dataset.py | mtyhon/ckconv | 056ec93c039e8bcda89f07ff9fdece3e7373b0bf | [
"MIT"
] | null | null | null | import torch
from datasets import (
AdditionProblem,
CopyMemory,
MNIST,
CIFAR10,
SpeechCommands,
CharTrajectories,
)
import ml_collections
from typing import Tuple
def dataset_constructor(
    config: ml_collections.ConfigDict,
) -> Tuple[
    torch.utils.data.Dataset, torch.utils.data.Dataset, torch.utils.data.Dataset
]:
    """
    Create datasets loaders for the chosen datasets
    :return: Tuple (training_set, validation_set, test_set)
    """
    dataset_classes = {
        "AddProblem": AdditionProblem,
        "CopyMemory": CopyMemory,
        "MNIST": MNIST,
        "CIFAR10": CIFAR10,
        "SpeechCommands": SpeechCommands,
        "CharTrajectories": CharTrajectories,
    }
    dataset_cls = dataset_classes[config.dataset]

    # Constructor arguments shared by every partition.
    common_kwargs = dict(
        seq_length=config.seq_length,
        memory_size=config.memory_size,
        mfcc=config.mfcc,
        dropped_rate=config.drop_rate,
    )

    training_set = dataset_cls(partition="train", sr=config.sr_train, **common_kwargs)

    # The test set can be sampled at a different rate; sr_test == 0 means
    # "same as training".
    test_sr = config.sr_train if config.sr_test == 0 else config.sr_test
    test_set = dataset_cls(partition="test", sr=test_sr, **common_kwargs)

    # Only these datasets ship a dedicated validation split.
    if config.dataset in ["SpeechCommands", "CharTrajectories"]:
        validation_set = dataset_cls(partition="val", sr=config.sr_train, **common_kwargs)
    else:
        validation_set = None

    return training_set, validation_set, test_set
def get_dataset(
    config: ml_collections.ConfigDict,
    num_workers: int = 4,
    data_root="./data",
) -> Tuple[dict, torch.utils.data.DataLoader]:
    """
    Create datasets loaders for the chosen datasets
    :return: Tuple ( dict(train_loader, val_loader) , test_loader)
    """
    training_set, validation_set, test_set = dataset_constructor(config)

    def _make_loader(dataset, shuffle):
        # All loaders share batch size and worker count; only shuffling differs.
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=config.batch_size,
            shuffle=shuffle,
            num_workers=num_workers,
        )

    training_loader = _make_loader(training_set, shuffle=True)
    test_loader = _make_loader(test_set, shuffle=False)

    # Datasets without a validation split fall back to the test loader.
    if validation_set is None:
        val_loader = test_loader
    else:
        val_loader = _make_loader(validation_set, shuffle=False)

    dataloaders = {"train": training_loader, "validation": val_loader}
    return dataloaders, test_loader
| 27.352941 | 80 | 0.642652 |
463fe211b7350c7393eadabce0e2526f5f128d67 | 5,999 | py | Python | resotocore/tests/core/worker_task_queue_test.py | MrMarvin/cloudkeeper | cdca21c1a3b945da6e53a5dbb37a437e1d46f557 | [
"Apache-2.0"
] | null | null | null | resotocore/tests/core/worker_task_queue_test.py | MrMarvin/cloudkeeper | cdca21c1a3b945da6e53a5dbb37a437e1d46f557 | [
"Apache-2.0"
] | null | null | null | resotocore/tests/core/worker_task_queue_test.py | MrMarvin/cloudkeeper | cdca21c1a3b945da6e53a5dbb37a437e1d46f557 | [
"Apache-2.0"
] | null | null | null | import asyncio
from collections import defaultdict
from datetime import timedelta
from typing import AsyncGenerator, Dict, List, Tuple
from pytest import fixture, mark
from core.model.graph_access import Section
from core.model.resolve_in_graph import GraphResolver, NodePath
from core.util import group_by, identity, value_in_path
from core.worker_task_queue import WorkerTaskDescription, WorkerTaskQueue, WorkerTask
@fixture
def task_queue() -> WorkerTaskQueue:
    # Fresh, empty queue per test.
    return WorkerTaskQueue()


@fixture
def performed_by() -> Dict[str, List[str]]:
    # Maps task id -> list of worker ids that picked the task up.
    return defaultdict(list)


@fixture
def incoming_tasks() -> List[WorkerTask]:
    # Collects every task any worker received, in arrival order.
    return []
@fixture
async def worker(
    task_queue: WorkerTaskQueue, performed_by: Dict[str, List[str]], incoming_tasks: List[WorkerTask]
) -> AsyncGenerator[Tuple[WorkerTaskDescription, WorkerTaskDescription, WorkerTaskDescription], None]:
    """Spawn four worker tasks attached to *task_queue*.

    Each worker handles four task kinds: 'success_task' (acknowledge),
    'fail_task' (report error), 'wait_task' (ignore, simulating an
    unresponsive worker) and 'tag' (mutate the node's tags).  Yields the
    three non-tag descriptions; the workers are cancelled on teardown.
    """
    success = WorkerTaskDescription("success_task")
    fail = WorkerTaskDescription("fail_task")
    wait = WorkerTaskDescription("wait_task")
    tag = WorkerTaskDescription("tag")

    async def do_work(worker_id: str, task_descriptions: List[WorkerTaskDescription]) -> None:
        # Endless consumer loop: record each incoming task and react by name.
        async with task_queue.attach(worker_id, task_descriptions) as tasks:
            while True:
                task: WorkerTask = await tasks.get()
                incoming_tasks.append(task)
                performed_by[task.id].append(worker_id)
                if task.name == success.name:
                    await task_queue.acknowledge_task(worker_id, task.id, {"result": "done!"})
                elif task.name == fail.name:
                    await task_queue.error_task(worker_id, task.id, ";)")
                elif task.name == wait.name:
                    # if we come here, neither success nor failure was given, ignore the task
                    pass
                elif task.name == "tag":
                    node = task.data["node"]
                    # Drop resolved ancestor keys from every content section.
                    for key in GraphResolver.resolved_ancestors.keys():
                        for section in Section.content:
                            if section in node:
                                node[section].pop(key, None)
                    # update or delete tags
                    if "tags" not in node:
                        node["tags"] = {}
                    # NOTE(review): the 'continue' statements below are placed to
                    # skip the acknowledge for pure tag mutations -- confirm this
                    # matches the intended protocol.
                    if task.data.get("delete"):
                        for a in task.data.get("delete"):  # type: ignore
                            node["tags"].pop(a, None)
                        continue
                    elif task.data.get("update"):
                        for k, v in task.data.get("update").items():  # type: ignore
                            node["tags"][k] = v
                        continue
                    # for testing purposes: change revision number
                    kind: str = value_in_path(node, NodePath.reported_kind)  # type: ignore
                    if kind == "bla":
                        node["revision"] = "changed"
                    await task_queue.acknowledge_task(worker_id, task.id, node)

    # Four concurrent workers, all subscribed to the same task kinds.
    workers = [asyncio.create_task(do_work(f"w{a}", [success, fail, wait, tag])) for a in range(0, 4)]
    await asyncio.sleep(0)  # let the workers attach before the test runs
    yield success, fail, wait
    for worker in workers:
        worker.cancel()
@mark.asyncio
async def test_handle_work_successfully(
    task_queue: WorkerTaskQueue,
    worker: Tuple[WorkerTaskDescription, WorkerTaskDescription, WorkerTaskDescription],
    performed_by: Dict[str, List[str]],
) -> None:
    """All acknowledged tasks resolve and work is spread evenly over workers."""
    success_task, _, _ = worker
    all_tasks = [create_task(str(n), success_task.name) for n in range(0, 20)]
    for t in all_tasks:
        await task_queue.add_task(t)

    # Every task's callback future resolves with the worker's payload.
    results = await asyncio.gather(*[a.callback for a in all_tasks])
    assert results == [{"result": "done!"} for _ in range(0, 20)]

    # make sure the work is split equally between all workers: 20 work items by 4 workers: 5 work items each
    by_worker = group_by(identity, (item for sublist in performed_by.values() for item in sublist))
    assert len(by_worker) == 4
    for work_done in by_worker.values():
        assert len(work_done) == 5
@mark.asyncio
async def test_handle_failure(
    task_queue: WorkerTaskQueue,
    worker: Tuple[WorkerTaskDescription, WorkerTaskDescription, WorkerTaskDescription],
) -> None:
    """Tasks the worker reports as errors surface as exceptions on the callback."""
    _, fail_task, _ = worker
    all_tasks = [create_task(str(n), fail_task.name) for n in range(0, 20)]
    for t in all_tasks:
        await task_queue.add_task(t)

    results = await asyncio.gather(*[a.callback for a in all_tasks], return_exceptions=True)
    # make sure all results are failures
    for r in results:
        assert isinstance(r, Exception)
@mark.asyncio
async def test_handle_outdated(
    task_queue: WorkerTaskQueue,
    worker: Tuple[WorkerTaskDescription, WorkerTaskDescription, WorkerTaskDescription],
    performed_by: Dict[str, List[str]],
) -> None:
    """Unanswered tasks are retried a fixed number of times and then fail."""
    _, _, outdated_task = worker
    all_tasks = [create_task(str(n), outdated_task.name) for n in range(0, 20)]
    for t in all_tasks:
        await task_queue.add_task(t)
    await asyncio.sleep(0)

    # Drive the outdated-task check until the queue is drained.
    count_outstanding = 0
    while task_queue.outstanding_tasks:
        await task_queue.check_outdated_unassigned_tasks()
        count_outstanding += 1

    # every message is retried 3 times ==> 4 times to get rid of all messages
    assert count_outstanding == 4

    results = await asyncio.gather(*[a.callback for a in all_tasks], return_exceptions=True)
    # make sure all results are failures
    for r in results:
        assert isinstance(r, Exception)

    # 20 work items by 4 workers: 5 work items each + retried 3 times (15) => 20
    by_worker = group_by(identity, (item for sublist in performed_by.values() for item in sublist))
    assert len(by_worker) == 4
    for work_done in by_worker.values():
        assert len(work_done) == 20
def create_task(uid: str, name: str) -> WorkerTask:
    # Build a WorkerTask with empty data/attributes and a fresh future as
    # completion callback.  NOTE(review): timedelta() is a zero duration --
    # presumably this makes the task expire immediately for the outdated
    # checks; confirm against the WorkerTask definition.
    return WorkerTask(uid, name, {}, {}, asyncio.get_event_loop().create_future(), timedelta())
cfaac7fef9ce6ea7d02b702423841383d42601d2 | 37,032 | py | Python | tools/testbed/client.py | ghsecuritylab/Alios_SDK | edd416e7d2961db42c2100ac2d6237ee527d1aee | [
"Apache-2.0"
] | 2 | 2021-05-28T08:25:33.000Z | 2021-11-17T02:58:50.000Z | tools/testbed/client.py | ghsecuritylab/Alios_SDK | edd416e7d2961db42c2100ac2d6237ee527d1aee | [
"Apache-2.0"
] | null | null | null | tools/testbed/client.py | ghsecuritylab/Alios_SDK | edd416e7d2961db42c2100ac2d6237ee527d1aee | [
"Apache-2.0"
] | 5 | 2018-05-23T02:56:10.000Z | 2021-01-02T16:44:09.000Z | import os, sys, time, platform, json, traceback, random, re, glob, uuid
import socket, ssl, thread, threading, subprocess, signal, Queue, importlib
from os import path
import TBframe as pkt
MAX_MSG_LENGTH = 65536  # max number of bytes read from the server socket at once
ENCRYPT = True          # wrap the server connection in TLS
DEBUG = True            # print tracebacks on unexpected exceptions
EN_STATUS_POLL = False  # periodically poll devices for status (devname/mac/...)
LOCALLOG = False        # also mirror device logs to ~/.tbclient/<device>.log
def signal_handler(sig, frame):
    # Convert SIGINT into a KeyboardInterrupt so blocking loops can unwind.
    print "received SIGINT"
    raise KeyboardInterrupt
def queue_safeput(queue, item):
    """Best-effort, non-blocking put: silently drop *item* if *queue* is full.

    Used for status-poll commands and poll-log lines where losing an entry
    is preferable to blocking the caller.
    """
    try:
        queue.put(item, False)
    except Queue.Full:
        # Only swallow a full queue; the original bare 'except:' also hid
        # real errors (and KeyboardInterrupt), which should propagate.
        pass
class ConnectionLost(Exception):
    """Raised internally when the connection to the server drops, so the
    main loop in client_func() can reconnect."""
    pass
class Client:
def __init__(self):
    """Initialize client state; no I/O happens here."""
    # TCP/TLS socket to the testbed server; None until connected.
    self.service_socket = None
    # Outgoing packets [type, content]; drained by packet_send_thread().
    self.output_queue = Queue.Queue(256)
    # Per-device state keyed by device path (see add_new_device()).
    self.devices = {}
    self.keep_running = True
    self.connected = False
    # Random ANSI-escape marker prepended to poll commands so their echo
    # lines can be told apart from normal device output.
    # NOTE: 'bytes' shadows the builtin of the same name (local only).
    self.poll_str = '\x1b[t'
    bytes = os.urandom(4)
    for byte in bytes:
        self.poll_str += '{0:02x}'.format(ord(byte))
    self.poll_str += 'm'
    # Seconds between full status polls of each device.
    self.poll_interval = 60
    # Stable client id derived from the host MAC address.
    self.uuid = '{0:012x}'.format(uuid.getnode())
    # model name -> board interface module (filled by load_interfaces()).
    self.model_interface = {}
    # Log patterns indicating the device's mesh state changed.
    self.mesh_changed = [re.compile('become leader'),
                         re.compile('become detached'),
                         re.compile('allocate sid 0x[0-9a-f]{4}, become [0-9] in net [0-9a-f]{4}')]
    # Log patterns indicating the neighbor table changed.
    self.neighbor_changed = [re.compile('sid [0-9a-f]{4} mac [0-9a-f]{16} is replaced'),
                             re.compile('[0-9a-f]{1,4} neighbor [0-9a-f]{16} become inactive')]
    # Log fragments indicating the cloud uuid may have changed.
    self.device_uuid_changed = ["ACCS: connected",
                                "ACCS: disconnected",
                                'GATEWAY: connect to server succeed']
def packet_send_thread(self):
    """Sender thread: drain self.output_queue onto the server socket and
    emit a HEARTBEAT packet every 10 seconds when nothing is queued."""
    heartbeat_timeout = time.time() + 10
    while self.keep_running:
        try:
            [type, content] = self.output_queue.get(block=True, timeout=0.1)
        except Queue.Empty:
            type = None
            pass
        if self.service_socket == None:
            continue
        if type == None:
            # Nothing queued: send a heartbeat when the interval elapses.
            if time.time() < heartbeat_timeout:
                continue
            heartbeat_timeout += 10
            data = pkt.construct(pkt.HEARTBEAT, '')
        else:
            data = pkt.construct(type, content)
        try:
            self.service_socket.send(data)
        except:
            # Socket failure: flag disconnect; client_func() reconnects.
            self.connected = False
            continue
def send_packet(self, type, content, timeout=0.1):
    """Queue one packet for the sender thread.

    Returns True when queued; False when not connected or when the output
    queue stayed full for *timeout* seconds (the packet is dropped).
    """
    if self.service_socket == None:
        return False
    try:
        self.output_queue.put([type, content], True, timeout)
        return True
    except Queue.Full:
        # Fixed: the original format string '[{0] {1}]' was malformed and
        # raised ValueError instead of printing this diagnostic.  The bare
        # 'except:' is also narrowed to the only exception put() raises here.
        print("error: output buffer full, drop packet [{0} {1}]".format(type, content))
        return False
def send_device_list(self):
    """Report the currently attached (valid) devices to the server."""
    valid_devices = [dev for dev in list(self.devices) if self.devices[dev]['valid']]
    self.send_packet(pkt.CLIENT_DEV, ':'.join(valid_devices))
def send_device_status(self):
    """Push each valid device's attribute dict to the server.

    Stops at the first packet the output queue refuses, to avoid spinning
    while the connection is down.
    """
    for device in list(self.devices):
        if not self.devices[device]['valid']:
            continue
        status_json = json.dumps(self.devices[device]['attributes'], sort_keys=True)
        if self.send_packet(pkt.DEVICE_STATUS, device + ':' + status_json) == False:
            break
def run_poll_command(self, device, command, lines_expect, timeout):
    """Send *command* to *device* tagged with the poll marker and collect
    its response lines from the poll-log queue.

    Collects up to lines_expect + 1 marked lines within *timeout* seconds;
    the first collected line (the command echo) is discarded.  Returns the
    (possibly empty) list of response lines.
    """
    filter = {}  # unused; kept as-is
    response = []
    # Flush stale poll-log lines left over from a previous command.
    while self.devices[device]['plog_queue'].empty() == False:
        self.devices[device]['plog_queue'].get()
    self.devices[device]['handle'].write(self.poll_str + command + '\r')
    start = time.time()
    while True:
        try:
            log = self.devices[device]['plog_queue'].get(False)
        except:
            log = None
        if time.time() - start >= timeout:
            break
        if log == None:
            time.sleep(0.01)
            continue
        # Strip line endings and the poll marker itself.
        log = log.replace('\r', '')
        log = log.replace('\n', '')
        log = log.replace(self.poll_str, '')
        if log == '':
            continue
        response.append(log)
        if len(response) > lines_expect:
            break
    if len(response) > 0:
        response.pop(0)  # drop the echoed command line
    if not response:
        print "device {0} run poll commad '{1}' faild".format(device, command)
    return response
def device_cmd_process(self, device, exit_condition):
    """Per-device command thread.

    Services user commands from 'ucmd_queue', schedules and parses periodic
    status-poll commands from 'pcmd_queue', and pushes updated attributes to
    the server.  Runs until the device disappears or *exit_condition* is
    set (shared with the log-poll thread).
    """
    poll_fail_num = 0  # consecutive poll failures; >= 7 marks the device inactive
    interface = self.devices[device]['interface']
    pcmd_queue = self.devices[device]['pcmd_queue']
    # Push cached attributes (if any) right away so the server has a baseline.
    if self.devices[device]['attributes'] != {}:
        content = device + ':' + json.dumps(self.devices[device]['attributes'], sort_keys=True)
        self.send_packet(pkt.DEVICE_STATUS, content)
    # Randomize the first poll slightly so devices do not all poll at once.
    poll_timeout = time.time() + 3 + random.uniform(0, self.poll_interval/10)
    while interface.exist(device) and exit_condition.is_set() == False:
        try:
            if EN_STATUS_POLL == True and time.time() >= poll_timeout:
                # Time for a full status poll: queue one command per attribute
                # as [command, expected_lines, timeout].
                poll_timeout += self.poll_interval
                queue_safeput(pcmd_queue, ['devname', 1, 0.2])
                queue_safeput(pcmd_queue, ['mac', 1, 0.2])
                queue_safeput(pcmd_queue, ['version', 2, 0.2])
                queue_safeput(pcmd_queue, ['uuid', 1, 0.2])
                queue_safeput(pcmd_queue, ['umesh status', 11, 0.2])
                queue_safeput(pcmd_queue, ['umesh extnetid', 1, 0.2])
                queue_safeput(pcmd_queue, ['umesh nbrs', 35, 0.3])
            block=True   # unused leftover; kept as-is
            timeout=0    # unused here; reassigned below from pcmd_queue
            try:
                args = None
                # User commands take priority; only block when both queues are idle.
                if self.devices[device]['ucmd_queue'].empty() == True and pcmd_queue.empty() == True:
                    args = self.devices[device]['ucmd_queue'].get(block=True, timeout=0.1)
                elif self.devices[device]['ucmd_queue'].empty() == False:
                    args = self.devices[device]['ucmd_queue'].get()
            except Queue.Empty:
                args = None
                continue
            except:
                if DEBUG: traceback.print_exc()
                args = None
                continue
            if args != None:
                # Dispatch a user command: args = [type, term, ...extras].
                type = args[0]
                term = args[1]
                if type == pkt.DEVICE_ERASE:
                    self.device_erase(device, term)
                elif type == pkt.DEVICE_PROGRAM:
                    address = args[2]
                    filename = args[3]
                    self.device_program(device, address, filename, term)
                elif type in [pkt.DEVICE_RESET, pkt.DEVICE_START, pkt.DEVICE_STOP]:
                    self.device_control(device, type, term)
                elif type == pkt.DEVICE_CMD:
                    cmd = args[2]
                    self.device_run_cmd(device, cmd, term)
                    # Changing the extnetid invalidates the cached value; re-poll it.
                    if re.search('umesh extnetid [0-9A-Fa-f]{12}', cmd) != None:
                        queue_safeput(pcmd_queue, ['umesh extnetid', 1, 0.2])
                else:
                    print "error: unknown operation type {0}".format(repr(type))
                args = None
                time.sleep(0.05)
                continue
            if pcmd_queue.empty() == True:
                continue
            # Run the next queued poll command and parse its response.
            [cmd, lines, timeout] = pcmd_queue.get()
            response = self.run_poll_command(device, cmd, lines, timeout)
            if cmd == 'devname': #poll device model
                if len(response) == lines and response[0].startswith('device name:'):
                    poll_fail_num = 0
                    self.devices[device]['attributes']['model'] = response[0].split()[-1]
                else:
                    poll_fail_num += 1
            elif cmd == 'mac': #poll device mac
                if len(response) == 1 and response[0].startswith('MAC address:'):
                    poll_fail_num = 0
                    macaddr = response[0].split()[-1]
                    # Strip '-' separators and append '0000'.
                    macaddr = macaddr.replace('-', '') + '0000'
                    self.devices[device]['attributes']['macaddr'] = macaddr
                else:
                    poll_fail_num += 1
            elif cmd == 'version': #poll device version
                if len(response) == lines:
                    poll_fail_num = 0
                    for line in response:
                        if 'kernel version :' in line:
                            self.devices[device]['attributes']['kernel_version'] = line.replace('kernel version :AOS-', '')
                        if 'app version :' in line:
                            line = line.replace('app version :', '')
                            line = line.replace('app-', '')
                            line = line.replace('APP-', '')
                            self.devices[device]['attributes']['app_version'] = line
                else:
                    poll_fail_num += 1
            elif cmd == 'umesh status': #poll mesh status
                if len(response) == lines:
                    poll_fail_num = 0
                    for line in response:
                        if 'state\t' in line:
                            self.devices[device]['attributes']['state'] = line.replace('state\t', '')
                        elif '\tnetid\t' in line:
                            self.devices[device]['attributes']['netid'] = line.replace('\tnetid\t', '')
                        elif '\tsid\t' in line:
                            self.devices[device]['attributes']['sid'] = line.replace('\tsid\t', '')
                        elif '\tnetsize\t' in line:
                            self.devices[device]['attributes']['netsize'] = line.replace('\tnetsize\t', '')
                        elif '\trouter\t' in line:
                            self.devices[device]['attributes']['router'] = line.replace('\trouter\t', '')
                        elif '\tchannel\t' in line:
                            self.devices[device]['attributes']['channel'] = line.replace('\tchannel\t', '')
                else:
                    poll_fail_num += 1
            elif cmd == 'umesh nbrs': #poll mesh nbrs
                # A good response ends with a neighbor count line ('num=N').
                if len(response) > 0 and 'num=' in response[-1]:
                    poll_fail_num = 0
                    nbrs = {}
                    for line in response:
                        if '\t' not in line or ',' not in line:
                            continue
                        line = line.replace('\t', '')
                        nbr_info = line.split(',')
                        if len(nbr_info) < 10:
                            continue
                        nbrs[nbr_info[0]] = {'relation': nbr_info[1],
                                             'netid': nbr_info[2],
                                             'sid': nbr_info[3],
                                             'link_cost': nbr_info[4],
                                             'child_num': nbr_info[5],
                                             'channel': nbr_info[6],
                                             'reverse_rssi': nbr_info[7],
                                             'forward_rssi': nbr_info[8],
                                             'last_heard': nbr_info[9]}
                        if len(nbr_info) > 10:
                            nbrs[nbr_info[0]]['awake'] = nbr_info[10]
                    self.devices[device]['attributes']['nbrs'] = nbrs
                else:
                    poll_fail_num += 1
            elif cmd == 'umesh extnetid': #poll mesh extnetid
                if len(response) == 1 and response[0].count(':') == 5:
                    # NOTE(review): the success branch increments poll_fail_num;
                    # this looks like it was meant to be 'poll_fail_num = 0' --
                    # confirm before changing.
                    poll_fail_num += 1
                    self.devices[device]['attributes']['extnetid'] = response[0]
                else:
                    poll_fail_num += 1
            elif cmd == 'uuid': #poll uuid
                if len(response) == 1:
                    if 'uuid:' in response[0]:
                        poll_fail_num = 0
                        self.devices[device]['attributes']['uuid'] = response[0].replace('uuid: ', '')
                    elif 'alink is not connected' in response[0]:
                        poll_fail_num = 0
                        self.devices[device]['attributes']['uuid'] = 'N/A'
                    else:
                        poll_fail_num += 1
                else:
                    poll_fail_num += 1
            else:
                print "error: unrecognized poll cmd '{0}'".format(cmd)
                continue
            # Too many consecutive poll failures -> inactive; success -> active.
            if poll_fail_num >= 7:
                if self.devices[device]['attributes']['status'] == 'active':
                    print "device {0} become inactive".format(device)
                self.devices[device]['attributes']['status'] = 'inactive'
            else:
                if self.devices[device]['attributes']['status'] == 'inactive':
                    print "device {0} become active".format(device)
                self.devices[device]['attributes']['status'] = 'active'
            # Only report once the poll queue drains, to batch updates.
            if pcmd_queue.empty() == False:
                continue
            content = device + ':' + json.dumps(self.devices[device]['attributes'], sort_keys=True)
            self.send_packet(pkt.DEVICE_STATUS, content)
        except:
            # Device I/O error: exit when the device is gone, otherwise try
            # to recover by reopening the handle.
            if interface.exist(device) == False:
                exit_condition.set()
                break
            if exit_condition.is_set() == True:
                break
            if DEBUG: traceback.print_exc()
            try:
                self.devices[device]['handle'].close()
                self.devices[device]['handle'].open()
            except:
                exit_condition.set()
                break
    print 'devie command process thread for {0} exited'.format(device)
def device_log_filter(self, device, log):
    """Inspect one device log line; when it matches a known state-change
    pattern, queue the corresponding status-poll commands.

    No-ops when status polling is disabled or the poll queue is full.
    """
    pcmd_queue = self.devices[device]['pcmd_queue']
    if EN_STATUS_POLL == False:
        return
    if pcmd_queue.full() == True:
        return
    for flog in self.mesh_changed:
        if flog.search(log) == None:
            continue
        # Mesh role/network changed: refresh status and neighbor table.
        queue_safeput(pcmd_queue, ['umesh status', 11, 0.2])
        queue_safeput(pcmd_queue, ['umesh nbrs', 33, 0.3])
        return
    for flog in self.neighbor_changed:
        if flog.search(log) == None:
            continue
        # Neighbor table changed: refresh it.
        queue_safeput(pcmd_queue, ['umesh nbrs', 33, 0.3])
        return
    for flog in self.device_uuid_changed:
        if flog not in log:
            continue
        # Cloud connection state changed: refresh the uuid.
        queue_safeput(pcmd_queue, ['uuid', 1, 0.2])
        return
def device_log_poll(self, device, exit_condition):
    """Reader thread: pull bytes from the device one at a time, assemble
    lines, and forward every line to the server (tagged with device path
    and timestamp).  Lines carrying the poll marker are diverted to the
    poll-log queue instead of the state-change filter.  Cleans up and sets
    *exit_condition* when the device disappears."""
    log_time = time.time()
    log = ''
    if LOCALLOG:
        logfile = path.join(path.expanduser('~'), '.tbclient', path.basename(device) + '.log')
        flog = open(logfile, 'a+')
    interface = self.devices[device]['interface']
    while interface.exist(device) and exit_condition.is_set() == False:
        # Back off while disconnected or while a flash/erase holds the lock.
        if self.connected == False or self.devices[device]['iolock'].locked():
            time.sleep(0.01)
            continue
        newline = False
        while self.devices[device]['iolock'].acquire(False) == True:
            try:
                c = self.devices[device]['handle'].read(1)
            except:
                c = ''
            finally:
                self.devices[device]['iolock'].release()
            if c == '':
                break
            if log == '':
                log_time = time.time()  # timestamp at first byte of the line
            log += c
            if c == '\n':
                newline = True
                break
        if newline == True and log != '':
            if self.poll_str in log:
                # Poll-command echo/response: route to run_poll_command().
                queue_safeput(self.devices[device]['plog_queue'], log)
            else:
                self.device_log_filter(device, log)
            if LOCALLOG:
                flog.write('{0:.3f}:'.format(log_time) + log)
            log = device + ':{0:.3f}:'.format(log_time) + log
            self.send_packet(pkt.DEVICE_LOG, log)
            log = ''
    if LOCALLOG:
        flog.close()
    print 'device {0} removed'.format(device)
    self.devices[device]['valid'] = False
    exit_condition.set()
    try:
        self.devices[device]['handle'].close()
    except:
        pass
    self.send_device_list()
    print 'device log poll thread for {0} exited'.format(device)
def add_new_device(self, mi, device):
    """Open *device* via board-interface module *mi* and register its state.

    Returns False when the interface cannot open the device.
    """
    handle = mi.new_device(device)
    if handle == None:
        return False
    self.devices[device] = {
        'valid': True,                   # device currently attached
        'handle': handle,                # open device handle
        'interface': mi,                 # board interface module
        'iolock': threading.Lock(),      # serializes handle I/O
        'attributes': {},                # last polled status values
        'ucmd_queue': Queue.Queue(12),   # user commands from the server
        'pcmd_queue': Queue.Queue(64),   # pending status-poll commands
        'plog_queue': Queue.Queue(64)    # poll-command response lines
    }
    self.devices[device]['attributes']['status'] = 'inactive'
    return True
def add_old_device(self, mi, device):
    """Re-register a device that was unplugged and came back: refresh the
    handle, release a stale I/O lock and drain all stale queues.

    Returns False when the interface cannot reopen the device.
    """
    ser = mi.new_device(device)
    if ser == None:
        return False
    self.devices[device]['handle'] = ser
    if self.devices[device]['iolock'].locked():
        self.devices[device]['iolock'].release()
    while self.devices[device]['ucmd_queue'].empty() == False:
        self.devices[device]['ucmd_queue'].get()
    while self.devices[device]['pcmd_queue'].empty() == False:
        self.devices[device]['pcmd_queue'].get()
    while self.devices[device]['plog_queue'].empty() == False:
        self.devices[device]['plog_queue'].get()
    self.devices[device]['attributes']['status'] = 'inactive'
    self.devices[device]['valid'] = True
    return True
def list_devices(self):
    """Scan every loaded board interface for devices and (re)register any
    that are new or were previously unplugged.

    Returns the sorted list of devices added during this scan.
    """
    system_name = platform.system()
    added = []
    for mi in self.model_interface.values():
        for device in mi.list_devices(system_name):
            if device in self.devices and self.devices[device]['valid'] == True:
                continue  # already registered and attached
            if device in self.devices:
                registered = self.add_old_device(mi, device)
            else:
                registered = self.add_new_device(mi, device)
            if registered:
                added.append(device)
    added.sort()
    return added
def device_monitor(self):
    """Discovery loop: every 0.5s scan for (re)attached devices and spawn
    a log-poll thread plus a command thread for each new one."""
    while self.keep_running:
        devices_new = self.list_devices()
        for device in devices_new:
            print 'device {0} added'.format(device)
            # The two threads share an Event so either can stop the pair.
            exit_condition = threading.Event()
            thread.start_new_thread(self.device_log_poll, (device, exit_condition,))
            thread.start_new_thread(self.device_cmd_process, (device, exit_condition,))
        if devices_new != []:
            self.send_device_list()
        time.sleep(0.5)
    print 'device monitor thread exited'
    self.keep_running = False
def load_interfaces(self):
    """Import board/<model>/<model>.py for every board directory that ships
    an interface module, registering each in self.model_interface."""
    board_dir = path.join(path.dirname(path.abspath(__file__)), 'board')
    candidates = os.listdir(board_dir)
    for d in candidates:
        if path.isdir(path.join(board_dir, d)) == False:
            continue
        model = path.basename(d)
        interface_file = path.join(board_dir, d, model+'.py')
        if path.isfile(interface_file) == False:
            continue
        # Make the module importable by name, then import it.
        sys.path.append(path.join(board_dir, d))
        try:
            self.model_interface[model] = importlib.import_module(model)
        except:
            if DEBUG: traceback.print_exc()
            continue
        print 'model loaded - {0}'.format(model)
def device_erase(self, device, term):
    """Erase the device's flash under the I/O lock, then report the result
    to terminal *term* via a RESPONSE packet."""
    interface = self.devices[device]['interface']
    # Hold the lock so the log-poll thread stays off the handle meanwhile.
    self.devices[device]['iolock'].acquire()
    try:
        ret = interface.erase(device)
    except:
        if DEBUG: traceback.print_exc()
        ret = 'fail'
    finally:
        self.devices[device]['iolock'].release()
    print 'erasing', device, '...', ret
    content = ','.join(term) + ',' + pkt.DEVICE_ERASE + ',' + ret
    self.send_packet(pkt.RESPONSE, content)
def device_program(self, device, address, file, term):
    """Flash *file* to *device* at *address* under the I/O lock, then report
    the result to terminal *term* via a RESPONSE packet."""
    if device not in self.devices:
        print "error: progamming nonexist device {0}".format(device)
        content = ','.join(term) + ',' + pkt.DEVICE_PROGRAM + ',' + 'device nonexist'
        self.send_packet(pkt.RESPONSE, content)
        return
    interface = self.devices[device]['interface']
    # Hold the lock so the log-poll thread stays off the handle meanwhile.
    self.devices[device]['iolock'].acquire()
    try:
        ret = interface.program(device, address, file)
    except:
        if DEBUG: traceback.print_exc()
        ret = 'fail'
    finally:
        self.devices[device]['iolock'].release()
    print 'programming', file, 'to', device, '@', address, '...', ret
    content = ','.join(term) + ',' + pkt.DEVICE_PROGRAM + ',' + ret
    self.send_packet(pkt.RESPONSE, content)
def device_control(self, device, type, term):
    """Reset/stop/start *device* (selected by packet *type*) and report the
    outcome to terminal *term*."""
    operations = {pkt.DEVICE_RESET:'reset', pkt.DEVICE_STOP:'stop', pkt.DEVICE_START:'start'}
    if device not in self.devices:
        print "error: controlling nonexist device {0}".format(device)
        content = ','.join(term) + ',' + type + ',' + 'device nonexist'
        self.send_packet(pkt.RESPONSE, content)
        return
    interface = self.devices[device]['interface']
    try:
        ret = interface.control(device, operations[type])
    except:
        if DEBUG: traceback.print_exc()
        ret = 'fail'
    print operations[type], device, ret
    content = ','.join(term) + ',' + type + ',' + ret
    self.send_packet(pkt.RESPONSE, content)
def device_run_cmd(self, device, cmd, term):
    """Write *cmd* to the device console and report the result to *term*.

    'success' means the write itself succeeded, not that the command ran
    successfully on the device.
    """
    if device not in self.devices:
        print "error: run command at nonexist device {0}".format(device)
        content = ','.join(term) + ',' + pkt.DEVICE_CMD + ',' + 'device nonexist'
        self.send_packet(pkt.RESPONSE, content)
        return
    try:
        self.devices[device]['handle'].write(cmd+'\r')
        result = 'success'
        print "run command '{0}' at {1} succeed".format(cmd, device)
    except:
        if DEBUG: traceback.print_exc()
        result = 'fail'
        print "run command '{0}' at {1} failed".format(cmd, device)
    content = ','.join(term) + ',' + pkt.DEVICE_CMD + ',' + result
    self.send_packet(pkt.RESPONSE, content)
def connect_to_server(self, server_ip, server_port):
    """Open a TCP (optionally TLS-wrapped) connection to the server, reset
    the output queue and return "success" or "fail"."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if ENCRYPT:
        # Verify the server against the bundled certificate.
        # NOTE(review): ssl.wrap_socket is deprecated in modern Python in
        # favor of SSLContext.wrap_socket; behavior is unchanged here.
        certfile = path.join(path.dirname(path.abspath(__file__)), 'certificate.pem')
        sock = ssl.wrap_socket(sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=certfile)
    try:
        sock.connect((server_ip, server_port))
        # Drop packets queued for the previous (dead) connection.
        while self.output_queue.empty() == False:
            self.output_queue.get()
        self.service_socket = sock
        self.connected = True
        return "success"
    except:
        if DEBUG: traceback.print_exc()
        return "fail"
def client_func(self, server_ip, server_port):
    # Main client loop: connect to the server, then receive and dispatch
    # control packets (file transfer, device erase/program/control/cmd).
    # Runs until KeyboardInterrupt. (Python 2 code.)
    work_dir = path.join(path.expanduser('~'), '.tbclient')
    if path.exists(work_dir) == False:
        os.mkdir(work_dir)
    signal.signal(signal.SIGINT, signal_handler)
    self.load_interfaces()
    thread.start_new_thread(self.packet_send_thread, ())
    thread.start_new_thread(self.device_monitor, ())
    file_received = {}    # hash -> completed local filename
    file_receiving = {}   # hash -> in-progress transfer state
    self.connected = False
    self.service_socket = None
    msg = ''
    while True:
        try:
            if self.connected == False:
                raise ConnectionLost
            new_msg = self.service_socket.recv(MAX_MSG_LENGTH)
            if new_msg == '':
                # empty recv means the peer closed the connection
                raise ConnectionLost
                break  # NOTE(review): unreachable after raise — dead code, confirm intent
            msg += new_msg
            # NOTE(review): `msg` is not cleared on reconnect; a stale partial
            # packet could survive a ConnectionLost — confirm this is intended.
            while msg != '':
                type, length, value, msg = pkt.parse(msg)
                if type == pkt.TYPE_NONE:
                    break
                # purge file transfers that timed out (no data for ~4s)
                for hash in list(file_receiving):
                    if time.time() < file_receiving[hash]['timeout']:
                        continue
                    file_receiving[hash]['handle'].close()
                    try:
                        os.remove(file_receiving[hash]['name'])
                    except:
                        pass
                    file_receiving.pop(hash)
                if type == pkt.FILE_BEGIN:
                    # value format: "term:hash:filename"
                    split_value = value.split(':')
                    term = split_value[0]
                    hash = split_value[1]
                    filename = split_value[2]
                    if hash in file_received:
                        if path.exists(file_received[hash]) == True:
                            # already have this file on disk; skip re-transfer
                            content = term + ',' + type + ',' + 'exist'
                            self.send_packet(pkt.RESPONSE, content)
                            continue
                        else:
                            file_received.pop(hash)
                    if hash in file_receiving:
                        content = term + ',' + type + ',' + 'busy'
                        self.send_packet(pkt.RESPONSE, content)
                        print "busy: refused to recive {0}:{1}".format(filename, hash)
                        continue
                    # store under ~/.tbclient with terminal id + timestamp suffix
                    filename = path.join(path.expanduser('~'), '.tbclient', path.basename(filename))
                    filename += '-' + term.split(',')[0]
                    filename += '@' + time.strftime('%Y-%m-%d-%H-%M')
                    filehandle = open(filename, 'wb')
                    timeout = time.time() + 4
                    file_receiving[hash] = {'name': filename, 'seq': 0, 'handle': filehandle, 'timeout': timeout}
                    content = term + ',' + type + ',' + 'ok'
                    self.send_packet(pkt.RESPONSE, content)
                    if DEBUG:
                        print 'start receiving {0} as {1}'.format(split_value[2], filename)
                elif type == pkt.FILE_DATA:
                    # value format: "term:hash:seq:<raw data>"
                    try:
                        split_value = value.split(':')
                        term = split_value[0]
                        hash = split_value[1]
                        seq = split_value[2]
                        data = value[(len(term) + len(hash) + len(seq) + 3):]
                        seq = int(seq)
                    except:
                        print "argument error: {0}".format(type)
                        continue
                    if hash not in file_receiving:
                        content = term + ',' + type + ',' + 'noexist'
                        self.send_packet(pkt.RESPONSE, content)
                        print "error: drop data fragment {0}:{1}, hash not in receiving file".format(hash, seq)
                        continue
                    if file_receiving[hash]['seq'] != seq and file_receiving[hash]['seq'] != seq + 1:
                        # only the expected fragment or a resend of the previous one is valid
                        content = term + ',' + type + ',' + 'seqerror'
                        self.send_packet(pkt.RESPONSE, content)
                        print "error: drop data fragment {0}:{1}, sequence error".format(hash, seq)
                        continue
                    if file_receiving[hash]['seq'] == seq:
                        file_receiving[hash]['handle'].write(data)
                        file_receiving[hash]['seq'] += 1
                        file_receiving[hash]['timeout'] = time.time() + 4
                    # duplicates of the previous fragment are acknowledged but not rewritten
                    content = term + ',' + type + ',' + 'ok'
                    self.send_packet(pkt.RESPONSE, content)
                elif type == pkt.FILE_END:
                    try:
                        split_value = value.split(':')
                        term = split_value[0]
                        hash = split_value[1]
                    except:
                        print "argument error: {0} {1}".format(type, value)
                        continue
                    if hash not in file_receiving:
                        content = term + ',' + type + ',' + 'noexist'
                        self.send_packet(pkt.RESPONSE, content)
                        continue
                    file_receiving[hash]['handle'].close()
                    # verify the transfer by comparing content hash
                    localhash = pkt.hash_of_file(file_receiving[hash]['name'])
                    if localhash != hash:
                        response = 'hasherror'
                    else:
                        response = 'ok'
                        file_received[hash] = file_receiving[hash]['name']
                    if DEBUG:
                        print 'finished receiving {0}, result:{1}'.format(file_receiving[hash]['name'], response)
                    file_receiving.pop(hash)
                    content = term + ',' + type + ',' + response
                    self.send_packet(pkt.RESPONSE, content)
                elif type == pkt.DEVICE_ERASE:
                    # value format: "term0,term1,device"
                    args = value.split(',')
                    if len(args) != 3:
                        continue
                    term = args[0:2]
                    device = args[2]
                    if device in self.devices:
                        if self.devices[device]['ucmd_queue'].full() == False:
                            # queued: the device worker sends the response later
                            self.devices[device]['ucmd_queue'].put([type, term])
                            continue
                        else:
                            result = 'busy'
                            print 'erase', device, 'failed, device busy'
                    else:
                        result = 'nonexist'
                        print 'erase', device, 'failed, device nonexist'
                    content = ','.join(term) + ',' + type + ',' + result
                    self.send_packet(pkt.RESPONSE, content)
                elif type == pkt.DEVICE_PROGRAM:
                    # value format: "term0,term1,device,address,hash"
                    args = value.split(',')
                    if len(args) != 5:
                        continue
                    term = args[0:2]
                    device = args[2]
                    address = args[3]
                    hash = args[4]
                    if hash not in file_received:
                        # firmware image must have been fully received first
                        content = ','.join(term) + ',' + type + ',' + 'error'
                        self.send_packet(pkt.RESPONSE, content)
                        continue
                    filename = file_received[hash]
                    if device in self.devices:
                        if self.devices[device]['ucmd_queue'].full() == False:
                            self.devices[device]['ucmd_queue'].put([type, term, address, filename])
                            continue
                        else:
                            result = 'busy'
                            print 'program {0} to {1} @ {2} failed, device busy'.format(filename, device, address)
                    else:
                        result = 'error'
                        print 'program {0} to {1} @ {2} failed, device nonexist'.format(filename, device, address)
                    content = ','.join(term) + ',' + type + ',' + result
                    self.send_packet(pkt.RESPONSE, content)
                elif type in [pkt.DEVICE_RESET, pkt.DEVICE_START, pkt.DEVICE_STOP]:
                    operations = {pkt.DEVICE_RESET: 'reset', pkt.DEVICE_START: 'start', pkt.DEVICE_STOP: 'stop'}
                    args = value.split(',')
                    if len(args) != 3:
                        continue
                    term = args[0:2]
                    device = args[2]
                    # device must still exist on the filesystem and be registered
                    if os.path.exists(device) and device in self.devices:
                        if self.devices[device]['ucmd_queue'].full() == False:
                            self.devices[device]['ucmd_queue'].put([type, term])
                            continue
                        else:
                            result = 'busy'
                            print operations[type], device, 'failed, device busy'
                    else:
                        result = 'nonexist'
                        print operations[type], device, 'failed, device nonexist'
                    content = ','.join(term) + ',' + type + ',' + result
                    self.send_packet(pkt.RESPONSE, content)
                elif type == pkt.DEVICE_CMD:
                    # value format: "term0,term1,device:cmd" where '|' in cmd stands for a space
                    args = value.split(':')[0]
                    arglen = len(args) + 1
                    args = args.split(',')
                    term = args[0:2]
                    device = args[2]
                    cmd = value[arglen:].replace('|', ' ')
                    if device in self.devices and self.devices[device]['valid'] == True:
                        if self.devices[device]['ucmd_queue'].full() == False:
                            self.devices[device]['ucmd_queue'].put([type, term, cmd])
                            continue
                        else:
                            result = 'busy'
                            print "run command '{0}' at {1} failed, device busy".format(cmd, device)
                    else:
                        result = 'nonexist'
                        print "run command '{0}' at {1} failed, device nonexist".format(cmd, device)
                    content = ','.join(term) + ',' + type + ',' + result
                    self.send_packet(pkt.RESPONSE, content)
                elif type == pkt.CLIENT_UUID:
                    # server asks us to (re-)identify ourselves
                    print 'server request UUID'
                    self.send_packet(pkt.CLIENT_UUID, self.uuid)
                    self.send_packet(pkt.CLIENT_TAG, self.poll_str)
                    self.send_device_list()
                    self.send_device_status()
        except ConnectionLost:
            self.connected = False
            if self.service_socket != None:
                self.service_socket.close()
            print 'connection to server lost, try reconnecting...'
            result = self.connect_to_server(server_ip, server_port)
            if result != 'success':
                print 'connect to server {0}:{1} failed, retry later ...'.format(server_ip, server_port)
                try:
                    time.sleep(5)
                except KeyboardInterrupt:
                    break
                continue
            print 'connect to server {0}:{1} succeeded'.format(server_ip, server_port)
            # re-announce identity and device inventory after reconnecting
            self.send_packet(pkt.CLIENT_UUID, self.uuid)
            self.send_packet(pkt.CLIENT_TAG, self.poll_str)
            self.send_device_list()
            self.send_device_status()
        except KeyboardInterrupt:
            break
        except:
            if DEBUG: traceback.print_exc()
    print "client exiting ..."
    self.keep_running = False
    time.sleep(0.3)
    if self.service_socket: self.service_socket.close()
"""Misc. xonsh tools.
The following implementations were forked from the IPython project:
* Copyright (c) 2008-2014, IPython Development Team
* Copyright (C) 2001-2007 Fernando Perez <fperez@colorado.edu>
* Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
* Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
Implementations:
* decode()
* encode()
* cast_unicode()
* safe_hasattr()
* indent()
"""
import ast
import collections
import collections.abc as cabc
import contextlib
import ctypes
import datetime
import functools
import glob
import itertools
import operator
import os
import pathlib
import re
import shlex
import string
import subprocess
import sys
import threading
import traceback
import typing as tp
import warnings
# adding imports from further xonsh modules is discouraged to avoid circular
# dependencies
from xonsh import __version__
from xonsh.lazyasd import LazyDict, LazyObject, lazyobject
from xonsh.platform import (
DEFAULT_ENCODING,
HAS_PYGMENTS,
ON_LINUX,
ON_WINDOWS,
expanduser,
os_environ,
pygments_version_info,
)
@functools.lru_cache(1)
def is_superuser():
    """Return whether xonsh is running with elevated privileges (cached)."""
    if ON_WINDOWS:
        # non-zero means the current user is in the Administrators group
        return ctypes.windll.shell32.IsUserAnAdmin() != 0
    return os.getuid() == 0
@lazyobject
def xsh():
    # Lazily resolve the global XonshSession; the deferred import avoids a
    # circular dependency between xonsh.tools and xonsh.built_ins.
    from xonsh.built_ins import XSH

    return XSH
class XonshError(Exception):
    """Base exception type for errors raised by xonsh itself."""

    pass
class XonshCalledProcessError(XonshError, subprocess.CalledProcessError):
    """Raised when there's an error with a called process

    Inherits from XonshError and subprocess.CalledProcessError, catching
    either will also catch this error.

    Raised *after* iterating over stdout of a captured command, if the
    returncode of the command is nonzero.

    Example
    -------
    try:
        for line in !(ls):
            print(line)
    except subprocess.CalledProcessError as error:
        print("Error in process: {}".format(error.completed_command.pid))

    This also handles differences between Python3.4 and 3.5 where
    CalledProcessError is concerned.
    """

    def __init__(
        self, returncode, command, output=None, stderr=None, completed_command=None
    ):
        super().__init__(returncode, command, output)
        # keep captured stderr and the full completed command around so
        # exception handlers can inspect the failed pipeline
        self.stderr = stderr
        self.completed_command = completed_command
def expand_path(s, expand_user=True):
    """Expand environment variables in ``s`` when $EXPAND_ENV_VARS is set
    and, when ``expand_user`` is true, expand tilde-prefixes as well.
    """
    env = xsh.env or os_environ
    if env.get("EXPAND_ENV_VARS", False):
        s = expandvars(s)
    if not expand_user:
        return s
    # Follow Bash's unquoted-tilde rules: a tilde is expanded at the start of
    # the value, after the first '=', and after each ':'. See:
    # https://www.gnu.org/software/bash/manual/html_node/Tilde-Expansion.html
    head, eq, tail = s.partition("=")
    if not eq:
        return expanduser(s)
    parts = (expanduser(p) for p in tail.split(os.pathsep))
    return expanduser(head) + eq + os.pathsep.join(parts)
def _expandpath(path):
    """Expand env vars / tilde in ``path``, honoring user expansion only
    when $EXPAND_ENV_VARS is enabled (used when reading EnvPath items)."""
    env = xsh.env or os_environ
    return expand_path(path, expand_user=env.get("EXPAND_ENV_VARS", False))
def simple_random_choice(lst):
    """Return a pseudo-random element from the list.

    Randomness comes from the sub-second clock (``microsecond % len``), so
    the list must have fewer than one million elements (the microsecond
    range).  Raises ``ValueError`` for an empty or over-long list.
    """
    size = len(lst)
    if size == 0:
        # previously this fell through to a ZeroDivisionError on `% 0`
        raise ValueError("The list is empty.")
    if size > 1000000:  # microsecond maximum
        raise ValueError("The list is too long.")
    return lst[datetime.datetime.now().microsecond % size]
def decode_bytes(b):
    """Decode ``b`` using $XONSH_ENCODING / $XONSH_ENCODING_ERRORS when set,
    falling back to the platform default encoding and 'strict' errors.
    """
    env = xsh.env or os_environ
    encoding = env.get("XONSH_ENCODING") or DEFAULT_ENCODING
    errors = env.get("XONSH_ENCODING_ERRORS") or "strict"
    return b.decode(encoding=encoding, errors=errors)
def findfirst(s, substrs):
    """Locate the earliest occurrence of any of ``substrs`` in ``s``.

    Returns an ``(index, substring)`` pair; when nothing matches, ``index``
    is ``len(s)`` and ``substring`` is ``None``.
    """
    best_pos, best = len(s), None
    for candidate in substrs:
        found = s.find(candidate)
        if -1 < found < best_pos:
            best_pos, best = found, candidate
    return best_pos, best
class EnvPath(cabc.MutableSequence):
    """A class that implements an environment path, which is a list of
    strings. Provides a custom method that expands all paths if the
    relevant env variable has been set.
    """

    def __init__(self, args=None):
        # Accept a single string/bytes (split on os.pathsep), a single
        # pathlib.Path, or any iterable of str/bytes/Path entries.
        if not args:
            self._l = []
        else:
            if isinstance(args, str):
                self._l = args.split(os.pathsep)
            elif isinstance(args, pathlib.Path):
                self._l = [args]
            elif isinstance(args, bytes):
                # decode bytes to a string and then split based on
                # the default path separator
                self._l = decode_bytes(args).split(os.pathsep)
            elif isinstance(args, cabc.Iterable):
                # put everything in a list -before- performing the type check
                # in order to be able to retrieve it later, for cases such as
                # when a generator expression was passed as an argument
                args = list(args)
                if not all(isinstance(i, (str, bytes, pathlib.Path)) for i in args):
                    # make TypeError's message as informative as possible
                    # when given an invalid initialization sequence
                    raise TypeError(
                        "EnvPath's initialization sequence should only "
                        "contain str, bytes and pathlib.Path entries"
                    )
                self._l = args
            else:
                raise TypeError(
                    "EnvPath cannot be initialized with items "
                    "of type %s" % type(args)
                )

    def __getitem__(self, item):
        # reads go through _expandpath so env vars / tildes are resolved lazily
        # handle slices separately
        if isinstance(item, slice):
            return [_expandpath(i) for i in self._l[item]]
        else:
            return _expandpath(self._l[item])

    def __setitem__(self, index, item):
        # writes store the raw (unexpanded) value
        self._l.__setitem__(index, item)

    def __len__(self):
        return len(self._l)

    def __delitem__(self, key):
        self._l.__delitem__(key)

    def insert(self, index, value):
        self._l.insert(index, value)

    @property
    def paths(self):
        """
        Returns the list of directories that this EnvPath contains.
        """
        # iterating self applies expansion via __getitem__
        return list(self)

    def __repr__(self):
        return repr(self._l)

    def __eq__(self, other):
        # element-wise comparison; `other` may be any same-length sequence
        if len(self) != len(other):
            return False
        return all(map(operator.eq, self, other))

    def _repr_pretty_(self, p, cycle):
        """Pretty print path list"""
        if cycle:
            p.text("EnvPath(...)")
        else:
            with p.group(1, "EnvPath(\n[", "]\n)"):
                for idx, item in enumerate(self):
                    if idx:
                        p.text(",")
                        p.breakable()
                    p.pretty(item)

    def __add__(self, other):
        if isinstance(other, EnvPath):
            other = other._l
        return EnvPath(self._l + other)

    def __radd__(self, other):
        if isinstance(other, EnvPath):
            other = other._l
        return EnvPath(other + self._l)

    def add(self, data, front=False, replace=False):
        """Add a value to this EnvPath,

        path.add(data, front=bool, replace=bool) -> ensures that path contains data, with position determined by kwargs

        Parameters
        ----------
        data : string or bytes or pathlib.Path
            value to be added
        front : bool
            whether the value should be added to the front, will be
            ignored if the data already exists in this EnvPath and
            replace is False
            Default : False
        replace : bool
            If True, the value will be removed and added to the
            start or end(depending on the value of front)
            Default : False

        Returns
        -------
        None
        """
        # entries are stored fully expanded (unlike __setitem__/insert)
        data = str(expand_path(data))
        if data not in self._l:
            self._l.insert(0 if front else len(self._l), data)
        elif replace:
            # https://stackoverflow.com/a/25251306/1621381
            self._l = list(filter(lambda x: x != data, self._l))
            self._l.insert(0 if front else len(self._l), data)
@lazyobject
def FORMATTER():
    # shared string.Formatter instance, created lazily on first use
    return string.Formatter()
class DefaultNotGivenType:
    """Singleton for representing when no default value is given."""

    # the single shared instance; __new__ below always returns it
    __inst: tp.Optional["DefaultNotGivenType"] = None

    def __new__(cls):
        if DefaultNotGivenType.__inst is None:
            DefaultNotGivenType.__inst = object.__new__(cls)
        return DefaultNotGivenType.__inst


# Sentinel distinguishing "no default supplied" from an explicit None.
DefaultNotGiven = DefaultNotGivenType()
# Token types that may be skipped at the beginning of a subprocess line.
BEG_TOK_SKIPS = LazyObject(
    lambda: frozenset(["WS", "INDENT", "NOT", "LPAREN"]), globals(), "BEG_TOK_SKIPS"
)
# Token types that terminate a logical subprocess expression.
END_TOK_TYPES = LazyObject(
    lambda: frozenset(["SEMI", "AND", "OR", "RPAREN"]), globals(), "END_TOK_TYPES"
)
# Textual separators that can end a subprocess command: ';', 'and', '&&',
# 'or', '||', ')'.
RE_END_TOKS = LazyObject(
    lambda: re.compile(r"(;|and|\&\&|or|\|\||\))"), globals(), "RE_END_TOKS"
)
# All left-parenthesis-like token types, plain and xonsh-specific
# (@(, !(, $(, @$().
LPARENS = LazyObject(
    lambda: frozenset(
        ["LPAREN", "AT_LPAREN", "BANG_LPAREN", "DOLLAR_LPAREN", "ATDOLLAR_LPAREN"]
    ),
    globals(),
    "LPARENS",
)
def _is_not_lparen_and_rparen(lparens, rtok):
"""Tests if an RPAREN token is matched with something other than a plain old
LPAREN type.
"""
# note that any([]) is False, so this covers len(lparens) == 0
return rtok.type == "RPAREN" and any(x != "LPAREN" for x in lparens)
def balanced_parens(line, mincol=0, maxcol=None, lexer=None):
    """Determine whether parentheses are balanced in ``line[mincol:maxcol]``."""
    line = line[mincol:maxcol]
    if lexer is None:
        lexer = xsh.execer.parser.lexer
    if "(" not in line and ")" not in line:
        # fast path: nothing to balance
        return True
    depth = 0
    lexer.input(line)
    for tok in lexer:
        if tok.type in LPARENS:
            depth += 1
        elif tok.type == "RPAREN":
            depth -= 1
        elif tok.type == "ERRORTOKEN" and ")" in tok.value:
            # an unmatched ')' surfaces as an error token
            depth -= 1
    return depth == 0
def ends_with_colon_token(line, lexer=None):
    """Determine whether ``line``'s final lexer token is a COLON
    (comments are ignored)."""
    if lexer is None:
        lexer = xsh.execer.parser.lexer
    lexer.input(line)
    tokens = [t for t in lexer]
    return bool(tokens) and tokens[-1].type == "COLON"
def find_next_break(line, mincol=0, lexer=None):
    """Returns the column number of the next logical break in subproc mode.
    This function may be useful in finding the maxcol argument of
    subproc_toks().
    """
    if mincol >= 1:
        line = line[mincol:]
    if lexer is None:
        lexer = xsh.execer.parser.lexer
    if RE_END_TOKS.search(line) is None:
        # no candidate separator text at all -> no break
        return None
    maxcol = None
    lparens = []
    lexer.input(line)
    for tok in lexer:
        if tok.type in LPARENS:
            lparens.append(tok.type)
        elif tok.type in END_TOK_TYPES:
            if _is_not_lparen_and_rparen(lparens, tok):
                # this RPAREN closes a special xonsh paren; not a break
                lparens.pop()
            else:
                maxcol = tok.lexpos + mincol + 1
                break
        elif tok.type == "ERRORTOKEN" and ")" in tok.value:
            # unmatched ')' surfaces as an error token; treat as a break
            maxcol = tok.lexpos + mincol + 1
            break
        elif tok.type == "BANG":
            # macro call: the rest of the line belongs to the macro
            maxcol = mincol + len(line) + 1
            break
    return maxcol
def _offset_from_prev_lines(line, last):
lines = line.splitlines(keepends=True)[:last]
return sum(map(len, lines))
def subproc_toks(
    line, mincol=-1, maxcol=None, lexer=None, returnline=False, greedy=False
):
    """Encapsulates tokens in a source code line in a uncaptured
    subprocess ![] starting at a minimum column. If there are no tokens
    (ie in a comment line) this returns None. If greedy is True, it will encapsulate
    normal parentheses. Greedy is False by default.
    """
    if lexer is None:
        lexer = xsh.execer.parser.lexer
    if maxcol is None:
        maxcol = len(line) + 1
    lexer.reset()
    lexer.input(line)
    toks = []
    lparens = []
    saw_macro = False
    end_offset = 0
    # collect the token run that forms the subprocess expression
    for tok in lexer:
        pos = tok.lexpos
        if tok.type not in END_TOK_TYPES and pos >= maxcol:
            break
        if tok.type == "BANG":
            saw_macro = True
        if saw_macro and tok.type not in ("NEWLINE", "DEDENT"):
            # after a macro marker everything up to the newline is consumed
            toks.append(tok)
            continue
        if tok.type in LPARENS:
            lparens.append(tok.type)
        if greedy and len(lparens) > 0 and "LPAREN" in lparens:
            # in greedy mode swallow plain-paren groups whole
            toks.append(tok)
            if tok.type == "RPAREN":
                lparens.pop()
            continue
        if len(toks) == 0 and tok.type in BEG_TOK_SKIPS:
            continue  # handle indentation
        elif len(toks) > 0 and toks[-1].type in END_TOK_TYPES:
            if _is_not_lparen_and_rparen(lparens, toks[-1]):
                lparens.pop()  # don't continue or break
            elif pos < maxcol and tok.type not in ("NEWLINE", "DEDENT", "WS"):
                # a previous separator ended an expression; restart collection
                if not greedy:
                    toks.clear()
                if tok.type in BEG_TOK_SKIPS:
                    continue
            else:
                break
        if pos < mincol:
            continue
        toks.append(tok)
        if tok.type == "WS" and tok.value == "\\":
            pass  # line continuation
        elif tok.type == "NEWLINE":
            break
        elif tok.type == "DEDENT":
            # fake a newline when dedenting without a newline
            tok.type = "NEWLINE"
            tok.value = "\n"
            tok.lineno -= 1
            if len(toks) >= 2:
                prev_tok_end = toks[-2].lexpos + len(toks[-2].value)
            else:
                prev_tok_end = len(line)
            if "#" in line[prev_tok_end:]:
                tok.lexpos = prev_tok_end  # prevents wrapping comments
            else:
                tok.lexpos = len(line)
            break
        elif check_bad_str_token(tok):
            # malformed string literal: give up rather than mis-wrap
            return
    else:
        # the lexer ran dry without a NEWLINE/DEDENT terminator
        if len(toks) > 0 and toks[-1].type in END_TOK_TYPES:
            if _is_not_lparen_and_rparen(lparens, toks[-1]):
                pass
            elif greedy and toks[-1].type == "RPAREN":
                pass
            else:
                toks.pop()
        if len(toks) == 0:
            return  # handle comment lines
        tok = toks[-1]
        pos = tok.lexpos
        if isinstance(tok.value, str):
            end_offset = len(tok.value.rstrip())
        else:
            el = line[pos:].split("#")[0].rstrip()
            end_offset = len(el)
    if len(toks) == 0:
        return  # handle comment lines
    elif saw_macro or greedy:
        end_offset = len(toks[-1].value.rstrip()) + 1
    if toks[0].lineno != toks[-1].lineno:
        # handle multiline cases
        end_offset += _offset_from_prev_lines(line, toks[-1].lineno)
    beg, end = toks[0].lexpos, (toks[-1].lexpos + end_offset)
    end = len(line[:end].rstrip())
    rtn = "![" + line[beg:end] + "]"
    if returnline:
        rtn = line[:beg] + rtn + line[end:]
    return rtn
def check_bad_str_token(tok):
    """Check whether a token represents a malformed string literal."""
    if tok.type == "ERRORTOKEN" and tok.value == "EOF in multi-line string":
        return True
    # non-string values are fine; string values must have matched quotes
    return isinstance(tok.value, str) and not check_quotes(tok.value)
def check_quotes(s):
    """Return True when the quoting of ``s`` looks consistent: either it
    neither starts nor ends like a string literal, or it does both and
    forms one complete literal.
    """
    starts = RE_BEGIN_STRING.match(s) is not None
    ends = s.endswith('"') or s.endswith("'")
    if starts != ends:
        # one-sided quoting is always wrong
        return False
    if not starts:
        return True
    return RE_COMPLETE_STRING.match(s) is not None
def _have_open_triple_quotes(s):
if s.count('"""') % 2 == 1:
open_triple = '"""'
elif s.count("'''") % 2 == 1:
open_triple = "'''"
else:
open_triple = False
return open_triple
def get_line_continuation():
    """Return the line-continuation marker used in subproc mode.

    On Windows in interactive mode the backslash must be preceded by a
    space, because Windows paths may themselves end in a backslash.
    """
    if ON_WINDOWS and (getattr(xsh, "env", None) or {}).get("XONSH_INTERACTIVE", False):
        return " \\"
    return "\\"
def get_logical_line(lines, idx):
    """Returns a single logical line (i.e. one without line continuations)
    from a list of lines.  This line should begin at index idx.  This also
    returns the number of physical lines the logical line spans.  The lines
    should not contain newlines.
    """
    n = 1
    nlines = len(lines)
    linecont = get_line_continuation()
    # back up to the first physical line of this logical line
    while idx > 0 and lines[idx - 1].endswith(linecont):
        idx -= 1
    start = idx
    line = lines[idx]
    open_triple = _have_open_triple_quotes(line)
    # join forward while a continuation marker or an unclosed triple quote
    # carries the logical line onto the next physical line
    while (line.endswith(linecont) or open_triple) and idx < nlines - 1:
        n += 1
        idx += 1
        if line.endswith(linecont):
            # drop the trailing backslash and splice
            line = line[:-1] + lines[idx]
        else:
            # inside a triple-quoted string the newline is significant
            line = line + "\n" + lines[idx]
        open_triple = _have_open_triple_quotes(line)
    return line, n, start
def replace_logical_line(lines, logical, idx, n):
    """Replaces lines at idx that may end in line continuation with a logical
    line that spans n lines.
    """
    linecont = get_line_continuation()
    if n == 1:
        lines[idx] = logical
        return
    space = " "
    for i in range(idx, idx + n - 1):
        # try to split near each original physical line's length
        a = len(lines[i])
        b = logical.find(space, a - 1)
        if b < 0:
            # no space found; put the whole remainder on this physical line
            lines[i] = logical
            logical = ""
        else:
            # split at the space and re-attach a continuation marker
            lines[i] = logical[:b] + linecont
            logical = logical[b:]
    # whatever remains goes on the final physical line
    lines[idx + n - 1] = logical
def is_balanced(expr, ltok, rtok):
    """Determine whether ``expr`` has equally many opening (``ltok``) and
    closing (``rtok``) tokens.

    Preserves the historical quirk that an expression containing *no*
    opening tokens counts as balanced even if closing tokens are present.
    """
    lcnt = expr.count(ltok)
    return lcnt == 0 or lcnt == expr.count(rtok)
def subexpr_from_unbalanced(expr, ltok, rtok):
    """Attempts to pull out a valid subexpression for unbalanced grouping,
    based on opening tokens, eg. '(', and closing tokens, eg. ')'.  This
    does not do full tokenization, but should be good enough for tab
    completion.
    """
    if is_balanced(expr, ltok, rtok):
        return expr
    # keep only the text after the last opener, then the last comma/colon
    tail = expr.rsplit(ltok, 1)[-1]
    tail = tail.rsplit(",", 1)[-1]
    return tail.rsplit(":", 1)[-1]
def subexpr_before_unbalanced(expr, ltok, rtok):
    """Obtains the expression prior to last unbalanced left token."""
    subexpr, _, post = expr.rpartition(ltok)
    unmatched = post.count(rtok)
    # peel off one opener per closer remaining in the trailing text until
    # the text after the split contains no more closers
    while unmatched != 0:
        for _ in range(unmatched):
            subexpr, _, post = subexpr.rpartition(ltok)
        unmatched = post.count(rtok)
    # strip anything before the nearest closer/opener pair
    _, _, subexpr = subexpr.rpartition(rtok)
    _, _, subexpr = subexpr.rpartition(ltok)
    return subexpr
@lazyobject
def STARTING_WHITESPACE_RE():
    # captures the run of whitespace at the start of a string
    return re.compile(r"^(\s*)")
def starting_whitespace(s):
    """Returns the whitespace at the start of a string"""
    match = STARTING_WHITESPACE_RE.match(s)
    return match.group(1)
def decode(s, encoding=None):
    """Decode bytes to str, substituting U+FFFD for undecodable bytes."""
    return s.decode(encoding or DEFAULT_ENCODING, "replace")
def encode(u, encoding=None):
    """Encode str to bytes, substituting '?' for unencodable characters."""
    return u.encode(encoding or DEFAULT_ENCODING, "replace")
def cast_unicode(s, encoding=None):
    """Coerce ``s`` to str, decoding when it is bytes; str passes through."""
    return decode(s, encoding) if isinstance(s, bytes) else s
def safe_hasattr(obj, attr):
    """Like ``hasattr()``, but treats *any* exception raised by the lookup
    (not just AttributeError) as "attribute absent"."""
    try:
        getattr(obj, attr)
    except Exception:
        return False
    return True
def indent(instr, nspaces=4, ntabs=0, flatten=False):
    """Indent a string by ``ntabs`` tabs plus ``nspaces`` spaces.

    Parameters
    ----------
    instr : str
        The string to be indented; ``None`` yields ``None``.
    nspaces : int (default: 4)
        The number of spaces to be indented.
    ntabs : int (default: 0)
        The number of tabs to be indented.
    flatten : bool (default: False)
        Whether to scrub existing indentation.  If True, all lines are
        aligned to the same indentation; if False, existing indentation
        is strictly increased.

    Returns
    -------
    str or None
        The indented string.
    """
    if instr is None:
        return
    prefix = "\t" * ntabs + " " * nspaces
    pattern = re.compile(r"^\s*" if flatten else r"^", re.MULTILINE)
    result = pattern.sub(prefix, instr)
    # avoid leaving a dangling indent after a trailing newline
    if result.endswith(os.linesep + prefix):
        return result[: -len(prefix)]
    return result
def get_sep():
    """Return the filepath separator to use: the alternate ('/') separator
    when $FORCE_POSIX_PATHS is set on Windows, otherwise the OS default."""
    use_posix = ON_WINDOWS and xsh.env.get("FORCE_POSIX_PATHS")
    return os.altsep if use_posix else os.sep
def fallback(cond, backup):
    """Decorator factory: keep the decorated object when ``cond`` is true,
    otherwise substitute ``backup`` for it."""
    if cond:
        return lambda obj: obj
    return lambda obj: backup
# The following redirect classes were taken directly from Python 3.5's source
# code (from the contextlib module). This can be removed when 3.5 is released,
# although redirect_stdout exists in 3.4, redirect_stderr does not.
# See the Python software license: https://docs.python.org/3/license.html
# Copyright (c) Python Software Foundation. All rights reserved.
class _RedirectStream:
    # Base context manager that swaps the sys stream named by `_stream`
    # for a replacement object, restoring the previous one on exit.
    # Forked from CPython's contextlib (see comment above).

    # name of the sys attribute to redirect ("stdout"/"stderr" in subclasses)
    _stream: tp.Optional[str] = None

    def __init__(self, new_target):
        self._new_target = new_target
        # We use a list of old targets to make this CM re-entrant
        self._old_targets = []

    def __enter__(self):
        self._old_targets.append(getattr(sys, self._stream))
        setattr(sys, self._stream, self._new_target)
        return self._new_target

    def __exit__(self, exctype, excinst, exctb):
        # restore the most recently saved target (LIFO)
        setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
    """Context manager for temporarily redirecting stdout to another file::

        # How to send help() to stderr
        with redirect_stdout(sys.stderr):
            help(dir)

        # How to write help() to a file
        with open('help.txt', 'w') as f:
            with redirect_stdout(f):
                help(pow)

    Mostly for backwards compatibility.
    """

    _stream = "stdout"
class redirect_stderr(_RedirectStream):
    """Context manager for temporarily redirecting stderr to another file."""

    _stream = "stderr"
def _yield_accessible_unix_file_names(path):
"""yield file names of executable files in path."""
if not os.path.exists(path):
return
for file_ in os.scandir(path):
try:
if file_.is_file() and os.access(file_.path, os.X_OK):
yield file_.name
except OSError:
# broken Symlink are neither dir not files
pass
def _executables_in_posix(path):
if not os.path.exists(path):
return
else:
yield from _yield_accessible_unix_file_names(path)
def _executables_in_windows(path):
if not os.path.isdir(path):
return
extensions = xsh.env["PATHEXT"]
try:
for x in os.scandir(path):
try:
is_file = x.is_file()
except OSError:
continue
if is_file:
fname = x.name
else:
continue
base_name, ext = os.path.splitext(fname)
if ext.upper() in extensions:
yield fname
except FileNotFoundError:
# On Windows, there's no guarantee for the directory to really
# exist even if isdir returns True. This may happen for instance
# if the path contains trailing spaces.
return
def executables_in(path):
    """Yield the names of files in ``path`` that the user could execute."""
    finder = _executables_in_windows if ON_WINDOWS else _executables_in_posix
    try:
        yield from finder(path)
    except PermissionError:
        return
def debian_command_not_found(cmd):
    """Ask the Debian/Ubuntu ``command-not-found`` helper for package
    suggestions for a command that cannot currently be found.  Returns ''
    off Linux or when the helper binary is absent."""
    if not ON_LINUX:
        return ""
    cnf = xsh.commands_cache.lazyget(
        "command-not-found", ("/usr/lib/command-not-found",)
    )[0]
    if not os.path.isfile(cnf):
        return ""
    # `exit 0` keeps check_output from raising on the helper's exit status
    out = subprocess.check_output(
        "{0} {1}; exit 0".format(cnf, shlex.quote(cmd)),
        universal_newlines=True,
        stderr=subprocess.STDOUT,
        shell=True,
    )
    return "\n".join(out.rstrip().splitlines()).strip()
def conda_suggest_command_not_found(cmd, env):
    """Suggest packages for a missing command via conda-suggest; returns ''
    when the conda_suggest package is not installed."""
    try:
        from conda_suggest import find
    except ImportError:
        return ""
    return find.message_string(
        cmd, conda_suggest_path=env.get("CONDA_SUGGEST_PATH", None)
    )
def command_not_found(cmd, env):
    """Combine all available suggestion mechanisms for a command that
    cannot currently be found."""
    rtn = debian_command_not_found(cmd) if ON_LINUX else ""
    conda = conda_suggest_command_not_found(cmd, env)
    if conda:
        rtn = rtn + "\n\n" + conda if rtn else conda
    return rtn
@functools.lru_cache()
def suggest_commands(cmd, env):
    """Suggests alternative commands given an environment and aliases."""
    if not env.get("SUGGEST_COMMANDS"):
        return ""
    thresh = env.get("SUGGEST_THRESHOLD")
    max_sugg = env.get("SUGGEST_MAX_NUM")
    if max_sugg < 0:
        # negative means "no limit"
        max_sugg = float("inf")
    cmd = cmd.lower()
    suggested = {}
    # collect aliases within edit distance `thresh` of the typo
    for alias in xsh.aliases:
        if alias not in suggested:
            if levenshtein(alias.lower(), cmd, thresh) < thresh:
                suggested[alias] = "Alias"
    # then known executables from the commands cache
    for _cmd in xsh.commands_cache.all_commands:
        if _cmd not in suggested:
            if levenshtein(_cmd.lower(), cmd, thresh) < thresh:
                suggested[_cmd] = f"Command ({_cmd})"
    # order closest matches first
    suggested = collections.OrderedDict(
        sorted(
            suggested.items(), key=lambda x: suggestion_sort_helper(x[0].lower(), cmd)
        )
    )
    num = min(len(suggested), max_sugg)
    if num == 0:
        # nothing close enough; fall back to package suggestions
        rtn = command_not_found(cmd, env)
    else:
        oneof = "" if num == 1 else "one of "
        tips = f"Did you mean {oneof}the following?"
        # popitem(False) pops from the front, i.e. best matches first
        items = list(suggested.popitem(False) for _ in range(num))
        length = max(len(key) for key, _ in items) + 2
        alternatives = "\n".join(
            " {: <{}} {}".format(key + ":", length, val) for key, val in items
        )
        rtn = f"{tips}\n{alternatives}"
        c = command_not_found(cmd, env)
        rtn += ("\n\n" + c) if len(c) > 0 else ""
    return rtn
def _get_manual_env_var(name, default=None):
    """Return ``(manually_set, value)`` for env var ``name``, falling back
    to ``os.environ`` when no xonsh Env is available."""
    env = getattr(xsh, "env", None)
    if env is None:
        env = os_environ
        manually_set = name in env
    else:
        manually_set = env.is_manually_set(name)
    return manually_set, env.get(name, default)
def print_warning(msg):
    """Print warnings with/without traceback, honoring the
    $XONSH_SHOW_TRACEBACK and $XONSH_TRACEBACK_LOGFILE settings."""
    manually_set_trace, show_trace = _get_manual_env_var("XONSH_SHOW_TRACEBACK", False)
    manually_set_logfile, log_file = _get_manual_env_var("XONSH_TRACEBACK_LOGFILE")
    if (not manually_set_trace) and (not manually_set_logfile):
        # Notify about the traceback output possibility if neither of
        # the two options have been manually set
        sys.stderr.write(
            "xonsh: For full traceback set: " "$XONSH_SHOW_TRACEBACK = True\n"
        )
    # convert show_trace to bool if necessary
    if not is_bool(show_trace):
        show_trace = to_bool(show_trace)
    # if the trace option has been set, print all traceback info to stderr
    if show_trace:
        # notify user about XONSH_TRACEBACK_LOGFILE if it has
        # not been set manually
        if not manually_set_logfile:
            sys.stderr.write(
                "xonsh: To log full traceback to a file set: "
                "$XONSH_TRACEBACK_LOGFILE = <filename>\n"
            )
        traceback.print_stack()
    # additionally, check if a file for traceback logging has been
    # specified and convert to a proper option if needed
    log_file = to_logfile_opt(log_file)
    if log_file:
        # if log_file is a non-empty path, append the
        # traceback log there as well
        with open(os.path.abspath(log_file), "a") as f:
            traceback.print_stack(file=f)
    # ensure the message itself ends with a newline
    msg = msg if msg.endswith("\n") else msg + "\n"
    sys.stderr.write(msg)
def print_exception(msg=None, exc_info=None):
    """Print given exception (or current if None) with/without traceback and set sys.last_type, sys.last_value, sys.last_traceback accordingly.

    *msg*, when given, is written to stderr after the exception output.
    Traceback verbosity is controlled by $XONSH_SHOW_TRACEBACK and
    $XONSH_TRACEBACK_LOGFILE; coloring by $XONSH_INTERACTIVE/$COLOR_RESULTS.
    """
    # if no exc_info() triple is given, use the exception being handled at the moment
    if exc_info is None:
        exc_info = sys.exc_info()
    # these values (initialized with their default for traceback.print_exception) control how an exception is printed
    limit = None
    chain = True
    _, debug_level = _get_manual_env_var("XONSH_DEBUG", 0)
    # the internal state of the parser's stack is
    # not helpful in normal operation (XONSH_DEBUG == 0).
    # this is also done to be consistent with python
    is_syntax_error = issubclass(exc_info[0], SyntaxError)
    # XonshErrors don't show where in the user's code they occurred
    # (most are reported deeper in the callstack, e.g. see procs/pipelines.py),
    # but only show non-helpful xonsh internals.
    # These are only relevant when developing/debugging xonsh itself.
    # Therefore, don't print these traces until this gets overhauled.
    is_xonsh_error = exc_info[0] in (XonshError, XonshCalledProcessError)
    # hide unhelpful traces if not debugging
    hide_stacktrace = debug_level == 0 and (is_syntax_error or is_xonsh_error)
    if hide_stacktrace:
        limit = 0
        chain = False
    # mirror what the interactive interpreter does after an exception
    sys.last_type, sys.last_value, sys.last_traceback = exc_info
    manually_set_trace, show_trace = _get_manual_env_var("XONSH_SHOW_TRACEBACK", False)
    manually_set_logfile, log_file = _get_manual_env_var("XONSH_TRACEBACK_LOGFILE")
    if (not manually_set_trace) and (not manually_set_logfile):
        # Notify about the traceback output possibility if neither of
        # the two options have been manually set
        sys.stderr.write(
            "xonsh: For full traceback set: " "$XONSH_SHOW_TRACEBACK = True\n"
        )
    # convert show_trace to bool if necessary (env value may be a string)
    if not is_bool(show_trace):
        show_trace = to_bool(show_trace)
    # if the trace option has been set, print all traceback info to stderr
    if show_trace:
        # notify user about XONSH_TRACEBACK_LOGFILE if it has
        # not been set manually
        if not manually_set_logfile:
            sys.stderr.write(
                "xonsh: To log full traceback to a file set: "
                "$XONSH_TRACEBACK_LOGFILE = <filename>\n"
            )
        traceback_str = "".join(
            traceback.format_exception(*exc_info, limit=limit, chain=chain)
        )
        # color the traceback if available
        _, interactive = _get_manual_env_var("XONSH_INTERACTIVE", 0)
        _, color_results = _get_manual_env_var("COLOR_RESULTS", 0)
        if interactive and color_results and HAS_PYGMENTS:
            import pygments.lexers.python
            lexer = pygments.lexers.python.PythonTracebackLexer()
            tokens = list(pygments.lex(traceback_str, lexer=lexer))
            # this goes to stdout, but since we are interactive it doesn't matter
            print_color(tokens, end="")
        else:
            print(traceback_str, file=sys.stderr, end="")
    # additionally, check if a file for traceback logging has been
    # specified and convert to a proper option if needed
    log_file = to_logfile_opt(log_file)
    if log_file:
        # if log_file <> '' or log_file <> None, append
        # traceback log there as well
        with open(os.path.abspath(log_file), "a") as f:
            traceback.print_exception(*exc_info, limit=limit, chain=chain, file=f)
    if not show_trace:
        # if traceback output is disabled, print the exception's
        # error message on stderr.
        display_error_message(exc_info)
    if msg:
        msg = msg if msg.endswith("\n") else msg + "\n"
        sys.stderr.write(msg)
def display_error_message(exc_info, strip_xonsh_error_types=True):
    """
    Prints the error message of the given sys.exc_info() triple on stderr.

    When the exception is a XonshError and *strip_xonsh_error_types* is
    true, the leading exception-type prefix is removed so only the message
    itself is shown.
    """
    exc_type, exc_value, exc_traceback = exc_info
    exception_only = traceback.format_exception_only(exc_type, exc_value)
    if exc_type is XonshError and strip_xonsh_error_types:
        # drop everything up to and including the first ": " (the type name)
        exception_only[0] = exception_only[0].partition(": ")[-1]
    sys.stderr.write("".join(exception_only))
def is_writable_file(filepath):
    """Return True if *filepath* can be opened for writing.

    A path qualifies when it is an existing, writable, non-directory path,
    or when it does not exist but its parent directory is writable.
    """
    filepath = expand_path(filepath)
    # normalize to an absolute path so the dirname check below is meaningful
    if not os.path.isabs(filepath):
        filepath = os.path.abspath(filepath)
    # cannot write to directories
    if os.path.isdir(filepath):
        return False
    # existing file: writable iff we have write access
    # (os.access already returns a bool; no need for `True if ... else False`)
    if os.path.exists(filepath):
        return os.access(filepath, os.W_OK)
    # if the path doesn't exist, isolate its directory component
    # and ensure that directory is writable instead
    return os.access(os.path.dirname(filepath), os.W_OK)
# Modified from Public Domain code, by Magnus Lie Hetland
# from http://hetland.org/coding/python/levenshtein.py
def levenshtein(a, b, max_dist=float("inf")):
    """Calculates the Levenshtein distance between a and b."""
    len_a, len_b = len(a), len(b)
    # the distance is at least the length difference, so bail out early
    if abs(len_a - len_b) > max_dist:
        return float("inf")
    # keep the shorter string in `a` so only O(min(len_a, len_b)) space is used
    if len_a > len_b:
        a, b, len_a, len_b = b, a, len_b, len_a
    row = list(range(len_a + 1))
    for i, cb in enumerate(b, start=1):
        prev, row = row, [i] + [0] * len_a
        for j, ca in enumerate(a, start=1):
            insert_cost = row[j - 1] + 1
            delete_cost = prev[j] + 1
            # substitution is free when the characters already match
            subst_cost = prev[j - 1] + (ca != cb)
            row[j] = min(insert_cost, delete_cost, subst_cost)
    return row[len_a]
def suggestion_sort_helper(x, y):
    """Returns a score (lower is better) for x based on how similar
    it is to y. Used to rank suggestions."""
    x, y = x.lower(), y.lower()
    # count characters (with repeats) each string has that the other lacks
    missing_from_y = sum(1 for ch in x if ch not in y)
    missing_from_x = sum(1 for ch in y if ch not in x)
    # shorter, more-overlapping pairs score lower
    return len(x) + len(y) + missing_from_y + missing_from_x
def escape_windows_cmd_string(s):
    """Returns a string that is usable by the Windows cmd.exe.
    The escaping is based on details here and empirical testing:
    http://www.robvanderwoude.com/escapechars.php
    """
    # prefix every cmd.exe metacharacter with a caret, in one pass
    specials = '^()%!<>&|"'
    return "".join("^" + ch if ch in specials else ch for ch in s)
def argvquote(arg, force=False):
    """Returns an argument quoted in such a way that CommandLineToArgvW
    on Windows will return the argument string unchanged.
    This is the same thing Popen does when supplied with an list of arguments.
    Arguments in a command line should be separated by spaces; this
    function does not add these spaces. This implementation follows the
    suggestions outlined here:
    https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
    """
    if not force and len(arg) != 0 and not any([c in arg for c in ' \t\n\v"']):
        return arg
    else:
        n_backslashes = 0
        cmdline = '"'
        for c in arg:
            if c == "\\":
                # first count the number of current backslashes
                n_backslashes += 1
                continue
            if c == '"':
                # Escape all backslashes and the following double quotation mark
                cmdline += (n_backslashes * 2 + 1) * "\\"
            else:
                # backslashes are not special here
                cmdline += n_backslashes * "\\"
            # BUGFIX: the backslash run must be reset after *every*
            # non-backslash character, not only in the else branch;
            # otherwise backslashes preceding a '"' were also counted
            # again for the next non-backslash character.
            n_backslashes = 0
            cmdline += c
        # Escape all backslashes, but let the terminating
        # double quotation mark we add below be interpreted
        # as a metacharacter
        cmdline += n_backslashes * 2 * "\\" + '"'
        return cmdline
def on_main_thread():
    """Checks if we are on the main thread or not."""
    # identity comparison against the interpreter's main thread object
    main = threading.main_thread()
    return threading.current_thread() is main
_DEFAULT_SENTINEL = object()


@contextlib.contextmanager
def swap(namespace, name, value, default=_DEFAULT_SENTINEL):
    """Swaps a current variable name in a namespace for another value, and then
    replaces it when the context is exited.
    """
    previous = getattr(namespace, name, default)
    setattr(namespace, name, value)
    yield value
    # deliberately no try/finally: if the body raises, the attribute is
    # not restored (matches the original behavior)
    if previous is default:
        # the attribute did not exist before, so remove it again
        delattr(namespace, name)
    else:
        setattr(namespace, name, previous)
@contextlib.contextmanager
def swap_values(d, updates, default=_DEFAULT_SENTINEL):
    """Updates a dictionary (or other mapping) with values from another mapping,
    and then restores the original mapping when the context is exited.
    """
    # remember each updated key's prior value (sentinel when absent)
    saved = {key: d.get(key, default) for key in updates}
    d.update(updates)
    yield
    for key, prior in saved.items():
        if prior is default and key in d:
            # the key did not exist before the swap; drop it again
            del d[key]
        else:
            d[key] = prior
#
# Validators and converters
#
def detype(x):
"""This assumes that the object has a detype method, and calls that."""
return x.detype()
def is_int(x):
"""Tests if something is an integer"""
return isinstance(x, int)
def is_float(x):
"""Tests if something is a float"""
return isinstance(x, float)
def is_string(x):
"""Tests if something is a string"""
return isinstance(x, str)
def is_slice(x):
"""Tests if something is a slice"""
return isinstance(x, slice)
def is_callable(x):
"""Tests if something is callable"""
return callable(x)
def is_string_or_callable(x):
"""Tests if something is a string or callable"""
return is_string(x) or is_callable(x)
def is_class(x):
"""Tests if something is a class"""
return isinstance(x, type)
def always_true(x):
"""Returns True"""
return True
def always_false(x):
"""Returns False"""
return False
def always_none(x):
"""Returns None"""
return None
def ensure_string(x):
    """Returns a string if x is not a string, and x if it already is. If x is None, the empty string is returned."""
    if x is None:
        return ""
    return str(x)
def is_path(x):
"""This tests if something is a path."""
return isinstance(x, pathlib.Path)
def is_env_path(x):
"""This tests if something is an environment path, ie a list of strings."""
return isinstance(x, EnvPath)
def str_to_path(x):
    """Converts a string to a path."""
    # empty/None inputs map to no path at all
    if x is None or x == "":
        return None
    if isinstance(x, str):
        return pathlib.Path(x)
    if isinstance(x, pathlib.Path):
        return x
    # a one-element EnvPath unwraps to its single entry
    if isinstance(x, EnvPath) and len(x) == 1:
        return pathlib.Path(x[0]) if x[0] else None
    raise TypeError(
        f"Variable should be a pathlib.Path, str or single EnvPath type. {type(x)} given."
    )
def str_to_env_path(x):
"""Converts a string to an environment path, ie a list of strings,
splitting on the OS separator.
"""
# splitting will be done implicitly in EnvPath's __init__
return EnvPath(x)
def path_to_str(x):
"""Converts a path to a string."""
return str(x) if x is not None else ""
def env_path_to_str(x):
"""Converts an environment path to a string by joining on the OS
separator.
"""
return os.pathsep.join(x)
def is_bool(x):
"""Tests if something is a boolean."""
return isinstance(x, bool)
def is_bool_or_none(x):
"""Tests if something is a boolean or None."""
return (x is None) or isinstance(x, bool)
def is_logfile_opt(x):
"""
Checks if x is a valid $XONSH_TRACEBACK_LOGFILE option. Returns False
if x is not a writable/creatable file or an empty string or None.
"""
if x is None:
return True
if not isinstance(x, str):
return False
else:
return is_writable_file(x) or x == ""
def to_logfile_opt(x):
"""Converts a $XONSH_TRACEBACK_LOGFILE option to either a str containing
the filepath if it is a writable file or None if the filepath is not
valid, informing the user on stderr about the invalid choice.
"""
if isinstance(x, os.PathLike): # type: ignore
x = str(x)
if is_logfile_opt(x):
return x
else:
# if option is not valid, return a proper
# option and inform the user on stderr
sys.stderr.write(
"xonsh: $XONSH_TRACEBACK_LOGFILE must be a "
"filepath pointing to a file that either exists "
"and is writable or that can be created.\n"
)
return None
def logfile_opt_to_str(x):
"""
Detypes a $XONSH_TRACEBACK_LOGFILE option.
"""
if x is None:
# None should not be detyped to 'None', as 'None' constitutes
# a perfectly valid filename and retyping it would introduce
# ambiguity. Detype to the empty string instead.
return ""
return str(x)
_FALSES = LazyObject(
lambda: frozenset(["", "0", "n", "f", "no", "none", "false", "off"]),
globals(),
"_FALSES",
)
def to_bool(x):
    """Converts to a boolean in a semantically meaningful way."""
    if isinstance(x, bool):
        return x
    if isinstance(x, str):
        # strings are false only when they spell a known "false" word
        return x.lower() not in _FALSES
    return bool(x)
def to_bool_or_none(x):
"""Converts to a boolean or none in a semantically meaningful way."""
if x is None or isinstance(x, bool):
return x
elif isinstance(x, str):
low_x = x.lower()
if low_x == "none":
return None
else:
return False if x.lower() in _FALSES else True
else:
return bool(x)
def to_itself(x):
"""No conversion, returns itself."""
return x
def to_int_or_none(x) -> tp.Optional[int]:
"""Convert the given value to integer if possible. Otherwise return None"""
if isinstance(x, str) and x.lower() == "none":
return None
else:
return int(x)
def bool_to_str(x):
    """Converts a bool to an empty string if False and the string '1' if
    True.
    """
    if x:
        return "1"
    return ""
def bool_or_none_to_str(x):
"""Converts a bool or None value to a string."""
if x is None:
return "None"
else:
return "1" if x else ""
_BREAKS = LazyObject(
lambda: frozenset(["b", "break", "s", "skip", "q", "quit"]), globals(), "_BREAKS"
)
def to_bool_or_break(x):
if isinstance(x, str) and x.lower() in _BREAKS:
return "break"
else:
return to_bool(x)
def is_bool_or_int(x):
"""Returns whether a value is a boolean or integer."""
return is_bool(x) or is_int(x)
def to_bool_or_int(x):
"""Converts a value to a boolean or an integer."""
if isinstance(x, str):
return int(x) if x.isdigit() else to_bool(x)
elif is_int(x): # bools are ints too!
return x
else:
return bool(x)
def bool_or_int_to_str(x):
"""Converts a boolean or integer to a string."""
return bool_to_str(x) if is_bool(x) else str(x)
@lazyobject
def SLICE_REG():
return re.compile(
r"(?P<start>(?:-\d)?\d*):(?P<end>(?:-\d)?\d*):?(?P<step>(?:-\d)?\d*)"
)
def to_shlvl(x):
"""Converts a value to an $SHLVL integer according to bash's behaviour (variables.c::adjust_shell_level)."""
if x is None:
return 0
else:
x = str(x)
try:
return adjust_shlvl(max(0, int(x)), 0)
except ValueError:
return 0
def is_valid_shlvl(x):
"""Checks whether a variable is a proper $SHLVL integer."""
return isinstance(x, int) and to_shlvl(x) == x
def adjust_shlvl(old_lvl: int, change: int):
    """Adjusts an $SHLVL integer according to bash's behaviour (variables.c::adjust_shell_level)."""
    lvl = old_lvl + change
    # clamp negatives to zero; wrap absurdly deep nesting back to one
    if lvl < 0:
        return 0
    if lvl >= 1000:
        return 1
    return lvl
def ensure_slice(x):
    """Try to convert an object into a slice, complain on failure"""
    # falsy values other than 0 select everything
    if not x and x != 0:
        return slice(None)
    if isinstance(x, slice):
        return x
    try:
        i = int(x)
        # a single index becomes a one-element slice; -1 needs an open end
        result = slice(-1, None, None) if i == -1 else slice(i, i + 1)
    except ValueError:
        # parse "start:stop[:step]" text, tolerating surrounding brackets
        x = x.strip("[]()")
        match = SLICE_REG.fullmatch(x)
        if match is None:
            raise ValueError(f"cannot convert {x!r} to slice")
        parts = (int(g) if g else None for g in match.groups())
        result = slice(*parts)
    except TypeError:
        # non-scalar input: treat it as a sequence of slice arguments
        try:
            result = slice(*(int(i) for i in x))
        except (TypeError, ValueError):
            raise ValueError(f"cannot convert {x!r} to slice")
    return result
def get_portions(it, slices):
    """Yield from portions of an iterable.
    Parameters
    ----------
    it : iterable
    slices : a slice or a list of slice objects
    """
    if isinstance(slices, slice):
        slices = [slices]
    if len(slices) == 1:
        sl = slices[0]
        # fast path: stream through islice without materializing `it`
        try:
            yield from itertools.islice(it, sl.start, sl.stop, sl.step)
            return
        except ValueError:
            # islice rejects negative indices; fall through to list slicing
            pass
    items = list(it)
    for sl in slices:
        yield from items[sl]
def is_slice_as_str(x):
"""
Test if string x is a slice. If not a string return False.
"""
try:
x = x.strip("[]()")
m = SLICE_REG.fullmatch(x)
if m:
return True
except AttributeError:
pass
return False
def is_int_as_str(x):
    """
    Test if string x is an integer. If not a string return False.
    """
    # duck-typed: anything without an isdecimal() attribute is not a string
    isdec = getattr(x, "isdecimal", None)
    return False if isdec is None else isdec()
def is_string_set(x):
"""Tests if something is a set of strings"""
return isinstance(x, cabc.Set) and all(isinstance(a, str) for a in x)
def csv_to_set(x):
    """Convert a comma-separated list of strings to a set of strings."""
    # empty/None input yields an empty set rather than {""}
    return set(x.split(",")) if x else set()
def set_to_csv(x):
"""Convert a set of strings to a comma-separated list of strings."""
return ",".join(x)
def pathsep_to_set(x):
    """Converts a os.pathsep separated string to a set of strings."""
    # empty/None input yields an empty set rather than {""}
    return set(x.split(os.pathsep)) if x else set()
def set_to_pathsep(x, sort=False):
    """Converts a set to an os.pathsep separated string. The sort kwarg
    specifies whether to sort the set prior to str conversion.
    """
    items = sorted(x) if sort else x
    return os.pathsep.join(items)
def is_string_seq(x):
"""Tests if something is a sequence of strings"""
return isinstance(x, cabc.Sequence) and all(isinstance(a, str) for a in x)
def is_nonstring_seq_of_strings(x):
"""Tests if something is a sequence of strings, where the top-level
sequence is not a string itself.
"""
return (
isinstance(x, cabc.Sequence)
and not isinstance(x, str)
and all(isinstance(a, str) for a in x)
)
def pathsep_to_seq(x):
"""Converts a os.pathsep separated string to a sequence of strings."""
if not x:
return []
else:
return x.split(os.pathsep)
def seq_to_pathsep(x):
"""Converts a sequence to an os.pathsep separated string."""
return os.pathsep.join(x)
def pathsep_to_upper_seq(x):
"""Converts a os.pathsep separated string to a sequence of
uppercase strings.
"""
if not x:
return []
else:
return x.upper().split(os.pathsep)
def seq_to_upper_pathsep(x):
"""Converts a sequence to an uppercase os.pathsep separated string."""
return os.pathsep.join(x).upper()
def is_bool_seq(x):
"""Tests if an object is a sequence of bools."""
return isinstance(x, cabc.Sequence) and all(isinstance(y, bool) for y in x)
def csv_to_bool_seq(x):
"""Takes a comma-separated string and converts it into a list of bools."""
return [to_bool(y) for y in csv_to_set(x)]
def bool_seq_to_csv(x):
"""Converts a sequence of bools to a comma-separated string."""
return ",".join(map(str, x))
def ptk2_color_depth_setter(x):
"""Setter function for $PROMPT_TOOLKIT_COLOR_DEPTH. Also
updates os.environ so prompt toolkit can pickup the value.
"""
x = str(x)
if x in {
"DEPTH_1_BIT",
"MONOCHROME",
"DEPTH_4_BIT",
"ANSI_COLORS_ONLY",
"DEPTH_8_BIT",
"DEFAULT",
"DEPTH_24_BIT",
"TRUE_COLOR",
}:
pass
elif x in {"", None}:
x = ""
else:
msg = f'"{x}" is not a valid value for $PROMPT_TOOLKIT_COLOR_DEPTH. '
warnings.warn(msg, RuntimeWarning)
x = ""
if x == "" and "PROMPT_TOOLKIT_COLOR_DEPTH" in os_environ:
del os_environ["PROMPT_TOOLKIT_COLOR_DEPTH"]
else:
os_environ["PROMPT_TOOLKIT_COLOR_DEPTH"] = x
return x
def is_completions_display_value(x):
"""Enumerated values of ``$COMPLETIONS_DISPLAY``"""
return x in {"none", "single", "multi"}
def to_completions_display_value(x):
    """Convert user input to value of ``$COMPLETIONS_DISPLAY``"""
    value = str(x).lower()
    # accept a few boolean-ish aliases for the canonical values
    if value in {"none", "false"}:
        return "none"
    if value in {"multi", "true"}:
        return "multi"
    if value in {"single", "readline"}:
        return value
    # unknown input: warn and fall back to the default
    warnings.warn(
        f'"{value}" is not a valid value for $COMPLETIONS_DISPLAY. Using "multi".',
        RuntimeWarning,
    )
    return "multi"
CANONIC_COMPLETION_MODES = frozenset({"default", "menu-complete"})
def is_completion_mode(x):
"""Enumerated values of $COMPLETION_MODE"""
return x in CANONIC_COMPLETION_MODES
def to_completion_mode(x):
"""Convert user input to value of $COMPLETION_MODE"""
y = str(x).casefold().replace("_", "-")
y = (
"default"
if y in ("", "d", "xonsh", "none", "def")
else "menu-complete"
if y in ("m", "menu", "menu-completion")
else y
)
if y not in CANONIC_COMPLETION_MODES:
warnings.warn(
f"'{x}' is not valid for $COMPLETION_MODE, must be one of {CANONIC_COMPLETION_MODES}. Using 'default'.",
RuntimeWarning,
)
y = "default"
return y
def is_str_str_dict(x):
"""Tests if something is a str:str dictionary"""
return isinstance(x, dict) and all(
isinstance(k, str) and isinstance(v, str) for k, v in x.items()
)
def to_dict(x):
"""Converts a string to a dictionary"""
if isinstance(x, dict):
return x
try:
x = ast.literal_eval(x)
except (ValueError, SyntaxError):
msg = f'"{x}" can not be converted to Python dictionary.'
warnings.warn(msg, RuntimeWarning)
x = dict()
return x
def to_str_str_dict(x):
"""Converts a string to str:str dictionary"""
if is_str_str_dict(x):
return x
x = to_dict(x)
if not is_str_str_dict(x):
msg = f'"{x}" can not be converted to str:str dictionary.'
warnings.warn(msg, RuntimeWarning)
x = dict()
return x
def dict_to_str(x):
    """Converts a dictionary to a string; None or an empty mapping yields the empty string."""
    # `not x` already covers both None and an empty dict, so the extra
    # `len(x) == 0` check in the original was redundant
    return str(x) if x else ""
# history validation
_min_to_sec = lambda x: 60.0 * float(x)
_hour_to_sec = lambda x: 60.0 * _min_to_sec(x)
_day_to_sec = lambda x: 24.0 * _hour_to_sec(x)
_month_to_sec = lambda x: 30.4375 * _day_to_sec(x)
_year_to_sec = lambda x: 365.25 * _day_to_sec(x)
_kb_to_b = lambda x: 1024 * int(x)
_mb_to_b = lambda x: 1024 * _kb_to_b(x)
_gb_to_b = lambda x: 1024 * _mb_to_b(x)
_tb_to_b = lambda x: 1024 * _tb_to_b(x) # type: ignore
CANON_HISTORY_UNITS = LazyObject(
lambda: frozenset(["commands", "files", "s", "b"]), globals(), "CANON_HISTORY_UNITS"
)
HISTORY_UNITS = LazyObject(
lambda: {
"": ("commands", int),
"c": ("commands", int),
"cmd": ("commands", int),
"cmds": ("commands", int),
"command": ("commands", int),
"commands": ("commands", int),
"f": ("files", int),
"files": ("files", int),
"s": ("s", float),
"sec": ("s", float),
"second": ("s", float),
"seconds": ("s", float),
"m": ("s", _min_to_sec),
"min": ("s", _min_to_sec),
"mins": ("s", _min_to_sec),
"h": ("s", _hour_to_sec),
"hr": ("s", _hour_to_sec),
"hour": ("s", _hour_to_sec),
"hours": ("s", _hour_to_sec),
"d": ("s", _day_to_sec),
"day": ("s", _day_to_sec),
"days": ("s", _day_to_sec),
"mon": ("s", _month_to_sec),
"month": ("s", _month_to_sec),
"months": ("s", _month_to_sec),
"y": ("s", _year_to_sec),
"yr": ("s", _year_to_sec),
"yrs": ("s", _year_to_sec),
"year": ("s", _year_to_sec),
"years": ("s", _year_to_sec),
"b": ("b", int),
"byte": ("b", int),
"bytes": ("b", int),
"kb": ("b", _kb_to_b),
"kilobyte": ("b", _kb_to_b),
"kilobytes": ("b", _kb_to_b),
"mb": ("b", _mb_to_b),
"meg": ("b", _mb_to_b),
"megs": ("b", _mb_to_b),
"megabyte": ("b", _mb_to_b),
"megabytes": ("b", _mb_to_b),
"gb": ("b", _gb_to_b),
"gig": ("b", _gb_to_b),
"gigs": ("b", _gb_to_b),
"gigabyte": ("b", _gb_to_b),
"gigabytes": ("b", _gb_to_b),
"tb": ("b", _tb_to_b),
"terabyte": ("b", _tb_to_b),
"terabytes": ("b", _tb_to_b),
},
globals(),
"HISTORY_UNITS",
)
"""Maps lowercase unit names to canonical name and conversion utilities."""
def is_history_tuple(x):
"""Tests if something is a proper history value, units tuple."""
if (
isinstance(x, cabc.Sequence)
and len(x) == 2
and isinstance(x[0], (int, float))
and x[1].lower() in CANON_HISTORY_UNITS
):
return True
return False
def is_history_backend(x):
"""Tests if something is a valid history backend."""
return is_string(x) or is_class(x) or isinstance(x, object)
def is_dynamic_cwd_width(x):
    """Determine if the input is a valid input for the DYNAMIC_CWD_WIDTH
    environment variable.
    """
    # must be a (float, unit) pair where unit is "c" (columns) or "%"
    if not isinstance(x, tuple) or len(x) != 2:
        return False
    amount, unit = x
    return isinstance(amount, float) and unit in set("c%")
def to_dynamic_cwd_tuple(x):
    """Convert to a canonical cwd_width tuple ``(float value, unit)``.

    Accepts a string such as "20" or "20%", or a two-element sequence
    ``(value, unit)``.  The unit is "%" for percentages and "c" for columns.
    """
    if isinstance(x, str):
        # use endswith instead of x[-1] so an empty string raises the
        # expected ValueError from float() rather than an IndexError here
        if x.endswith("%"):
            return (float(x[:-1]), "%")
        return (float(x), "c")
    else:
        return (float(x[0]), x[1])
def dynamic_cwd_tuple_to_str(x):
    """Convert a canonical cwd_width tuple to a string."""
    value, unit = str(x[0]), x[1]
    # percentages keep their suffix; column counts are bare numbers
    return value + "%" if unit == "%" else value
RE_HISTORY_TUPLE = LazyObject(
lambda: re.compile(r"([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*([A-Za-z]*)"),
globals(),
"RE_HISTORY_TUPLE",
)
def to_history_tuple(x):
"""Converts to a canonical history tuple."""
if not isinstance(x, (cabc.Sequence, float, int)):
raise ValueError("history size must be given as a sequence or number")
if isinstance(x, str):
m = RE_HISTORY_TUPLE.match(x.strip().lower())
return to_history_tuple((m.group(1), m.group(3)))
elif isinstance(x, (float, int)):
return to_history_tuple((x, "commands"))
units, converter = HISTORY_UNITS[x[1]]
value = converter(x[0])
return (value, units)
def history_tuple_to_str(x):
    """Converts a valid history tuple to a canonical string."""
    return f"{x[0]} {x[1]}"
def all_permutations(iterable):
    """Yields every permutation of every non-empty length (1 .. len(iterable))."""
    for length in range(1, len(iterable) + 1):
        yield from itertools.permutations(iterable, r=length)
def format_color(string, **kwargs):
"""Formats strings that may contain colors. This simply dispatches to the
shell instances method of the same name. The results of this function should
be directly usable by print_color().
"""
if hasattr(xsh.shell, "shell"):
return xsh.shell.shell.format_color(string, **kwargs)
else:
# fallback for ANSI if shell is not yet initialized
from xonsh.ansi_colors import ansi_partial_color_format
style = xsh.env.get("XONSH_COLOR_STYLE")
return ansi_partial_color_format(string, style=style)
def print_color(string, **kwargs):
"""Prints a string that may contain colors. This dispatched to the shell
method of the same name. Colors will be formatted if they have not already
been.
"""
if hasattr(xsh.shell, "shell"):
xsh.shell.shell.print_color(string, **kwargs)
else:
# fallback for ANSI if shell is not yet initialized
print(format_color(string, **kwargs))
def color_style_names():
"""Returns an iterable of all available style names."""
return xsh.shell.shell.color_style_names()
def color_style():
"""Returns the current color map."""
return xsh.shell.shell.color_style()
def register_custom_style(
name, styles, highlight_color=None, background_color=None, base="default"
):
"""Register custom style.
Parameters
----------
name : str
Style name.
styles : dict
Token -> style mapping.
highlight_color : str
Hightlight color.
background_color : str
Background color.
base : str, optional
Base style to use as default.
Returns
-------
style : The style object created, None if not succeeded
"""
style = None
if pygments_version_info():
from xonsh.pyghooks import register_custom_pygments_style
style = register_custom_pygments_style(
name, styles, highlight_color, background_color, base
)
# register ANSI colors
from xonsh.ansi_colors import register_custom_ansi_style
register_custom_ansi_style(name, styles, base)
return style
def _token_attr_from_stylemap(stylemap):
"""yields tokens attr, and index from a stylemap"""
import prompt_toolkit as ptk
if xsh.shell.shell_type == "prompt_toolkit1":
style = ptk.styles.style_from_dict(stylemap)
for token in stylemap:
yield token, style.token_to_attrs[token]
else:
style = ptk.styles.style_from_pygments_dict(stylemap)
for token in stylemap:
style_str = "class:{}".format(
ptk.styles.pygments.pygments_token_to_classname(token)
)
yield (token, style.get_attrs_for_style_str(style_str))
def _get_color_lookup_table():
"""Returns the prompt_toolkit win32 ColorLookupTable"""
if xsh.shell.shell_type == "prompt_toolkit1":
from prompt_toolkit.terminal.win32_output import ColorLookupTable
else:
from prompt_toolkit.output.win32 import ColorLookupTable
return ColorLookupTable()
def _get_color_indexes(style_map):
"""Generates the color and windows color index for a style"""
table = _get_color_lookup_table()
for token, attr in _token_attr_from_stylemap(style_map):
if attr.color:
index = table.lookup_fg_color(attr.color)
try:
rgb = (
int(attr.color[0:2], 16),
int(attr.color[2:4], 16),
int(attr.color[4:6], 16),
)
except Exception:
rgb = None
yield token, index, rgb
# Map of new PTK2 color names to PTK1 variants
PTK_NEW_OLD_COLOR_MAP = LazyObject(
lambda: {
"black": "black",
"red": "darkred",
"green": "darkgreen",
"yellow": "brown",
"blue": "darkblue",
"magenta": "purple",
"cyan": "teal",
"gray": "lightgray",
"brightblack": "darkgray",
"brightred": "red",
"brightgreen": "green",
"brightyellow": "yellow",
"brightblue": "blue",
"brightmagenta": "fuchsia",
"brightcyan": "turquoise",
"white": "white",
},
globals(),
"PTK_NEW_OLD_COLOR_MAP",
)
# Map of new ansicolor names to old PTK1 names
ANSICOLOR_NAMES_MAP = LazyObject(
lambda: {"ansi" + k: "#ansi" + v for k, v in PTK_NEW_OLD_COLOR_MAP.items()},
globals(),
"ANSICOLOR_NAMES_MAP",
)
def _win10_color_map():
cmap = {
"ansiblack": (12, 12, 12),
"ansiblue": (0, 55, 218),
"ansigreen": (19, 161, 14),
"ansicyan": (58, 150, 221),
"ansired": (197, 15, 31),
"ansimagenta": (136, 23, 152),
"ansiyellow": (193, 156, 0),
"ansigray": (204, 204, 204),
"ansibrightblack": (118, 118, 118),
"ansibrightblue": (59, 120, 255),
"ansibrightgreen": (22, 198, 12),
"ansibrightcyan": (97, 214, 214),
"ansibrightred": (231, 72, 86),
"ansibrightmagenta": (180, 0, 158),
"ansibrightyellow": (249, 241, 165),
"ansiwhite": (242, 242, 242),
}
return {k: f"#{r:02x}{g:02x}{b:02x}" for k, (r, g, b) in cmap.items()}
WIN10_COLOR_MAP = LazyObject(_win10_color_map, globals(), "WIN10_COLOR_MAP")
def _win_bold_color_map():
"""Map dark ansi colors to lighter version."""
return {
"ansiblack": "ansibrightblack",
"ansiblue": "ansibrightblue",
"ansigreen": "ansibrightgreen",
"ansicyan": "ansibrightcyan",
"ansired": "ansibrightred",
"ansimagenta": "ansibrightmagenta",
"ansiyellow": "ansibrightyellow",
"ansigray": "ansiwhite",
}
WIN_BOLD_COLOR_MAP = LazyObject(_win_bold_color_map, globals(), "WIN_BOLD_COLOR_MAP")
def hardcode_colors_for_win10(style_map):
    """Replace all ansi colors with hardcoded colors to avoid unreadable defaults
    in conhost.exe

    Returns a new style map; *style_map* itself is not mutated.  Also forces
    $PROMPT_TOOLKIT_COLOR_DEPTH to DEPTH_24_BIT when unset, since the
    substituted values are 24-bit hex colors.
    """
    modified_style = {}
    if not xsh.env["PROMPT_TOOLKIT_COLOR_DEPTH"]:
        xsh.env["PROMPT_TOOLKIT_COLOR_DEPTH"] = "DEPTH_24_BIT"
    # Replace all ansi colors with hardcoded colors to avoid unreadable defaults
    # in conhost.exe
    for token, style_str in style_map.items():
        for ansicolor in WIN10_COLOR_MAP:
            if ansicolor in style_str:
                if "bold" in style_str and "nobold" not in style_str:
                    # Win10 doesn't yet handle bold colors. Instead dark
                    # colors are mapped to their lighter version. We simulate
                    # the same here.
                    # BUGFIX: str.replace returns a new string; the original
                    # discarded the result, so "bold" was never stripped.
                    style_str = style_str.replace("bold", "")
                    hexcolor = WIN10_COLOR_MAP[
                        WIN_BOLD_COLOR_MAP.get(ansicolor, ansicolor)
                    ]
                else:
                    hexcolor = WIN10_COLOR_MAP[ansicolor]
                style_str = style_str.replace(ansicolor, hexcolor)
        modified_style[token] = style_str
    return modified_style
def ansicolors_to_ptk1_names(stylemap):
"""Converts ansicolor names in a stylemap to old PTK1 color names"""
if pygments_version_info() and pygments_version_info() >= (2, 4, 0):
return stylemap
modified_stylemap = {}
for token, style_str in stylemap.items():
for color, ptk1_color in ANSICOLOR_NAMES_MAP.items():
if "#" + color not in style_str:
style_str = style_str.replace(color, ptk1_color)
modified_stylemap[token] = style_str
return modified_stylemap
def intensify_colors_for_cmd_exe(style_map):
"""Returns a modified style to where colors that maps to dark
colors are replaced with brighter versions.
"""
modified_style = {}
replace_colors = {
1: "ansibrightcyan", # subst blue with bright cyan
2: "ansibrightgreen", # subst green with bright green
4: "ansibrightred", # subst red with bright red
5: "ansibrightmagenta", # subst magenta with bright magenta
6: "ansibrightyellow", # subst yellow with bright yellow
9: "ansicyan", # subst intense blue with dark cyan (more readable)
}
if xsh.shell.shell_type == "prompt_toolkit1":
replace_colors = ansicolors_to_ptk1_names(replace_colors)
for token, idx, _ in _get_color_indexes(style_map):
if idx in replace_colors:
modified_style[token] = replace_colors[idx]
return modified_style
def intensify_colors_on_win_setter(enable):
"""Resets the style when setting the INTENSIFY_COLORS_ON_WIN
environment variable.
"""
enable = to_bool(enable)
if xsh.shell is not None and hasattr(xsh.shell.shell.styler, "style_name"):
delattr(xsh.shell.shell.styler, "style_name")
return enable
def format_std_prepost(template, env=None):
    """Formats a template prefix/postfix string for a standard buffer.
    Returns a string suitable for prepending or appending.

    Falls back to the raw ANSI formatter when the shell has not finished
    starting up; otherwise uses the live shell's prompt formatter.
    """
    if not template:
        return ""
    env = xsh.env if env is None else env
    # invisible sentinel characters, stripped again before returning
    invis = "\001\002"
    if xsh.shell is None:
        # shell hasn't fully started up (probably still in xonshrc)
        from xonsh.ansi_colors import ansi_partial_color_format
        from xonsh.prompt.base import PromptFormatter
        pf = PromptFormatter()
        s = pf(template)
        style = env.get("XONSH_COLOR_STYLE")
        s = ansi_partial_color_format(invis + s + invis, hide=False, style=style)
    else:
        # shell has fully started. do the normal thing
        shell = xsh.shell.shell
        try:
            s = shell.prompt_formatter(template)
        except Exception:
            # NOTE(review): if prompt_formatter raises, `s` is unbound on the
            # next line and a NameError follows — verify intended behavior
            print_exception()
        # \001\002 is there to fool pygments into not returning an empty string
        # for potentially empty input. This happens when the template is just a
        # color code with no visible text.
        s = shell.format_color(invis + s + invis, force_string=True)
    s = s.replace(invis, "")
    return s
_RE_STRING_START = "[bBprRuUf]*"
_RE_STRING_TRIPLE_DOUBLE = '"""'
_RE_STRING_TRIPLE_SINGLE = "'''"
_RE_STRING_DOUBLE = '"'
_RE_STRING_SINGLE = "'"
_STRINGS = (
_RE_STRING_TRIPLE_DOUBLE,
_RE_STRING_TRIPLE_SINGLE,
_RE_STRING_DOUBLE,
_RE_STRING_SINGLE,
)
RE_BEGIN_STRING = LazyObject(
lambda: re.compile("(" + _RE_STRING_START + "(" + "|".join(_STRINGS) + "))"),
globals(),
"RE_BEGIN_STRING",
)
"""Regular expression matching the start of a string, including quotes and
leading characters (r, b, or u)"""
RE_STRING_START = LazyObject(
lambda: re.compile(_RE_STRING_START), globals(), "RE_STRING_START"
)
"""Regular expression matching the characters before the quotes when starting a
string (r, b, or u, case insensitive)"""
RE_STRING_CONT = LazyDict(
{
'"': lambda: re.compile(r'((\\(.|\n))|([^"\\]))*'),
"'": lambda: re.compile(r"((\\(.|\n))|([^'\\]))*"),
'"""': lambda: re.compile(r'((\\(.|\n))|([^"\\])|("(?!""))|\n)*'),
"'''": lambda: re.compile(r"((\\(.|\n))|([^'\\])|('(?!''))|\n)*"),
},
globals(),
"RE_STRING_CONT",
)
"""Dictionary mapping starting quote sequences to regular expressions that
match the contents of a string beginning with those quotes (not including the
terminating quotes)"""
@lazyobject
def RE_COMPLETE_STRING():
    """Lazily compiled regex matching a fully terminated Python string literal.

    Anchored at both ends: optional prefix characters, an opening quote
    captured as ``quote``, arbitrary contents (newlines included via
    ``re.DOTALL``), then a backreference requiring the matching closing quote.
    """
    quote_alternatives = "|".join(_STRINGS)
    pattern = "^{}(?P<quote>{}).*?(?P=quote)$".format(
        _RE_STRING_START, quote_alternatives
    )
    return re.compile(pattern, re.DOTALL)
def strip_simple_quotes(s):
    """Remove one layer of matching outer quotes from ``s``, if present.

    Handles single quotes, double quotes, and their triple-quoted forms.
    Strings that do not both start and end with the same quote character
    are returned unchanged.
    """
    if s.startswith("'"):
        if not s.endswith("'"):
            return s
        if s.startswith("'''") and s.endswith("'''") and len(s) >= 6:
            return s[3:-3]
        if len(s) >= 2:
            return s[1:-1]
        return s
    if s.startswith('"'):
        if not s.endswith('"'):
            return s
        if s.startswith('"""') and s.endswith('"""') and len(s) >= 6:
            return s[3:-3]
        if len(s) >= 2:
            return s[1:-1]
        return s
    # no leading quote at all
    return s
def check_for_partial_string(x):
    """Returns the starting index (inclusive), ending index (exclusive), and
    starting quote string of the most recent Python string found in the input.

    check_for_partial_string(x) -> (startix, endix, quote)

    Parameters
    ----------
    x : str
        The string to be checked (representing a line of terminal input)

    Returns
    -------
    startix : int (or None)
        The index where the most recent Python string found started
        (inclusive), or None if no strings exist in the input
    endix : int (or None)
        The index where the most recent Python string found ended (exclusive),
        or None if no strings exist in the input OR if the input ended in the
        middle of a Python string
    quote : str (or None)
        A string containing the quote used to start the string (e.g., b", ",
        '''), or None if no string was found.
    """
    # indices of every string delimiter seen so far; an odd count means the
    # last string opened was never closed
    string_indices = []
    starting_quote = []
    current_index = 0
    match = re.search(RE_BEGIN_STRING, x)
    while match is not None:
        # add the start in
        start = match.start()
        quote = match.group(0)
        lenquote = len(quote)
        current_index += start
        # store the starting index of the string, as well as the
        # characters in the starting quotes (e.g., ", ', """, r", etc)
        string_indices.append(current_index)
        starting_quote.append(quote)
        # determine the string that should terminate this string
        ender = re.sub(RE_STRING_START, "", quote)
        # consume up to and including the opening quote, tracking how far
        # into the original input we are via current_index
        x = x[start + lenquote :]
        current_index += lenquote
        # figure out what is inside the string
        continuer = RE_STRING_CONT[ender]
        contents = re.match(continuer, x)
        inside = contents.group(0)
        leninside = len(inside)
        current_index += contents.start() + leninside + len(ender)
        # if we are not at the end of the input string, add the ending index of
        # the string to string_indices
        if contents.end() < len(x):
            string_indices.append(current_index)
        x = x[leninside + len(ender) :]
        # find the next match
        match = re.search(RE_BEGIN_STRING, x)
    numquotes = len(string_indices)
    if numquotes == 0:
        return (None, None, None)
    elif numquotes % 2:
        # odd number of delimiters: the last string is still open
        return (string_indices[-1], None, starting_quote[-1])
    else:
        return (string_indices[-2], string_indices[-1], starting_quote[-1])
# regular expressions for matching environment variables
# i.e $FOO, ${'FOO'}
@lazyobject
def POSIX_ENVVAR_REGEX():
    """Lazily compiled regex matching ``$NAME``, ``${'NAME'}`` and ``${"NAME"}``."""
    pat = r"""\$({(?P<quote>['"])|)(?P<envvar>\w+)((?P=quote)}|(?:\1\b))"""
    return re.compile(pat)
def expandvars(path):
    """Expand shell variables of the forms ``$var`` and ``${'var'}``.

    Unknown variables are left unchanged.  Accepts ``str``, ``bytes`` and
    ``pathlib.Path`` input; always returns ``str``.
    """
    env = xsh.env
    if isinstance(path, bytes):
        # decode bytes paths using the configured xonsh encoding settings
        path = path.decode(
            encoding=env.get("XONSH_ENCODING"), errors=env.get("XONSH_ENCODING_ERRORS")
        )
    elif isinstance(path, pathlib.Path):
        # get the path's string representation
        path = str(path)
    if "$" in path:
        # ``finditer`` runs over the original string while ``path`` is rebound
        # with each substitution; ``shift`` maps the original match spans onto
        # the partially replaced string as it grows or shrinks.
        shift = 0
        for match in POSIX_ENVVAR_REGEX.finditer(path):
            name = match.group("envvar")
            if name in env:
                detyper = env.get_detyper(name)
                val = env[name]
                # prefer the detyped (string) form; fall back to str() when
                # there is no detyper or it returns None
                value = str(val) if detyper is None else detyper(val)
                value = str(val) if value is None else value
                start_pos, end_pos = match.span()
                path_len_before_replace = len(path)
                path = path[: start_pos + shift] + value + path[end_pos + shift :]
                shift = shift + len(path) - path_len_before_replace
    return path
#
# File handling tools
#
def backup_file(fname):
    """Rename an existing file, inserting a timestamp before its extension.

    For example ``foo.cfg`` becomes ``foo.<YYYY-mm-dd-HH-MM-SS-ffffff>.cfg``.
    """
    # lazy imports
    import shutil
    from datetime import datetime

    root, extension = os.path.splitext(fname)
    stamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")
    shutil.move(fname, "{}.{}{}".format(root, stamp, extension))
def normabspath(p):
    """Return the normalized absolute form of path ``p``.

    Equivalent to ``os.path.normcase(os.path.abspath(p))``.
    """
    absolute = os.path.abspath(p)
    return os.path.normcase(absolute)
def expanduser_abs_path(inp):
    """Return the absolute path of ``inp`` with a leading ``~`` expanded."""
    expanded = expanduser(inp)
    return os.path.abspath(expanded)
# Lazily compiled regex matching a Windows drive prefix such as ``C:``.
WINDOWS_DRIVE_MATCHER = LazyObject(
    lambda: re.compile(r"^\w:"), globals(), "WINDOWS_DRIVE_MATCHER"
)
def expand_case_matching(s):
    """Expand ``s`` into a glob pattern that matches case-insensitively.

    Each letter outside of an existing ``[...]``/``{...}`` group is replaced
    by a character class containing its upper- and lower-case forms.  On
    Windows a leading drive specifier (e.g. ``C:``) is passed through as-is.
    """
    pieces = []
    depth = 0  # nesting level of bracket/brace groups already in the pattern
    drive = WINDOWS_DRIVE_MATCHER.match(s) if ON_WINDOWS else None
    if drive:
        prefix = drive.group(0)
        pieces.append(prefix)
        s = s[len(prefix):]
    for ch in s:
        if ch in "[{":
            depth += 1
        elif ch in "]}":
            depth -= 1
        elif depth > 0:
            # inside an existing group: copy verbatim
            pass
        elif ch.isalpha():
            folded = ch.casefold()
            if len(folded) == 1:
                ch = "[{}{}]".format(ch.upper(), ch.lower())
            else:
                # letters that casefold to several characters (e.g. eszett)
                expanded = [
                    "[{}{}]?".format(f.upper(), f.lower()) for f in folded[:-1]
                ]
                last = folded[-1]
                expanded.append("[{}{}{}]".format(last.upper(), last.lower(), ch))
                ch = "".join(expanded)
        pieces.append(ch)
    return "".join(pieces)
def globpath(
    s, ignore_case=False, return_empty=False, sort_result=None, include_dotfiles=None
):
    """Glob ``s`` after expanding home and env vars, returning a list.

    When nothing matches, returns ``[]`` if ``return_empty`` is true,
    otherwise a one-element list holding the expanded pattern itself.
    """
    paths, expanded = _iglobpath(
        s,
        ignore_case=ignore_case,
        sort_result=sort_result,
        include_dotfiles=include_dotfiles,
    )
    matches = list(paths)
    if matches:
        return matches
    return [] if return_empty else [expanded]
def _dotglobstr(s):
modified = False
dotted_s = s
if "/*" in dotted_s:
dotted_s = dotted_s.replace("/*", "/.*")
dotted_s = dotted_s.replace("/.**/.*", "/**/.*")
modified = True
if dotted_s.startswith("*") and not dotted_s.startswith("**"):
dotted_s = "." + dotted_s
modified = True
return dotted_s, modified
def _iglobpath(s, ignore_case=False, sort_result=None, include_dotfiles=None):
    """Expand and glob ``s``, returning ``(paths_iterator, expanded_pattern)``.

    Defaults for ``sort_result`` and ``include_dotfiles`` are read from the
    ``$GLOB_SORTED`` and ``$DOTGLOB`` environment settings.
    """
    s = xsh.expand_path(s)
    if sort_result is None:
        sort_result = xsh.env.get("GLOB_SORTED")
    if include_dotfiles is None:
        include_dotfiles = xsh.env.get("DOTGLOB")
    if ignore_case:
        s = expand_case_matching(s)
    if "**" in s and "**/*" not in s:
        # normalize bare recursive globs so ``**`` also matches file names
        s = s.replace("**", "**/*")
    if include_dotfiles:
        # NOTE: dotmodified is only bound on this branch; the guards below
        # short-circuit on include_dotfiles first, so that is safe.
        dotted_s, dotmodified = _dotglobstr(s)
    if sort_result:
        # materialize so results (plus the dotfile variants) can be sorted
        paths = glob.glob(s, recursive=True)
        if include_dotfiles and dotmodified:
            paths.extend(glob.iglob(dotted_s, recursive=True))
        paths.sort()
        paths = iter(paths)
    else:
        paths = glob.iglob(s, recursive=True)
        if include_dotfiles and dotmodified:
            paths = itertools.chain(glob.iglob(dotted_s, recursive=True), paths)
    return paths, s
def iglobpath(s, ignore_case=False, sort_result=None, include_dotfiles=None):
    """Iterate glob matches for ``s`` after expanding home and env vars."""
    try:
        paths, _ = _iglobpath(
            s,
            ignore_case=ignore_case,
            sort_result=sort_result,
            include_dotfiles=include_dotfiles,
        )
    except IndexError:
        # something went wrong in the actual iglob() call
        return iter(())
    return paths
def ensure_timestamp(t, datetime_format=None):
    """Coerce ``t`` into a POSIX timestamp.

    Accepts numbers, numeric strings, ``datetime`` objects, and date strings
    parsed with ``datetime_format`` (defaulting to ``$XONSH_DATETIME_FORMAT``).
    """
    if isinstance(t, (int, float)):
        return t
    try:
        return float(t)
    except (ValueError, TypeError):
        pass
    if datetime_format is None:
        datetime_format = xsh.env["XONSH_DATETIME_FORMAT"]
    if isinstance(t, datetime.datetime):
        return t.timestamp()
    return datetime.datetime.strptime(t, datetime_format).timestamp()
def format_datetime(dt):
    """Format ``dt`` according to the ``$XONSH_DATETIME_FORMAT`` setting."""
    fmt = xsh.env["XONSH_DATETIME_FORMAT"]
    return dt.strftime(fmt)
def columnize(elems, width=80, newline="\n"):
    """Takes an iterable of strings and returns a list of lines with the
    elements placed in columns. Each line will be at most *width* columns.
    The newline character will be appended to the end of each line.
    """
    # element widths including one separating space
    sizes = [len(e) + 1 for e in elems]
    total = sum(sizes)
    nelem = len(elems)
    if total - 1 <= width:
        # everything fits on a single row
        ncols = len(sizes)
        nrows = 1
        columns = [sizes]
        last_longest_row = total
        enter_loop = False
    else:
        # start from one column and greedily add columns while rows still fit
        ncols = 1
        nrows = len(sizes)
        columns = [sizes]
        last_longest_row = max(sizes)
        enter_loop = True
    while enter_loop:
        # width of a row if each column is padded to its widest element
        longest_row = sum(map(max, columns))
        if longest_row - 1 <= width:
            # we might be able to fit another column.
            ncols += 1
            nrows = nelem // ncols
            columns = [sizes[i * nrows : (i + 1) * nrows] for i in range(ncols)]
            last_longest_row = longest_row
        else:
            # we can't fit another column
            ncols -= 1
            nrows = nelem // ncols
            break
    # distribute the leftover horizontal space evenly as per-column padding
    pad = (width - last_longest_row + ncols) // ncols
    pad = pad if pad > 1 else 1
    data = [elems[i * nrows : (i + 1) * nrows] for i in range(ncols)]
    colwidths = [max(map(len, d)) + pad for d in data]
    # the last column needs no trailing padding
    colwidths[-1] -= pad
    row_t = "".join(["{{row[{i}]: <{{w[{i}]}}}}".format(i=i) for i in range(ncols)])
    row_t += newline
    lines = [
        row_t.format(row=row, w=colwidths)
        for row in itertools.zip_longest(*data, fillvalue="")
    ]
    return lines
ALIAS_KWARG_NAMES = frozenset(["args", "stdin", "stdout", "stderr", "spec", "stack"])
def unthreadable(f):
    """Mark a callable alias to run only on the main thread.

    Often needed for debuggers and profilers.  Returns ``f`` unchanged apart
    from the ``__xonsh_threadable__`` marker attribute.
    """
    setattr(f, "__xonsh_threadable__", False)
    return f
def uncapturable(f):
    """Mark a callable alias as unsafe to run with output capturing.

    Often needed when the alias launches interactive subprocesses such as
    pagers and text editors.  Returns ``f`` with ``__xonsh_capturable__`` set.
    """
    setattr(f, "__xonsh_capturable__", False)
    return f
def carriage_return():
    """Write a single carriage return to stdout, with no trailing newline."""
    print(end="\r", flush=True)
def deprecated(deprecated_in=None, removed_in=None):
    """Parametrized decorator that deprecates a function in a graceful manner.

    Updates the decorated function's docstring to mention the version
    that deprecation occurred in and the version it will be removed
    in if both of these values are passed.

    When removed_in is not a release equal to or less than the current
    release, call ``warnings.warn`` with details, while raising
    ``DeprecationWarning``.

    When removed_in is a release equal to or less than the current release,
    raise an ``AssertionError``.

    Parameters
    ----------
    deprecated_in : str
        The version number that deprecated this function.
    removed_in : str
        The version number that this function will be removed in.
    """
    message_suffix = _deprecated_message_suffix(deprecated_in, removed_in) or ""

    def decorated(func):
        warning_message = f"{func.__name__} has been deprecated" + message_suffix

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            _deprecated_error_on_expiration(func.__name__, removed_in)
            # BUGFIX: preserve the wrapped function's return value; previously
            # it was silently discarded, making every deprecated function
            # return None.
            result = func(*args, **kwargs)
            warnings.warn(warning_message, DeprecationWarning)
            return result

        # append the deprecation notice to the wrapped function's docstring
        wrapped.__doc__ = (
            f"{wrapped.__doc__}\n\n{warning_message}"
            if wrapped.__doc__
            else warning_message
        )
        return wrapped

    return decorated
def _deprecated_message_suffix(deprecated_in, removed_in):
if deprecated_in and removed_in:
message_suffix = " in version {} and will be removed in version {}".format(
deprecated_in, removed_in
)
elif deprecated_in and not removed_in:
message_suffix = f" in version {deprecated_in}"
elif not deprecated_in and removed_in:
message_suffix = f" and will be removed in version {removed_in}"
else:
message_suffix = None
return message_suffix
def _deprecated_error_on_expiration(name, removed_in):
    """Raise ``AssertionError`` once the current version reaches ``removed_in``."""
    from packaging.version import Version

    if not removed_in:
        return
    if Version(__version__) >= Version(removed_in):
        raise AssertionError(f"{name} has passed its version {removed_in} expiry date!")
def to_repr_pretty_(inst, p, cycle):
    """Pretty-printing helper rendering ``inst`` as ``module.Class(...)``.

    ``p`` is a pretty printer object; ``cycle`` is true when the printer has
    detected a reference cycle, in which case an ellipsis is emitted instead
    of the contents.
    """
    qualified = "{}.{}".format(inst.__class__.__module__, inst.__class__.__name__)
    with p.group(0, qualified + "(", ")"):
        if cycle:
            p.text("...")
        elif len(inst):
            p.break_()
            p.pretty(dict(inst))
class XAttr:
    """Descriptor pairing an attribute's name with a fixed value.

    The attribute name is captured automatically via ``__set_name__`` when
    the descriptor is assigned inside a class body; attribute access always
    yields the descriptor itself so both name and value stay reachable.
    """

    __slots__ = ("name", "value")

    def __init__(self, val) -> None:
        self.value = val

    def __set_name__(self, owner, name) -> None:
        self.name: str = name

    def __get__(self, instance, owner) -> "XAttr":
        # return the descriptor itself, not the stored value
        return self

    def __str__(self) -> str:
        return "<{}={}>".format(self.name, self.value)
class NamedConstantMeta(type):
    """Metaclass that makes a class iterable over its non-dunder attributes."""

    def __iter__(cls) -> tp.Iterator[XAttr]:
        for attr_name in vars(cls):
            if attr_name.startswith("__"):
                continue
            yield getattr(cls, attr_name)
| 30.263196 | 146 | 0.610157 |
972b6c8633b40d3b477a744183d33098ef9a63a7 | 3,019 | py | Python | pnc_cli/swagger_client/models/build_configuration_audited_singleton.py | SakuragawaAsaba/pnc-cli | 0e0c5976766f6d2e32980c39ebc30950fc02960e | [
"Apache-2.0"
] | null | null | null | pnc_cli/swagger_client/models/build_configuration_audited_singleton.py | SakuragawaAsaba/pnc-cli | 0e0c5976766f6d2e32980c39ebc30950fc02960e | [
"Apache-2.0"
] | 3 | 2015-06-01T22:12:27.000Z | 2015-10-11T16:20:11.000Z | pnc_cli/swagger_client/models/build_configuration_audited_singleton.py | SakuragawaAsaba/pnc-cli | 0e0c5976766f6d2e32980c39ebc30950fc02960e | [
"Apache-2.0"
] | 5 | 2015-05-28T18:14:36.000Z | 2018-07-20T07:38:21.000Z | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from datetime import datetime
from pprint import pformat
from six import iteritems
class BuildConfigurationAuditedSingleton(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        BuildConfigurationAuditedSingleton - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # attribute name -> swagger type name (drives to_dict recursion)
        self.swagger_types = {
            'content': 'BuildConfigurationAuditedRest'
        }
        # attribute name -> JSON key in the API definition
        self.attribute_map = {
            'content': 'content'
        }
        self._content = None
    @property
    def content(self):
        """
        Gets the content of this BuildConfigurationAuditedSingleton.
        :return: The content of this BuildConfigurationAuditedSingleton.
        :rtype: BuildConfigurationAuditedRest
        """
        return self._content
    @content.setter
    def content(self, content):
        """
        Sets the content of this BuildConfigurationAuditedSingleton.
        :param content: The content of this BuildConfigurationAuditedSingleton.
        :type: BuildConfigurationAuditedRest
        """
        self._content = content
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # serialize each list element, recursing into nested models
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, datetime):
                # datetimes are serialized as 'YYYY-MM-DD' date strings
                result[attr] = str(value.date())
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
| 28.752381 | 79 | 0.607817 |
00208c34797df70aed355f10c955869d32c642ce | 2,790 | py | Python | eval.py | Moeinzjg/r-pgcn | a2065aa76cb936037f6e0ddf8ff244a6b38ede90 | [
"MIT"
] | null | null | null | eval.py | Moeinzjg/r-pgcn | a2065aa76cb936037f6e0ddf8ff244a6b38ede90 | [
"MIT"
] | null | null | null | eval.py | Moeinzjg/r-pgcn | a2065aa76cb936037f6e0ddf8ff244a6b38ede90 | [
"MIT"
] | null | null | null | import argparse
import os
import time
import re
import math
import torch
import pytorch_mask_rcnn as pmr
def main(args):
    """Evaluate a trained Mask R-CNN / R-PolyGCN checkpoint on the test set.

    Restores the model described by ``args`` from ``args.ckpt_path``, runs
    ``pmr.evaluate`` and prints COCO-style metrics plus polygon IoU and
    MaxTangent summaries.
    """
    device = torch.device("cuda" if torch.cuda.is_available() and args.use_cuda else "cpu")
    cuda = device.type == "cuda"
    if cuda:
        pmr.get_gpu_prop(show=True)
    print("\ndevice: {}".format(device))
    # NOTE(review): train=True while loading the *test* split — presumably the
    # evaluator needs training-style targets; confirm against pmr.datasets.
    d_test = pmr.datasets(args.dataset, args.data_dir, "test", train=True) # COCO 2017
    print(args)
    # +1 presumably accounts for the background class — TODO confirm
    num_classes = max(d_test.classes) + 1
    if 'fpn' in args.backbone:
        # e.g. "resnet50_fpn" -> backbone name "resnet50"
        backbone_name = re.findall('(.*?)_fpn', args.backbone)[0]
        model = pmr.maskrcnn_resnet_fpn(pretrained=False, num_classes=num_classes,
                                        pretrained_backbone=True, backbone_name=backbone_name).to(device)
    else:
        model = pmr.maskrcnn_resnet50(False, num_classes, pretrained_backbone=True).to(device)
    checkpoint = torch.load(args.ckpt_path, map_location=device)
    model.load_state_dict(checkpoint["model"])
    # print(checkpoint["eval_info"])
    del checkpoint
    if cuda:
        # release cached GPU memory held by the deleted checkpoint tensors
        torch.cuda.empty_cache()
    print("\nevaluating...\n")
    B = time.time()
    eval_output, rpolygcn_eval_output, iter_eval, poly_iou, poly_maxtan = pmr.evaluate(model, d_test, device, args)
    B = time.time() - B
    for bf in eval_output.buffer:
        print(bf)
    print(eval_output.get_AP())
    print('---------------------------- R-PolyGCN ----------------------------')
    for bf in rpolygcn_eval_output.buffer:
        print(bf)
    print(rpolygcn_eval_output.get_AP())
    if iter_eval is not None:
        print("\nTotal time of this evaluation: {:.1f} s, speed: {:.1f} imgs/s".format(B, 1 / iter_eval))
    if poly_iou is not None:
        print("\n Average IOU of polygons is: {:.2f}".format(sum(poly_iou) / len(poly_iou)))
    if poly_maxtan is not None:
        # convert the mean MaxTangent angle from radians to degrees
        print("\n Average MaxTangent of polygons is: {:.2f}".format(sum(poly_maxtan) / len(poly_maxtan) * 180/math.pi))
if __name__ == "__main__":
    # Command-line entry point: parse evaluation options and run main().
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", default="coco")
    parser.add_argument("--data-dir", default="../Vegas_coco_random_splits")
    parser.add_argument("--ckpt-path", default="maskrcnn_coco-25.pth")
    parser.add_argument("--iters", type=int, default=-1) # number of iterations, minus means the entire dataset
    parser.add_argument("--backbone", type=str, default="resnet50_fpn", choices=["resnet50", "resnet50_fpn", "resnet101_fpn"])
    args = parser.parse_args() # [] is needed if you're using Jupyter Notebook.
    # Derived settings: always request CUDA (main() falls back to CPU when
    # unavailable) and store result files next to the checkpoint.
    args.use_cuda = True
    args.results = os.path.join(os.path.dirname(args.ckpt_path), "maskrcnn_results.pth")
    args.rpolygcn_results = os.path.join(os.path.dirname(args.ckpt_path), "rpolygcn_results.pth")
    main(args)
| 37.702703 | 126 | 0.662007 |
7fe042908547f7eec5ffe10b7408dd548e0a58d8 | 471 | py | Python | mahjong/hand_calculating/yaku_list/yakuman/sashikomi.py | wardenlym/mahjong | 475da6360e24e70e6e0d1deada573c842a71f261 | [
"MIT"
] | 254 | 2017-09-20T15:02:20.000Z | 2022-03-28T11:33:28.000Z | mahjong/hand_calculating/yaku_list/yakuman/sashikomi.py | wardenlym/mahjong | 475da6360e24e70e6e0d1deada573c842a71f261 | [
"MIT"
] | 39 | 2017-09-23T14:28:36.000Z | 2022-01-06T08:41:57.000Z | mahjong/hand_calculating/yaku_list/yakuman/sashikomi.py | wardenlym/mahjong | 475da6360e24e70e6e0d1deada573c842a71f261 | [
"MIT"
] | 38 | 2017-10-19T09:06:53.000Z | 2022-03-15T05:08:22.000Z | from mahjong.hand_calculating.yaku import Yaku
class Sashikomi(Yaku):
    """Situational yakuman; whether it applies is decided by the caller."""

    def __init__(self, yaku_id):
        super(Sashikomi, self).__init__(yaku_id)

    def set_attributes(self):
        self.name = "Sashikomi"
        self.is_yakuman = True
        # closed-hand value is the yakuman limit; never awarded on open hands
        self.han_closed = 13
        self.han_open = None

    def is_condition_met(self, hand, *args):
        # The decision whether this yaku was scored is made by superior code,
        # so the hand itself never disqualifies it.
        return True
| 20.478261 | 60 | 0.630573 |
7b68c4aa61df2402f79bc3f2b63f9523480accc4 | 248 | py | Python | bullet-gym-primitive/showKerasNAFMotionExample.py | benelot/bullet-gym | 863cbc25c74e3fcecdc84f68bde0cbba1a65f070 | [
"MIT"
] | 55 | 2017-04-12T19:12:57.000Z | 2021-04-17T12:54:50.000Z | bullet-gym-primitive/showKerasNAFMotionExample.py | shipjobs/bullet-gym | 1e064b135175546172acd1a5a996863469759b6d | [
"MIT"
] | 10 | 2017-03-25T20:55:30.000Z | 2017-07-14T08:27:42.000Z | bullet-gym-primitive/showKerasNAFMotionExample.py | shipjobs/bullet-gym | 1e064b135175546172acd1a5a996863469759b6d | [
"MIT"
] | 9 | 2017-11-04T15:33:40.000Z | 2021-08-29T19:36:31.000Z | #!/usr/bin/python
import os
os.system('python runTrainer.py --agent=KerasNAFAgent --env=Motionv0Env --train-for=0 --test-for=10000000 --random-initial-position --delay=0.005 --gui --show-test --load-file=checkpoints/KerasNAF-Motionv0-chkpt-1.h5')
| 62 | 218 | 0.75 |
ee6873ec38dd1f287d40fbd6ad5a400193e1d7b7 | 360 | py | Python | benchmark.py | Cryptum169/RadioTextProcessing_NLP | e55aba766bc86012c56b60350f6490b70c705491 | [
"MIT"
] | 1 | 2018-06-22T20:18:23.000Z | 2018-06-22T20:18:23.000Z | benchmark.py | Cryptum169/RadioTextProcessing_NLP | e55aba766bc86012c56b60350f6490b70c705491 | [
"MIT"
] | null | null | null | benchmark.py | Cryptum169/RadioTextProcessing_NLP | e55aba766bc86012c56b60350f6490b70c705491 | [
"MIT"
] | null | null | null | import gensim
# Load two word2vec models: one trained on the Sogou news corpus and one on
# radio-audio transcriptions.
model_sogou = gensim.models.Word2Vec.load('data/Model/gensimWord2Vec.bin')
model_audio = gensim.models.Word2Vec.load('data/Model/gensimWord2Vec_audio.bin')
print('Benchmark:')
# Header reads "Sogou <tab> transcription"; the queries below compare nearest
# neighbours of the same words ("vehicle", "accident") across both models.
print('搜狗\t转写')
print(model_sogou.most_similar('车辆'))
print(model_audio.most_similar('车辆'))
print(model_sogou.most_similar('事故'))
print(model_audio.most_similar('事故'))
| 30 | 80 | 0.788889 |
5af06f04aac5bca47a9cb2df51b2af68666ab47e | 678 | py | Python | lib/robot/version.py | nbbull/RIDE | e6496f0b1b6dc454b9479de48b6949bce29b53df | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-11-25T03:22:52.000Z | 2017-11-25T03:22:52.000Z | lib/robot/version.py | nbbull/RIDE | e6496f0b1b6dc454b9479de48b6949bce29b53df | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | lib/robot/version.py | nbbull/RIDE | e6496f0b1b6dc454b9479de48b6949bce29b53df | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Automatically generated by 'package.py' script.
import sys
# Release metadata stamped in by the packaging script (see header comment).
VERSION = 'trunk'
RELEASE = '20120314'
TIMESTAMP = '20120314-230805'
def get_version(sep=' '):
    """Return the version string, appending the release tag unless final."""
    if RELEASE == 'final':
        return VERSION
    return sep.join((VERSION, RELEASE))
def get_full_version(who=''):
    """Return e.g. ``'<who> trunk 20120314 (Python 3.8.1 on linux)'``."""
    interpreter = _get_interpreter()
    python_version = sys.version.split()[0]
    full = '%s %s (%s %s on %s)' \
           % (who, get_version(), interpreter, python_version, sys.platform)
    return full.strip()
def _get_interpreter():
if sys.platform.startswith('java'):
return 'Jython'
if sys.platform == 'cli':
return 'IronPython'
if 'PyPy' in sys.version:
return 'PyPy'
return 'Python'
| 24.214286 | 77 | 0.631268 |
2d4fb7957566b5b19de7e1380ec8ca8c8343b323 | 8,240 | py | Python | arcgis_proxy/tests/test_image_service.py | gfw-api/arcgis-proxy | 007e6d8d36327c8edb602fba47b654dc4baac2b0 | [
"MIT"
] | null | null | null | arcgis_proxy/tests/test_image_service.py | gfw-api/arcgis-proxy | 007e6d8d36327c8edb602fba47b654dc4baac2b0 | [
"MIT"
] | null | null | null | arcgis_proxy/tests/test_image_service.py | gfw-api/arcgis-proxy | 007e6d8d36327c8edb602fba47b654dc4baac2b0 | [
"MIT"
] | null | null | null | import unittest
import json
from arcgis_proxy import app
from arcgis_proxy.config.servers import servers
from unittest import mock
from arcgis_proxy.routes.api.v1.image_router import compute_histograms
import logging
# Route under test and a sample geostore hash reused across the test cases.
histogram_route = '/api/v1/arcgis-proxy/ImageServer/computeHistograms'
geostore_id = '204c6ff1dae38a10953b19d452921283'
# Canned fixtures: an ImageServer histogram response, an EsriJSON geometry,
# and a rendering-rule payload.
with open('arcgis_proxy/tests/fixtures/histogram.json') as src:
    server_response = json.load(src)
with open('arcgis_proxy/tests/fixtures/esrijson.json') as src:
    esrijson = json.load(src)
with open('arcgis_proxy/tests/fixtures/rendering_rule.json') as src:
    rendering_rule = json.load(src)
# Upstream ArcGIS endpoint the proxy is expected to call.
server_request = 'https://gis-gfw.wri.org/arcgis/rest/services/image_services/analysis/ImageServer/computeHistograms'
def compose_query_params_histograms(server='gfw',
                                    service='image_services/analysis',
                                    rendering_rule=json.dumps(rendering_rule),
                                    mosaic_rule=None,
                                    pixel_size=100,
                                    geostore_id=geostore_id):
    """Compose the query string for the ImageServer/computeHistograms route."""
    if mosaic_rule is None:
        mosaic_rule = ''
    params = (
        ('server', server),
        ('service', service),
        ('renderingRule', rendering_rule),
        ('mosaicRule', mosaic_rule),
        ('pixelSize', pixel_size),
        ('geostore', geostore_id),
    )
    return '?' + '&'.join('{}={}'.format(key, value) for key, value in params)
def deserialize(response):
    """Deserialize a JSON response body and separate data from API errors.

    Returns a ``(data, errors)`` tuple where exactly one element is ``None``.
    """
    payload = json.loads(response.data)
    if 'errors' in payload:
        return None, payload['errors'][0]['detail']
    return payload, None
def mocked_requests_post(*args, **kwargs):
    """Stand-in for ``requests.post`` against the ArcGIS ImageServer.

    Returns the canned histogram fixture with HTTP 200 for the expected
    server URL, and a 404 response for anything else.
    """

    class FakeResponse:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code
            self.text = json_data

        def json(self):
            return self.json_data

    if args[0] == server_request:
        return FakeResponse(server_response, 200)
    return FakeResponse(None, 404)
def mocked_get_esrijson_wm(*args, **kwargs):
    """ mock get_esrijson_wm function """
    # Ignores all arguments (beyond logging them) and returns the canned
    # EsriJSON fixture loaded at module import time.
    logging.debug('[MOCK]: args: {}'.format(args))
    return esrijson
class ImageServiceHistogramTest(unittest.TestCase):
    """ Image Server Compute Histograms Test """
    def setUp(self):
        # fresh Flask test client in testing mode for every case
        app.testing = True
        app.config['TESTING'] = True
        app.config['DEBUG'] = False
        self.app = app.test_client()
    def tearDown(self):
        pass
    def make_request(self, request, error=False):
        """
        make request to provided URL
        if requests is suppose to fail assert status code 400
        otherwise assert for status code 200
        """
        response = self.app.get(request, follow_redirects=True)
        data, errors = deserialize(response)
        status_code = response.status_code
        if error:
            self.assertEqual(status_code, 400)
            return data, errors
        else:
            self.assertEqual(status_code, 200)
            return data, errors
    def test_image_router_compute_histograms_no_server(self):
        """ using false rendering rule """
        # NOTE(review): docstring looks copy-pasted; this case exercises a
        # missing ``server`` parameter.
        logging.debug('[TEST]: Test compute histograms no server')
        server = ""
        query_params = compose_query_params_histograms(server=server)
        data, errors = self.make_request('{}{}'.format(histogram_route, query_params), error=True)
        self.assertEqual(errors, 'either server or serverUrl is required')
    def test_image_router_compute_histograms_false_server(self):
        """ using false rendering rule """
        # unknown server alias must be rejected with the list of valid ones
        logging.debug('[TEST]: Test compute histograms false server')
        server = "false-server"
        query_params = compose_query_params_histograms(server=server)
        data, errors = self.make_request('{}{}'.format(histogram_route, query_params), error=True)
        self.assertEqual(errors, 'server not in list {}'.format(servers.keys()))
    def test_image_router_compute_histograms_no_service(self):
        """ using false rendering rule """
        logging.debug('[TEST]: Test compute histograms no service')
        service = ""
        query_params = compose_query_params_histograms(service=service)
        data, errors = self.make_request('{}{}'.format(histogram_route, query_params), error=True)
        self.assertEqual(errors, 'service is required')
    def test_image_router_compute_histograms_false_rendering_rule(self):
        """ using false rendering rule """
        # renderingRule must be valid JSON
        logging.debug('[TEST]: Test compute histograms false rendering rule')
        rendering_rule = "False rule"
        query_params = compose_query_params_histograms(rendering_rule=rendering_rule)
        data, errors = self.make_request('{}{}'.format(histogram_route, query_params), error=True)
        self.assertEqual(errors, 'renderingRule not a valid JSON')
    def test_image_router_compute_histograms_false_mosaic_rule(self):
        """ using false mosaic rule """
        # mosaicRule, when given, must also be valid JSON
        logging.debug('[TEST]: Test compute histograms false mosaic rule')
        mosaic_rule = "False rule"
        query_params = compose_query_params_histograms(mosaic_rule=mosaic_rule)
        data, errors = self.make_request('{}{}'.format(histogram_route, query_params), error=True)
        self.assertEqual(errors, 'mosaicRule not a valid JSON')
    def test_image_router_compute_histograms_no_rendering_rule(self):
        """ using no rendering rule """
        logging.debug('[TEST]: Test compute histograms no rendering rule')
        rendering_rule = ''
        query_params = compose_query_params_histograms(rendering_rule=rendering_rule)
        data, errors = self.make_request('{}{}'.format(histogram_route, query_params), error=True)
        self.assertEqual(errors, 'Must provide a valid renderingRule')
    def test_image_router_compute_histograms_false_pixel_size(self):
        """ using false pixel size """
        logging.debug('[TEST]: Test compute histograms false pixel size')
        pixel_size = "One"
        query_params = compose_query_params_histograms(pixel_size=pixel_size)
        data, errors = self.make_request('{}{}'.format(histogram_route, query_params), error=True)
        self.assertEqual(errors, 'pixelSize must be of Type Integer')
    @mock.patch('arcgis_proxy.routes.api.v1.image_router.requests.post', side_effect=mocked_requests_post)
    @mock.patch('arcgis_proxy.routes.api.v1.image_router.get_esrijson_wm', side_effect=mocked_get_esrijson_wm)
    def test_image_router_compute_histograms(self, mock_geostore, mock_post):
        """
        actual call to compute histogram using correct params
        expecting Image Server response
        """
        logging.debug('[TEST]: Test compute histograms')
        query_params = compose_query_params_histograms()
        # using app.test_request_context to fake a request,
        # so that compute_histogram() knows what URL it should handle
        with app.test_request_context(path='{}{}'.format(histogram_route, query_params)):
            ch = compute_histograms()
            logging.debug('[TEST]: response:{}'.format(json.loads(ch[0].data)))
            self.assertEqual(json.loads(ch[0].data), server_response)
        # verify the proxy forwarded the expected multipart payload upstream
        logging.debug('[TEST]: POST {}'.format(mock_post.call_args_list))
        self.assertIn(mock.call(server_request,
                                files={'geometry': (None, json.dumps(esrijson)),
                                       'geometryType': (None, 'esriGeometryPolygon'),
                                       'renderingRule': (None, json.dumps(rendering_rule)),
                                       'mosaicRule': (None, None),
                                       'pixelSize': (None, '100'),
                                       'f': (None, 'json')}), mock_post.call_args_list)
d1f3bd8e064730bd42359d9e577f4cbaeaa13498 | 6,530 | py | Python | owtf/settings.py | Udbhavbisarya23/owtf | 27623937677caf975569f8de8af7983ca57611bc | [
"BSD-3-Clause"
] | 1 | 2021-02-03T10:03:35.000Z | 2021-02-03T10:03:35.000Z | owtf/settings.py | justdvnsh/owtf | 3a543b4eb2a7ad67155eb96dd2d99efbc181498d | [
"BSD-3-Clause"
] | 3 | 2021-03-26T00:33:28.000Z | 2022-02-13T21:08:52.000Z | owtf/settings.py | justdvnsh/owtf | 3a543b4eb2a7ad67155eb96dd2d99efbc181498d | [
"BSD-3-Clause"
] | null | null | null | """
owtf.settings
~~~~~~~~~~~~~
It contains all the owtf global configs.
"""
import os
import re
# Python 2 compatibility: FileNotFoundError does not exist there, so fall
# back to its closest ancestor, IOError.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError
import yaml
# Base locations: per-user configuration lives under ~/.owtf, packaged data
# sits next to this module.
HOME_DIR = os.path.expanduser("~")
OWTF_CONF = os.path.join(HOME_DIR, ".owtf")
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_DIR = os.path.join(ROOT_DIR, "data", "conf")

DEBUG = True
# Used by tools like dirbuster to launch gui or cli versions
INTERACTIVE = True

# Database config when used in docker
if os.environ.get("DOCKER", None):
    DATABASE_NAME = os.environ["POSTGRES_DB"]
    DATABASE_PASS = os.environ["POSTGRES_PASSWORD"]
    DATABASE_USER = os.environ["POSTGRES_USER"]
    DATABASE_IP = "db"
    DATABASE_PORT = 5432
else:
    # Change this if you deploy OWTF to a public facing server
    DATABASE_PASS = "jgZKW33Q+HZk8rqylZxaPg1lbuNGHJhgzsq3gBKV32g="
    DATABASE_NAME = "owtf_db"
    DATABASE_USER = "owtf_db_user"
    DATABASE_IP = "127.0.0.1"
    DATABASE_PORT = 5432

# API and UI Server
SERVER_ADDR = "0.0.0.0"
SERVER_PORT = 8009
FILE_SERVER_PORT = 8010

# Default API version
DEFAULT_API_VERSION = "v1"

# Application secret
# Change this
APP_SECRET = "changeme"

SESSION_COOKIE_NAME = "owtf-session"

# CORS settings. Fine grained, do not override if possible.
SIMPLE_HEADERS = ["accept", "accept-language", "content-language"]
# BUG FIX: the UI origin used to read "http:/localhost:8009" (missing a
# slash), which can never equal a browser-sent Origin header.
ALLOWED_ORIGINS = ["http://localhost:8009", "http://localhost:8010"]
ALLOWED_METHODS = ["GET", "POST", "DELETE"]
SEND_CREDENTIALS = False
USE_SENTRY = False
SENTRY_API_KEY = ""
# IMP PATHS
WEB_TEST_GROUPS = os.path.join(
OWTF_CONF, "conf", "profiles", "plugin_web", "groups.cfg"
)
NET_TEST_GROUPS = os.path.join(
OWTF_CONF, "conf", "profiles", "plugin_net", "groups.cfg"
)
AUX_TEST_GROUPS = os.path.join(
OWTF_CONF, "conf", "profiles", "plugin_aux", "groups.cfg"
)
PLUGINS_DIR = os.path.join(ROOT_DIR, "plugins")
# Output Settings
OUTPUT_PATH = "owtf_review"
AUX_OUTPUT_PATH = "owtf_review/auxiliary"
NET_SCANS_PATH = "owtf_review/scans"
# The name of the directories relative to output path
TARGETS_DIR = "targets"
WORKER_LOG_DIR = "logs"
# Default profile settings
DEFAULT_GENERAL_PROFILE = os.path.join(OWTF_CONF, "conf", "general.yaml")
DEFAULT_FRAMEWORK_CONFIG = os.path.join(OWTF_CONF, "conf", "framework.yaml")
DEFAULT_RESOURCES_PROFILE = os.path.join(OWTF_CONF, "conf", "resources.cfg")
DEFAULT_WEB_PLUGIN_ORDER_PROFILE = os.path.join(
OWTF_CONF, "conf", "profiles", "plugin_web", "order.cfg"
)
DEFAULT_NET_PLUGIN_ORDER_PROFILE = os.path.join(
OWTF_CONF, "conf", "profiles", "plugin_net", "order.cfg"
)
# logs_dir can be both relative or absolute path ;)
LOGS_DIR = "logs"
# Used for logging in OWTF
OWTF_LOG_FILE = "/tmp/owtf.log"
# Interface static folders
TEMPLATES = os.path.join(OWTF_CONF, "build")
STATIC_ROOT = os.path.join(OWTF_CONF, "build")
# SMTP
EMAIL_FROM = "you@your_server.com"
SMTP_LOGIN = "login@your_server.com"
SMTP_PASS = "your_password"
SMTP_HOST = "your_mail_server.com"
SMTP_PORT = 25
# OUTBOUND PROXY
USE_OUTBOUND_PROXY = False
OUTBOUND_PROXY_IP = ""
OUTBOUND_PROXY_PORT = ""
OUTBOUND_PROXY_AUTH = None
# Inbound Proxy Configuration
INBOUND_PROXY_IP = "127.0.0.1"
INBOUND_PROXY_PORT = 8008
INBOUND_PROXY_PROCESSES = 0
INBOUND_PROXY_CACHE_DIR = "/tmp/owtf/proxy-cache"
CA_CERT = os.path.join(OWTF_CONF, "proxy", "certs", "ca.crt")
CA_KEY = os.path.join(OWTF_CONF, "proxy", "certs", "ca.key")
CA_PASS_FILE = os.path.join(OWTF_CONF, "proxy", "certs", "ca_pass.txt")
CERTS_FOLDER = os.path.join(OWTF_CONF, "proxy", "certs")
BLACKLIST_COOKIES = ["_ga", "__utma", "__utmb", "__utmc", "__utmz", "__utmv"]
WHITELIST_COOKIES = ""
PROXY_RESTRICTED_RESPONSE_HEADERS = [
"Content-Length",
"Content-Encoding",
"Etag",
"Transfer-Encoding",
"Connection",
"Vary",
"Accept-Ranges",
"Pragma",
]
PROXY_RESTRICTED_REQUEST_HEADERS = [
"Connection",
"Pragma",
"Cache-Control",
"If-Modified-Since",
]
PROXY_LOG = "/tmp/owtf/proxy.log"
# Define regex patterns (raw strings: "\." / "\?" are regex escapes, and the
# previous non-raw literals trigger invalid-escape-sequence warnings on
# modern Python; the matched bytes are unchanged).
REGEXP_FILE_URL = r"^[^\?]+\.(xml|exe|pdf|cs|log|inc|dat|bak|conf|cnf|old|zip|7z|rar|tar|gz|bz2|txt|xls|xlsx|doc|docx|ppt|pptx)$"
# Potentially small files will be retrieved for analysis
REGEXP_SMALL_FILE_URL = r"^[^\?]+\.(xml|cs|inc|dat|bak|conf|cnf|old|txt)$"
REGEXP_IMAGE_URL = r"^[^\?]+\.(jpg|jpeg|png|gif|bmp)$"
# NOTE(review): the next two names look swapped -- VALID matches SSI-style
# extensions (shtml/shtm/stm) while SSI matches generic http/ftp URLs.
# Kept as-is because consumers rely on the current names; confirm upstream
# before renaming.
REGEXP_VALID_URL = r"^[^\?]+\.(shtml|shtm|stm)$"
REGEXP_SSI_URL = r"^(http|ftp)[^ ]+$"

# Compile regular expressions once at the beginning for speed purposes:
is_file_regex = re.compile(REGEXP_FILE_URL, re.IGNORECASE)
is_small_file_regex = re.compile(REGEXP_SMALL_FILE_URL, re.IGNORECASE)
is_image_regex = re.compile(REGEXP_IMAGE_URL, re.IGNORECASE)
is_url_regex = re.compile(REGEXP_VALID_URL, re.IGNORECASE)
is_ssi_regex = re.compile(REGEXP_SSI_URL, re.IGNORECASE)
# UI
SERVER_LOG = "/tmp/owtf/ui_server.log"
FILE_SERVER_LOG = "/tmp/owtf/file_server.log"

# HTTP_AUTH
# HTTP authentication credentials; unset (None) by default.
HTTP_AUTH_HOST = None
HTTP_AUTH_USERNAME = None
HTTP_AUTH_PASSWORD = None
HTTP_AUTH_MODE = "basic"

# Memory
RESOURCE_MONITOR_PROFILER = 0
PROCESS_PER_CORE = 1
MIN_RAM_NEEDED = 20

# misc
DATE_TIME_FORMAT = "%d/%m/%Y-%H:%M"
# Delimiter wrapping replacement tokens in templates -- presumably; confirm
# against the config substitution code that consumes it.
REPLACEMENT_DELIMITER = "@@@"
REPLACEMENT_DELIMITER_LENGTH = len(REPLACEMENT_DELIMITER)
CONFIG_TYPES = ["string", "other"]
USER_AGENT = "Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/15.0"
PROXY_CHECK_URL = "http://www.google.ie"

# Fallback
# Packaged defaults shipped inside the repo; they mirror the ~/.owtf paths
# defined above.
FALLBACK_WEB_TEST_GROUPS = os.path.join(
    ROOT_DIR, "data", "conf", "profiles", "plugin_web", "groups.cfg"
)
FALLBACK_NET_TEST_GROUPS = os.path.join(
    ROOT_DIR, "data", "conf", "profiles", "plugin_net", "groups.cfg"
)
FALLBACK_AUX_TEST_GROUPS = os.path.join(
    ROOT_DIR, "data", "conf", "profiles", "plugin_aux", "groups.cfg"
)
FALLBACK_PLUGINS_DIR = os.path.join(ROOT_DIR, "data", "plugins")
FALLBACK_GENERAL_PROFILE = os.path.join(ROOT_DIR, "data", "conf", "general.yaml")
FALLBACK_FRAMEWORK_CONFIG = os.path.join(ROOT_DIR, "data", "conf", "framework.yaml")
FALLBACK_RESOURCES_PROFILE = os.path.join(ROOT_DIR, "data", "conf", "resources.cfg")
FALLBACK_WEB_PLUGIN_ORDER_PROFILE = os.path.join(
    ROOT_DIR, "data", "conf", "profiles", "plugin_web", "order.cfg"
)
FALLBACK_NET_PLUGIN_ORDER_PROFILE = os.path.join(
    ROOT_DIR, "data", "conf", "profiles", "plugin_net", "order.cfg"
)
# Override the values
# ~/.owtf/settings.py, when present, is compiled and exec()'d over this
# module's namespace so any constant above can be overridden per user.
# NOTE(review): this executes arbitrary Python from the user's home
# directory by design; the file must only be writable by the local user.
local_conf = os.path.join(OWTF_CONF, "settings.py")
try:
    with open(local_conf) as f:
        settings = compile(f.read(), local_conf, "exec")
        exec(settings, globals(), locals())
except FileNotFoundError:
    # No user override file -- keep the defaults defined above.
    pass
| 29.954128 | 128 | 0.724655 |
671680a579ba3d5ff80d85af1ebc22abc38c7838 | 5,764 | py | Python | bibchex/checks/authors.py | tinloaf/bibchex | 3775f00903db560f40e6f86a656761e55ec4f15b | [
"MIT"
] | 5 | 2020-06-07T21:57:43.000Z | 2020-11-18T10:11:13.000Z | bibchex/checks/authors.py | tinloaf/bibchex | 3775f00903db560f40e6f86a656761e55ec4f15b | [
"MIT"
] | null | null | null | bibchex/checks/authors.py | tinloaf/bibchex | 3775f00903db560f40e6f86a656761e55ec4f15b | [
"MIT"
] | 1 | 2020-06-07T15:21:27.000Z | 2020-06-07T15:21:27.000Z | from bibchex.config import Config
class InitialDottedChecker(object):
    """Flag abbreviated name parts whose dotting disagrees with the
    'author_initial_want_dotted' configuration option."""

    NAME = 'author_initial_dotted'

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return the combined problems for the entry's authors and editors."""
        results = await self.check_one("authors", "Author", entry)
        results += await self.check_one("editors", "Editor", entry)
        return results

    async def check_one(self, field, name, entry):
        """Scan one name list (*field*) of *entry*; *name* labels messages."""
        want_dotted = self._cfg.get('author_initial_want_dotted', entry, True)
        findings = []
        for first, last in getattr(entry, field):
            # Any space-separated token without a lowercase letter is
            # treated as an initial / abbreviation.
            for token in first.split(" ") + last.split(" "):
                if not token:
                    continue
                if any(ch.islower() for ch in token):
                    continue
                if want_dotted and not token.endswith('.'):
                    findings.append(
                        (type(self).NAME,
                         "{} {} {} seems to have an undotted initial."
                         .format(name, first, last), ""))
                elif not want_dotted and '.' in token:
                    findings.append(
                        (type(self).NAME,
                         "{} {} {} seems to have a dotted initial."
                         .format(name, first, last), ""))
        return findings
class AllcapsNameChecker(object):
    """Flag first or last names written entirely in capitals (e.g. "DOE")."""

    NAME = "author_names_allcaps"

    def __init__(self):
        self._cfg = Config()

    @staticmethod
    def _is_allcaps(part):
        """True when *part* has no lowercase letters and more than one
        uppercase letter (a single capital is treated as an abbreviation,
        not as an all-caps name)."""
        lowers = sum(1 for c in part if c.islower())
        uppers = sum(1 for c in part if c.isupper())
        return lowers == 0 and uppers > 1

    async def check(self, entry):
        """Return the combined problems for the entry's authors and editors."""
        return (await self.check_one("authors", "Author", entry)
                + await self.check_one("editors", "Editor", entry))

    async def check_one(self, field, name, entry):
        """Scan one name list (*field*) of *entry*; *name* labels messages."""
        issues = []
        for first, last in getattr(entry, field):
            if self._is_allcaps(first):
                issues.append(
                    (type(self).NAME,
                     "{} '{} {}' seems to have an all-caps first name."
                     .format(name, first, last), ""))
            if self._is_allcaps(last):
                issues.append(
                    (type(self).NAME,
                     "{} '{} {}' seems to have an all-caps last name."
                     .format(name, first, last), ""))
        return issues
class FirstNameInitialChecker(object):
    """Flag entries whose *first* given-name token is abbreviated or
    all-caps (i.e. contains no lowercase character, e.g. "J." or "JOHN")."""

    NAME = "author_names_firstinitial"

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return the combined problems for the entry's authors and editors."""
        authors = await self.check_one("authors", "Author", entry)
        editors = await self.check_one("editors", "Editor", entry)
        return authors + editors

    async def check_one(self, field, name, entry):
        """Scan one name list (*field*) of *entry*; *name* labels messages.

        Returns a list of (check-name, message, details) tuples.
        """
        problems = []
        for author in getattr(entry, field):
            (given, last) = author
            tokens = [part for part in given.split(" ") if part]
            # BUG FIX: guard on the filtered token list, not len(given); a
            # whitespace-only given name used to raise IndexError below.
            if not tokens:
                continue
            first = tokens[0]
            first_lower_count = sum((int(c.islower()) for c in first))
            if first_lower_count == 0:
                problems.append(
                    (type(self).NAME,
                     ("{} '{} {}' seems to have a first name "
                      "that is in abbreviated or all-caps.")
                     .format(name, given, last), ""))
        return problems
class MiddleNameInitialChecker(object):
    """Flag entries whose middle name(s) are abbreviated or all-caps
    (no lowercase character in any token after the first)."""

    NAME = "author_names_middleinitial"

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return the combined problems for the entry's authors and editors."""
        authors = await self.check_one("authors", "Author", entry)
        editors = await self.check_one("editors", "Editor", entry)
        return authors + editors

    async def check_one(self, field, name, entry):
        """Scan one name list (*field*) of *entry*; *name* labels messages.

        Returns a list of (check-name, message, details) tuples.
        """
        problems = []
        for author in getattr(entry, field):
            (given, last) = author
            tokens = [part for part in given.split(" ") if part]
            # BUG FIX: require at least a first AND a middle token. The old
            # 'len(tokens) == 1' guard let a whitespace-only given name
            # through with an empty token list, producing middle == "" and a
            # spurious "abbreviated or all-caps" report.
            if len(tokens) < 2:
                continue
            middle = " ".join(tokens[1:])
            middle_lower_count = sum((int(c.islower()) for c in middle))
            if middle_lower_count == 0:
                problems.append(
                    (type(self).NAME,
                     ("{} '{} {}' seems to have a middle name that "
                      "is in abbreviated or all-caps.")
                     .format(name, given, last), ""))
        return problems
class LastNameInitialChecker(object):
    """Flag entries whose last name is abbreviated or all-caps
    (no lowercase character at all)."""

    NAME = "author_names_lastinitial"

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return the combined problems for the entry's authors and editors."""
        authors = await self.check_one("authors", "Author", entry)
        editors = await self.check_one("editors", "Editor", entry)
        return authors + editors

    async def check_one(self, field, name, entry):
        """Scan one name list (*field*) of *entry*; *name* labels messages.

        Returns a list of (check-name, message, details) tuples.
        """
        problems = []
        for author in getattr(entry, field):
            (given, last) = author
            # BUG FIX: an empty last name has zero lowercase characters too;
            # skip it instead of reporting it as "abbreviated or all-caps".
            if not last:
                continue
            last_lower_count = sum((int(c.islower()) for c in last))
            if last_lower_count == 0:
                problems.append(
                    (type(self).NAME,
                     ("{} '{} {}' seems to have a last name "
                      "that is in abbreviated or all-caps.")
                     .format(name, given, last), ""))
        return problems
| 35.361963 | 77 | 0.520645 |
cadae3a325bc54fb37f1dfe69cc838a1acfa8345 | 3,418 | py | Python | python/packet_feature.py | nyu-tandon-hsn-ai/tcp-trace-feature-selection | 90126ffd37d7a000ca5f9266c5d0dea225d757a0 | [
"Apache-2.0"
] | 1 | 2019-11-29T14:07:46.000Z | 2019-11-29T14:07:46.000Z | python/packet_feature.py | nyu-tandon-hsn-ai/trace-feature-selection | 90126ffd37d7a000ca5f9266c5d0dea225d757a0 | [
"Apache-2.0"
] | 19 | 2018-04-06T00:34:46.000Z | 2018-07-19T17:40:29.000Z | python/packet_feature.py | jimmyahacker/tcp-trace-feature-selection | 90126ffd37d7a000ca5f9266c5d0dea225d757a0 | [
"Apache-2.0"
] | 1 | 2018-07-08T20:21:05.000Z | 2018-07-08T20:21:05.000Z | import subprocess
import os
import sys
import pandas as pd
import argparse
def _tshark_extract(tshark_query_str, trace_file_name, trace_feature_file_name, print_err):
    """Run a tshark shell pipeline that dumps packet fields to a CSV file.

    :param tshark_query_str: shell command template with ``{input}`` and
        ``{output}`` placeholders; it ends in an output redirection, which
        is why ``shell=True`` is required.
    :param trace_file_name: capture file substituted into ``{input}``.
    :param trace_feature_file_name: CSV path substituted into ``{output}``.
    :param print_err: when True, mirror tshark's stderr to our stderr.

    NOTE(review): the file names are interpolated into a shell string
    (``shell=True``), so paths containing shell metacharacters would be
    interpreted by the shell -- callers are expected to pass local,
    trusted paths.
    """
    tshark_command = subprocess.Popen(\
        tshark_query_str.\
        format(input=trace_file_name, output=trace_feature_file_name),\
        shell=True,\
        stdout=subprocess.PIPE,\
        stderr=subprocess.PIPE\
        )
    out_data, err_data = tshark_command.communicate()
    out_data, err_data = out_data.decode('utf-8'), err_data.decode('utf-8')
    # tshark's field output is redirected into the file, so an empty stdout
    # is treated as success here.
    if out_data == '':
        print('Conversion done',flush=True)
    if print_err:
        if err_data != '':
            print(err_data,file=sys.stderr,flush=True)
        else:
            print('No error',file=sys.stderr,flush=True)
def _generate_full_addr(protocol, trace_feature_file_name):
# add source and destination address
trace_df = pd.read_csv(trace_feature_file_name)
trace_df['src_addr'] = trace_df['ip.src'] + ":" + trace_df[protocol + '.srcport'].apply(str)
trace_df['dst_addr'] = trace_df['ip.dst'] + ":" + trace_df[protocol + '.dstport'].apply(str)
trace_df.to_csv(trace_feature_file_name, index=False)
def _add_tcp_pkt_len(trace_feature_file_name):
trace_df = pd.read_csv(trace_feature_file_name)
trace_df['tcp.payload'] = trace_df['tcp.len']
trace_df['tcp.len'] = trace_df['tcp.len'] + trace_df['tcp.hdr_len']
trace_df.to_csv(trace_feature_file_name, index=False)
def tcp_generate(trace_file_name, trace_feature_file_name, print_err=False, is_cluster=False):
    """Generate per-packet TCP features from a capture file.

    Extracts addressing, header/payload sizes, timing, seq/ack numbers and
    flag bits with tshark into *trace_feature_file_name* (CSV), then adds
    'src_addr'/'dst_addr' columns and redefines 'tcp.len' as header+payload.

    :param trace_file_name: input capture file read by tshark (``-r``).
    :param trace_feature_file_name: output CSV path (overwritten).
    :param print_err: forward tshark's stderr when True.
    :param is_cluster: prefix the command with the HPC singularity wrapper
        so tshark runs inside the wireshark image.
    """
    # Single source of truth for the field list; the cluster variant only
    # differs by the singularity prefix (previously two duplicated strings).
    query = ('tshark -r {input} -Y tcp -T fields -e ip.src -e ip.dst '
             '-e tcp.srcport -e tcp.dstport -e tcp.hdr_len -e tcp.len '
             '-e frame.time_relative -e frame.len -e tcp.seq -e tcp.ack '
             '-e tcp.flags.ack -e tcp.flags.syn -e tcp.flags.fin '
             '-e tcp.stream -Eheader=y -Eseparator=, -Equote=d > {output}')
    if is_cluster:
        query = ('/share/apps/singularity/2.5.2/bin/singularity exec '
                 '/beegfs/work/public/singularity/wireshark-2.4.2.img ' + query)
    _tshark_extract(query, trace_file_name, trace_feature_file_name, print_err)
    _generate_full_addr('tcp', trace_feature_file_name)
    _add_tcp_pkt_len(trace_feature_file_name)
def udp_generate(trace_file_name, trace_feature_file_name, print_err=False, is_cluster=False):
    """Generate per-packet UDP features from a capture file.

    Extracts addressing, datagram length and timing with tshark into
    *trace_feature_file_name* (CSV), then adds 'src_addr'/'dst_addr'
    columns.

    :param trace_file_name: input capture file read by tshark (``-r``).
    :param trace_feature_file_name: output CSV path (overwritten).
    :param print_err: forward tshark's stderr when True.
    :param is_cluster: prefix the command with the HPC singularity wrapper
        so tshark runs inside the wireshark image.
    """
    # Shared field list; the cluster variant only adds the singularity
    # prefix (previously two duplicated strings).
    query = ('tshark -r {input} -Y udp -T fields -e ip.src -e ip.dst '
             '-e udp.srcport -e udp.dstport -e udp.length '
             '-e frame.time_relative -e frame.len -e udp.stream '
             '-Eheader=y -Eseparator=, -Equote=d > {output}')
    if is_cluster:
        query = ('/share/apps/singularity/2.5.2/bin/singularity exec '
                 '/beegfs/work/public/singularity/wireshark-2.4.2.img ' + query)
    _tshark_extract(query, trace_file_name, trace_feature_file_name, print_err)
    _generate_full_addr('udp', trace_feature_file_name)
742c795549cac68864d6d67a78be2c2b8f4c8b23 | 21,075 | py | Python | python3-virtualenv/lib/python3.6/site-packages/pip/_vendor/requests/adapters.py | 1tracy/3.3.1.1-flask-blog | 05ba85340cf90109038492b81c670cba8829a375 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.6/site-packages/pip/_vendor/requests/adapters.py | 1tracy/3.3.1.1-flask-blog | 05ba85340cf90109038492b81c670cba8829a375 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.6/site-packages/pip/_vendor/requests/adapters.py | 1tracy/3.3.1.1-flask-blog | 05ba85340cf90109038492b81c670cba8829a375 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
from pip._vendor.urllib3.response import HTTPResponse
from pip._vendor.urllib3.util import Timeout as TimeoutSauce
from pip._vendor.urllib3.util.retry import Retry
from pip._vendor.urllib3.exceptions import ClosedPoolError
from pip._vendor.urllib3.exceptions import ConnectTimeoutError
from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
from pip._vendor.urllib3.exceptions import MaxRetryError
from pip._vendor.urllib3.exceptions import NewConnectionError
from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
from pip._vendor.urllib3.exceptions import ProtocolError
from pip._vendor.urllib3.exceptions import ReadTimeoutError
from pip._vendor.urllib3.exceptions import SSLError as _SSLError
from pip._vendor.urllib3.exceptions import ResponseError
from .models import Response
from .compat import urlparse, basestring
from .utils import (
DEFAULT_CA_BUNDLE_PATH,
get_encoding_from_headers,
prepend_scheme_if_needed,
get_auth_from_url,
urldefragauth,
select_proxy,
)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (
ConnectionError,
ConnectTimeout,
ReadTimeout,
SSLError,
ProxyError,
RetryError,
InvalidSchema,
)
from .auth import _basic_auth_str
try:
    from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
    # SOCKS support is optional: substitute a stub that fails loudly only
    # when a socks:// proxy is actually configured.
    def SOCKSProxyManager(*args, **kwargs):
        raise InvalidSchema("Missing dependencies for SOCKS support.")

# Defaults for HTTPAdapter's connection pooling and retry behaviour.
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
    """The Base Transport Adapter.

    Defines the interface every transport adapter must implement:
    :meth:`send` and :meth:`close`.
    """

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(
        self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
    ):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        raise NotImplementedError

    def close(self):
        """Cleans up adapter specific items."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = [
"max_retries",
"config",
"_pool_connections",
"_pool_maxsize",
"_pool_block",
]
    def __init__(
        self,
        pool_connections=DEFAULT_POOLSIZE,
        pool_maxsize=DEFAULT_POOLSIZE,
        max_retries=DEFAULT_RETRIES,
        pool_block=DEFAULT_POOLBLOCK,
    ):
        """Build the adapter and its urllib3 pool manager.

        :param pool_connections: number of connection pools to cache.
        :param pool_maxsize: maximum connections kept per pool.
        :param max_retries: an int or a urllib3 ``Retry`` instance.
        :param pool_block: whether pools block when exhausted.
        """
        if max_retries == DEFAULT_RETRIES:
            # Distinguish "left at default" from an explicit value: the
            # default allows 0 retries but never retries on read errors.
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        # Cache of per-proxy-URL managers, filled by proxy_manager_for().
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
    def __setstate__(self, state):
        """Restore pickled state and rebuild the unpicklable pool managers."""
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        # Recreate the PoolManager from the restored pool parameters.
        self.init_poolmanager(
            self._pool_connections, self._pool_maxsize, block=self._pool_block
        )
    def init_poolmanager(
        self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs
    ):
        """Initializes a urllib3 PoolManager.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        # strict=True: legacy httplib "strict" status-line parsing flag --
        # presumably kept for Python 2 behaviour parity; confirm it is a
        # no-op on Python 3 before removing.
        self.poolmanager = PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            strict=True,
            **pool_kwargs
        )
    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        :rtype: urllib3.ProxyManager
        """
        # Managers are cached per proxy URL so pooled connections get reused.
        if proxy in self.proxy_manager:
            manager = self.proxy_manager[proxy]
        elif proxy.lower().startswith("socks"):
            # SOCKS proxies take credentials as constructor arguments.
            username, password = get_auth_from_url(proxy)
            manager = self.proxy_manager[proxy] = SOCKSProxyManager(
                proxy,
                username=username,
                password=password,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs
            )
        else:
            # HTTP(S) proxies carry credentials in a Proxy-Authorization header.
            proxy_headers = self.proxy_headers(proxy)
            manager = self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs
            )
        return manager
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith("https") and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                # Fall back to the default CA bundle path shipped with requests.
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc or not os.path.exists(cert_loc):
                raise IOError(
                    "Could not find a suitable TLS CA certificate bundle, "
                    "invalid path: {0}".format(cert_loc)
                )
            conn.cert_reqs = "CERT_REQUIRED"
            # A single bundle file and a directory of certs are configured
            # through different urllib3 connection attributes.
            if not os.path.isdir(cert_loc):
                conn.ca_certs = cert_loc
            else:
                conn.ca_cert_dir = cert_loc
        else:
            # Plain HTTP or verify=False: disable certificate checking.
            conn.cert_reqs = "CERT_NONE"
            conn.ca_certs = None
            conn.ca_cert_dir = None
        if cert:
            # cert is either a (cert_file, key_file) pair or a single path
            # to a combined cert+key file.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
                conn.key_file = None
            if conn.cert_file and not os.path.exists(conn.cert_file):
                raise IOError(
                    "Could not find the TLS certificate file, "
                    "invalid path: {0}".format(conn.cert_file)
                )
            if conn.key_file and not os.path.exists(conn.key_file):
                raise IOError(
                    "Could not find the TLS key file, "
                    "invalid path: {0}".format(conn.key_file)
                )
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`

        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        :rtype: requests.Response
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, "status", None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, "headers", {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        # Response URLs are stored as text; decode a bytes URL for consistency.
        if isinstance(req.url, bytes):
            response.url = req.url.decode("utf-8")
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        :rtype: urllib3.ConnectionPool
        """
        # Route through the (cached) per-proxy manager when a proxy applies
        # to this URL; otherwise use the adapter's own pool manager.
        proxy = select_proxy(url, proxies)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, "http")
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.

        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
        :rtype: str
        """
        proxy = select_proxy(request.url, proxies)
        scheme = urlparse(request.url).scheme
        is_proxied_http_request = proxy and scheme != "https"
        using_socks_proxy = False
        if proxy:
            proxy_scheme = urlparse(proxy).scheme.lower()
            using_socks_proxy = proxy_scheme.startswith("socks")
        url = request.path_url
        # A non-SOCKS proxy for plain HTTP gets the absolute URI (with
        # fragment/credentials stripped) in the request line; HTTPS tunnels
        # and SOCKS proxies keep the origin-form path.
        if is_proxied_http_request and not using_socks_proxy:
            url = urldefragauth(request.url)
        return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        # Intentionally a no-op; subclasses mutate request.headers in place.
        pass
    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param proxy: The url of the proxy being used for this request.
        :rtype: dict
        """
        headers = {}
        username, password = get_auth_from_url(proxy)
        if username:
            # Credentials embedded in the proxy URL become a
            # Proxy-Authorization header (basic auth).
            headers["Proxy-Authorization"] = _basic_auth_str(username, password)
        return headers
    def send(
        self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
    ):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple or urllib3 Timeout object
        :param verify: (optional) Either a boolean, in which case it controls whether
            we verify the server's TLS certificate, or a string, in which case it
            must be a path to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        :rtype: requests.Response
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # A body without a Content-Length header has unknown length and must
        # be sent with chunked transfer encoding (the low-level path below).
        chunked = not (request.body is None or "Content-Length" in request.headers)
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = (
                    "Invalid timeout {0}. Pass a (connect, read) "
                    "timeout tuple, or a single float to set "
                    "both timeouts to the same value".format(timeout)
                )
                raise ValueError(err)
        elif isinstance(timeout, TimeoutSauce):
            pass
        else:
            # A bare number applies to both the connect and the read phase.
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout,
                )
            # Send the request.
            else:
                if hasattr(conn, "proxy_pool"):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
                try:
                    low_conn.putrequest(request.method, url, skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    # Hand-rolled chunked transfer encoding: hex length line,
                    # CRLF, chunk bytes, CRLF -- terminated by a zero chunk.
                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode("utf-8"))
                        low_conn.send(b"\r\n")
                        low_conn.send(i)
                        low_conn.send(b"\r\n")
                    low_conn.send(b"0\r\n\r\n")
                    # Receive the response from the server
                    try:
                        # For Python 2.7+ versions, use buffering of HTTP
                        # responses
                        r = low_conn.getresponse(buffering=True)
                    except TypeError:
                        # For compatibility with Python 2.6 versions and back
                        r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False,
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            # Translate urllib3 retry failures into the matching requests
            # exception, based on the underlying reason.
            if isinstance(e.reason, ConnectTimeoutError):
                # TODO: Remove this in 3.0.0: see #2811
                if not isinstance(e.reason, NewConnectionError):
                    raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            if isinstance(e.reason, _ProxyError):
                raise ProxyError(e, request=request)
            if isinstance(e.reason, _SSLError):
                # This branch is for urllib3 v1.22 and later.
                raise SSLError(e, request=request)
            raise ConnectionError(e, request=request)
        except ClosedPoolError as e:
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                # This branch is for urllib3 versions earlier than v1.22
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| 37.234982 | 97 | 0.615516 |
a5a24185f666762f0543ab75c30eab834a898abe | 5,957 | py | Python | pyshop/views/package.py | XeL64/pyshop | fcd94e88c6728d6430383c8d6b3e66f447035bbc | [
"BSD-3-Clause"
] | 94 | 2015-01-02T21:23:27.000Z | 2022-01-08T08:19:33.000Z | pyshop/views/package.py | XeL64/pyshop | fcd94e88c6728d6430383c8d6b3e66f447035bbc | [
"BSD-3-Clause"
] | 45 | 2015-01-14T22:23:43.000Z | 2019-04-15T23:23:08.000Z | pyshop/views/package.py | XeL64/pyshop | fcd94e88c6728d6430383c8d6b3e66f447035bbc | [
"BSD-3-Clause"
] | 38 | 2015-01-19T11:39:04.000Z | 2020-12-17T20:35:58.000Z | # -*- coding: utf-8 -*-
"""
PyShop Package Management Views.
"""
import math
import logging
import os
from sqlalchemy.sql.expression import func
from pyramid.httpexceptions import HTTPNotFound, HTTPForbidden
from pyshop.models import Package, Release, Classifier, User
from .base import View, RedirectView
log = logging.getLogger(__name__)
class List(View):
    """Paginated package listing with optional name/classifier filtering."""
    def render(self):
        """Build the template context for the package list page.

        Reads the page number from the route matchdict, the filter form
        from the request params, and returns packages plus paging info.
        """
        req = self.request
        page_no = 1
        page_size = 20
        if 'page_no' in req.matchdict:
            page_no = int(req.matchdict['page_no'])
        opts = {}
        # Default to local-only listing unless the filter form was submitted
        if 'form.submitted' in req.params:
            opts['local_only'] = req.params.get('local_only', '0') == '1'
        else:
            opts['local_only'] = True
        opts['names'] = []
        opts['classifiers'] = []
        if 'form.submitted' in req.params or \
                req.params.get('classifier.added'):
            # NOTE(review): the loop variable shadows the builtin `id`
            classifiers = [Classifier.by_id(self.session, id)
                           for id in set(req.params.getall('classifiers'))]
            names = req.params.getall('names')
            if req.params.get('classifier.added'):
                # A freshly typed filter term is treated as a classifier if it
                # resolves to one, otherwise as a package-name substring
                classifier = Classifier.by_name(self.session,
                                                req.params['classifier.added'])
                if classifier:
                    classifiers.append(classifier)
                else:
                    names.append(req.params['classifier.added'])
            opts['classifiers'] = classifiers
            opts['names'] = names
        # count='*' returns the number of matching rows, used for paging
        package_count = Package.by_filter(self.session, opts, count='*')
        return {u'has_page': package_count > page_size,
                u'paging': {u'route': u'list_package_page',
                            u'qs': self.request.query_string,
                            u'kwargs': {},
                            u'max': int(
                                math.ceil(float(package_count) / page_size)),
                            u'no': page_no},
                u'package_count': package_count,
                u'packages': Package.by_filter(
                    self.session, opts,
                    limit=page_size, offset=page_size * (page_no - 1),
                    order_by=func.lower(Package.name)
                    ),
                u'filter': opts,
                u'classifiers': Classifier.all(self.session,
                                               order_by=Classifier.name)
                }
class Show(View):
    """Package detail view with owner/maintainer role management.

    Processes the optional form actions (metadata refresh, add/remove
    owner or maintainer roles) before rendering the requested release.
    """
    def render(self):
        package = Package.by_name(self.session,
                                  self.request.matchdict['package_name'])
        if not package:
            raise HTTPNotFound()
        if 'form.refresh_package' in self.request.params:
            # Clearing update_at marks the package stale so its metadata
            # is re-fetched on next access.
            package.update_at = None
            self.session.add(package)
        owners = dict((usr.login, usr) for usr in package.owners)
        # Only owners of locally hosted packages may edit roles
        can_edit_role = self.login in owners.keys() and package.local
        if 'form.add_role' in self.request.params:
            if not can_edit_role:
                raise HTTPForbidden()
            user = User.by_login(self.session, self.request.params['login'])
            if user and user.has_permission('upload_releasefile'):
                if self.request.params['role'] == 'owner':
                    if user.login not in owners:
                        package.owners.append(user)
                else:
                    # BUG FIX: the duplicate check previously built this list
                    # from package.owners, so an existing maintainer could be
                    # appended a second time.
                    maintainers = [usr.login for usr in package.maintainers]
                    if user.login not in maintainers:
                        package.maintainers.append(user)
                self.session.add(package)
        if 'form.remove_maintainer' in self.request.params:
            if not can_edit_role:
                raise HTTPForbidden()
            user = User.by_login(self.session, self.request.params['login'])
            if user:
                maintainers = dict((usr.login, usr)
                                   for usr in package.maintainers)
                if user.login in maintainers:
                    package.maintainers.remove(maintainers[user.login])
                    self.session.add(package)
        if 'form.remove_owner' in self.request.params:
            if not can_edit_role:
                raise HTTPForbidden()
            user = User.by_login(self.session, self.request.params['login'])
            if user:
                if user.login in owners:
                    package.owners.remove(owners[user.login])
                    self.session.add(package)
        if 'release_version' in self.request.matchdict:
            release = Release.by_version(
                self.session, package.name,
                self.request.matchdict['release_version'])
        else:
            # Default to the most recent release
            release = package.sorted_releases[0]
        return {u'package': package,
                u'release': release,
                u'can_edit_role': can_edit_role,
                }
class Refresh(View):
    """Look up a package by name for a refresh request.

    NOTE(review): the looked-up package is never used and render()
    implicitly returns None — this view looks unfinished; confirm whether
    it should reset the package's update timestamp and return a context.
    """
    def render(self):
        package = Package.by_name(self.session,
                                  self.request.matchdict['package_name'])
class Purge(RedirectView):
    """Delete a package entirely: its release files on disk and its DB row."""
    model = Package
    matchdict_key = 'package_id'
    redirect_route = 'list_package'
    def delete(self, model):
        # Check for and delete any packages on disk
        repository = self.request.registry.settings['pyshop.repository']
        for release in model.releases:
            for f in release.files:
                # Files are sharded on disk under the first character of
                # their filename.
                filepath = os.path.join(repository, f.filename[0], f.filename)
                if os.path.isfile(filepath):
                    os.remove(filepath)
        self.session.delete(model)
    def render(self):
        # Only delete after an explicit form submission; otherwise render
        # the confirmation page for the package about to be purged.
        model = self.model.by_id(
            self.session,
            int(self.request.matchdict[self.matchdict_key]))
        if 'form.submitted' in self.request.params:
            self.delete(model)
            return self.redirect()
        return {self.model.__tablename__: model}
b314ae12bc5b7aba6ec08c2949d6e7a2668baaba | 34,328 | py | Python | wingspipe/cull_photometry.py | benw1/WINGS | 32d4bfd073da0b86d2340cde25a5601d0a1ec95e | [
"RSA-MD"
] | 4 | 2022-01-04T18:24:56.000Z | 2022-01-27T08:23:37.000Z | wingspipe/cull_photometry.py | benw1/WINGS | 32d4bfd073da0b86d2340cde25a5601d0a1ec95e | [
"RSA-MD"
] | null | null | null | wingspipe/cull_photometry.py | benw1/WINGS | 32d4bfd073da0b86d2340cde25a5601d0a1ec95e | [
"RSA-MD"
] | null | null | null | #! /usr/bin/env python
import os
import graphviz
import pickle
import warnings
import numpy as np
import pandas as pd
import wpipe as wp
from astropy import units as u
from astropy import wcs
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy.io import ascii, fits
from astropy.table import Table, vstack
from matplotlib import cm
from matplotlib import pyplot as plt
from scipy.spatial import cKDTree
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier as MLPc
from sklearn.preprocessing import label_binarize, StandardScaler
from sklearn.tree import DecisionTreeClassifier, export_graphviz
warnings.filterwarnings('ignore')
def register(task):
    """Register the wpipe event masks this task responds to.

    The task fires on any 'start' event and any 'dolphot_done' event,
    regardless of source or value.
    """
    for event_name in ('start', 'dolphot_done'):
        task.mask(source='*', name=event_name, value='*')
# Quality-metric column names used as classifier features
FEAT_NAMES = ['err', 'SNR', 'Sharpness', 'Crowding']
# filter names
FILTERS = np.array(['F062', 'F087', 'F106', 'F129', 'F158', 'F184'])
# Filter names as they appear in the simulated FITS filenames
FILTERSHORT = np.array(['F062', 'F087', 'F106', 'F129', 'F158', 'F184'])
# AB magnitude Zero points
AB_VEGA = np.array([0.2, 0.487, 0.653, 0.958, 1.287, 1.552])
# Simulated image files used to load WCS headers when USE_RADEC is True
FITS_FILES = ["sim_1_0.fits", "sim_2_0.fits", "sim_3_0.fits",
              "sim_4_0.fits", "sim_5_0.fits"]
# Placeholder per-filter WCS; replaced with real WCS objects when USE_RADEC is True
SKY_COORD = np.zeros(len(FILTERS))
# Index of the reference image used for cross-filter alignment
REF_FITS = int(3)
# Match sources by sky coordinates (RA/Dec) instead of pixel positions
USE_RADEC = False
def cull_photometry(this_config, this_dp_id):
    """Run the photometry-culling pipeline for one wpipe data product.

    Resolves the raw DOLPHOT photometry file referenced by ``this_dp_id``
    inside the configuration's processing directory and hands it to
    clean_all() with fixed culling parameters.
    """
    phot_dp = wp.DataProduct(this_dp_id)
    phot = phot_dp.filename
    procpath = this_config.procpath
    target = this_config.target
    targname = target.name
    # Target root is the name with any dotted suffix stripped
    targroot = targname.split('.')[0]
    photpath = procpath + "/" + phot
    print("PHOT PATH: ", photpath, "\n")
    clean_all(photpath, tol=5.0, test_size=0.75, valid_mag=30.0, targroot=targroot)
def clean_all(filename='10_10_phot.txt',
              feature_names=None,
              filters=FILTERS,
              # ab_vega=AB_VEGA,
              fits_files=None,
              ref_fits=REF_FITS,
              sky_coord=SKY_COORD,
              tol=2., test_size=0.1, valid_mag=30.,
              use_radec=USE_RADEC,
              show_plot=False,
              targroot='',
              opt=None):
    """Top-level wrapper for the culling pipeline.

    Reads the input/output catalogs, cleans and labels the output,
    trains/evaluates an MLP star classifier, and optionally writes
    diagnostic plots and cleaned catalogs.

    Calls read_data(), prep_data(), classify(), make_plots() and
    save_cats().

    filename : path to the raw DOLPHOT photometry file; its directory is
        used as the working/output directory.
    tol : matching radius (pixels, or arcsec*0.11 when use_radec).
    test_size : held-out fraction for classifier evaluation.
    valid_mag : faint-magnitude cutoff applied to input sources.
    opt : dict of stage switches ('evaluate', 'summary', 'plots', 'tree',
        'saveClean').
    """
    if fits_files is None:
        fits_files = FITS_FILES
    if feature_names is None:
        feature_names = FEAT_NAMES
    if opt is None:
        opt = {'evaluate': True,
               'summary': True,
               'plots': True,
               'tree': False,
               'saveClean': True}
    fileroot, filename = os.path.split(filename)
    fileroot += '/'
    filepre = filename.split('.')[0]
    if use_radec:
        # Load a real WCS per filter from the simulated images
        sky_coord = [wcs.WCS(fits.open(fileroot + imfile)[1].header) for imfile in fits_files]
    input_data, output_data = read_data(filename=filename,
                                        fileroot=fileroot,
                                        targroot=targroot,
                                        filters=filters)
    in_df, out_df, out_lab = prep_data(input_data, output_data,
                                       use_radec=use_radec,
                                       sky_coord=sky_coord,
                                       filters=filters,
                                       tol=tol,
                                       valid_mag=valid_mag,
                                       ref_fits=ref_fits)
    # Small multi-layer perceptron used as the star/other classifier
    clf = MLPc(hidden_layer_sizes=(10, 10, 10),
               activation='logistic',
               solver='lbfgs',
               max_iter=20000,
               shuffle=True,
               warm_start=False,
               early_stopping=True)  # , n_iter_no_change=10)
    new_labels = classify(out_df, out_lab,
                          filters=filters,
                          feature_names=feature_names,
                          test_size=test_size,
                          fileroot=fileroot,
                          opt=opt,
                          clf=clf)
    if opt['plots']:
        make_plots(in_df, out_df, new_labels,
                   sky_coord=sky_coord,
                   filters=filters,
                   fileroot=fileroot,
                   nameroot=filepre,
                   tol=tol,
                   use_radec=use_radec,
                   ref_fits=ref_fits,
                   show_plot=show_plot)
    if opt['saveClean']:
        save_cats(input_data, output_data, out_df, new_labels,
                  sky_coord=sky_coord,
                  filters=filters,
                  fileroot=fileroot,
                  nameroot=filepre,
                  tol=tol,
                  use_radec=use_radec,
                  ref_fits=ref_fits,
                  valid_mag=valid_mag)
    return print('\n')
def classify(out_df, out_lab,
             filters=FILTERS,
             feature_names=None,
             test_size=0.9,
             fileroot='',
             opt=None,
             clf=None):
    """Train and apply a star/other classifier for each filter.

    For each filter: extract the quality-metric features and labels, split
    into train/test sets, fit ``clf``, optionally report accuracy and export
    the fitted decision tree, then re-label the FULL catalog with the model.

    Parameters
    ----------
    out_df : list of pandas.DataFrame
        Cleaned output photometry per filter (see prep_data()).
    out_lab : list of numpy arrays
        Binary labels per filter (1 = point source).
    filters : array of str
        Filter names parallel to out_df/out_lab.
    feature_names : list of str, optional
        Feature columns; defaults to FEAT_NAMES.
    test_size : float
        Fraction of the data held out for evaluation.
    fileroot : str
        Prefix for the pickled labels and tree graphics.
    opt : dict, optional
        Stage switches: 'evaluate', 'summary', 'tree'.
    clf : scikit-learn classifier, optional
        Defaults to a DecisionTreeClassifier.

    Returns
    -------
    list of numpy arrays of predicted labels for every source, per filter.
    Side effect: pickles the labels to ``fileroot + "_labels.pkl"``.
    """
    if feature_names is None:
        feature_names = FEAT_NAMES
    if opt is None:
        opt = {'evaluate': True,
               'summary': True,
               'tree': True}
    if clf is None:
        clf = DecisionTreeClassifier()
    new_labels = []
    out_file = fileroot + "_labels.pkl"
    for i, filt in enumerate(filters):
        features = out_df[i][feature_names]
        labels = out_lab[i]
        train_f, test_f, train_l, test_l = train_test_split(features, labels,
                                                            test_size=test_size)
        clf.fit(train_f, train_l)
        pred_l = clf.predict(test_f)
        if opt['evaluate'] | opt['summary']:
            # BUG FIX: opt['summary'] was previously passed positionally and
            # bound to print_report's unused ``feat_imp`` parameter, so the
            # short/long report switch never took effect.
            print_report(filt, test_l, pred_l, feature_names,
                         short_rep=opt['summary'])
        if opt['tree']:
            dot_data = export_graphviz(clf, out_file=None,
                                       leaves_parallel=True,
                                       feature_names=feature_names,
                                       class_names=['other', 'point'],
                                       max_depth=3)
            graph = graphviz.Source(dot_data)
            graph.render(fileroot + filt + '_tree')
        # Re-label the entire catalog with the fitted model
        new_labels.append(clf.predict(features))
    print(new_labels)
    # BUG FIX: close the pickle file deterministically instead of leaking
    # the handle returned by a bare open().
    with open(out_file, 'wb') as fh:
        pickle.dump(new_labels, fh)
    return new_labels
def read_my_data(fileroot, filenameroot, targroot, filt):
    """Read and stack the input source tables for one filter.

    Opens the simulated-image FITS file for ``filt`` and vertically stacks
    every table HDU whose header carries a 'DETECTOR' keyword into a single
    astropy Table.

    NOTE(review): ``targroot`` is only used by the commented-out filename
    variant below — confirm which naming scheme is current.
    """
    #fits_data = fits.open(fileroot + "Mixed_" + filenameroot + '_' + targroot + '_' + filt + '_observed_SCA01.fits')
    fits_data = fits.open(fileroot + "Mixed_" + filenameroot + '_' + filt + '_observed_SCA01.fits')
    print("ALL has this many tables: ", len(fits_data))
    count = 0
    check = 0
    input_data1 = []
    for table in fits_data:
        hdr = table.header
        print("HEADER: ", repr(hdr))
        # NOTE(review): once two non-DETECTOR HDUs have been seen, all
        # remaining HDUs are skipped — intent unclear, see comment below.
        if check == 2:
            continue  # Why stop after 2 tables without 'DETECTOR'?
        if 'DETECTOR' in hdr:
            count += 1
            if count == 1:
                # First detector table seeds the output table
                input_data1 = [Table.read(table)][0]
                print("INPUT1: ", input_data1)
            else:
                print("COUNT: ", count)
                new = Table.read(table)
                # for p in range(len(new)):
                #    input_data1.add_row(new[p])
                input_data1 = vstack([input_data1, new])
        else:
            check += 1
    fits_data.close()
    print("INPUTEND: ", len(input_data1))
    # We do not want the first item? If we do,
    # change to: return input_data1
    return input_data1
def read_data(filename='10_10_phot.txt', fileroot='', targroot='', filters=FILTERS):
    """Read the raw data files.

    - Input: per-filter synthetic source tables used for image generation
      (FITS tables, read via read_my_data()).
    - Output: DOLPHOT raw photometry, whitespace-delimited ASCII.

    Returns a list of astropy Tables (one per filter, ordered like
    FILTERSHORT) and a row-shuffled numpy array of the output photometry.

    NOTE(review): the ``filters`` parameter is ignored — the module-level
    FILTERSHORT is used instead; confirm whether that is intentional.
    """
    filenameroot = filename.split('.')[0]
    input_data = [read_my_data(fileroot, filenameroot, targroot, filt) for filt in FILTERSHORT]
    output_data = np.loadtxt(fileroot + filename)
    # Shuffle rows so later train/test splits are not order-biased
    np.random.shuffle(output_data)
    print(input_data[3])
    return input_data, output_data
def prep_data(input_data, output_data, sky_coord=SKY_COORD,
              filters=FILTERS, use_radec=False,
              tol=2., valid_mag=30., ref_fits=0.):
    """Prepare the data for classification.

    The output photometry is cleaned of low-information entries and labeled
    by matching detections back to the input source positions.

    Returns three lists ordered like ``filters``:
    - input data as pandas DataFrames,
    - cleaned output data as pandas DataFrames,
    - binary labels for the output data as numpy arrays (1 = point source).

    NOTE(review): the default ``ref_fits=0.`` is a float used as an array
    index below; callers pass an int — confirm the default.
    """
    nfilt = filters.size
    # Column layout below follows the DOLPHOT output: global x/y in columns
    # 2-3, then a 13-column group per filter — presumably count rate (+13),
    # Vega mag (+15), mag error (+17), SNR (+19), sharpness (+20),
    # roundness (+21), crowding (+22); TODO confirm against the DOLPHOT
    # column documentation.
    _xy = output_data[:, 2:4].T
    _count = output_data[:, range(13, 13 + 13 * nfilt, 13)].T
    _vega_mags = output_data[:, range(15, 15 + 13 * nfilt, 13)].T
    _mag_errors = output_data[:, range(17, 17 + 13 * nfilt, 13)].T
    _snr = output_data[:, range(19, 19 + 13 * nfilt, 13)].T
    _sharp = output_data[:, range(20, 20 + 13 * nfilt, 13)].T
    _round = output_data[:, range(21, 21 + 13 * nfilt, 13)].T
    _crowd = output_data[:, range(22, 22 + 13 * nfilt, 13)].T
    in_df, out_df, labels = [], [], []
    for i in range(nfilt):
        in_df.append(pack_input(input_data[i], valid_mag=valid_mag))
        # Boolean mask of trustworthy measurements for this filter
        t = validate_output(_mag_errors[i],
                            _count[i], _snr[i],
                            _sharp[i], _round[i],
                            _crowd[i])
        out_df.append(pack_output(_xy, _vega_mags[i], _mag_errors[i],
                                  _count[i], _snr[i], _sharp[i], _round[i],
                                  _crowd[i], t))
        labels.append(label_output(in_df[i], out_df[i],
                                   tol=tol,
                                   valid_mag=valid_mag,
                                   radec={'opt': use_radec,
                                          'wcs1': sky_coord[i],
                                          'wcs2': sky_coord[ref_fits]}))
    print("IN_DF ",in_df,len(in_df),in_df[0])
    return in_df, out_df, labels
def validate_output(err, count, snr, shr, rnd, crd):
    """Build a boolean mask of trustworthy photometry entries.

    An entry is kept when its magnitude error is below 0.5, its count rate
    is non-negative, its SNR is at least 1, and none of the crowding,
    sharpness or roundness columns carry a +/-9.999 missing-value sentinel.
    """
    good = err < 0.5
    good = good & (count >= 0)
    good = good & (snr >= 1)
    good = good & (crd != 9.999)
    for quality in (shr, rnd):
        good = good & (quality != 9.999) & (quality != -9.999)
    return good
def scale_features(_df):
    """Standardize the quality-metric columns of each frame (zero mean,
    unit variance) and return the modified list."""
    scaler = StandardScaler()
    columns = ('err', 'Count', 'SNR', 'Crowding', 'Sharpness', 'Roundness')
    for idx, frame in enumerate(_df):
        for col in columns:
            frame[col] = scaler.fit_transform(frame[col].values.reshape(-1, 1))
        _df[idx] = frame
    return _df
def pack_input(data, valid_mag=30.):
    """Convert an input source table to a pandas DataFrame, keeping only
    sources brighter than ``valid_mag``."""
    bright = data['vegamag'] < valid_mag
    columns = {'x': data['x'][bright],
               'y': data['y'][bright],
               'm': data['vegamag'][bright],
               'type': data['type'][bright]}
    return pd.DataFrame(columns)
def pack_output(xy, mags, errs, count, snr, shr, rnd, crd, t):
    """Assemble the DOLPHOT measurements passing boolean mask ``t`` into a
    pandas DataFrame of photometry plus all quality parameters."""
    data = {'x': xy[0][t], 'y': xy[1][t], 'mag': mags[t], 'err': errs[t]}
    for name, values in (('Count', count), ('SNR', snr), ('Sharpness', shr),
                         ('Roundness', rnd), ('Crowding', crd)):
        data[name] = values[t]
    return pd.DataFrame(data)
def label_output(in_df, out_df, tol=2., valid_mag=30., radec=None):
    """Label output entries and return the labels as a numpy array.

    Each output entry is matched to the closest input entry within radius
    ``tol`` among inputs brighter than ``valid_mag``. Detections matched to
    a point-source input get label 1, everything else 0. Sky coordinates
    are used instead of pixels when radec['opt'] is True (the simulated
    images may not be mutually aligned).
    """
    if radec is None:
        radec = {'opt': False, 'wcs1': '', 'wcs2': ''}
    in_x, in_y = in_df['x'].values, in_df['y'].values
    typ_in = in_df['type'].values
    mags = in_df['m'].values
    t = (mags < valid_mag)
    in_x, in_y, typ_in = in_x[t], in_y[t], typ_in[t]
    out_x, out_y = out_df['x'].values, out_df['y'].values
    tmp, typ_out = match_in_out(tol, in_x, in_y, out_x, out_y, typ_in, radec=radec)
    # Sersic-profile (galaxy) inputs count as non-point sources
    typ_out[typ_out == 'sersic'] = 'other'
    mag_diff = np.zeros(len(in_x))
    # NOTE(review): in_df['m'].values is the unfiltered column while the
    # tmp mask has the filtered length — this only lines up when no input
    # is fainter than valid_mag; confirm.
    mag_diff[tmp != -1] = in_df['m'].values[tmp != -1] - out_df['mag'].values[tmp[tmp != -1]]
    # print(len(typ_out[tmp[tmp!=-1]][np.fabs(mag_diff[tmp!=-1])>0.5]=='point'))
    # NOTE(review): chained fancy indexing assigns into a temporary copy,
    # so this demotion of >0.5 mag outliers has no effect on typ_out.
    typ_out[tmp[tmp != -1]][np.fabs(mag_diff[tmp != -1]) > 0.5] = 'other'
    # Binarize: 'point' -> 1, 'other' -> 0, flattened to 1-D
    typ_bin = label_binarize(typ_out, classes=['other', 'point'])
    typ_bin = typ_bin.reshape((typ_bin.shape[0],))
    return typ_bin
def input_pair(df, i, j, radec=None):
    """Pick sources added in both bands with the same object type.

    Cross-matches the input catalogs of filters ``i`` and ``j + 1`` and
    returns a dict with the two input magnitudes (m1_in, m2_in), the
    coordinates in the first band (X, Y) and the common source type
    (typ_in).
    """
    if radec is None:
        radec = {'opt': False, 'wcs1': '', 'wcs2': ''}
    m1_in, m2_in, x1, y1, x2, y2 = (df[i]['m'].values, df[j + 1]['m'].values,
                                    df[i]['x'].values, df[i]['y'].values,
                                    df[j + 1]['x'].values, df[j + 1]['y'].values)
    typ1_in, typ2_in = df[i]['type'].values, df[j + 1]['type'].values
    if radec['opt']:
        ra1, dec1 = xy_to_wcs(np.array([x1, y1]).T, radec['wcs1'])
        ra2, dec2 = xy_to_wcs(np.array([x2, y2]).T, radec['wcs2'])
        # 0.05 arcsec matching radius on the sky
        in12 = match_cats(0.05, ra1, dec1, ra2, dec2)
    else:
        # 0.1 pixel matching radius
        in12 = match_lists(0.1, x1, y1, x2, y2)
    # Keep only band-1 sources with a band-2 counterpart
    m1_in, x1, y1, typ1_in = m1_in[in12 != -1], x1[in12 != -1], y1[in12 != -1], typ1_in[in12 != -1]
    in12 = in12[in12 != -1]
    m2_in, typ2_in = m2_in[in12], typ2_in[in12]
    # Require the same object type in both bands
    tt = typ1_in == typ2_in
    m1_in, m2_in, x, y, typ_in = m1_in[tt], m2_in[tt], x1[tt], y1[tt], typ1_in[tt]
    return dict(zip(['m1_in', 'm2_in', 'X', 'Y', 'typ_in'], [m1_in, m2_in, x, y, typ_in]))
# Recovered source photometry and quality params
def output_pair(df, labels, i, j):
    """Pick sources detected in both bands.

    Cross-matches the cleaned output catalogs of filters ``i`` and
    ``j + 1`` (0.1-pixel radius) and returns a dict whose items each hold
    one entry per band: coordinates ('xy' holds x and y of band 1),
    magnitudes ('mag'), quality parameters ('err', 'snr', 'crd', 'rnd',
    'shr') and classifier labels ('lbl').
    """
    x1, y1, x2, y2 = df[i]['x'].values, df[i]['y'].values, df[j + 1]['x'].values, df[j + 1]['y'].values
    t2 = match_lists(0.1, x1, y1, x2, y2)
    # t1 masks band-1 rows with a counterpart; t2 indexes the band-2 rows
    t1 = t2 != -1
    t2 = t2[t2 != -1]
    xy = x1[t1], y1[t1]
    mags = [df[i]['mag'][t1].values, df[j + 1]['mag'][t2].values]
    errs = [df[i]['err'][t1].values, df[j + 1]['err'][t2].values]
    snrs = [df[i]['SNR'][t1].values, df[j + 1]['SNR'][t2].values]
    crds = [df[i]['Crowding'][t1].values, df[j + 1]['Crowding'][t2].values]
    rnds = [df[i]['Roundness'][t1].values, df[j + 1]['Roundness'][t2].values]
    shrs = [df[i]['Sharpness'][t1].values, df[j + 1]['Sharpness'][t2].values]
    lbls = [labels[i][t1], labels[j + 1][t2]]
    nms = ['xy', 'mag', 'err', 'snr', 'crd', 'rnd', 'shr', 'lbl']
    return dict(zip(nms, [xy, mags, errs, snrs, crds, rnds, shrs, lbls]))
def clean_pair(in_pair, out_pair, tol=2., radec=None):
    """Re-classify sources detected in both bands as stars.

    Keeps only detections labeled as stars in BOTH bands, then matches
    their positions back to the sources input as stars in both bands;
    unmatched detections are re-typed 'other'.

    Returns a dict with the two output magnitudes (m1, m2), coordinates
    (x, y) and output source type (typ_out).
    """
    if radec is None:
        radec = {'opt': False, 'wcs1': '', 'wcs2': ''}
    x1, y1, typ_in = in_pair['X'], in_pair['Y'], in_pair['typ_in']
    x2, y2 = out_pair['xy'][0], out_pair['xy'][1]
    m1_out, m2_out = out_pair['mag'][0], out_pair['mag'][1]
    t1, t2 = out_pair['lbl'][0], out_pair['lbl'][1]
    # Require a star label in both bands
    t = (t1 == 1) & (t2 == 1)
    x2, y2, m1_out, m2_out = x2[t], y2[t], m1_out[t], m2_out[t]
    tmp, typ_out = match_in_out(tol, x1, y1, x2, y2, typ_in, radec=radec)
    return dict(zip(['m1', 'm2', 'x', 'y', 'typ_out'], [m1_out, m2_out, x2, y2, typ_out]))
def save_cats(in_dat, out_dat, out_df, labels,
              sky_coord=SKY_COORD, fileroot='', nameroot='',
              filters=FILTERS, tol=2., ref_fits=0.,
              use_radec=False, valid_mag=30.):
    """Write the cleaned catalogs to disk.

    Per filter: writes the input table extended with recovered magnitude
    and position ('*_recov_input.txt', IPAC format) and the cleaned output
    DataFrame extended with the matched input magnitude ('*_clean.csv').
    Finally writes a trimmed copy of the raw photometry keeping only rows
    kept as stars in at least one filter ('*_Clean_Catalog.phot').

    99.99 marks unmatched entries in the extended columns.
    """
    i = -1
    flags = []
    # Global detection positions from the raw DOLPHOT catalog
    _X, _Y = out_dat[:, 2].T, out_dat[:, 3].T
    for data, df, label, filt in zip(in_dat, out_df, labels, filters):
        i += 1
        t = data['vegamag'] < valid_mag
        _df1 = pd.DataFrame({'x': data['x'], 'y': data['y'], 'mag': data['vegamag']})
        # Keep only detections the classifier labeled as stars
        _df2 = df[label == 1]
        x1, y1 = _df1['x'].values, _df1['y'].values
        x2, y2 = _df2['x'].values, _df2['y'].values
        if use_radec:
            ra1, dec1 = xy_to_wcs(np.array([x1, y1]).T, sky_coord[i])
            ra2, dec2 = xy_to_wcs(np.array([x2, y2]).T, sky_coord[ref_fits])
            # tol is in pixels; 0.11 converts to arcsec — presumably the
            # detector plate scale; TODO confirm
            in1 = match_cats(tol * 0.11, ra1, dec1, ra2, dec2)
            in2 = match_cats(tol * 0.11, ra2, dec2, ra1[t], dec1[t])
        else:
            in1 = match_lists(tol, x1, y1, x2, y2)
            in2 = match_lists(tol, x2, y2, x1[t], y1[t])
        # Extend input list with recovered mag
        re_mag = np.repeat(99.99, len(x1))
        re_x = np.repeat(99.99, len(x1))
        re_y = np.repeat(99.99, len(x1))
        _t = (in1 != -1) & t
        re_mag[_t] = _df2['mag'].values[in1[_t]]
        re_x[_t] = x2[in1[_t]]
        re_y[_t] = y2[in1[_t]]
        data['recovmag'] = re_mag
        data['recov_x'] = re_x
        data['recov_y'] = re_y
        ascii.write(data, fileroot + nameroot + '_' + str(filt) + '_recov_input.txt', format='ipac')
        # Extend output list with input mag
        inmag = np.repeat(99.99, len(x2))
        _t = in2 != -1
        inmag[_t] = _df1['mag'].values[t][in2[_t]]
        _df2['inputmag'] = inmag
        _df2[['x', 'y', 'mag', 'err', 'inputmag', 'Count', 'Crowding', 'Roundness', 'SNR', 'Sharpness']]. \
            to_csv(fileroot + nameroot + '_' + str(filt) + '_clean.csv', index=False)
        # Make shorter recovered phot file keeping sources kept in at least one filter
        in1 = match_lists(0.1, _X, _Y, x2, y2)
        flag = np.zeros(len(_X))
        flag[in1 != -1] = 1
        flags.append(flag)
    # A row survives if flagged as a star in any filter
    flag = np.sum(flags, axis=0)
    idx = np.arange(len(flag))
    idx = idx[flag != 0]
    new_dat = out_dat[idx, :]
    return np.savetxt(fileroot + nameroot + '_' + 'Clean_Catalog.phot', new_dat, fmt='%10.7e')
def match_lists(tol, x1, y1, x2, y2):
    """Match (x1, y1) positions against (x2, y2) with a KD-tree.

    Returns, for each point of the first list, the index of the nearest
    point in the second list within radius ``tol``, or -1 when no point is
    that close.
    """
    first = np.column_stack((x1, y1))
    second = np.column_stack((x2, y2))
    _dist, matched = cKDTree(second).query(first, distance_upper_bound=tol)
    # cKDTree reports "no neighbor within bound" as index == len(second)
    matched[matched == x2.size] = -1
    return matched
def match_cats(tol, ra1, dec1, ra2, dec2):
    """Match astronomical coordinates using astropy SkyCoord.

    Returns, for each position of the first list, the index of the closest
    position in the second list, or -1 when the separation exceeds ``tol``
    (arcsec). Inputs are RA/Dec in degrees.
    """
    c1 = SkyCoord(ra=ra1 * u.degree, dec=dec1 * u.degree)
    c2 = SkyCoord(ra=ra2 * u.degree, dec=dec2 * u.degree)
    in1, sep, tmp = match_coordinates_sky(c1, c2, storekdtree=False)
    sep = sep.to(u.arcsec)
    # NOTE(review): match_coordinates_sky always returns a valid index, so
    # this sentinel check presumably never fires — the tol cut below is
    # what actually marks non-matches.
    in1[in1 == ra2.size] = -1
    in1[sep > tol * u.arcsec] = -1
    return in1
def match_in_out(tol, in_x, in_y, out_x, out_y, typ_in, radec=None):
    """Match input coordinates to recovered coordinates.

    Returns two arrays:
    - the index of the matched output entry for each input (as produced by
      match_lists/match_cats, -1 for no match within ``tol``),
    - the source type of each OUTPUT entry: the type of its matched input,
      or 'other' for outputs no input matched to.
    """
    if radec is None:
        radec = {'opt': False, 'wcs1': '', 'wcs2': ''}
    if radec['opt']:
        ra1, dec1 = xy_to_wcs(np.array([in_x, in_y]).T, radec['wcs1'])
        ra2, dec2 = xy_to_wcs(np.array([out_x, out_y]).T, radec['wcs2'])
        # tol pixels converted to arcsec via the 0.11 plate scale
        in1 = match_cats(tol * 0.11, ra1, dec1, ra2, dec2)
    else:
        in1 = match_lists(tol, in_x, in_y, out_x, out_y)
    # in3: output indices that received a match; in5: the unmatched rest
    in2 = in1 != -1
    in3 = in1[in2]
    in4 = np.arange(len(out_x))
    in5 = np.setdiff1d(in4, in3)
    typ_out = np.empty(len(out_x), dtype='<U10')
    typ_out[in3] = typ_in[in2]
    typ_out[in5] = 'other'
    return in1, typ_out
def print_report(filt, test_labels, pred_labels, feat_nms, feat_imp=None, short_rep=True):
    """Evaluate the classification model for one filter.

    Scores the classifier overall and per class, derives the confusion
    counts, and prints precision plus (when ``short_rep`` is False) the
    full breakdown including recall and specificity.

    NOTE(review): ``feat_imp`` and ``feat_nms`` are only used by the
    commented-out importance printout below; precision can also raise
    ZeroDivisionError when no positives are predicted — confirm both.
    """
    # if feat_imp is None:
    #    feat_imp = []
    score1 = accuracy_score(test_labels, pred_labels)
    # Per-class accuracies: non-point (label 0) and point (label 1)
    score2 = accuracy_score(test_labels[test_labels == 0], pred_labels[test_labels == 0])
    score3 = accuracy_score(test_labels[test_labels == 1], pred_labels[test_labels == 1])
    # Confusion counts reconstructed from the per-class accuracies
    tp = int(np.ceil(score3 * len(test_labels[test_labels == 1])))
    fn = int(np.ceil((1 - score3) * len(test_labels[test_labels == 1])))
    tn = int(np.ceil(score2 * len(test_labels[test_labels == 0])))
    fp = int(np.ceil((1 - score2) * len(test_labels[test_labels == 0])))
    print('\nBand {:s} feature importance:'.format(filt))
    if not short_rep:
        print('\n Non-point: {:d}'.format(len(test_labels[test_labels == 0])))
        print(' Point:\t\t{:d}\n'.format(len(test_labels[test_labels == 1])))
        print(' Tp:\t\t{:d}\n Fp:\t\t{:d}\n Tn:\t\t{:d}\n Fn:\t\t{:d}\n'.format(tp, fp, tn, fn))
        print(' All:\t\t{:.2f}\n Non-point:\t{:.2f}\n Point:\t\t{:.2f}\n'.format(score1, score2, score3))
        print(' Precision:\t{:.2f}'.format(tp / (tp + fp)))
        # _tmp = [print('{:s}:\t{:.3f}'.format(feat_nms[i],feat_imp[i]))
        #        for i in range(len(feat_nms))]
        # print('\n Precision:\t{:.2f}'.format(tp/(tp+fp)))
        print(' Recall:\t{:.2f} (Sensitivity)'.format(tp / (tp + fn)))
        print(' Specificity:\t{:.2f}\n'.format(tn / (tn + fp)))
    return print('\n')
def make_plots(in_df, out_df, new_labels,
               sky_coord=SKY_COORD, fileroot='', nameroot='',
               filters=FILTERS,
               tol=5., ref_fits=0.,
               use_radec=False,
               show_plot=False):
    """Produce figures and text for qualitative model evaluation.

    For every ordered filter pair (i, j+1), builds matched input/output/
    cleaned source sets and delegates the actual CMD and offset plotting
    to make_cmd_and_xy().
    """
    print("IN MAKE PLOTS")
    def paired_in(a, b, c):
        # Input sources added in both bands with the same type
        return input_pair(in_df, a, b, c)
    def paired_out(a, b):
        # Detections recovered in both bands
        return output_pair(out_df, new_labels, a, b)
    for i in range(len(filters) - 1):
        for j in range(i, len(filters) - 1):
            # radec1 aligns the two bands of the pair; radec2 aligns band i
            # to the reference image
            radec1 = {'opt': use_radec,
                      'wcs1': sky_coord[i], 'wcs2': sky_coord[j + 1]}
            radec2 = {'opt': use_radec,
                      'wcs1': sky_coord[i], 'wcs2': sky_coord[ref_fits]}
            in_pair, out_pair = paired_in(i, j, radec1), paired_out(i, j)
            print("PAIRED IN",in_pair,"LENGTH ",len(in_pair))
            cln_pair = clean_pair(in_pair, out_pair, tol=tol, radec=radec2)
            make_cmd_and_xy(in_pair, out_pair, cln_pair,
                            fileroot=fileroot, tol=tol, filepre=nameroot,
                            filt1=filters[i], filt2=filters[j + 1],
                            ab_vega1=AB_VEGA[i], ab_vega2=AB_VEGA[j + 1],
                            opt=['input', 'output', 'clean', 'diff'],
                            radec=radec2, show_plot=show_plot)
    return print('\n')
def make_cmd_and_xy(all_in={}, all_out={}, clean_out={},
                    filt1='', filt2='', ab_vega1=0., ab_vega2=0.,
                    fileroot='', tol=5., filepre='',
                    opt=None, radec=None, show_plot=False):
    """Produce color-magnitude diagrams and magnitude-offset plots.

    ``opt`` selects what to draw: 'input' (input CMD), 'output' (full
    recovered CMD), 'clean' (cleaned CMD plus recovery statistics) and
    'diff'/'diff2' (input-minus-output magnitude differences per band).

    NOTE(review): the mutable {} defaults are only read, never mutated, so
    they are harmless here, but consider None defaults.
    NOTE(review): ab_vega1/ab_vega2 are currently unused.
    """
    if radec is None:
        radec = {'opt': False, 'wcs1': '', 'wcs2': ''}
    if opt is None:
        opt = ['input', 'output', 'clean', 'diff']
    print('\nFilters {:s} and {:s}:'.format(filt1, filt2))
    print('\n pre: {:s}'.format(filepre))
    def plot_me(a, b, st, ot, ttl, pre, post):
        # CMD helper: color = a - b, magnitude = b
        return plot_cmd(a, b, filt1=filt1, filt2=filt2, stars=st, other=ot, title=ttl,
                        fileroot=fileroot, outfile='_'.join((filepre, 'cmd', filt1, filt2, post)), show_plot=show_plot)
    def plot_it(a, b, filt):
        # Offset helper: input magnitude vs (input - output)
        return plot_xy(x=a, y=a - b, ylim1=-1.0, ylim2=1.0, xlim1=18.5, xlim2=28,
                       ylabel='magIn - magOut', xlabel='magOut', title='In-Out Mag Diff {:s}'.format(filt),
                       fileroot=fileroot, outfile='_'.join((filepre, 'mag', 'diff', filt)), show_plot=show_plot)
    #BFW commenting out this line
    #m1_in, m2_in, typ_in = np.array([])
    m1_in = np.array([])
    m2_in = np.array([])
    typ_in = np.array([])
    if ('input' in opt) & (len(all_in) > 0):
        m1_in, m2_in, typ_in = all_in['m1_in'], all_in['m2_in'], all_in['typ_in']
        stars, other = typ_in == 'point', typ_in != 'point'
        print('Stars: {:d} Others: {:d}'.format(int(np.sum(stars)), int(np.sum(other))))
        plot_me(m1_in, m2_in, stars, other,
                'Input CMD (Vega)', 'input', 'Vega')
    if ('output' in opt) & (len(all_out) > 0):
        m1, m2 = all_out['mag'][0], all_out['mag'][1]
        if 'input' in opt:
            # Type each detection by matching back to the input positions
            in_x, in_y, out_x, out_y = all_in['X'], all_in['Y'], all_out['xy'][0], all_out['xy'][1]
            in1, typ_out = match_in_out(tol, in_x, in_y, out_x, out_y, typ_in, radec=radec)
            # stars, other = typ_out == 'point', typ_out != 'point'
            if ('diff' in opt) | ('diff2' in opt):
                # Matched input stars whose detection is also typed 'point'
                t1 = (in1 != -1) & (typ_in == 'point')
                m1in, m2in, m1t, m2t = m1_in[t1], m2_in[t1], m1[in1[t1]], m2[in1[t1]]
                t2 = typ_out[in1[t1]] == 'point'
                m1in, m2in, m1t, m2t = m1in[t2], m2in[t2], m1t[t2], m2t[t2]
                if 'diff' in opt:
                    plot_it(m1in, m1t, filt1)
                if 'diff2' in opt:
                    plot_it(m2in, m2t, filt2)
        else:
            typ_out = np.repeat('other', len(m1))
        stars, other = typ_out == 'point', typ_out != 'point'
        print('Stars: {:d} Others: {:d}'.format(int(np.sum(stars)), int(np.sum(other))))
        plot_me(m1, m2, stars, other, 'Full CMD', 'output', 'full')
    if ('clean' in opt) & (len(clean_out) > 0):
        m1, m2, typ_out = clean_out['m1'], clean_out['m2'], clean_out['typ_out']
        stars, other = typ_out == 'point', typ_out != 'point'
        print('Stars: {:d} Others: {:d}'.format(int(np.sum(stars)), int(np.sum(other))))
        plot_me(m1, m2, stars, other, 'Cleaned CMD', 'clean', 'clean')
        rr, fr = get_stat(all_in['typ_in'], clean_out['typ_out'])
        print('Recovery Rate:\t {:.2f}\nFalse Rate: \t {:.2f}\n'.format(rr, fr))
    return print('\n')
def plot_cmd(m1, m2, e1=[], e2=[], filt1='', filt2='', stars=[], other=[],
             fileroot='', outfile='test', fmt='png',
             xlim1=-1.5, xlim2=3.5, ylim1=28.5, ylim2=16.5, n=4,
             title='', show_plot=False):
    """Produce a color-magnitude diagram (m1-m2 vs m2) and save it.

    When star/other masks are given, the two populations are drawn
    separately with a legend; otherwise a Hess density overlay is used for
    the densest regions.

    NOTE(review): e1/e2 are unused (error bars are commented out) and the
    mutable [] defaults are read-only here — consider None defaults.
    """
    print("IN PLOT CMD")
    m1m2 = m1 - m2
    plt.rc("font", family='serif', weight='bold')
    plt.rc("xtick", labelsize=15)
    plt.rc("ytick", labelsize=15)
    fig = plt.figure(1, (10, 10))
    fig.suptitle(title, fontsize=5 * n)
    if np.sum(stars) == 0:
        # No classification available: draw density contours plus the
        # sparse remainder as points
        m1m2t, m2t = plot_hess(m1m2, m2)
        plt.plot(m1m2t, m2t, 'k.', markersize=2, alpha=0.75, zorder=3)
    else:
        plt.plot(m1m2[stars], m2[stars], 'b.', markersize=2,
                 alpha=0.75, zorder=2, label='Stars: %d' % len(m2[stars]))
        plt.plot(m1m2[other], m2[other], 'k.', markersize=1,
                 alpha=0.5, zorder=1, label='Other: %d' % len(m2[other]))
        plt.legend(loc=4, fontsize=20)
    # if len(e1) & len(e2):
    #    m1m2err = np.sqrt(e1 ** 2 + e2 ** 2)
    #    plot_error_bars(m2, e2, m1m2err, xlim1, xlim2, ylim1, slope=[])
    plt.xlim(xlim1, xlim2)
    plt.ylim(ylim1, ylim2)
    plt.xlabel(str(filt1 + '-' + filt2), fontsize=20)
    plt.ylabel(filt2, fontsize=20)
    print('\t\t\t Writing out: ', fileroot + outfile + '.' + str(fmt))
    plt.savefig(fileroot + outfile + '.' + str(fmt))
    if show_plot:
        plt.show()
    return plt.close()
def plot_xy(x, y, xlabel='', ylabel='', title='', stars=[], other=[],
            xlim1=-1., xlim2=1., ylim1=-7.5, ylim2=7.5,
            fileroot='', outfile='test', fmt='png', n=4,
            show_plot=False):
    """Custom scatterplot maker; saves the figure and optionally shows it.

    When star/other masks are supplied and non-empty, draws the two
    populations separately with a legend; otherwise draws all points.

    NOTE(review): the [] defaults require x to support empty fancy
    indexing (numpy arrays do; plain lists would raise) — confirm callers.
    """
    plt.rc("font", family='serif', weight='bold')
    plt.rc("xtick", labelsize=15)
    plt.rc("ytick", labelsize=15)
    fig = plt.figure(1, (10, 10))
    fig.suptitle(title, fontsize=5 * n)
    if not len(x[other]):
        plt.plot(x, y, 'k.', markersize=1, alpha=0.5)
    else:
        plt.plot(x[stars], y[stars], 'b.', markersize=2,
                 alpha=0.5, zorder=2, label='Stars: %d' % len(x[stars]))
        plt.plot(x[other], y[other], 'k.', markersize=1,
                 alpha=0.75, zorder=1, label='Other: %d' % len(x[other]))
        plt.legend(loc=4, fontsize=20)
    plt.xlim(xlim1, xlim2)
    plt.ylim(ylim1, ylim2)
    plt.xlabel(xlabel, fontsize=20)
    plt.ylabel(ylabel, fontsize=20)
    plt.savefig(fileroot + outfile + '.' + str(fmt))
    # print('\t\t\t Writing out: ',fileroot+outfile+'.'+str(fmt))
    if show_plot:
        plt.show()
    return plt.close()
def plot_hess(color, mag, binsize=0.1, threshold=25):
    """Overlay a Hess (binned-density) diagram on the densest regions of a
    CMD scatterplot.

    Returns (color, mag), with the points falling in the densest bins
    masked out so the caller does not re-draw them as individual markers.
    Small samples (<= threshold points) are returned unchanged.
    """
    if not len(color) > threshold:
        return color, mag
    # mmin, mmax = np.amin(mag), np.amax(mag)
    cmin, cmax = np.amin(color), np.amax(color)
    # BUG FIX: bin counts must be integers — np.histogram2d and np.logspace
    # reject float bin/sample counts.
    # NOTE(review): both bin counts are derived from the *color* range; the
    # commented mmin/mmax above suggests nmbins was meant to use the mag
    # range — confirm.
    nmbins = int(np.ceil((cmax - cmin) / binsize))
    ncbins = int(np.ceil((cmax - cmin) / binsize))
    hist_value, x_ticks, y_ticks = np.histogram2d(color, mag, bins=(ncbins, nmbins))
    # Bin centers from the bin edges
    x_ctrds = 0.5 * (x_ticks[:-1] + x_ticks[1:])
    y_ctrds = 0.5 * (y_ticks[:-1] + y_ticks[1:])
    y_grid, x_grid = np.meshgrid(y_ctrds, x_ctrds)
    masked_hist = np.ma.array(hist_value, mask=(hist_value == 0))
    levels = np.logspace(np.log10(threshold),
                         np.log10(np.amax(masked_hist)), int((nmbins / ncbins) * 20))
    if (np.amax(masked_hist) > threshold) & (len(levels) > 1):
        cntr = plt.contourf(x_grid, y_grid, masked_hist, cmap=cm.jet, levels=levels, zorder=0)
        # Hide filled regions below the lowest contour level
        cntr.cmap.set_under(alpha=0)
        x_grid, y_grid, masked_hist = x_grid.flatten(), y_grid.flatten(), hist_value.flatten()
        # Bins dense enough that their points should be masked from the scatter
        x_grid = x_grid[masked_hist > 2.5 * threshold]
        y_grid = y_grid[masked_hist > 2.5 * threshold]
        mask = np.zeros_like(mag)
        for col, m in zip(x_grid, y_grid):
            mask[(m - binsize < mag) & (m + binsize > mag) &
                 (col - binsize < color) & (col + binsize > color)] = 1
        mag = np.ma.array(mag, mask=mask)
        color = np.ma.array(color, mask=mask)
    return color, mag
def xy_to_wcs(xy, _w):
    """Convert pixel coordinates to world coordinates via the given WCS.

    ``xy`` is an (N, 2) array of pixel positions; returns two arrays
    (ra, dec) using the FITS 1-based pixel origin.
    """
    world = _w.wcs_pix2world(xy, 1)
    ra, dec = world[:, 0], world[:, 1]
    return ra, dec
def get_stat(typ_in, typ_out):
    """Return the star recovery rate and false-detection rate.

    Parameters
    ----------
    typ_in : numpy array of str
        Input source types ('point' marks stars).
    typ_out : numpy array of str
        Recovered source types.

    Returns
    -------
    (recovery_rate, false_rate) : tuple of float
        Fraction of input stars that were recovered as stars, and fraction
        of recovered sources that are not stars. Assumes typ_out is
        non-empty and typ_in contains at least one 'point' entry
        (otherwise ZeroDivisionError).
    """
    # Removed the unused ``all_in = len(typ_in)`` local from the original.
    all_recov = len(typ_out)
    stars_in = len(typ_in[typ_in == 'point'])
    stars_recov = len(typ_out[typ_out == 'point'])
    recovery_rate = (stars_recov / stars_in)
    false_rate = 1 - (stars_recov / all_recov)
    return recovery_rate, false_rate
def parse_all():
    """Parse command-line arguments for this script.

    Extends the shared ``wp.PARSER`` with the two alternative selectors
    used in ``__main__``: a configuration ID (--C) or a target ID (--T).
    Note this mutates the module-level parser in place.
    """
    parser = wp.PARSER
    parser.add_argument('--C', '-c', type=int, dest='config_id',
                        help='Configuration ID')
    parser.add_argument('--T', '-t', type=int, dest='target_id',
                        help='Target ID')
    return parser.parse_args()
if __name__ == '__main__':
    # Dispatch on whichever selector was supplied on the command line.
    args = parse_all()
    if args.config_id:
        # A single configuration was requested.
        myConfig = wp.Configuration(args.config_id)
        # cull_photometry(myConfig)
    elif args.target_id:
        # A target: iterate over every configuration attached to it.
        myTarget = wp.Target(int(args.target_id))
        pid = myTarget.pipeline_id
        allConf = myTarget.configurations
        for myConfig in allConf:
            print(myConfig)
            # cull_photometry(myConfig)
    else:
        # No IDs given: NOTE(review) presumably running inside the pipeline
        # framework -- the data-product ID comes from the firing event.
        this_job = wp.ThisJob
        this_event = wp.ThisEvent
        dp_id = this_event.options['dp_id']
        print(this_job.config_id)
        myConfig = this_job.config
        cull_photometry(myConfig, dp_id)
| 40.915375 | 119 | 0.57105 |
aca50f4c3b1c2c48b00af8c92a244a1c7001bee6 | 3,400 | py | Python | run_model.py | JustinLokHinWu/ACTGAN | d9e22f8fd499baaf5e647046eb3a214a57b5c201 | [
"MIT"
] | 1 | 2021-08-22T08:44:11.000Z | 2021-08-22T08:44:11.000Z | run_model.py | JustinLokHinWu/ACTGAN | d9e22f8fd499baaf5e647046eb3a214a57b5c201 | [
"MIT"
] | null | null | null | run_model.py | JustinLokHinWu/ACTGAN | d9e22f8fd499baaf5e647046eb3a214a57b5c201 | [
"MIT"
] | 1 | 2022-01-10T01:00:13.000Z | 2022-01-10T01:00:13.000Z | import argparse
from torchvision import transforms
from attrdict import AttrDict
import json
import torch
import glob
from models.generator import ACGAN_Generator
from utils.helpers import noise, label_to_onehot
class GeneratorRunner:
    """Loads pretrained ACGAN generator checkpoints and produces images.

    cfg: AttrDict containing model parameters (reads ``cuda``,
    ``models_dir``, ``noise_size`` and ``n_classes``).
    """

    def __init__(self, cfg):
        self.generator = ACGAN_Generator(cfg)
        self.generator.eval()  # inference mode
        if cfg.cuda:
            self.generator = self.generator.cuda()
        self.models_dir = cfg.models_dir
        self.noise_size = cfg.noise_size
        self.cuda = cfg.cuda
        self.n_classes = cfg.n_classes
        # Lazily computed cache of available checkpoint epochs.
        self.valid_epochs = None

    def evaluate(self, class_id, epoch, seed=None):
        """Generate an image of class ``class_id`` using the generator
        checkpoint at ``epoch``, using a custom seed if supplied.

        Returns a PIL image, or None on an invalid class or a
        missing/unreadable checkpoint.
        """
        if class_id < 0 or class_id >= self.n_classes:
            return None
        checkpoint = '{}/G_epoch_{}'.format(self.models_dir, epoch)
        try:
            if self.cuda:
                state = torch.load(checkpoint)
            else:
                state = torch.load(checkpoint,
                                   map_location=torch.device('cpu'))
            self.generator.load_state_dict(state)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate; any load failure still yields None.
            return None
        if seed is not None:
            # ``is not None`` (not truthiness) so that seed=0 is honoured.
            torch.manual_seed(seed)
        # Setup input for generator
        input_noise = noise(1, self.noise_size, self.cuda)
        input_onehot = label_to_onehot(
            torch.Tensor([class_id]).long(),
            self.n_classes, self.cuda
        )
        with torch.no_grad():
            # Generator output is in [-1, 1]; rescale to [0, 1] for PIL.
            output = self.generator(input_noise, input_onehot) / 2.0 + 0.5
        image = transforms.ToPILImage()(output[0].cpu())
        return image

    def get_valid_epochs(self):
        """Return a sorted list of all valid generator epochs in models_dir."""
        if self.valid_epochs is None:
            paths = glob.glob('{}/G_epoch_*'.format(self.models_dir))
            self.valid_epochs = sorted(
                int(path.rsplit('_', 1)[1]) for path in paths)
        return self.valid_epochs
if __name__=='__main__':
    # Load arguments
    parser = argparse.ArgumentParser(
        description='Load and run pretrained ACTGAN generator.'
    )
    parser.add_argument(
        '--config',
        help='path to config json used to train the generator',
        required=True
    )
    parser.add_argument(
        '--out_dir',
        help='path to output file',
        required=True
    )
    parser.add_argument(
        '--class_id',
        help='class index for generated image',
        required=True,
        type=int
    )
    parser.add_argument(
        '--epoch',
        help='model epoch to use',
        type=int
    )
    parser.add_argument(
        '--seed',
        help='seed for random noise generation',
        type=int
    )
    args = parser.parse_args()
    # Load config from file
    with open(args.config) as f:
        cfg = AttrDict(json.load(f))
    gen_runner = GeneratorRunner(cfg)
    # Default to the newest available checkpoint when --epoch is omitted.
    if args.epoch is not None:
        epoch = args.epoch
    else:
        epoch = gen_runner.get_valid_epochs()[-1]
    image = gen_runner.evaluate(args.class_id, epoch=epoch, seed=args.seed)
    # NOTE(review): evaluate() returns None on a bad class/checkpoint, which
    # would make the next line raise AttributeError -- consider a check.
    image.save(args.out_dir)
| 27.642276 | 78 | 0.595882 |
b6974d68e940e918808c7be7a1ee49d0196aca0e | 1,713 | py | Python | CondTools/Ecal/python/EcalTPGPedfromFile_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | CondTools/Ecal/python/EcalTPGPedfromFile_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | CondTools/Ecal/python/EcalTPGPedfromFile_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# cmsRun configuration: writes ECAL trigger-primitive pedestals (record
# EcalTPGPedestalsRcd, tag 'EcalTPGPedestals_test') into a local sqlite
# conditions database via the ExTestEcalTPGPedfromFile analyzer.
process = cms.Process("ProcessOne")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb/'
#
# Choose the output database
#
#process.CondDBCommon.connect = 'oracle://cms_orcon_prod/CMS_COND_311X_ECAL_LAS'
process.CondDBCommon.connect = 'sqlite_file:EcalLinPed.db'
#process.CondDBCommon.connect = 'oracle://cms_orcoff_prep/CMS_COND_ECAL'
# Send framework messages to stdout only (cerr disabled).
process.MessageLogger = cms.Service("MessageLogger",
    cerr = cms.untracked.PSet(
        enable = cms.untracked.bool(False)
    ),
    cout = cms.untracked.PSet(
        enable = cms.untracked.bool(True)
    ),
    debugModules = cms.untracked.vstring('*')
)
# Single empty "event" driven by run number 1 (conditions-writing job).
process.source = cms.Source("EmptyIOVSource",
    firstValue = cms.uint64(1),
    lastValue = cms.uint64(1),
    timetype = cms.string('runnumber'),
    interval = cms.uint64(1)
)
# Input conditions source (same record/tag as the output below).
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
    process.CondDBCommon,
    timetype = cms.untracked.string('runnumber'),
    toGet = cms.VPSet(
        cms.PSet(
            record = cms.string('EcalTPGPedestalsRcd'),
            tag = cms.string('EcalTPGPedestals_test')
        )
    )
)
# Output service: destination database plus a sqlite transaction log.
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    process.CondDBCommon,
    logconnect = cms.untracked.string('sqlite_file:DBLog.db'),
    timetype = cms.untracked.string('runnumber'),
    toPut = cms.VPSet(
        cms.PSet(
            record = cms.string('EcalTPGPedestalsRcd'),
            tag = cms.string('EcalTPGPedestals_test')
        )
    )
)
process.Test1 = cms.EDAnalyzer("ExTestEcalTPGPedfromFile",
    record = cms.string('EcalTPGPedestalsRcd'),
    Source = cms.PSet(
        debug = cms.bool(True),
    )
)
process.p = cms.Path(process.Test1)
0c4621c54f08a6050564a7e549c71cc2277bc7d2 | 8,891 | py | Python | test/test.py | ahupp/python-magic | 7f7542fcbc192fef6e4939f4eb748e941a720b2c | [
"MIT"
] | 1,803 | 2015-01-08T09:18:56.000Z | 2022-03-31T06:26:22.000Z | test/test.py | ahupp/python-magic | 7f7542fcbc192fef6e4939f4eb748e941a720b2c | [
"MIT"
] | 209 | 2015-01-06T18:15:35.000Z | 2022-03-23T20:41:50.000Z | test/test.py | ahupp/python-magic | 7f7542fcbc192fef6e4939f4eb748e941a720b2c | [
"MIT"
] | 232 | 2015-01-11T05:31:19.000Z | 2022-03-17T09:29:32.000Z | import os
# for output which reports a local time
os.environ['TZ'] = 'GMT'
# The locale must be set before the interpreter started; we can only verify.
if os.environ.get('LC_ALL', '') != 'en_US.UTF-8':
    # this ensure we're in a utf-8 default filesystem encoding which is
    # necessary for some tests
    raise Exception("must run `export LC_ALL=en_US.UTF-8` before running test suite")
import shutil
import os.path
import unittest
import magic
import sys
# magic_descriptor is broken (?) in centos 7, so don't run those tests
SKIP_FROM_DESCRIPTOR = bool(os.environ.get('SKIP_FROM_DESCRIPTOR'))
class MagicTest(unittest.TestCase):
    """Exercises the python-magic API against the bundled fixture files."""

    # Directory containing the fixture files these tests identify.
    TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testdata')

    def test_version(self):
        # version() raises NotImplementedError on libmagic builds without it.
        try:
            self.assertTrue(magic.version() > 0)
        except NotImplementedError:
            pass

    def test_fs_encoding(self):
        # Guards the LC_ALL=en_US.UTF-8 requirement enforced at import time.
        self.assertEqual('utf-8', sys.getfilesystemencoding().lower())

    def assert_values(self, m, expected_values, buf_equals_file=True):
        """For each filename -> expected value(s), check that from_buffer and
        from_file both return one of the expected values (a bare value is
        treated as a one-element tuple)."""
        for filename, expected_value in expected_values.items():
            try:
                filename = os.path.join(self.TESTDATA_DIR, filename)
            except TypeError:
                # bytes filename: join against a bytes version of the dir.
                filename = os.path.join(
                    self.TESTDATA_DIR.encode('utf-8'), filename)
            if type(expected_value) is not tuple:
                expected_value = (expected_value,)
            with open(filename, 'rb') as f:
                buf_value = m.from_buffer(f.read())
            file_value = m.from_file(filename)
            if buf_equals_file:
                self.assertEqual(buf_value, file_value)
            for value in (buf_value, file_value):
                self.assertIn(value, expected_value)

    def test_from_file_str_and_bytes(self):
        # Both str and bytes paths must be accepted.
        filename = os.path.join(self.TESTDATA_DIR, "test.pdf")
        self.assertEqual('application/pdf',
                         magic.from_file(filename, mime=True))
        self.assertEqual('application/pdf',
                         magic.from_file(filename.encode('utf-8'), mime=True))

    def test_from_descriptor_str_and_bytes(self):
        if SKIP_FROM_DESCRIPTOR:
            self.skipTest("magic_descriptor is broken in this version of libmagic")
        filename = os.path.join(self.TESTDATA_DIR, "test.pdf")
        with open(filename) as f:
            # Called twice: the descriptor must still be usable afterwards.
            self.assertEqual('application/pdf',
                             magic.from_descriptor(f.fileno(), mime=True))
            self.assertEqual('application/pdf',
                             magic.from_descriptor(f.fileno(), mime=True))

    def test_from_buffer_str_and_bytes(self):
        # NOTE(review): reuses the descriptor skip flag -- confirm intended.
        if SKIP_FROM_DESCRIPTOR:
            self.skipTest("magic_descriptor is broken in this version of libmagic")
        m = magic.Magic(mime=True)
        self.assertTrue(
            m.from_buffer('#!/usr/bin/env python\nprint("foo")')
            in ("text/x-python", "text/x-script.python"))
        self.assertTrue(
            m.from_buffer(b'#!/usr/bin/env python\nprint("foo")')
            in ("text/x-python", "text/x-script.python"))

    def test_mime_types(self):
        # Copy 'lambda' to a Greek-letter filename to exercise non-ASCII paths.
        dest = os.path.join(MagicTest.TESTDATA_DIR,
                            b'\xce\xbb'.decode('utf-8'))
        shutil.copyfile(os.path.join(MagicTest.TESTDATA_DIR, 'lambda'), dest)
        try:
            m = magic.Magic(mime=True)
            self.assert_values(m, {
                'magic._pyc_': ('application/octet-stream', 'text/x-bytecode.python'),
                'test.pdf': 'application/pdf',
                'test.gz': ('application/gzip', 'application/x-gzip'),
                'test.snappy.parquet': 'application/octet-stream',
                'text.txt': 'text/plain',
                b'\xce\xbb'.decode('utf-8'): 'text/plain',
                b'\xce\xbb': 'text/plain',
            })
        finally:
            os.unlink(dest)

    def test_descriptions(self):
        m = magic.Magic()
        os.environ['TZ'] = 'UTC'  # To get last modified date of test.gz in UTC
        try:
            # Multiple alternatives accepted because libmagic's wording for
            # gzip/parquet/pyc varies between versions.
            self.assert_values(m, {
                'magic._pyc_': 'python 2.4 byte-compiled',
                'test.pdf': ('PDF document, version 1.2',
                             'PDF document, version 1.2, 2 pages'),
                'test.gz':
                    ('gzip compressed data, was "test", from Unix, last '
                     'modified: Sun Jun 29 01:32:52 2008',
                     'gzip compressed data, was "test", last modified'
                     ': Sun Jun 29 01:32:52 2008, from Unix',
                     'gzip compressed data, was "test", last modified'
                     ': Sun Jun 29 01:32:52 2008, from Unix, original size 15',
                     'gzip compressed data, was "test", '
                     'last modified: Sun Jun 29 01:32:52 2008, '
                     'from Unix, original size modulo 2^32 15',
                     'gzip compressed data, was "test", last modified'
                     ': Sun Jun 29 01:32:52 2008, from Unix, truncated'
                     ),
                'text.txt': 'ASCII text',
                'test.snappy.parquet': ('Apache Parquet', 'Par archive data'),
            }, buf_equals_file=False)
        finally:
            del os.environ['TZ']

    def test_extension(self):
        try:
            m = magic.Magic(extension=True)
            self.assert_values(m, {
                # some versions return '' for the extensions of a gz file,
                # including w/ the command line. Who knows...
                'test.gz': ('gz/tgz/tpz/zabw/svgz', '', '???'),
                'name_use.jpg': 'jpeg/jpg/jpe/jfif',
            })
        except NotImplementedError:
            self.skipTest('MAGIC_EXTENSION not supported in this version')

    def test_unicode_result_nonraw(self):
        # With raw=False, non-printable bytes are escaped (\011 for tab).
        m = magic.Magic(raw=False)
        src = os.path.join(MagicTest.TESTDATA_DIR, 'pgpunicode')
        result = m.from_file(src)
        # NOTE: This check is added as otherwise some magic files don't identify the test case as a PGP key.
        if 'PGP' in result:
            assert r"PGP\011Secret Sub-key -" == result
        else:
            raise unittest.SkipTest("Magic file doesn't return expected type.")

    def test_unicode_result_raw(self):
        # With raw=True, the literal tab byte is preserved.
        m = magic.Magic(raw=True)
        src = os.path.join(MagicTest.TESTDATA_DIR, 'pgpunicode')
        result = m.from_file(src)
        if 'PGP' in result:
            assert b'PGP\tSecret Sub-key -' == result.encode('utf-8')
        else:
            raise unittest.SkipTest("Magic file doesn't return expected type.")

    def test_mime_encodings(self):
        m = magic.Magic(mime_encoding=True)
        self.assert_values(m, {
            'text-iso8859-1.txt': 'iso-8859-1',
            'text.txt': 'us-ascii',
        })

    def test_errors(self):
        m = magic.Magic()
        self.assertRaises(IOError, m.from_file, 'nonexistent')
        self.assertRaises(magic.MagicException, magic.Magic,
                          magic_file='nonexistent')
        # A bogus MAGIC env var must also surface as MagicException.
        os.environ['MAGIC'] = 'nonexistent'
        try:
            self.assertRaises(magic.MagicException, magic.Magic)
        finally:
            del os.environ['MAGIC']

    def test_keep_going(self):
        filename = os.path.join(self.TESTDATA_DIR, 'keep-going.jpg')
        m = magic.Magic(mime=True)
        self.assertEqual(m.from_file(filename), 'image/jpeg')
        try:
            # this will throw if you have an "old" version of the library
            # I'm otherwise not sure how to query if keep_going is supported
            magic.version()
            m = magic.Magic(mime=True, keep_going=True)
            self.assertEqual(m.from_file(filename),
                             'image/jpeg\\012- application/octet-stream')
        except NotImplementedError:
            pass

    def test_rethrow(self):
        # Temporarily monkey-patch the low-level call to force an exception
        # and verify it propagates out of the high-level API.
        old = magic.magic_buffer
        try:
            def t(x, y):
                raise magic.MagicException("passthrough")
            magic.magic_buffer = t
            with self.assertRaises(magic.MagicException):
                magic.from_buffer("hello", True)
        finally:
            magic.magic_buffer = old

    def test_getparam(self):
        m = magic.Magic(mime=True)
        try:
            m.setparam(magic.MAGIC_PARAM_INDIR_MAX, 1)
            self.assertEqual(m.getparam(magic.MAGIC_PARAM_INDIR_MAX), 1)
        except NotImplementedError:
            pass

    def test_name_count(self):
        # Smoke test: identifying this fixture must not raise.
        m = magic.Magic()
        with open(os.path.join(self.TESTDATA_DIR, 'name_use.jpg'), 'rb') as f:
            m.from_buffer(f.read())

    def test_pathlike(self):
        # os.PathLike arguments require Python >= 3.6.
        if sys.version_info < (3, 6):
            return
        from pathlib import Path
        path = Path(self.TESTDATA_DIR, "test.pdf")
        m = magic.Magic(mime=True)
        self.assertEqual('application/pdf', m.from_file(path))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 38.158798 | 108 | 0.571589 |
2d3058a4aa532993bfcd7f92d15396b145222b8b | 4,020 | py | Python | gps_navigation/gps_nav.py | KingKorb/RobotNinjas | 5737483056198b80d324836319a841146333e0dd | [
"MIT"
] | null | null | null | gps_navigation/gps_nav.py | KingKorb/RobotNinjas | 5737483056198b80d324836319a841146333e0dd | [
"MIT"
] | 1 | 2022-02-11T04:22:57.000Z | 2022-02-21T23:40:59.000Z | gps_navigation/gps_nav.py | KingKorb/RobotNinjas | 5737483056198b80d324836319a841146333e0dd | [
"MIT"
] | 1 | 2022-03-17T22:00:18.000Z | 2022-03-17T22:00:18.000Z | import RPi.GPIO as GPIO
import pymap3d as pm
import numpy as np
import serial
import time
# setup PWM pins
# BCM pin numbers for the two motor channels:
# PWM = speed pin, D = direction pin, E = enable pin.
Motor1PWM = 12
Motor1D = 24
Motor1E= 22
Motor2PWM = 13
Motor2D = 25
Motor2E= 23
GPIO.setmode(GPIO.BCM)  # address pins by Broadcom (BCM) numbering
GPIO.setup(Motor1PWM, GPIO.OUT)
GPIO.setup(Motor1D, GPIO.OUT)
GPIO.setup(Motor1E, GPIO.OUT)
GPIO.setup(Motor2PWM, GPIO.OUT)
GPIO.setup(Motor2D, GPIO.OUT)
GPIO.setup(Motor2E, GPIO.OUT)
# 1 kHz software PWM on both speed pins, started at 0% duty (motors idle).
pwm = GPIO.PWM(Motor1PWM, 1000)
pwm2 = GPIO.PWM(Motor2PWM, 1000)
pwm.start(0)
pwm2.start(0)
def forward():
    """Drive both motors forward at 80% duty (Motor1D LOW, Motor2D HIGH)."""
    print ("Moving Forward")
    # NOTE(review): writing the PWM pin HIGH while GPIO.PWM drives the same
    # pin looks suspect -- the other motion functions leave it commented out.
    GPIO.output(Motor1PWM,GPIO.HIGH)
    GPIO.output(Motor1D,GPIO.LOW)
    GPIO.output(Motor1E,GPIO.HIGH)
    pwm.ChangeDutyCycle(80)
    GPIO.output(Motor2PWM,GPIO.HIGH)
    GPIO.output(Motor2D,GPIO.HIGH)
    GPIO.output(Motor2E,GPIO.HIGH)
    pwm2.ChangeDutyCycle(80)
    #time.sleep(2)
def backward():
    """Drive both motors in reverse at 80% duty (directions flipped
    relative to forward())."""
    print ("Moving Backwards")
    #GPIO.output(Motor1PWM,GPIO.HIGH)
    GPIO.output(Motor1D,GPIO.HIGH)
    GPIO.output(Motor1E,GPIO.HIGH)
    pwm.ChangeDutyCycle(80)
    #GPIO.output(Motor2PWM,GPIO.HIGH)
    GPIO.output(Motor2D,GPIO.LOW)
    GPIO.output(Motor2E,GPIO.HIGH)
    pwm2.ChangeDutyCycle(80)
    #time.sleep(2)
def right():
    """Turn right: both direction pins LOW at 60% duty.

    NOTE(review): presumably this spins the wheels in opposition given the
    forward() pin states -- confirm against the wiring.
    """
    print ("Moving Right")
    #GPIO.output(Motor1PWM,GPIO.HIGH)
    GPIO.output(Motor1D,GPIO.LOW)
    GPIO.output(Motor1E,GPIO.HIGH)
    pwm.ChangeDutyCycle(60)
    #GPIO.output(Motor2PWM,GPIO.HIGH)
    GPIO.output(Motor2D,GPIO.LOW)
    GPIO.output(Motor2E,GPIO.HIGH)
    pwm2.ChangeDutyCycle(60)
    #time.sleep(2)
def left():
    """Turn left: both direction pins HIGH at 60% duty (mirror of right())."""
    print ("Moving Left")
    #GPIO.output(Motor1PWM,GPIO.HIGH)
    GPIO.output(Motor1D,GPIO.HIGH)
    GPIO.output(Motor1E,GPIO.HIGH)
    pwm.ChangeDutyCycle(60)
    #GPIO.output(Motor2PWM,GPIO.HIGH)
    GPIO.output(Motor2D,GPIO.HIGH)
    GPIO.output(Motor2E,GPIO.HIGH)
    pwm2.ChangeDutyCycle(60)
def stop():
    """Halt the drive by pulling both motor enable pins LOW."""
    print ("Now stop")
    GPIO.output(Motor1E,GPIO.LOW)
    GPIO.output(Motor2E,GPIO.LOW)
# setup serial communication
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)
ser.reset_input_buffer()
# Waypoints in degrees. Fixed a typo in the 6th latitude (was 3508343,
# clearly meant 35.08343 -- every other waypoint is in the 35.083xx range).
target_lat = [35.08331, 35.08335, 35.08331, 35.08343, 35.08343, 35.08343, 35.08341]
target_lon = [-92.45882, -92.45871, -92.45862, -92.45859, -92.45867, -92.45879, -92.45881]
i = 0  # index of the waypoint currently being driven to
try:
    while True:
        if ser.in_waiting > 0:
            # read gps coords and facing angle (relative to east)
            line = ser.readline().decode('utf-8').rstrip().split(',')
            Lat = float(line[0])
            Long = float(line[1])
            Ori = float(line[2])
            if i >= len(target_lat):
                # All waypoints visited: halt instead of raising IndexError.
                stop()
                break
            # convert target coords in Geodetic frame to ENU frame. Robot's
            # location is the origin (altitude held constant at 95.0976 m).
            x, y, z = pm.geodetic2enu(target_lat[i], target_lon[i], 95.0976, Lat, Long, 95.0976)
            # compute pointing angle adjustment
            theta = np.deg2rad(Ori)  # robot facing angle
            # Wrap into [-pi, pi). The original second branch
            # (elif theta > 2*pi) was unreachable: any such value was
            # already caught by the theta > pi test.
            theta = (theta + np.pi) % (2 * np.pi) - np.pi
            phe = np.arctan2(y, x)  # angle of robot-target line vs. East
            del_theta = phe - theta  # robot facing angle adjustment
            # Wrap the correction too, so a target just across the +/-pi
            # seam does not trigger a near-full spin.
            del_theta = (del_theta + np.pi) % (2 * np.pi) - np.pi
            if del_theta > np.pi / 6:  # change facing angle if off too much
                left()
            elif del_theta < -np.pi / 6:
                right()
            else:
                forward()  # otherwise, go straight ahead
            # detect if robot within the range of target
            if np.sqrt(x**2 + y**2) < 2:  # tweak this radius (m) to switch waypoints earlier/later
                i += 1
            # debugging prints
            print(f'current target: {i+1}')
            print(f'distance to target: {np.linalg.norm((x,y))} m')
            print(f'robot pose (Geodetic): {Lat} deg, {Long} deg, {Ori} deg')
            print(f'target location: {x, y} m')
            print(f'angle offset: {np.rad2deg(del_theta)} deg')
            print('---\n')
            time.sleep(.05)
except KeyboardInterrupt:  # ctrl-c to stop robot
    GPIO.cleanup()
| 31.40625 | 107 | 0.625124 |
2cad29bb85d1265c0388bb5a258279e5a438b87b | 7,645 | py | Python | dap/tests/test_tf_utils.py | isabella232/differentiable-atomistic-potentials | f08c6e93aaf2706c0c5ac9b59eebd132d3e443fb | [
"Apache-2.0"
] | 53 | 2018-01-24T03:05:59.000Z | 2021-10-13T23:10:28.000Z | dap/tests/test_tf_utils.py | Senhongl/differentiable-atomistic-potentials | f08c6e93aaf2706c0c5ac9b59eebd132d3e443fb | [
"Apache-2.0"
] | 1 | 2021-09-05T21:00:00.000Z | 2021-09-05T21:00:00.000Z | dap/tests/test_tf_utils.py | Senhongl/differentiable-atomistic-potentials | f08c6e93aaf2706c0c5ac9b59eebd132d3e443fb | [
"Apache-2.0"
] | 15 | 2018-01-24T03:20:58.000Z | 2021-09-05T10:09:17.000Z | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow module."""
import itertools
import numpy as np
import tensorflow as tf
from dap.tf.utils import (tri, triu_indices, tril_indices, triu_indices_from,
tril_indices_from, combinations,
slices_values_to_sparse_tensor)
import os
# Silence TensorFlow's native logging (3 = errors only) before tests run.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class TestTFUtils_tri(tf.test.TestCase):
    """Verify dap's `tri` matches `np.tri` for square/non-square shapes and
    diagonal offsets.

    Fix: `np.bool` replaced with builtin `bool` -- the alias was deprecated
    in NumPy 1.20 and removed in NumPy 1.24; behavior is identical.
    """

    def test_tri(self):
        npt = np.tri(3, dtype=bool)
        tft = tri(3)
        with self.test_session():
            self.assertTrue(np.all(npt == tft.eval()))

    def test_above(self):
        # Diagonal shifted one above the main diagonal.
        npt = np.tri(3, k=1, dtype=bool)
        tft = tri(3, k=1)
        with self.test_session():
            self.assertTrue(np.all(npt == tft.eval()))

    def test_below(self):
        # Diagonal shifted one below the main diagonal.
        npt = np.tri(3, k=-1, dtype=bool)
        tft = tri(3, k=-1)
        with self.test_session():
            self.assertTrue(np.all(npt == tft.eval()))

    def test_notsquare(self):
        npt = np.tri(3, 4, dtype=bool)
        tft = tri(3, 4)
        with self.test_session():
            self.assertTrue(np.all(npt == tft.eval()))

    def test_notsquare_above(self):
        npt = np.tri(3, 4, k=1, dtype=bool)
        tft = tri(3, 4, k=1)
        with self.test_session():
            self.assertTrue(np.all(npt == tft.eval()))

    def test_notsquare_below(self):
        npt = np.tri(3, 4, k=-1, dtype=bool)
        tft = tri(3, 4, k=-1)
        with self.test_session():
            self.assertTrue(np.all(npt == tft.eval()))
class TestTFUtils_triu(tf.test.TestCase):
    """Compare dap's `triu_indices` against `np.triu_indices` for square,
    diagonal-offset (k) and non-square (m) cases."""

    def test_triu(self):
        npu = np.triu_indices(3)
        r0, r1 = triu_indices(3)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_triu_k_over(self):
        # Diagonal shifted one above the main diagonal.
        npu = np.triu_indices(3, k=1)
        r0, r1 = triu_indices(3, k=1)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_triu_k_under(self):
        # Diagonal shifted one below the main diagonal.
        npu = np.triu_indices(3, k=-1)
        r0, r1 = triu_indices(3, k=-1)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_triu_nonsquare(self):
        # Wide matrix: 3 rows, 4 columns.
        npu = np.triu_indices(3, m=4)
        r0, r1 = triu_indices(3, m=4)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_triu_nonsquare_long(self):
        # Tall matrix: 3 rows, 2 columns.
        npu = np.triu_indices(3, m=2)
        r0, r1 = triu_indices(3, m=2)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))
class TestTFUtils_tril(tf.test.TestCase):
    """Compare dap's `tril_indices` against `np.tril_indices` for square,
    diagonal-offset (k) and non-square (m) cases."""

    def test_tril(self):
        npu = np.tril_indices(3)
        r0, r1 = tril_indices(3)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_tril_k_over(self):
        # Diagonal shifted one above the main diagonal.
        npu = np.tril_indices(3, k=1)
        r0, r1 = tril_indices(3, k=1)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_tril_k_under(self):
        # Diagonal shifted one below the main diagonal.
        npu = np.tril_indices(3, k=-1)
        r0, r1 = tril_indices(3, k=-1)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_tril_nonsquare(self):
        # Wide matrix: 3 rows, 4 columns.
        npu = np.tril_indices(3, m=4)
        r0, r1 = tril_indices(3, m=4)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))

    def test_tril_nonsquare_long(self):
        # Tall matrix: 3 rows, 2 columns.
        npu = np.tril_indices(3, m=2)
        r0, r1 = tril_indices(3, m=2)
        with self.test_session():
            self.assertTrue(np.all(npu[0] == r0.eval()))
            self.assertTrue(np.all(npu[1] == r1.eval()))
class TestTFUtils_triu_indices_from(tf.test.TestCase):
    """`triu_indices_from` should mirror `np.triu_indices_from`, including
    rejecting non-2D input with ValueError."""

    def test_triu_indices_from(self):
        a = np.zeros((3, 3))
        ref1, ref2 = np.triu_indices_from(a)
        tref1, tref2 = triu_indices_from(a)
        with self.test_session():
            self.assertTrue(np.all(ref1 == tref1.eval()))
            self.assertTrue(np.all(ref2 == tref2.eval()))

    def test_triu_indices_from_kover(self):
        a = np.zeros((3, 3))
        ref1, ref2 = np.triu_indices_from(a, k=1)
        tref1, tref2 = triu_indices_from(a, k=1)
        with self.test_session():
            self.assertTrue(np.all(ref1 == tref1.eval()))
            self.assertTrue(np.all(ref2 == tref2.eval()))

    def test_triu_indices_from_kunder(self):
        a = np.zeros((3, 3))
        ref1, ref2 = np.triu_indices_from(a, k=-1)
        tref1, tref2 = triu_indices_from(a, k=-1)
        with self.test_session():
            self.assertTrue(np.all(ref1 == tref1.eval()))
            self.assertTrue(np.all(ref2 == tref2.eval()))

    def test_triu_indices_from_non2d(self):
        # A rank-3 array has no well-defined upper triangle.
        a = np.zeros((3, 3, 3))
        with self.test_session():
            with self.assertRaises(ValueError):
                triu_indices_from(a)
class TestTFUtils_tril_indices_from(tf.test.TestCase):
    """`tril_indices_from` should mirror `np.tril_indices_from`, including
    rejecting non-2D input with ValueError."""

    def test_tril_indices_from(self):
        a = np.zeros((3, 3))
        ref1, ref2 = np.tril_indices_from(a)
        tref1, tref2 = tril_indices_from(a)
        with self.test_session():
            self.assertTrue(np.all(ref1 == tref1.eval()))
            self.assertTrue(np.all(ref2 == tref2.eval()))

    def test_tril_indices_from_kover(self):
        a = np.zeros((3, 3))
        ref1, ref2 = np.tril_indices_from(a, k=1)
        tref1, tref2 = tril_indices_from(a, k=1)
        with self.test_session():
            self.assertTrue(np.all(ref1 == tref1.eval()))
            self.assertTrue(np.all(ref2 == tref2.eval()))

    def test_tril_indices_from_kunder(self):
        a = np.zeros((3, 3))
        ref1, ref2 = np.tril_indices_from(a, k=-1)
        tref1, tref2 = tril_indices_from(a, k=-1)
        with self.test_session():
            self.assertTrue(np.all(ref1 == tref1.eval()))
            self.assertTrue(np.all(ref2 == tref2.eval()))

    def test_tril_indices_from_non2d(self):
        # A rank-3 array has no well-defined lower triangle.
        a = np.zeros((3, 3, 3))
        with self.test_session():
            with self.assertRaises(ValueError):
                tril_indices_from(a)
class TestTFUtils_combinations(tf.test.TestCase):
    """`combinations` should reproduce itertools.combinations for 1-D input
    and reject non-1D input with ValueError."""

    def test_combinations_2(self):
        a = [0, 1, 2, 3, 4]
        # Check both pair and triple combinations.
        for k in [2, 3]:
            combs = np.array(list(itertools.combinations(a, k)))
            with self.test_session():
                tf_combs = combinations(a, k).eval()
                self.assertTrue(np.all(combs == tf_combs))

    def test_combinations_non1d(self):
        # Nested (2-D) input is invalid.
        a = [[0, 1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            with self.test_session():
                combinations(a, 2).eval()
class TestTFUtils_slices(tf.test.TestCase):
    """`slices_values_to_sparse_tensor` should rebuild per-row top-k slices
    of a dense matrix as the equivalent SparseTensor."""

    def test(self):
        arr = [[1, 2, 3], [3, 2, 1], [2, 1, 3]]
        k = 2
        # Values/indices of the 2 largest entries in each row.
        kv, ki = tf.nn.top_k(arr, k)
        st = slices_values_to_sparse_tensor(ki, kv, (3, 3))
        # Hand-built expected sparse tensor for comparison.
        ref = tf.SparseTensor([[0, 1], [0, 2], [1, 0], [1, 1], [2, 0], [2, 2]],
                              [2, 3, 3, 2, 2, 3], (3, 3))
        dst = tf.sparse_tensor_to_dense(st, validate_indices=False)
        dref = tf.sparse_tensor_to_dense(
            ref,
            validate_indices=False,
        )
        with self.test_session():
            self.assertTrue(np.all((tf.equal(dst, dref).eval())))
| 30.217391 | 77 | 0.632832 |
1665a34a2c00683166f525fe392ddae3019c8e44 | 2,739 | py | Python | test/acceptance/collision_test.py | seomoz/bloomfilter-py | 88b7f752c15a029c4bb86322496a74a6316e953a | [
"MIT"
] | 2 | 2017-03-06T08:28:49.000Z | 2017-08-07T18:22:09.000Z | test/acceptance/collision_test.py | seomoz/bloomfilter-py | 88b7f752c15a029c4bb86322496a74a6316e953a | [
"MIT"
] | 6 | 2016-12-16T20:38:07.000Z | 2019-11-14T22:37:50.000Z | test/acceptance/collision_test.py | seomoz/bloomfilter-py | 88b7f752c15a029c4bb86322496a74a6316e953a | [
"MIT"
] | 8 | 2016-12-16T19:18:37.000Z | 2020-06-15T18:41:35.000Z | #! /usr/bin/env python
"""Bloom filter collision tests"""
# pylint: disable=invalid-name
import os
import unittest
from bloomfilter import BloomFilter
class TestCollisions(unittest.TestCase):
    """Set of tests to ensure desirable collision rate"""

    def test_non_randoms_at_all(self):
        """Ensure that small bit differences do not play bad"""
        # 1M sequential integers into a filter sized for 1M at 1e-5 FP rate:
        # we require zero collisions.
        bloom_filter = BloomFilter(1000000, 1e-5)
        collision_count = 0
        for ix in range(1000000):
            if bloom_filter.test_by_hash(ix):
                collision_count += 1
            else:
                bloom_filter.add_by_hash(ix)
        self.assertEqual(collision_count, 0)

    def test_objects(self):
        """Ensure that objects work well"""
        # hash of object (with no __hash__) is its address, so it is
        # not overly random
        #
        # Nota Bene!: since memory is reused, there is a real
        # possibility of object hash collisions.
        #
        # For example:
        #     for ix in xrange(1000000):
        #         obj = object()
        # produces objects with exactly two hashes.
        bloom_filter = BloomFilter(1000000, 1e-5)
        collision_count = 0
        # Hold all objects alive at once so their addresses stay distinct.
        objects = [object() for _ in range(1000000)]
        for obj in objects:
            if bloom_filter.test_by_hash(obj):
                collision_count += 1
            else:
                bloom_filter.add_by_hash(obj)
        self.assertEqual(collision_count, 0)

    def test_words(self):
        """Ensure that strings work well"""
        vocabulary = self.load_words("words")
        test_words = self.load_words("testwords")
        bloom_filter = BloomFilter(100000, 1e-4)
        intersection = set(vocabulary) & set(test_words)
        # Phase 1: populate with the vocabulary, tolerating a few collisions.
        setup_collision_count = 0
        for word in vocabulary:
            if bloom_filter.test_by_hash(word):
                setup_collision_count += 1
            else:
                bloom_filter.add_by_hash(word)
        self.assertLess(setup_collision_count, 5)
        # Phase 2: query the test words -- false negatives are forbidden,
        # false positives are bounded.
        false_positive_count = 0
        false_negative_count = 0
        for word in test_words:
            if word in intersection:
                if not bloom_filter.test_by_hash(word):
                    false_negative_count += 1
            else:
                if bloom_filter.test_by_hash(word):
                    false_positive_count += 1
        self.assertEqual(false_negative_count, 0)
        self.assertLessEqual(false_positive_count, 6)

    def load_words(self, file_name):
        """Load word list from the local file"""
        test_dir = os.path.dirname(__file__)
        with open(os.path.join(test_dir, file_name), "r") as infile:
            return [word for word in infile.read().split("\n") if word]
| 34.2375 | 71 | 0.60387 |
d7957020b1e61e25f3cc663f64817ba88884f448 | 1,496 | py | Python | maximum_depth_of_binary_tree.py | KevinLuo41/LeetCodeInPython | 051e1aab9bab17b0d63b4ca73473a7a00899a16a | [
"Apache-2.0"
] | 19 | 2015-01-19T19:36:09.000Z | 2020-03-18T03:10:12.000Z | maximum_depth_of_binary_tree.py | CodingVault/LeetCodeInPython | 051e1aab9bab17b0d63b4ca73473a7a00899a16a | [
"Apache-2.0"
] | null | null | null | maximum_depth_of_binary_tree.py | CodingVault/LeetCodeInPython | 051e1aab9bab17b0d63b4ca73473a7a00899a16a | [
"Apache-2.0"
] | 12 | 2015-04-25T14:20:38.000Z | 2020-09-27T04:59:59.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
maximum_depth_of_binary_tree.py
Created by Shengwei on 2014-07-15.
"""
# https://oj.leetcode.com/problems/maximum-depth-of-binary-tree/
# tags: easy, tree, dfs, bfs, level-order
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
######### recursive #########
class Solution:
    # @param root, a tree node
    # @return an integer
    def maxDepth(self, root):
        """Return the number of nodes on the longest root-to-leaf path,
        computed by direct recursion on the method itself."""
        if root is None:
            return 0
        return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
######### iterative #########
class Solution:
    # @param root, a tree node
    # @return an integer
    def maxDepth(self, root):
        """Return the maximum depth by walking the tree one level at a
        time (breadth-first), counting how many levels are non-empty."""
        if root is None:
            return 0
        depth = 0
        level = [root]
        while level:
            depth += 1
            # Collect the next level's nodes (truthiness check matches the
            # original's `if node.left:` style).
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return depth
| 23.746032 | 114 | 0.555481 |
1b1fac4e898868876a8ead4fe9ae813d7c4479af | 8,346 | py | Python | astropy/visualization/wcsaxes/tests/test_misc.py | jairideout/astropy | 2534a2dd747da3d50644812ce4faab6d909e7f36 | [
"BSD-3-Clause"
] | null | null | null | astropy/visualization/wcsaxes/tests/test_misc.py | jairideout/astropy | 2534a2dd747da3d50644812ce4faab6d909e7f36 | [
"BSD-3-Clause"
] | 1 | 2018-11-14T14:18:55.000Z | 2020-01-21T10:36:05.000Z | astropy/visualization/wcsaxes/tests/test_misc.py | jairideout/astropy | 2534a2dd747da3d50644812ce4faab6d909e7f36 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
import pytest
import numpy as np
import matplotlib.pyplot as plt
from .... import units as u
from ....wcs import WCS
from ....io import fits
from ....coordinates import SkyCoord
from ....tests.helper import catch_warnings
from ....tests.image_tests import ignore_matplotlibrc
from ..core import WCSAxes
from ..utils import get_coord_meta
from ..transforms import CurvedTransform
# Absolute path to this test module's bundled data directory.
DATA = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
@ignore_matplotlibrc
def test_grid_regression():
    # Regression test for a bug that meant that if the rc parameter
    # axes.grid was set to True, WCSAxes would crash upon initialization.
    plt.rc('axes', grid=True)
    fig = plt.figure(figsize=(3, 3))
    # Constructing the axes is the whole test: it must not raise.
    WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
@ignore_matplotlibrc
def test_format_coord_regression(tmpdir):
    # Regression test for a bug that meant that if format_coord was called by
    # Matplotlib before the axes were drawn, an error occurred.
    fig = plt.figure(figsize=(3, 3))
    ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
    fig.add_axes(ax)
    # Before any draw, the formatters return empty strings instead of raising.
    assert ax.format_coord(10, 10) == ""
    assert ax.coords[0].format_coord(10) == ""
    assert ax.coords[1].format_coord(10) == ""
    # Saving triggers a draw, after which real coordinates are reported.
    fig.savefig(tmpdir.join('nothing').strpath)
    assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
    assert ax.coords[0].format_coord(10) == "10.0"
    assert ax.coords[1].format_coord(10) == "10.0"
# Mollweide (RA---MOL / DEC--MOL) all-sky header used by several tests below.
# NOTE(review): FITS card spacing appears collapsed here; verify against the
# upstream file (Header.fromstring with sep='\n' is lenient about padding).
TARGET_HEADER = fits.Header.fromstring("""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""", sep='\n')
@ignore_matplotlibrc
def test_no_numpy_warnings(tmpdir):
    # Make sure that no warnings are raised if some pixels are outside WCS
    # (since this is normal)
    ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
    ax.imshow(np.zeros((100, 200)))
    ax.coords.grid(color='white')
    with catch_warnings(RuntimeWarning) as ws:
        plt.savefig(tmpdir.join('test.png').strpath)
    # For debugging
    for w in ws:
        print(w)
    assert len(ws) == 0
@ignore_matplotlibrc
def test_invalid_frame_overlay():
    """An unknown frame name must produce a clear ValueError, both from
    ``get_coords_overlay`` and from ``get_coord_meta``."""
    axes = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
    with pytest.raises(ValueError) as exc_info:
        axes.get_coords_overlay('banana')
    assert exc_info.value.args[0] == 'Unknown frame: banana'
    with pytest.raises(ValueError) as exc_info:
        get_coord_meta('banana')
    assert exc_info.value.args[0] == 'Unknown frame: banana'
@ignore_matplotlibrc
def test_plot_coord_transform():
    """``plot_coord`` infers the transform from the coordinate object, so
    passing an explicit ``transform`` keyword must raise a TypeError."""
    header_path = os.path.join(DATA, '2MASS_k_header')
    header = fits.Header.fromtextfile(header_path)
    figure = plt.figure(figsize=(6, 6))
    axes = figure.add_axes([0.15, 0.15, 0.8, 0.8],
                           projection=WCS(header),
                           aspect='equal')
    axes.set_xlim(-0.5, 720.5)
    axes.set_ylim(-0.5, 720.5)
    coord = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
    with pytest.raises(TypeError):
        axes.plot_coord(coord, 'o', transform=axes.get_transform('galactic'))
@ignore_matplotlibrc
def test_set_label_properties():
    """Regression test: keyword arguments given to set_xlabel/set_ylabel
    must be forwarded to the underlying coordinate helpers."""
    axes = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
    axes.set_xlabel('Test x label', labelpad=2, color='red')
    axes.set_ylabel('Test y label', labelpad=3, color='green')
    x_labels = axes.coords[0].axislabels
    assert x_labels.get_text() == 'Test x label'
    assert x_labels.get_minpad('b') == 2
    assert x_labels.get_color() == 'red'
    y_labels = axes.coords[1].axislabels
    assert y_labels.get_text() == 'Test y label'
    assert y_labels.get_minpad('l') == 3
    assert y_labels.get_color() == 'green'
# FITS header for a 3D cube (distance modulus x Galactic longitude x
# Galactic latitude) in a plate carree ('CAR') projection; used to test
# slicing of cubes with angular axes.
GAL_HEADER = fits.Header.fromstring("""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""", sep='\n')
@ignore_matplotlibrc
def test_slicing_warnings(tmpdir):
    """Regression test: the tick locator for the sliced axis of a cube must
    not emit warnings, for scalar and angular sliced axes alike."""
    filename = tmpdir.join('test.png').strpath

    def assert_no_warnings(wcs, index):
        # Plot the sliced cube and fail if anything was warned about.
        with warnings.catch_warnings(record=True) as caught:
            warnings.resetwarnings()
            plt.subplot(1, 1, 1, projection=wcs, slices=('x', 'y', index))
            plt.savefig(filename)
        # For easy debugging if there are indeed warnings
        for warning in caught:
            print(warning)
        assert len(caught) == 0

    # Scalar sliced axis
    wcs3d = WCS(naxis=3)
    wcs3d.wcs.ctype = ['x', 'y', 'z']
    wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
    wcs3d.wcs.crpix = [614.5, 856.5, 333]
    wcs3d.wcs.cdelt = [6.25, 6.25, 23]
    wcs3d.wcs.crval = [0., 0., 1.]
    assert_no_warnings(wcs3d, 1)

    # Angular sliced axis
    assert_no_warnings(WCS(GAL_HEADER), 2)
def test_plt_xlabel_ylabel(tmpdir):
    """Regression test for a bug that happened when using plt.xlabel
    and plt.ylabel with Matplotlib 3.0."""
    plt.subplot(projection=WCS())
    plt.xlabel('Galactic Longitude')
    plt.ylabel('Galactic Latitude')
    plt.savefig(tmpdir.join('test.png').strpath)
def test_grid_type_contours_transform(tmpdir):
    """Regression test for a bug that caused grid_type='contours' to not
    work with custom transforms."""
    class CustomTransform(CurvedTransform):
        # We deliberately don't define the inverse, and has_inverse should
        # default to False.
        def transform(self, values):
            # Arbitrary invertible-looking scaling; only the forward
            # direction is needed for contour grids.
            return values * 1.3
    transform = CustomTransform()
    coord_meta = {'type': ('scalar', 'scalar'),
                  'unit': (u.m, u.s),
                  'wrap': (None, None),
                  'name': ('x', 'y')}
    fig = plt.figure()
    ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8],
                 transform=transform, coord_meta=coord_meta)
    fig.add_axes(ax)
    # Drawing the grid used to crash here for custom transforms.
    ax.grid(grid_type='contours')
    fig.savefig(tmpdir.join('test.png').strpath)
def test_plt_imshow_origin():
    """Regression test: plt.imshow on a WCSAxes must keep origin='lower'
    instead of silently switching to 'upper'."""
    axes = plt.subplot(projection=WCS())
    plt.imshow(np.ones((2, 2)))
    # With origin='lower' both limits run from -0.5 to 1.5.
    assert axes.get_xlim() == (-0.5, 1.5)
    assert axes.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
    """Regression test: ax.imshow with no explicit origin must default to
    origin='lower' on a WCSAxes."""
    axes = plt.subplot(projection=WCS())
    axes.imshow(np.ones((2, 2)))
    # With origin='lower' both limits run from -0.5 to 1.5.
    assert axes.get_xlim() == (-0.5, 1.5)
    assert axes.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmpdir):
    """Regression test: calling grid() again after a call that produced no
    grid lines (e.g. because the tick spacing was too large) used to crash."""
    filename = tmpdir.join('test.png').strpath
    axes = plt.subplot(projection=WCS())
    axes.set_xlim(-0.5, 1.5)
    axes.set_ylim(-0.5, 1.5)
    # No tick values at all, so no grid lines can be drawn.
    axes.coords[0].set_ticks(values=[] * u.one)
    for _ in range(2):
        # The second iteration exercised the crash.
        axes.coords[0].grid(grid_type='contours')
        plt.savefig(filename)
| 29.701068 | 77 | 0.624251 |
cc6e34eb07b7196d10b4cbf77fe2c4b19f85e6e3 | 191 | py | Python | turtle/tkHex.py | shilpasayura/python | 8f3b2432f972c9aeb4b04e2141ea4abf2437762c | [
"MIT"
] | 1 | 2021-10-07T15:15:01.000Z | 2021-10-07T15:15:01.000Z | turtle/tkHex.py | shilpasayura/python | 8f3b2432f972c9aeb4b04e2141ea4abf2437762c | [
"MIT"
] | null | null | null | turtle/tkHex.py | shilpasayura/python | 8f3b2432f972c9aeb4b04e2141ea4abf2437762c | [
"MIT"
] | null | null | null | import tkinter as tk
import turtle
# Minimal Tk + turtle demo: draws a red outline by moving forward and
# turning left 60 degrees on each iteration.
root = tk.Tk()
turtle.color("red")
turtle.shape("turtle")
# NOTE(review): 60-degree turns close a hexagon after 6 sides; the 7th
# iteration retraces the first side. Confirm whether range(6) was meant.
for i in range(7):
    turtle.forward(100)  # side length in pixels
    turtle.left(60)
root.mainloop()
| 11.235294 | 31 | 0.659686 |
f418e26c552657a3eca3e2fee4daaf82f74a83ef | 8,145 | py | Python | cogs/botinfo.py | Code-Cecilia/botman-rewrite | 9d8baeebf267c62df975d2f209e85589b81934af | [
"MIT"
] | 2 | 2022-02-21T14:10:15.000Z | 2022-02-21T14:10:50.000Z | cogs/botinfo.py | Code-Cecilia/botman-rewrite | 9d8baeebf267c62df975d2f209e85589b81934af | [
"MIT"
] | null | null | null | cogs/botinfo.py | Code-Cecilia/botman-rewrite | 9d8baeebf267c62df975d2f209e85589b81934af | [
"MIT"
] | null | null | null | import os
import platform
import random
import subprocess
import time
import discord
import psutil
from discord.ext import commands
from assets import random_assets, time_assets
from assets.discord_funcs import get_color, get_avatar_url
from assets.file_handling import count_lines
class BotInfo(commands.Cog, description="Information on various aspects of the bot."):
    """Commands that report diagnostics about the bot and its host machine."""

    def __init__(self, bot):
        self.bot = bot
        # NOTE(review): startTime appears unused — the commands below read
        # self.bot.start_time instead. Confirm before removing.
        self.startTime = time.monotonic()

    @commands.command(name='ping', description='Returns the latency in milliseconds.')
    async def ping_command(self, ctx):
        """Report the current websocket latency rounded to two decimals."""
        latency = float(self.bot.latency) * 1000
        latency = round(latency, 2)  # convert to float with 2 decimal places
        await ctx.send(f'Pong! `Latency: {latency}ms`')

    @commands.command(name="vote", description="Vote for BotMan on top.gg!")
    async def vote_topgg(self, ctx):
        """Send an embed linking to the bot's voting page on top.gg."""
        embed = discord.Embed(title=f"{ctx.author.display_name}, you can vote for me here!",
                              description="__[Link to my (very own) page!]("
                                          "https://top.gg/bot/845225811152732179/vote)__",
                              color=discord.Color.blue())
        embed.set_footer(
            text=f"It's the gesture that counts first, so thanks a lot, {ctx.author.name}!")
        await ctx.send(embed=embed)

    @commands.command(name='countlines', aliases=['countline'], description='Counts the number of lines of python code '
                                                                            'the bot currently has.')
    async def countlines_func(self, ctx):
        """Count lines of code per directory and report them in an embed."""
        total_lines = count_lines('./')
        asset_lines = count_lines('./assets')
        cog_lines = count_lines('./cogs')
        text_lines = count_lines('.', file_extensions=['txt', 'md', 'rtf'])
        # Everything outside assets/, cogs/ and the virtualenv.
        misc_lines = count_lines('.', blacklisted_dirs=['assets', 'cogs', 'venv'])
        embed = discord.Embed(title=random.choice(random_assets.countlines_responses).format(total_lines),
                              color=get_color(ctx.author))
        embed.add_field(name='Assets', value=f"{asset_lines} lines", inline=True)
        embed.add_field(name='Cogs', value=f"{cog_lines} lines", inline=True)
        embed.add_field(name='Miscellaneous', value=f"{misc_lines} lines", inline=True)
        embed.set_footer(text=f"I also have {text_lines} lines of text-file documentation, apparently.")
        await ctx.send(embed=embed)

    @commands.command(name='botinfo', aliases=['clientinfo', 'botstats'],
                      description='Returns information about the bot.')
    async def stats(self, ctx):
        """Send an embed with library version, counts, uptime and links."""
        pycord_version = discord.__version__
        server_count = len(self.bot.guilds)
        member_count = len(set(self.bot.get_all_members()))  # returns a list, so we're getting the length of that list
        latency = float(self.bot.latency) * 1000
        latency = f"{int(latency)} ms"  # integer is good enough in this case
        source = "__[Github](https://github.com/Mahas1/BotMan.py)__"
        cecilia_link = f"__[Code Cecilia](https://github.com/Mahas1/)__"

        # Break the uptime down into days/hours/minutes/seconds.
        now = time.monotonic()
        uptime_seconds = int(now - self.bot.start_time)
        m, s = divmod(uptime_seconds, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)

        embed = discord.Embed(title=f'{self.bot.user.name} Stats', description='\uFEFF',
                              color=get_color(ctx.guild.me),
                              timestamp=ctx.message.created_at)
        embed.description = f"I am made of {len(self.bot.commands)} commands across {len(self.bot.cogs)} cogs!"
        embed.set_thumbnail(url=get_avatar_url(self.bot.user))
        embed.add_field(name='PyCord version', value=pycord_version, inline=True)
        embed.add_field(name='Server Count',
                        value=str(server_count), inline=True)
        embed.add_field(name='Member Count', value=str(
            member_count), inline=True)
        embed.add_field(name='Latency', value=str(latency), inline=True)
        embed.add_field(
            name="Uptime", value=f"{d}d, {h}h, {m}m, {s}s", inline=True)
        embed.add_field(name='Talk to my maker!',
                        value="__[MTank.exe](https://discord.com/users/775176626773950474)__", inline=True)
        embed.add_field(name="Source Code", value=source, inline=True)
        embed.add_field(name="Parent Organization", value=cecilia_link, inline=True)
        embed.add_field(name="Found an issue?",
                        value="__[Report Here!](https://github.com/Mahas1/BotMan.py/issues)__", inline=True)
        embed.add_field(name='Invite Me!',
                        value=f"__[Link To Invite](https://discord.com/api/oauth2/authorize?client_id"
                              f"=848529420716867625&permissions=261993005047&scope=applications.commands%20bot)__",
                        inline=True)
        embed.add_field(name="Support Server",
                        value="__[Link To Server](https://discord.gg/8gUVYtT4cW)__", inline=True)
        embed.set_footer(
            text=f"Requested by {ctx.author}", icon_url=get_avatar_url(ctx.author))
        await ctx.send(embed=embed)

    @commands.command(name="uptime")
    async def get_uptime(self, ctx):
        """How long have I been awake?"""
        now = time.monotonic()
        uptime_seconds = int(now - self.bot.start_time)
        time_string = time_assets.pretty_time_from_seconds(uptime_seconds)
        embed = discord.Embed(title="I have been awake for:", description=time_string,
                              color=get_color(self.bot.user))
        embed.set_footer(text=random.choice(random_assets.uptime_footers))
        await ctx.send(embed=embed)

    @commands.command(name="hostinfo", description="Returns information about my host.")
    async def hostinfo(self, ctx):
        """Send an embed describing the host: OS, CPU, and RAM usage."""
        system = platform.uname()
        cpu_usage = psutil.cpu_percent()
        memstats = psutil.virtual_memory()
        mem_used_gb = "{0:.1f}".format(((memstats.used / 1024) / 1024) / 1024)  # Thanks CorpNewt
        mem_total_gb = "{0:.1f}".format(((memstats.total / 1024) / 1024) / 1024)
        processor = str(system.processor) if str(system.processor) != "" else "N/A"
        try:
            processor_freq = int(list(psutil.cpu_freq())[0])
        except Exception:
            # psutil.cpu_freq() can fail or return nothing on some
            # platforms; was a bare except, narrowed so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            processor_freq = None
        embed = discord.Embed(title=f"Host Name: {system.node}",
                              description=f"Platform: {system.system} {system.release}",
                              color=get_color(ctx.guild.me))
        embed.add_field(name="Machine Type", value=system.machine, inline=True)
        embed.add_field(name="CPU", value=processor, inline=True)
        if processor_freq:
            embed.add_field(name="CPU Frequency", value=f"{processor_freq} MHz", inline=True)
        embed.add_field(name="CPU Usage", value=f"{cpu_usage}%", inline=True)
        embed.add_field(name="CPU Threads", value=str(os.cpu_count()),
                        inline=True)
        embed.add_field(name="RAM Usage", value=f"{mem_used_gb} GB of {mem_total_gb} GB ({memstats.percent}%)",
                        inline=True)
        await ctx.send(embed=embed)

    @commands.command(name="neofetch")
    async def neofetch(self, ctx):
        """Runs neofetch on the host."""
        await ctx.trigger_typing()
        output = subprocess.run("neofetch --stdout", shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if output.returncode != 0:
            return await ctx.send("Neofetch is not installed in my host machine :(")
        text_we_need = "\n".join(output.stdout.decode("utf-8").split("\n")[2:])
        # split the output into lines and then remove the first two lines, which have the host's name and username
        embed = discord.Embed(title="Neofetch", description=f"```\n{text_we_need[:1992]}\n```",
                              color=get_color(ctx.author))
        await ctx.send(embed=embed)
def setup(bot):
    # Entry point used by discord.py's extension loader to register the cog.
    bot.add_cog(BotInfo(bot))
| 53.235294 | 120 | 0.621363 |
ab1a9cc300965dbf061346c7d20d6fec1d17b229 | 2,818 | py | Python | mars/learn/contrib/joblib/backend.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | [
"Apache-2.0"
] | 2 | 2019-03-29T04:11:10.000Z | 2020-07-08T10:19:54.000Z | mars/learn/contrib/joblib/backend.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | [
"Apache-2.0"
] | null | null | null | mars/learn/contrib/joblib/backend.py | HarshCasper/mars | 4c12c968414d666c7a10f497bc22de90376b1932 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
from .... import remote
from ....session import Session, new_session
try:
from joblib.parallel import ParallelBackendBase, AutoBatchingMixin, \
register_parallel_backend
except ImportError:
ParallelBackendBase = object
AutoBatchingMixin = object
register_parallel_backend = None
class MarsDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
    """joblib parallel backend that executes task batches on a Mars cluster.

    Note: when joblib is not installed, the fallback above makes both base
    classes plain ``object``, so this class is only usable with joblib.
    """

    # Bounds (in seconds) for joblib's auto-batching heuristic.
    MIN_IDEAL_BATCH_DURATION = 0.2
    MAX_IDEAL_BATCH_DURATION = 1.0

    # joblib may pass a timeout to the futures returned by apply_async.
    supports_timeout = True

    def __init__(self, service=None, session=None, backend=None, n_parallel=None):
        """Create the backend around an explicit session, a service address,
        or the process-default Mars session."""
        super().__init__()
        if session is None:
            if service is not None:
                # Connect a new session to the given Mars service endpoint.
                self.session = new_session(service, backend=backend)
            else:
                # Fall back to the ambient default (or local) session.
                self.session = Session.default_or_local()
        else:
            self.session = session
        self.n_parallel = n_parallel or 1
        # Thread pool created lazily in configure().
        self.executor = None

    def get_nested_backend(self):
        # Nested parallelism reuses the same session; -1 means "inherit
        # joblib's default nesting level".
        return MarsDistributedBackend(session=self.session), -1

    def configure(self, n_jobs=1, parallel=None, **backend_args):
        """Set up the thread pool used to submit batches; returns the
        effective parallelism."""
        self.parallel = parallel
        n_parallel = self.effective_n_jobs(n_jobs)
        self.executor = concurrent.futures.ThreadPoolExecutor(n_parallel)
        return n_parallel

    def effective_n_jobs(self, n_jobs):
        # n_jobs == -1 means "use all workers": prefer the cluster's CPU
        # count, falling back to the configured n_parallel.
        eff_n_jobs = super(MarsDistributedBackend, self).effective_n_jobs(n_jobs)
        if n_jobs == -1:
            eff_n_jobs = self.session.get_cpu_count() or self.n_parallel
        return eff_n_jobs

    def apply_async(self, func, callback=None):
        """Submit a joblib batch; each item is spawned as a Mars remote
        task, executed together, and the results passed to ``callback``."""
        # todo allow execute f() in remote end to reduce data copy latency
        def f():
            spawned = []
            for func_obj, args, kwargs in func.items:
                spawned.append(remote.spawn(func_obj, args=args, kwargs=kwargs))
            ret = remote.ExecutableTuple(spawned) \
                .execute(session=self.session) \
                .fetch(self.session)
            callback(ret)
            return ret

        future = self.executor.submit(f)
        # joblib expects a .get() method (multiprocessing-style API).
        future.get = future.result
        return future
def register_mars_backend():
    """Register this backend with joblib under the name 'mars'.

    Raises:
        ImportError: If joblib is not installed. (The import fallback at
            the top of this module leaves ``register_parallel_backend`` as
            ``None``, which previously produced an opaque
            ``TypeError: 'NoneType' object is not callable``.)
    """
    if register_parallel_backend is None:
        raise ImportError(
            'joblib is required to register the Mars parallel backend')
    register_parallel_backend('mars', MarsDistributedBackend)
| 33.951807 | 82 | 0.681334 |
c63a0cc2ba4313e5ab5926fc894ede7b0d4f89f8 | 70 | py | Python | parking-lot/app.py | akshilv/parking-lot | d720fab769ae700f157bae979b94e6b906147ab8 | [
"Apache-2.0"
] | null | null | null | parking-lot/app.py | akshilv/parking-lot | d720fab769ae700f157bae979b94e6b906147ab8 | [
"Apache-2.0"
] | null | null | null | parking-lot/app.py | akshilv/parking-lot | d720fab769ae700f157bae979b94e6b906147ab8 | [
"Apache-2.0"
] | null | null | null | from parkinglot import ParkingLot
if __name__ == "__main__":
pass | 17.5 | 33 | 0.742857 |
84afab2f163d6f2d83a9d7686cf213e1c2dac5c2 | 114,271 | py | Python | kmip/tests/unit/core/objects/test_objects.py | openstack/deb-python-kmip | f86134878b5f558b39f51e67a6e6ba5a0b03e222 | [
"Apache-2.0"
] | 12 | 2016-09-14T21:59:10.000Z | 2020-03-11T07:37:25.000Z | kmip/tests/unit/core/objects/test_objects.py | openstack/deb-python-kmip | f86134878b5f558b39f51e67a6e6ba5a0b03e222 | [
"Apache-2.0"
] | null | null | null | kmip/tests/unit/core/objects/test_objects.py | openstack/deb-python-kmip | f86134878b5f558b39f51e67a6e6ba5a0b03e222 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import string_types
import testtools
from testtools import TestCase
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import AttributeType
from kmip.core.enums import BlockCipherMode
from kmip.core.enums import HashingAlgorithm as HashingAlgorithmEnum
from kmip.core.enums import KeyRoleType
from kmip.core.enums import PaddingMethod
from kmip.core.enums import Tags
from kmip.core.factories.attributes import AttributeValueFactory
from kmip.core import objects
from kmip.core.objects import Attribute
from kmip.core.objects import ExtensionName
from kmip.core.objects import ExtensionTag
from kmip.core.objects import ExtensionType
from kmip.core.objects import KeyMaterialStruct
from kmip.core import utils
from kmip.core.utils import BytearrayStream
class TestAttributeClass(TestCase):
    """
    A test suite for the Attribute class
    """

    def setUp(self):
        """Build Attribute fixtures that differ from a baseline in exactly
        one field each (name, value, index), plus a reference encoding."""
        super(TestAttributeClass, self).setUp()

        name_a = 'CRYPTOGRAPHIC PARAMETERS'
        name_b = 'CRYPTOGRAPHIC ALGORITHM'

        self.attribute_name_a = Attribute.AttributeName(name_a)
        self.attribute_name_b = Attribute.AttributeName(name_b)

        self.factory = AttributeValueFactory()

        # Two CryptographicParameters values differing only in the block
        # cipher mode (CBC vs CCM).
        self.attribute_value_a = self.factory.create_attribute_value(
            AttributeType.CRYPTOGRAPHIC_PARAMETERS,
            {'block_cipher_mode': BlockCipherMode.CBC,
             'padding_method': PaddingMethod.PKCS5,
             'hashing_algorithm': HashingAlgorithmEnum.SHA_1,
             'key_role_type': KeyRoleType.BDK})

        self.attribute_value_b = self.factory.create_attribute_value(
            AttributeType.CRYPTOGRAPHIC_PARAMETERS,
            {'block_cipher_mode': BlockCipherMode.CCM,
             'padding_method': PaddingMethod.PKCS5,
             'hashing_algorithm': HashingAlgorithmEnum.SHA_1,
             'key_role_type': KeyRoleType.BDK})

        index_a = 2
        index_b = 3
        self.attribute_index_a = Attribute.AttributeIndex(index_a)
        self.attribute_index_b = Attribute.AttributeIndex(index_b)

        # Baseline attribute; the b/c/d variants each change one component.
        self.attributeObj_a = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_a)

        self.attributeObj_b = Attribute(
            attribute_name=self.attribute_name_b,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_a)

        self.attributeObj_c = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_b,
            attribute_index=self.attribute_index_a)

        self.attributeObj_d = Attribute(
            attribute_name=self.attribute_name_a,
            attribute_value=self.attribute_value_a,
            attribute_index=self.attribute_index_b)

        # TTLV encoding of attributeObj_a, used by the read/write tests.
        self.key_req_with_crypt_params = BytearrayStream((
            b'\x42\x00\x08\x01\x00\x00\x00\x78\x42\x00\x0a\x07\x00\x00\x00\x18'
            b'\x43\x52\x59\x50\x54\x4f\x47\x52\x41\x50\x48\x49\x43\x20\x50\x41'
            b'\x52\x41\x4d\x45\x54\x45\x52\x53'
            b'\x42\x00\x09\x02\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00\x00'
            b'\x42\x00\x0b\x01\x00\x00\x00\x40'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x5f\x05\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x00'
            b'\x42\x00\x38\x05\x00\x00\x00\x04\x00\x00\x00\x04\x00\x00\x00\x00'
            b'\x42\x00\x83\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        ))

    def tearDown(self):
        super(TestAttributeClass, self).tearDown()

    def test_read(self):
        """Decoding the reference stream must reproduce attributeObj_a."""
        attrObj = Attribute()
        attrObj.read(self.key_req_with_crypt_params)
        self.assertEqual(self.attributeObj_a, attrObj)

    def test_write(self):
        """Encoding attributeObj_a must reproduce the reference stream."""
        attrObj = Attribute(self.attribute_name_a, self.attribute_index_a,
                            self.attribute_value_a)
        ostream = BytearrayStream()
        attrObj.write(ostream)
        self.assertEqual(self.key_req_with_crypt_params, ostream)

    def test_equal_on_equal(self):
        # Despite the method name, each pair differs in exactly one field
        # (name, value, or index), so equality must fail for all of them.
        self.assertFalse(self.attributeObj_a == self.attributeObj_b)
        self.assertFalse(self.attributeObj_a == self.attributeObj_c)
        self.assertFalse(self.attributeObj_a == self.attributeObj_d)

    def test_not_equal_on_not_equal(self):
        # Attributes with different names must compare not-equal.
        self.assertTrue(self.attributeObj_a != self.attributeObj_b)
class TestKeyMaterialStruct(TestCase):
    """
    A test suite for the KeyMaterialStruct.

    Placeholder suite; remove it together with KeyMaterialStruct when that
    class leaves the code base.
    """

    def setUp(self):
        super(TestKeyMaterialStruct, self).setUp()

    def tearDown(self):
        super(TestKeyMaterialStruct, self).tearDown()

    def test_valid_tag(self):
        """
        Test that the KeyMaterialStruct tag is valid.
        """
        self.assertEqual(Tags.KEY_MATERIAL, KeyMaterialStruct().tag)
class TestExtensionName(TestCase):
    """
    Tests for the ExtensionName class.

    ExtensionName is a thin wrapper around the TextString primitive, so
    only construction behaviour needs checking.
    """

    def setUp(self):
        super(TestExtensionName, self).setUp()

    def tearDown(self):
        super(TestExtensionName, self).tearDown()

    def _test_init(self, value):
        # Valid inputs (None or a string) construct successfully; anything
        # else must raise a TypeError.
        if value is None or isinstance(value, string_types):
            extension_name = ExtensionName(value)
            expected = '' if value is None else value
            self.assertEqual(
                expected, extension_name.value,
                "expected {0}, observed {1}".format(
                    expected, extension_name.value))
        else:
            self.assertRaises(TypeError, ExtensionName, value)

    def test_init_with_none(self):
        """An ExtensionName built with no value defaults to ''."""
        self._test_init(None)

    def test_init_with_valid(self):
        """An ExtensionName accepts a valid string value."""
        self._test_init("valid")

    def test_init_with_invalid(self):
        """A non-string value raises a TypeError."""
        self._test_init(0)
class TestExtensionTag(TestCase):
    """
    Tests for the ExtensionTag class.

    ExtensionTag is a thin wrapper around the Integer primitive, so only
    construction behaviour needs checking.
    """

    def setUp(self):
        super(TestExtensionTag, self).setUp()

    def tearDown(self):
        super(TestExtensionTag, self).tearDown()

    def _test_init(self, value):
        # Valid inputs (None or an integer) construct successfully;
        # anything else must raise a TypeError.
        if value is None or isinstance(value, int):
            extension_tag = ExtensionTag(value)
            expected = 0 if value is None else value
            self.assertEqual(
                expected, extension_tag.value,
                "expected {0}, observed {1}".format(
                    expected, extension_tag.value))
        else:
            self.assertRaises(TypeError, ExtensionTag, value)

    def test_init_with_none(self):
        """An ExtensionTag built with no value defaults to 0."""
        self._test_init(None)

    def test_init_with_valid(self):
        """An ExtensionTag accepts a valid integer value."""
        self._test_init(0)

    def test_init_with_invalid(self):
        """A non-integer value raises a TypeError."""
        self._test_init("invalid")
class TestExtensionType(TestCase):
    """
    Tests for the ExtensionType class.

    ExtensionType is a thin wrapper around the Integer primitive, so only
    construction behaviour needs checking.
    """

    def setUp(self):
        super(TestExtensionType, self).setUp()

    def tearDown(self):
        super(TestExtensionType, self).tearDown()

    def _test_init(self, value):
        # Valid inputs (None or an integer) construct successfully;
        # anything else must raise a TypeError.
        if value is None or isinstance(value, int):
            extension_type = ExtensionType(value)
            expected = 0 if value is None else value
            self.assertEqual(
                expected, extension_type.value,
                "expected {0}, observed {1}".format(
                    expected, extension_type.value))
        else:
            self.assertRaises(TypeError, ExtensionType, value)

    def test_init_with_none(self):
        """An ExtensionType built with no value defaults to 0."""
        self._test_init(None)

    def test_init_with_valid(self):
        """An ExtensionType accepts a valid integer value."""
        self._test_init(0)

    def test_init_with_invalid(self):
        """A non-integer value raises a TypeError."""
        self._test_init("invalid")
class TestEncryptionKeyInformation(testtools.TestCase):
"""
Test suite for the EncryptionKeyInformation struct.
"""
    def setUp(self):
        """Build the reference TTLV encodings used by the tests below."""
        super(TestEncryptionKeyInformation, self).setUp()

        # Encoding obtained from the KMIP 1.1 testing document, Section 14.1.
        #
        # This encoding matches the following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        # Cryptographic Parameters
        #     Block Cipher Mode - NIST_KEY_WRAP
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
        )

        # Adapted from the full encoding above. This encoding matches the
        # following set of values:
        # Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x30'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
        )

        # A bare structure header with no fields at all (invalid).
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x36\x01\x00\x00\x00\x00'
        )
    def tearDown(self):
        # No extra cleanup beyond the base TestCase teardown.
        super(TestEncryptionKeyInformation, self).tearDown()
def test_init(self):
"""
Test that an EncryptionKeyInformation struct can be constructed with
no arguments.
"""
encryption_key_information = objects.EncryptionKeyInformation()
self.assertEqual(None, encryption_key_information.unique_identifier)
self.assertEqual(
None,
encryption_key_information.cryptographic_parameters
)
def test_init_with_args(self):
"""
Test that an EncryptionKeyInformation struct can be constructed with
valid values.
"""
cryptographic_parameters = attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CTR)
encryption_key_information = objects.EncryptionKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444",
cryptographic_parameters=cryptographic_parameters
)
self.assertEqual(
"00000000-1111-2222-3333-444444444444",
encryption_key_information.unique_identifier
)
self.assertIsInstance(
encryption_key_information.cryptographic_parameters,
attributes.CryptographicParameters
)
parameters = encryption_key_information.cryptographic_parameters
self.assertEqual(
enums.BlockCipherMode.CTR,
parameters.block_cipher_mode
)
def test_invalid_unique_identifier(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the unique identifier of an EncryptionKeyInformation struct.
"""
kwargs = {'unique_identifier': 0}
self.assertRaisesRegexp(
TypeError,
"Unique identifier must be a string.",
objects.EncryptionKeyInformation,
**kwargs
)
encryption_key_information = objects.EncryptionKeyInformation()
args = (encryption_key_information, 'unique_identifier', 0)
self.assertRaisesRegexp(
TypeError,
"Unique identifier must be a string.",
setattr,
*args
)
def test_invalid_cryptographic_parameters(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the cryptographic parameters of an EncryptionKeyInformation struct.
"""
kwargs = {'cryptographic_parameters': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Cryptographic parameters must be a CryptographicParameters "
"struct.",
objects.EncryptionKeyInformation,
**kwargs
)
encryption_key_information = objects.EncryptionKeyInformation()
args = (
encryption_key_information,
'cryptographic_parameters',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Cryptographic parameters must be a CryptographicParameters "
"struct.",
setattr,
*args
)
def test_read(self):
"""
Test that an EncryptionKeyInformation struct can be read from a data
stream.
"""
encryption_key_information = objects.EncryptionKeyInformation()
self.assertEqual(None, encryption_key_information.unique_identifier)
self.assertEqual(
None,
encryption_key_information.cryptographic_parameters
)
encryption_key_information.read(self.full_encoding)
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
encryption_key_information.unique_identifier
)
self.assertIsInstance(
encryption_key_information.cryptographic_parameters,
attributes.CryptographicParameters
)
cryptographic_parameters = \
encryption_key_information.cryptographic_parameters
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
cryptographic_parameters.block_cipher_mode
)
def test_read_partial(self):
"""
Test that an EncryptionKeyInformation struct can be read from a partial
data stream.
"""
encryption_key_information = objects.EncryptionKeyInformation()
self.assertEqual(None, encryption_key_information.unique_identifier)
self.assertEqual(
None,
encryption_key_information.cryptographic_parameters
)
encryption_key_information.read(self.partial_encoding)
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
encryption_key_information.unique_identifier
)
self.assertEqual(
None,
encryption_key_information.cryptographic_parameters
)
def test_read_invalid(self):
"""
Test that a ValueError gets raised when a required
EncryptionKeyInformation field is missing from the struct encoding.
"""
encryption_key_information = objects.EncryptionKeyInformation()
args = (self.empty_encoding,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the unique identifier attribute.",
encryption_key_information.read,
*args
)
def test_write(self):
"""
Test that an EncryptionKeyInformation struct can be written to a data
stream.
"""
cryptographic_parameters = attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
encryption_key_information = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=cryptographic_parameters
)
stream = BytearrayStream()
encryption_key_information.write(stream)
self.assertEqual(len(self.full_encoding), len(stream))
self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined EncryptionKeyInformation struct can be
written to a data stream.
"""
encryption_key_information = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
)
stream = BytearrayStream()
encryption_key_information.write(stream)
self.assertEqual(len(self.partial_encoding), len(stream))
self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required
EncryptionKeyInformation field is missing when encoding the struct.
"""
encryption_key_information = objects.EncryptionKeyInformation()
stream = utils.BytearrayStream()
args = (stream,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the unique identifier attribute.",
encryption_key_information.write,
*args
)
def test_equal_on_equal(self):
"""
Test that the equality operator returns True when comparing two
EncryptionKeyInformation structs with the same data.
"""
a = objects.EncryptionKeyInformation()
b = objects.EncryptionKeyInformation()
self.assertTrue(a == b)
self.assertTrue(b == a)
a = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
self.assertTrue(a == b)
self.assertTrue(b == a)
def test_equal_on_not_equal_unique_identifier(self):
"""
Test that the equality operator returns False when comparing two
EncryptionKeyInformation structs with different unique identifiers.
"""
a = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
)
b = objects.EncryptionKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444"
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_cryptographic_parameters(self):
"""
Test that the equality operator returns False when comparing two
EncryptionKeyInformation structs with different cryptographic
parameters.
"""
a = objects.EncryptionKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.EncryptionKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.GCM
)
)
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
EncryptionKeyInformation structs with different types.
"""
a = objects.EncryptionKeyInformation()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
EncryptionKeyInformation structs with the same data.
"""
a = objects.EncryptionKeyInformation()
b = objects.EncryptionKeyInformation()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_unique_identifier(self):
"""
Test that the inequality operator returns True when comparing two
EncryptionKeyInformation structs with different unique identifiers.
"""
a = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
)
b = objects.EncryptionKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444"
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_cryptographic_parameters(self):
"""
Test that the inequality operator returns True when comparing two
EncryptionKeyInformation structs with different cryptographic
parameters.
"""
a = objects.EncryptionKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
b = objects.EncryptionKeyInformation(
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.GCM
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
EncryptionKeyInformation structs with different types.
"""
a = objects.EncryptionKeyInformation()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_repr(self):
"""
Test that repr can be applied to an EncryptionKeyInformation struct.
"""
encryption_key_information = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
expected = (
"EncryptionKeyInformation("
"unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
"cryptographic_parameters=CryptographicParameters("
"block_cipher_mode=BlockCipherMode.CBC, "
"padding_method=None, "
"hashing_algorithm=None, "
"key_role_type=None, "
"digital_signature_algorithm=None, "
"cryptographic_algorithm=None, "
"random_iv=None, "
"iv_length=None, "
"tag_length=None, "
"fixed_field_length=None, "
"invocation_field_length=None, "
"counter_length=None, "
"initial_counter_value=None))"
)
observed = repr(encryption_key_information)
self.assertEqual(expected, observed)
def test_str(self):
"""
Test that str can be applied to an EncryptionKeyInformation struct.
"""
cryptographic_parameters = attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
encryption_key_information = objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=cryptographic_parameters
)
expected = str({
'unique_identifier': "100182d5-72b8-47aa-8383-4d97d512e98a",
'cryptographic_parameters': cryptographic_parameters
})
observed = str(encryption_key_information)
self.assertEqual(expected, observed)
class TestMACSignatureKeyInformation(testtools.TestCase):
    """
    Test suite for the MACSignatureKeyInformation struct.
    """

    def setUp(self):
        super(TestMACSignatureKeyInformation, self).setUp()

        # Encoding adapted from the KMIP 1.1 testing document, Section 14.1,
        # and completed by hand. It encodes:
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x4E\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
        )

        # Subset of the full encoding above containing only:
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x4E\x01\x00\x00\x00\x30'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
        )

        # A bare struct header with no payload at all.
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x4E\x01\x00\x00\x00\x00'
        )

    def tearDown(self):
        # No per-test cleanup is needed beyond the base class teardown.
        super(TestMACSignatureKeyInformation, self).tearDown()

    def test_init(self):
        """
        Verify that a MACSignatureKeyInformation struct built with no
        arguments has no fields set.
        """
        info = objects.MACSignatureKeyInformation()

        self.assertEqual(None, info.unique_identifier)
        self.assertEqual(None, info.cryptographic_parameters)

    def test_init_with_args(self):
        """
        Verify that a MACSignatureKeyInformation struct built with valid
        values stores them correctly.
        """
        info = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CTR
            )
        )

        self.assertEqual(
            "00000000-1111-2222-3333-444444444444",
            info.unique_identifier
        )
        self.assertIsInstance(
            info.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.CTR,
            info.cryptographic_parameters.block_cipher_mode
        )

    def test_invalid_unique_identifier(self):
        """
        Verify that a TypeError is raised when the unique identifier of a
        MACSignatureKeyInformation struct is set to a non-string value,
        both via the constructor and via attribute assignment.
        """
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            objects.MACSignatureKeyInformation,
            unique_identifier=0
        )
        self.assertRaisesRegexp(
            TypeError,
            "Unique identifier must be a string.",
            setattr,
            objects.MACSignatureKeyInformation(),
            'unique_identifier',
            0
        )

    def test_invalid_cryptographic_parameters(self):
        """
        Verify that a TypeError is raised when the cryptographic parameters
        of a MACSignatureKeyInformation struct are set to an invalid value,
        both via the constructor and via attribute assignment.
        """
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            objects.MACSignatureKeyInformation,
            cryptographic_parameters='invalid'
        )
        self.assertRaisesRegexp(
            TypeError,
            "Cryptographic parameters must be a CryptographicParameters "
            "struct.",
            setattr,
            objects.MACSignatureKeyInformation(),
            'cryptographic_parameters',
            'invalid'
        )

    def test_read(self):
        """
        Verify that a MACSignatureKeyInformation struct can be read from a
        data stream.
        """
        info = objects.MACSignatureKeyInformation()

        self.assertEqual(None, info.unique_identifier)
        self.assertEqual(None, info.cryptographic_parameters)

        info.read(self.full_encoding)

        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            info.unique_identifier
        )
        self.assertIsInstance(
            info.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            info.cryptographic_parameters.block_cipher_mode
        )

    def test_read_partial(self):
        """
        Verify that a MACSignatureKeyInformation struct can be read from a
        partial data stream that omits the cryptographic parameters.
        """
        info = objects.MACSignatureKeyInformation()

        self.assertEqual(None, info.unique_identifier)
        self.assertEqual(None, info.cryptographic_parameters)

        info.read(self.partial_encoding)

        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            info.unique_identifier
        )
        self.assertEqual(None, info.cryptographic_parameters)

    def test_read_invalid(self):
        """
        Verify that reading an encoding that omits the required unique
        identifier field raises a ValueError.
        """
        info = objects.MACSignatureKeyInformation()
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            info.read,
            self.empty_encoding
        )

    def test_write(self):
        """
        Verify that a fully populated MACSignatureKeyInformation struct
        encodes correctly to a data stream.
        """
        info = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
            )
        )
        stream = BytearrayStream()
        info.write(stream)

        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))

    def test_write_partial(self):
        """
        Verify that a MACSignatureKeyInformation struct carrying only a
        unique identifier encodes correctly to a data stream.
        """
        info = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        stream = BytearrayStream()
        info.write(stream)

        self.assertEqual(len(self.partial_encoding), len(stream))
        self.assertEqual(str(self.partial_encoding), str(stream))

    def test_write_invalid(self):
        """
        Verify that writing a struct that omits the required unique
        identifier field raises a ValueError.
        """
        info = objects.MACSignatureKeyInformation()
        self.assertRaisesRegexp(
            ValueError,
            "Invalid struct missing the unique identifier attribute.",
            info.write,
            utils.BytearrayStream()
        )

    def test_equal_on_equal(self):
        """
        Verify that == reports True for two MACSignatureKeyInformation
        structs holding the same data.
        """
        info_a = objects.MACSignatureKeyInformation()
        info_b = objects.MACSignatureKeyInformation()

        self.assertTrue(info_a == info_b)
        self.assertTrue(info_b == info_a)

        # Distinct but identical instances must also compare equal.
        info_a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        info_b = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )

        self.assertTrue(info_a == info_b)
        self.assertTrue(info_b == info_a)

    def test_equal_on_not_equal_unique_identifier(self):
        """
        Verify that == reports False for two MACSignatureKeyInformation
        structs holding different unique identifiers.
        """
        info_a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        info_b = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )

        self.assertFalse(info_a == info_b)
        self.assertFalse(info_b == info_a)

    def test_equal_on_not_equal_cryptographic_parameters(self):
        """
        Verify that == reports False for two MACSignatureKeyInformation
        structs holding different cryptographic parameters.
        """
        info_a = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        info_b = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )

        self.assertFalse(info_a == info_b)
        self.assertFalse(info_b == info_a)

    def test_equal_on_type_mismatch(self):
        """
        Verify that == reports False when a MACSignatureKeyInformation
        struct is compared against an object of a different type.
        """
        info = objects.MACSignatureKeyInformation()
        other = 'invalid'

        self.assertFalse(info == other)
        self.assertFalse(other == info)

    def test_not_equal_on_equal(self):
        """
        Verify that != reports False for two MACSignatureKeyInformation
        structs holding the same data.
        """
        info_a = objects.MACSignatureKeyInformation()
        info_b = objects.MACSignatureKeyInformation()

        self.assertFalse(info_a != info_b)
        self.assertFalse(info_b != info_a)

        # Distinct but identical instances must also compare as not-unequal.
        info_a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        info_b = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )

        self.assertFalse(info_a != info_b)
        self.assertFalse(info_b != info_a)

    def test_not_equal_on_not_equal_unique_identifier(self):
        """
        Verify that != reports True for two MACSignatureKeyInformation
        structs holding different unique identifiers.
        """
        info_a = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a"
        )
        info_b = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444"
        )

        self.assertTrue(info_a != info_b)
        self.assertTrue(info_b != info_a)

    def test_not_equal_on_not_equal_cryptographic_parameters(self):
        """
        Verify that != reports True for two MACSignatureKeyInformation
        structs holding different cryptographic parameters.
        """
        info_a = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        info_b = objects.MACSignatureKeyInformation(
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.GCM
            )
        )

        self.assertTrue(info_a != info_b)
        self.assertTrue(info_b != info_a)

    def test_not_equal_on_type_mismatch(self):
        """
        Verify that != reports True when a MACSignatureKeyInformation
        struct is compared against an object of a different type.
        """
        info = objects.MACSignatureKeyInformation()
        other = 'invalid'

        self.assertTrue(info != other)
        self.assertTrue(other != info)

    def test_repr(self):
        """
        Verify the repr output of a populated MACSignatureKeyInformation
        struct.
        """
        info = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CBC
            )
        )
        # The repr spells out every CryptographicParameters field, set or
        # not, so the full expansion is pinned here.
        expected = (
            "MACSignatureKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None))"
        )

        self.assertEqual(expected, repr(info))

    def test_str(self):
        """
        Verify the str output of a populated MACSignatureKeyInformation
        struct.
        """
        params = attributes.CryptographicParameters(
            block_cipher_mode=enums.BlockCipherMode.CBC
        )
        info = objects.MACSignatureKeyInformation(
            unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
            cryptographic_parameters=params
        )
        # str() mirrors a plain dict rendering of the struct's fields.
        expected = str({
            'unique_identifier': "100182d5-72b8-47aa-8383-4d97d512e98a",
            'cryptographic_parameters': params
        })

        self.assertEqual(expected, str(info))
class TestKeyWrappingData(testtools.TestCase):
"""
Test suite for the KeyWrappingData struct.
"""
def setUp(self):
super(TestKeyWrappingData, self).setUp()
# Encoding obtained in part from the KMIP 1.1 testing document,
# Sections 14.1. The rest was built by hand.
#
# This encoding matches the following set of values:
#
# Wrapping Method - ENCRYPT
# Encryption Key Information
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
# Cryptographic Parameters
# Block Cipher Mode - NIST_KEY_WRAP
# MAC/Signature Key Information
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
# Cryptographic Parameters
# Block Cipher Mode - NIST_KEY_WRAP
# MAC/Signature - 0x0123456789ABCDEF
# IV/Counter/Nonce - 0x01
# Encoding Option - NO_ENCODING
self.full_encoding = BytearrayStream(
b'\x42\x00\x46\x01\x00\x00\x00\xE0'
b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
b'\x42\x00\x36\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
b'\x42\x00\x2B\x01\x00\x00\x00\x10'
b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
b'\x42\x00\x4E\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
b'\x42\x00\x2B\x01\x00\x00\x00\x10'
b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
b'\x42\x00\x4D\x08\x00\x00\x00\x08\x01\x23\x45\x67\x89\xAB\xCD\xEF'
b'\x42\x00\x3D\x08\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
)
# Encoding obtained from the KMIP 1.1 testing document, Section 14.1.
# This encoding matches the following set of values:
#
# Wrapping Method - ENCRYPT
# Encryption Key Information
# Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
# Cryptographic Parameters
# Block Cipher Mode - NIST_KEY_WRAP
# Encoding Option - NO_ENCODING
self.partial_encoding = BytearrayStream(
b'\x42\x00\x46\x01\x00\x00\x00\x70'
b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
b'\x42\x00\x36\x01\x00\x00\x00\x48'
b'\x42\x00\x94\x07\x00\x00\x00\x24'
b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
b'\x65\x39\x38\x61\x00\x00\x00\x00'
b'\x42\x00\x2B\x01\x00\x00\x00\x10'
b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
)
self.empty_encoding = BytearrayStream(
b'\x42\x00\x46\x01\x00\x00\x00\x00'
)
    def tearDown(self):
        # No per-test cleanup is needed beyond the base class teardown.
        super(TestKeyWrappingData, self).tearDown()
def test_init(self):
"""
Test that a KeyWrappingData struct can be constructed with no
arguments.
"""
key_wrapping_data = objects.KeyWrappingData()
self.assertEqual(None, key_wrapping_data.wrapping_method)
self.assertEqual(None, key_wrapping_data.encryption_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature)
self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
self.assertEqual(None, key_wrapping_data.encoding_option)
def test_init_with_args(self):
"""
Test that a KeyWrappingData struct can be constructed with valid
values.
"""
key_wrapping_data = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="12345678-9012-3456-7890-123456789012",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CTR
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="00000000-1111-2222-3333-444444444444",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01',
iv_counter_nonce=b'\x02',
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_data.wrapping_method
)
self.assertIsInstance(
key_wrapping_data.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_data.encryption_key_information
self.assertEqual(
"12345678-9012-3456-7890-123456789012",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.CTR,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_data.mac_signature_key_information,
objects.MACSignatureKeyInformation
)
m = key_wrapping_data.mac_signature_key_information
self.assertEqual(
"00000000-1111-2222-3333-444444444444",
m.unique_identifier
)
self.assertIsInstance(
m.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
m.cryptographic_parameters.block_cipher_mode
)
self.assertEqual(b'\x01', key_wrapping_data.mac_signature)
self.assertEqual(b'\x02', key_wrapping_data.iv_counter_nonce)
self.assertEqual(
enums.EncodingOption.TTLV_ENCODING,
key_wrapping_data.encoding_option
)
def test_invalid_wrapping_method(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the wrapping method of a KeyWrappingData struct.
"""
kwargs = {'wrapping_method': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
objects.KeyWrappingData,
**kwargs
)
args = (objects.KeyWrappingData(), 'wrapping_method', 0)
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
setattr,
*args
)
def test_invalid_encryption_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encryption key information of a KeyWrappingData struct.
"""
kwargs = {'encryption_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'encryption_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
setattr,
*args
)
def test_invalid_mac_signature_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the MAC/signature key information of a KeyWrappingData struct.
"""
kwargs = {'mac_signature_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'mac_signature_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
setattr,
*args
)
def test_invalid_mac_signature(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the MAC/signature of a KeyWrappingData struct.
"""
kwargs = {'mac_signature': 0}
self.assertRaisesRegexp(
TypeError,
"MAC/signature must be bytes.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'mac_signature',
0
)
self.assertRaisesRegexp(
TypeError,
"MAC/signature must be bytes.",
setattr,
*args
)
def test_invalid_iv_counter_nonce(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the IV/counter/nonce of a KeyWrappingData struct.
"""
kwargs = {'iv_counter_nonce': 0}
self.assertRaisesRegexp(
TypeError,
"IV/counter/nonce must be bytes.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'iv_counter_nonce',
0
)
self.assertRaisesRegexp(
TypeError,
"IV/counter/nonce must be bytes.",
setattr,
*args
)
def test_invalid_encoding_option(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encoding option of a KeyWrappingData struct.
"""
kwargs = {'encoding_option': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
objects.KeyWrappingData,
**kwargs
)
args = (
objects.KeyWrappingData(),
'encoding_option',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
setattr,
*args
)
def test_read(self):
"""
Test that a KeyWrappingData struct can be read from a data stream.
"""
key_wrapping_data = objects.KeyWrappingData()
self.assertEqual(None, key_wrapping_data.wrapping_method)
self.assertEqual(None, key_wrapping_data.encryption_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature)
self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
self.assertEqual(None, key_wrapping_data.encoding_option)
key_wrapping_data.read(self.full_encoding)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_data.wrapping_method
)
self.assertIsInstance(
key_wrapping_data.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_data.encryption_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsInstance(
key_wrapping_data.mac_signature_key_information,
objects.MACSignatureKeyInformation
)
m = key_wrapping_data.mac_signature_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
m.unique_identifier
)
self.assertIsInstance(
m.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
m.cryptographic_parameters.block_cipher_mode
)
self.assertEqual(
b'\x01\x23\x45\x67\x89\xAB\xCD\xEF',
key_wrapping_data.mac_signature
)
self.assertEqual(
b'\x01',
key_wrapping_data.iv_counter_nonce
)
self.assertEqual(
enums.EncodingOption.NO_ENCODING,
key_wrapping_data.encoding_option
)
def test_read_partial(self):
"""
Test that a KeyWrappingData struct can be read from a partial data
stream.
"""
key_wrapping_data = objects.KeyWrappingData()
self.assertEqual(None, key_wrapping_data.wrapping_method)
self.assertEqual(None, key_wrapping_data.encryption_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature_key_information)
self.assertEqual(None, key_wrapping_data.mac_signature)
self.assertEqual(None, key_wrapping_data.iv_counter_nonce)
self.assertEqual(None, key_wrapping_data.encoding_option)
key_wrapping_data.read(self.partial_encoding)
self.assertEqual(
enums.WrappingMethod.ENCRYPT,
key_wrapping_data.wrapping_method
)
self.assertIsInstance(
key_wrapping_data.encryption_key_information,
objects.EncryptionKeyInformation
)
e = key_wrapping_data.encryption_key_information
self.assertEqual(
"100182d5-72b8-47aa-8383-4d97d512e98a",
e.unique_identifier
)
self.assertIsInstance(
e.cryptographic_parameters,
attributes.CryptographicParameters
)
self.assertEqual(
enums.BlockCipherMode.NIST_KEY_WRAP,
e.cryptographic_parameters.block_cipher_mode
)
self.assertIsNone(key_wrapping_data.mac_signature_key_information)
self.assertIsNone(key_wrapping_data.mac_signature)
self.assertIsNone(key_wrapping_data.iv_counter_nonce)
self.assertEqual(
enums.EncodingOption.NO_ENCODING,
key_wrapping_data.encoding_option
)
def test_read_invalid(self):
"""
Test that a ValueError gets raised when a required KeyWrappingData
field is missing from the struct encoding.
"""
key_wrapping_data = objects.KeyWrappingData()
args = (self.empty_encoding,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_data.read,
*args
)
def test_write(self):
"""
Test that a KeyWrappingData struct can be written to a data stream.
"""
key_wrapping_data = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
mac_signature=b'\x01\x23\x45\x67\x89\xAB\xCD\xEF',
iv_counter_nonce=b'\x01',
encoding_option=enums.EncodingOption.NO_ENCODING
)
stream = BytearrayStream()
key_wrapping_data.write(stream)
self.assertEqual(len(self.full_encoding), len(stream))
self.assertEqual(str(self.full_encoding), str(stream))
def test_write_partial(self):
"""
Test that a partially defined KeyWrappingData struct can be written to
a data stream.
"""
key_wrapping_data = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
encoding_option=enums.EncodingOption.NO_ENCODING
)
stream = BytearrayStream()
key_wrapping_data.write(stream)
self.assertEqual(len(self.partial_encoding), len(stream))
self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required KeyWrappingData
field is missing when encoding the struct.
"""
key_wrapping_data = objects.KeyWrappingData()
stream = utils.BytearrayStream()
args = (stream,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_data.write,
*args
)
    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        KeyWrappingData structs with the same data.
        """
        # Two empty structs (all fields None) should compare equal.
        a = objects.KeyWrappingData()
        b = objects.KeyWrappingData()
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        # Two fully populated structs with identical field values should
        # also compare equal, in both directions.
        a = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        self.assertTrue(a == b)
        self.assertTrue(b == a)
def test_equal_on_not_equal_wrapping_method(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different wrapping methods.
"""
a = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingData(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_equal_on_not_equal_encryption_key_information(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different encryption key information.
        """
        # The two structs differ in both the unique identifier and the
        # block cipher mode of their encryption key information.
        a = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        b = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
    def test_equal_on_not_equal_mac_signature_key_information(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different MAC/signature key information.
        """
        # The two structs differ in both the unique identifier and the
        # block cipher mode of their MAC/signature key information.
        a = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        b = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
def test_equal_on_not_equal_mac_signatures(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different MAC/signatures.
"""
a = objects.KeyWrappingData(mac_signature=b'\x01')
b = objects.KeyWrappingData(mac_signature=b'\x10')
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_equal_on_not_equal_iv_counter_nonce(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different IV/counter/nonces.
"""
a = objects.KeyWrappingData(iv_counter_nonce=b'\x01')
b = objects.KeyWrappingData(iv_counter_nonce=b'\x10')
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_equal_on_not_equal_encoding_option(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingData structs with different encoding options.
        """
        # Only the encoding option differs between the two structs.
        a = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingData structs with different types.
"""
a = objects.KeyWrappingData()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        KeyWrappingData structs with the same data.
        """
        # Two empty structs (all fields None) should not be unequal.
        a = objects.KeyWrappingData()
        b = objects.KeyWrappingData()
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        # Two fully populated structs with identical field values should
        # also not be unequal, in both directions.
        a = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature=b'\x01\x01\x01\x01\x01\x01\x01\x01',
            iv_counter_nonce=b'\x01',
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        self.assertFalse(a != b)
        self.assertFalse(b != a)
    def test_not_equal_on_not_equal_wrapping_method(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different wrapping methods.
        """
        # Only the wrapping method differs between the two structs.
        a = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT
        )
        b = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.MAC_SIGN
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_not_equal_encryption_key_information(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different encryption key information.
        """
        # The two structs differ in both the unique identifier and the
        # block cipher mode of their encryption key information.
        a = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        b = objects.KeyWrappingData(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_not_equal_mac_signature_key_information(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different MAC/signature key information.
        """
        # The two structs differ in both the unique identifier and the
        # block cipher mode of their MAC/signature key information.
        a = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        b = objects.KeyWrappingData(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_not_equal_mac_signatures(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different MAC/signatures.
        """
        # Same-length MAC/signature values with different bytes.
        a = objects.KeyWrappingData(mac_signature=b'\x01')
        b = objects.KeyWrappingData(mac_signature=b'\x10')
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_not_equal_iv_counter_nonce(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different IV/counter/nonces.
        """
        # Same-length IV/counter/nonce values with different bytes.
        a = objects.KeyWrappingData(iv_counter_nonce=b'\x01')
        b = objects.KeyWrappingData(iv_counter_nonce=b'\x10')
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_not_equal_encoding_option(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different encoding options.
        """
        # Only the encoding option differs between the two structs.
        a = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingData(
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing two
        KeyWrappingData structs with different types.
        """
        # Comparing against a non-KeyWrappingData value is always unequal.
        a = objects.KeyWrappingData()
        b = 'invalid'
        self.assertTrue(a != b)
        self.assertTrue(b != a)
    def test_repr(self):
        """
        Test that repr can be applied to an KeyWrappingData struct.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            mac_signature=b'\x01\x01\x02\x02\x03\x03\x04\x04',
            iv_counter_nonce=b'\xFF',
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        # The expected repr spells out every CryptographicParameters field,
        # including the ones left at None, so this string is intentionally
        # exhaustive.  The byte fields are formatted via str.format so the
        # platform's native bytes repr is used.
        expected = (
            "KeyWrappingData("
            "wrapping_method=WrappingMethod.ENCRYPT, "
            "encryption_key_information=EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-ffff-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.NIST_KEY_WRAP, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature_key_information=MACSignatureKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature={0}, "
            "iv_counter_nonce={1}, "
            "encoding_option=EncodingOption.TTLV_ENCODING)".format(
                b'\x01\x01\x02\x02\x03\x03\x04\x04',
                b'\xFF'
            )
        )
        observed = repr(key_wrapping_data)
        self.assertEqual(expected, observed)
    def test_str(self):
        """
        Test that str can be applied to a KeyWrappingData struct.
        """
        key_wrapping_data = objects.KeyWrappingData(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            mac_signature=b'\x01\x01\x02\x02\x03\x03\x04\x04',
            iv_counter_nonce=b'\xFF',
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        # The expected value is the str() of a dict built with the same
        # field values, mirroring how the struct's __str__ is implemented.
        expected = str({
            'wrapping_method': enums.WrappingMethod.ENCRYPT,
            'encryption_key_information': objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            'mac_signature_key_information':
                objects.MACSignatureKeyInformation(
                    unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                    cryptographic_parameters=attributes.CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.CBC
                    )
                ),
            'mac_signature': b'\x01\x01\x02\x02\x03\x03\x04\x04',
            'iv_counter_nonce': b'\xFF',
            'encoding_option': enums.EncodingOption.TTLV_ENCODING
        })
        observed = str(key_wrapping_data)
        self.assertEqual(expected, observed)
class TestKeyWrappingSpecification(testtools.TestCase):
"""
Test suite for the KeyWrappingSpecification struct.
"""
    def setUp(self):
        """
        Build the known-good TTLV encodings shared by the read/write tests.
        """
        super(TestKeyWrappingSpecification, self).setUp()

        # Encoding obtained in part from the KMIP 1.1 testing document,
        # Sections 14.1 and 14.2. The rest was built by hand.
        #
        # This encoding matches the following set of values:
        #
        # Wrapping Method - Encrypt
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # MAC/Signature Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        # Attribute Names
        #     Cryptographic Usage Mask
        # Encoding Option - NO_ENCODING
        self.full_encoding = BytearrayStream(
            b'\x42\x00\x47\x01\x00\x00\x00\xE0'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x4E\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
            b'\x42\x00\x0A\x07\x00\x00\x00\x18'
            b'\x43\x72\x79\x70\x74\x6F\x67\x72\x61\x70\x68\x69\x63\x20\x55\x73'
            b'\x61\x67\x65\x20\x4D\x61\x73\x6B'
            b'\x42\x00\xA3\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
        )

        # Adapted from the full encoding above. This encoding matches the
        # following set of values:
        #
        # Wrapping Method - Encrypt
        # Encryption Key Information
        #     Unique Identifier - 100182d5-72b8-47aa-8383-4d97d512e98a
        #     Cryptographic Parameters
        #         Block Cipher Mode - NIST_KEY_WRAP
        self.partial_encoding = BytearrayStream(
            b'\x42\x00\x47\x01\x00\x00\x00\x60'
            b'\x42\x00\x9E\x05\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00'
            b'\x42\x00\x36\x01\x00\x00\x00\x48'
            b'\x42\x00\x94\x07\x00\x00\x00\x24'
            b'\x31\x30\x30\x31\x38\x32\x64\x35\x2D\x37\x32\x62\x38\x2D\x34\x37'
            b'\x61\x61\x2D\x38\x33\x38\x33\x2D\x34\x64\x39\x37\x64\x35\x31\x32'
            b'\x65\x39\x38\x61\x00\x00\x00\x00'
            b'\x42\x00\x2B\x01\x00\x00\x00\x10'
            b'\x42\x00\x11\x05\x00\x00\x00\x04\x00\x00\x00\x0D\x00\x00\x00\x00'
        )

        # An empty KeyWrappingSpecification struct: just the structure tag
        # with a zero-byte payload. Used by the invalid-read test.
        self.empty_encoding = BytearrayStream(
            b'\x42\x00\x47\x01\x00\x00\x00\x00'
        )
    def tearDown(self):
        """
        Clean up after each test via the parent class.
        """
        super(TestKeyWrappingSpecification, self).tearDown()
def test_init(self):
"""
Test that a KeyWrappingSpecification struct can be constructed with
no arguments.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
self.assertEqual(None, key_wrapping_specification.wrapping_method)
self.assertEqual(
None,
key_wrapping_specification.encryption_key_information
)
self.assertEqual(
None,
key_wrapping_specification.mac_signature_key_information
)
self.assertEqual(None, key_wrapping_specification.attribute_names)
self.assertEqual(None, key_wrapping_specification.encoding_option)
    def test_init_with_args(self):
        """
        Test that a KeyWrappingSpecification struct can be constructed with
        valid values.
        """
        encryption_key_information = objects.EncryptionKeyInformation(
            unique_identifier="12345678-9012-3456-7890-123456789012",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.CTR
            )
        )
        mac_signature_key_information = objects.MACSignatureKeyInformation(
            unique_identifier="00000000-1111-2222-3333-444444444444",
            cryptographic_parameters=attributes.CryptographicParameters(
                block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
            )
        )
        key_wrapping_specification = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=encryption_key_information,
            mac_signature_key_information=mac_signature_key_information,
            attribute_names=[
                'Cryptographic Algorithm',
                'Cryptographic Length',
                'Cryptographic Usage Mask'
            ],
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )

        # Verify that each constructor argument landed on the matching
        # attribute, drilling into the nested info structs as well.
        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_specification.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_specification.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_specification.encryption_key_information
        self.assertEqual(
            "12345678-9012-3456-7890-123456789012",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.CTR,
            e.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_specification.mac_signature_key_information,
            objects.MACSignatureKeyInformation
        )
        m = key_wrapping_specification.mac_signature_key_information
        self.assertEqual(
            "00000000-1111-2222-3333-444444444444",
            m.unique_identifier
        )
        self.assertIsInstance(
            m.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            m.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_specification.attribute_names,
            list
        )
        self.assertEqual(3, len(key_wrapping_specification.attribute_names))
        self.assertEqual(
            'Cryptographic Algorithm',
            key_wrapping_specification.attribute_names[0]
        )
        self.assertEqual(
            'Cryptographic Length',
            key_wrapping_specification.attribute_names[1]
        )
        self.assertEqual(
            'Cryptographic Usage Mask',
            key_wrapping_specification.attribute_names[2]
        )
        self.assertEqual(
            enums.EncodingOption.TTLV_ENCODING,
            key_wrapping_specification.encoding_option
        )
def test_invalid_wrapping_method(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the wrapping method of a KeyWrappingSpecification struct.
"""
kwargs = {'wrapping_method': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (objects.KeyWrappingSpecification(), 'wrapping_method', 0)
self.assertRaisesRegexp(
TypeError,
"Wrapping method must be a WrappingMethod enumeration.",
setattr,
*args
)
def test_invalid_encryption_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encryption key information of a KeyWrappingSpecification struct.
"""
kwargs = {'encryption_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'encryption_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encryption key information must be an EncryptionKeyInformation "
"struct.",
setattr,
*args
)
def test_invalid_mac_signature_key_information(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the MAC/signature key information of a KeyWrappingSpecification
struct.
"""
kwargs = {'mac_signature_key_information': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'mac_signature_key_information',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"MAC/signature key information must be an "
"MACSignatureKeyInformation struct.",
setattr,
*args
)
def test_invalid_attribute_names(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the attribute names of a KeyWrappingSpecification struct.
"""
kwargs = {'attribute_names': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Attribute names must be a list of strings.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'attribute_names',
['valid', 0]
)
self.assertRaisesRegexp(
TypeError,
"Attribute names must be a list of strings.",
setattr,
*args
)
def test_invalid_encoding_option(self):
"""
Test that a TypeError is raised when an invalid value is used to set
the encoding option of a KeyWrappingSpecification struct.
"""
kwargs = {'encoding_option': 'invalid'}
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
objects.KeyWrappingSpecification,
**kwargs
)
args = (
objects.KeyWrappingSpecification(),
'encoding_option',
'invalid'
)
self.assertRaisesRegexp(
TypeError,
"Encoding option must be an EncodingOption enumeration.",
setattr,
*args
)
    def test_read(self):
        """
        Test that a KeyWrappingSpecification struct can be read from a data
        stream.
        """
        key_wrapping_specification = objects.KeyWrappingSpecification()

        # All fields start out unset.
        self.assertEqual(None, key_wrapping_specification.wrapping_method)
        self.assertEqual(
            None,
            key_wrapping_specification.encryption_key_information
        )
        self.assertEqual(
            None,
            key_wrapping_specification.mac_signature_key_information
        )
        self.assertEqual(None, key_wrapping_specification.attribute_names)
        self.assertEqual(None, key_wrapping_specification.encoding_option)

        # Reading the full encoding should populate every field with the
        # values documented in setUp.
        key_wrapping_specification.read(self.full_encoding)

        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_specification.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_specification.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_specification.encryption_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            e.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_specification.mac_signature_key_information,
            objects.MACSignatureKeyInformation
        )
        m = key_wrapping_specification.mac_signature_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            m.unique_identifier
        )
        self.assertIsInstance(
            m.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            m.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsInstance(
            key_wrapping_specification.attribute_names,
            list
        )
        self.assertEqual(
            'Cryptographic Usage Mask',
            key_wrapping_specification.attribute_names[0]
        )
        self.assertEqual(
            enums.EncodingOption.NO_ENCODING,
            key_wrapping_specification.encoding_option
        )
    def test_read_partial(self):
        """
        Test that a KeyWrappingSpecification struct can be read from a
        partial data stream.
        """
        key_wrapping_specification = objects.KeyWrappingSpecification()

        # All fields start out unset.
        self.assertEqual(None, key_wrapping_specification.wrapping_method)
        self.assertEqual(
            None,
            key_wrapping_specification.encryption_key_information
        )
        self.assertEqual(
            None,
            key_wrapping_specification.mac_signature_key_information
        )
        self.assertEqual(None, key_wrapping_specification.attribute_names)
        self.assertEqual(None, key_wrapping_specification.encoding_option)

        # The partial encoding only carries the wrapping method and the
        # encryption key information; the other fields must remain None.
        key_wrapping_specification.read(self.partial_encoding)

        self.assertEqual(
            enums.WrappingMethod.ENCRYPT,
            key_wrapping_specification.wrapping_method
        )
        self.assertIsInstance(
            key_wrapping_specification.encryption_key_information,
            objects.EncryptionKeyInformation
        )
        e = key_wrapping_specification.encryption_key_information
        self.assertEqual(
            "100182d5-72b8-47aa-8383-4d97d512e98a",
            e.unique_identifier
        )
        self.assertIsInstance(
            e.cryptographic_parameters,
            attributes.CryptographicParameters
        )
        self.assertEqual(
            enums.BlockCipherMode.NIST_KEY_WRAP,
            e.cryptographic_parameters.block_cipher_mode
        )
        self.assertIsNone(
            key_wrapping_specification.mac_signature_key_information
        )
        self.assertIsNone(
            key_wrapping_specification.attribute_names
        )
        self.assertIsNone(
            key_wrapping_specification.encoding_option
        )
def test_read_invalid(self):
"""
Test that a ValueError gets raised when a required
MACSignatureKeyInformation field is missing from the struct encoding.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
args = (self.empty_encoding,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_specification.read,
*args
)
    def test_write(self):
        """
        Test that a KeyWrappingSpecification struct can be written to a data
        stream.
        """
        # Mirror the full encoding documented in setUp.
        key_wrapping_specification = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            attribute_names=['Cryptographic Usage Mask'],
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        stream = BytearrayStream()
        key_wrapping_specification.write(stream)

        # The serialized bytes must match the known-good encoding exactly.
        self.assertEqual(len(self.full_encoding), len(stream))
        self.assertEqual(str(self.full_encoding), str(stream))
    def test_write_partial(self):
        """
        Test that a partially defined KeyWrappingSpecification struct can be
        written to a data stream.
        """
        # Only the wrapping method and encryption key information are set.
        key_wrapping_specification = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        stream = BytearrayStream()
        key_wrapping_specification.write(stream)

        # The output must match the partial encoding built in setUp.
        self.assertEqual(len(self.partial_encoding), len(stream))
        self.assertEqual(str(self.partial_encoding), str(stream))
def test_write_invalid(self):
"""
Test that a ValueError gets raised when a required
KeyWrappingSpecification field is missing when encoding the struct.
"""
key_wrapping_specification = objects.KeyWrappingSpecification()
stream = utils.BytearrayStream()
args = (stream,)
self.assertRaisesRegexp(
ValueError,
"Invalid struct missing the wrapping method attribute.",
key_wrapping_specification.write,
*args
)
    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        KeyWrappingSpecification structs with the same data.
        """
        # Two empty structs (all fields None) should compare equal.
        a = objects.KeyWrappingSpecification()
        b = objects.KeyWrappingSpecification()

        self.assertTrue(a == b)
        self.assertTrue(b == a)

        # Two populated structs with identical field values should also
        # compare equal, in both directions.
        a = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            attribute_names=['Cryptographic Usage Mask'],
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            attribute_names=['Cryptographic Usage Mask'],
            encoding_option=enums.EncodingOption.NO_ENCODING
        )

        self.assertTrue(a == b)
        self.assertTrue(b == a)
    def test_equal_on_not_equal_wrapping_method(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingSpecification structs with different wrapping methods.
        """
        # Only the wrapping method differs between the two structs.
        a = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT
        )
        b = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.MAC_SIGN
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
    def test_equal_on_not_equal_encryption_key_information(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingSpecification structs with different encryption key
        information.
        """
        # The two structs differ in both the unique identifier and the
        # block cipher mode of their encryption key information.
        a = objects.KeyWrappingSpecification(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        b = objects.KeyWrappingSpecification(
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
    def test_equal_on_not_equal_mac_signature_key_information(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingSpecification structs with different MAC/signature key
        information.
        """
        # The two structs differ in both the unique identifier and the
        # block cipher mode of their MAC/signature key information.
        a = objects.KeyWrappingSpecification(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            )
        )
        b = objects.KeyWrappingSpecification(
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            )
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
def test_equal_on_not_equal_attribute_names(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different attribute names.
"""
a = objects.KeyWrappingSpecification(
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
]
)
b = objects.KeyWrappingSpecification(
attribute_names=['Cryptographic Usage Mask']
)
self.assertFalse(a == b)
self.assertFalse(b == a)
    def test_equal_on_not_equal_encoding_option(self):
        """
        Test that the equality operator returns False when comparing two
        KeyWrappingSpecification structs with different encoding options.
        """
        # Only the encoding option differs between the two structs.
        a = objects.KeyWrappingSpecification(
            encoding_option=enums.EncodingOption.NO_ENCODING
        )
        b = objects.KeyWrappingSpecification(
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        self.assertFalse(a == b)
        self.assertFalse(b == a)
def test_equal_on_type_mismatch(self):
"""
Test that the equality operator returns False when comparing two
KeyWrappingSpecification structs with different types.
"""
a = objects.KeyWrappingSpecification()
b = 'invalid'
self.assertFalse(a == b)
self.assertFalse(b == a)
def test_not_equal_on_equal(self):
"""
Test that the inequality operator returns False when comparing two
KeyWrappingSpecification structs with the same data.
"""
a = objects.KeyWrappingSpecification()
b = objects.KeyWrappingSpecification()
self.assertFalse(a != b)
self.assertFalse(b != a)
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT,
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
),
attribute_names=['Cryptographic Usage Mask'],
encoding_option=enums.EncodingOption.NO_ENCODING
)
self.assertFalse(a != b)
self.assertFalse(b != a)
def test_not_equal_on_not_equal_wrapping_method(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different wrapping methods.
"""
a = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.ENCRYPT
)
b = objects.KeyWrappingSpecification(
wrapping_method=enums.WrappingMethod.MAC_SIGN
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encryption_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different encryption key
information.
"""
a = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
b = objects.KeyWrappingSpecification(
encryption_key_information=objects.EncryptionKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_mac_signature_key_information(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different MAC/signature key
information.
"""
a = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
)
)
)
b = objects.KeyWrappingSpecification(
mac_signature_key_information=objects.MACSignatureKeyInformation(
unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
cryptographic_parameters=attributes.CryptographicParameters(
block_cipher_mode=enums.BlockCipherMode.CBC
)
)
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_attribute_names(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different attribute names.
"""
a = objects.KeyWrappingSpecification(
attribute_names=[
'Cryptographic Algorithm',
'Cryptographic Length'
]
)
b = objects.KeyWrappingSpecification(
attribute_names=['Cryptographic Usage Mask']
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_not_equal_encoding_option(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different encoding options.
"""
a = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.NO_ENCODING
)
b = objects.KeyWrappingSpecification(
encoding_option=enums.EncodingOption.TTLV_ENCODING
)
self.assertTrue(a != b)
self.assertTrue(b != a)
def test_not_equal_on_type_mismatch(self):
"""
Test that the inequality operator returns True when comparing two
KeyWrappingSpecification structs with different types.
"""
a = objects.KeyWrappingSpecification()
b = 'invalid'
self.assertTrue(a != b)
self.assertTrue(b != a)
    def test_repr(self):
        """
        Test that repr can be applied to a KeyWrappingSpecification struct,
        producing the exact constructor-style string below.
        """
        # Fully populated struct so every top-level field appears in the repr.
        key_wrapping_specification = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            attribute_names=[
                'Cryptographic Algorithm',
                'Cryptographic Length'
            ],
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        # The expected repr spells out every CryptographicParameters field;
        # fields left unset above are rendered as None.
        expected = (
            "KeyWrappingSpecification("
            "wrapping_method=WrappingMethod.ENCRYPT, "
            "encryption_key_information=EncryptionKeyInformation("
            "unique_identifier='100182d5-72b8-ffff-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.NIST_KEY_WRAP, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "mac_signature_key_information=MACSignatureKeyInformation("
            "unique_identifier='100182d5-72b8-47aa-8383-4d97d512e98a', "
            "cryptographic_parameters=CryptographicParameters("
            "block_cipher_mode=BlockCipherMode.CBC, "
            "padding_method=None, "
            "hashing_algorithm=None, "
            "key_role_type=None, "
            "digital_signature_algorithm=None, "
            "cryptographic_algorithm=None, "
            "random_iv=None, "
            "iv_length=None, "
            "tag_length=None, "
            "fixed_field_length=None, "
            "invocation_field_length=None, "
            "counter_length=None, "
            "initial_counter_value=None)), "
            "attribute_names=["
            "'Cryptographic Algorithm', 'Cryptographic Length'], "
            "encoding_option=EncodingOption.TTLV_ENCODING)"
        )
        observed = repr(key_wrapping_specification)
        self.assertEqual(expected, observed)
    def test_str(self):
        """
        Test that str can be applied to a KeyWrappingSpecification struct.
        """
        # Fully populated struct so every top-level field appears in the
        # string form.
        key_wrapping_specification = objects.KeyWrappingSpecification(
            wrapping_method=enums.WrappingMethod.ENCRYPT,
            encryption_key_information=objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            mac_signature_key_information=objects.MACSignatureKeyInformation(
                unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.CBC
                )
            ),
            attribute_names=[
                'Cryptographic Algorithm',
                'Cryptographic Length'
            ],
            encoding_option=enums.EncodingOption.TTLV_ENCODING
        )
        # str() is expected to render the struct as a dict; the key order of
        # this literal must match the implementation's rendering order.
        expected = str({
            'wrapping_method': enums.WrappingMethod.ENCRYPT,
            'encryption_key_information': objects.EncryptionKeyInformation(
                unique_identifier="100182d5-72b8-ffff-8383-4d97d512e98a",
                cryptographic_parameters=attributes.CryptographicParameters(
                    block_cipher_mode=enums.BlockCipherMode.NIST_KEY_WRAP
                )
            ),
            'mac_signature_key_information':
                objects.MACSignatureKeyInformation(
                    unique_identifier="100182d5-72b8-47aa-8383-4d97d512e98a",
                    cryptographic_parameters=attributes.CryptographicParameters(
                        block_cipher_mode=enums.BlockCipherMode.CBC
                    )
                ),
            'attribute_names': [
                'Cryptographic Algorithm',
                'Cryptographic Length'
            ],
            'encoding_option': enums.EncodingOption.TTLV_ENCODING
        })
        observed = str(key_wrapping_specification)
        self.assertEqual(expected, observed)
| 37.125081 | 79 | 0.631324 |
86fa39539e36c12bdb6c7a3c09b5262d417e2db0 | 22,560 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_inbound_nat_rules_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_inbound_nat_rules_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_inbound_nat_rules_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations(object):
    """InboundNatRulesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_03_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Exposes the request/response model classes for this operation group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.InboundNatRuleListResult"]
        """Gets all the inbound nat rules in a load balancer.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.InboundNatRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Builds the GET request for either the first page or, when
            # next_link is given, a continuation page.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation pages: next_link is a complete URL, so no
                # query parameters are added.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserializes one page and returns (continuation link, items).
            deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetches one page; any status other than 200 raises.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        inbound_nat_rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Issues the initial DELETE call of the long-running delete operation.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200, 202 and 204 are all acceptable responses to the initial delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        inbound_nat_rule_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified load balancer inbound nat rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param inbound_nat_rule_name: The name of the inbound nat rule.
        :type inbound_nat_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only start a new operation when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                load_balancer_name=load_balancer_name,
                inbound_nat_rule_name=inbound_nat_rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # The delete LRO resolves its final state via the 'location' header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        inbound_nat_rule_name,  # type: str
        expand=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.InboundNatRule"
        """Gets the specified load balancer inbound nat rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param inbound_nat_rule_name: The name of the inbound nat rule.
        :type inbound_nat_rule_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: InboundNatRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_03_01.models.InboundNatRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is only sent when the caller explicitly requested it.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('InboundNatRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        inbound_nat_rule_name,  # type: str
        inbound_nat_rule_parameters,  # type: "_models.InboundNatRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.InboundNatRule"
        # Issues the initial PUT call of the long-running create-or-update
        # operation.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 and 201 both carry an InboundNatRule payload.
        if response.status_code == 200:
            deserialized = self._deserialize('InboundNatRule', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('InboundNatRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        inbound_nat_rule_name,  # type: str
        inbound_nat_rule_parameters,  # type: "_models.InboundNatRule"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.InboundNatRule"]
        """Creates or updates a load balancer inbound nat rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param inbound_nat_rule_name: The name of the inbound nat rule.
        :type inbound_nat_rule_name: str
        :param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
         rule operation.
        :type inbound_nat_rule_parameters: ~azure.mgmt.network.v2020_03_01.models.InboundNatRule
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either InboundNatRule or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.InboundNatRule]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only start a new operation when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                load_balancer_name=load_balancer_name,
                inbound_nat_rule_name=inbound_nat_rule_name,
                inbound_nat_rule_parameters=inbound_nat_rule_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('InboundNatRule', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # This LRO resolves its final state via the Azure-AsyncOperation header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
| 50.469799 | 232 | 0.669991 |
93ea3c74d639120f6b41d780a2a4ee76a64e0cf5 | 25,909 | py | Python | gn/gn_to_cmake.py | vaxpl/skia | 3a3d4573640a04fd2102be9565dac8c213545ef2 | [
"BSD-3-Clause"
] | null | null | null | gn/gn_to_cmake.py | vaxpl/skia | 3a3d4573640a04fd2102be9565dac8c213545ef2 | [
"BSD-3-Clause"
] | null | null | null | gn/gn_to_cmake.py | vaxpl/skia | 3a3d4573640a04fd2102be9565dac8c213545ef2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Usage: gn_to_cmake.py <json_file_name>
gn gen out/config --ide=json --json-ide-script=../../gn/gn_to_cmake.py
or
gn gen out/config --ide=json
python gn/gn_to_cmake.py out/config/project.json
The first is recommended, as it will auto-update.
"""
import itertools
import functools
import json
import posixpath
import os
import string
import sys
def CMakeStringEscape(a):
  """Returns 'a' escaped for embedding inside a double-quoted CMake string.

  A backslash is prefixed to each character that is special inside a CMake
  string: '\' (would modify the next character), '"' (would end the string)
  and ';' (would turn the string into a list).  '#' needs no escaping while
  the lexer is in string state, since it does not start a comment there.
  """
  escaped = a.replace('\\', '\\\\')
  escaped = escaped.replace(';', '\\;')
  return escaped.replace('"', '\\"')
def CMakeTargetEscape(a):
  """Returns 'a' rewritten as a legal CMake target name.

  CMP0037 in CMake 3.0 restricts target names to "^[A-Za-z0-9_.:+-]+$",
  and ':' is only allowed for imported targets, so every character outside
  [A-Za-z0-9_.+-] is replaced by '__'.
  """
  allowed = string.ascii_letters + string.digits + '_.+-'
  return ''.join(c if c in allowed else '__' for c in a)
def SetVariable(out, variable_name, value):
  """Writes a CMake set() command binding 'value' to 'variable_name'.

  Both the name and the value are escaped for use inside CMake strings.
  """
  out.write('set("%s" "%s")\n'
            % (CMakeStringEscape(variable_name), CMakeStringEscape(value)))
def SetVariableList(out, variable_name, values):
  """Writes CMake commands setting 'variable_name' to the list 'values'.

  Empty and single-element lists degenerate to a plain set(); longer lists
  are emitted as list(APPEND ...) with one quoted value per line.
  """
  if not values:
    return SetVariable(out, variable_name, "")
  if len(values) == 1:
    return SetVariable(out, variable_name, values[0])
  escaped_values = [CMakeStringEscape(value) for value in values]
  out.write('list(APPEND "')
  out.write(CMakeStringEscape(variable_name))
  out.write('"\n "')
  out.write('"\n "'.join(escaped_values))
  out.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
  """Emits set_source_files_properties(...) applying 'property_name' to the
  source files held in CMake variable 'variable'.

  Each escaped value is followed by 'sep', so the property string carries a
  trailing separator.
  """
  output.write('set_source_files_properties(')
  WriteVariable(output, variable)
  output.write(' PROPERTIES ')
  output.write(property_name)
  output.write(' "')
  output.write(''.join(CMakeStringEscape(value) + sep for value in values))
  output.write('")\n')
def SetCurrentTargetProperty(out, property_name, values, sep=''):
  """Sets 'property_name' on the CMake target named by the ${target}
  variable.

  Each escaped value is followed by 'sep', so the property string carries a
  trailing separator.
  """
  out.write('set_target_properties("${target}" PROPERTIES ')
  out.write(property_name)
  out.write(' "')
  out.write(''.join(CMakeStringEscape(value) + sep for value in values))
  out.write('")\n')
def WriteVariable(output, variable_name, prepend=None):
  """Writes a ${variable_name} reference, optionally preceded by 'prepend'."""
  reference = '${' + variable_name + '}'
  if prepend:
    reference = prepend + reference
  output.write(reference)
# See GetSourceFileType in gn
# Maps a source-file extension to the source-type label GN assigns it.
source_file_types = {
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.m': 'objc',
  '.mm': 'objcc',
  '.c': 'c',
  '.s': 'asm',
  '.S': 'asm',
  '.asm': 'asm',
  '.o': 'obj',
  '.obj': 'obj',
}
class CMakeTargetType(object):
  """Describes how a GN target type is expressed in CMake.

  Attributes:
    command: the CMake command that declares the target
        (e.g. 'add_library').
    modifier: extra argument passed to the command (e.g. 'STATIC'), or None.
    property_modifier: output-property prefix such as 'RUNTIME' or
        'ARCHIVE', or None.
    is_linkable: whether this target type is linkable.
  """

  def __init__(self, command, modifier, property_modifier, is_linkable):
    self.command = command
    self.modifier = modifier
    self.property_modifier = property_modifier
    self.is_linkable = is_linkable


# Fallback mapping used for GN target types with no direct CMake equivalent:
# they become add_custom_target(... SOURCES ...) and are not linkable.
CMakeTargetType.custom = CMakeTargetType('add_custom_target', 'SOURCES',
                                         None, False)
# See GetStringForOutputType in gn
# Maps each gn target type to the CMake construct used to emit it.
# Types with no direct CMake equivalent use the add_custom_target fallback.
cmake_target_types = {
  'unknown': CMakeTargetType.custom,
  'group': CMakeTargetType.custom,
  'executable': CMakeTargetType('add_executable', None, 'RUNTIME', True),
  'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY', True),
  'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY', True),
  'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE', True),
  'source_set': CMakeTargetType('add_library', 'OBJECT', None, False),
  'copy': CMakeTargetType.custom,
  'action': CMakeTargetType.custom,
  'action_foreach': CMakeTargetType.custom,
  'bundle_data': CMakeTargetType.custom,
  'create_bundle': CMakeTargetType.custom,
}
def FindFirstOf(s, a):
  """Returns the lowest index in *s* of any character from *a* that occurs.

  Raises ValueError (min of an empty sequence) when none of the characters
  in *a* appear in *s* — callers pass labels that always contain one.
  """
  positions = [s.find(ch) for ch in a if ch in s]
  return min(positions)
class Project(object):
  """Wraps the gn-generated project json: target map plus root/build paths."""
  def __init__(self, project_json):
    self.targets = project_json['targets']
    build_settings = project_json['build_settings']
    self.root_path = build_settings['root_path']
    self.build_path = self.GetAbsolutePath(build_settings['build_dir'])
  def GetAbsolutePath(self, path):
    # gn source-absolute paths start with '//' and are rooted at root_path;
    # anything else is passed through unchanged.
    if path.startswith('//'):
      return posixpath.join(self.root_path, path[2:])
    else:
      return path
  def GetObjectSourceDependencies(self, gn_target_name, object_dependencies):
    """All OBJECT libraries whose sources have not been absorbed."""
    dependencies = self.targets[gn_target_name].get('deps', [])
    for dependency in dependencies:
      dependency_type = self.targets[dependency].get('type', None)
      if dependency_type == 'source_set':
        object_dependencies.add(dependency)
      # Keep recursing through target kinds that do not absorb objects
      # themselves (see gn_target_types_that_absorb_objects).
      if dependency_type not in gn_target_types_that_absorb_objects:
        self.GetObjectSourceDependencies(dependency, object_dependencies)
  def GetObjectLibraryDependencies(self, gn_target_name, object_dependencies):
    """All OBJECT libraries whose libraries have not been absorbed."""
    dependencies = self.targets[gn_target_name].get('deps', [])
    for dependency in dependencies:
      dependency_type = self.targets[dependency].get('type', None)
      if dependency_type == 'source_set':
        object_dependencies.add(dependency)
        self.GetObjectLibraryDependencies(dependency, object_dependencies)
  def GetCMakeTargetName(self, gn_target_name):
    """Converts a gn label into a legal, unique CMake target name."""
    # See <chromium>/src/tools/gn/label.cc#Resolve
    # //base/test:test_support(//build/toolchain/win:msvc)
    # NOTE(review): FindFirstOf raises ValueError when the label contains
    # neither ':' nor '(' — this assumes every label has a separator; confirm.
    path_separator = FindFirstOf(gn_target_name, (':', '('))
    location = None
    name = None
    toolchain = None
    if not path_separator:
      location = gn_target_name[2:]
    else:
      location = gn_target_name[2:path_separator]
      toolchain_separator = gn_target_name.find('(', path_separator)
      if toolchain_separator == -1:
        name = gn_target_name[path_separator + 1:]
      else:
        if toolchain_separator > path_separator:
          name = gn_target_name[path_separator + 1:toolchain_separator]
        assert gn_target_name.endswith(')')
        toolchain = gn_target_name[toolchain_separator + 1:-1]
    assert location or name
    cmake_target_name = None
    # '//foo/bar:bar' collapses to just 'foo/bar'.
    if location.endswith('/' + name):
      cmake_target_name = location
    elif location:
      cmake_target_name = location + '_' + name
    else:
      cmake_target_name = name
    if toolchain:
      cmake_target_name += '--' + toolchain
    return CMakeTargetEscape(cmake_target_name)
class Target(object):
  """Pairs a gn target name with its properties and derived CMake metadata."""
  def __init__(self, gn_target_name, project):
    self.gn_name = gn_target_name
    self.properties = project.targets[self.gn_name]
    self.cmake_name = project.GetCMakeTargetName(self.gn_name)
    self.gn_type = self.properties.get('type', None)
    # None when the gn type has no CMake mapping; WriteTarget checks this.
    self.cmake_type = cmake_target_types.get(self.gn_type, None)
def WriteAction(out, target, project, sources, synthetic_dependencies):
  """Emits an add_custom_command that runs a gn 'action' target's script.

  Registers the outputs variable in synthetic_dependencies so the enclosing
  add_custom_target can DEPEND on it.
  """
  outputs = []
  output_directories = set()
  for output in target.properties.get('outputs', []):
    output_abs_path = project.GetAbsolutePath(output)
    outputs.append(output_abs_path)
    output_directory = posixpath.dirname(output_abs_path)
    if output_directory:
      output_directories.add(output_directory)
  outputs_name = '${target}__output'
  SetVariableList(out, outputs_name, outputs)
  out.write('add_custom_command(OUTPUT ')
  WriteVariable(out, outputs_name)
  out.write('\n')
  # Output directories must exist before the script writes into them.
  if output_directories:
    out.write(' COMMAND ${CMAKE_COMMAND} -E make_directory "')
    out.write('" "'.join(map(CMakeStringEscape, output_directories)))
    out.write('"\n')
  script = target.properties['script']
  arguments = target.properties['args']
  out.write(' COMMAND python "')
  out.write(CMakeStringEscape(project.GetAbsolutePath(script)))
  out.write('"')
  if arguments:
    out.write('\n "')
    out.write('"\n "'.join(map(CMakeStringEscape, arguments)))
    out.write('"')
  out.write('\n')
  # The action re-runs when any of the target's categorized sources change.
  out.write(' DEPENDS ')
  for sources_type_name in sources.values():
    WriteVariable(out, sources_type_name, ' ')
  out.write('\n')
  #TODO: CMake 3.7 is introducing DEPFILE
  out.write(' WORKING_DIRECTORY "')
  out.write(CMakeStringEscape(project.build_path))
  out.write('"\n')
  out.write(' COMMENT "Action: ${target}"\n')
  out.write(' VERBATIM)\n')
  synthetic_dependencies.add(outputs_name)
def ExpandPlaceholders(source, a):
  """Expands gn {{placeholder}} tokens in *a* for the given source path."""
  source_dir, source_file_part = posixpath.split(source)
  source_name_part, _ = posixpath.splitext(source_file_part)
  #TODO: {{source_gen_dir}}, {{source_out_dir}}, {{response_file_name}}
  substitutions = (
      ('{{source}}', source),
      ('{{source_file_part}}', source_file_part),
      ('{{source_name_part}}', source_name_part),
      ('{{source_dir}}', source_dir),
      ('{{source_root_relative_dir}}', source_dir),
  )
  expanded = a
  for token, replacement in substitutions:
    expanded = expanded.replace(token, replacement)
  return expanded
def WriteActionForEach(out, target, project, sources, synthetic_dependencies):
  """Emits one add_custom_command per input source of an 'action_foreach'.

  gn lists all outputs flattened; they are sliced back into per-input groups.
  Each per-input outputs variable is added to synthetic_dependencies so the
  enclosing add_custom_target depends on every generated command.
  """
  all_outputs = target.properties.get('outputs', [])
  inputs = target.properties.get('sources', [])
  # TODO: consider expanding 'output_patterns' instead.
  # BUG FIX: use floor division. On Python 3, '/' yields a float and the
  # slice below ('all_outputs[float:float]') raises TypeError. '//' is
  # identical to '/' for ints on Python 2, so this stays backward-compatible.
  outputs_per_input = len(all_outputs) // len(inputs)
  for count, source in enumerate(inputs):
    source_abs_path = project.GetAbsolutePath(source)
    outputs = []
    output_directories = set()
    # Slice out this input's share of the flattened outputs list.
    for output in all_outputs[outputs_per_input * count:
                              outputs_per_input * (count+1)]:
      output_abs_path = project.GetAbsolutePath(output)
      outputs.append(output_abs_path)
      output_directory = posixpath.dirname(output_abs_path)
      if output_directory:
        output_directories.add(output_directory)
    outputs_name = '${target}__output_' + str(count)
    SetVariableList(out, outputs_name, outputs)
    out.write('add_custom_command(OUTPUT ')
    WriteVariable(out, outputs_name)
    out.write('\n')
    # Output directories must exist before the script writes into them.
    if output_directories:
      out.write(' COMMAND ${CMAKE_COMMAND} -E make_directory "')
      out.write('" "'.join(map(CMakeStringEscape, output_directories)))
      out.write('"\n')
    script = target.properties['script']
    # TODO: need to expand {{xxx}} in arguments
    arguments = target.properties['args']
    out.write(' COMMAND python "')
    out.write(CMakeStringEscape(project.GetAbsolutePath(script)))
    out.write('"')
    if arguments:
      out.write('\n "')
      # Expand per-source {{placeholder}} tokens in each argument.
      expand = functools.partial(ExpandPlaceholders, source_abs_path)
      out.write('"\n "'.join(map(CMakeStringEscape, map(expand, arguments))))
      out.write('"')
    out.write('\n')
    out.write(' DEPENDS')
    if 'input' in sources:
      WriteVariable(out, sources['input'], ' ')
    out.write(' "')
    out.write(CMakeStringEscape(source_abs_path))
    out.write('"\n')
    #TODO: CMake 3.7 is introducing DEPFILE
    out.write(' WORKING_DIRECTORY "')
    out.write(CMakeStringEscape(project.build_path))
    out.write('"\n')
    out.write(' COMMENT "Action ${target} on ')
    out.write(CMakeStringEscape(source_abs_path))
    out.write('"\n')
    out.write(' VERBATIM)\n')
    synthetic_dependencies.add(outputs_name)
def WriteCopy(out, target, project, sources, synthetic_dependencies):
  """Emits an add_custom_command that copies a gn 'copy' target's files."""
  inputs = target.properties.get('sources', [])
  raw_outputs = target.properties.get('outputs', [])
  # TODO: consider expanding 'output_patterns' instead.
  outputs = []
  for output in raw_outputs:
    output_abs_path = project.GetAbsolutePath(output)
    outputs.append(output_abs_path)
  outputs_name = '${target}__output'
  SetVariableList(out, outputs_name, outputs)
  out.write('add_custom_command(OUTPUT ')
  WriteVariable(out, outputs_name)
  out.write('\n')
  for src, dst in zip(inputs, outputs):
    abs_src_path = CMakeStringEscape(project.GetAbsolutePath(src))
    # CMake distinguishes between copying files and copying directories but
    # gn does not. We assume if the src has a period in its name then it is
    # a file and otherwise a directory.
    if "." in os.path.basename(abs_src_path):
      out.write(' COMMAND ${CMAKE_COMMAND} -E copy "')
    else:
      out.write(' COMMAND ${CMAKE_COMMAND} -E copy_directory "')
    out.write(abs_src_path)
    out.write('" "')
    out.write(CMakeStringEscape(dst))
    out.write('"\n')
  # Re-copy whenever any categorized source of the target changes.
  out.write(' DEPENDS ')
  for sources_type_name in sources.values():
    WriteVariable(out, sources_type_name, ' ')
  out.write('\n')
  out.write(' WORKING_DIRECTORY "')
  out.write(CMakeStringEscape(project.build_path))
  out.write('"\n')
  out.write(' COMMENT "Copy ${target}"\n')
  out.write(' VERBATIM)\n')
  synthetic_dependencies.add(outputs_name)
def WriteCompilerFlags(out, target, project, sources):
  """Emits include dirs, defines, per-language compile flags and link flags
  for the CMake target currently bound to ${target}."""
  # Hack, set linker language to c if no c or cxx files present.
  if not 'c' in sources and not 'cxx' in sources:
    SetCurrentTargetProperty(out, 'LINKER_LANGUAGE', ['C'])
  # Mark uncompiled sources as uncompiled.
  if 'input' in sources:
    SetFilesProperty(out, sources['input'], 'HEADER_FILE_ONLY', ('True',), '')
  if 'other' in sources:
    SetFilesProperty(out, sources['other'], 'HEADER_FILE_ONLY', ('True',), '')
  # Mark object sources as linkable.
  if 'obj' in sources:
    SetFilesProperty(out, sources['obj'], 'EXTERNAL_OBJECT', ('True',), '')
  # TODO: 'output_name', 'output_dir', 'output_extension'
  # This includes using 'source_outputs' to direct compiler output.
  # Includes
  includes = target.properties.get('include_dirs', [])
  if includes:
    out.write('set_property(TARGET "${target}" ')
    out.write('APPEND PROPERTY INCLUDE_DIRECTORIES')
    for include_dir in includes:
      out.write('\n "')
      out.write(project.GetAbsolutePath(include_dir))
      out.write('"')
    out.write(')\n')
  # Defines
  defines = target.properties.get('defines', [])
  if defines:
    SetCurrentTargetProperty(out, 'COMPILE_DEFINITIONS', defines, ';')
  # Compile flags
  # "arflags", "asmflags", "cflags",
  # "cflags_c", "clfags_cc", "cflags_objc", "clfags_objcc"
  # CMake does not have per target lang compile flags.
  # TODO: $<$<COMPILE_LANGUAGE:CXX>:cflags_cc style generator expression.
  #       http://public.kitware.com/Bug/view.php?id=14857
  flags = []
  flags.extend(target.properties.get('cflags', []))
  cflags_asm = target.properties.get('asmflags', [])
  cflags_c = target.properties.get('cflags_c', [])
  cflags_cxx = target.properties.get('cflags_cc', [])
  # ObjC/ObjC++ flags layer on top of the corresponding C/C++ flags.
  cflags_objc = cflags_c[:]
  cflags_objc.extend(target.properties.get('cflags_objc', []))
  cflags_objcc = cflags_cxx[:]
  cflags_objcc.extend(target.properties.get('cflags_objcc', []))
  # If the target compiles exactly one language, its language flags can be
  # applied target-wide; otherwise fall back to per-file properties below.
  if 'c' in sources and not any(k in sources for k in ('asm', 'cxx', 'objc', 'objcc')):
    flags.extend(cflags_c)
  elif 'cxx' in sources and not any(k in sources for k in ('asm', 'c', 'objc', 'objcc')):
    flags.extend(cflags_cxx)
  elif 'objc' in sources and not any(k in sources for k in ('asm', 'c', 'cxx', 'objcc')):
    flags.extend(cflags_objc)
  elif 'objcc' in sources and not any(k in sources for k in ('asm', 'c', 'cxx', 'objc')):
    flags.extend(cflags_objcc)
  else:
    # TODO: This is broken, one cannot generally set properties on files,
    # as other targets may require different properties on the same files.
    if 'asm' in sources and cflags_asm:
      SetFilesProperty(out, sources['asm'], 'COMPILE_FLAGS', cflags_asm, ' ')
    if 'c' in sources and cflags_c:
      SetFilesProperty(out, sources['c'], 'COMPILE_FLAGS', cflags_c, ' ')
    if 'cxx' in sources and cflags_cxx:
      SetFilesProperty(out, sources['cxx'], 'COMPILE_FLAGS', cflags_cxx, ' ')
    if 'objc' in sources and cflags_objc:
      SetFilesProperty(out, sources['objc'], 'COMPILE_FLAGS', cflags_objc, ' ')
    if 'objcc' in sources and cflags_objcc:
      SetFilesProperty(out, sources['objcc'], 'COMPILE_FLAGS', cflags_objcc, ' ')
  if flags:
    SetCurrentTargetProperty(out, 'COMPILE_FLAGS', flags, ' ')
  # Linker flags
  ldflags = target.properties.get('ldflags', [])
  if ldflags:
    SetCurrentTargetProperty(out, 'LINK_FLAGS', ldflags, ' ')
# gn target types that absorb the object files of their source_set deps
# directly into their own link step (see GetObjectSourceDependencies).
gn_target_types_that_absorb_objects = (
  'executable',
  'loadable_module',
  'shared_library',
  'static_library'
)
def WriteSourceVariables(out, target, project):
  """Categorizes the target's sources by language and emits one CMake list
  variable per non-empty category; returns {category: variable_name}."""
  # gn separates the sheep from the goats based on file extensions.
  # A full separation is done here because of flag handing (see Compile flags).
  source_types = {'cxx':[], 'c':[], 'asm':[], 'objc':[], 'objcc':[],
                  'obj':[], 'obj_target':[], 'input':[], 'other':[]}
  all_sources = target.properties.get('sources', [])
  # As of cmake 3.11 add_library must have sources. If there are
  # no sources, add empty.cpp as the file to compile.
  # NOTE(review): this append mutates the target's 'sources' property list
  # in place when the key exists but is empty — presumably harmless; confirm.
  if len(all_sources) == 0:
    all_sources.append(posixpath.join(project.build_path, 'empty.cpp'))
  # TODO .def files on Windows
  for source in all_sources:
    _, ext = posixpath.splitext(source)
    source_abs_path = project.GetAbsolutePath(source)
    source_types[source_file_types.get(ext, 'other')].append(source_abs_path)
  for input_path in target.properties.get('inputs', []):
    input_abs_path = project.GetAbsolutePath(input_path)
    source_types['input'].append(input_abs_path)
  # OBJECT library dependencies need to be listed as sources.
  # Only executables and non-OBJECT libraries may reference an OBJECT library.
  # https://gitlab.kitware.com/cmake/cmake/issues/14778
  if target.gn_type in gn_target_types_that_absorb_objects:
    object_dependencies = set()
    project.GetObjectSourceDependencies(target.gn_name, object_dependencies)
    for dependency in object_dependencies:
      cmake_dependency_name = project.GetCMakeTargetName(dependency)
      obj_target_sources = '$<TARGET_OBJECTS:' + cmake_dependency_name + '>'
      source_types['obj_target'].append(obj_target_sources)
  sources = {}
  for source_type, sources_of_type in source_types.items():
    if sources_of_type:
      sources[source_type] = '${target}__' + source_type + '_srcs'
      SetVariableList(out, sources[source_type], sources_of_type)
  return sources
def WriteTarget(out, target, project):
  """Emits the full CMake declaration for one gn target: sources, the
  add_* command, compile/link flags, and its dependency edges."""
  out.write('\n#')
  out.write(target.gn_name)
  out.write('\n')
  if target.cmake_type is None:
    print ('Target %s has unknown target type %s, skipping.' %
           ( target.gn_name, target.gn_type ) )
    return
  SetVariable(out, 'target', target.cmake_name)
  sources = WriteSourceVariables(out, target, project)
  synthetic_dependencies = set()
  # Script-driven target kinds emit add_custom_command blocks first; the
  # add_custom_target below then depends on their outputs.
  if target.gn_type == 'action':
    WriteAction(out, target, project, sources, synthetic_dependencies)
  if target.gn_type == 'action_foreach':
    WriteActionForEach(out, target, project, sources, synthetic_dependencies)
  if target.gn_type == 'copy':
    WriteCopy(out, target, project, sources, synthetic_dependencies)
  out.write(target.cmake_type.command)
  out.write('("${target}"')
  if target.cmake_type.modifier is not None:
    out.write(' ')
    out.write(target.cmake_type.modifier)
  for sources_type_name in sources.values():
    WriteVariable(out, sources_type_name, ' ')
  if synthetic_dependencies:
    out.write(' DEPENDS')
    for synthetic_dependencie in synthetic_dependencies:
      WriteVariable(out, synthetic_dependencie, ' ')
  out.write(')\n')
  if target.cmake_type.command != 'add_custom_target':
    WriteCompilerFlags(out, target, project, sources)
  libraries = set()
  nonlibraries = set()
  dependencies = set(target.properties.get('deps', []))
  # Transitive OBJECT libraries are in sources.
  # Those sources are dependent on the OBJECT library dependencies.
  # Those sources cannot bring in library dependencies.
  object_dependencies = set()
  if target.gn_type != 'source_set':
    project.GetObjectLibraryDependencies(target.gn_name, object_dependencies)
  for object_dependency in object_dependencies:
    dependencies.update(project.targets.get(object_dependency).get('deps', []))
  for dependency in dependencies:
    gn_dependency_type = project.targets.get(dependency, {}).get('type', None)
    # NOTE(review): cmake_dependency_type is None when the dep's gn type has
    # no mapping; the attribute access below would then raise — confirm all
    # dep types appear in cmake_target_types.
    cmake_dependency_type = cmake_target_types.get(gn_dependency_type, None)
    cmake_dependency_name = project.GetCMakeTargetName(dependency)
    if cmake_dependency_type.command != 'add_library':
      nonlibraries.add(cmake_dependency_name)
    elif cmake_dependency_type.modifier != 'OBJECT':
      if target.cmake_type.is_linkable:
        libraries.add(cmake_dependency_name)
      else:
        nonlibraries.add(cmake_dependency_name)
  # Non-library dependencies.
  if nonlibraries:
    out.write('add_dependencies("${target}"')
    for nonlibrary in nonlibraries:
      out.write('\n "')
      out.write(nonlibrary)
      out.write('"')
    out.write(')\n')
  # Non-OBJECT library dependencies.
  combined_library_lists = [target.properties.get(key, []) for key in ['libs', 'frameworks']]
  external_libraries = list(itertools.chain(*combined_library_lists))
  if target.cmake_type.is_linkable and (external_libraries or libraries):
    library_dirs = target.properties.get('lib_dirs', [])
    if library_dirs:
      SetVariableList(out, '${target}__library_directories', library_dirs)
    system_libraries = []
    for external_library in external_libraries:
      if '/' in external_library:
        # Path-qualified libraries are linked directly by absolute path.
        libraries.add(project.GetAbsolutePath(external_library))
      else:
        if external_library.endswith('.framework'):
          external_library = external_library[:-len('.framework')]
        # Bare names are resolved through find_library into a cache variable.
        system_library = 'library__' + external_library
        if library_dirs:
          system_library = system_library + '__for_${target}'
        out.write('find_library("')
        out.write(CMakeStringEscape(system_library))
        out.write('" "')
        out.write(CMakeStringEscape(external_library))
        out.write('"')
        if library_dirs:
          out.write(' PATHS "')
          WriteVariable(out, '${target}__library_directories')
          out.write('"')
        out.write(')\n')
        system_libraries.append(system_library)
    out.write('target_link_libraries("${target}"')
    for library in libraries:
      out.write('\n "')
      out.write(CMakeStringEscape(library))
      out.write('"')
    for system_library in system_libraries:
      WriteVariable(out, system_library, '\n "')
      out.write('"')
    out.write(')\n')
def WriteProject(project):
  """Writes CMakeLists.txt (bootstrap + regeneration hooks) and
  CMakeLists.ext (the actual per-target rules) into the build directory."""
  out = open(posixpath.join(project.build_path, 'CMakeLists.txt'), 'w+')
  extName = posixpath.join(project.build_path, 'CMakeLists.ext')
  out.write('# Generated by gn_to_cmake.py.\n')
  out.write('cmake_minimum_required(VERSION 3.7 FATAL_ERROR)\n')
  out.write('cmake_policy(VERSION 3.7)\n')
  out.write('project(Skia)\n\n')
  # Placeholder source for targets with no sources (cmake 3.11 requirement).
  out.write('file(WRITE "')
  out.write(CMakeStringEscape(posixpath.join(project.build_path, "empty.cpp")))
  out.write('")\n')
  # Update the gn generated ninja build.
  # If a build file has changed, this will update CMakeLists.ext if
  # gn gen out/config --ide=json --json-ide-script=../../gn/gn_to_cmake.py
  # style was used to create this config.
  out.write('execute_process(COMMAND\n')
  out.write(' ninja -C "')
  out.write(CMakeStringEscape(project.build_path))
  out.write('" build.ninja\n')
  out.write(' RESULT_VARIABLE ninja_result)\n')
  out.write('if (ninja_result)\n')
  out.write(' message(WARNING ')
  out.write('"Regeneration failed running ninja: ${ninja_result}")\n')
  out.write('endif()\n')
  out.write('include("')
  out.write(CMakeStringEscape(extName))
  out.write('")\n')
  out.close()
  out = open(extName, 'w+')
  out.write('# Generated by gn_to_cmake.py.\n')
  out.write('cmake_minimum_required(VERSION 3.7 FATAL_ERROR)\n')
  out.write('cmake_policy(VERSION 3.7)\n')
  # The following appears to be as-yet undocumented.
  # http://public.kitware.com/Bug/view.php?id=8392
  out.write('enable_language(ASM)\n\n')
  # ASM-ATT does not support .S files.
  # output.write('enable_language(ASM-ATT)\n')
  # Current issues with automatic re-generation:
  # The gn generated build.ninja target uses build.ninja.d
  #   but build.ninja.d does not contain the ide or gn.
  # Currently the ide is not run if the project.json file is not changed
  #   but the ide needs to be run anyway if it has itself changed.
  #   This can be worked around by deleting the project.json file.
  out.write('file(READ "')
  gn_deps_file = posixpath.join(project.build_path, 'build.ninja.d')
  out.write(CMakeStringEscape(gn_deps_file))
  out.write('" "gn_deps_string" OFFSET ')
  out.write(str(len('build.ninja: ')))
  out.write(')\n')
  # One would think this would need to worry about escaped spaces
  # but gn doesn't escape spaces here (it generates invalid .d files).
  out.write('string(REPLACE " " ";" "gn_deps" ${gn_deps_string})\n')
  # configure_file registers each gn input as a CMake re-run dependency.
  out.write('foreach("gn_dep" ${gn_deps})\n')
  out.write(' configure_file("')
  out.write(CMakeStringEscape(project.build_path))
  out.write('${gn_dep}" "CMakeLists.devnull" COPYONLY)\n')
  out.write('endforeach("gn_dep")\n')
  out.write('list(APPEND other_deps "')
  out.write(CMakeStringEscape(os.path.abspath(__file__)))
  out.write('")\n')
  out.write('foreach("other_dep" ${other_deps})\n')
  out.write(' configure_file("${other_dep}" "CMakeLists.devnull" COPYONLY)\n')
  out.write('endforeach("other_dep")\n')
  for target_name in project.targets.keys():
    out.write('\n')
    WriteTarget(out, Target(target_name, project), project)
def main():
  """Entry point: loads the gn project json and emits the CMake files."""
  if len(sys.argv) != 2:
    print('Usage: ' + sys.argv[0] + ' <json_file_name>')
    exit(1)
  json_path = sys.argv[1]
  with open(json_path, 'r') as json_file:
    project_json = json.load(json_file)
  WriteProject(Project(project_json))


if __name__ == "__main__":
  main()
| 35.154681 | 93 | 0.692964 |
c2d84cde413dd36bbec255f0d1646c38fa14e338 | 16,053 | py | Python | strategy/scripts/expired_task.py | uptopia/timda_dual_arm | 4f4a0135a4c2aa5f7f8ab7644d548a61baf3337e | [
"MIT"
] | 1 | 2022-03-09T10:53:15.000Z | 2022-03-09T10:53:15.000Z | strategy/scripts/expired_task.py | uptopia/timda_dual_arm | 4f4a0135a4c2aa5f7f8ab7644d548a61baf3337e | [
"MIT"
] | 2 | 2021-06-23T17:06:41.000Z | 2021-06-30T09:44:41.000Z | strategy/scripts/expired_task.py | uptopia/timda_dual_arm | 4f4a0135a4c2aa5f7f8ab7644d548a61baf3337e | [
"MIT"
] | 9 | 2021-02-01T08:20:53.000Z | 2021-09-17T05:52:35.000Z | #!/usr/bin/env python
from enum import IntEnum
# from Queue import Queue
import rospy
#import queue
import Queue as queue
import copy
import numpy as np
from std_msgs.msg import Bool, Int32
from arm_control import DualArmTask
from arm_control import ArmTask, SuctionTask, Command, Status
from get_image_info import GetObjInfo
from math import radians, degrees, sin, cos, pi
# Camera-scan poses per arm: three [position_xyz, euler_deg] waypoints, one
# per shelf level (top to bottom). '<side>_indx' tracks the next level to
# visit for each arm.
c_pose = {'left' :[[[0.38, 0.2, 0.15], [0.0, 65, 0.0]],
                   [[0.38, 0.2, -0.25], [0.0, 65, 0.0]],
                   [[0.38, 0.2, -0.65], [0.0, 65, 0.0]]],
          'right':[[[0.38, -0.2, 0.15], [0.0, 65, 0.0]],
                   [[0.38, -0.2, -0.25], [0.0, 65, 0.0]],
                   [[0.38, -0.2, -0.65], [0.0, 65, 0.0]]],
          'left_indx' : 0, 'right_indx' : 0}
#[0.58, -0.2, -0.2], [0.0, 65, 0.0]
#[0.68, -0.2, -0.2], [0.0, 65, 0.0]line
#[0.4, -0.2, -0.0], [0.0, 65, 0.0]
#[0.4, -0.5, -0.0], [0.0, 65, 0.0]
# Drop-off poses ([position_xyz, euler_deg]) behind the robot, consumed in
# order from a queue — one slot per object placed.
place_pose = [[[-0.38, 0, -0.796],[0.0, 0.0, 0.0]],
              [[-0.38, 0, -0.796],[0.0, 0.0, 0.0]],
              [[-0.43, 0, -0.796],[0.0, 0.0, 0.0]],
              [[-0.43, 0, -0.796],[0.0, 0.0, 0.0]],
              [[-0.38, 0.02, -0.73],[0.0, 0.0, 0.0]],
              [[-0.38, -0.02, -0.73],[0.0, 0.0, 0.0]],
              [[-0.43, -0.02, -0.73],[0.0, 0.0, 0.0]],
              [[-0.43, 0.02, -0.73],[0.0, 0.0, 0.0]],
              [[-0.38, 0, -0.68],[0.0, 0.0, 0.0]],
              [[-0.38, 0, -0.68],[0.0, 0.0, 0.0]],
              [[-0.43, 0, -0.68],[0.0, 0.0, 0.0]],
              [[-0.43, 0, -0.7],[0.0, 0.0, 0.0]],
              [[-0.38, 0, -0.7],[0.0, 0.0, 0.0]],
              [[-0.38, 0, -0.7],[0.0, 0.0, 0.0]],
              [[-0.43, 0, -0.7],[0.0, 0.0, 0.0]],
              [[-0.43, 0, -0.7],[0.0, 0.0, 0.0]]]
# Reference object poses per shelf level (left/right pair for each of the
# three levels); not referenced by the code visible in this file.
obj_pose = [[[[0.465, -0.1, -0.18],  [0, 90, 0]],
             [[0.465,  0.1, -0.18],  [0, 90, 0]]],
            [[[0.545, -0.1, -0.43],  [0, 90, 0]],
             [[0.545,  0.1, -0.43],  [0, 90, 0]]],
            [[[0.6,   -0.2, -0.883], [0, 90, 0]],
             [[0.6,    0.2, -0.883], [0, 90, 0]]]]
class ObjInfo(dict):
    """Dict-backed record describing one detected shelf object."""

    def __init__(self):
        defaults = {
            'id': 0,
            'side_id': 'front',       # 'front', 'back', 'side'
            'name': 'plum_riceball',  # 'plum_riceball', 'salmon_riceball', 'sandwich', 'burger', 'drink', 'lunch_box'
            'state': 'new',           # 'new', 'old', 'expired'
            'pos': None,
            'euler': None,
            'sucang': 0,
        }
        self.update(defaults)
class State(IntEnum):
    """High-level finite-state-machine states for the sorting task; each arm
    advances through these in state_control()."""
    init = 0
    get_obj_inf = 1
    select_obj = 2
    move2obj = 3
    check_pose = 4
    pick = 5
    place = 6
    finish = 7
class ExpiredTask:
    """Dual-arm shelf-sorting task: scan each shelf level with the wrist
    camera, pick detected objects with a suction gripper, and place them at
    predefined drop-off poses. Each arm runs the State machine independently;
    state_control() picks the next state and strategy() emits arm commands."""
    def __init__(self, _name, en_sim):
        self.name = _name
        self.en_sim = en_sim
        self.state = State.init
        self.dual_arm = DualArmTask(self.name, self.en_sim)
        self.camara = GetObjInfo()
        self.left_cpose_queue = queue.Queue()
        self.right_cpose_queue = queue.Queue()
        self.place_pose_queue = queue.Queue()
        self.object_queue = queue.Queue()
        self.object_list = []
        self.left_tar_obj = queue.Queue()
        self.right_tar_obj = queue.Queue()
        self.retry_obj_queue_left = queue.Queue()
        self.retry_obj_queue_right = queue.Queue()
        self.target_obj_queue = {'left' : self.left_tar_obj, 'right' : self.right_tar_obj}
        self.target_obj = {'left': None, 'right': None}
        self.retry_obj_queue = {'left': self.retry_obj_queue_left, 'right': self.retry_obj_queue_right}
        # Flags indexed by detection id (assumes ids stay below 100).
        self.obj_done = np.zeros((100), dtype=bool)
        self.obj_retry = np.zeros((100), dtype=bool)
        self.next_level = {'left': False, 'right': False}
        self.init()
    def init(self):
        """Preloads the fixed drop-off poses into the place queue."""
        for pose in place_pose:
            self.place_pose_queue.put(pose)
    def get_obj_inf(self, side):
        """Queries the camera from the given arm's pose and enqueues every
        detected object whose surface normal is reachable by suction."""
        fb = self.dual_arm.get_feedback(side)
        ids, mats, names, exps, side_ids = self.camara.get_obj_info(side, fb.orientation)
        if ids is None:
            return
        for _id, mat, name, exp, side_id in zip(ids, mats, names, exps, side_ids):
            obj = ObjInfo()
            obj['id'] = _id
            obj['name'] = name
            obj['expired'] = exp
            obj['side_id'] = side_id
            obj['pos'] = mat[0:3, 3]
            obj['vector'] = mat[0:3, 2]
            obj['sucang'], roll = self.dual_arm.suc2vector(mat[0:3, 2], [0, 1.57, 0])
            obj['euler'] = [roll, 90, 0]
            # Reject objects whose z-axis points too far downward (unreachable).
            if obj['vector'][2] > -0.2:
                self.object_queue.put(obj)
                print('fuck+++++============--------------', obj['pos'])
            else:
                print('fuck < -0.2 ', obj['vector'])
                print('fuckkkkkkkkkkkkkkkkkkkkkkk', obj['id'])
    def arrange_obj(self, side):
        # Placeholder: no reordering of detected objects is performed.
        pass
    def check_pose(self, side):
        """Re-detects the current target object at close range and refreshes
        its position (and suction angle when the surface allows)."""
        self.target_obj[side] = self.target_obj_queue[side].get()
        fb = self.dual_arm.get_feedback(side)
        ids, mats, _, _, _ = self.camara.get_obj_info(side, fb.orientation)
        if ids is None:
            return
        for _id, mat in zip(ids, mats):
            if _id == self.target_obj[side]['id']:
                self.target_obj[side]['pos'] = mat[0:3, 3]
                if mat[2, 2] > -0.1:
                    self.target_obj[side]['sucang'], roll = self.dual_arm.suc2vector(mat[0:3, 2], [0, 1.57, 0])
                    self.target_obj[side]['euler'] = [roll, 90, 0]
        pass
    def state_control(self, state, side):
        """Returns the next State for *side* given the finished state and the
        arm/suction feedback; None terminates the loop."""
        if state is None:
            state = State.init
        elif state == State.init:
            state = State.get_obj_inf
        elif state == State.get_obj_inf:
            state = State.select_obj
        elif state == State.select_obj:
            if self.object_queue.empty():
                # No detections left at this level: next level or finish.
                if c_pose[side+'_indx'] >= 3:
                    state = State.finish
                else:
                    state = State.get_obj_inf
            else:
                state = State.move2obj
        elif state == State.move2obj:
            state = State.check_pose
        elif state == State.check_pose:
            state = State.pick
        elif state == State.pick:
            if side == 'left':
                is_grip = self.dual_arm.left_arm.suction.is_grip
            else:
                is_grip = self.dual_arm.right_arm.suction.is_grip
            if is_grip:
                state = State.place
            elif self.next_level[side] == True:
                self.next_level[side] = False
                if c_pose[side+'_indx'] >= 3:
                    state = State.finish
                else:
                    state = State.get_obj_inf
            else:
                # Failed grasp: queue the object once for a retry.
                if self.obj_retry[self.target_obj[side]['id']] == False:
                    self.retry_obj_queue[side].put(self.target_obj[side])
                state = State.move2obj
        elif state == State.place:
            if self.next_level[side] == True:
                self.next_level[side] = False
                if c_pose[side+'_indx'] >= 3:
                    state = State.finish
                else:
                    state = State.get_obj_inf
            else:
                state = State.move2obj
        elif state == State.finish:
            state = None
        return state
    def strategy(self, state, side):
        """Builds and sends the command queue that realizes *state* on *side*."""
        cmd = Command()
        cmd_queue = queue.Queue()
        if state == State.init:
            cmd['cmd'] = 'jointMove'
            cmd['jpos'] = [0, 0, -1.2, 0, 1.87, 0, -0.87, 0]
            cmd['state'] = State.init
            cmd['speed'] = 40
            cmd_queue.put(copy.deepcopy(cmd))
            self.dual_arm.send_cmd(side, False, cmd_queue)
        elif state == State.get_obj_inf:
            # Move to the current shelf-level camera pose, then hold.
            cmd['suc_cmd'] = 'Off'
            cmd['cmd'], cmd['mode'] = 'ikMove', 'p2p'
            cmd['pos'], cmd['euler'], cmd['phi'] = c_pose[side][c_pose[side+'_indx']][0], c_pose[side][c_pose[side+'_indx']][1], 0
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['suc_cmd'] = 0
            cmd['cmd'] = 'occupied'
            cmd['state'] = State.get_obj_inf
            cmd_queue.put(copy.deepcopy(cmd))
            side = self.dual_arm.send_cmd(side, False, cmd_queue)
            if side != 'fail':
                c_pose[side+'_indx'] += 1
            else:
                print('fuckfailfuckfailfuckfail')
        elif state == State.select_obj:
            print('oooooooooooooo')
            self.get_obj_inf(side)
            self.arrange_obj(side)
            cmd['cmd'], cmd['state'] = None, State.select_obj
            cmd_queue.put(copy.deepcopy(cmd))
            self.dual_arm.send_cmd(side, True, cmd_queue)
        elif state == State.move2obj:
            # Choose a target: retries first, then fresh detections on this
            # arm's side of the shelf (y-threshold keeps arms apart).
            print('fuckmove2obj ++++++++++ ', side)
            obj = None
            chosed = False
            if self.retry_obj_queue[side].empty() and self.target_obj_queue[side].empty():
                if self.object_queue.empty():
                    self.next_level[side] = True
                    print('fuck10')
                    return
                for _ in range(self.object_queue.qsize()):
                    obj = self.object_queue.get()
                    if self.obj_done[obj['id']] == False:
                        if side == 'left' and obj['pos'][1] < -0.02:
                            self.object_queue.put(obj)
                            continue
                        if side == 'right' and obj['pos'][1] > 0.02:
                            self.object_queue.put(obj)
                            continue
                        self.obj_done[obj['id']] = True
                        chosed = True
                        print('fuck1')
                        break
                if chosed is False:
                    self.next_level[side] = True
                    print('fuck2')
                    return
                print('fuck11')
            elif self.target_obj_queue[side].empty():
                obj = self.retry_obj_queue[side].get()
                if self.obj_retry[obj['id']] == False:
                    self.obj_retry[obj['id']] = True
                    print('fuck3')
                else:
                    print('fuck4')
                    return
            else:
                obj = self.target_obj_queue[side].get()
                if self.obj_done[obj['id']] == False:
                    self.obj_retry[obj['id']] = True
                    print('fuck5')
                else:
                    print('fuck6')
                    return
            # Approach offset: stop slightly above/behind the object for the
            # close-range re-detection in check_pose.
            pos = copy.deepcopy(obj['pos'])
            pos[1] += 0.032
            pos[2] += 0.065
            cmd['suc_cmd'] = 'Off'
            cmd['cmd'], cmd['mode'], cmd['state'] = 'ikMove', 'p2p', State.move2obj
            cmd['pos'], cmd['euler'], cmd['phi'] = [0.4, pos[1], pos[2]], [0, 90, 0], 0
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['cmd'] = 'occupied'
            cmd_queue.put(copy.deepcopy(cmd))
            side = self.dual_arm.send_cmd(side, False, cmd_queue)
            if side == 'fail':
                # Arm unavailable: return the object to the pool.
                self.object_queue.put(obj)
                self.obj_done[obj['id']] = False
                print('fffffffffffuuuuuuuuuuccccccccccckkkkkkkkkkk')
            else:
                self.target_obj_queue[side].put(obj)
                print('side = ', side, 'id = ',obj['id'])
        elif state == State.check_pose:
            self.check_pose(side)
            cmd['cmd'], cmd['state'] = 'occupied', State.check_pose
            cmd_queue.put(copy.deepcopy(cmd))
            self.dual_arm.send_cmd(side, True, cmd_queue)
        elif state == State.pick:
            # Approach along the suction normal, grasp, lift, and retract.
            obj = copy.deepcopy(self.target_obj[side])
            if obj['vector'][2] > 0.7:
                obj['pos'][0] -= 0.02
                # obj['pos'][2] += 0.05
            cmd['state'] = State.pick
            cmd['cmd'], cmd['mode'] = 'fromtNoaTarget', 'line'
            cmd['pos'], cmd['euler'], cmd['phi'] = obj['pos'], obj['euler'], 0
            cmd['suc_cmd'], cmd['noa'] = obj['sucang'], [0, 0, -0.03]
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['cmd'], cmd['mode'], cmd['noa'] = 'grasping', 'line', [0, 0, 0.05]
            cmd['suc_cmd'], cmd['speed'] = 'On', 15
            if obj['vector'][2] < 0.2:
                cmd['speed'] = 30
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['cmd'], cmd['mode'], = 'relativePos', 'line'
            cmd['speed'], cmd['suc_cmd'] = 40, 'calibration'
            cmd['pos'] = [0, 0, 0.03]
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['cmd'], cmd['mode'] = 'ikMove', 'line'
            cmd['pos'], cmd['euler'], cmd['phi'] = [0.45, obj['pos'][1], obj['pos'][2]+0.08], obj['euler'], 0
            cmd_queue.put(copy.deepcopy(cmd))
            self.dual_arm.send_cmd(side, True, cmd_queue)
        elif state == State.place:
            # Tuck the arm, move to the next free drop-off slot, release.
            cmd['state'] = State.place
            cmd['cmd'] = 'jointMove'
            cmd['jpos'] = [0, 0, -1.5, 0, 2.07, 0, -0.57, 0]
            cmd_queue.put(copy.deepcopy(cmd))
            pose = self.place_pose_queue.get()
            pos, euler = pose[0], pose[1]
            # Lateral offset keeps the two arms' drop zones separated.
            if side == 'left':
                pos[1] += 0.12
            else:
                pos[1] -= 0.12
            cmd['cmd'], cmd['mode'] = 'fromtNoaTarget', 'line'
            cmd['pos'], cmd['euler'], cmd['phi'] = pos, euler, 0
            cmd['suc_cmd'], cmd['noa'] = 0, [0, 0, -0.2]
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['cmd'], cmd['mode'], cmd['noa'] = 'noaMove', 'line', [0, 0, 0.2]
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['cmd'], cmd['mode'], cmd['noa'] = 'noaMove', 'line', [0, 0, -0.2]
            cmd['suc_cmd'] = 'Off'
            cmd_queue.put(copy.deepcopy(cmd))
            cmd['cmd'] = 'jointMove'
            cmd['jpos'] = [0, 0, -1.8, 0, 2.57, 0, -0.87, 0]
            cmd_queue.put(copy.deepcopy(cmd))
            self.dual_arm.send_cmd(side, True, cmd_queue)
        elif state == State.finish:
            cmd['suc_cmd'] = 'Off'
            cmd['cmd'] = 'jointMove'
            cmd['jpos'] = [0, 0, -1, 0, 1.57, 0, -0.57, 0]
            cmd['state'] = State.finish
            cmd_queue.put(copy.deepcopy(cmd))
            self.dual_arm.send_cmd(side, False, cmd_queue)
        return side
    def process(self):
        """Main loop: steps the state machine (right arm only — the left-arm
        branch is commented out) until it reaches the terminal state."""
        rate = rospy.Rate(10)
        rospy.on_shutdown(self.dual_arm.shutdown)
        while True:
            #print("8")
            # l_status = self.dual_arm.left_arm.status
            # # print(l_status)
            # if l_status == Status.idle or l_status == Status.occupied:
            #     l_state = self.state_control(self.dual_arm.left_arm.state, 'left')
            #     self.strategy(l_state, 'left')
            # rate.sleep()
            #==============================================================================
            r_status = self.dual_arm.right_arm.status
            if r_status == Status.idle or r_status == Status.occupied:
                r_state = self.state_control(self.dual_arm.right_arm.state, 'right')
                self.strategy(r_state, 'right')
            rate.sleep()
            # if l_state is None and r_state is None:
            #     if l_status == Status.idle and r_status == Status.idle:
            # #         return
            # if l_state is None :
            #     if l_status == Status.idle:
            #         return
            if r_state is None :
                if r_status == Status.idle:
                    return
# Script entry point: run the "expired" pick-and-place task once, then shut
# the dual-arm controller down cleanly.
if __name__ == '__main__':
rospy.init_node('expired')
# Second argument is a flag passed to the task setup -- presumably
# "enable hardware" or similar; TODO confirm against ExpiredTask.__init__.
strategy = ExpiredTask('dual_arm', True)
# Make sure the arms stop if ROS is interrupted (Ctrl-C, node kill).
rospy.on_shutdown(strategy.dual_arm.shutdown)
strategy.process()
strategy.dual_arm.shutdown()
del strategy.dual_arm
| 41.373711 | 130 | 0.465645 |
4bd866629b3ec61586fe9568d9c3ea4fddc73602 | 2,925 | py | Python | aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/TagResourcesRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/TagResourcesRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/TagResourcesRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class TagResourcesRequest(RpcRequest):
    """RPC request for the R-kvstore ``TagResources`` API (version 2015-01-01)."""

    def __init__(self):
        RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'TagResources', 'redisa')
        self.set_method('POST')
        # Wire up endpoint-resolution data when the SDK core supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_Tags(self):
        return self.get_query_params().get('Tags')

    def set_Tags(self, Tags):
        # Tags are flattened into 1-based indexed query parameters:
        # Tag.1.Key, Tag.1.Value, Tag.2.Key, ...
        for position, tag in enumerate(Tags, start=1):
            if tag.get('Value') is not None:
                self.add_query_param('Tag.' + str(position) + '.Value', tag.get('Value'))
            if tag.get('Key') is not None:
                self.add_query_param('Tag.' + str(position) + '.Key', tag.get('Key'))

    def get_ResourceIds(self):
        return self.get_query_params().get('ResourceIds')

    def set_ResourceIds(self, ResourceIds):
        # Resource ids are flattened as ResourceId.1, ResourceId.2, ...
        for position, resource_id in enumerate(ResourceIds, start=1):
            if resource_id is not None:
                self.add_query_param('ResourceId.' + str(position), resource_id)

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_ResourceType(self):
        return self.get_query_params().get('ResourceType')

    def set_ResourceType(self, ResourceType):
        self.add_query_param('ResourceType', ResourceType)
776c51df0abcc3d12c1440cc4be31da316261516 | 1,937 | py | Python | journey11/src/lib/state.py | parrisma/AI-intuition | 3b081696b1d226815e029cbb536fac5e4d3de9a7 | [
"MIT"
] | null | null | null | journey11/src/lib/state.py | parrisma/AI-intuition | 3b081696b1d226815e029cbb536fac5e4d3de9a7 | [
"MIT"
] | 4 | 2020-04-26T18:18:22.000Z | 2020-05-16T14:47:32.000Z | journey11/src/lib/state.py | parrisma/AI-intuition | 3b081696b1d226815e029cbb536fac5e4d3de9a7 | [
"MIT"
] | null | null | null | import logging
from typing import List
from enum import EnumMeta, Enum, unique
class DefaultStateEnumMeta(EnumMeta):
    """Enum metaclass: calling the enum class with no argument yields its first member."""

    # Sentinel marking "no value supplied" (None could be a legitimate value).
    default = object()

    def __call__(cls, value=default, *args, **kwargs):
        if value is not DefaultStateEnumMeta.default:
            # Normal enum lookup by value.
            return super().__call__(value, *args, **kwargs)
        # No explicit value: hand back the first declared member.
        return next(iter(cls))
@unique
class State(Enum, metaclass=DefaultStateEnumMeta):
    """Ordered ten-step state value (S0..S9).

    Integer addition is saturating: stepping past S9 yields S9 and stepping
    below S0 yields S0.  Calling ``State()`` with no argument returns the
    first member (see DefaultStateEnumMeta).
    """

    S0 = 0
    S1 = 1
    S2 = 2
    S3 = 3
    S4 = 4
    S5 = 5
    S6 = 6
    S7 = 7
    S8 = 8
    S9 = 9

    def id(self):
        """Return the numeric id of this state."""
        return self.value

    @classmethod
    def range(cls,
              start_state: 'State',
              end_state: 'State') -> List['State']:
        """Return the inclusive list of states from start_state to end_state.

        The result is ordered from start_state towards end_state, so it is
        reversed when start_state comes after end_state.
        """
        reverse = start_state.value > end_state.value
        lo, hi = (end_state, start_state) if reverse else (start_state, end_state)
        # Members iterate in declaration order, i.e. ascending value.
        rng = [member for member in State
               if lo.value <= member.value <= hi.value]
        return rng[::-1] if reverse else rng

    def __str__(self) -> str:
        return "State:{}".format(self.value)

    def __add__(self, other):
        """Saturating addition of an integer offset (negative allowed)."""
        if not isinstance(other, int):
            # BUG FIX: previously an empty message was logged and raised,
            # making the failure impossible to diagnose.
            msg = "State can only be added to an int, got {!r}".format(other)
            logging.critical(msg)
            raise ValueError(msg)
        if other == 0:
            return self
        if other > 0:
            # rng[0] is self, rng[k] is self advanced by k steps.
            rng = self.range(self, State.S9)
            return State.S9 if other > len(rng) - 1 else rng[other]
        steps = abs(other)
        # Descending list from self down to S0.
        rng = self.range(State.S0, self)[::-1]
        return State.S0 if steps > len(rng) - 1 else rng[steps]

    def __radd__(self, other):
        return self.__add__(other)
| 23.337349 | 55 | 0.494063 |
214c1e7773dcecd405c305ef1bf4bbdeec0bd01a | 726 | py | Python | ParaViewCore/ServerManager/Default/Testing/Python/GhostCellsInMergeBlocks.py | brown-ccv/paraview-scalable | 64b221a540737d2ac94a120039bd8d1e661bdc8f | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2019-09-27T08:04:34.000Z | 2019-10-16T22:30:54.000Z | ParaViewCore/ServerManager/Default/Testing/Python/GhostCellsInMergeBlocks.py | sakjain92/paraview | f3af0cd9f6750e24ad038eac573b870c88d6b7dd | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ParaViewCore/ServerManager/Default/Testing/Python/GhostCellsInMergeBlocks.py | sakjain92/paraview | f3af0cd9f6750e24ad038eac573b870c88d6b7dd | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from paraview.simple import *
from paraview import smtesting
import sys
import os
import os.path
# Parse the standard smtesting command-line arguments (data dir, baseline, ...).
smtesting.ProcessCommandLineArguments()
xmf_file = os.path.join(smtesting.DataDir, "poisson_3dall_2.xmf")
XDMFReader(FileNames=xmf_file)
# Merge the multi-block dataset into a single grid -- presumably the
# operation under test here (ghost-cell handling); confirm against baseline.
MergeBlocks()
Show()
view = Render()
# Fixed camera so the rendered image is reproducible for regression comparison.
view.CameraViewUp = [0.40869219753099151, 0.77141145522009946, -0.48774486612623352]
view.CameraPosition = [-1.1388262528701965, 2.72641269058122, 2.6480590031133158]
view.CameraFocalPoint = [0.49999999999999989, 0.49999999999999978, 0.49999999999999989]
view.CameraParallelScale = 0.90892941461412635
view.CenterOfRotation = [0.5, 0.5, 0.5]
Render()
# Compare the rendering against the stored baseline image; fail on mismatch.
if not smtesting.DoRegressionTesting(view.SMProxy):
raise smtesting.TestError('Test failed.')
| 27.923077 | 87 | 0.800275 |
2f0aca9e72e088b9155fb29157e67993611eb359 | 131 | py | Python | django_private_chat/apps.py | MattBrown88/django-private-chat | 4e7d71c82ecbc5b206f684353be9269d48a2b947 | [
"ISC"
] | 1 | 2019-04-07T23:14:21.000Z | 2019-04-07T23:14:21.000Z | django_private_chat/apps.py | MattBrown88/django-private-chat | 4e7d71c82ecbc5b206f684353be9269d48a2b947 | [
"ISC"
] | 4 | 2020-06-05T21:50:11.000Z | 2021-06-10T21:43:28.000Z | django_private_chat/apps.py | MattBrown88/django-private-chat | 4e7d71c82ecbc5b206f684353be9269d48a2b947 | [
"ISC"
] | null | null | null | # -*- coding: utf-8
from django.apps import AppConfig
class DjangoPrivateChatConfig(AppConfig):
name = 'django_private_chat'
| 18.714286 | 41 | 0.755725 |
a3b6d9a956d2679e81fce09c200ed5977a028282 | 3,672 | py | Python | homeassistant/components/zwave/util.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 4 | 2019-07-03T22:36:57.000Z | 2019-08-10T15:33:25.000Z | homeassistant/components/zwave/util.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | 7 | 2019-08-23T05:26:02.000Z | 2022-03-11T23:57:18.000Z | homeassistant/components/zwave/util.py | alindeman/home-assistant | b274b10f3874c196f0db8f9cfa5f47eb756d1f8e | [
"Apache-2.0"
] | null | null | null | """Zwave util methods."""
import asyncio
import logging
import homeassistant.util.dt as dt_util
from . import const
_LOGGER = logging.getLogger(__name__)
def check_node_schema(node, schema):
    """Check if node matches the passed node schema."""
    # (schema key, node attribute, debug message) triples; the node matches
    # only if every constrained attribute is listed in the schema.
    checks = (
        (const.DISC_NODE_ID, node.node_id,
         "node.node_id %s not in node_id %s"),
        (const.DISC_GENERIC_DEVICE_CLASS, node.generic,
         "node.generic %s not in generic_device_class %s"),
        (const.DISC_SPECIFIC_DEVICE_CLASS, node.specific,
         "node.specific %s not in specific_device_class %s"),
    )
    for key, attribute, message in checks:
        if key in schema and attribute not in schema[key]:
            _LOGGER.debug(message, attribute, schema[key])
            return False
    return True
def check_value_schema(value, schema):
    """Check if the value matches the passed value schema."""
    # (schema key, value attribute, debug message) triples; each constrained
    # attribute must be among the schema's allowed values.
    checks = (
        (const.DISC_COMMAND_CLASS, value.command_class,
         "value.command_class %s not in command_class %s"),
        (const.DISC_TYPE, value.type,
         "value.type %s not in type %s"),
        (const.DISC_GENRE, value.genre,
         "value.genre %s not in genre %s"),
        (const.DISC_INDEX, value.index,
         "value.index %s not in index %s"),
        (const.DISC_INSTANCE, value.instance,
         "value.instance %s not in instance %s"),
    )
    for key, attribute, message in checks:
        if key in schema and attribute not in schema[key]:
            _LOGGER.debug(message, attribute, schema[key])
            return False
    if const.DISC_SCHEMAS in schema:
        # The value must satisfy at least one of the sub-schemas; any()
        # short-circuits on the first match, like the original loop did.
        if not any(check_value_schema(value, sub_schema)
                   for sub_schema in schema[const.DISC_SCHEMAS]):
            return False
    return True
def node_name(node):
    """Return the name of the node."""
    # Unparsed nodes have no reliable name/manufacturer info yet.
    if not is_node_parsed(node):
        return 'Unknown Node {}'.format(node.node_id)
    return node.name or '{} {}'.format(node.manufacturer_name, node.product_name)
async def check_has_unique_id(entity, ready_callback, timeout_callback):
    """Wait for entity to have unique_id.

    Polls once per second; calls ready_callback(seconds_waited) when the
    unique_id appears, or timeout_callback(seconds_waited) after
    NODE_READY_WAIT_SECS seconds.
    """
    start_time = dt_util.utcnow()
    while True:
        elapsed = int((dt_util.utcnow() - start_time).total_seconds())
        if entity.unique_id:
            ready_callback(elapsed)
            return
        if elapsed >= const.NODE_READY_WAIT_SECS:
            # Give up waiting for the unique_id to appear.
            timeout_callback(elapsed)
            return
        await asyncio.sleep(1)
def is_node_parsed(node):
    """Check whether the node has been parsed or still waiting to be parsed."""
    has_product_info = node.manufacturer_name and node.product_name
    return bool(has_product_info or node.name)
| 38.652632 | 79 | 0.651688 |
36f6067a48d943ab8e97b75fe65a13a5e2245c79 | 7,862 | py | Python | 2016/arkansas_pp/ar_locality.py | democracyworks/hand-collection-to-vip | 0b6e90f6055c1325930b53905bba2d1bfc111457 | [
"BSD-3-Clause"
] | null | null | null | 2016/arkansas_pp/ar_locality.py | democracyworks/hand-collection-to-vip | 0b6e90f6055c1325930b53905bba2d1bfc111457 | [
"BSD-3-Clause"
] | 2 | 2016-08-12T20:52:24.000Z | 2021-02-08T20:24:27.000Z | 2016/arkansas_pp/ar_locality.py | democracyworks/hand-collection-to-vip | 0b6e90f6055c1325930b53905bba2d1bfc111457 | [
"BSD-3-Clause"
] | 1 | 2018-08-17T21:16:30.000Z | 2018-08-17T21:16:30.000Z | """
Contains class that generates the 'locality.txt' file for any state.
locality.txt contains the following columns:
election_administration_id,
external_identifier_type,
external_identifier_othertype,
external_identifier_value,
name,
polling_location_ids,
state_id,
type,
other_type,
id
"""
import pandas as pd
import hashlib
import re
import config
from ar_polling_location import PollingLocationTxt
class LocalityTxt(object):
"""Generates the VIP 'locality.txt' file from a polling-place DataFrame.

NOTE(review): this is Python 2 code (print statements, dict.iteritems).
Each create_*/get_* method produces one column; build_locality_txt() wires
them to DataFrame rows via pandas apply().
"""
def __init__(self, polling_place_df, state):
self.base_df = polling_place_df
self.state = state
def create_election_administration_id(self, index):
"""Return the election_administration_id column value (always empty here)."""
return ''
def get_external_identifier_type(self):
"""Return the fixed external identifier type used for all rows."""
return "ocd-id"
def get_external_identifier_othertype(self):
# create conditional when/if column is present
return ''
def get_external_identifier_value(self, external_identifier_value):
"""Extracts external identifier (ocd-division)."""
if external_identifier_value:
return external_identifier_value
else:
return ''
def create_name(self, index, division_description):
"""
Creates a name by concatenating the 'locality' (town name along with town or county designation)
with an 'index_str' based on the DataFrame's row index. '0s' are added, if necessary, to
maintain a consistent id length.
"""
if division_description:
# [:-3] drops a 3-character suffix from the county string -- TODO confirm
# what suffix the input data carries.
locality = str(division_description[:-3].lower().replace(" ", "_"))
else:
locality = ''
print 'Missing data at row ' + str(index) + '.'
# Add leading '0s' depending on index number.
if index <= 9:
index_str = '000' + str(index)
elif index in range(10,100):
index_str = '00' + str(index)
elif index in range(100, 1000):
index_str = '0' + str(index)
else:
index_str = str(index)
return locality + index_str
def get_polling_location_ids(self, precinct, polling_location_id):
"""Returns empty value so that polling locations are not linked to locality."""
return ''
def polling_location_ids_for_precincts(self, precinct, polling_location_id):
"""Returns the polling_location_id specifically to pass the data forward to the precinct script."""
#county_wide = 'VOTE CENTER'
#if precinct != county_wide:
# return polling_location_id
#else:
# return ''
return polling_location_id
def create_state_id(self):
"""Creates the state_id by matching a key in the state_dict and retrieving
and modifying its value. A '0' is added, if necessary, to maintain a
consistent id length.
"""
for key, value in config.fips_dict.iteritems():
if key == self.state:
state_num = value
if state_num <=9:
state_num = '0' + str(state_num)
else:
state_num = str(state_num)
return 'st' + state_num
def get_type(self):
"""Return the fixed locality 'type' column value."""
return 'other'
def get_other_type(self):
"""Return the 'other_type' column value (always empty here)."""
return ''
def create_id(self, ocd_division):
"""Creates a stable 8-digit id by hashing the ocd_division and prefixing 'loc'."""
id = int(hashlib.sha1(str(ocd_division).strip()).hexdigest(), 16) % (10 ** 8)
print 'OCD-DIV', ocd_division, id
return 'loc' + str(id)
def build_locality_txt(self):
"""
New columns that match the 'locality.txt' template are inserted into the DataFrame, apply() is
used to run methods that generate the values for each row of the new columns.
"""
self.base_df['election_administration_id'] = self.base_df.apply(
lambda row: self.create_election_administration_id(row['index']), axis=1)
self.base_df['external_identifier_type'] = self.base_df.apply(
lambda row: self.get_external_identifier_type(), axis=1)
self.base_df['external_identifier_othertype'] = self.base_df.apply(
lambda row: self.get_external_identifier_othertype(), axis=1)
self.base_df['external_identifier_value'] = self.base_df.apply(
lambda row: self.get_external_identifier_value(row['ocd_division']), axis=1)
self.base_df['name'] = self.base_df.apply(
lambda row: self.create_name(row['index'], row['county']), axis=1)
self.base_df['polling_location_ids'] = self.base_df.apply(
lambda row: self.get_polling_location_ids(row['precinct'], row['id']), axis=1)
self.base_df['polling_location_ids_for_precincts'] = self.base_df.apply(
lambda row: self.polling_location_ids_for_precincts(row['precinct'], row['id']), axis=1)
self.base_df['state_id'] = self.base_df.apply(
lambda row: self.create_state_id(), axis=1)
self.base_df['type'] = self.base_df.apply(
lambda row: self.get_type(), axis=1)
self.base_df['other_type'] = self.base_df.apply(
lambda row: self.get_other_type(), axis=1)
self.base_df['id'] = self.base_df.apply(
lambda row: self.create_id(row['ocd_division']), axis=1)
return self.base_df
def export_for_precinct(self):
"""Return the locality DataFrame ordered for the precinct script (keeps location ids)."""
loc = self.build_locality_txt()
print loc.columns
# reorder columns to VIP format
cols = ['election_administration_id', 'external_identifier_type', 'external_identifier_othertype',
'external_identifier_value', 'name', 'polling_location_ids_for_precincts', 'state_id', 'type',
'other_type', 'grouped_index', 'id', 'county', 'precinct']
final = loc.reindex(columns=cols)
print final
return final
def write_locality_txt(self):
"""Drops base DataFrame columns then writes final dataframe to text or csv file"""
loc = self.build_locality_txt()
# reorder columns to VIP format
cols = ['election_administration_id', 'external_identifier_type', 'external_identifier_othertype',
'external_identifier_value', 'name', 'polling_location_ids', 'state_id', 'type',
'other_type', 'grouped_index', 'id']
final = loc.reindex(columns=cols)
final = final.drop_duplicates(subset=['id'])
final.to_csv(config.output + 'locality.txt', index=False, encoding='utf-8') # send to txt file
final.to_csv(config.output + 'locality.csv', index=False, encoding='utf-8') # send to csv file
# Script entry point: load the hand-collected polling-place CSV, run the
# polling-location pass, then emit locality.txt/csv.
if __name__ == '__main__':
state_file = config.state_file
colnames = ['county', 'ocd_division', 'phone', 'email', 'precinct', 'directions', 'name', 'address1', 'address2',
'city', 'state', 'zip_code', 'start_time', 'end_time']
# skiprows=1 drops the source file's header row; names= supplies our own.
polling_place_df = pd.read_csv(config.input_path + state_file, names=colnames, sep=',', encoding='ISO-8859-1', skiprows=1)
#polling_place_df = polling_place_df[polling_place_df.address1.notnull()]
#polling_place_df = polling_place_df[polling_place_df.city.notnull()]
polling_place_df['index'] = polling_place_df.index
# Polling-location pass adds the 'id' column consumed by LocalityTxt.
pl = PollingLocationTxt(polling_place_df, config.state_abbreviation_upper)
polling_place_df = pl.export_for_schedule_and_locality()
print polling_place_df
lt = LocalityTxt(polling_place_df, config.state)
lt.write_locality_txt()
#lt.export_for_precinct()
| 36.230415 | 127 | 0.626685 |
d31842f7e9615b8585c6355979c567c341aa2daa | 181 | py | Python | test_output/nested_if.py | roshangol/executed-path-visualize | 1759c12b0048fe117205990b151d2f5f57ad9616 | [
"MIT"
] | null | null | null | test_output/nested_if.py | roshangol/executed-path-visualize | 1759c12b0048fe117205990b151d2f5f57ad9616 | [
"MIT"
] | null | null | null | test_output/nested_if.py | roshangol/executed-path-visualize | 1759c12b0048fe117205990b151d2f5f57ad9616 | [
"MIT"
] | null | null | null | a = 5
# Toy nested-conditional demo: prints which of the three constants is largest.
# (`a = 5` is defined immediately above this block.)
b = 10
c = 15
if a > b:
if a > c:
print("a value is big")
else:
print("c value is big")
elif b > c:
print("b value is big")
else:
print("c is big") | 15.083333 | 30 | 0.491713 |
f1066ce327bf680448ccfdeb7a6d5768a9f43e55 | 1,635 | py | Python | filter.py | hulto/hackpack | 39fb5b031ae9b0bce8a8fd5fdbdf1e7724e5f99b | [
"MIT"
] | 15 | 2017-03-14T23:12:57.000Z | 2022-03-15T16:05:49.000Z | filter.py | hulto/hackpack | 39fb5b031ae9b0bce8a8fd5fdbdf1e7724e5f99b | [
"MIT"
] | 51 | 2015-08-28T00:36:56.000Z | 2016-02-12T21:54:19.000Z | filter.py | hulto/hackpack | 39fb5b031ae9b0bce8a8fd5fdbdf1e7724e5f99b | [
"MIT"
] | 10 | 2017-02-15T02:21:49.000Z | 2022-03-09T16:34:14.000Z | #!/usr/bin/env python3
import io
import json
import sys
from pandocfilters import walk, Header, Link, Para, Str
# record how many headers deep we are
depth = 0
# create node that is a block paragraph with a link that says 'Jump to Top' and hrefs '#top'
jump = Para([Link(['', [], []], [Str('Jump to Top')], ('#top', 'top'))])
# add jumps before headers of the document
def add_to_headers(key, val, fmt, meta):
global depth
# when we are at a header node
if key == 'Header':
# get details of header
lvl, attr, inline = val
# if we are at the first header of a larger section
if lvl > depth:
# record the depth and do not place a jump
depth += 1
return
elif lvl < depth:
# bring depth down to level
depth = lvl
# if the header is noteworthy, put a jump before it
if lvl <= 3:
return [jump, Header(lvl, attr, inline)]
# add jumps in all necessary places in the document
def jump_to_top(doc, fmt):
# add jumps throughout the document
doc = walk(doc, add_to_headers, fmt, doc['meta'] if 'meta' in doc else doc[0]['unMeta'])
# add a jump at the bottom of the document
try:
doc['blocks'].append(jump)
except TypeError:
doc[1].append(jump)
return doc
if __name__ == '__main__':
# read JSON in, parse it with an optional format argument, and write JSON out
json.dump(jump_to_top(json.load(io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')), sys.argv[1] if len(sys.argv) > 1 else ''), io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8'))
| 29.727273 | 188 | 0.6263 |
1b0f1d9f6dacd9f5fdbb9131abee3545bb49b04c | 6,139 | py | Python | tests/test_converters.py | conglei/graphene-pydantic | d4df6af2313dc7451b1d2bb159dada4a7a94c44d | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/test_converters.py | conglei/graphene-pydantic | d4df6af2313dc7451b1d2bb159dada4a7a94c44d | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/test_converters.py | conglei/graphene-pydantic | d4df6af2313dc7451b1d2bb159dada4a7a94c44d | [
"Apache-2.0",
"MIT"
] | null | null | null | import sys
import datetime
import decimal
import enum
import typing as T
import uuid
import graphene
import graphene.types
import pydantic
import pytest
from pydantic import BaseModel, create_model
import graphene_pydantic.converters as converters
from graphene_pydantic.converters import ConversionError, convert_pydantic_field
from graphene_pydantic.objecttype import PydanticObjectType
from graphene_pydantic.registry import get_global_registry, Placeholder
def _get_field_from_spec(name, type_spec_or_default):
"""Build a throwaway pydantic model with one field and return that field."""
kwargs = {name: type_spec_or_default}
m = create_model("model", **kwargs)
return m.__fields__[name]
def _convert_field_from_spec(name, type_spec_or_default):
"""Convert a single pydantic field spec to a graphene Field via the global registry."""
return convert_pydantic_field(
_get_field_from_spec(name, type_spec_or_default),
get_global_registry(PydanticObjectType),
)
def test_required_string():
"""A required str field converts to NonNull(String)."""
field = _convert_field_from_spec("s", (str, ...))
assert field is not None
assert isinstance(field, graphene.Field)
# The ellipsis in the type spec means required
assert isinstance(field.type, graphene.NonNull)
assert field.type.of_type == graphene.String
def test_default_values():
"""A field with a default converts to an optional type carrying that default."""
field = _convert_field_from_spec("s", "hi")
assert field is not None
assert isinstance(field, graphene.Field)
# there's a default value, so it's not required
assert not isinstance(field.type, graphene.NonNull)
assert field.type == graphene.String
assert field.default_value == "hi"
# Each case pairs a (python type, default value) spec with its expected
# graphene scalar type.
@pytest.mark.parametrize(
"input, expected",
[
((bool, False), graphene.Boolean),
((float, 0.1), graphene.Float),
((int, 6), graphene.Int),
((str, "hi"), graphene.String),
((uuid.UUID, uuid.uuid4()), graphene.UUID),
((datetime.date, datetime.date(2019, 1, 1)), graphene.Date),
((datetime.time, datetime.time(15, 29)), graphene.Time),
((datetime.datetime, datetime.datetime(2019, 1, 1, 1, 37)), graphene.DateTime),
],
)
def test_builtin_scalars(input, expected):
"""Built-in Python scalar types map onto the matching graphene scalars."""
field = _convert_field_from_spec("attr", input)
assert isinstance(field, graphene.Field)
assert field.type == expected
assert field.default_value == input[1]
def test_union():
"""A typing.Union converts to a generated graphene Union subclass."""
field = _convert_field_from_spec("attr", (T.Union[int, float, str], 5.0))
assert issubclass(field.type, graphene.Union)
assert field.default_value == 5.0
assert field.type.__name__.startswith("UnionOf")
if sys.version_info >= (3, 8):
# Python < 3.8 does not support typing.Literal
def test_literal():
"""A multi-value Literal converts to a generated graphene Union."""
field = _convert_field_from_spec(
"attr", (T.Literal["literal1", "literal2", 3], 3)
)
assert issubclass(field.type, graphene.Union)
assert field.default_value == 3
assert field.type.__name__.startswith("UnionOf")
def test_literal_singleton():
"""A single-value Literal collapses to the underlying scalar type."""
field = _convert_field_from_spec("attr", (T.Literal["literal1"], "literal1"))
assert issubclass(field.type, graphene.String)
assert field.default_value == "literal1"
assert field.type == graphene.String
def test_mapping():
"""Mapping types are unsupported and raise ConversionError."""
with pytest.raises(ConversionError) as exc:
_convert_field_from_spec("attr", (T.Dict[str, int], {"foo": 5}))
assert exc.value.args[0] == "Don't know how to handle mappings in Graphene."
def test_decimal(monkeypatch):
"""Decimal maps to graphene Decimal when supported, else falls back to Float."""
monkeypatch.setattr(converters, "DECIMAL_SUPPORTED", True)
field = _convert_field_from_spec("attr", (decimal.Decimal, decimal.Decimal(1.25)))
assert field.type.__name__ == "Decimal"
monkeypatch.setattr(converters, "DECIMAL_SUPPORTED", False)
field = _convert_field_from_spec("attr", (decimal.Decimal, decimal.Decimal(1.25)))
assert field.type.__name__ == "Float"
def test_iterables():
"""List/set/tuple specs all convert to graphene List; Optional unwraps."""
field = _convert_field_from_spec("attr", (T.List[int], [1, 2]))
assert isinstance(field.type, graphene.types.List)
# Bare builtin containers (no item type) map to the List type itself.
field = _convert_field_from_spec("attr", (list, [1, 2]))
assert field.type == graphene.types.List
field = _convert_field_from_spec("attr", (T.Set[int], {1, 2}))
assert isinstance(field.type, graphene.types.List)
field = _convert_field_from_spec("attr", (set, {1, 2}))
assert field.type == graphene.types.List
field = _convert_field_from_spec("attr", (T.Tuple[int, float], (1, 2.2)))
assert isinstance(field.type, graphene.types.List)
field = _convert_field_from_spec("attr", (T.Tuple[int, ...], (1, 2.2)))
assert isinstance(field.type, graphene.types.List)
field = _convert_field_from_spec("attr", (tuple, (1, 2)))
assert field.type == graphene.types.List
# Optional[int] unwraps to plain Int (nullable by default).
field = _convert_field_from_spec("attr", (T.Union[None, int], 1))
assert field.type == graphene.types.Int
def test_enum():
"""A Python Enum converts to a graphene Enum carrying the original enum."""
class Color(enum.Enum):
RED = 1
GREEN = 2
field = _convert_field_from_spec("attr", (Color, Color.RED))
assert field.type.__name__ == "Color"
assert field.type._meta.enum == Color
def test_existing_model():
"""A pydantic model already registered as an ObjectType resolves to that type."""
from graphene_pydantic import PydanticObjectType
class Foo(BaseModel):
name: str
# Defining GraphFoo registers Foo in the global registry.
class GraphFoo(PydanticObjectType):
class Meta:
model = Foo
field = _convert_field_from_spec("attr", (Foo, Foo(name="bar")))
assert field.type == GraphFoo
def test_unresolved_placeholders():
"""An unregistered model converts to a registry Placeholder, not an error."""
# no errors should be raised here -- instead a placeholder is created
field = _convert_field_from_spec("attr", (create_model("Model", size=int), None))
assert any(
isinstance(x, Placeholder)
for x in get_global_registry(PydanticObjectType)._registry.values()
)
def test_self_referencing():
"""A self-referencing model resolves its own placeholder after registration."""
class NodeModel(BaseModel):
id: int
name: str
# nodes: Union['NodeModel', None]
nodes: T.Optional["NodeModel"]
NodeModel.update_forward_refs()
class NodeModelSchema(PydanticObjectType):
class Meta: # noqa: too-few-public-methods
model = NodeModel
@classmethod
def is_type_of(cls, root, info):
return isinstance(root, (cls, NodeModel))
# Resolving placeholders wires the self-reference back to this schema.
NodeModelSchema.resolve_placeholders()
assert NodeModelSchema._meta.model is NodeModel
| 31.973958 | 87 | 0.690829 |
54e4ce0f8310b8f17023817582141fcc8b7f1247 | 1,204 | py | Python | jj/matchers/logical_matchers/_all_matcher.py | TeoDV/jj | a58d91ad7b37ba3115daea4890190abede8f3353 | [
"Apache-2.0"
] | 4 | 2020-09-08T08:14:21.000Z | 2022-01-27T19:22:53.000Z | jj/matchers/logical_matchers/_all_matcher.py | TeoDV/jj | a58d91ad7b37ba3115daea4890190abede8f3353 | [
"Apache-2.0"
] | 19 | 2018-02-13T05:51:25.000Z | 2022-03-27T22:48:11.000Z | jj/matchers/logical_matchers/_all_matcher.py | TeoDV/jj | a58d91ad7b37ba3115daea4890190abede8f3353 | [
"Apache-2.0"
] | 3 | 2017-11-17T13:25:23.000Z | 2022-02-03T12:57:00.000Z | from typing import Any, Dict, List
from packed import packable
from ...requests import Request
from ...resolvers import Resolver
from .._resolvable_matcher import ResolvableMatcher
from ._logical_matcher import LogicalMatcher
__all__ = ("AllMatcher",)
@packable("jj.matchers.AllMatcher")
class AllMatcher(LogicalMatcher):
def __init__(self, matchers: List[ResolvableMatcher], *, resolver: Resolver) -> None:
super().__init__(resolver=resolver)
assert len(matchers) > 0
self._matchers = matchers
async def match(self, request: Request) -> bool:
for matcher in self._matchers:
if not await matcher.match(request):
return False
return True
def __repr__(self) -> str:
return (f"{self.__class__.__qualname__}"
f"({self._matchers!r}, resolver={self._resolver!r})")
def __packed__(self) -> Dict[str, Any]:
return {"matchers": self._matchers}
@classmethod
def __unpacked__(cls, *,
matchers: List[ResolvableMatcher],
resolver: Resolver,
**kwargs: Any) -> "AllMatcher":
return cls(matchers, resolver=resolver)
| 30.871795 | 89 | 0.641196 |
b0792051a73fcc80fbaf3b17428d83cf81096960 | 19,977 | py | Python | Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/agw/pyprogress.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/agw/pyprogress.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/agw/pyprogress.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------- #
# PYPROGRESS wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 03 Nov 2006
# Latest Revision: 12 May 2009, 15.00 GMT
#
#
# TODO List
#
# 1. Do we support all the styles of wx.ProgressDialog in indeterminated mode?
#
# 2. Other ideas?
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# gavana@kpo.kz
# andrea.gavana@gmail.com
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
Description
===========
PyProgress is similar to wx.ProgressDialog in indeterminate mode, but with a
different gauge appearance and a different spinning behavior. The moving gauge
can be drawn with a single solid colour or with a shading gradient foreground.
The gauge background colour is user customizable.
The bar does not always move from the beginning to the end as in wx.ProgressDialog
in indeterminate mode, but spins cyclically forward and backward.
Other options include:
- Possibility to change the proportion between the spinning bar and the
entire gauge, so that the bar can be longer or shorter (the default is 20%);
- Modifying the number of steps the spinning bar performs before a forward
(or backward) loop reverses.
PyProgress can optionally display a Cancel button, and a wx.StaticText which
outputs the elapsed time from the starting of the process.
Supported Platforms
===================
PyProgress has been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (Dapper 6.06)
License And Version:
===================
PyProgress is freeware and distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 12 May 2009, 15.00 GMT
Version 0.2
"""
__docformat__ = "epytext"
import wx
# Some constants, taken straight from wx.ProgressDialog
Uncancelable = -1
Canceled = 0
Continue = 1
Finished = 2
# Margins between gauge and text/button
LAYOUT_MARGIN = 8
# ---------------------------------------------------------------------------- #
# Class ProgressGauge
# ---------------------------------------------------------------------------- #
class ProgressGauge(wx.PyWindow):
""" This class provides a visual alternative for wx.Gauge."""
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=(-1,30)):
""" Default class constructor. """
wx.PyWindow.__init__(self, parent, id, pos, size, style=wx.SUNKEN_BORDER)
self._value = 0
self._steps = 50
self._pos = 0
self._current = 0
self._gaugeproportion = 0.2
self._firstGradient = wx.WHITE
self._secondGradient = wx.BLUE
self._background = wx.Brush(wx.WHITE, wx.SOLID)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
def GetFirstGradientColour(self):
""" Returns the first gradient colour. """
return self._firstGradient
def SetFirstGradientColour(self, colour):
""" Sets the first gradient colour. """
self._firstGradient = colour
self.Refresh()
def GetSecondGradientColour(self):
""" Returns the second gradient colour. """
return self._secondGradient
def SetSecondGradientColour(self, colour):
""" Sets the second gradient colour. """
self._secondGradient = colour
self.Refresh()
def GetGaugeBackground(self):
""" Returns the gauge background colour. """
return self._background
def SetGaugeBackground(self, colour):
""" Sets the gauge background colour. """
self._background = wx.Brush(colour, wx.SOLID)
def SetGaugeSteps(self, steps):
"""
Sets the number of steps the gauge performs before switching from
forward to backward (or vice-versa) movement.
"""
if steps <= 0:
raise Exception("ERROR:\n Gauge steps must be greater than zero. ")
if steps != self._steps:
self._steps = steps
def GetGaugeSteps(self):
"""
Returns the number of steps the gauge performs before switching from
forward to backward (or vice-versa) movement.
"""
return self._steps
def GetGaugeProportion(self):
"""
Returns the relative proportion between the sliding bar and the
whole gauge.
"""
return self._gaugeproportion
def SetGaugeProportion(self, proportion):
"""
Sets the relative proportion between the sliding bar and the
whole gauge.
"""
if proportion <= 0 or proportion >= 1:
raise Exception("ERROR:\n Gauge proportion must be between 0 and 1. ")
if proportion != self._gaugeproportion:
self._gaugeproportion = proportion
def OnEraseBackground(self, event):
""" Handles the wx.EVT_ERASE_BACKGROUND event for ProgressGauge. """
pass
def OnPaint(self, event):
""" Handles the wx.EVT_PAINT event for ProgressGauge. """
dc = wx.BufferedPaintDC(self)
dc.SetBackground(self._background)
dc.Clear()
xsize, ysize = self.GetClientSize()
interval = xsize/float(self._steps)
self._pos = interval*self._value
status = self._current/(self._steps - int(self._gaugeproportion*xsize)/int(interval))
if status%2 == 0:
increment = 1
else:
increment = -1
self._value = self._value + increment
self._current = self._current + 1
self.DrawProgress(dc, xsize, ysize, increment)
def DrawProgress(self, dc, xsize, ysize, increment):
""" Actually draws the sliding bar. """
if increment > 0:
col1 = self.GetFirstGradientColour()
col2 = self.GetSecondGradientColour()
else:
col1 = self.GetSecondGradientColour()
col2 = self.GetFirstGradientColour()
interval = self._gaugeproportion*xsize
r1, g1, b1 = int(col1.Red()), int(col1.Green()), int(col1.Blue())
r2, g2, b2 = int(col2.Red()), int(col2.Green()), int(col2.Blue())
rstep = float((r2 - r1)) / interval
gstep = float((g2 - g1)) / interval
bstep = float((b2 - b1)) / interval
rf, gf, bf = 0, 0, 0
dc.SetBrush(wx.TRANSPARENT_BRUSH)
for ii in xrange(int(self._pos), int(self._pos+interval)):
currCol = (r1 + rf, g1 + gf, b1 + bf)
dc.SetPen(wx.Pen(currCol, 2))
dc.DrawLine(ii, 1, ii, ysize-2)
rf = rf + rstep
gf = gf + gstep
bf = bf + bstep
def Update(self):
""" Updates the gauge with a new value. """
self.Refresh()
# ---------------------------------------------------------------------------- #
# Class PyProgress
# ---------------------------------------------------------------------------- #
class PyProgress(wx.Dialog):
"""
PyProgress is similar to wx.ProgressDialog in indeterminated mode, but with a
different gauge appearance and a different spinning behavior. The moving gauge
can be drawn with a single solid colour or with a shading gradient foreground.
The gauge background colour is user customizable.
The bar does not move always from the beginning to the end as in wx.ProgressDialog
in indeterminated mode, but spins cyclically forward and backward.
"""
def __init__(self, parent=None, id=-1, title="", message="",
style=wx.PD_APP_MODAL|wx.PD_AUTO_HIDE):
""" Default class constructor. """
wx.Dialog.__init__(self, parent, id, title)
self._delay = 3
self._hasAbortButton = False
# we may disappear at any moment, let the others know about it
self.SetExtraStyle(self.GetExtraStyle()|wx.WS_EX_TRANSIENT)
self._hasAbortButton = (style & wx.PD_CAN_ABORT)
if wx.Platform == "__WXMSW__":
# we have to remove the "Close" button from the title bar then as it is
# confusing to have it - it doesn't work anyhow
# FIXME: should probably have a (extended?) window style for this
if not self._hasAbortButton:
self.EnableClose(False)
self._state = (self._hasAbortButton and [Continue] or [Uncancelable])[0]
self._parentTop = wx.GetTopLevelParent(parent)
dc = wx.ClientDC(self)
dc.SetFont(wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT))
widthText, dummy = dc.GetTextExtent(message)
sizer = wx.BoxSizer(wx.VERTICAL)
self._msg = wx.StaticText(self, wx.ID_ANY, message)
sizer.Add(self._msg, 0, wx.LEFT|wx.TOP, 2*LAYOUT_MARGIN)
sizeDlg = wx.Size()
sizeLabel = self._msg.GetSize()
sizeDlg.y = 2*LAYOUT_MARGIN + sizeLabel.y
self._gauge = ProgressGauge(self, -1)
sizer.Add(self._gauge, 0, wx.LEFT|wx.RIGHT|wx.TOP|wx.EXPAND, 2*LAYOUT_MARGIN)
sizeGauge = self._gauge.GetSize()
sizeDlg.y += 2*LAYOUT_MARGIN + sizeGauge.y
# create the estimated/remaining/total time zones if requested
self._elapsed = None
self._display_estimated = self._last_timeupdate = self._break = 0
self._ctdelay = 0
label = None
nTimeLabels = 0
if style & wx.PD_ELAPSED_TIME:
nTimeLabels += 1
self._elapsed = self.CreateLabel("Elapsed time : ", sizer)
if nTimeLabels > 0:
label = wx.StaticText(self, -1, "")
# set it to the current time
self._timeStart = wx.GetCurrentTime()
sizeDlg.y += nTimeLabels*(label.GetSize().y + LAYOUT_MARGIN)
label.Destroy()
sizeDlgModified = False
if wx.Platform == "__WXMSW__":
sizerFlags = wx.ALIGN_RIGHT|wx.ALL
else:
sizerFlags = wx.ALIGN_CENTER_HORIZONTAL|wx.BOTTOM|wx.TOP
if self._hasAbortButton:
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
self._btnAbort = wx.Button(self, -1, "Cancel")
self._btnAbort.Bind(wx.EVT_BUTTON, self.OnCancel)
# Windows dialogs usually have buttons in the lower right corner
buttonSizer.Add(self._btnAbort, 0, sizerFlags, LAYOUT_MARGIN)
if not sizeDlgModified:
sizeDlg.y += 2*LAYOUT_MARGIN + wx.Button.GetDefaultSize().y
if self._hasAbortButton:
sizer.Add(buttonSizer, 0, sizerFlags, LAYOUT_MARGIN )
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
self._windowStyle = style
self.SetSizerAndFit(sizer)
sizeDlg.y += 2*LAYOUT_MARGIN
# try to make the dialog not square but rectangular of reasonable width
sizeDlg.x = max(widthText, 4*sizeDlg.y/3)
sizeDlg.x *= 3
sizeDlg.x /= 2
self.SetClientSize(sizeDlg)
self.Centre(wx.CENTER_FRAME|wx.BOTH)
if style & wx.PD_APP_MODAL:
self._winDisabler = wx.WindowDisabler(self)
else:
if self._parentTop:
self._parentTop.Disable()
self._winDisabler = None
self.ShowDialog()
self.Enable()
# this one can be initialized even if the others are unknown for now
# NB: do it after calling Layout() to keep the labels correctly aligned
if self._elapsed:
self.SetTimeLabel(0, self._elapsed)
if not wx.EventLoop().GetActive():
self.evtloop = wx.EventLoop()
wx.EventLoop.SetActive(self.evtloop)
self.Update()
def CreateLabel(self, text, sizer):
""" Creates the wx.StaticText that holds the elapsed time label. """
locsizer = wx.BoxSizer(wx.HORIZONTAL)
dummy = wx.StaticText(self, wx.ID_ANY, text)
label = wx.StaticText(self, wx.ID_ANY, "unknown")
if wx.Platform in ["__WXMSW__", "__WXMAC__"]:
# label and time centered in one row
locsizer.Add(dummy, 1, wx.ALIGN_LEFT)
locsizer.Add(label, 1, wx.ALIGN_LEFT|wx.LEFT, LAYOUT_MARGIN)
sizer.Add(locsizer, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.TOP, LAYOUT_MARGIN)
else:
# label and time to the right in one row
sizer.Add(locsizer, 0, wx.ALIGN_RIGHT|wx.RIGHT|wx.TOP, LAYOUT_MARGIN)
locsizer.Add(dummy)
locsizer.Add(label, 0, wx.LEFT, LAYOUT_MARGIN)
return label
# ----------------------------------------------------------------------------
# wxProgressDialog operations
# ----------------------------------------------------------------------------
def UpdatePulse(self, newmsg=""):
""" Update the progress dialog with a (optionally) new message. """
self._gauge.Update()
if newmsg and newmsg != self._msg.GetLabel():
self._msg.SetLabel(newmsg)
wx.YieldIfNeeded()
if self._elapsed:
elapsed = wx.GetCurrentTime() - self._timeStart
if self._last_timeupdate < elapsed:
self._last_timeupdate = elapsed
self.SetTimeLabel(elapsed, self._elapsed)
if self._state == Finished:
if not self._windowStyle & wx.PD_AUTO_HIDE:
self.EnableClose()
if newmsg == "":
# also provide the finishing message if the application didn't
self._msg.SetLabel("Done.")
wx.YieldIfNeeded()
self.ShowModal()
return False
else:
# reenable other windows before hiding this one because otherwise
# Windows wouldn't give the focus back to the window which had
# been previously focused because it would still be disabled
self.ReenableOtherWindows()
self.Hide()
# we have to yield because not only we want to update the display but
# also to process the clicks on the cancel and skip buttons
wx.YieldIfNeeded()
return self._state != Canceled
def GetFirstGradientColour(self):
""" Returns the gauge first gradient colour. """
return self._gauge.GetFirstGradientColour()
def SetFirstGradientColour(self, colour):
""" Sets the gauge first gradient colour. """
self._gauge.SetFirstGradientColour(colour)
def GetSecondGradientColour(self):
""" Returns the gauge second gradient colour. """
return self._gauge.GetSecondGradientColour()
def SetSecondGradientColour(self, colour):
""" Sets the gauge second gradient colour. """
self._gauge.SetSecondGradientColour(colour)
def GetGaugeBackground(self):
""" Returns the gauge background colour. """
return self._gauge.GetGaugeBackground()
def SetGaugeBackground(self, colour):
""" Sets the gauge background colour. """
self._gauge.SetGaugeBackground(colour)
def SetGaugeSteps(self, steps):
"""
Sets the number of steps the gauge performs before switching from
forward to backward (or vice-versa) movement.
"""
self._gauge.SetGaugeSteps(steps)
def GetGaugeSteps(self):
"""
Returns the number of steps the gauge performs before switching from
forward to backward (or vice-versa) movement.
"""
return self._gauge.GetGaugeSteps()
def GetGaugeProportion(self):
"""
Returns the relative proportion between the sliding bar and the
whole gauge.
"""
return self._gauge.GetGaugeProportion()
def SetGaugeProportion(self, proportion):
"""
Sets the relative proportion between the sliding bar and the
whole gauge.
"""
self._gauge.SetGaugeProportion(proportion)
def ShowDialog(self, show=True):
""" Show the dialog. """
# reenable other windows before hiding this one because otherwise
# Windows wouldn't give the focus back to the window which had
# been previously focused because it would still be disabled
if not show:
self.ReenableOtherWindows()
return self.Show()
# ----------------------------------------------------------------------------
# event handlers
# ----------------------------------------------------------------------------
def OnCancel(self, event):
""" Handles the wx.EVT_BUTTON event for the Cancel button. """
if self._state == Finished:
# this means that the count down is already finished and we're being
# shown as a modal dialog - so just let the default handler do the job
event.Skip()
else:
# request to cancel was received, the next time Update() is called we
# will handle it
self._state = Canceled
# update the buttons state immediately so that the user knows that the
# request has been noticed
self.DisableAbort()
# save the time when the dialog was stopped
self._timeStop = wx.GetCurrentTime()
self.ReenableOtherWindows()
def OnDestroy(self, event):
""" Handles the wx.EVT_WINDOW_DESTROY event for PyProgress. """
self.ReenableOtherWindows()
event.Skip()
def OnClose(self, event):
""" Handles the wx.EVT_CLOSE event for PyProgress. """
if self._state == Uncancelable:
# can't close this dialog
event.Veto()
elif self._state == Finished:
# let the default handler close the window as we already terminated
self.Hide()
event.Skip()
else:
# next Update() will notice it
self._state = Canceled
self.DisableAbort()
self._timeStop = wx.GetCurrentTime()
def ReenableOtherWindows(self):
""" Re-enables the other windows if using wx.WindowDisabler. """
if self._windowStyle & wx.PD_APP_MODAL:
if hasattr(self, "_winDisabler"):
del self._winDisabler
else:
if self._parentTop:
self._parentTop.Enable()
def SetTimeLabel(self, val, label=None):
""" Sets the elapsed time label. """
if label:
hours = val/3600
minutes = (val%3600)/60
seconds = val%60
strs = ("%lu:%02lu:%02lu")%(hours, minutes, seconds)
if strs != label.GetLabel():
label.SetLabel(strs)
def EnableAbort(self, enable=True):
""" Enables or disables the Cancel button. """
if self._hasAbortButton:
if self._btnAbort:
self._btnAbort.Enable(enable)
def EnableClose(self, enable=True):
""" Enables or disables the Close button. """
if self._hasAbortButton:
if self._btnAbort:
self._btnAbort.Enable(enable)
self._btnAbort.SetLabel("Close")
self._btnAbort.Bind(wx.EVT_BUTTON, self.OnClose)
def DisableAbort(self):
""" Disables the Cancel button. """
self.EnableAbort(False)
| 30.22239 | 93 | 0.575061 |
6d00da56a6429fea45f0ae77ce119f88815e576b | 1,561 | py | Python | saveAmebloContents/main.py | sassy/saveAmebloContents | 94a8b85144441dda40e18d63ea5a64df4548f46d | [
"MIT"
] | 1 | 2020-07-06T17:58:22.000Z | 2020-07-06T17:58:22.000Z | saveAmebloContents/main.py | sassy/saveAmebloContents | 94a8b85144441dda40e18d63ea5a64df4548f46d | [
"MIT"
] | 1 | 2021-10-18T20:49:19.000Z | 2021-10-18T20:49:19.000Z | saveAmebloContents/main.py | sassy/saveAmebloContents | 94a8b85144441dda40e18d63ea5a64df4548f46d | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
from voluptuous import Schema, Url
import os
import time
import sys
links = []
def createIndexPage():
f = open("contents/index.html", 'w')
for link in links:
f.write('<a href="' + link + '">' + link + '</a><br>')
f.close()
def saveContent(url):
filename = url.split("/")[-1]
links.append(filename)
f = open("contents/" + filename, 'w')
print(url)
soup = BeautifulSoup(urlopen(url).read().decode('utf-8'), "html.parser")
f.write(soup.find("article").prettify())
f.close()
def parseContent(baseUrl, url):
f = urlopen(url)
soup = BeautifulSoup(f.read().decode('utf-8'), "html.parser")
titles = soup.find("ul", attrs={"class": "contentsList"}).find_all("li")
for title in titles:
link = title.find("a").get("href")
schema = Schema(Url())
saveContent(schema(baseUrl + link))
next = soup.find("a", attrs={"class": "pagingNext"})
if next is not None:
return baseUrl + next.get("href")
else:
return None
def main():
if len(sys.argv) < 2:
print("input ameblo id")
sys.exit()
amebloid = sys.argv[1]
baseUrl = "http://ameblo.jp/"
url = baseUrl + amebloid + "/entrylist.html"
try:
os.mkdir("contents")
except FileExistsError:
pass
while url is not None:
url = parseContent(baseUrl, url)
time.sleep(3)
createIndexPage()
if __name__ == '__main__':
main()
| 25.590164 | 76 | 0.593209 |
16a97eea5a8af7abf496073f7387485a5a3c7905 | 328 | py | Python | foilaundering/apps/foi_requests/views.py | foilaundering/foi_laundering | 786b986d0ca55ba13a17a2a943412615fa780682 | [
"CC0-1.0"
] | null | null | null | foilaundering/apps/foi_requests/views.py | foilaundering/foi_laundering | 786b986d0ca55ba13a17a2a943412615fa780682 | [
"CC0-1.0"
] | null | null | null | foilaundering/apps/foi_requests/views.py | foilaundering/foi_laundering | 786b986d0ca55ba13a17a2a943412615fa780682 | [
"CC0-1.0"
] | null | null | null | from django.views.generic import CreateView, ListView
from .forms import ArticleForm
from .models import FOIRequest
class SubmitView(CreateView):
form_class = ArticleForm
template_name = "submit_form.html"
success_url = "/request/thanks/"
class RequestsListView(ListView):
queryset = FOIRequest.objects.all()
| 25.230769 | 53 | 0.771341 |
55c0195c8eccd2ae4c238f0f1c0d54e2067e91c2 | 456 | py | Python | test/json_rpc_client.py | Vadman97/HawkProxy | ed2e2a59dd6f369756cfc459f2a5ec466615593e | [
"MIT"
] | null | null | null | test/json_rpc_client.py | Vadman97/HawkProxy | ed2e2a59dd6f369756cfc459f2a5ec466615593e | [
"MIT"
] | null | null | null | test/json_rpc_client.py | Vadman97/HawkProxy | ed2e2a59dd6f369756cfc459f2a5ec466615593e | [
"MIT"
] | 1 | 2020-07-24T00:31:05.000Z | 2020-07-24T00:31:05.000Z | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import xmlrpclib
import json
import sys
import time
import datetime
import jsonrpclib
server = jsonrpclib.Server('http://localhost:8080')
retry = 1000
t1 = datetime.datetime.now()
for i in xrange(retry):
server.echo_rpc()
t2 = datetime.datetime.now()
print "time delta is: %s. Per RPC is:%s, Per second: %s" % (
(t2 - t1), (t2 - t1) / retry, retry / (t2 - t1).total_seconds())
| 21.714286 | 68 | 0.690789 |
d2bc5e0979446bd716d2eabc3339e9089e72b76d | 1,990 | py | Python | examples/05-mag/plot_analytic.py | kimjaed/simpeg | b8d716f86a4ea07ba3085fabb24c2bc974788040 | [
"MIT"
] | 3 | 2020-11-27T03:18:28.000Z | 2022-03-18T01:29:58.000Z | examples/05-mag/plot_analytic.py | kimjaed/simpeg | b8d716f86a4ea07ba3085fabb24c2bc974788040 | [
"MIT"
] | null | null | null | examples/05-mag/plot_analytic.py | kimjaed/simpeg | b8d716f86a4ea07ba3085fabb24c2bc974788040 | [
"MIT"
] | 1 | 2020-05-26T17:00:53.000Z | 2020-05-26T17:00:53.000Z | """
PF: Magnetics: Analytics
========================
Comparing the magnetics field in Vancouver to Seoul
"""
import numpy as np
from SimPEG import PF
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def run(plotIt=True):
xr = np.linspace(-300, 300, 41)
yr = np.linspace(-300, 300, 41)
X, Y = np.meshgrid(xr, yr)
Z = np.ones((np.size(xr), np.size(yr)))*150
# Bz component in Korea
inckr = -8. + 3./60
deckr = 54. + 9./60
btotkr = 50898.6
Bokr = PF.MagAnalytics.IDTtoxyz(inckr, deckr, btotkr)
bx, by, bz = PF.MagAnalytics.MagSphereAnaFunA(
X, Y, Z, 100., 0., 0., 0., 0.01, Bokr, 'secondary'
)
Bzkr = np.reshape(bz, (np.size(xr), np.size(yr)), order='F')
# Bz component in Canada
incca = 16. + 49./60
decca = 70. + 19./60
btotca = 54692.1
Boca = PF.MagAnalytics.IDTtoxyz(incca, decca, btotca)
bx, by, bz = PF.MagAnalytics.MagSphereAnaFunA(
X, Y, Z, 100., 0., 0., 0., 0.01, Boca, 'secondary'
)
Bzca = np.reshape(bz, (np.size(xr), np.size(yr)), order='F')
if plotIt:
plt.figure(figsize=(14, 5))
ax1 = plt.subplot(121)
dat1 = plt.imshow(Bzkr, extent=[min(xr), max(xr), min(yr), max(yr)])
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
ax1.set_xlabel('East-West (m)')
ax1.set_ylabel('South-North (m)')
plt.colorbar(dat1, cax=cax1)
ax1.set_title('$B_z$ field at Seoul, South Korea')
ax2 = plt.subplot(122)
dat2 = plt.imshow(Bzca, extent=[min(xr), max(xr), min(yr), max(yr)])
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
ax2.set_xlabel('East-West (m)')
ax2.set_ylabel('South-North (m)')
plt.colorbar(dat2, cax=cax2)
ax2.set_title('$B_z$ field at Vancouver, Canada')
if __name__ == '__main__':
run()
plt.show()
| 29.701493 | 76 | 0.588945 |
d4a39acd0940fb6ef4a74f5142350177d973933d | 5,644 | py | Python | core/dbt/task/list.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | [
"Apache-2.0"
] | null | null | null | core/dbt/task/list.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | [
"Apache-2.0"
] | null | null | null | core/dbt/task/list.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | [
"Apache-2.0"
] | null | null | null | import json
from typing import Type
from dbt.graph import (
parse_difference,
ResourceTypeSelector,
SelectionSpec,
)
from dbt.task.runnable import GraphRunnableTask, ManifestTask
from dbt.task.test import TestSelector
from dbt.node_types import NodeType
from dbt.exceptions import RuntimeException, InternalException
from dbt.logger import log_manager, GLOBAL_LOGGER as logger
class ListTask(GraphRunnableTask):
DEFAULT_RESOURCE_VALUES = frozenset((
NodeType.Model,
NodeType.Snapshot,
NodeType.Seed,
NodeType.Test,
NodeType.Source,
))
ALL_RESOURCE_VALUES = DEFAULT_RESOURCE_VALUES | frozenset((
NodeType.Analysis,
))
ALLOWED_KEYS = frozenset((
'alias',
'name',
'package_name',
'depends_on',
'tags',
'config',
'resource_type',
'source_name',
))
def __init__(self, args, config):
super().__init__(args, config)
self.args.single_threaded = True
if self.args.models:
if self.args.select:
raise RuntimeException(
'"models" and "select" are mutually exclusive arguments'
)
if self.args.resource_types:
raise RuntimeException(
'"models" and "resource_type" are mutually exclusive '
'arguments'
)
@classmethod
def pre_init_hook(cls, args):
"""A hook called before the task is initialized."""
log_manager.stderr_console()
super().pre_init_hook(args)
def _iterate_selected_nodes(self):
selector = self.get_node_selector()
spec = self.get_selection_spec()
nodes = sorted(selector.get_selected(spec))
if not nodes:
logger.warning('No nodes selected!')
return
if self.manifest is None:
raise InternalException(
'manifest is None in _iterate_selected_nodes'
)
for node in nodes:
if node in self.manifest.nodes:
yield self.manifest.nodes[node]
elif node in self.manifest.sources:
yield self.manifest.sources[node]
else:
raise RuntimeException(
f'Got an unexpected result from node selection: "{node}"'
f'Expected a source or a node!'
)
def generate_selectors(self):
for node in self._iterate_selected_nodes():
selector = '.'.join(node.fqn)
if node.resource_type == NodeType.Source:
yield 'source:{}'.format(selector)
else:
yield selector
def generate_names(self):
for node in self._iterate_selected_nodes():
if node.resource_type == NodeType.Source:
yield '{0.source_name}.{0.name}'.format(node)
else:
yield node.name
def generate_json(self):
for node in self._iterate_selected_nodes():
yield json.dumps({
k: v
for k, v in node.to_dict(omit_none=False).items()
if k in self.ALLOWED_KEYS
})
def generate_paths(self):
for node in self._iterate_selected_nodes():
yield node.original_file_path
def run(self):
ManifestTask._runtime_initialize(self)
output = self.config.args.output
if output == 'selector':
generator = self.generate_selectors
elif output == 'name':
generator = self.generate_names
elif output == 'json':
generator = self.generate_json
elif output == 'path':
generator = self.generate_paths
else:
raise InternalException(
'Invalid output {}'.format(output)
)
for result in generator():
self.node_results.append(result)
print(result)
return self.node_results
@property
def resource_types(self):
if self.args.models:
return [NodeType.Model]
values = set(self.config.args.resource_types)
if not values:
return list(self.DEFAULT_RESOURCE_VALUES)
if 'default' in values:
values.remove('default')
values.update(self.DEFAULT_RESOURCE_VALUES)
if 'all' in values:
values.remove('all')
values.update(self.ALL_RESOURCE_VALUES)
return list(values)
@property
def selector(self):
if self.args.models:
return self.args.models
else:
return self.args.select
def get_selection_spec(self) -> SelectionSpec:
if self.args.selector_name:
spec = self.config.get_selector(self.args.selector_name)
else:
spec = parse_difference(self.selector, self.args.exclude)
return spec
def get_node_selector(self):
if self.manifest is None or self.graph is None:
raise InternalException(
'manifest and graph must be set to get perform node selection'
)
cls: Type[ResourceTypeSelector]
if self.resource_types == [NodeType.Test]:
return TestSelector(
graph=self.graph,
manifest=self.manifest,
)
else:
return ResourceTypeSelector(
graph=self.graph,
manifest=self.manifest,
resource_types=self.resource_types,
)
def interpret_results(self, results):
return bool(results)
| 31.707865 | 78 | 0.579554 |
b57584bc33e9e73e75014bb226846741e5060b28 | 285 | py | Python | adler/siteconfig/admin.py | Yura-D/adler-congress | bbdedb7dbada4f6246f2a880204e7dd4bb8c8466 | [
"MIT"
] | null | null | null | adler/siteconfig/admin.py | Yura-D/adler-congress | bbdedb7dbada4f6246f2a880204e7dd4bb8c8466 | [
"MIT"
] | 10 | 2020-03-01T23:00:54.000Z | 2022-03-12T00:17:11.000Z | adler/siteconfig/admin.py | Yura-D/adler-congress | bbdedb7dbada4f6246f2a880204e7dd4bb8c8466 | [
"MIT"
] | null | null | null | # from solo.admin import SingletonModelAdmin
from django.contrib import admin
from .models import SiteSettings
admin.site.register(SiteSettings)
# class SiteSettingAdmin(SingletonModelAdmin):
# fields = [
# 'ticket_counter_name',
# 'ticket_counter_date',
# ]
| 23.75 | 46 | 0.726316 |
f59845b2096663a9e7e291bcd03f475dda6b8d35 | 485 | py | Python | openpyxl/styles/tests/conftest.py | dangqhuy/openpyxl | 42e929b69b0938b081a62fed529ce470054249fb | [
"MIT"
] | 12 | 2019-08-07T16:48:21.000Z | 2021-12-13T02:47:22.000Z | openpyxl/styles/tests/conftest.py | dangqhuy/openpyxl | 42e929b69b0938b081a62fed529ce470054249fb | [
"MIT"
] | 19 | 2019-12-29T05:07:36.000Z | 2021-04-22T18:09:49.000Z | openpyxl/styles/tests/conftest.py | dangqhuy/openpyxl | 42e929b69b0938b081a62fed529ce470054249fb | [
"MIT"
] | 1 | 2020-05-26T20:33:10.000Z | 2020-05-26T20:33:10.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
import pytest
@pytest.fixture
def datadir():
"""DATADIR as a LocalPath"""
import os
here = os.path.split(__file__)[0]
DATADIR = os.path.join(here, "data")
from py._path.local import LocalPath
return LocalPath(DATADIR)
# objects under test
@pytest.fixture
def FormatRule():
"""Formatting rule class"""
from openpyxl.formatting.rules import FormatRule
return FormatRule
| 19.4 | 52 | 0.71134 |
831e47829427d1cbd148de39568df7f02a7958e9 | 3,462 | py | Python | shamir/code/shamir_gen.py | sellenth/crow | 1a693825baca035e5d3f056c25875b9f60ca7882 | [
"MIT"
] | 2 | 2020-06-05T17:53:44.000Z | 2020-06-05T18:40:55.000Z | shamir/code/shamir_gen.py | sellenth/crow | 1a693825baca035e5d3f056c25875b9f60ca7882 | [
"MIT"
] | 4 | 2020-02-11T07:29:27.000Z | 2020-05-16T01:47:07.000Z | shamir/code/shamir_gen.py | TheREK3R/crow | 9e0ef90bf2fbd3900f5eb7e9ad94a9737d515a22 | [
"MIT"
] | 5 | 2019-12-07T00:10:52.000Z | 2020-05-01T16:58:09.000Z | #!/usr/bin/python3
import shamir
import sqlite3
import rsa_encrypt
import time
import base64
import settings
#Class to hold the database
class db:
name = ""
key = ""
def __init__(self):
self.name = ""
self.key = ""
#add user secret to the secrets database
def add_secret(username, name, secret, currtime):
#initiate database connection
conn = sqlite3.connect(settings.DBdir + "secrets.db")
c = conn.cursor()
#make sure table exists
c.execute("CREATE TABLE IF NOT EXISTS secrets(id PRIMARY KEY, name, secret, timestamp DOUBLE)")
#INSERT OR REPLACE into secrets the secret and user info
c.execute("REPLACE INTO secrets VALUES (?,?,?,?)", [username, name, str(secret), currtime])
#commit and close connection
conn.commit()
conn.close()
return
#Encrypt shares with db_keys and store them into their respective databases
def add_shares(username, shares, keys, currtime):
#Grab database keys
db_keys = rsa_encrypt.get_keys(settings.DBS)
#shares must be equal to dbs to prevent loss or oversharing
if((not len(shares) == len(settings.DBS))):
return -1
#For each database
for i in range(len(settings.DBS)):
#initiate database connection
conn = sqlite3.connect(settings.DBdir + settings.DBS[i] + ".db")
c = conn.cursor()
#make sure the shares table exists
create = "CREATE TABLE IF NOT EXISTS enc_shares(id PRIMARY KEY, share, timestamp DOUBLE)"
c.execute(create)
#Convert share data to a string
payload = username + ":" + str(shares[i][0]) + ":" + str(shares[i][1]) + ":" + str(keys[i])
#Grab the database key for the current database
k = db_keys[settings.DBS[i]].key
#encrypt the share string with the database public key
payload = rsa_encrypt.encrypt_str(k, payload)
#insert or replace the encrypted share, the username, and a timestamp into the database
c.execute("REPLACE INTO enc_shares VALUES(?, ?, ?)", [username, payload, currtime])
#commit the action and close the database
conn.commit()
conn.close()
return
#Generate the secrets for the sharing scheme to use
def gen_secrets(username, name, keys):
#Validate that there are enough databases
if(len(settings.DBS) < settings.TOTAL) or len(keys) < settings.TOTAL:
return -1
#Generate the secret and shares
secret, shares = shamir.make_random_shares(settings.THRESH, settings.TOTAL)
#Grab a timestamp
currtime = time.time()
#add the secret to the secrets database
add_secret(username, name, secret, currtime)
#add encrypted shares to the shares db
add_shares(username, shares, keys, currtime)
return
#add a user to the system given a username, name, and key list
def add_user(username, name, keys_list):
#make sure that all keys are non-null
for i in keys_list:
if i == "":
return -1
#generate the user
gen_secrets(username, name, keys_list)
return
#if run as main
if __name__ == "__main__":
#Exit if client node
if not settings.ID == 'auth':
print("run this on an auth node")
exit(0)
#Add test users
add_user("r3k", "Ryan Kennedy", ["111111"] * settings.TOTAL)
add_user("hal", "Halston Sellentin", ["111111"] * settings.TOTAL ) | 28.61157 | 100 | 0.645003 |
06ada39afb5e3d37155554e29a091f1cde64f563 | 217 | py | Python | lec_05/voted_hash_map.py | diable201/Grokking_Algorithms | 2597b93a91ec5aabc06f9791b42de03f7d01656b | [
"MIT"
] | 1 | 2020-09-11T10:25:32.000Z | 2020-09-11T10:25:32.000Z | lec_05/voted_hash_map.py | diable201/Grokking_Algorithms | 2597b93a91ec5aabc06f9791b42de03f7d01656b | [
"MIT"
] | null | null | null | lec_05/voted_hash_map.py | diable201/Grokking_Algorithms | 2597b93a91ec5aabc06f9791b42de03f7d01656b | [
"MIT"
] | null | null | null | voted = {}
def check_voter(name):
if voted.get(name):
print("Already voted!")
else:
voted[name] = True
print("Let them vote!")
check_voter("Tom")
check_voter("Bob")
check_voter("Bob")
| 18.083333 | 31 | 0.59447 |
0aa303964ab805d0e0a86e2b67c51e0790c2b045 | 56,580 | py | Python | testplan/testing/multitest/result.py | ymn1k/testplan | b1bde8495c449d75a74a7fe4e7c6501b0476f833 | [
"Apache-2.0"
] | null | null | null | testplan/testing/multitest/result.py | ymn1k/testplan | b1bde8495c449d75a74a7fe4e7c6501b0476f833 | [
"Apache-2.0"
] | null | null | null | testplan/testing/multitest/result.py | ymn1k/testplan | b1bde8495c449d75a74a7fe4e7c6501b0476f833 | [
"Apache-2.0"
] | 1 | 2019-09-11T09:13:18.000Z | 2019-09-11T09:13:18.000Z | """TODO."""
import functools
import inspect
import os
import re
import uuid
from testplan import defaults
from testplan.defaults import STDOUT_STYLE
from .entries import assertions, base
from .entries.schemas.base import registry as schema_registry
from .entries.stdout.base import registry as stdout_registry
class ExceptionCapture(object):
    """
    Exception capture scope, will be used by exception related assertions.
    An instance of this class will be used as a context manager by
    exception related assertion methods: the exception (if any) raised
    inside the ``with`` block is recorded as an assertion entry instead
    of propagating.
    """
    def __init__(
        self, result, assertion_kls, exceptions,
        pattern=None, func=None, description=None, category=None,
    ):
        """
        :param result: Result object of the current testcase.
        :type result: ``testplan.testing.multitest.result.Result`` instance
        :param assertion_kls: Assertion class instantiated on scope exit
            with the captured exception.
        :param exceptions: List of expected exceptions.
        :type exceptions: ``list`` of exception classes.
        :param pattern: Optional pattern forwarded to the assertion
            (e.g. for matching against the exception message).
        :param func: Optional callable forwarded to the assertion for
            custom checks against the raised exception.
        :param description: Description text for the exception capture context,
            this will be the description for
            the related assertion object.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        """
        self.result = result
        self.assertion_kls = assertion_kls
        self.exceptions = exceptions
        self.description = description
        self.pattern = pattern
        self.func = func
        self.category = category
    def __enter__(self):
        # Nothing to set up -- all interesting work happens in `__exit__`.
        return self
    def __exit__(self, exc_type, exc_value, tb):
        """
        Exiting the block and reporting what was thrown if anything.
        """
        exc_assertion = self.assertion_kls(
            raised_exception=exc_value,
            expected_exceptions=self.exceptions,
            pattern=self.pattern,
            func=self.func,
            category=self.category,
            description=self.description,
        )
        # Annotate the entry with where the capture block lives:
        # stack frame [1] is the direct caller of this `__exit__`
        # (i.e. the testcase body containing the `with` statement).
        caller_frame = inspect.stack()[1]
        exc_assertion.file_path = os.path.abspath(caller_frame[1])
        exc_assertion.line_no = caller_frame[2]
        # We cannot use `bind_entry` here as this block will
        # be run when an exception is raised
        stdout_registry.log_entry(
            entry=exc_assertion,
            stdout_style=self.result.stdout_style
        )
        self.result.entries.append(exc_assertion)
        # Returning True suppresses the raised exception: the failure is
        # reported via the assertion entry rather than by propagation.
        return True
def bind_entry(method):
    """
    Appends return value of a assertion / log method to the ``Result`` object's
    ``entries`` list.
    The decorated method must return an entry object; the wrapper annotates
    it with the caller's file path / line number, logs it to stdout, stores
    it on the owning ``Result`` and returns the entry's truthiness (i.e. the
    assertion pass status).
    """
    @functools.wraps(method)
    def _wrapper(obj, *args, **kwargs):
        entry = method(obj, *args, **kwargs)
        # Second element is the caller
        caller_frame = inspect.stack()[1]
        entry.file_path = os.path.abspath(caller_frame[1])
        entry.line_no = caller_frame[2]
        # `obj` is either a namespace (e.g. `result.regex`) or the `Result`
        # itself -- resolve the actual Result object in both cases.
        if isinstance(obj, AssertionNamespace):
            result_obj = obj.result
        elif isinstance(obj, Result):
            result_obj = obj
        else:
            raise TypeError('Invalid assertion container: {}'.format(obj))
        result_obj.entries.append(entry)
        stdout_registry.log_entry(
            entry=entry,
            stdout_style=result_obj.stdout_style,
        )
        # A failing entry aborts the testcase immediately unless
        # `continue_on_failure` is enabled on the Result.
        if not entry and not result_obj.continue_on_failure:
            raise AssertionError(entry)
        return bool(entry)
    return _wrapper
class AssertionNamespace(object):
    """
    Common base for assertion namespaces (``result.regex``,
    ``result.table`` and friends).
    Inherit from this class to group custom assertion methods under a
    new attribute of the ``Result`` object.
    """
    def __init__(self, result):
        # Back-reference so `bind_entry` can reach the owning Result.
        self.result = result
class RegexNamespace(AssertionNamespace):
    """Contains logic for regular expression assertions."""

    @bind_entry
    def match(self, regexp, value, description=None, category=None, flags=0):
        """
        Checks if the given ``regexp`` matches the ``value``
        via ``re.match`` operation.

        .. code-block:: python

            result.regex.match(regexp='foo', value='foobar')

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param flags: Regex flags that will be passed
                      to the ``re.match`` function.
        :type flags: ``int``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.RegexMatch(
            regexp=regexp, string=value,
            flags=flags, description=description, category=category)

    @bind_entry
    def multiline_match(
        self, regexp, value,
        description=None, category=None
    ):
        """
        Checks if the given ``regexp`` matches the ``value``
        via ``re.match`` operation, uses ``re.MULTILINE`` and ``re.DOTALL``
        flags implicitly.

        .. code-block:: python

            result.regex.multiline_match(
                regexp='first line.*second',
                value=os.linesep.join([
                    'first line',
                    'second line',
                    'third line'
                ]),
            )

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param description: text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status.
        :rtype: ``bool``
        """
        return assertions.RegexMatch(
            regexp=regexp, string=value,
            flags=re.MULTILINE | re.DOTALL,
            description=description, category=category)

    @bind_entry
    def not_match(
        self, regexp, value,
        description=None, category=None, flags=0
    ):
        """
        Checks if the given ``regexp`` does not match the ``value``
        via ``re.match`` operation.

        .. code-block:: python

            result.regex.not_match('baz', 'foobar')

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param flags: Regex flags that will be
                      passed to the ``re.match`` function.
        :type flags: ``int``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status.
        :rtype: ``bool``
        """
        return assertions.RegexMatchNotExists(
            regexp=regexp, string=value,
            flags=flags, description=description, category=category)

    @bind_entry
    def multiline_not_match(
        self, regexp, value, description=None, category=None
    ):
        """
        Checks if the given ``regexp`` does not match the ``value``
        via ``re.match`` operation, uses ``re.MULTILINE`` and ``re.DOTALL``
        flags implicitly.

        .. code-block:: python

            result.regex.multiline_not_match(
                regexp='foobar',
                value=os.linesep.join([
                    'first line',
                    'second line',
                    'third line'
                ]),
            )

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.RegexMatchNotExists(
            regexp=regexp, string=value,
            flags=re.MULTILINE | re.DOTALL,
            description=description, category=category)

    @bind_entry
    def search(self, regexp, value, description=None, category=None, flags=0):
        """
        Checks if the given ``regexp`` exists in the ``value``
        via ``re.search`` operation.

        .. code-block:: python

            result.regex.search('bar', 'foobarbaz')

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param flags: Regex flags that will be passed
                      to the ``re.search`` function.
        :type flags: ``int``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.RegexSearch(
            regexp=regexp, string=value,
            flags=flags, description=description, category=category)

    @bind_entry
    def search_empty(
        self, regexp, value, description=None, category=None, flags=0
    ):
        """
        Checks if the given ``regexp`` does not exist in the ``value``
        via ``re.search`` operation.

        .. code-block:: python

            result.regex.search_empty('aaa', 'foobarbaz')

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param flags: Regex flags that will be passed
                      to the ``re.search`` function.
        :type flags: ``int``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.RegexSearchNotExists(
            regexp=regexp, string=value,
            flags=flags, description=description, category=category)

    @bind_entry
    def findall(
        self, regexp, value,
        description=None, category=None,
        flags=0, condition=None
    ):
        """
        Checks if there are one or more matches of the ``regexp`` exist in
        the ``value`` via ``re.finditer``.
        Can apply further assertions via ``condition`` func.

        .. code-block:: python

            result.regex.findall(
                regexp='foo',
                value='foo foo foo bar bar foo bar',
                condition=lambda num_matches: 2 < num_matches < 5,
            )

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param flags: Regex flags that will be passed
                      to the ``re.finditer`` function.
        :type flags: ``int``
        :param condition: A callable that accepts a single argument,
                          which is the number of matches (int).
        :type condition: ``callable``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.RegexFindIter(
            regexp=regexp,
            string=value,
            description=description,
            flags=flags,
            condition=condition,
            category=category,
        )

    @bind_entry
    def matchline(
        self, regexp, value, description=None, category=None, flags=0
    ):
        # Raw docstring: the example below contains a `\w` escape that
        # would otherwise be an invalid escape sequence in a plain string.
        r"""
        Checks if the given ``regexp`` returns a match
        (``re.match``) for any of the lines in the ``value``.

        .. code-block:: python

            result.regex.matchline(
                regexp=re.compile(r'\w+ line$'),
                value=os.linesep.join([
                    'first line',
                    'second aaa',
                    'third line'
                ]),
            )

        :param regexp: String pattern or compiled regexp object.
        :type regexp: ``str`` or compiled regex
        :param value: String to match against.
        :type value: ``str``
        :param flags: Regex flags that will be passed
                      to the ``re.match`` function.
        :type flags: ``int``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.RegexMatchLine(
            regexp=regexp,
            string=value,
            description=description,
            flags=flags,
            category=category,
        )
class TableNamespace(AssertionNamespace):
    """Contains logic for table assertions."""

    @bind_entry
    def column_contain(
        self, table, values, column,
        description=None, category=None,
        limit=None, report_fails_only=False
    ):
        """
        Checks if all of the values of a table's
        column contain values from a given list.

        .. code-block:: python

            result.table.column_contain(
                table=[
                    ['symbol', 'amount'],
                    ['AAPL', 12],
                    ['GOOG', 21],
                    ['FB', 32],
                    ['AMZN', 5],
                    ['MSFT', 42]
                ],
                values=['AAPL', 'AMZN'],
                column='symbol',
            )

        :param table: Tabular data
        :type table: ``list`` of ``list`` or ``list`` of ``dict``.
        :param values: Values that will be checked against each cell.
        :type values: ``iterable`` of ``object``
        :param column: Column name to check.
        :type column: ``str``
        :param limit: Maximum number of rows to process,
                      can be used for limiting output.
        :type limit: ``int``
        :param report_fails_only: Filtering option, output will contain
                                  failures only if this argument is True.
        :type report_fails_only: ``bool``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.ColumnContain(
            table=table, values=values, column=column, limit=limit,
            report_fails_only=report_fails_only, description=description,
            category=category,
        )

    @bind_entry
    def match(
        self, actual, expected,
        description=None, category=None,
        include_columns=None, exclude_columns=None,
        report_all=True, fail_limit=0,
    ):
        # Raw docstring: the second example contains a `\w` regex escape.
        r"""
        Compares two tables, uses equality for each table cell for plain
        values and supports regex / custom comparators as well.
        If the columns of the two tables are not the same,
        either ``include_columns`` or ``exclude_columns`` arguments
        must be used to have column uniformity.

        .. code-block:: python

            result.table.match(
                actual=[
                    ['name', 'age'],
                    ['Bob', 32],
                    ['Susan', 24],
                ],
                expected=[
                    ['name', 'age'],
                    ['Bob', 33],
                    ['David', 24],
                ]
            )

            result.table.match(
                actual=[
                    ['name', 'age'],
                    ['Bob', 32],
                    ['Susan', 24],
                ],
                expected=[
                    ['name', 'age'],
                    [re.compile(r'^B\w+'), 33],
                    ['David', lambda age: 20 < age < 50],
                ]
            )

        :param actual: Tabular data
        :type actual: ``list`` of ``list`` or ``list`` of ``dict``.
        :param expected: Tabular data, which can contain custom comparators.
        :type expected: ``list`` of ``list`` or ``list`` of ``dict``.
        :param include_columns: List of columns to include
                                in the comparison. Cannot be used
                                with ``exclude_columns``.
        :type include_columns: ``list`` of ``str``
        :param exclude_columns: List of columns to exclude
                                from the comparison. Cannot be used
                                with ``include_columns``.
        :type exclude_columns: ``list`` of ``str``
        :param report_all: Boolean flag for configuring output.
                           If True then all columns of the original
                           table will be displayed.
        :type report_all: ``bool``
        :param fail_limit: Max number of failures before aborting
                           the comparison run. Useful for large
                           tables, when we want to stop after we have N rows
                           that fail the comparison. The result will contain
                           only failing comparisons if this argument
                           is a positive integer.
        :type fail_limit: ``int``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.TableMatch(
            table=actual, expected_table=expected,
            include_columns=include_columns, exclude_columns=exclude_columns,
            report_all=report_all, fail_limit=fail_limit,
            description=description, category=category,
        )

    @bind_entry
    def log(self, table, display_index=False, description=None):
        """
        Logs a table to the report.

        :param table: Tabular data.
        :type table: ``list`` of ``list`` or ``list`` of ``dict``
        :param display_index: Flag whether to display row indices.
        :type display_index: ``bool``
        :param description: Text description for the assertion.
        :type description: ``str``
        :return: Always returns True, this is not an assertion so it cannot
                 fail.
        :rtype: ``bool``
        """
        return base.TableLog(table=table, display_index=display_index,
                             description=description)
class XMLNamespace(AssertionNamespace):
    """Assertions that operate on XML documents."""

    @bind_entry
    def check(
        self, element, xpath,
        description=None, category=None,
        tags=None, namespaces=None,
    ):
        """
        Asserts that the given ``xpath`` exists within the XML ``element``
        and, optionally, that the tag values found at that path match the
        expected ``tags`` (plain strings or compiled regex patterns).
        Namespace prefixes used in the xpath expression are declared via
        the ``namespaces`` mapping.

        .. code-block:: python

            result.xml.check(
                element='''
                    <Root>
                        <Test>Value1</Test>
                        <Test>Value2</Test>
                    </Root>
                ''',
                xpath='/Root/Test',
                tags=['Value1', 'Value2'],
            )

            result.xml.check(
                element='''
                    <SOAP-ENV:Envelope
                     xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
                        <SOAP-ENV:Header/>
                        <SOAP-ENV:Body>
                            <ns0:message
                             xmlns:ns0="http://testplan">Hello world!</ns0:message>
                        </SOAP-ENV:Body>
                    </SOAP-ENV:Envelope>
                ''',
                xpath='//*/a:message',
                tags=[re.compile(r'Hello*')],
                namespaces={"a": "http://testplan"},
            )

        :param element: XML element
        :type element: ``str`` or ``lxml.etree.Element``
        :param xpath: XPath expression to be used for navigation & check.
        :type xpath: ``str``
        :param tags: Tag values to match against in the given xpath.
        :type tags: ``list`` of ``str`` or compiled regex patterns
        :param namespaces: Prefix mapping for xpath expressions
                           (namespace prefixes as keys, URIs as values).
        :type namespaces: ``dict``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.XMLCheck(
            element=element,
            xpath=xpath,
            tags=tags,
            namespaces=namespaces,
            description=description,
            category=category,
        )
class DictNamespace(AssertionNamespace):
    """Contains logic for Dictionary related assertions."""

    @bind_entry
    def check(
        self, dictionary, description=None, category=None,
        has_keys=None, absent_keys=None
    ):
        """
        Checks for existence / absence of dictionary keys, uses top
        level keys in case of nested dictionaries.

        .. code-block:: python

            result.dict.check(
                dictionary={
                    'foo': 1, 'bar': 2, 'baz': 3,
                },
                has_keys=['foo', 'alpha'],
                absent_keys=['bar', 'beta']
            )

        :param dictionary: Dict object to check.
        :type dictionary: ``dict``
        :param has_keys: List of keys to check for existence.
        :type has_keys: ``list`` or ``object`` (items must be hashable)
        :param absent_keys: List of keys to check for absence.
        :type absent_keys: ``list`` or ``object`` (items must be hashable)
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.DictCheck(
            dictionary=dictionary, has_keys=has_keys,
            absent_keys=absent_keys, description=description,
            category=category,
        )

    @bind_entry
    def match(
        self, actual, expected, description=None, category=None,
        include_keys=None, exclude_keys=None, report_all=True,
        actual_description=None, expected_description=None,
    ):
        # Raw docstring: the second example contains a `\w` regex escape.
        r"""
        Matches two dictionaries, supports nested data. Custom
        comparators can be used as values on the ``expected`` dict.

        .. code-block:: python

            from testplan.common.utils import comparison

            result.dict.match(
                actual={
                    'foo': 1,
                    'bar': 2,
                },
                expected={
                    'foo': 1,
                    'bar': 5,
                    'extra-key': 10,
                },
            )

            result.dict.match(
                actual={
                    'foo': [1, 2, 3],
                    'bar': {'color': 'blue'},
                    'baz': 'hello world',
                },
                expected={
                    'foo': [1, 2, lambda v: isinstance(v, int)],
                    'bar': {
                        'color': comparison.In(['blue', 'red', 'yellow'])
                    },
                    'baz': re.compile(r'\w+ world'),
                }
            )

        :param actual: Original dictionary.
        :type actual: ``dict``.
        :param expected: Comparison dictionary, can contain custom comparators
                         (e.g. regex, lambda functions)
        :type expected: ``dict``
        :param include_keys: Keys to exclusively consider in the comparison.
        :type include_keys: ``list`` of ``object`` (items must be hashable)
        :param exclude_keys: Keys to ignore in the comparison.
        :type exclude_keys: ``list`` of ``object`` (items must be hashable)
        :param report_all: Formatting flag, includes even
                           ignored keys in report if True.
        :type report_all: ``bool``
        :param actual_description: Column header description for original dict.
        :type actual_description: ``str``
        :param expected_description: Column header
                                     description for expected dict.
        :type expected_description: ``str``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.DictMatch(
            value=actual,
            expected=expected,
            description=description,
            include_keys=include_keys,
            exclude_keys=exclude_keys,
            report_all=report_all,
            expected_description=expected_description,
            actual_description=actual_description,
            category=category,
        )

    @bind_entry
    def match_all(
        self, values, comparisons,
        description=None, category=None, key_weightings=None
    ):
        """
        Match multiple unordered dictionaries.
        Initially all value/expected comparison combinations are
        evaluated and converted to an error weight.
        If certain keys are more important than others, it is possible
        to give them additional weighting during the comparison,
        by specifying a "key_weightings" dict. The default weight of
        a mismatch is 100.
        The values/comparisons permutation that results in
        the least error appended to the report.

        .. code-block:: python

            result.dict.match_all(
                values=[
                    {'foo': 12, ...},
                    {'foo': 13, ...},
                    ...
                ],
                comparisons=[
                    Expected({'foo': 12, ...}),
                    Expected({'foo': 15, ...})
                    ...
                ],
                # twice the default weight of 100
                key_weightings={'foo': 200})

        :param values: Original values.
        :type values: ``list`` of ``dict``
        :param comparisons: Comparison objects.
        :type comparisons: ``list`` of
            ``testplan.common.utils.comparison.Expected``
        :param key_weightings: Per-key overrides that specify a different
                               weight for different keys.
        :type key_weightings: ``dict``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.DictMatchAll(
            values=values,
            comparisons=comparisons,
            key_weightings=key_weightings,
            description=description,
            category=category,
        )
class FixNamespace(AssertionNamespace):
    """Contains assertion logic that operates on fix messages."""

    @bind_entry
    def check(
        self, msg, description=None, category=None,
        has_tags=None, absent_tags=None
    ):
        """
        Checks existence / absence of tags in a Fix message.
        Checks top level tags only.

        .. code-block:: python

            result.fix.check(
                msg={
                    36: 6,
                    22: 5,
                    55: 2,
                    38: 5,
                    555: [ .. more nested data here ... ]
                },
                has_tags=[26, 22, 11],
                absent_tags=[444, 555],
            )

        :param msg: Fix message.
        :type msg: ``dict``
        :param has_tags: List of tags to check for existence.
        :type has_tags: ``list`` of ``object`` (items must be hashable)
        :param absent_tags: List of tags to check for absence.
        :type absent_tags: ``list`` of ``object`` (items must be hashable)
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.FixCheck(
            msg=msg, has_tags=has_tags,
            absent_tags=absent_tags, description=description,
            category=category,
        )

    @bind_entry
    def match(
        self, actual, expected, description=None, category=None,
        include_tags=None, exclude_tags=None, report_all=True,
        actual_description=None, expected_description=None,
    ):
        """
        Matches two FIX messages, supports repeating groups (nested data).
        Custom comparators can be used as values on the ``expected`` msg.

        .. code-block:: python

            result.fix.match(
                actual={
                    36: 6,
                    22: 5,
                    55: 2,
                    38: 5,
                    555: [ .. more nested data here ... ]
                },
                expected={
                    36: 6,
                    22: 5,
                    55: lambda val: val in [2, 3, 4],
                    38: 5,
                    555: [ .. more nested data here ... ]
                }
            )

        :param actual: Original FIX message.
        :type actual: ``dict``
        :param expected: Expected FIX message, can include compiled
                         regex patterns or callables for
                         advanced comparison.
        :type expected: ``dict``
        :param include_tags: Tags to exclusively consider in the comparison.
        :type include_tags: ``list`` of ``object`` (items must be hashable)
        :param exclude_tags: Keys to ignore in the comparison.
        :type exclude_tags: ``list`` of ``object`` (items must be hashable)
        :param report_all: Formatting flag, includes even
                           ignored tags in report if True.
        :type report_all: ``bool``
        :param actual_description: Column header description for original msg.
        :type actual_description: ``str``
        :param expected_description: Column header
                                     description for expected msg.
        :type expected_description: ``str``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.FixMatch(
            value=actual,
            expected=expected,
            description=description,
            category=category,
            include_tags=include_tags,
            exclude_tags=exclude_tags,
            report_all=report_all,
            expected_description=expected_description,
            actual_description=actual_description,
        )

    @bind_entry
    def match_all(
        self, values, comparisons,
        description=None, category=None, tag_weightings=None
    ):
        """
        Match multiple unordered FIX messages.
        Initially all value/expected comparison combinations are
        evaluated and converted to an error weight.
        If certain fix tags are more important than others (e.g. ID FIX tags),
        it is possible to give them additional weighting during the comparison,
        by specifying a "tag_weightings" dict.
        The default weight of a mismatch is 100.
        The values/comparisons permutation that results in
        the least error appended to the report.

        .. code-block:: python

            result.fix.match_all(
                values=[
                    { 36: 6, 22: 5, 55: 2, ...},
                    { 36: 7, ...},
                    ...
                ],
                comparisons=[
                    Expected({ 36: 6, 22: 5, 55: 2, ...},),
                    Expected({ 36: 7, ...})
                    ...
                ],
                # twice the default weight of 100
                tag_weightings={36: 200})

        :param values: Original values.
        :type values: ``list`` of ``dict``
        :param comparisons: Comparison objects.
        :type comparisons: ``list`` of
            ``testplan.common.utils.comparison.Expected``
        :param tag_weightings: Per-tag overrides that specify a different
                               weight for different tags.
        :type tag_weightings: ``dict``
        :param description: Text description for the assertion.
        :type description: ``str``
        :param category: Custom category that will be used for summarization.
        :type category: ``str``
        :return: Assertion pass status
        :rtype: ``bool``
        """
        return assertions.FixMatchAll(
            values=values,
            comparisons=comparisons,
            tag_weightings=tag_weightings,
            description=description,
            category=category,
        )
class Result(object):
    """
    Contains assertion methods and namespaces for generating test data.
    A new instance of ``Result`` object is passed to each testcase when a
    suite is run.
    """
    # Attribute name -> namespace class; one instance of each is attached
    # to every Result in `__init__` (e.g. `result.regex`, `result.table`).
    namespaces = {
        'regex': RegexNamespace,
        'table': TableNamespace,
        'xml': XMLNamespace,
        'dict': DictNamespace,
        'fix': FixNamespace,
    }
def __init__(
self,
stdout_style=None,
continue_on_failure=True,
_group_description=None,
_parent=None,
_summarize=False,
_num_passing=defaults.SUMMARY_NUM_PASSING,
_num_failing=defaults.SUMMARY_NUM_FAILING,
_scratch = None,
):
self.entries = []
self.stdout_style = stdout_style or STDOUT_STYLE
self.continue_on_failure = continue_on_failure
for key, value in self.get_namespaces().items():
if hasattr(self, key):
raise AttributeError(
'Name clash, cannot assign namespace: {}'.format(key))
setattr(self, key, value(result=self))
self._parent = _parent
self._group_description = _group_description
self._summarize = _summarize
self._num_passing = _num_passing
self._num_failing = _num_failing
self._scratch = _scratch
def __enter__(self):
if self._parent is None:
raise RuntimeError(
'Cannot use root level result objects as context managers.'
' Use `with result.group(...)` instead.')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._summarize:
entry_group = base.Summary(
entries=self.entries,
description=self._group_description,
num_passing=self._num_passing,
num_failing=self._num_failing
)
else:
entry_group = base.Group(
entries=self.entries,
description=self._group_description
)
self._parent.entries.append(entry_group)
return exc_type is None # re-raise errors if there is any
def get_namespaces(self):
"""
This method can be overridden for enabling
custom assertion namespaces for child classes.
"""
return self.namespaces or {}
def group(
self,
description=None,
summarize=False,
num_passing=defaults.SUMMARY_NUM_PASSING,
num_failing=defaults.SUMMARY_NUM_FAILING,
):
"""
Creates an assertion group or summary, which is helpful
for formatting assertion data on certain output
targets (e.g. PDF, JSON) and reducing the amount of
content that gets displayed.
Should be used as a context manager.
.. code-block:: python
# Group and sub groups
with result.group(description='Custom group description') as group:
group.not_equal(2, 3, description='Assertion within a group')
group.greater(5, 3)
with group.group() as sub_group:
sub_group.less(6, 3, description='Assertion in sub group')
# Summary example
with result.group(
summarize=True,
num_passing=4,
num_failing=10,
) as group:
for i in range(500):
# First 4 passing assertions will be displayed
group.equal(i, i)
# First 10 failing assertions will be displayed
group.equal(i, i + 1)
:param description: Text description for the assertion group.
:type description: ``str``
:param summarize: Flag for enabling summarization.
:type summarize: ``bool``
:param num_passing: Max limit for number of passing
assertions per category & assertion type.
:type num_passing: ``int``
:param num_failing: Max limit for number of failing
assertions per category & assertion type.
:type num_failing: ``int``
:return: A new result object that refers the current result as a parent.
:rtype: Result object
"""
return Result(
stdout_style=self.stdout_style,
continue_on_failure=self.continue_on_failure,
_group_description=description,
_parent=self,
_summarize=summarize,
_num_passing=num_passing,
_num_failing=num_failing
)
@property
def passed(self):
"""Entries stored passed status."""
return all(getattr(entry, 'passed', True) for entry in self.entries)
@bind_entry
def log(self, message):
"""
Create a string message entry, can be used for providing additional
context related to test steps.
.. code-block:: python
result.log('Custom log message ...')
:param message: Log message
:type message: ``str``
:return: ``True``
:rtype: ``bool``
"""
# TODO: Generate different entries per obj type (dict, table etc)
return base.Log(message=message)
@bind_entry
def fail(self, description, category=None):
"""
Failure assertion, can be used for explicitly failing a testcase.
Most common usage is within a conditional block.
.. code-block:: python
if not some_condition:
result.fail('Unexpected failure: {}'.format(...))
:param description: Text description of the failure.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: False
:rtype: ``bool``
"""
return assertions.Fail(description, category=category)
@bind_entry
def true(self, value, description=None, category=None):
"""
Boolean assertion, checks if ``value`` is truthy.
.. code-block:: python
result.true(some_obj, 'Custom description')
:param value: Value to be evaluated for truthiness.
:type value: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.IsTrue(
value, description=description, category=category)
@bind_entry
def false(self, value, description=None, category=None):
"""
Boolean assertion, checks if ``value`` is falsy.
.. code-block:: python
result.false(some_obj, 'Custom description')
:param value: Value to be evaluated for falsiness.
:type value: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.IsFalse(
value, description=description, category=category)
@bind_entry
def equal(self, actual, expected, description=None, category=None):
"""
Equality assertion, checks if ``actual == expected``.
Can be used via shortcut: ``result.eq``.
.. code-block:: python
result.equal('foo', 'foo', 'Custom description')
:param actual: First (actual) value of the comparison.
:type actual: ``object``
:param expected: Second (expected) value of the comparison.
:type expected: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.Equal(
actual, expected, description=description, category=category)
@bind_entry
def not_equal(self, actual, expected, description=None, category=None):
"""
Inequality assertion, checks if ``actual != expected``.
Can be used via shortcut: ``result.ne``.
.. code-block:: python
result.not_equal('foo', 'bar', 'Custom description')
:param actual: First (actual) value of the comparison.
:type actual: ``object``
:param expected: Second (expected) value of the comparison.
:type expected: ``object``
:param description: Text description for the assertion.
:type description: ``str``
:param category: Custom category that will be used for summarization.
:type category: ``str``
:return: Assertion pass status
:rtype: ``bool``
"""
return assertions.NotEqual(
actual, expected, description=description, category=category)
@bind_entry
def less(self, first, second, description=None, category=None):
    """
    Comparison assertion (``first < second``); shortcut: ``result.lt``.

    .. code-block:: python

        result.less(3, 5, 'Custom description')

    :param first: Left side of the comparison.
    :param second: Right side of the comparison.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.Less(
        first,
        second,
        description=description,
        category=category,
    )
    return entry
@bind_entry
def greater(self, first, second, description=None, category=None):
    """
    Comparison assertion (``first > second``); shortcut: ``result.gt``.

    .. code-block:: python

        result.greater(5, 3, 'Custom description')

    :param first: Left side of the comparison.
    :param second: Right side of the comparison.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.Greater(
        first,
        second,
        description=description,
        category=category,
    )
    return entry
@bind_entry
def less_equal(self, first, second, description=None, category=None):
    """
    Comparison assertion (``first <= second``); shortcut: ``result.le``.

    .. code-block:: python

        result.less_equal(3, 5, 'Custom description')

    :param first: Left side of the comparison.
    :param second: Right side of the comparison.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.LessEqual(
        first,
        second,
        description=description,
        category=category,
    )
    return entry
@bind_entry
def greater_equal(self, first, second, description=None, category=None):
    """
    Comparison assertion (``first >= second``); shortcut: ``result.ge``.

    .. code-block:: python

        result.greater_equal(5, 3, 'Custom description')

    :param first: Left side of the comparison.
    :param second: Right side of the comparison.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.GreaterEqual(
        first,
        second,
        description=description,
        category=category,
    )
    return entry
# Shortcut aliases for the basic comparators.  These names are part of the
# public API (user test code calls e.g. ``result.eq(...)``) -- do not rename.
eq = equal
ne = not_equal
lt = less
gt = greater
le = less_equal
ge = greater_equal
@bind_entry
def contain(self, member, container, description=None, category=None):
    """
    Membership assertion (``member in container``).

    .. code-block:: python

        result.contain(1, [1, 2, 3, 4], 'Custom description')

    :param member: Item checked for existence in the container.
    :param container: Object supporting item lookup operations.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.Contain(
        member,
        container,
        description=description,
        category=category,
    )
    return entry
@bind_entry
def not_contain(self, member, container, description=None, category=None):
    """
    Non-membership assertion (``member not in container``).

    .. code-block:: python

        result.not_contain(5, [1, 2, 3, 4], 'Custom description')

    :param member: Item checked for absence from the container.
    :param container: Object supporting item lookup operations.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.NotContain(
        member,
        container,
        description=description,
        category=category,
    )
    return entry
@bind_entry
def equal_slices(self, actual, expected, slices, description=None, category=None):
    """
    Checks that the given ``slices`` of ``actual`` and ``expected``
    are equal.

    .. code-block:: python

        result.equal_slices(
            [1, 2, 3, 4, 5, 6, 7, 8],
            ['a', 'b', 3, 4, 'c', 'd', 7, 8],
            slices=[slice(2, 4), slice(6, 8)],
            description='Comparison of slices'
        )

    :param actual: First (actual) sliceable object.
    :param expected: Second (expected) sliceable object.
    :param slices: ``list`` of ``slice`` objects applied to both operands.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.EqualSlices(
        expected=expected,
        actual=actual,
        slices=slices,
        description=description,
        category=category,
    )
    return entry
@bind_entry
def equal_exclude_slices(self, actual, expected, slices, description=None, category=None):
    """
    Checks that the items of ``actual`` and ``expected`` lying OUTSIDE
    the given ``slices`` are equal.

    .. code-block:: python

        result.equal_exclude_slices(
            [1, 2, 3, 4, 5, 6, 7, 8],
            ['a', 'b', 3, 4, 'c', 'd', 'e', 'f'],
            slices=[slice(0, 2), slice(4, 8)],
            description='Comparison of slices (exclusion)'
        )

    :param actual: First (actual) sliceable object.
    :param expected: Second (expected) sliceable object.
    :param slices: ``list`` of ``slice`` objects marking items to exclude.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    entry = assertions.EqualExcludeSlices(
        expected=expected,
        actual=actual,
        slices=slices,
        description=description,
        category=category,
    )
    return entry
def raises(self, exceptions, description=None,
           category=None, pattern=None, func=None):
    """
    Context manager asserting that the wrapped code block raises one of
    the given exception type(s); optional ``pattern`` and ``func``
    checks are applied to the caught exception as well.

    .. code-block:: python

        with result.raises(KeyError):
            {'foo': 3}['bar']

        with result.raises(ValueError, pattern='foo'):
            raise ValueError('abc foobar xyz')

        def check_exception(exc):
            ...

        with result.raises(TypeError, func=check_exception):
            raise TypeError(...)

    :param exceptions: Exception class, or list of classes, to expect.
    :param pattern: String or compiled regex searched (``re.search``)
        within the exception message.
    :param func: Callable invoked with the caught exception object.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    capture_kwargs = dict(
        result=self,
        assertion_kls=assertions.ExceptionRaised,
        exceptions=exceptions,
        description=description,
        category=category,
        func=func,
        pattern=pattern,
    )
    return ExceptionCapture(**capture_kwargs)
def not_raises(self, exceptions, description=None,
               category=None, pattern=None, func=None):
    """
    Context manager asserting that the wrapped code block does NOT raise
    the given exception type(s); optional ``pattern`` and ``func``
    checks further narrow which raised exceptions count as failures.

    .. code-block:: python

        with result.not_raises(AttributeError):
            {'foo': 3}['bar']

        with result.not_raises(ValueError, pattern='foo'):
            raise ValueError('abc xyz')

        def check_exception(exc):
            ...

        with result.not_raises(TypeError, func=check_exception):
            raise TypeError(...)

    :param exceptions: Exception class, or list of classes, to guard against.
    :param pattern: String or compiled regex searched (``re.search``)
        within the exception message.
    :param func: Callable invoked with the caught exception object.
    :param description: Optional text description for the assertion.
    :param category: Optional custom category used for summarization.
    :return: Assertion pass status.
    :rtype: ``bool``
    """
    capture_kwargs = dict(
        result=self,
        assertion_kls=assertions.ExceptionNotRaised,
        exceptions=exceptions,
        description=description,
        category=category,
        func=func,
        pattern=pattern,
    )
    return ExceptionCapture(**capture_kwargs)
@bind_entry
def matplot(self, pyplot, width=2, height=2, description=None):
    """
    Attach a Matplotlib plot to the report.  Not an assertion, so it
    can never fail.

    :param pyplot: ``matplotlib.pyplot`` object to render.
    :param width: Plot width in inches.
    :param height: Plot height in inches.
    :param description: Optional text description for the entry.
    :return: Always ``True``.
    :rtype: ``bool``
    """
    # Render to a uniquely named PNG inside the scratch directory so
    # concurrent plots never collide.
    image_file_path = os.path.join(
        self._scratch, '{0}.png'.format(uuid.uuid4()))
    return base.MatPlot(
        pyplot=pyplot,
        image_file_path=image_file_path,
        width=width,
        height=height,
        description=description,
    )
@property
def serialized_entries(self):
    """
    Entry data in dictionary form, as stored in the related
    ``TestCaseReport``'s ``entries`` attribute.
    """
    return list(map(schema_registry.serialize, self))
def __repr__(self):
    """Delegate to the repr of the underlying entries list."""
    return '{!r}'.format(self.entries)
def __iter__(self):
    """Iterate over the recorded entries."""
    return self.entries.__iter__()
def __len__(self):
    """Number of recorded entries."""
    return self.entries.__len__()
| 35.012376 | 80 | 0.562531 |
81e322e8d893d8218c1965e7ccd01759c5836966 | 6,610 | py | Python | bisque/test_queuer.py | MattShannon/armspeech | d9259d1b64fa2e785d34877c6014788f3e16a377 | [
"BSD-3-Clause"
] | 34 | 2015-02-12T15:16:44.000Z | 2020-09-27T17:07:35.000Z | bisque/test_queuer.py | nd1511/armspeech | d9259d1b64fa2e785d34877c6014788f3e16a377 | [
"BSD-3-Clause"
] | null | null | null | bisque/test_queuer.py | nd1511/armspeech | d9259d1b64fa2e785d34877c6014788f3e16a377 | [
"BSD-3-Clause"
] | 9 | 2015-02-06T18:32:20.000Z | 2018-10-27T20:11:08.000Z | """Unit tests for distributed computation execution."""
# Copyright 2011, 2012, 2013, 2014, 2015 Matt Shannon
# This file is part of armspeech.
# See `License` for details of license and warranty.
import unittest
import logging
import time
from codedep import codeDeps
from bisque import distribute
import bisque.queuer as qr
from bisque import sge_queuer
from bisque.distribute import lift
import bisque.test_queuer_jobs as jobs
from bisque.filehelp import TempDir
@codeDeps(distribute.ThunkArtifact, jobs.AddJob, jobs.OneJob, jobs.getOne)
def simpleTestDag():
    """Build a small diamond-shaped job DAG.

    Returns (list of (artifact, expected value) pairs, expected total job
    count, expected number of final jobs).
    """
    oneJob = jobs.OneJob(name = 'oneJob1')
    oneArt = distribute.ThunkArtifact(jobs.getOne)
    jobA = jobs.AddJob(oneJob.valueOut, oneArt, name = 'addJobA')
    jobB = jobs.AddJob(oneArt, jobA.valueOut, name = 'addJobB')
    jobC = jobs.AddJob(jobA.valueOut, jobB.valueOut, name = 'addJobC')
    jobD = jobs.AddJob(oneJob.valueOut, jobB.valueOut, name = 'addJobD')
    expected = [(jobC.valueOut, 5), (jobD.valueOut, 4)]
    return expected, 5, 2
@codeDeps(distribute.ThunkArtifact, jobs.add, jobs.getOne, lift)
def simpleTestDagFunctionalSugar():
    """Same DAG as simpleTestDag, built with the ``lift`` functional sugar.

    Returns (list of (artifact, expected value) pairs, expected total job
    count, expected number of final jobs).
    """
    artOne = lift(jobs.getOne, name = 'oneJob1')()
    artThunk = distribute.ThunkArtifact(jobs.getOne)
    artA = lift(jobs.add, name = 'addJobA')(artOne, artThunk)
    artB = lift(jobs.add, name = 'addJobB')(artThunk, artA)
    artC = lift(jobs.add, name = 'addJobC')(artA, artB)
    artD = lift(jobs.add, name = 'addJobD')(artOne, artB)
    expected = [(artC, 5), (artD, 4)]
    return expected, 5, 2
@codeDeps(jobs.add, jobs.getOne, lift)
def liftExampleDag():
    """Minimal DAG exercising ``lift``: computes 1 and 1 + 1 == 2.

    Returns (list of (artifact, expected value) pairs, expected total job
    count, expected number of final jobs).
    """
    one = lift(jobs.getOne)()
    two = lift(jobs.add)(one, y = one)
    return [(two, 2), (one, 1)], 2, 2
@codeDeps(TempDir, liftExampleDag, qr.BuildRepo, qr.LocalQueuer,
          sge_queuer.MockSgeQueuer, simpleTestDag, simpleTestDagFunctionalSugar
          )
class TestDistribute(unittest.TestCase):
    """End-to-end tests driving the example DAGs through the queuers."""

    def test_LocalQueuer(self):
        # The local queuer runs jobs synchronously, so all artifacts are
        # available as soon as generateArtifacts() returns.
        with TempDir() as tempDir:
            buildRepo = qr.BuildRepo(base = tempDir.location)
            queuer = qr.LocalQueuer(buildRepo = buildRepo)
            for testDag, totJobs, finalJobs in [simpleTestDag(),
                                                simpleTestDagFunctionalSugar(),
                                                liftExampleDag()]:
                live = queuer.generateArtifacts(
                    [ art for art, expectedValue in testDag ],
                    verbosity = 0
                )
                for art, expectedValue in testDag:
                    assert art.loadValue(buildRepo) == expectedValue

    def test_MockSgeQueuer_one_big_submission(self):
        # Submit each whole DAG at once, then poll until the final jobs end.
        for testDag, totJobs, finalJobs in [simpleTestDag(),
                                            simpleTestDagFunctionalSugar(),
                                            liftExampleDag()]:
            with TempDir() as tempDir:
                buildRepo = qr.BuildRepo(base = tempDir.location)
                with sge_queuer.MockSgeQueuer(buildRepo = buildRepo) as queuer:
                    finalArtifacts = [ art for art, expectedValue in testDag ]
                    live = queuer.generateArtifacts(finalArtifacts,
                                                    verbosity = 0)
                    assert len(live) == totJobs
                    # Live jobs that directly produce a requested artifact.
                    finalLiveJobs = [
                        live[job.secHash()]
                        for art in finalArtifacts for job in art.parents()
                        if job.secHash() in live
                    ]
                    assert len(finalLiveJobs) == finalJobs
                    # Busy-wait (with a short sleep) for the final jobs.
                    while not all([ liveJob.hasEnded()
                                    for liveJob in finalLiveJobs ]):
                        time.sleep(0.1)
                    assert all([ liveJob.hasCompleted()
                                 for liveJob in live.values() ])
                    for art, expectedValue in testDag:
                        assert art.loadValue(buildRepo) == expectedValue
                    # check no jobs submitted if we already have the desired
                    # artifacts
                    live = queuer.generateArtifacts(finalArtifacts,
                                                    verbosity = 0)
                    assert len(live) == 0

    def test_MockSgeQueuer_several_little_submissions(self):
        # Submit the DAG artifact-by-artifact so later submissions can
        # re-use jobs that earlier submissions already queued.
        for testDag, totJobs, finalJobs in [simpleTestDag(),
                                            simpleTestDagFunctionalSugar(),
                                            liftExampleDag()]:
            with TempDir() as tempDir:
                buildRepo = qr.BuildRepo(base = tempDir.location)
                with sge_queuer.MockSgeQueuer(buildRepo = buildRepo) as queuer:
                    finalLiveJobs = []
                    liveJobDirs = set()
                    totSubmitted = 0
                    for art, expectedValue in testDag:
                        live = queuer.generateArtifacts([art], verbosity = 0)
                        finalLiveJobs.extend([
                            live[job.secHash()]
                            for job in art.parents() if job.secHash() in live
                        ])
                        liveJobDirs.update([ liveJob.dir
                                             for liveJob in live.values() ])
                        totSubmitted += len(live)
                    # Distinct job dirs must cover the whole DAG exactly once.
                    assert len(liveJobDirs) == totJobs
                    assert len(finalLiveJobs) == finalJobs
                    if totSubmitted == totJobs:
                        logging.warning(
                            're-use of submitted jobs for MockSgeQueuer not'
                            ' properly tested, since jobs completed too fast'
                        )
                    while not all([ liveJob.hasEnded()
                                    for liveJob in finalLiveJobs ]):
                        time.sleep(0.1)
                    # NOTE(review): `live` below only holds the jobs from the
                    # LAST submission; presumably intentional -- confirm
                    # whether all submitted jobs should be checked instead.
                    assert all([ liveJob.hasCompleted()
                                 for liveJob in live.values() ])
                    for art, expectedValue in testDag:
                        assert art.loadValue(buildRepo) == expectedValue
                    # check no jobs submitted if we already have the desired
                    # artifacts
                    for art, expectedValue in testDag:
                        live = queuer.generateArtifacts([art], verbosity = 0)
                        assert len(live) == 0
@codeDeps(TestDistribute)
def suite():
    """Return a unittest suite containing all TestDistribute cases."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(TestDistribute)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| 46.549296 | 79 | 0.545234 |
36f94a42b6a8d778563a0b0e599879d6e1e73301 | 273 | py | Python | src/multidirmap/__init__.py | janrg/multidirmap | d056607f1e5aeb59ba8447361315748dca80c4f0 | [
"MIT"
] | 3 | 2019-10-11T14:16:59.000Z | 2019-10-16T15:40:48.000Z | src/multidirmap/__init__.py | janrg/multidirmap | d056607f1e5aeb59ba8447361315748dca80c4f0 | [
"MIT"
] | 5 | 2018-07-28T16:20:50.000Z | 2019-09-27T13:47:50.000Z | src/multidirmap/__init__.py | janrg/multidirmap | d056607f1e5aeb59ba8447361315748dca80c4f0 | [
"MIT"
] | 1 | 2021-05-20T14:35:49.000Z | 2021-05-20T14:35:49.000Z | """A multidirectional mapping with an arbitrary number of key columns."""
from .multidirmap import DuplicateKeyError
from .multidirmap import MultiDirMap
from .multidirmap import Overwrite

# Package version (single-sourced here for packaging tooling).
__version__ = "0.3.0"

# Public API re-exported from the implementation module.
__all__ = ["MultiDirMap", "DuplicateKeyError", "Overwrite"]
| 30.333333 | 73 | 0.787546 |
af6ad457bc70f3a0249949b495db7a1707b9bdfa | 3,586 | py | Python | pytorch_toolbelt/losses/jaccard.py | malk271828/pytorch-toolbelt | d5fa695b7267ab2c968447391853207e2bedbef5 | [
"MIT"
] | 1 | 2020-07-02T00:22:16.000Z | 2020-07-02T00:22:16.000Z | pytorch_toolbelt/losses/jaccard.py | preheatedKD/pytorch-toolbelt | d8a7d25c887c5f1c9a6c8e07e8b887bc6fc4617c | [
"MIT"
] | null | null | null | pytorch_toolbelt/losses/jaccard.py | preheatedKD/pytorch-toolbelt | d8a7d25c887c5f1c9a6c8e07e8b887bc6fc4617c | [
"MIT"
] | null | null | null | from typing import List
import torch
import torch.nn.functional as F
from pytorch_toolbelt.utils.torch_utils import to_tensor
from torch import Tensor
from torch.nn.modules.loss import _Loss
from .functional import soft_jaccard_score
__all__ = ["JaccardLoss", "BINARY_MODE", "MULTICLASS_MODE", "MULTILABEL_MODE"]

# Loss computation modes: single-channel binary masks, mutually-exclusive
# multi-class targets, and independent per-channel multi-label targets.
BINARY_MODE = "binary"
MULTICLASS_MODE = "multiclass"
MULTILABEL_MODE = "multilabel"
class JaccardLoss(_Loss):
    """
    Implementation of Jaccard (IoU) loss for image segmentation tasks.
    It supports binary, multi-class and multi-label cases.
    """

    def __init__(self, mode: str, classes: List[int] = None, log_loss=False, from_logits=True, smooth=0, eps=1e-7):
        """
        :param mode: Metric mode {'binary', 'multiclass', 'multilabel'}
        :param classes: Optional list of class indices that contribute to the
            loss; by default all channels are included.
        :param log_loss: If True, loss is computed as ``-log(jaccard)``;
            otherwise ``1 - jaccard``.
        :param from_logits: If True, assumes input is raw logits.
        :param smooth: Smoothing constant passed to ``soft_jaccard_score``.
        :param eps: Small epsilon for numerical stability.
        """
        assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
        super(JaccardLoss, self).__init__()
        self.mode = mode
        if classes is not None:
            # Binary mode has a single channel, so class masking is meaningless.
            assert mode != BINARY_MODE, "Masking classes is not supported with mode=binary"
            classes = to_tensor(classes, dtype=torch.long)
        self.classes = classes
        self.from_logits = from_logits
        self.smooth = smooth
        self.eps = eps
        self.log_loss = log_loss

    def forward(self, y_pred: Tensor, y_true: Tensor) -> Tensor:
        """
        :param y_pred: NxCxHxW prediction tensor (logits or probabilities).
        :param y_true: NxHxW class-index targets in multiclass mode;
            NxHxW (binary) / NxCxHxW (multilabel) masks otherwise --
            the reshaping below handles each mode.
        :return: scalar loss tensor
        """
        assert y_true.size(0) == y_pred.size(0)

        if self.from_logits:
            # Apply activations to get [0..1] class probabilities
            # Using Log-Exp as this gives more numerically stable result and does not cause vanishing gradient on
            # extreme values 0 and 1
            if self.mode == MULTICLASS_MODE:
                y_pred = y_pred.log_softmax(dim=1).exp()
            else:
                y_pred = F.logsigmoid(y_pred).exp()

        bs = y_true.size(0)
        num_classes = y_pred.size(1)
        # Reduce over batch and flattened spatial dims -> one score per class.
        dims = (0, 2)

        if self.mode == BINARY_MODE:
            y_true = y_true.view(bs, 1, -1)
            y_pred = y_pred.view(bs, 1, -1)

        if self.mode == MULTICLASS_MODE:
            y_true = y_true.view(bs, -1)
            y_pred = y_pred.view(bs, num_classes, -1)
            # Class indices -> one-hot channels so shapes match y_pred.
            y_true = F.one_hot(y_true, num_classes)  # N,H*W -> N,H*W, C
            y_true = y_true.permute(0, 2, 1)  # N, C, H*W

        if self.mode == MULTILABEL_MODE:
            y_true = y_true.view(bs, num_classes, -1)
            y_pred = y_pred.view(bs, num_classes, -1)

        scores = soft_jaccard_score(y_pred, y_true.type(y_pred.dtype), self.smooth, self.eps, dims=dims)

        if self.log_loss:
            loss = -torch.log(scores.clamp_min(self.eps))
        else:
            loss = 1 - scores

        # IoU loss is defined for non-empty classes
        # So we zero contribution of channel that does not have true pixels
        # NOTE: A better workaround would be to use loss term `mean(y_pred)`
        # for this case, however it will be a modified jaccard loss
        mask = y_true.sum(dims) > 0
        loss *= mask.float()

        if self.classes is not None:
            loss = loss[self.classes]

        return loss.mean()
| 34.480769 | 115 | 0.618516 |
837aa2384af3f7574399afc49e1a0b87f8bcd828 | 455 | py | Python | Python/binary-search.py | coolryze/LeetCode | 03876232521a20d32f8fa4e7d6d19cf208739a79 | [
"MIT"
] | 2 | 2018-07-18T01:33:07.000Z | 2018-11-16T03:17:03.000Z | Python/binary-search.py | coolryze/LeetCode | 03876232521a20d32f8fa4e7d6d19cf208739a79 | [
"MIT"
] | null | null | null | Python/binary-search.py | coolryze/LeetCode | 03876232521a20d32f8fa4e7d6d19cf208739a79 | [
"MIT"
] | null | null | null | class Solution:
def search(self, nums, target):
    """
    Return the index of ``target`` in the ascending-sorted ``nums``,
    or -1 if it is absent.  Classic binary search, O(log n).

    :type nums: List[int]
    :type target: int
    :rtype: int
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        probe = nums[mid]
        if probe == target:
            return mid
        if probe < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
| 22.75 | 40 | 0.417582 |
f2476af00cbf6b3894ed9f5de481da9325724cb4 | 650 | py | Python | check/forms.py | debajyotiroyc/CrispyForm_Django | 847d3a6a2d9c2ac419d3da39ed17d1ced3bd92af | [
"MIT"
] | null | null | null | check/forms.py | debajyotiroyc/CrispyForm_Django | 847d3a6a2d9c2ac419d3da39ed17d1ced3bd92af | [
"MIT"
] | null | null | null | check/forms.py | debajyotiroyc/CrispyForm_Django | 847d3a6a2d9c2ac419d3da39ed17d1ced3bd92af | [
"MIT"
] | null | null | null | from django import forms
from check.models import info,Gen
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit,Layout,Row,Column
class StudentForm(forms.ModelForm):
    """ModelForm for the ``info`` model with a radio-button gender field."""

    # Radio buttons instead of the default select widget; ``Gen`` supplies
    # the (value, label) choice pairs and "male" is preselected.
    gender = forms.ChoiceField(choices=Gen, widget=forms.RadioSelect, initial="male")

    class Meta:
        model = info
        fields = "__all__"

    # NOTE(review): crispy-forms FormHelper setup kept below as a commented
    # reference; uncomment to render a submit button via crispy layout.
    #def __init__(self,*args,**kwargs):
    #    super().__init__(*args,**kwargs)
    #    self.helper=FormHelper()
    #    self.helper.form_method='post'
    #    self.helper.add_input(Submit('save_student','Save Student'))
    #    self.helper.add_input(Submit('cancel', 'cancel'))
| 29.545455 | 82 | 0.676923 |
da0cb6df0ecfb7bbb9663819c636ed4861a79bda | 5,042 | py | Python | fexm/docker_scripts/docker_image.py | fgsect/fexm | cf213c9dea3778c09c1d475e6a16b9db78a6f1e6 | [
"Apache-2.0"
] | 105 | 2018-08-09T22:13:59.000Z | 2022-03-26T23:24:20.000Z | fexm/docker_scripts/docker_image.py | DeadManINDIA/fexm | ca6629bbcbf79639871d3ec52bc2a7de9ae453a4 | [
"Apache-2.0"
] | 13 | 2018-08-23T13:40:04.000Z | 2022-03-11T23:28:00.000Z | fexm/docker_scripts/docker_image.py | DeadManINDIA/fexm | ca6629bbcbf79639871d3ec52bc2a7de9ae453a4 | [
"Apache-2.0"
] | 25 | 2018-08-09T21:56:12.000Z | 2022-03-22T22:08:12.000Z | import uuid
import os
import sh
from sh import docker
def process_output(line):
    """Echo one line of subprocess output (used as an ``sh`` _out callback)."""
    print("%s" % (line,))
class DockerImage(object):
    """
    An object of this class represents a dockerimage, built from a
    dockerfile on disk via the ``sh``-wrapped ``docker`` CLI.
    """

    # Base/seed image names used by the factory classmethods below.
    BASE_IMAGE_NAME = "githubfuzzerbase"
    SEED_IMAGE_NAME = "githubfuzzer"

    def __init__(self, dockerfile_path: str, image_name: str):
        """
        Validate the dockerfile path and immediately build the image.

        :param dockerfile_path: The path to the dockerfile.
        :param image_name: The name of the built image.
        :raises FileNotFoundError: If ``dockerfile_path`` is not a file.
        """
        self.dockerfile_path = dockerfile_path
        if not os.path.exists(dockerfile_path) or not os.path.isfile(dockerfile_path):
            raise FileNotFoundError("dockerfile_path must be path to a file! Is {0} instead".format(dockerfile_path))
        self.image_name = image_name
        # Flipped to True once ``docker build`` succeeds.
        self.image_built = False
        self.build_image()

    def build_image(self):
        """Run ``docker build`` with the dockerfile's directory as context."""
        print("Running docker build", ["-t", self.image_name, os.path.dirname(self.dockerfile_path)])
        # Build output lines are streamed through process_output().
        build_command = docker.build("-t", self.image_name, os.path.dirname(self.dockerfile_path),
                                     _out=process_output)  # type: sh.RunningCommand
        if build_command.exit_code == 0:
            self.image_built = True

    def delete_image(self):
        """Remove the image; only mark it gone if ``docker rmi`` succeeded."""
        delete_command = docker.rmi(self.image_name)
        if delete_command.exit_code == 0:
            self.image_built = False

    @classmethod
    def create_afl_docker_image_from_repo_path(cls, repo_path: str):
        # Generate a dockerfile that copies the repo into the SEED image
        # and build it under a unique name.
        dockerfile_string = "FROM {0}\n".format(DockerImage.SEED_IMAGE_NAME)
        dockerfile_string += "\nCOPY . /" + repo_path
        with open(repo_path + "/dockerfile", "w") as dockerfile:
            dockerfile.write(dockerfile_string)
        image_name = "fuzz_" + os.path.basename(repo_path) + str(uuid.uuid1())
        image_name = image_name.lower()  # Docker images names must be in lower case
        return cls(dockerfile_path=repo_path + "/dockerfile", image_name=image_name)

    @classmethod
    def create_afl_base_image_from_seeds_path(cls, seeds_path: str, name: str):
        # AFL base image with libpcap-dev installed and the seed corpus
        # baked into /seeds.
        dockerfile_string = "FROM 0x6c7862/afl-fuzz\n"
        dockerfile_string += "RUN apt-get update && apt-get install -y libpcap-dev\n"
        dockerfile_string += "COPY . seeds/"
        print(seeds_path + "/dockerfile")
        with open(seeds_path + "/dockerfile", "w") as dockerfile:
            dockerfile.write(dockerfile_string)
        image_name = name
        di = DockerImage(dockerfile_path=seeds_path + "/dockerfile", image_name=image_name)
        print("Done###")
        return di

    @classmethod
    def create_afl_pacman_base_image_without_seeds(cls):
        # Build the pacman+AFL base image from the checked-in Dockerfile.
        di = DockerImage(dockerfile_path=os.path.dirname(__file__) + "/afl_base_image/Dockerfile",
                         image_name="pacman-afl-fuzz")
        return di

    @classmethod
    def create_afl_pacman_base_image_from_seeds_path(cls, seeds_path: str, name: str):
        # Ensure the pacman base image exists first, then derive an image
        # that adds the seed corpus under /fuzz/seeds.
        di = DockerImage(dockerfile_path=os.path.dirname(__file__) + "/afl_base_image/Dockerfile",
                         image_name="pacman-afl-fuzz")
        dockerfile_string = "FROM pacman-afl-fuzz\n"
        dockerfile_string += "RUN mkdir /fuzz\n"
        dockerfile_string += "WORKDIR /fuzz/\n"
        dockerfile_string += "COPY . seeds/"
        print(seeds_path + "/dockerfile")
        with open(seeds_path + "/dockerfile", "w") as dockerfile:
            dockerfile.write(dockerfile_string)
        image_name = name
        di = DockerImage(dockerfile_path=seeds_path + "/dockerfile", image_name=image_name)
        print("Done###")
        return di

    @classmethod
    def create_githubfuzzer_image(cls, seeds_path: str):
        # Derive the SEED image from the base image plus a seed corpus.
        dockerfile_string = "FROM {0}\n".format(DockerImage.BASE_IMAGE_NAME)
        dockerfile_string += "COPY . seeds/"
        with open(seeds_path + "/dockerfile", "w") as dockerfile:
            dockerfile.write(dockerfile_string)
        image_name = DockerImage.SEED_IMAGE_NAME
        di = DockerImage(dockerfile_path=seeds_path + "/dockerfile", image_name=image_name)
        return di

    @classmethod
    def create_aptfuzzer_iamge(cls, baseimagename: str, image_name: str):
        # NOTE(review): method name keeps the historical "iamge" typo because
        # external callers may reference it by this name.
        dockerfile_string = "FROM {0}\n".format(baseimagename)
        dockerfile_string += "COPY . /inputinferer\n"
        dockerfile_string += 'ENTRYPOINT ["python3.5","inputinferer/config_finder_for_apt_package.py"]\n'
        with open(os.path.dirname(os.path.realpath(__file__)) + "/../configfinder/dockerfile", "w") as dockerfp:
            dockerfp.write(dockerfile_string)
        # Keep the local virtualenv out of the docker build context.
        with open(os.path.dirname(os.path.realpath(__file__)) + "/../configfinder/.dockerignore", "w") as dockerfp:
            dockerfp.write("env")
        di = DockerImage(dockerfile_path=os.path.dirname(os.path.realpath(__file__)) + "/../configfinder/dockerfile",
                         image_name=image_name)
        return di

    @staticmethod
    def check_if_base_image_exists() -> bool:
        # Crude substring check over ``docker images -a`` output; note this
        # matches BASE and SEED names alike since both contain "githubfuzzer".
        if "githubfuzzer" in str(docker.images("-a")):
            return True
        else:
            return False
| 43.094017 | 117 | 0.656287 |
138218ff0eb0ff4ba8dc1e464a1fd62ba1dd05b0 | 1,144 | py | Python | tests/test_fastchardet.py | mstriemer/app-validator | ceaa373965192e3e08e7b38476cca09d44b345e7 | [
"BSD-3-Clause"
] | 20 | 2015-01-16T21:35:27.000Z | 2021-11-11T00:22:43.000Z | tests/test_fastchardet.py | mattbasta/amo-validator | f4d9612c15508b991cad637be9062a10d5e38e53 | [
"BSD-3-Clause"
] | 14 | 2015-01-15T21:26:33.000Z | 2016-01-18T16:47:15.000Z | tests/test_fastchardet.py | mattbasta/amo-validator | f4d9612c15508b991cad637be9062a10d5e38e53 | [
"BSD-3-Clause"
] | 14 | 2015-02-14T22:42:40.000Z | 2021-11-11T00:22:33.000Z | import fastchardet
def test_ascii():
    """fastchardet should label plain ASCII input as 'ascii'."""
    result = fastchardet.detect("This is plain ASCII")
    assert result["encoding"] == "ascii"
def test_utf8():
    """A UTF-8 BOM prefix should make fastchardet report 'utf_8'."""
    detected = fastchardet.detect("""\xEF\xBB\xBF
    Haldo, UTF-8
    """)
    assert detected["encoding"] == "utf_8"
def test_utfn():
    """A UTF-32 style BOM should make fastchardet report 'utf_n'."""
    detected = fastchardet.detect("""\xFF\xFE\x00\x00
    Haldo, UTF-Not 8
    """)
    assert detected["encoding"] == "utf_n"
def test_unicode():
    """Passing a ``unicode`` object should short-circuit to 'unicode'."""
    detected = fastchardet.detect(unicode("foo"))
    assert detected["encoding"] == "unicode"
def test_esoteric():
"""Make sure that fastchardet can detect other encodings."""
a = lambda code: fastchardet.detect(code)["encoding"]
# High Bytes
print a("High Byte:\x91")
assert a("High Byte:\x91") == "windows-1252"
# UTF-8 without BOM
print a("\xc2\xbc + \xc2\xbd = \xcd\xbe")
assert a("\xc2\xbc + \xc2\xbd = \xcd\xbe") == "utf_8"
| 24.340426 | 75 | 0.616259 |
6c9f5d2cf299d3f1b3dbea0f0ee0e9119dbe1b4e | 4,972 | py | Python | chinese_administration_division_service.py | zihengCat/chinese-administration-division-service | 08e1a4b6d1361262caf5ba91f2ad5f6ee369986c | [
"MIT"
] | null | null | null | chinese_administration_division_service.py | zihengCat/chinese-administration-division-service | 08e1a4b6d1361262caf5ba91f2ad5f6ee369986c | [
"MIT"
] | null | null | null | chinese_administration_division_service.py | zihengCat/chinese-administration-division-service | 08e1a4b6d1361262caf5ba91f2ad5f6ee369986c | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# `OS`模块 => 文件路径
import os
# `json`模块 => JSON 数据解析
import json
# 行政区划信息服务类
class AdministrationDivisionService(object):
    """Lookup service for Chinese administrative division codes.

    Translates between 6-digit division codes (e.g. ``"110101"``) and
    "province;city;district" name strings, backed by two JSON databases
    shipped in the ``db/`` directory next to this module.
    """

    def __init__(self):
        """Load the JSON databases and the special-city table."""
        base_dir = os.path.dirname(os.path.realpath(__file__))
        location_path = os.path.join(base_dir, 'db/location.min.json')
        listing_path = os.path.join(base_dir, 'db/list.json')
        with open(location_path, 'rt') as f1, open(listing_path, 'rt') as f2:
            # Nested mapping: province code -> {'name': ..., 'cities': {...}}.
            self.location_dict = json.loads(f1.read())
            # list.json maps code -> name string; invert it so that a full
            # "province;city;district" string can look up its 6-digit code.
            listing = json.loads(f2.read())
            self.location_dict_r = dict(zip(listing.values(), listing.keys()))
        # Municipalities / SARs whose city level reuses the province code:
        # Beijing, Tianjin, Shanghai, Chongqing, Hong Kong, Macao.
        self.sp_cities = ['110000',  # Beijing
                          '120000',  # Tianjin
                          '310000',  # Shanghai
                          '500000',  # Chongqing
                          '810000',  # Hong Kong SAR
                          '820000',  # Macao SAR
                          ]

    def parseString(self, string_str):
        """Return the 6-digit code for a division name string.

        :param string_str: "province;city;district" style name string.
        :return: 6-digit code string, or ``None`` if unknown / on error.
        """
        try:
            return self.location_dict_r.get(string_str)
        except Exception:
            return None

    def parseCode(self, code_str, sep=';'):
        """Return the name string for a 6-digit division code.

        :param code_str: 6-digit division code string.
        :param sep: Separator between the name parts (default ``';'``).
        :return: "province<sep>city<sep>district" string (the district part
            is empty for province/city level codes), or ``None`` on any
            error or unknown code.
        """
        try:
            # Validate input.  BUGFIX: the original did ``raise("...")``,
            # which raises a TypeError ("exceptions must derive from
            # BaseException"); raise a proper exception type instead.  The
            # except clause below still converts any failure to None.
            if not isinstance(code_str, str) or len(code_str) != 6:
                raise ValueError("input argument does not fit")
            code_type = self.__checkCodeType(code_str)
            province = code_str[0:2]
            city = code_str[2:4]
            # Province-level entry and its city table.
            prov_entry = self.location_dict.get(province + '0000')
            cities = prov_entry.get('cities')
            ret_province = prov_entry.get('name')
            # Special cities (municipalities/SARs) key their single "city"
            # entry by the province code itself.
            if province + '0000' in self.sp_cities:
                city_key = province + '0000'
            else:
                city_key = province + city + '00'
            city_entry = cities.get(city_key)
            ret_city = city_entry.get('name')
            if code_type in (1, 2):
                # Province- or city-level code: district part stays empty.
                return ret_province + sep + ret_city + sep
            elif code_type == 3:
                # District-level code: look up the district name directly.
                ret_district = city_entry.get('districts').get(code_str)
                return ret_province + sep + ret_city + sep + ret_district
            else:
                raise ValueError("unknown code type")
        except Exception:
            # Any lookup failure (bad input, unknown code) yields None.
            return None

    def __checkStringType(self, string_str, sep=';'):
        """Classify a division name string (placeholder, unimplemented)."""
        pass

    def __checkCodeType(self, code_str):
        """Classify a 6-digit code: 1=province, 2=city, 3=district level."""
        if code_str[2:6] == '0000':
            return 1
        elif code_str[4:6] == '00':
            return 2
        else:
            return 3
if __name__ == '__main__':
    # See ``main.py`` for usage examples; nothing runs standalone here.
    pass
| 38.542636 | 100 | 0.494167 |
ecefb514303453daa4148b2d8a3d3c32c6b7265f | 13,886 | py | Python | ferenda/sources/legal/se/fixedlayoutsource.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 18 | 2015-03-12T17:42:44.000Z | 2021-12-27T10:32:22.000Z | ferenda/sources/legal/se/fixedlayoutsource.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 13 | 2016-01-27T10:19:07.000Z | 2021-12-13T20:24:36.000Z | ferenda/sources/legal/se/fixedlayoutsource.py | redhog/ferenda | 6935e26fdc63adc68b8e852292456b8d9155b1f7 | [
"BSD-2-Clause"
] | 6 | 2016-11-28T15:41:29.000Z | 2022-01-08T11:16:48.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import OrderedDict
import os
import re
import json
from io import BytesIO
from rdflib import URIRef
from rdflib.namespace import DCTERMS
from lxml import etree
from . import SwedishLegalStore, SwedishLegalSource, SwedishLegalHandler
from .elements import Sidbrytning
from ferenda import util
from ferenda import CompositeRepository, PDFReader
from ferenda.errors import DocumentRemovedError, RequestHandlerError, PDFFileIsEmpty
from ferenda.pdfreader import StreamingPDFReader
from ferenda.elements import Body
class FixedLayoutHandler(SwedishLegalHandler):
    """Request handler for fixed-layout (PDF-backed) documents.

    Adds a URL rule for per-page PNG renderings (``.../sid<pageno>.png``)
    and resolves such requests to the correct subrepo, attachment and
    physical page via the repo's ``.pagemapping.json`` intermediate file.
    """

    @property
    def doc_rules(self):
        # Extend the inherited rules with the per-page path pattern.
        rules = super(FixedLayoutHandler, self).doc_rules
        rules.append("%(root)s/<%(converter)s:basefile>/sid<pageno>.<suffix>")
        return rules

    @property
    def rule_context(self):
        # Interpolated into the <%(converter)s:basefile> placeholder above.
        return {"converter": "path"}

    def get_pathfunc(self, environ, basefile, params, contenttype, suffix):
        """Resolve a request into a file-serving function.

        For ``sid<pageno>.png`` requests: map the logical page number to
        a (repo, attachment, physical page) triple recorded in the
        ``.pagemapping.json`` intermediate file, stash the result in
        ``params``, then delegate to the superclass.
        """
        if basefile and suffix == "png":
            # OK, this is a request for a particular page. Map this to
            # correct repo, dir and attachment and set those params
            #pi = environ['PATH_INFO']
            #pageno = pi[pi.index("/sid")+4:-(len(suffix)+1)]
            pageno = params['pageno']
            if pageno.isdigit():
                pageno = int(pageno)
            if isinstance(self.repo, CompositeRepository):
                # Find a subrepo that has both the downloaded file and the
                # page mapping; otherwise force a download first.
                for subrepo in self.repo.subrepos:
                    repo = self.repo.get_instance(subrepo)
                    if (os.path.exists(repo.store.downloaded_path(basefile)) and
                        os.path.exists(repo.store.path(basefile, 'intermediate','.pagemapping.json'))):
                        break
                else:
                    # force the first available subrepo to get the file
                    # FIXME: It'd be great if we could force the
                    # subrepo who has the pagemapping file to
                    # download, but the CompositeRepository API
                    # doesn't allow that
                    self.repo.download(basefile)
                    for subrepo in self.repo.subrepos:
                        repo = self.repo.get_instance(subrepo)
                        if os.path.exists(repo.store.downloaded_path(basefile)):
                            break
                    else:
                        raise RequestHandlerError("%s: No subrepo has downloaded this basefile" % basefile)
            else:
                repo = self.repo
            params['repo'] = repo.alias
            pagemapping_path = repo.store.path(basefile, 'intermediate','.pagemapping.json')
            with open(pagemapping_path) as fp:
                pagemap = json.load(fp)
            # invert the map (only keep the first -- hmm, maybe pagemap isn't ordered?)
            invertedmap = {}
            for k, v in pagemap.items():
                if v not in invertedmap:
                    invertedmap[v] = k
            attachment, pp = invertedmap[pageno].split("#page=")
            params['attachment'] = attachment
            # Prefer the downloaded copy; fall back to the intermediate one.
            for candidatedir in ('downloaded', 'intermediate'):
                if os.path.exists(repo.store.path(basefile, candidatedir, '.dummy', attachment=attachment)):
                    params['dir'] = candidatedir
                    break
            else:
                raise RequestHandlerError("%s: Cannot find %s in any %s directory" % (basefile, attachment, repo.alias))
            params['page'] = str(int(pp) - 1) # pp is 1-based, but RequestHandler.get_pathfunc expects 0-based
            params['format'] = 'png'
        return super(FixedLayoutHandler, self).get_pathfunc(environ, basefile, params, contenttype, suffix)
class FixedLayoutStore(SwedishLegalStore):
    """Handles storage of fixed-layout documents (either PDF or
    word processing docs that are converted to PDF). A single repo may
    have heterogenous usage of file formats, and this class will store
    each document with an appropriate file suffix.
    """

    # Maps file suffix -> magic-number prefix used to sniff the format.
    doctypes = OrderedDict([
        (".pdf", b'%PDF'),
        (".rtf", b'{\\rt'),
        (".docx", b'PK\x03\x04'),
        (".doc", b'\xd0\xcf\x11\xe0'),
        (".wpd", b'\xffWPC')
    ])

    @property
    def downloaded_suffixes(self):
        # Every suffix this store may use for a downloaded file.
        return list(self.doctypes.keys())

    def guess_type(self, fp, basefile):
        """Sniff a stream's format from its first four magic bytes.

        NOTE(review): the leading ``assert False`` means this method is
        believed to be dead code; invoking it raises AssertionError
        (unless Python runs with -O, which strips asserts).
        """
        assert False, "This seems to never be called?"
        start = fp.tell()
        sig = fp.read(4)
        fp.seek(start)
        for s in self.doctypes:
            if sig == self.doctypes[s]:
                return s
        else:
            self.log.error("%s: document file stream has magic number %r "
                           "-- don't know what that is" % (basefile, sig))
        # FIXME: Raise something instead?
class FixedLayoutSource(SwedishLegalSource):
    """This is basically like PDFDocumentRepository, but handles other
    word processing formats along with PDF files (everything is
    converted to/handled as PDF internally) """

    downloaded_suffix = ".pdf"
    documentstore_class = FixedLayoutStore
    requesthandler_class = FixedLayoutHandler

    @classmethod
    def get_default_options(cls):
        # Extend the inherited option set with image and OCR settings.
        opts = super(FixedLayoutSource, cls).get_default_options()
        opts['imgfiles'] = ['img/spinner.gif']
        opts['ocr'] = True
        opts['legacytesseract'] = False
        return opts

    def downloaded_to_intermediate(self, basefile, attachment=None):
        """Convert a downloaded document into the intermediate PDF/XML form.

        If the initial conversion yields an empty PDF, retry with OCR
        (Swedish assumed) when ``config.ocr`` is set, otherwise return a
        one-page placeholder document.
        """
        # force just the conversion part of the PDF handling
        downloaded_path = self.store.downloaded_path(basefile, attachment=attachment)
        intermediate_path = self.store.intermediate_path(basefile)
        intermediate_dir = os.path.dirname(intermediate_path)
        ocr_lang = None
        convert_to_pdf = not downloaded_path.endswith(".pdf")
        keep_xml = "bz2" if self.config.compress == "bz2" else True
        reader = StreamingPDFReader()
        try:
            return reader.convert(filename=downloaded_path,
                                  workdir=intermediate_dir,
                                  images=self.config.pdfimages,
                                  convert_to_pdf=convert_to_pdf,
                                  keep_xml=keep_xml,
                                  ocr_lang=ocr_lang,
                                  legacy_tesseract=self.config.legacytesseract)
        except PDFFileIsEmpty as e:
            if self.config.ocr:
                self.log.warning("%s: %s was empty, attempting OCR" % (basefile, downloaded_path))
                ocr_lang = "swe" # reasonable guess
                return reader.convert(filename=downloaded_path,
                                      workdir=intermediate_dir,
                                      images=self.config.pdfimages,
                                      convert_to_pdf=convert_to_pdf,
                                      keep_xml=keep_xml,
                                      ocr_lang=ocr_lang)
            else:
                self.log.warning("%s: %s was empty, returning placeholder" % (basefile, downloaded_path))
                fp = BytesIO(b"""<pdf2xml>
<page number="1" position="absolute" top="0" left="0" height="1029" width="701">
<fontspec id="0" size="12" family="TimesNewRomanPSMT" color="#000000"/>
<text top="67" left="77" width="287" height="26" font="0">[Avgörandetext saknas]</text>
</page>
</pdf2xml>""")
                fp.name = "dummy.xml"
                return fp

    def extract_head(self, fp, basefile):
        # at this point, fp points to the PDF file itself, which is
        # hard to extract metadata from. We just let extract_metadata
        # return anything we can infer from basefile
        pass

    def extract_metadata(self, rawhead, basefile):
        # Metadata is derived solely from the basefile identifier.
        return self.metadata_from_basefile(basefile)

    def extract_body(self, fp, basefile):
        """Parse the intermediate stream into a PDFReader, one PNG src per page."""
        # If we can asssume that the fp is a hOCR HTML file and not a
        # PDF2XML file, use alternate parser. FIXME: There ought to be
        # a cleaner way than guessing based on filename
        parser = "ocr" if ".hocr." in util.name_from_fp(fp) else "xml"
        reader = StreamingPDFReader().read(fp, parser=parser)
        baseuri = self.canonical_uri(basefile)
        for page in reader:
            page.src = "%s/sid%s.png" % (baseuri, page.number)
        if reader.is_empty():
            raise DocumentRemovedError(dummyfile=self.store.parsed_path(basefile))
        else:
            return reader

    def _extract_plaintext(self, resource, resources):
        """Return a short text snippet for a resource; page resources use
        the text immediately following the pagebreak."""
        about = resource.get("about")
        if about and "#sid" in about:
            # select all text content contained in the first 2 <p>
            # tags following the pagebreak -- this should typically be
            # enough to show a helpful snippet in the autocomplete box
            nodes = resource.xpath("following::h:p[position() < 2]//text()",
                                   namespaces={'h': 'http://www.w3.org/1999/xhtml'})
            plaintext = util.normalize_space(" ".join(nodes))
            if not plaintext:
                plaintext = "(Sid %s saknar text)" % about.split("#sid")[1]
            return plaintext
        else:
            return super(FixedLayoutSource, self)._extract_plaintext(resource, resources)

    def _relate_fulltext_resources(self, body):
        res = super(FixedLayoutSource, self)._relate_fulltext_resources(body)
        # also: add every page (the pagebreak element)
        # for r in body.findall(".//*[@class='sidbrytning']"):
        # each entry in the resource list may be a (resource,
        # extrametadata) tuple. The extrametadata is assumed to be
        # appended to by the caller as dictated by facets, then
        # passed as kwargs to FulltextIndex.update.
        # res.append((r, {"role": "autocomplete"}))
        return res

    def _relate_fulltext_value_comment(self, resourceuri, rooturi, desc):
        # Page resources render as "<document identifier> s. <pageno>".
        if "#sid" not in resourceuri:
            return super(FixedLayoutSource, self)._relate_fulltext_value_comment(resourceuri, rooturi, desc)
        else:
            pageno = resourceuri.split("#sid")[1]
            return "%s s. %s" % (desc.graph.value(URIRef(rooturi), DCTERMS.identifier),
                                 pageno)

    # FIXME: This is copied verbatim from PDFDocumentRepository
    def create_external_resources(self, doc):
        """Write the per-document CSS (fontspecs + page geometry) and copy
        page background images; return the list of file paths produced."""
        resources = []
        # there are two types of doc.body objects
        # 1. PDFReader objects, ie raw PDF objects, structured by page
        # and with a top-level fontspec object
        # 2. elements.Body objects that are structured by logical
        # elements (chapters, sections etc) and where individual
        # Sidbrytning objects can be anywhere in the tree.
        if not hasattr(doc.body, 'fontspec'):
            # document wasn't derived from a PDF file, probably from HTML instead
            return resources
        cssfile = self.store.parsed_path(doc.basefile, attachment="index.css")
        urltransform = self.get_url_transform_func([self], os.path.dirname(cssfile),
                                                   develurl=self.config.develurl)
        resources.append(cssfile)
        util.ensure_dir(cssfile)
        with open(cssfile, "w") as fp:
            # Create CSS header with fontspecs
            for spec in list(doc.body.fontspec.values()):
                fp.write(".fontspec%s {font: %spx %s; color: %s;}\n" %
                         (spec['id'], spec['size'], spec['family'],
                          spec.get('color', 'black')))
            # 2 Copy all created png files to their correct locations
            if isinstance(doc.body, PDFReader):
                pageenumerator = enumerate(doc.body)
            else:
                sidbrytningar = []
                def collect(node, state):
                    if isinstance(node, Sidbrytning):
                        state.append(node)
                    return state
                self.visit_node(doc.body, collect, sidbrytningar)
                pageenumerator = enumerate(sidbrytningar)
            # assert isinstance(doc.body, PDFReader), "doc.body is %s, not PDFReader -- still need to access fontspecs etc" % type(doc.body)
            for cnt, page in pageenumerator:
                if page.background:
                    src = self.store.intermediate_path(
                        doc.basefile, attachment=os.path.basename(page.background))
                    dest = self.store.parsed_path(
                        doc.basefile, attachment=os.path.basename(page.background))
                    resources.append(dest)
                    if util.copy_if_different(src, dest):
                        self.log.debug("Copied %s to %s" % (src, dest))
                    desturi = "%s?dir=parsed&attachment=%s" % (doc.uri, os.path.basename(dest))
                    desturi = urltransform(desturi)
                    background = " background: url('%s') no-repeat grey;" % desturi
                else:
                    background = ""
                fp.write("#%s {width: %spx; height: %spx;%s}\n" %
                         (page.id, page.width, page.height, background))
        return resources

    def _relate_fulltext_value_label(self, resourceuri, rooturi, desc):
        # Page resources get a plain "s. <pageno>" label.
        if "#sid" not in resourceuri:
            return super(FixedLayoutSource, self)._relate_fulltext_value_label(resourceuri, rooturi, desc)
        else:
            pageno = resourceuri.split("#sid")[1]
            return "s. %s" % pageno
| 45.379085 | 140 | 0.580009 |
0a77fb69439ea5e42cd66ac29548329771a5455e | 7,387 | py | Python | lib/scan/joomla_version/engine.py | hklcf/OWASP-Nettacker | 7c31cdd7e5e817031d317fa29e01409fccca7c19 | [
"Apache-2.0"
] | null | null | null | lib/scan/joomla_version/engine.py | hklcf/OWASP-Nettacker | 7c31cdd7e5e817031d317fa29e01409fccca7c19 | [
"Apache-2.0"
] | null | null | null | lib/scan/joomla_version/engine.py | hklcf/OWASP-Nettacker | 7c31cdd7e5e817031d317fa29e01409fccca7c19 | [
"Apache-2.0"
] | 1 | 2019-06-03T08:44:51.000Z | 2019-06-03T08:44:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Pradeep Jairamani , github.com/pradeepjairamani
import socket
import socks
import time
import json
import threading
import string
import random
import sys
import struct
import re
import os
from OpenSSL import crypto
import ssl
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.icmp.engine import do_one as do_one_ping
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
import requests
def extra_requirements_dict():
    """Default extra requirements: the ports probed by the joomla version scan."""
    default_ports = [80, 443]
    return {"joomla_version_ports": default_ports}
def conn(targ, port, timeout_sec, socks_proxy):
    """Open a TCP connection to (targ, port), optionally through a SOCKS proxy.

    :param targ: hostname or IP to connect to
    :param port: TCP port
    :param timeout_sec: socket timeout in seconds
    :param socks_proxy: None, or a proxy URL like
        ``socks5://[user:pass@]host:port`` / ``socks4://host:port``
    :return: a connected socket, or None on any failure (best-effort probe)
    """
    try:
        if socks_proxy is not None:
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            if '@' in socks_proxy:
                # Proxy URL carries credentials: user:pass@host:port
                socks_username = socks_proxy.rsplit(':')[0]
                socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                        password=socks_password)
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
            else:
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[1]))
                socket.socket = socks.socksocket
                # BUG FIX: assign the resolver function itself. The original
                # called getaddrinfo() with no arguments, which raised and
                # made every credential-less proxy connection return None.
                socket.getaddrinfo = getaddrinfo
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sys.stdout.flush()
        s.settimeout(timeout_sec)
        s.connect((targ, port))
        return s
    except Exception:
        # Best-effort probe: any failure (DNS, refused, timeout) -> None.
        return None
def joomla_version(target, port, timeout_sec, log_in_file, language, time_sleep,
                   thread_tmp_filename, socks_proxy, scan_id, scan_cmd):
    """Probe `target` for a Joomla manifest and extract the version string.

    On success the module-level `version` global is set (it is read by
    __joomla_version for logging) and True is returned; any failure
    returns False.
    """
    try:
        s = conn(target, port, timeout_sec, socks_proxy)
        if not s:
            return False
        else:
            if target_type(target) != "HTTP" and port == 443:
                target = 'https://' + target
            if target_type(target) != "HTTP" and port == 80:
                target = 'http://' + target
            # BUG FIX: pass a timeout so a stalled server cannot hang the scan
            # indefinitely (requests has no default timeout).
            req = requests.get(target+'/joomla.xml', timeout=timeout_sec)
            if req.status_code == 404:
                req = requests.get(
                    target+'/administrator/manifests/files/joomla.xml', timeout=timeout_sec)
            try:
                global version
                regex = '<version>(.+?)</version>'
                pattern = re.compile(regex)
                version = re.findall(pattern, req.text)
                version = ''.join(version)
                return True
            except Exception:
                # Manifest present but unparsable -> treat as "not found".
                return False
    except Exception:
        # some error warning
        return False
def __joomla_version(target, port, timeout_sec, log_in_file, language, time_sleep,
                     thread_tmp_filename, socks_proxy, scan_id, scan_cmd):
    """Worker wrapper: run joomla_version and log a JSON "found" event.

    Reads the module-level `version` global that joomla_version sets.
    """
    if joomla_version(target, port, timeout_sec, log_in_file, language, time_sleep,
                      thread_tmp_filename, socks_proxy, scan_id, scan_cmd):
        info(messages(language, "found").format(
            target, "Joomla Version", version))
        # Writing '0' to the shared temp file marks "found" for start()'s summary.
        __log_into_file(thread_tmp_filename, 'w', '0', language)
        data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': port, 'TYPE': 'joomla_version_scan',
                           'DESCRIPTION': messages(language, "found").format(target, "Joomla Version", version), 'TIME': now(),
                           'CATEGORY': "vuln",
                           'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
        __log_into_file(log_in_file, 'a', data, language)
        return True
    else:
        return False
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
          verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd):  # Main function
    """Entry point for the joomla_version_scan module.

    Spawns one __joomla_version worker thread per candidate port, throttled
    to `thread_number` concurrent workers, waits for completion (bounded by
    `timeout_sec`), and logs a "not found" record if no worker reported a hit.
    """
    if target_type(target) != 'SINGLE_IPv4' or target_type(target) != 'DOMAIN' or target_type(target) != 'HTTP':
        # NOTE(review): this condition is always True (one value cannot equal
        # all three strings at once); preserved as-is so the set of accepted
        # targets does not change.
        # requirements check
        new_extra_requirements = extra_requirements_dict()
        if methods_args is not None:
            # Let user-supplied method args override the defaults.
            for extra_requirement in extra_requirements_dict():
                if extra_requirement in methods_args:
                    new_extra_requirements[
                        extra_requirement] = methods_args[extra_requirement]
        extra_requirements = new_extra_requirements
        if ports is None:
            ports = extra_requirements["joomla_version_ports"]
        if target_type(target) == 'HTTP':
            target = target_to_host(target)
        threads = []
        total_req = len(ports)
        thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
            random.choice(string.ascii_letters + string.digits) for _ in range(20))
        # '1' means "nothing found yet"; workers overwrite it with '0' on a hit.
        __log_into_file(thread_tmp_filename, 'w', '1', language)
        trying = 0
        keyboard_interrupt_flag = False
        for port in ports:
            port = int(port)
            t = threading.Thread(target=__joomla_version,
                                 args=(target, int(port), timeout_sec, log_in_file, language, time_sleep,
                                       thread_tmp_filename, socks_proxy, scan_id, scan_cmd))
            threads.append(t)
            t.start()
            trying += 1
            if verbose_level > 3:
                info(
                    messages(language, "trying_message").format(trying, total_req, num, total, target, port, 'joomla_version_scan'))
            # Throttle thread creation to thread_number concurrent workers.
            while 1:
                try:
                    if threading.activeCount() >= thread_number:
                        time.sleep(0.01)
                    else:
                        break
                except KeyboardInterrupt:
                    keyboard_interrupt_flag = True
                    break
            if keyboard_interrupt_flag:
                break
        # wait for threads
        # BUG FIX: compare small integers with == / != instead of "is" /
        # "is not" -- identity of ints is a CPython implementation detail.
        kill_switch = 0
        kill_time = int(
            timeout_sec / 0.1) if int(timeout_sec / 0.1) != 0 else 1
        while 1:
            time.sleep(0.1)
            kill_switch += 1
            try:
                if threading.activeCount() == 1 or kill_switch == kill_time:
                    break
            except KeyboardInterrupt:
                break
        thread_write = int(open(thread_tmp_filename).read().rsplit()[0])
        if thread_write == 1 and verbose_level != 0:
            info(messages(language, "not_found"))
            data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'joomla_version_scan',
                               'DESCRIPTION': messages(language, "not_found"), 'TIME': now(),
                               'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
            __log_into_file(log_in_file, 'a', data, language)
        os.remove(thread_tmp_filename)
    else:
        warn(messages(language, "input_target_error").format(
            'joomla_version_scan', target))
| 41.734463 | 132 | 0.576824 |
16a5d42d61c18f64b9904b3b6459f184043220c0 | 144 | py | Python | Python/Math/Find Angle MBC mbc.py | jaswal72/hacker-rank | 95aaa71b4636928664341dc9c6f75d69af5f26ac | [
"MIT"
] | 1 | 2017-03-27T18:21:38.000Z | 2017-03-27T18:21:38.000Z | Python/Math/Find Angle MBC mbc.py | jaswal72/hacker-rank | 95aaa71b4636928664341dc9c6f75d69af5f26ac | [
"MIT"
] | null | null | null | Python/Math/Find Angle MBC mbc.py | jaswal72/hacker-rank | 95aaa71b4636928664341dc9c6f75d69af5f26ac | [
"MIT"
] | null | null | null | import math
# HackerRank "Find Angle MBC": in right triangle ABC (right angle at B),
# M is the midpoint of hypotenuse AC; angle MBC equals angle BCA = asin(AB/AC).
AB=input()  # NOTE(review): relies on Python 2 input() evaluating to a number;
BC=input()  # under Python 3 these would be strings -- confirm target runtime.
AC=math.sqrt(pow(AB,2)+pow(BC,2))  # hypotenuse via Pythagoras
MBC=float(math.degrees(math.asin(AB/AC)))  # angle in degrees
print(str(int(round(MBC,0)))+"°")
| 20.571429 | 41 | 0.673611 |
4f89e465257e5b108052547e81d4d04bab7af745 | 830 | py | Python | source/synchronization/package/NPDBF.py | kevinwhere/Bleeding-Pineapple | 017b2abd3947ea58722550e06a177ace8f4e6a87 | [
"MIT"
] | null | null | null | source/synchronization/package/NPDBF.py | kevinwhere/Bleeding-Pineapple | 017b2abd3947ea58722550e06a177ace8f4e6a87 | [
"MIT"
] | null | null | null | source/synchronization/package/NPDBF.py | kevinwhere/Bleeding-Pineapple | 017b2abd3947ea58722550e06a177ace8f4e6a87 | [
"MIT"
] | null | null | null | import heapq
def NPDBF(tasks, M, scheme, numQ):
    """Non-preemptive demand-bound-function schedulability test.

    For each task (rate-monotonic order, implicit deadline = period) and each
    resource it accesses, checks that higher-priority demand plus the single
    largest lower-priority blocking section plus the task's own access time
    fits within one period.

    :param tasks: list of dicts with 'period' and 'resGraph' (a list of
        per-resource dicts with 'totacc' and 'maxacc')
    :param M: unused here; kept for interface compatibility
    :param scheme: unused here; kept for interface compatibility
    :param numQ: unused here; kept for interface compatibility
    :return: True if every task passes the test, False otherwise
    """
    sortedTasks = sorted(tasks, key=lambda item: item['period'])
    for i, task in enumerate(sortedTasks):
        HPTasks = sortedTasks[:i]
        LPTasks = sortedTasks[i + 1:]
        for j in range(len(task['resGraph'])):
            if task['resGraph'][j]['totacc'] == 0:
                continue
            # Higher-priority demand on resource j before the deadline.
            HPC = 0
            for itask in HPTasks:
                if itask['resGraph'][j]['totacc'] != 0:
                    HPC += itask['resGraph'][j]['totacc'] * int(task['period'] / itask['period'])
            # Blocking: the single largest lower-priority critical section.
            CI = [itask['resGraph'][j]['maxacc'] for itask in LPTasks
                  if itask['resGraph'][j]['totacc'] != 0]
            B = max(CI) if CI else 0
            if (HPC + B + task['resGraph'][j]['totacc']) / task['period'] > 1:
                return False
    # BUG FIX: the original fell off the end and returned None on the
    # schedulable path; return an explicit True so callers get a bool.
    return True
c1a8a064be23dd6cced90086a4d54112e2b67808 | 4,367 | py | Python | uos3/configUp/migrations/0001_initial.py | Axpere/telecommand-server | aa9bb61b127d914bd77a3bbe7ec39ef0dfc9f9ff | [
"MIT"
] | null | null | null | uos3/configUp/migrations/0001_initial.py | Axpere/telecommand-server | aa9bb61b127d914bd77a3bbe7ec39ef0dfc9f9ff | [
"MIT"
] | 1 | 2019-06-19T17:20:47.000Z | 2019-06-19T17:20:47.000Z | uos3/configUp/migrations/0001_initial.py | MNahad/telecommand-server | 96d2f1e59cb4de581f6f1bbb3a61ed1b7062f91f | [
"MIT"
] | null | null | null | # Generated by Django 2.0.7 on 2018-07-20 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the `config` uplink-configuration model.

    Auto-generated by Django's makemigrations -- avoid hand-editing an
    already-applied migration; create a new migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='config',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_submitted', models.DateTimeField(auto_now_add=True)),
                ('user_submitted', models.CharField(max_length=64)),
                ('tx_enable', models.BooleanField(default=True)),
                ('tx_interval', models.PositiveSmallIntegerField()),
                ('tx_interval_downlink', models.IntegerField(choices=[(0, '0 ms'), (50, '500 ms'), (60, '600 ms'), (70, '700 ms'), (80, '800 ms'), (90, '900 ms'), (100, '1000 ms'), (150, '1500 ms'), (200, '2000 ms')])),
                ('tx_datarate', models.IntegerField(choices=[(0, '0.25 kbps'), (3, '0.5 kbps'), (6, '1 kbps'), (9, '3 kbps'), (12, '6 kbps')])),
                ('tx_power', models.IntegerField(choices=[(0, '10 mW'), (3, '50 mW'), (6, '100 mW'), (9, '200 mW'), (12, '300 mW')])),
                ('tx_overtemp', models.PositiveSmallIntegerField()),
                ('rx_overtemp', models.PositiveSmallIntegerField()),
                ('batt_overtemp', models.PositiveSmallIntegerField()),
                ('obc_overtemp', models.PositiveSmallIntegerField()),
                ('pa_overtemp', models.PositiveSmallIntegerField()),
                ('low_voltage_threshold', models.PositiveSmallIntegerField()),
                ('low_voltage_recovery', models.PositiveSmallIntegerField()),
                ('health_acquisition_interval', models.PositiveIntegerField()),
                ('configuration_acquisition_interval', models.PositiveIntegerField()),
                ('imu_acquisition_interval', models.PositiveIntegerField()),
                ('imu_sample_count', models.PositiveSmallIntegerField()),
                ('imu_sample_interval', models.PositiveSmallIntegerField()),
                ('gps_acquisition_interval', models.PositiveIntegerField()),
                ('gps_sample_count', models.PositiveSmallIntegerField()),
                ('gps_sample_interval', models.PositiveSmallIntegerField()),
                ('image_acquisition_time', models.BigIntegerField()),
                ('image_acquisition_profile', models.BooleanField(choices=[(0, '1600x1200'), (1, '640x480')])),
                ('time', models.BigIntegerField()),
                ('operational_mode', models.IntegerField(choices=[(0, 'Deployment Phases'), (1, 'Nominal Operations'), (3, 'Safe Mode')])),
                ('self_test', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('power_rail_1', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('power_rail_2', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('power_rail_3', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('power_rail_5', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('power_rail_6', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('reset_power_rail_1', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('reset_power_rail_2', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('reset_power_rail_3', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('reset_power_rail_4', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('reset_power_rail_5', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('reset_power_rail_6', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('imu_accel_enabled', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('imu_gyro_enabled', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('imu_magno_enabled', models.BooleanField(choices=[(0, 'Off'), (1, 'On')], default=False)),
                ('downlink_stop_time', models.BigIntegerField()),
            ],
        ),
    ]
| 69.31746 | 219 | 0.579345 |
f9bf7ca8c42956bca0b041f922640e492045ab9d | 6,681 | py | Python | lib/bbox/bbox_transform.py | jeremy43/FCIS | 0ed234c14d064b615d6c2553edca24031b8c7c84 | [
"Apache-2.0"
] | null | null | null | lib/bbox/bbox_transform.py | jeremy43/FCIS | 0ed234c14d064b615d6c2553edca24031b8c7c84 | [
"Apache-2.0"
] | null | null | null | lib/bbox/bbox_transform.py | jeremy43/FCIS | 0ed234c14d064b615d6c2553edca24031b8c7c84 | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Yuwen Xiong, Haozhi Qi, Guodong Zhang
# --------------------------------------------------------
import numpy as np
from bbox import bbox_overlaps_cython
def bbox_overlaps(boxes, query_boxes):
    """Compute the n x k IoU overlap matrix via the compiled Cython kernel."""
    return bbox_overlaps_cython(boxes, query_boxes)
def bbox_overlaps_py(boxes, query_boxes):
    """
    determine overlaps between boxes and query_boxes (pure-numpy reference)

    Vectorized with broadcasting: the original double Python loop was
    O(n*k) interpreted iterations; this runs entirely in C. Also drops
    the deprecated np.float alias (removed in NumPy 1.24).

    :param boxes: n * 4 bounding boxes (x1, y1, x2, y2)
    :param query_boxes: k * 4 bounding boxes
    :return: overlaps: n * k IoU matrix
    """
    boxes = np.asarray(boxes, dtype=np.float64)
    query_boxes = np.asarray(query_boxes, dtype=np.float64)
    # Areas with the inclusive-pixel (+1) convention used throughout this file.
    box_areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    query_areas = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
                  (query_boxes[:, 3] - query_boxes[:, 1] + 1)
    # Pairwise intersection widths/heights, shape (n, k); negative -> no overlap.
    iw = (np.minimum(boxes[:, None, 2], query_boxes[None, :, 2]) -
          np.maximum(boxes[:, None, 0], query_boxes[None, :, 0]) + 1).clip(min=0)
    ih = (np.minimum(boxes[:, None, 3], query_boxes[None, :, 3]) -
          np.maximum(boxes[:, None, 1], query_boxes[None, :, 1]) + 1).clip(min=0)
    inter = iw * ih
    union = box_areas[:, None] + query_areas[None, :] - inter
    overlaps = np.zeros_like(inter)
    # Only divide where there is an actual intersection (matches the
    # original's zero default for non-overlapping pairs).
    np.divide(inter, union, out=overlaps, where=inter > 0)
    return overlaps
def clip_boxes(boxes, im_shape):
    """
    Clip boxes to image boundaries (modifies `boxes` in place).
    :param boxes: [N, 4 * num_classes] as (x1, y1, x2, y2) column groups
    :param im_shape: (height, width)
    :return: [N, 4 * num_classes]
    """
    height, width = im_shape[0], im_shape[1]
    # Columns 0/2 are x-coordinates (bounded by width), 1/3 are y (by height).
    for col, limit in ((0, width), (1, height), (2, width), (3, height)):
        boxes[:, col::4] = np.clip(boxes[:, col::4], 0, limit - 1)
    return boxes
def filter_boxes(boxes, min_size):
    """
    Return the indices of boxes whose width AND height are >= min_size.
    :param boxes: [N, 4 * num_classes]
    :param min_size: minimum side length (inclusive-pixel convention)
    :return: keep: index array
    """
    widths = boxes[:, 2] - boxes[:, 0] + 1
    heights = boxes[:, 3] - boxes[:, 1] + 1
    big_enough = np.logical_and(widths >= min_size, heights >= min_size)
    return np.where(big_enough)[0]
def remove_repetition(boxes):
    """
    remove repetited boxes
    For each of the four coordinate columns, keep the first row index at
    which every distinct value occurs; return the union of those index sets.
    :param boxes: [N, 4]
    :return: keep: sorted array of row indices
    """
    keep = None
    for col in range(4):
        _, first_occurrence = np.unique(boxes[:, col], return_index=True)
        # union1d is associative, so folding column by column matches the
        # pairwise unions of the original implementation.
        keep = first_occurrence if keep is None else np.union1d(keep, first_occurrence)
    return keep
def nonlinear_transform(ex_rois, gt_rois):
    """
    compute bounding box regression targets from ex_rois to gt_rois
    :param ex_rois: [N, 4]
    :param gt_rois: [N, 4]
    :return: [N, 4] targets (dx, dy, dw, dh)
    """
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'

    def _geometry(rois):
        # width, height, center-x, center-y with the +1 pixel convention
        w = rois[:, 2] - rois[:, 0] + 1.0
        h = rois[:, 3] - rois[:, 1] + 1.0
        cx = rois[:, 0] + 0.5 * (w - 1.0)
        cy = rois[:, 1] + 0.5 * (h - 1.0)
        return w, h, cx, cy

    ex_w, ex_h, ex_cx, ex_cy = _geometry(ex_rois)
    gt_w, gt_h, gt_cx, gt_cy = _geometry(gt_rois)

    # Epsilon guards against division by zero on degenerate boxes.
    dx = (gt_cx - ex_cx) / (ex_w + 1e-14)
    dy = (gt_cy - ex_cy) / (ex_h + 1e-14)
    dw = np.log(gt_w / ex_w)
    dh = np.log(gt_h / ex_h)
    return np.vstack((dx, dy, dw, dh)).transpose()
def nonlinear_pred(boxes, box_deltas):
    """
    Transform the set of class-agnostic boxes into class-specific boxes
    by applying the predicted offsets (box_deltas)
    :param boxes: !important [N 4]
    :param box_deltas: [N, 4 * num_classes] as (dx, dy, dw, dh) per class
    :return: [N 4 * num_classes]
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))

    # BUG FIX: np.float was deprecated and removed in NumPy 1.24;
    # the builtin float is the equivalent (float64) dtype.
    boxes = boxes.astype(float, copy=False)
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
    ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)

    dx = box_deltas[:, 0::4]
    dy = box_deltas[:, 1::4]
    dw = box_deltas[:, 2::4]
    dh = box_deltas[:, 3::4]

    # Shift centers and rescale sizes per class column group.
    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]

    pred_boxes = np.zeros(box_deltas.shape)
    # x1
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1.0)
    # y1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1.0)
    # x2
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1.0)
    # y2
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1.0)
    return pred_boxes
def iou_transform(ex_rois, gt_rois):
    """Return bbox targets; the IoU loss uses the ground-truth rois directly."""
    if ex_rois.shape[0] != gt_rois.shape[0]:
        raise AssertionError('inconsistent rois number')
    return gt_rois
def iou_pred(boxes, box_deltas):
    """
    Transform the set of class-agnostic boxes into class-specific boxes
    by applying the predicted offsets (box_deltas)

    IoU variant: deltas are absolute per-corner offsets rather than the
    center/size parameterization used by nonlinear_pred.
    :param boxes: !important [N 4]
    :param box_deltas: [N, 4 * num_classes]
    :return: [N 4 * num_classes]
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, box_deltas.shape[1]))

    # BUG FIX: np.float was deprecated and removed in NumPy 1.24;
    # the builtin float is the equivalent (float64) dtype.
    boxes = boxes.astype(float, copy=False)
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    dx1 = box_deltas[:, 0::4]
    dy1 = box_deltas[:, 1::4]
    dx2 = box_deltas[:, 2::4]
    dy2 = box_deltas[:, 3::4]

    pred_boxes = np.zeros(box_deltas.shape)
    # Each corner is the original corner plus its predicted offset.
    pred_boxes[:, 0::4] = dx1 + x1[:, np.newaxis]
    pred_boxes[:, 1::4] = dy1 + y1[:, np.newaxis]
    pred_boxes[:, 2::4] = dx2 + x2[:, np.newaxis]
    pred_boxes[:, 3::4] = dy2 + y2[:, np.newaxis]
    return pred_boxes
# define bbox_transform and bbox_pred
# Default parameterization is the Fast R-CNN style center/size deltas;
# the iou_* pair above is the alternative corner-offset parameterization.
bbox_transform = nonlinear_transform
bbox_pred = nonlinear_pred
| 33.238806 | 115 | 0.563239 |
c5a9c9e841fb6a6d371f4c16245c00a4861443f4 | 9,029 | py | Python | lbry/dht/peer.py | robd003/lbry-sdk | 3fc538104d46c8d83ce1ce9e684b1c602f43f60c | [
"MIT"
] | 4,703 | 2015-09-10T20:49:05.000Z | 2019-06-20T01:34:38.000Z | lbry/dht/peer.py | robd003/lbry-sdk | 3fc538104d46c8d83ce1ce9e684b1c602f43f60c | [
"MIT"
] | 1,934 | 2015-11-25T20:40:45.000Z | 2019-06-21T00:50:03.000Z | lbry/dht/peer.py | robd003/lbry-sdk | 3fc538104d46c8d83ce1ce9e684b1c602f43f60c | [
"MIT"
] | 369 | 2015-12-05T21:18:07.000Z | 2019-06-10T12:40:50.000Z | import typing
import asyncio
import logging
from dataclasses import dataclass, field
from functools import lru_cache
from prometheus_client import Gauge
from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
from lbry.dht import constants
from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address
ALLOW_LOCALHOST = False
CACHE_SIZE = 16384
log = logging.getLogger(__name__)
@lru_cache(CACHE_SIZE)
def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
                       udp_port: typing.Optional[int] = None,
                       tcp_port: typing.Optional[int] = None,
                       allow_localhost: bool = False) -> 'KademliaPeer':
    # Memoized constructor: identical (node_id, address, ports, allow_localhost)
    # tuples share one KademliaPeer instance, bounded by CACHE_SIZE entries.
    return KademliaPeer(address, node_id, udp_port, tcp_port=tcp_port, allow_localhost=allow_localhost)
def is_valid_public_ipv4(address, allow_localhost: bool = False):
    """Check `address` is a valid public IPv4, honoring both the caller's
    flag and the module-level ALLOW_LOCALHOST override."""
    permit_localhost = bool(allow_localhost) or bool(ALLOW_LOCALHOST)
    return _is_valid_public_ipv4(address, permit_localhost)
class PeerManager:
peer_manager_keys_metric = Gauge(
"peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
labelnames=("scope",)
)
    def __init__(self, loop: asyncio.AbstractEventLoop):
        """Track per-peer liveness statistics in bounded LRU caches.

        :param loop: event loop whose monotonic clock timestamps all records
        """
        self._loop = loop
        # (address, udp_port) -> (previous failure time, latest failure time)
        self._rpc_failures: typing.Dict[
            typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
        ] = LRUCache(CACHE_SIZE)
        # (address, udp_port) -> timestamp of the last reply / send / request
        self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
        self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
        self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
        # bidirectional node_id <-> (address, udp_port) mapping
        self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
        self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
        # node_id -> (time received, token) for the DHT store-token handshake
        self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(CACHE_SIZE)
def count_cache_keys(self):
return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
self._node_tokens)
def reset(self):
for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):
statistic.clear()
def report_failure(self, address: str, udp_port: int):
now = self._loop.time()
_, previous = self._rpc_failures.pop((address, udp_port), (None, None))
self._rpc_failures[(address, udp_port)] = (previous, now)
def report_last_sent(self, address: str, udp_port: int):
now = self._loop.time()
self._last_sent[(address, udp_port)] = now
def report_last_replied(self, address: str, udp_port: int):
now = self._loop.time()
self._last_replied[(address, udp_port)] = now
def report_last_requested(self, address: str, udp_port: int):
now = self._loop.time()
self._last_requested[(address, udp_port)] = now
def clear_token(self, node_id: bytes):
self._node_tokens.pop(node_id, None)
def update_token(self, node_id: bytes, token: bytes):
now = self._loop.time()
self._node_tokens[node_id] = (now, token)
def get_node_token(self, node_id: bytes) -> typing.Optional[bytes]:
ts, token = self._node_tokens.get(node_id, (0, None))
if ts and ts > self._loop.time() - constants.TOKEN_SECRET_REFRESH_INTERVAL:
return token
def get_last_replied(self, address: str, udp_port: int) -> typing.Optional[float]:
return self._last_replied.get((address, udp_port))
def update_contact_triple(self, node_id: bytes, address: str, udp_port: int):
"""
Update the mapping of node_id -> address tuple and that of address tuple -> node_id
This is to handle peers changing addresses and ids while assuring that the we only ever have
one node id / address tuple mapped to each other
"""
if (address, udp_port) in self._node_id_mapping:
self._node_id_reverse_mapping.pop(self._node_id_mapping.pop((address, udp_port)))
if node_id in self._node_id_reverse_mapping:
self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
self._node_id_mapping[(address, udp_port)] = node_id
self._node_id_reverse_mapping[node_id] = (address, udp_port)
self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
def prune(self): # TODO: periodically call this
now = self._loop.time()
to_pop = []
for (address, udp_port), (_, last_failure) in self._rpc_failures.items():
if last_failure and last_failure < now - constants.RPC_ATTEMPTS_PRUNING_WINDOW:
to_pop.append((address, udp_port))
while to_pop:
del self._rpc_failures[to_pop.pop()]
to_pop = []
for node_id, (age, token) in self._node_tokens.items(): # pylint: disable=unused-variable
if age < now - constants.TOKEN_SECRET_REFRESH_INTERVAL:
to_pop.append(node_id)
while to_pop:
del self._node_tokens[to_pop.pop()]
def contact_triple_is_good(self, node_id: bytes, address: str, udp_port: int): # pylint: disable=too-many-return-statements
"""
:return: False if peer is bad, None if peer is unknown, or True if peer is good
"""
delay = self._loop.time() - constants.CHECK_REFRESH_INTERVAL
# fixme: find a way to re-enable that without breaking other parts
# if node_id not in self._node_id_reverse_mapping or (address, udp_port) not in self._node_id_mapping:
# return
# addr_tup = (address, udp_port)
# if self._node_id_reverse_mapping[node_id] != addr_tup or self._node_id_mapping[addr_tup] != node_id:
# return
previous_failure, most_recent_failure = self._rpc_failures.get((address, udp_port), (None, None))
last_requested = self._last_requested.get((address, udp_port))
last_replied = self._last_replied.get((address, udp_port))
if node_id is None:
return None
if most_recent_failure and last_replied:
if delay < last_replied > most_recent_failure:
return True
elif last_replied > most_recent_failure:
return
return False
elif previous_failure and most_recent_failure and most_recent_failure > delay:
return False
elif last_replied and last_replied > delay:
return True
elif last_requested and last_requested > delay:
return None
return
def peer_is_good(self, peer: 'KademliaPeer'):
return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)
def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer': # pylint: disable=no-self-use
node_id, address, tcp_port = decode_compact_address(compact_address)
return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
@dataclass(unsafe_hash=True)
class KademliaPeer:
    """A DHT contact.

    Equality and hashing are based on ``address``, ``_node_id`` and
    ``udp_port`` only; ``tcp_port`` and the remaining fields are excluded so
    they may change over a peer's lifetime without breaking set/dict use.
    """
    address: str = field(hash=True)
    _node_id: typing.Optional[bytes] = field(hash=True)
    udp_port: typing.Optional[int] = field(hash=True)
    tcp_port: typing.Optional[int] = field(compare=False, hash=False)
    protocol_version: typing.Optional[int] = field(default=1, compare=False, hash=False)
    allow_localhost: bool = field(default=False, compare=False, hash=False)
    def __post_init__(self):
        # Validate node id length, non-privileged port ranges, and that the
        # address is a public IPv4 (optionally allowing localhost).
        if self._node_id is not None:
            if not len(self._node_id) == constants.HASH_LENGTH:
                raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
        if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
            raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
        if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
            raise ValueError(f"invalid tcp port: {self.address}:{self.tcp_port}")
        if not is_valid_public_ipv4(self.address, self.allow_localhost):
            raise ValueError(f"invalid ip address: '{self.address}'")
    def update_tcp_port(self, tcp_port: int):
        # Safe to mutate: tcp_port is excluded from hash/eq above.
        self.tcp_port = tcp_port
    @property
    def node_id(self) -> bytes:
        return self._node_id
    def compact_address_udp(self) -> bytearray:
        return make_compact_address(self.node_id, self.address, self.udp_port)
    def compact_address_tcp(self) -> bytearray:
        return make_compact_address(self.node_id, self.address, self.tcp_port)
    def compact_ip(self):
        return make_compact_ip(self.address)
    def __str__(self):
        return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"
| 46.066327 | 128 | 0.684129 |
4b70edb5958b5efe3ef928948b58740378f9a010 | 864 | py | Python | Statistics/Pop_corr_coeff.py | SAMIA-CLOUD/sql_assignment | ba3fca67e739b48eac8ecfc0b90f8e5e8643ff25 | [
"MIT"
] | null | null | null | Statistics/Pop_corr_coeff.py | SAMIA-CLOUD/sql_assignment | ba3fca67e739b48eac8ecfc0b90f8e5e8643ff25 | [
"MIT"
] | null | null | null | Statistics/Pop_corr_coeff.py | SAMIA-CLOUD/sql_assignment | ba3fca67e739b48eac8ecfc0b90f8e5e8643ff25 | [
"MIT"
] | null | null | null | from Statistics.Mean import mean
from Statistics.Psd import psd
from Calculator.Multiplication import multiplication
from Calculator.Subtraction import subtraction
from Calculator.Division import division
from Calculator.Addition import addition
def Pop_correlation_coefficient(data1, data2):
data1_mean = mean(data1)
data2_mean = mean(data2)
a = []
b = []
tot_sum = 0
x = psd(data1)
y = psd(data2)
for i in data1:
result1 = subtraction(data1_mean, i)
z1 = division(result1, x)
a.append(z1)
for i in data2:
result2 = subtraction(data2_mean, i)
z2 = division(result2, y)
b.append(z2)
for i in range(len(data1)):
ab = multiplication(a[i], b[i])
tot_sum = addition(tot_sum, ab)
result3 = division(tot_sum, subtraction(1, len(data1)))
return result3 | 25.411765 | 59 | 0.664352 |
8e30dfa9296028d7759bd86f7528f875a716bb97 | 4,249 | py | Python | QUANTAXIS/QAUtil/QABar.py | 416104443/QUANTAXIS | 791e7d6d368ab2581b6c32f5ad9918f44cd4065f | [
"MIT"
] | 2 | 2017-10-26T12:42:04.000Z | 2021-04-14T05:22:43.000Z | QUANTAXIS/QAUtil/QABar.py | paracats/QUANTAXIS | 23907d5e1398bb57f3e8d9d50c21d9fb5bfe3e86 | [
"MIT"
] | null | null | null | QUANTAXIS/QAUtil/QABar.py | paracats/QUANTAXIS | 23907d5e1398bb57f3e8d9d50c21d9fb5bfe3e86 | [
"MIT"
] | null | null | null | # coding=utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import math
import datetime
import time
import numpy as np
import pandas as pd
from QUANTAXIS.QAUtil.QADate_trade import (QA_util_get_real_datelist, QA_util_date_gap,
QA_util_get_trade_range,
QA_util_if_trade, trade_date_sse)
def QA_util_make_min_index(day, type_='1min'):
    """Minute-bar DatetimeIndex for one trading day (morning + afternoon
    sessions), or a ``DataFrame(['No trade'])`` sentinel on non-trading days."""
    if QA_util_if_trade(day) is not True:
        return pd.DataFrame(['No trade'])
    day_str = str(day)
    morning = pd.date_range(day_str + ' 09:30:00', day_str + ' 11:30:00',
                            freq=type_, closed='right')
    afternoon = pd.date_range(day_str + ' 13:00:00', day_str + ' 15:00:00',
                              freq=type_, closed='right')
    return morning.append(afternoon)
def QA_util_make_hour_index(day, type_='1h'):
    """Hour-bar DatetimeIndex for one trading day (morning + afternoon
    sessions), or a ``DataFrame(['No trade'])`` sentinel on non-trading days.

    Mirrors :func:`QA_util_make_min_index` at hourly frequency.
    """
    if QA_util_if_trade(day) is True:
        # Bug fix: the morning start timestamp was missing the space separator
        # ("...'09:30:00'" instead of "...' 09:30:00'"), unlike every other
        # timestamp in this module, producing a malformed datetime string.
        return pd.date_range(str(day) + ' 09:30:00', str(day) + ' 11:30:00', freq=type_, closed='right').append(
            pd.date_range(str(day) + ' 13:00:00', str(day) + ' 15:00:00', freq=type_, closed='right'))
    else:
        return pd.DataFrame(['No trade'])
def QA_util_time_gap(time, gap, methods, type_):
    """Step *gap* minute bars forward or backward from *time* (minute-bar
    backtesting helper).

    Parameters
    ----------
    time : str
        Timestamp formatted as ``'%Y-%m-%d %H:%M:%S'``.
    gap : int
        Number of minute bars to step.
    methods : str
        One of ``'>'``/``'gt'``, ``'>='``/``'gte'``, ``'<'``/``'lt'``,
        ``'<='``/``'lte'``, ``'=='``/``'='``/``'eq'``. Unknown methods
        return ``None`` (unchanged from the original behavior).
    type_ : str
        Bar frequency such as ``'1min'``.
    """
    min_len = int(240 / int(str(type_).split('min')[0]))
    day_gap = math.ceil(gap / min_len)
    # Locate *time*'s trading day once instead of re-parsing it per branch.
    day = str(datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S').date())
    day_index = trade_date_sse.index(day)

    def _minute_bars(days):
        # Concatenate each day's minute index; after reset_index() column 0
        # holds the timestamps.
        return pd.concat([pd.DataFrame(QA_util_make_min_index(d, type_))
                          for d in days]).reset_index()

    if methods in ['>', 'gt']:
        data = _minute_bars(trade_date_sse[day_index:day_index + day_gap + 1])
        return np.asarray(data[data[0] > time].head(gap)[0].apply(lambda x: str(x))).tolist()[-1]
    elif methods in ['>=', 'gte']:
        data = _minute_bars(trade_date_sse[day_index:day_index + day_gap + 1])
        return np.asarray(data[data[0] >= time].head(gap)[0].apply(lambda x: str(x))).tolist()[-1]
    elif methods in ['<', 'lt']:
        data = _minute_bars(trade_date_sse[day_index - day_gap:day_index + 1])
        return np.asarray(data[data[0] < time].tail(gap)[0].apply(lambda x: str(x))).tolist()[0]
    elif methods in ['<=', 'lte']:
        data = _minute_bars(trade_date_sse[day_index - day_gap:day_index + 1])
        return np.asarray(data[data[0] <= time].tail(gap)[0].apply(lambda x: str(x))).tolist()[0]
    elif methods in ['==', '=', 'eq']:
        return time
if __name__ == '__main__':
    # Import-only module; no command-line behavior is defined yet.
    pass
5f8c1baff7b07f77aa4e85d1325f4b735935210e | 11,057 | py | Python | mapbox_vector_tile/encoder.py | tilezen/mapbox-vector-tile | 4e3a65a6f98c317048266260b8e7aac705e31e6f | [
"MIT"
] | 121 | 2016-07-14T00:44:54.000Z | 2022-03-19T00:49:14.000Z | mapbox_vector_tile/encoder.py | tilezen/mapbox-vector-tile | 4e3a65a6f98c317048266260b8e7aac705e31e6f | [
"MIT"
] | 53 | 2016-07-05T14:35:06.000Z | 2021-05-20T22:31:02.000Z | mapbox_vector_tile/encoder.py | tilezen/mapbox-vector-tile | 4e3a65a6f98c317048266260b8e7aac705e31e6f | [
"MIT"
] | 34 | 2016-07-27T23:45:05.000Z | 2022-01-02T20:37:58.000Z | from mapbox_vector_tile.polygon import make_it_valid
from numbers import Number
from past.builtins import long
from past.builtins import unicode
from past.builtins import xrange
from shapely.geometry.base import BaseGeometry
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.polygon import orient
from shapely.geometry.polygon import Polygon
from shapely.ops import transform
from shapely.wkb import loads as load_wkb
from shapely.wkt import loads as load_wkt
import decimal
from .compat import PY3, vector_tile, apply_map
from .geom_encoder import GeometryEncoder
from .simple_shape import SimpleShape
def on_invalid_geometry_raise(shape):
    """Invalid-geometry handler that aborts encoding with a ``ValueError``."""
    message = 'Invalid geometry: %s' % shape.wkt
    raise ValueError(message)
def on_invalid_geometry_ignore(shape):
    """Invalid-geometry handler that silently drops the shape (returns None)."""
    return None
def on_invalid_geometry_make_valid(shape):
    """Invalid-geometry handler that attempts repair via ``make_it_valid``."""
    repaired = make_it_valid(shape)
    return repaired
class VectorTile:
    """Encoder that accumulates features into a Mapbox Vector Tile protobuf.

    Features are added per layer via :meth:`addFeatures`; geometries are
    optionally quantized to tile-local coordinates, winding-order corrected,
    validated (with a pluggable invalid-geometry handler) and encoded.
    """
    def __init__(self, extents, on_invalid_geometry=None,
                 max_geometry_validate_tries=5, round_fn=None,
                 check_winding_order=True):
        self.tile = vector_tile.tile()
        self.extents = extents
        self.on_invalid_geometry = on_invalid_geometry
        self.check_winding_order = check_winding_order
        self.max_geometry_validate_tries = max_geometry_validate_tries
        # Coordinate rounding is pluggable; defaults to banker's rounding.
        if round_fn:
            self._round = round_fn
        else:
            self._round = self._round_quantize
    def _round_quantize(self, val):
        # round() has different behavior in python 2/3
        # For consistency between 2 and 3 we use quantize, however
        # it is slower than the built in round function.
        d = decimal.Decimal(val)
        rounded = d.quantize(1, rounding=decimal.ROUND_HALF_EVEN)
        return float(rounded)
    def addFeatures(self, features, layer_name='',
                    quantize_bounds=None, y_coord_down=False):
        """Add a new layer populated from *features* (dicts with 'geometry',
        optional 'properties' and 'id'); empty/missing geometries are skipped."""
        self.layer = self.tile.layers.add()
        self.layer.name = layer_name
        self.layer.version = 1
        self.layer.extent = self.extents
        # Per-layer deduplication indexes for keys and values.
        self.key_idx = 0
        self.val_idx = 0
        self.seen_keys_idx = {}
        self.seen_values_idx = {}
        # Booleans indexed separately: True == 1 in dict keys otherwise.
        self.seen_values_bool_idx = {}
        for feature in features:
            # skip missing or empty geometries
            geometry_spec = feature.get('geometry')
            if geometry_spec is None:
                continue
            shape = self._load_geometry(geometry_spec)
            if shape is None:
                raise NotImplementedError(
                    'Can\'t do geometries that are not wkt, wkb, or shapely '
                    'geometries')
            if shape.is_empty:
                continue
            if quantize_bounds:
                shape = self.quantize(shape, quantize_bounds)
            if self.check_winding_order:
                shape = self.enforce_winding_order(shape, y_coord_down)
            if shape is not None and not shape.is_empty:
                self.addFeature(feature, shape, y_coord_down)
    def enforce_winding_order(self, shape, y_coord_down, n_try=1):
        """Ensure MVT winding-order rules for (multi)polygons; other geometry
        types pass through unchanged. *n_try* bounds the repair recursion."""
        if shape.type == 'MultiPolygon':
            # If we are a multipolygon, we need to ensure that the
            # winding orders of the consituent polygons are
            # correct. In particular, the winding order of the
            # interior rings need to be the opposite of the
            # exterior ones, and all interior rings need to follow
            # the exterior one. This is how the end of one polygon
            # and the beginning of another are signaled.
            shape = self.enforce_multipolygon_winding_order(
                shape, y_coord_down, n_try)
        elif shape.type == 'Polygon':
            # Ensure that polygons are also oriented with the
            # appropriate winding order. Their exterior rings must
            # have a clockwise order, which is translated into a
            # clockwise order in MVT's tile-local coordinates with
            # the Y axis in "screen" (i.e: +ve down) configuration.
            # Note that while the Y axis flips, we also invert the
            # Y coordinate to get the tile-local value, which means
            # the clockwise orientation is unchanged.
            shape = self.enforce_polygon_winding_order(
                shape, y_coord_down, n_try)
        # other shapes just get passed through
        return shape
    def quantize(self, shape, bounds):
        """Scale *shape* from *bounds* (minx, miny, maxx, maxy) into
        tile-local integer coordinates spanning ``self.extents``."""
        minx, miny, maxx, maxy = bounds
        def fn(x, y, z=None):
            xfac = self.extents / (maxx - minx)
            yfac = self.extents / (maxy - miny)
            x = xfac * (x - minx)
            y = yfac * (y - miny)
            return self._round(x), self._round(y)
        return transform(fn, shape)
    def handle_shape_validity(self, shape, y_coord_down, n_try):
        """Return *shape* if valid, otherwise delegate to the configured
        invalid-geometry handler (retrying up to max_geometry_validate_tries)."""
        if shape.is_valid:
            return shape
        if n_try >= self.max_geometry_validate_tries:
            # ensure that we don't recurse indefinitely with an
            # invalid geometry handler that doesn't validate
            # geometries
            return None
        if self.on_invalid_geometry:
            shape = self.on_invalid_geometry(shape)
            if shape is not None and not shape.is_empty:
                # this means that we have a handler that might have
                # altered the geometry. We'll run through the process
                # again, but keep track of which attempt we are on to
                # terminate the recursion.
                shape = self.enforce_winding_order(
                    shape, y_coord_down, n_try + 1)
        return shape
    def enforce_multipolygon_winding_order(self, shape, y_coord_down, n_try):
        """Orient every constituent polygon; a repair step may itself yield a
        MultiPolygon, whose parts are flattened into the result."""
        assert shape.type == 'MultiPolygon'
        parts = []
        for part in shape.geoms:
            part = self.enforce_polygon_winding_order(
                part, y_coord_down, n_try)
            if part is not None and not part.is_empty:
                if part.type == 'MultiPolygon':
                    parts.extend(part.geoms)
                else:
                    parts.append(part)
        if not parts:
            return None
        if len(parts) == 1:
            oriented_shape = parts[0]
        else:
            oriented_shape = MultiPolygon(parts)
        oriented_shape = self.handle_shape_validity(
            oriented_shape, y_coord_down, n_try)
        return oriented_shape
    def enforce_polygon_winding_order(self, shape, y_coord_down, n_try):
        """Round ring coordinates and orient the polygon (sign depends on the
        Y-axis direction), then re-validate."""
        assert shape.type == 'Polygon'
        def fn(point):
            x, y = point
            return self._round(x), self._round(y)
        exterior = apply_map(fn, shape.exterior.coords)
        rings = None
        if len(shape.interiors) > 0:
            rings = [apply_map(fn, ring.coords) for ring in shape.interiors]
        sign = 1.0 if y_coord_down else -1.0
        oriented_shape = orient(Polygon(exterior, rings), sign=sign)
        oriented_shape = self.handle_shape_validity(
            oriented_shape, y_coord_down, n_try)
        return oriented_shape
    def _load_geometry(self, geometry_spec):
        """Coerce a shapely geometry, GeoJSON-style dict, WKB or WKT spec into
        a shape; returns None when nothing can parse it."""
        if isinstance(geometry_spec, BaseGeometry):
            return geometry_spec
        if isinstance(geometry_spec, dict):
            return SimpleShape(geometry_spec['coordinates'],
                               geometry_spec["type"])
        try:
            return load_wkb(geometry_spec)
        except Exception:
            try:
                return load_wkt(geometry_spec)
            except Exception:
                return None
    def addFeature(self, feature, shape, y_coord_down):
        """Encode one feature's geometry, id and properties into the current
        layer; silently drops geometries that encode to nothing."""
        geom_encoder = GeometryEncoder(y_coord_down, self.extents,
                                       self._round)
        geometry = geom_encoder.encode(shape)
        feature_type = self._get_feature_type(shape)
        if len(geometry) == 0:
            # Don't add geometry if it's too small
            return
        f = self.layer.features.add()
        fid = feature.get('id')
        if fid is not None:
            # Only non-negative numeric ids are representable in the protobuf.
            if isinstance(fid, Number) and fid >= 0:
                f.id = fid
        # properties
        properties = feature.get('properties')
        if properties is not None:
            self._handle_attr(self.layer, f, properties)
        f.type = feature_type
        f.geometry.extend(geometry)
    def _get_feature_type(self, shape):
        """Map a shapely geometry type to the MVT feature-type enum."""
        if shape.type == 'Point' or shape.type == 'MultiPoint':
            return self.tile.Point
        elif shape.type == 'LineString' or shape.type == 'MultiLineString':
            return self.tile.LineString
        elif shape.type == 'Polygon' or shape.type == 'MultiPolygon':
            return self.tile.Polygon
        elif shape.type == 'GeometryCollection':
            raise ValueError('Encoding geometry collections not supported')
        else:
            raise ValueError('Cannot encode unknown geometry type: %s' %
                             shape.type)
    def _chunker(self, seq, size):
        # Split *seq* into consecutive chunks of at most *size* elements.
        return [seq[pos:pos + size] for pos in xrange(0, len(seq), size)]
    def _can_handle_key(self, k):
        # Only string-like property keys are encodable.
        return isinstance(k, (str, unicode))
    def _can_handle_val(self, v):
        # Encodable property value types: strings, bools, ints and floats.
        if isinstance(v, (str, unicode)):
            return True
        elif isinstance(v, bool):
            return True
        elif isinstance(v, (int, long)):
            return True
        elif isinstance(v, float):
            return True
        return False
    def _can_handle_attr(self, k, v):
        return self._can_handle_key(k) and \
            self._can_handle_val(v)
    def _handle_attr(self, layer, feature, props):
        """Encode *props* into the layer's shared key/value tables, appending
        (key index, value index) pairs to the feature's tags."""
        for k, v in props.items():
            if self._can_handle_attr(k, v):
                if not PY3 and isinstance(k, str):
                    k = k.decode('utf-8')
                if k not in self.seen_keys_idx:
                    layer.keys.append(k)
                    self.seen_keys_idx[k] = self.key_idx
                    self.key_idx += 1
                feature.tags.append(self.seen_keys_idx[k])
                # Keep bools in their own index so True/1 don't collide.
                if isinstance(v, bool):
                    values_idx = self.seen_values_bool_idx
                else:
                    values_idx = self.seen_values_idx
                if v not in values_idx:
                    values_idx[v] = self.val_idx
                    self.val_idx += 1
                    val = layer.values.add()
                    if isinstance(v, bool):
                        val.bool_value = v
                    elif isinstance(v, str):
                        if PY3:
                            val.string_value = v
                        else:
                            val.string_value = unicode(v, 'utf-8')
                    elif isinstance(v, unicode):
                        val.string_value = v
                    elif isinstance(v, (int, long)):
                        val.int_value = v
                    elif isinstance(v, float):
                        val.double_value = v
                feature.tags.append(values_idx[v])
6080b53c22ec723b7ffb55e6f4fe47b9ee3ced1b | 744 | py | Python | Chapter 06/Chap06_Example6.12.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 06/Chap06_Example6.12.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 06/Chap06_Example6.12.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | import re
mytxt = '123 \n123 gfh'
mypattern = '\s'
myreplace = ''
mystr = re.subn(mypattern, myreplace, mytxt)
print(mystr) # SUBN1
print(type(mystr))
print(mystr[0])
print(mystr[1])
print("-------------")
mystr = re.subn(mypattern, myreplace, mytxt,count =1)
print(mystr) # SUBN2
print("-------------")
print(re.subn('st', '*#' , 'Stay safe, stay healthy', flags = re.IGNORECASE)) # SUBN3
print("-------------")
print(re.subn('st', '*#' , 'Stay safe, stay healthy')) # SUBN4
print("-------------")
print(re.subn('st', '*#' , 'Stay safe, stay healthy', count = 1, flags = re.IGNORECASE)) # SUBN5
print("-------------")
print(re.subn('\sAND\s', ' & ' , 'The prince and the pauper', count = 1, flags = re.IGNORECASE)) # SUBN6 | 37.2 | 104 | 0.568548 |
0dd43a0dac3438ed0448e0a0d3c1a5b59de2c16e | 718 | py | Python | thirdpartyauth/twitch_auth.py | secretisdead/thirdpartyauth | 4e8a367d23899693ecebc056de409ca93646d80d | [
"MIT"
] | null | null | null | thirdpartyauth/twitch_auth.py | secretisdead/thirdpartyauth | 4e8a367d23899693ecebc056de409ca93646d80d | [
"MIT"
] | null | null | null | thirdpartyauth/twitch_auth.py | secretisdead/thirdpartyauth | 4e8a367d23899693ecebc056de409ca93646d80d | [
"MIT"
] | 1 | 2021-09-05T06:18:08.000Z | 2021-09-05T06:18:08.000Z | import urllib
from . import add_state
from . oauth2 import OAuth2
class TwitchAuth(OAuth2):
def authentication_uri(self, redirect_uri, state):
uri = (
'https://api.twitch.tv/kraken/oauth2/authorize'
+ '?response_type=code'
+ '&client_id=' + self.credentials['client_id']
+ '&redirect_uri=' + urllib.parse.quote_plus(redirect_uri)
+ '&scope=user:read:email'
)
return add_state(uri, state)
def authentication_value(self, redirect_uri, *args):
self.access_token_uri = 'https://api.twitch.tv/kraken/oauth2/token'
self.user_info_uri = 'https://api.twitch.tv/helix/users'
self.get_user_id = lambda user_info: user_info['data'][0]['id']
return super().authentication_value(redirect_uri)
| 32.636364 | 69 | 0.724234 |
0ee2f61617158a409d718710f6bb397d8647e36e | 8,689 | py | Python | src/dropbot_chip_qc/ui/execute.py | cfobel/dropbot-chip-qc | e5944b88c0d423163f55a3f49ebf84bb27e229bc | [
"BSD-3-Clause"
] | null | null | null | src/dropbot_chip_qc/ui/execute.py | cfobel/dropbot-chip-qc | e5944b88c0d423163f55a3f49ebf84bb27e229bc | [
"BSD-3-Clause"
] | 5 | 2019-04-02T11:10:45.000Z | 2019-07-17T20:31:18.000Z | src/dropbot_chip_qc/ui/execute.py | cfobel/dropbot-chip-qc | e5944b88c0d423163f55a3f49ebf84bb27e229bc | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
.. versionadded:: v0.12.0
'''
import functools as ft
import itertools as it
import threading
from dropbot_chip_qc.ui.render import get_summary_dict, render_summary
from logging_helpers import _L, caller_name
import asyncio_helpers as aioh
import dropbot_chip_qc as qc
import dropbot_chip_qc.ui.plan
import dropbot_chip_qc.ui.render
import networkx as nx
import numpy as np
import pandas as pd
import path_helpers as ph
import si_prefix as si
import trollius as asyncio
from .mqtt_proxy import DropBotMqttProxy
# For colors, see: https://gist.github.com/cfobel/fd939073cf13a309d7a9
# Electrode patch face colors used by the chip-test UI (light_blue is applied
# on reset below; light_green presumably marks passed channels -- TODO confirm).
light_blue = '#88bde6'
light_green = '#90cd97'
class Executor(object):
    """Runs a chip-QC channel-transfer test plan on a background thread.

    Keeps the pristine channel graph/plan (``base_*``) separate from the
    working copies so failed channels can be removed and the plan rerouted
    between attempts; intermediate results accumulate in
    ``completed_results``.
    """
    def __init__(self, channels_graph, channel_plan):
        self.base_channels_graph = channels_graph.copy()
        self.channels_graph = channels_graph.copy()
        self.base_channel_plan = list(channel_plan)
        self.completed_results = []
        self._thread = None
        self._task = None
    def is_alive(self):
        # True while the background test thread is running.
        return self._thread is not None and self._thread.is_alive()
    def remove_channels(self, bad_channels):
        # Exclude failed channels from subsequent route planning.
        self.channels_graph.remove_nodes_from(bad_channels)
    def channel_plan(self):
        """Return ``(channel_plan, completed_transfers)``, resuming from the
        last intermediate result and rerouting around removed channels."""
        if self.completed_results:
            channel_plan = self.completed_results[-1]['channel_plan']
            completed_transfers = \
                self.completed_results[-1]['completed_transfers']
        else:
            channel_plan = self.base_channel_plan
            completed_transfers = []
        # Drop channels no longer present in the (possibly pruned) graph.
        channel_plan_ = [c for c in channel_plan if c in self.channels_graph]
        if len(channel_plan_) < len(channel_plan):
            _L().debug('reroute around missing channels')
            channel_plan = list(qc.ui.plan\
                .create_channel_plan(self.channels_graph, channel_plan_,
                                     loop=False))
        return channel_plan, completed_transfers
    def start(self, aproxy, signals, bad_channels=None, min_duration=.15):
        '''Launch the transfer test on a daemon thread.

        NOTE(review): ``bad_channels`` is currently unused here; callers
        prune channels before calling (see ``ExecutorController.start``).

        # TODO
        - incorporate `execute()` coroutine
        - add
        '''
        if self.is_alive():
            raise RuntimeError('Executor is already running.')
        channel_plan, completed_transfers = self.channel_plan()
        @asyncio.coroutine
        def execute_test(*args, **kwargs):
            yield asyncio.From(set_capacitance_update_interval())
            try:
                result = yield asyncio\
                    .From(qc.ui.plan.transfer_windows(*args, **kwargs))
            except qc.ui.plan.TransferFailed as exception:
                # Save intermediate result.
                result = dict(channel_plan=exception.channel_plan,
                              completed_transfers=exception.completed_transfers)
                signals.signal('test-interrupt').send(caller_name(0), **result)
            self.completed_results.append(result)
            # Turn off all channel actuations when the run ends.
            yield asyncio.From(aproxy.set_state_of_channels(pd.Series(), append=False))
            # result = dict(channel_plan=channel_plan_i,
            #               completed_transfers=completed_transfers_i)
            raise asyncio.Return(result)
        @asyncio.coroutine
        def set_capacitance_update_interval():
            # Ensure capacitance updates arrive at least twice per transfer
            # window (interval of 0 means "disabled" on the device).
            state = yield asyncio.From(aproxy.state)
            max_update_interval = int(.5 * min_duration * 1e3)
            if state.capacitance_update_interval_ms > max_update_interval \
                    or state.capacitance_update_interval_ms == 0:
                yield asyncio\
                    .From(aproxy.update_state(capacitance_update_interval_ms=
                                              max_update_interval))
        # Append the shortest path back to the plan's starting channel so the
        # test route forms a loop.
        looped_channel_plan = (channel_plan +
                               nx.shortest_path(self.channels_graph,
                                                channel_plan[-1],
                                                self.base_channel_plan[0])[1:])
        self._task = aioh.cancellable(execute_test)
        transfer_liquid = ft.partial(qc.ui.plan.transfer_liquid, aproxy,
                                     min_duration=min_duration)
        self._thread = threading.Thread(target=self._task,
                                        args=(signals, looped_channel_plan,
                                              completed_transfers,
                                              transfer_liquid),
                                        kwargs={'n': 3})
        self._thread.daemon = True
        self._thread.start()
    def pause(self):
        # Cancel the running task (if any); completed results are preserved.
        if self.is_alive():
            self._task.cancel()
    def reset(self):
        """Stop any running test and restore the pristine graph and results."""
        self.pause()
        del self.completed_results[:]
        self.channels_graph = self.base_channels_graph.copy()
class ExecutorController(object):
    """Wires an :class:`Executor` to the chip-QC UI.

    The constructor builds closures over the DropBot proxy, UI widgets and
    the executor, then exposes them as bound callables
    (``calibrate_sheet_capacitance``, ``pause``, ``reset``, ``save_results``,
    ``start``).
    """
    def __init__(self, aproxy, ui, executor):
        self.ui = ui
        channel_electrodes = ui['channel_electrodes']
        channel_patches = ui['channel_patches']
        chip_info = ui['chip_info']
        chip_info_mm = ui['chip_info_mm']
        figure = ui['figure']
        signals = ui['signals']
        def calibrate_sheet_capacitance(target_force, *args):
            '''Calibrate sheet capacitance with liquid present

            **NOTE** Prior to running the following cell:

            - _at least_ one electrode **MUST** be **actuated**
            - all actuated electrodes **MUST** be completely covered with liquid

            It may be helpful to use the interactive figure UI to manipulate
            liquid until the above criteria are met.

            This function performs the following steps:

            1. Measure **total capacitance** across **all actuated electrodes**
            2. Compute sheet capacitance with liquid present ($\Omega_L$) based
               on nominal areas of actuated electrodes from `chip_file`
            3. Compute voltage to match the target force, where
               $F = 10^3 \cdot 0.5 \cdot \Omega_L \cdot V^2$
            4. Return the computed sheet capacitance and voltage.
            '''
            proxy = DropBotMqttProxy.from_uri('dropbot', aproxy.__client__._host)
            name = 'liquid'
            states = proxy.state_of_channels
            channels = states[states > 0].index.tolist()
            electrodes_by_id = pd.Series(chip_info_mm['electrodes'],
                                         index=(e['id'] for e in
                                                chip_info_mm['electrodes']))
            # Nominal area (mm^2) of all currently actuated electrodes.
            actuated_area = (electrodes_by_id[channel_electrodes[channels]]
                             .map(lambda x: x['area'])).sum()
            # Median of 20 readings to reject transient noise.
            capacitance = pd.Series(proxy.capacitance(0)
                                    for i in range(20)).median()
            sheet_capacitance = capacitance / actuated_area
            message = ('Measured %s sheet capacitance: %sF/%.1f mm^2 = %sF/mm^2'
                       % (name, si.si_format(capacitance), actuated_area,
                          si.si_format(sheet_capacitance)))
            print(message)
            voltage = np.sqrt(target_force / (1e3 * 0.5 * sheet_capacitance))
            return sheet_capacitance, voltage
        def pause(*args):
            executor.pause()
        def reset(*args):
            # Reset executor state and restore default electrode colors.
            executor.reset()
            channel_patches.map(lambda x: x.set_facecolor(light_blue))
            for collection in list(figure._ax.collections):
                collection.remove()
            figure._ax.figure.canvas.draw()
        def save_results(output_directory, chip_uuid, *args):
            # Render the HTML chip test report for the current results.
            output_dir = ph.path(output_directory)
            channel_plan, completed_transfers = executor.channel_plan()
            proxy = DropBotMqttProxy.from_uri('dropbot', aproxy.__client__._host)
            summary_dict = \
                get_summary_dict(proxy, chip_info,
                                 sorted(set(executor.base_channel_plan)),
                                 channel_plan, completed_transfers,
                                 chip_uuid=chip_uuid)
            output_path = output_dir.joinpath('Chip test report - %s.html' %
                                              summary_dict['chip_uuid'])
            print('save to: `%s`' % output_path)
            render_summary(output_path, **summary_dict)
        def start(bad_channels, *args):
            # Rebuild the working graph from scratch so previously removed
            # channels are restored before pruning the current bad set.
            executor.channels_graph = executor.base_channels_graph.copy()
            executor.remove_channels(bad_channels)
            executor.start(aproxy, signals)
        self.calibrate_sheet_capacitance = calibrate_sheet_capacitance
        self.pause = pause
        self.reset = reset
        self.save_results = save_results
        self.start = start
dc7b24a85bd14d1ef412cbaf48cab40344cb8abb | 6,919 | py | Python | biosteam/units/_balance.py | sarangbhagwat/biosteam | fc2d227d3fce9d5f4ccb873a6d41edb535347412 | [
"MIT"
] | null | null | null | biosteam/units/_balance.py | sarangbhagwat/biosteam | fc2d227d3fce9d5f4ccb873a6d41edb535347412 | [
"MIT"
] | null | null | null | biosteam/units/_balance.py | sarangbhagwat/biosteam | fc2d227d3fce9d5f4ccb873a6d41edb535347412 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from .._unit import Unit
from thermosteam.separations import material_balance
from .._graphics import process_specification_graphics
from ..utils import static
__all__ = ('MassBalance',)
# %% Mass Balance Unit
@static
class MassBalance(Unit):
    """
    Create a Unit object that changes net input flow rates to satisfy output
    flow rates. This calculation is based on mass balance equations for
    specified IDs.

    Parameters
    ----------
    ins : stream
        Inlet stream. Doesn't actually affect mass balance. It's just to
        show the position in the process.
    outs : stream
        Outlet stream. Doesn't actually affect mass balance. It's just to
        show the position in the process.
    chemical_IDs : tuple[str]
        Chemicals that will be used to solve mass balance linear equations.
        The number of chemicals must be same as the number of input streams varied.
    variable_inlets : Iterable[Stream]
        Inlet streams that can vary in net flow rate to accomodate for the
        mass balance.
    constant_inlets: Iterable[Stream], optional
        Inlet streams that cannot vary in flow rates.
    constant_outlets: Iterable[Stream], optional
        Outlet streams that cannot vary in flow rates.
    is_exact=True : bool, optional
        True if exact flow rate solution is required for the specified IDs.
    balance='flow' : {'flow', 'composition'}, optional
        * 'flow': Satisfy output flow rates
        * 'composition': Satisfy net output molar composition

    Examples
    --------
    MassBalance are Unit objects that serve to alter flow rates of selected
    chemicals and input streams to satisfy the mass balance.

    The example below uses the MassBalance object to satisfy the target
    flow rate feeding the mixer M1:

    >>> from biosteam import System, Stream, settings, main_flowsheet
    >>> from biosteam.units import (Mixer, Splitter, StorageTank, Pump,
    ...                             Flash, MassBalance)
    >>> main_flowsheet.set_flowsheet('mass_balance_example')
    >>> settings.set_thermo(['Water', 'Ethanol'])
    >>> water = Stream('water',
    ...                Water=40,
    ...                units='lb/s',
    ...                T=350, P=101325)
    >>> ethanol = Stream('ethanol',
    ...                  Ethanol=190, Water=30,
    ...                  T=300, P=101325)
    >>> target = Stream('target',
    ...                 Ethanol=500, Water=500)
    >>> T1 = StorageTank('T1', outs='s1')
    >>> T2 = StorageTank('T2', outs='s2')
    >>> P1 = Pump('P1', P=101325, outs='s3')
    >>> P2 = Pump('P2', P=101325, outs='s4')
    >>> M1 = Mixer('M1', outs='s5')
    >>> S1 = Splitter('S1', outs=('s6', 's7'), split=0.5)
    >>> F1 = Flash('F1', outs=('s8', 's9'), V=0.5, P =101325)
    >>> MB1 = MassBalance('MB1', outs='s6_2',
    ...                   variable_inlets=[water, ethanol],
    ...                   constant_inlets=[S1-0],
    ...                   constant_outlets=[target],
    ...                   chemical_IDs=('Ethanol', 'Water'),
    ...                   description='Adjust flow rate of feed to mixer')
    >>> # Connect units
    >>> water-T1-P1
    <Pump: P1>
    >>> ethanol-T2-P2
    <Pump: P2>
    >>> [P1-0, P2-0, MB1-0]-M1-F1-1-S1-0-MB1
    <MassBalance: MB1>
    >>> sys = main_flowsheet.create_system('sys')
    >>> # Make diagram to view system
    >>> # sys.diagram()
    >>> sys.simulate();
    >>> target.show()
    Stream: target
    phase: 'l', T: 298.15 K, P: 101325 Pa
    flow (kmol/hr): Water    500
                    Ethanol  500

    """
    _graphics = process_specification_graphics
    _N_heat_utilities = 0
    _N_ins = _N_outs = 1

    def __init__(self, ID='', ins=None, outs=(), thermo=None,
                 chemical_IDs=None, variable_inlets=(),
                 constant_outlets=(), constant_inlets=(),
                 is_exact=True, balance='flow',
                 description=""):
        Unit.__init__(self, ID, ins, outs, thermo)
        self.variable_inlets = variable_inlets
        self.constant_inlets = constant_inlets
        self.constant_outlets = constant_outlets
        # Bug fix: the documented default `chemical_IDs=None` previously crashed
        # here with `TypeError: 'NoneType' object is not iterable`; treat a
        # missing/empty argument as an empty tuple instead.
        self.chemical_IDs = tuple(chemical_IDs) if chemical_IDs else ()
        self.is_exact = is_exact
        self.balance = balance
        self.description = description

    def _run(self):
        # Delegate the linear mass-balance solve to thermosteam; this unit only
        # carries the configuration and its position in the flowsheet.
        material_balance(
            chemical_IDs=self.chemical_IDs,
            variable_inlets=self.variable_inlets,
            constant_outlets=self.constant_outlets,
            constant_inlets=self.constant_inlets,
            is_exact=self.is_exact,
            balance=self.balance
        )
# %% Energy Balance Unit
# class EnergyBalance(Unit):
# """Create a Unit object that changes a stream's temperature, flow rate, or vapor fraction to satisfy energy balance.
# **Parameters**
# **index:** [int] Index of stream that can vary in temperature, flow rate, or vapor fraction.
# **Type:** [str] Should be one of the following
# * 'T': Vary temperature of output stream
# * 'F': Vary flow rate of input/output stream
# * 'V': Vary vapor fraction of output stream
# **Qin:** *[float]* Additional energy input.
# .. Note:: This is not a mixer, input streams and output streams should match flow rates.
# """
# _kwargs = {'index': None,
# 'Type': 'T',
# 'Qin': 0}
# line = 'Balance'
# _has_cost = False
# _graphics = MassBalance._graphics
# _init_ins = MassBalance._init_ins
# _init_outs = MassBalance._init_outs
# def _run(self): # Get arguments
# ins = self.ins.copy()
# outs = self.outs.copy()
# kwargs = self._kwargs
# index = kwargs['index']
# Type = kwargs['Type']
# Qin = kwargs['Qin']
# # Pop out required streams
# if Type == 'F':
# s_in = ins.pop(index)
# s_out = outs.pop(index)
# else:
# s = outs.pop(index)
# # Find required enthalpy
# H_in = sum(i.H for i in ins) + Qin
# H_out = sum(o.H for o in outs)
# H_s = H_out - H_in
# # Set enthalpy
# if Type == 'T':
# s.H = -H_s
# elif Type == 'V':
# s.enable_phases()
# s.VLE(Qin=s.H - H_s)
# elif Type == 'F':
# s.mol *= (s_out.H - s_in.H)/H_s
# else:
# raise ValueError(f"Type must be 'T', 'V' or 'F', not '{Type}'")
| 36.225131 | 122 | 0.572626 |
a64cb1ad0a9d1639dbd76c8cb96733da4d1a11e2 | 14,437 | py | Python | xcube/util/cache.py | SabineEmbacher/xcube | 25863c713a27f1ecf6efb25ee0de8d322ab295bc | [
"MIT"
] | 97 | 2018-06-26T13:02:55.000Z | 2022-03-26T21:03:13.000Z | xcube/util/cache.py | SabineEmbacher/xcube | 25863c713a27f1ecf6efb25ee0de8d322ab295bc | [
"MIT"
] | 524 | 2018-11-09T12:00:08.000Z | 2022-03-31T17:00:13.000Z | xcube/util/cache.py | SabineEmbacher/xcube | 25863c713a27f1ecf6efb25ee0de8d322ab295bc | [
"MIT"
] | 15 | 2019-07-09T08:46:03.000Z | 2022-02-07T18:47:34.000Z | # The MIT License (MIT)
# Copyright (c) 2019 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import os.path
import sys
import time
from abc import ABCMeta, abstractmethod
from threading import RLock
from typing import Optional
__author__ = "Norman Fomferra (Brockmann Consult GmbH)"
_DEBUG_CACHE = False
class CacheStore(metaclass=ABCMeta):
    """
    Represents a store to which cached values can be stored into and restored from.

    Concrete stores (e.g. in-memory or file-based) decide what the "stored
    representation" of a value is and in which unit its size is measured.
    """

    @abstractmethod
    def can_load_from_key(self, key) -> bool:
        """
        Test whether a stored value representation can be loaded from the given key.

        :param key: the key
        :return: True, if so
        """
        pass

    @abstractmethod
    def load_from_key(self, key):
        """
        Load a stored value representation of the value and its size from the given key.

        :param key: the key
        :return: a 2-element sequence containing the stored representation of the value and its size
        """
        pass

    @abstractmethod
    def store_value(self, key, value):
        """
        Store a value and return its stored representation and size in any unit, e.g. in bytes.

        :param key: the key
        :param value: the value
        :return: a 2-element sequence containing the stored representation of the value and its size
        """
        pass

    @abstractmethod
    def restore_value(self, key, stored_value):
        """
        Restore a value from its stored representation.

        :param key: the key
        :param stored_value: the stored representation of the value
        :return: the restored value
        """
        pass

    @abstractmethod
    def discard_value(self, key, stored_value):
        """
        Discard a value from its storage.

        :param key: the key
        :param stored_value: the stored representation of the value
        """
        pass
class MemoryCacheStore(CacheStore):
    """
    In-memory cache store: each value is kept as-is inside a mutable
    ``[key, value]`` pair, so nothing survives outside the cache itself.
    """

    def can_load_from_key(self, key) -> bool:
        # There is no external storage to reload from, so the answer is
        # always negative for this store type.
        return False

    def load_from_key(self, key):
        raise NotImplementedError()

    def store_value(self, key, value):
        """
        Keep *value* in memory.

        :param key: the key
        :param value: the original value
        :return: the tuple (stored value, size) where the stored value is the
            mutable sequence ``[key, value]``.
        """
        return [key, value], _compute_object_size(value)

    def restore_value(self, key, stored_value):
        """
        :param key: the key
        :param stored_value: the stored representation of the value
        :return: the original value.
        """
        stored_key, value = stored_value
        if stored_key != key:
            raise ValueError('key does not match stored value')
        return value

    def discard_value(self, key, stored_value):
        """
        Clear the value slot of the given stored_value pair.

        :param key: the key
        :param stored_value: the stored representation of the value
        """
        if stored_value[0] != key:
            raise ValueError('key does not match stored value')
        stored_value[1] = None
class FileCacheStore(CacheStore):
    """
    Cache store that persists byte values (e.g. encoded PNG images) as files
    under a common directory, one file per key.
    """

    def __init__(self, cache_dir: str, ext: str):
        self.cache_dir = cache_dir
        self.ext = ext

    def can_load_from_key(self, key) -> bool:
        return os.path.exists(self._key_to_path(key))

    def load_from_key(self, key):
        path = self._key_to_path(key)
        return path, os.path.getsize(path)

    def store_value(self, key, value):
        path = self._key_to_path(key)
        parent = os.path.dirname(path)
        if not os.path.exists(parent):
            os.makedirs(parent, exist_ok=True)
        with open(path, 'wb') as stream:
            stream.write(value)
        return path, os.path.getsize(path)

    def restore_value(self, key, stored_value):
        # The stored representation is just the path; re-read the bytes.
        with open(self._key_to_path(key), 'rb') as stream:
            return stream.read()

    def discard_value(self, key, stored_value):
        try:
            os.remove(self._key_to_path(key))
        except IOError:
            # Best effort: a file that is already gone is fine.
            # TODO (forman): also remove empty directories up to self.cache_dir
            pass

    def _key_to_path(self, key):
        # Map a cache key to its on-disk location.
        return os.path.join(self.cache_dir, str(key) + self.ext)
def _policy_lru(item):
    """Eviction key: oldest access time first (Least Recently Used)."""
    return item.access_time


def _policy_mru(item):
    """Eviction key: newest access time first (Most Recently Used)."""
    return -item.access_time


def _policy_lfu(item):
    """Eviction key: fewest accesses first (Least Frequently Used)."""
    return item.access_count


def _policy_rr(item):
    """Eviction key: pseudo-random replacement (parity of the access count)."""
    return item.access_count & 1


#: Discard Least Recently Used items first
POLICY_LRU = _policy_lru
#: Discard Most Recently Used first
POLICY_MRU = _policy_mru
#: Discard Least Frequently Used first
POLICY_LFU = _policy_lfu
#: Discard items by Random Replacement
POLICY_RR = _policy_rr

# CPU-time origin for all cache access timestamps.
_T0 = time.process_time()
class Cache:
    """
    An implementation of a cache with pluggable stores and eviction policies.

    See https://en.wikipedia.org/wiki/Cache_algorithms
    """

    class Item:
        """
        Cache-private class representing an item in the cache.
        """

        def __init__(self):
            # Also used by discard() to blank out an item in place.
            self.key = None
            self.stored_value = None
            self.stored_size = 0
            self.creation_time = 0
            self.access_time = 0
            self.access_count = 0

        @staticmethod
        def load_from_key(store, key):
            """Return a new Item loaded from *store*, or None if *key* is not stored."""
            if not store.can_load_from_key(key):
                return None
            item = Cache.Item()
            item._load_from_key(store, key)
            return item

        def store(self, store, key, value):
            """Store *value* under *key* in *store* and record its stored size."""
            self.key = key
            self.access_count = 0
            self._access()
            stored_value, stored_size = store.store_value(key, value)
            self.stored_value = stored_value
            self.stored_size = stored_size

        def restore(self, store, key):
            """Restore and return the original value; counts as an access."""
            self._access()
            return store.restore_value(key, self.stored_value)

        def discard(self, store, key):
            """Remove the stored representation from *store* and reset this item."""
            store.discard_value(key, self.stored_value)
            self.__init__()

        def _load_from_key(self, store, key):
            self.key = key
            self.access_count = 0
            self._access()
            stored_value, stored_size = store.load_from_key(key)
            self.stored_value = stored_value
            self.stored_size = stored_size

        def _access(self):
            # Timestamps are CPU time relative to module load (_T0).
            self.access_time = time.process_time() - _T0
            self.access_count += 1

    def __init__(self, store=MemoryCacheStore(), capacity=1000, threshold=0.75, policy=POLICY_LRU, parent_cache=None):
        """
        Constructor.

        :param store: the cache store, see CacheStore interface
        :param capacity: the size capacity in units used by the store's store() method
        :param threshold: a number greater than zero and less than one
        :param policy: cache replacement policy. This is a function that maps a :py:class:`Cache.Item`
               to a numerical value. See :py:data:`POLICY_LRU`,
               :py:data:`POLICY_MRU`, :py:data:`POLICY_LFU`, :py:data:`POLICY_RR`
        :param parent_cache: optional second-level cache that receives items evicted from this one
        """
        self._store = store
        self._capacity = capacity
        self._threshold = threshold
        self._policy = policy
        self._parent_cache = parent_cache
        self._size = 0
        self._max_size = self._capacity * self._threshold
        self._item_dict = {}
        self._item_list = []
        # Reentrant lock: public methods call each other (e.g. trim -> remove_value).
        self._lock = RLock()

    @property
    def policy(self):
        return self._policy

    @property
    def store(self):
        return self._store

    @property
    def capacity(self):
        return self._capacity

    @property
    def threshold(self):
        return self._threshold

    @property
    def size(self):
        return self._size

    @property
    def max_size(self):
        return self._max_size

    def get_value(self, key):
        """
        Return the value cached under *key*, or None if it is not cached.

        Lookup order: this cache's items, then the parent cache, then the
        store itself (for stores that can load values by key).
        """
        # "with" guarantees the lock is released even if a store raises;
        # the original explicit acquire()/release() leaked the lock on error.
        with self._lock:
            item = self._item_dict.get(key)
            value = None
            restored = False
            if item:
                value = item.restore(self._store, key)
                restored = True
                if _DEBUG_CACHE:
                    _debug_print('restored value for key "%s" from cache' % key)
            elif self._parent_cache:
                # Bug fix: get_value() returns the restored value itself, not a
                # Cache.Item, so it must not be "restored" a second time here
                # (the old code called .restore() on the value and crashed on
                # every parent-cache hit).
                value = self._parent_cache.get_value(key)
                if value is not None:
                    restored = True
                    if _DEBUG_CACHE:
                        _debug_print('restored value for key "%s" from parent cache' % key)
            if not restored:
                item = Cache.Item.load_from_key(self._store, key)
                if item:
                    self._add_item(item)
                    value = item.restore(self._store, key)
                    if _DEBUG_CACHE:
                        _debug_print('restored value for key "%s" from cache' % key)
            return value

    def put_value(self, key, value):
        """Store *value* under *key*, replacing any previous entry."""
        with self._lock:
            if self._parent_cache:
                # remove value from parent cache, because this cache will now take over
                self._parent_cache.remove_value(key)
            item = self._item_dict.get(key)
            if item:
                self._remove_item(item)
                item.discard(self._store, key)
                if _DEBUG_CACHE:
                    _debug_print('discarded value for key "%s" from cache' % key)
            else:
                item = Cache.Item()
            item.store(self._store, key, value)
            if _DEBUG_CACHE:
                _debug_print('stored value for key "%s" in cache' % key)
            self._add_item(item)

    def remove_value(self, key):
        """Remove *key* from this cache and from the parent cache, if any."""
        with self._lock:
            if self._parent_cache:
                self._parent_cache.remove_value(key)
            item = self._item_dict.get(key)
            if item:
                self._remove_item(item)
                item.discard(self._store, key)
                if _DEBUG_CACHE:
                    _debug_print('Cache: discarded value for key "%s" from parent cache' % key)

    def _add_item(self, item):
        # May trigger a trim to make room before the new size is accounted for.
        self._item_dict[item.key] = item
        self._item_list.append(item)
        if self._size + item.stored_size > self._max_size:
            self.trim(item.stored_size)
        self._size += item.stored_size

    def _remove_item(self, item):
        self._item_dict.pop(item.key)
        self._item_list.remove(item)
        self._size -= item.stored_size

    def trim(self, extra_size=0):
        """
        Evict items (ordered by the replacement policy) until *extra_size* more
        units fit below max_size. Evicted values migrate to the parent cache
        when one is configured.
        """
        if _DEBUG_CACHE:
            _debug_print('trimming...')
        with self._lock:
            self._item_list.sort(key=self._policy)
            keys = []
            size = self._size
            max_size = self._max_size
            for item in self._item_list:
                if size + extra_size > max_size:
                    keys.append(item.key)
                    size -= item.stored_size
        # release lock to give another thread a chance, then acquire again
        with self._lock:
            for key in keys:
                if self._parent_cache:
                    # Before discarding item fully, put its value into the parent cache
                    value = self.get_value(key)
                    self.remove_value(key)
                    if value:
                        self._parent_cache.put_value(key, value)
                else:
                    self.remove_value(key)

    def clear(self, clear_parent=True):
        """
        Remove all items. With clear_parent=False, values are migrated into
        the parent cache instead of being dropped there too.
        """
        with self._lock:
            if self._parent_cache and clear_parent:
                self._parent_cache.clear(clear_parent)
            keys = list(self._item_dict.keys())
        for key in keys:
            if self._parent_cache and not clear_parent:
                value = self.get_value(key)
                if value:
                    self._parent_cache.put_value(key, value)
            self.remove_value(key)
def _debug_print(msg):
    """Print *msg* on stdout, prefixed with the cache's debug tag."""
    print(f"Cache: {msg}")
def _compute_object_size(obj):
    """
    Best-effort size estimate of *obj* for cache accounting.

    Recognizes numpy-like arrays (via ``nbytes``) and PIL-like images
    (via ``size`` + ``mode``); anything else falls back to sys.getsizeof,
    which is shallow.
    """
    if hasattr(obj, 'nbytes'):
        # A numpy ndarray instance
        return obj.nbytes
    if hasattr(obj, 'size') and hasattr(obj, 'mode'):
        # A PIL Image instance: width * height * bytes-per-pixel by mode
        width, height = obj.size
        mode = obj.mode
        if mode in ('RGBA', 'RGBx', 'I', 'F'):
            bytes_per_pixel = 4
        elif mode in ('RGB', 'YCbCr', 'LAB', 'HSV'):
            bytes_per_pixel = 3
        elif mode == '1':
            # 1-bit bilevel image: one eighth of a byte per pixel
            bytes_per_pixel = 1. / 8.
        else:
            bytes_per_pixel = 1
        return width * height * bytes_per_pixel
    return sys.getsizeof(obj)
def parse_mem_size(mem_size_text: str) -> Optional[int]:
    """
    Parse a memory-size string such as ``"250M"`` or ``"2G"`` into a number
    of bytes.

    Decimal (SI) suffixes B, K, M, G, T are recognized, case-insensitively.
    An empty string, "off", "none", "null", "false", or a zero size yields
    None (meaning: disabled).

    :param mem_size_text: the textual memory size
    :return: the size in bytes, or None if disabled
    :raise ValueError: if the text is not a valid size or is negative
    """
    text = mem_size_text.upper()
    if text == "" or text in ("OFF", "NONE", "NULL", "FALSE"):
        return None
    factors = {"B": 10 ** 0, "K": 10 ** 3, "M": 10 ** 6, "G": 10 ** 9, "T": 10 ** 12}
    unit = text[-1]
    try:
        if unit in factors:
            capacity = int(text[:-1]) * factors[unit]
        else:
            capacity = int(text)
    except ValueError:
        raise ValueError(f"invalid memory size: {text!r}")
    if capacity > 0:
        return capacity
    if capacity < 0:
        raise ValueError(f"negative memory size: {text!r}")
    # A size of exactly zero also means "disabled".
    return None
| 32.011086 | 118 | 0.603172 |
ed61a9ec54aa118c7144b7a5095afc6d6cac3efc | 5,376 | py | Python | code/abbreviate.py | jemand2001/knausj_talon | 0b95e5c0a9c3af489d4e7f3e78b25be84be2e65b | [
"Unlicense"
] | null | null | null | code/abbreviate.py | jemand2001/knausj_talon | 0b95e5c0a9c3af489d4e7f3e78b25be84be2e65b | [
"Unlicense"
] | null | null | null | code/abbreviate.py | jemand2001/knausj_talon | 0b95e5c0a9c3af489d4e7f3e78b25be84be2e65b | [
"Unlicense"
] | null | null | null | # XXX - would be nice to be able pipe these through formatters
from talon import Context, Module
# Talon voice-command module: declares the "abbreviation" list and the
# capture rule that other voice commands can reference as <user.abbreviation>.
mod = Module()
mod.list("abbreviation", desc="Common abbreviation")


@mod.capture
def abbreviation(m) -> str:
    "One abbreviation"
ctx = Context()
# Maps each spoken phrase to the abbreviation that gets typed.
# NOTE(review): several phrases intentionally map to the same abbreviation
# (e.g. "config" and "configuration" -> "cfg") for recognition flexibility.
ctx.lists["user.abbreviation"] = {
    "address": "addr",
    "administrator": "admin",
    "administrators": "admins",
    "advance": "adv",
    "advanced": "adv",
    "alberta": "ab",
    "alternative": "alt",
    "application": "app",
    "applications": "apps",
    "argument": "arg",
    "arguments": "args",
    "as far as i can tell": "afaict",
    "as far as i know": "afaik",
    "assembly": "asm",
    "at the moment": "atm",
    "attribute": "attr",
    "attributes": "attrs",
    "authenticate": "auth",
    "authentication": "auth",
    "away from keyboard": "afk",
    "binary": "bin",
    "boolean": "bool",
    "british columbia": "bc",
    "button": "btn",
    "canada": "ca",
    "centimeter": "cm",
    "char": "chr",
    "character": "char",
    "class": "cls",
    "client": "cli",
    "command": "cmd",
    "comment": "cmt",
    "compare": "cmp",
    "conference": "conf",
    "config": "cfg",
    "configuration": "cfg",
    "context": "ctx",
    "control": "ctrl",
    "constant": "const",
    "coordinate": "coord",
    "coordinates": "coords",
    "copy": "cpy",
    "count": "cnt",
    "counter": "ctr",
    "database": "db",
    "declare": "decl",
    "declaration": "decl",
    "decode": "dec",
    "decrement": "dec",
    "debug": "dbg",
    "define": "def",
    "definition": "def",
    "description": "desc",
    "develop": "dev",
    "development": "dev",
    "device": "dev",
    "dictation": "dict",
    "dictionary": "dict",
    "direction": "dir",
    "directory": "dir",
    "distribution": "dist",
    "document": "doc",
    "documents": "docs",
    "double": "dbl",
    "dupe": "dup",
    "duplicate": "dup",
    "dynamic": "dyn",
    "encode": "enc",
    "entry": "ent",
    "enumerate": "enum",
    "environment": "env",
    "escape": "esc",
    "etcetera": "etc",
    "example": "ex",
    "exception": "exc",
    "execute": "exec",
    "expression": "exp",
    "extend": "ext",
    "extension": "ext",
    "file system": "fs",
    "framework": "fw",
    "function": "func",
    "funny": "lol",
    "generic": "gen",
    "generate": "gen",
    "hypertext": "http",
    "history": "hist",
    "image": "img",
    "import table": "iat",
    "import address table": "iat",
    "increment": "inc",
    "information": "info",
    "initialize": "init",
    "initializer": "init",
    "in real life": "irl",
    "instance": "inst",
    "integer": "int",
    "interrupt": "int",
    "iterate": "iter",
    "java archive": "jar",
    "javascript": "js",
    "jason": "json",
    "jump": "jmp",
    "keyboard": "kbd",
    "keyword arguments": "kwargs",
    "keyword": "kw",
    "kilogram": "kg",
    "kilometer": "km",
    "language": "lng",
    "length": "len",
    "library": "lib",
    "manitoba": "mb",
    "markdown": "md",
    "message": "msg",
    "meta sploit": "msf",
    "meta sploit framework": "msf",
    "microphone": "mic",
    "milligram": "mg",
    "millisecond": "ms",
    "miscellaneous": "misc",
    "module": "mod",
    "mount": "mnt",
    "nano second": "ns",
    "neo vim": "nvim",
    "new brunswick": "nb",
    "nova scotia": "ns",
    "number": "num",
    "object": "obj",
    "okay": "ok",
    "ontario": "on",
    "option": "opt",
    "operating system": "os",
    "original": "orig",
    "package": "pkg",
    "parameter": "param",
    "parameters": "params",
    "pico second": "ps",
    "pixel": "px",
    "point": "pt",
    "pointer": "ptr",
    "position": "pos",
    "position independent code": "pic",
    "position independent executable": "pie",
    "previous": "prev",
    "property": "prop",
    "public": "pub",
    "python": "py",
    "quebec": "qc",
    "query string": "qs",
    "random": "rnd",
    "receipt": "rcpt",
    "reference": "ref",
    "references": "refs",
    "register": "reg",
    "registery": "reg",
    "regular expression": "regex",
    "regular expressions": "regex",
    "repel": "repl",
    "represent": "repr",
    "representation": "repr",
    "request": "req",
    "return": "ret",
    "revision": "rev",
    "ruby": "rb",
    "saskatchewan": "sk",
    "service pack": "sp",
    "session id": "sid",
    "shell": "sh",
    "shellcode": "sc",
    "source": "src",
    "special": "spec",
    "specific": "spec",
    "specification": "spec",
    "specify": "spec",
    "standard in": "stdin",
    "standard out": "stdout",
    "standard": "std",
    "string": "str",
    "structure": "struct",
    "synchronize": "sync",
    "synchronous": "sync",
    "system": "sys",
    "table of contents": "toc",
    "table": "tbl",
    "taiwan": "tw",
    "technology": "tech",
    "temperature": "temp",
    "temporary": "tmp",
    "temp": "tmp",
    "text": "txt",
    "time of check time of use": "toctou",
    "token": "tok",
    "ultimate": "ulti",
    "unique id": "uuid",
    "user": "usr",
    "utilities": "utils",
    "utility": "util",
    "value": "val",
    "variable": "var",
    "verify": "vrfy",
    "versus": "vs",
    "visual": "vis",
    "visual studio": "msvc",
    "web": "www",
    "what the fuck": "wtf",
    "window": "win",
}
@ctx.capture(rule="{user.abbreviation}")
def abbreviation(m):
    # The matched list entry is already the abbreviated form; return it verbatim.
    return m.abbreviation
| 23.578947 | 62 | 0.510045 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.