hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfaeea7c17fda917883aa9604c9e563bac4ba0f | 773 | py | Python | protonpack/worker/cli.py | WildflowerSchools/redis-proton-pack | 886e3d79abb98744776c9189764cd4252fe06d78 | [
"MIT"
] | 1 | 2019-01-22T10:31:17.000Z | 2019-01-22T10:31:17.000Z | protonpack/worker/cli.py | WildflowerSchools/redis-proton-pack | 886e3d79abb98744776c9189764cd4252fe06d78 | [
"MIT"
] | null | null | null | protonpack/worker/cli.py | WildflowerSchools/redis-proton-pack | 886e3d79abb98744776c9189764cd4252fe06d78 | [
"MIT"
] | 1 | 2019-01-08T15:17:30.000Z | 2019-01-08T15:17:30.000Z | import time
import click
from spylogger import get_logger
from protonpack.worker import GhostBuster
logger = get_logger()
@click.group()
@click.pass_context
def worker(ctx):
    '''Top-level click command group; subcommands attach via @worker.command.'''
    pass
@worker.command('startup')
@click.option('-s', '--stream', required=True, help="stream name")
@click.option('-c', '--consumer', required=True, help="consumer group name")
@click.option('-i', '--consumerid', required=True, help="consumer id")
@click.pass_context
def startup(ctx, stream, consumer, consumerid):
    '''Start a GhostBuster worker that polls `stream` forever as consumer
    `consumerid` in group `consumer`, sleeping 2 seconds between polls.
    Runs until the process is killed.'''
    logger.info({
        # was an f-string with no placeholders; plain literal is equivalent
        "message": "starting up GhostBuster",
        "consumer": consumer,
        "consumerid": consumerid,
        "stream": stream,
    })
    gb = GhostBuster(stream, consumer, consumerid)
    while True:
        gb.do_poll()
        time.sleep(2)
| 22.735294 | 76 | 0.668823 |
acfaeeb0bed04c1b55c688123f8a7c58d84a6944 | 1,271 | py | Python | test/test_portfolio_coverage_tvp.py | CarbonEdge2021/SBTi-finance-tool | a5dbf1c200a9e80913c34251a918363a054dcb61 | [
"MIT"
] | 26 | 2020-07-24T14:49:24.000Z | 2021-10-13T10:04:52.000Z | test/test_portfolio_coverage_tvp.py | CarbonEdge2021/SBTi-finance-tool | a5dbf1c200a9e80913c34251a918363a054dcb61 | [
"MIT"
] | 128 | 2020-07-27T08:48:27.000Z | 2021-09-25T11:35:22.000Z | test/test_portfolio_coverage_tvp.py | CarbonEdge2021/SBTi-finance-tool | a5dbf1c200a9e80913c34251a918363a054dcb61 | [
"MIT"
] | 15 | 2020-07-31T14:47:07.000Z | 2021-07-26T19:33:07.000Z | import os
import unittest
import pandas as pd
from SBTi.portfolio_aggregation import PortfolioAggregationMethod
from SBTi.portfolio_coverage_tvp import PortfolioCoverageTVP
class TestPortfolioCoverageTVP(unittest.TestCase):
    """
    Test the TVP portfolio coverage (checking which companies have a valid SBTi approved target.
    """
    def setUp(self) -> None:
        """
        Create the portfolio coverage tvp instance.
        :return:
        """
        self.portfolio_coverage_tvp = PortfolioCoverageTVP()
        # Fixture portfolio lives next to this test file under inputs/.
        self.data = pd.read_csv(
            os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "inputs",
                "data_test_portfolio_coverage.csv",
            )
        )
    def test_coverage(self) -> None:
        """
        Test whether the test companies are assigned the right status.
        :return:
        """
        # WATS aggregation method from the SBTi package — presumably a
        # weighted-average scheme; verify against PortfolioAggregationMethod docs.
        coverage = self.portfolio_coverage_tvp.get_portfolio_coverage(
            self.data, PortfolioAggregationMethod.WATS
        )
        # Expected value derived from the CSV fixture; 4 decimal places of tolerance.
        self.assertAlmostEqual(
            coverage, 32.0663, places=4, msg="The portfolio coverage was not correct"
        )
if __name__ == "__main__":
test = TestPortfolioCoverageTVP()
test.setUp()
test.test_coverage()
| 27.042553 | 96 | 0.63572 |
acfaeec2eaeee9b918739d6e2b545b0b9d866791 | 5,765 | py | Python | qa/rpc-tests/rpcbind_test.py | GreenCoinX/greencoin | 318995aa6b13a246e780fed3cb30917e36525da2 | [
"MIT"
] | null | null | null | qa/rpc-tests/rpcbind_test.py | GreenCoinX/greencoin | 318995aa6b13a246e780fed3cb30917e36525da2 | [
"MIT"
] | null | null | null | qa/rpc-tests/rpcbind_test.py | GreenCoinX/greencoin | 318995aa6b13a246e780fed3cb30917e36525da2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014 The GreenCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# Add python-greencoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-greencoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from greencoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
    '''
    Start a node with requested rpcallowip and rpcbind parameters,
    then try to connect, and check if the set of bound addresses
    matches the expected set.
    '''
    # Normalise the expected (addr, port) pairs into the hex form that
    # get_bind_addrs() reports, so they can be compared directly.
    expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
    base_args = ['-disablewallet', '-nolisten']
    if allow_ips:
        base_args += ['-rpcallowip=' + x for x in allow_ips]
    binds = ['-rpcbind='+addr for addr in addresses]
    nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
    try:
        pid = greencoind_processes[0].pid
        # Compare as sets: the order the daemon binds addresses is irrelevant.
        assert_equal(set(get_bind_addrs(pid)), set(expected))
    finally:
        # Always shut the node down, even when the assertion fails.
        stop_nodes(nodes)
        wait_greencoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
    '''
    Start a node with rpcwallow IP, and request getinfo
    at a non-localhost IP.
    '''
    args = ['-disablewallet', '-nolisten']
    for ip in allow_ips:
        args.append('-rpcallowip=' + ip)
    started = start_nodes(1, tmpdir, [args])
    try:
        # connect to node through non-loopback interface
        proxy = AuthServiceProxy("http://rt:rt@%s:%d" % (rpchost, rpcport,))
        proxy.getinfo()
    finally:
        # Drop the reference so the connection is garbage collected and closed.
        proxy = None
        stop_nodes(started)
        wait_greencoinds()
def run_test(tmpdir):
    '''Exercise -rpcbind/-rpcallowip combinations; Linux-only (reads OS network stats).'''
    assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
    # find the first non-loopback interface for testing
    non_loopback_ip = None
    for name,ip in all_interfaces():
        if ip != '127.0.0.1':
            non_loopback_ip = ip
            break
    if non_loopback_ip is None:
        # assert(False) with an explanatory string — intentional fail-fast idiom.
        assert(not 'This test requires at least one non-loopback IPv4 interface')
    print("Using interface %s for testing" % non_loopback_ip)
    defaultport = rpc_port(0)
    # check default without rpcallowip (IPv4 and IPv6 localhost)
    run_bind_test(tmpdir, None, '127.0.0.1', [],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check default with rpcallowip (IPv6 any)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
        [('::0', defaultport)])
    # check only IPv4 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
        [('127.0.0.1', defaultport)])
    # check only IPv4 localhost (explicit) with alternative port
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
        [('127.0.0.1', 32171)])
    # check only IPv4 localhost (explicit) with multiple alternative ports on same host
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
        [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
    # check only IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
        [('::1', defaultport)])
    # check both IPv4 and IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check only non-loopback interface
    run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
        [(non_loopback_ip, defaultport)])
    # Check that with invalid rpcallowip, we are denied
    run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
    try:
        # 1.1.1.1 is not our address, so the connection must be rejected.
        run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
        assert(not 'Connection not denied by rpcallowip as expected')
    except ValueError:
        pass
def main():
    '''Parse options, set up a fresh datadir/chain, run the bind tests, report and exit.'''
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave greencoinds and test.* datadir on exit or error")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing greencoind/greencoin-cli (default: %default%)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()
    # Make the freshly built binaries take precedence on PATH.
    os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
    check_json_precision()
    success = False
    nodes = []
    try:
        print("Initializing test directory "+options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)
        run_test(options.tmpdir)
        success = True
    except AssertionError as e:
        # e.message is Python 2 only — fine here, the shebang declares python2.
        print("Assertion failed: "+e.message)
    except Exception as e:
        print("Unexpected exception caught during testing: "+str(e))
        traceback.print_tb(sys.exc_info()[2])
    if not options.nocleanup:
        print("Cleaning up")
        wait_greencoinds()
        shutil.rmtree(options.tmpdir)
    # Exit code signals overall result to the CI harness.
    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
if __name__ == '__main__':
    main()
| 37.193548 | 110 | 0.647702 |
acfaef8daaa06a551e3f8da27b91895695f04c13 | 20,714 | py | Python | main.py | CeroProgramming/VillageTool | cb6119d33fc3275f500c5492c92be67a577dc8b4 | [
"MIT"
] | null | null | null | main.py | CeroProgramming/VillageTool | cb6119d33fc3275f500c5492c92be67a577dc8b4 | [
"MIT"
] | null | null | null | main.py | CeroProgramming/VillageTool | cb6119d33fc3275f500c5492c92be67a577dc8b4 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''Module for a minecraft villager app in python3'''
#pylint: disable=E0611,W0611,W0201,W0640,C0301,C0200,W0613,R0201
from time import sleep
from functools import partial
from kivy.base import runTouchApp
from kivy.lang import Builder
from kivy.app import App
from kivy.config import Config
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.dropdown import DropDown
from kivy.properties import ObjectProperty
from modules.cjson import JsonHandler
class VillagesWidget(BoxLayout):
    '''Widget to load start screen.'''
    # Child container — presumably bound in the matching .kv file; verify there.
    container = ObjectProperty(None)
class MainWidget(BoxLayout):
    '''Widget to load main screen.'''
    # Class-level Kivy layout defaults for the main screen container.
    size_hint = (1, 1)
    orientation = 'vertical'
    padding = [20, 20, 20, 20]
    spacing = 20
    # Child container — presumably bound in the matching .kv file; verify there.
    container = ObjectProperty(None)
class VillagerWidget(BoxLayout):
    '''Widget to load villager edit screen.'''
    # Child container — presumably bound in the matching .kv file; verify there.
    container = ObjectProperty(None)
class ButtonGrid(GridLayout):
    '''Grid of control buttons.'''
    size_hint = [1, None]
    cols = 3
    padding = [20, 20, 20, 20]
    spacing = [20, 20]
    def __init__(self):
        '''Build the add/remove buttons and the name input, wired to the
        module-level VTA app instance.'''
        super(ButtonGrid, self).__init__()
        add_villager_button = Button(text='Add Villager', size_hint=[0.25, 0.1], font_size=25, background_color=(0, 0.5, 1, 1), background_normal='src/white16x.png')
        self.add_widget(add_villager_button)
        # Lambdas read villager_name_input.text lazily at click time, so the
        # later binding of villager_name_input below is fine (late binding).
        add_villager_button.bind(on_release=lambda x: VTA.add_villager(villager_name_input.text))
        rm_villager_button = Button(text='Remove Villager', size_hint=[0.25, 0.1], font_size=25, background_color=(0, 0.5, 1, 1), background_normal='src/white16x.png')
        self.add_widget(rm_villager_button)
        rm_villager_button.bind(on_release=lambda x: VTA.rm_villager(villager_name_input.text))
        villager_name_input = TextInput(hint_text='Name..', hint_text_color=(1, 1, 1, 1), size_hint=[0.25, 0.1], font_size=35, background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1), multiline=False)
        self.add_widget(villager_name_input)
        # Pressing Enter in the name field also adds a villager.
        villager_name_input.bind(on_text_validate=lambda x: VTA.add_villager(villager_name_input.text))
class VillagerGrid(GridLayout):
    '''Grid for the villagers in the main menu.'''
    cols = 1
    padding = [5, 5, 5, 5]
    spacing = [5, 5]
    size_hint = (1, None)
    def __init__(self):
        '''Create one button per villager in the currently loaded project.'''
        super(VillagerGrid, self).__init__()
        self.buttons = []
        for i in range(len(VTA.villagers)):
            self.buttons.append(Button(id=VTA.villagers[i], text=VTA.villagers[i], size_hint_y=None, height=80, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.buttons[i])
            # partial() binds the loop index eagerly (avoids late-binding bugs).
            self.buttons[i].bind(on_release=partial(self.transmitter, i))
    def transmitter(self, i, instance):
        '''Reload the main screen with the clicked villager selected.'''
        VTA.main(VTA.project, instance.text)
class TradingGrid(GridLayout):
    '''Editable grid of the current villager's trades.
    Each row: demand amount | demand item | "-" | supply item | supply amount | remove button.
    (Attribute names keep the original "amout_*" typo — renaming would break nothing
    externally but is avoided here to keep this a documentation-only change.)'''
    cols = 6
    padding = [10, 10, 10, 10]
    spacing = [10, 10]
    size_hint = (None, None)
    row_force_default = True
    row_default_height = 50
    def __init__(self):
        '''Build one row of inputs per trade of VTA.villager; edits are pushed
        back into the model either on Enter (on_text_validate) or on focus loss.'''
        super(TradingGrid, self).__init__()
        self.amout_demands = []
        self.demands = []
        self.supplys = []
        self.amout_supplys = []
        self.remove_buttons = []
        for i in range(len(VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'])):
            self.amout_demands.append(TextInput(hint_text='Amount', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['amount_demand'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.amout_demands[i])
            self.amout_demands[i].bind(on_text_validate=partial(self.transmitter_amount_demand, i))
            self.amout_demands[i].bind(focus=partial(self.transmitter2_amount_demand, i))
            self.demands.append(TextInput(hint_text='Item', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['demand'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.demands[i])
            self.demands[i].bind(on_text_validate=partial(self.transmitter_demand, i))
            self.demands[i].bind(focus=partial(self.transmitter2_demand, i))
            # Visual separator between the demand and supply halves of the row.
            self.add_widget(Label(text='-', font_size=35))
            self.supplys.append(TextInput(hint_text='Item', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['supply'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.supplys[i])
            self.supplys[i].bind(on_text_validate=partial(self.transmitter_supply, i))
            self.supplys[i].bind(focus=partial(self.transmitter2_supply, i))
            self.amout_supplys.append(TextInput(hint_text='Amount', text=VTA.village[VTA.project]['villagers'][VTA.villager]['tradings'][i]['amount_supply'], hint_text_color=(1, 1, 1, 1), font_size=35, background_color=(0, 0.5, 1, 1), multiline=False, size_hint=(70, 100), size=(70, 100), font_color=(1, 0.98, 0, 1), border=(4, 4, 4, 4), foreground_color=(1, 1, 1, 1)))
            self.add_widget(self.amout_supplys[i])
            self.amout_supplys[i].bind(on_text_validate=partial(self.transmitter_amount_supply, i))
            self.amout_supplys[i].bind(focus=partial(self.transmitter2_amount_supply, i))
            self.remove_buttons.append(Button(text='-', size_hint=(None, None), size=(40, 50), font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.remove_buttons[i])
            self.remove_buttons[i].bind(on_release=partial(self.transmitter_remove, i))
    def transmitter_amount_demand(self, i, instance):
        '''Persist the demand amount of trade i (Enter pressed).'''
        VTA.change_demand_amount(i, instance.text)
    def transmitter_demand(self, i, instance):
        '''Persist the demand item of trade i (Enter pressed).'''
        VTA.change_demand(i, instance.text)
    def transmitter_supply(self, i, instance):
        '''Persist the supply item of trade i (Enter pressed).'''
        VTA.change_supply(i, instance.text)
    def transmitter_amount_supply(self, i, instance):
        '''Persist the supply amount of trade i (Enter pressed).'''
        VTA.change_supply_amount(i, instance.text)
    def transmitter2_amount_demand(self, i, instance, istrue):
        '''Persist the demand amount of trade i when the input loses focus.'''
        if not istrue:
            VTA.change_demand_amount(i, instance.text)
    def transmitter2_demand(self, i, instance, istrue):
        '''Persist the demand item of trade i when the input loses focus.'''
        if not istrue:
            VTA.change_demand(i, instance.text)
    def transmitter2_supply(self, i, instance, istrue):
        '''Persist the supply item of trade i when the input loses focus.'''
        if not istrue:
            VTA.change_supply(i, instance.text)
    def transmitter2_amount_supply(self, i, instance, istrue):
        '''Persist the supply amount of trade i when the input loses focus.'''
        if not istrue:
            VTA.change_supply_amount(i, instance.text)
    def transmitter_remove(self, i, instance):
        '''Delete trade i from the villager and rebuild the edit screen.'''
        VTA.rm_trading(i)
class ProfessionDropDown(DropDown):
    '''DropDown of all professions (read from the app's static data file).'''
    def __init__(self):
        super(ProfessionDropDown, self).__init__()
        self.buttons = []
        for i in range(len(VTA.data['professions'])):
            self.buttons.append(Button(id=VTA.data['professions'][i].capitalize(), text=VTA.data['professions'][i].capitalize(), size_hint_y=None, height=40, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.buttons[i])
            # partial() binds the loop index eagerly (avoids late-binding bugs).
            self.buttons[i].bind(on_release=partial(self.transmitter, i))
    def transmitter(self, i, instance):
        '''Apply the selected profession to the villager currently being edited.'''
        VTA.change_profession(VTA.villager, VTA.data['professions'][i])
class CareerDropDown(DropDown):
    '''DropDown of all careers (read from the app's static data file).'''
    def __init__(self):
        super(CareerDropDown, self).__init__()
        self.buttons = []
        for i in range(len(VTA.data['careers'])):
            self.buttons.append(Button(id=VTA.data['careers'][i].capitalize(), text=VTA.data['careers'][i].capitalize(), size_hint_y=None, height=40, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1)))
            self.add_widget(self.buttons[i])
            # partial() binds the loop index eagerly (avoids late-binding bugs).
            self.buttons[i].bind(on_release=partial(self.transmitter, i))
    def transmitter(self, i, instance):
        '''Apply the selected career to the villager currently being edited.'''
        VTA.change_career(VTA.villager, VTA.data['careers'][i])
class VillageToolApp(App):
    '''Kivy application driving the village editor.
    Owns the JSON-backed data model (self.village) and rebuilds the widget
    tree when switching between the main screen and the villager edit screen.
    The running instance is exposed as the module-level global VTA, which the
    widget classes above call back into.'''
    def build(self):
        '''Load the start screen and the static app data (professions/careers).'''
        self.icon = 'src/minecraft32px.png'
        self.project = str()
        self.file = 'kv/village.kv'
        self.data = JsonHandler.importer('data')
        self.root = Builder.load_file(self.file)
        Window.maximize()
        ####################
        # NOTE(review): hard-coded default project — confirm this is intended.
        self.main('vale', None)
        ####################
    def main(self, project_name, villager):
        '''Load (or create) the project file and build the main screen.
        If `villager` is None, the first villager (if any) fills the quick-view.'''
        if project_name == '':
            return
        self.title = project_name.lower()
        self.project = project_name.lower()
        try:
            self.village = JsonHandler.importer(self.project)
        except FileNotFoundError:
            # First run for this project: create an empty village file on disk.
            JsonHandler.exporter(self.project, {self.project: {'name': self.project, 'villagers': {}}})
            self.village = JsonHandler.importer(self.project)
        self.villagers = list(self.village[self.project]['villagers'].keys())
        if villager is None:
            try:
                villager = self.villagers[0]
            except IndexError:
                pass  # empty village: the quick-view shows 'None' placeholders
        Builder.unload_file(self.file)
        self.root.clear_widgets()
        screen = MainWidget()
        topbox = BoxLayout(size_hint=(1, 1), orientation='horizontal', padding=20, spacing=20)
        quickview = GridLayout(cols=1, padding=[5, 5, 5, 5], spacing=5, size_hint=(1, None))
        if villager is not None:
            # Read-only summary of the selected villager plus an Edit button.
            quickview.add_widget(TextInput(text=villager, font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text=self.village[self.project]['villagers'][villager]['profession'].capitalize(), font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text=self.village[self.project]['villagers'][villager]['career'].capitalize(), font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            edit_button = Button(text='Edit', font_size=30, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), background_normal='src/white16x.png')
            edit_button.bind(on_release=lambda x: self.load_villager(villager))
            quickview.add_widget(edit_button)
        else:
            quickview.add_widget(TextInput(text='None', font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text='None', font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
            quickview.add_widget(TextInput(text='None', font_size=30, readonly=True, multiline=False, size_hint=(70, 100), size=(70, 100), background_color=(0, 0.5, 1, 1), foreground_color=(1, 1, 1, 1)))
        topbox.add_widget(quickview)
        # Scrollable list of all villagers on the right-hand side.
        villager_grid = VillagerGrid()
        villager_grid.bind(minimum_height=villager_grid.setter('height'))
        villager_scroll = ScrollView(pos_hint={'center_x': .5, 'center_y': .5}, do_scroll_x=False)
        villager_scroll.add_widget(villager_grid)
        topbox.add_widget(villager_scroll)
        screen.add_widget(topbox)
        button_grid = ButtonGrid()
        screen.add_widget(button_grid)
        self.root.add_widget(screen)
    def add_villager(self, name):
        '''Create a villager with default profession/career and an empty trade
        list, persist, and rebuild the main screen. Empty names are ignored.'''
        if name != '':
            self.village[self.project]['villagers'][name] = dict()
            self.village[self.project]['villagers'][name]['name'] = name
            self.village[self.project]['villagers'][name]['profession'] = 'none'
            self.village[self.project]['villagers'][name]['career'] = 'none'
            self.village[self.project]['villagers'][name]['tradings'] = list()
            JsonHandler.exporter(self.project, self.village)
            self.main(self.project, None)
    def rm_villager(self, name):
        '''Delete a villager, persist, and rebuild the main screen.
        Unknown names are silently ignored.'''
        try:
            del self.village[self.project]['villagers'][name]
            JsonHandler.exporter(self.project, self.village)
            self.main(self.project, None)
        except KeyError:
            pass
    def load_villager(self, name):
        '''Build the edit screen (name, profession, career, trade grid) for `name`.'''
        self.villager = name
        Builder.unload_file(self.file)
        self.root.clear_widgets()
        self.file = 'kv/villager.kv'
        screen = Builder.load_file(self.file)
        layout = GridLayout(cols=1, padding=[20, 20, 20, 20], spacing=5, size_hint=(1, 1), pos=(150, 10), size=(self.root.width - 300, self.root.height - 20))
        input_name = TextInput(text=name, multiline=False, size_hint_y=None, height=80, font_size=40, font_color=(1, 0.98, 0, 1), foreground_color=(1, 1, 1, 1), background_color=(0, 0.5, 1, 1))
        input_name.bind(on_text_validate=lambda x: self.rename_villager(name, input_name.text))
        layout.add_widget(input_name)
        self.profession_dropdown = ProfessionDropDown()
        profession_button = Button(text=self.village[self.project]['villagers'][name]['profession'].capitalize(), size_hint_y=None, height=50, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1))
        profession_button.bind(on_release=self.profession_dropdown.open)
        layout.add_widget(profession_button)
        self.career_dropdown = CareerDropDown()
        career_button = Button(text=self.village[self.project]['villagers'][name]['career'].capitalize(), size_hint_y=None, height=50, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1))
        career_button.bind(on_release=self.career_dropdown.open)
        layout.add_widget(career_button)
        add_button = Button(text='+', size_hint=(None, None), size=(40, 40), font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1))
        add_button.bind(on_release=lambda x: self.add_trading())
        layout.add_widget(add_button)
        trading_scroll = ScrollView(do_scroll_x=False) #TODO Repair scrollview
        trading_grid = TradingGrid()
        trading_grid.bind(minimum_height=layout.setter('height'))
        trading_scroll.add_widget(trading_grid)
        layout.add_widget(trading_scroll)
        layout.add_widget(Button(text='Back', size_hint_y=None, height=50, font_size=25, background_normal='src/white16x.png', background_color=(1, 0.28, 0, 1), on_release=lambda x: self.main(self.project, None)))
        screen.add_widget(layout)
        self.root.add_widget(screen)
    def rename_villager(self, legacy_name, new_name):
        '''Rename a villager, carrying over profession, career and trades,
        then reload the edit screen. No-op for empty or unchanged names.'''
        if new_name != '' and new_name != legacy_name:
            self.village[self.project]['villagers'][new_name] = dict()
            self.village[self.project]['villagers'][new_name]['name'] = new_name
            self.village[self.project]['villagers'][new_name]['profession'] = self.village[self.project]['villagers'][legacy_name]['profession']
            self.village[self.project]['villagers'][new_name]['career'] = self.village[self.project]['villagers'][legacy_name]['career']
            # BUG FIX: villagers store their trades under 'tradings' (see
            # add_villager/add_trading); the old code copied non-existent
            # 'supplys'/'demands' keys and raised KeyError on every rename.
            self.village[self.project]['villagers'][new_name]['tradings'] = self.village[self.project]['villagers'][legacy_name]['tradings']
            self.rm_villager(legacy_name)
            self.load_villager(new_name)
    def change_profession(self, name, profession):
        '''Set the villager's profession, persist, and reload the edit screen.'''
        self.village[self.project]['villagers'][name]['profession'] = profession
        JsonHandler.exporter(self.project, self.village)
        self.profession_dropdown.dismiss()
        self.load_villager(name)
    def change_career(self, name, career):
        '''Set the villager's career, persist, and reload the edit screen.'''
        self.village[self.project]['villagers'][name]['career'] = career
        JsonHandler.exporter(self.project, self.village)
        self.career_dropdown.dismiss()
        self.load_villager(name)
    def add_trading(self):
        '''Append an empty trade row to the current villager, persist, reload.'''
        empty_trading = dict()
        empty_trading['amount_demand'] = str()
        empty_trading['amount_supply'] = str()
        empty_trading['demand'] = str()
        empty_trading['supply'] = str()
        self.village[self.project]['villagers'][self.villager]['tradings'].append(empty_trading)
        JsonHandler.exporter(self.project, self.village)
        self.load_villager(self.villager)
    def rm_trading(self, index):
        '''Remove trade `index` from the current villager, persist, reload.'''
        try:
            self.village[self.project]['villagers'][self.villager]['tradings'].remove(self.village[self.project]['villagers'][self.villager]['tradings'][index])
            JsonHandler.exporter(self.project, self.village)
            self.load_villager(self.villager)
        except ValueError as e:
            print(e)
    def change_demand_amount(self, index, amount):
        '''Store the demand amount string for trade `index` and persist.'''
        try:
            self.village[self.project]['villagers'][self.villager]['tradings'][index]['amount_demand'] = amount
            JsonHandler.exporter(self.project, self.village)
        except ValueError:
            pass
    def change_supply_amount(self, index, amount):
        '''Store the supply amount string for trade `index` and persist.'''
        try:
            self.village[self.project]['villagers'][self.villager]['tradings'][index]['amount_supply'] = amount
            JsonHandler.exporter(self.project, self.village)
        except ValueError:
            pass
    def change_demand(self, index, item):
        '''Store the demand item string for trade `index` and persist.'''
        try:
            self.village[self.project]['villagers'][self.villager]['tradings'][index]['demand'] = item
            JsonHandler.exporter(self.project, self.village)
        except ValueError:
            pass
    def change_supply(self, index, item):
        '''Store the supply item string for trade `index` and persist.'''
        try:
            self.village[self.project]['villagers'][self.villager]['tradings'][index]['supply'] = item
            JsonHandler.exporter(self.project, self.village)
        except ValueError:
            pass
if __name__ == '__main__':
    # VTA is the module-level app instance the widget classes above call into.
    VTA = VillageToolApp()
    VTA.run()
| 45.030435 | 369 | 0.656899 |
acfaef957760d8b6bdf33aecb9288d1533fb6dde | 6,861 | py | Python | postProcess.py | nikotatomir/UnsteadyDiscreteLumpedVortexMethodSolver | 368b45384eff6321718c9be199c4e9943e5a6e26 | [
"MIT"
] | 1 | 2022-02-20T21:18:51.000Z | 2022-02-20T21:18:51.000Z | postProcess.py | nikotatomir/unsteadyDiscreteLumpedVortexMethodSolver | 368b45384eff6321718c9be199c4e9943e5a6e26 | [
"MIT"
] | null | null | null | postProcess.py | nikotatomir/unsteadyDiscreteLumpedVortexMethodSolver | 368b45384eff6321718c9be199c4e9943e5a6e26 | [
"MIT"
] | null | null | null | import math
import numpy as np
import scipy.special
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from parameters import *
def gridlines():
    '''Enable minor ticks and draw grey major/minor gridlines on both axes
    of the current matplotlib axes.'''
    plt.minorticks_on()
    # Major lines are slightly heavier than minor ones; same style and colour.
    for which, width in (('major', 0.75), ('minor', 0.25)):
        for axis in ('x', 'y'):
            plt.grid(zorder=0, which=which, axis=axis,
                     linewidth=width, linestyle='-', color='0.75')
def impulsiveFlatPlatePlot(wingSystemProperties):
    '''Plot circulation and lift of an impulsively started flat plate,
    non-dimensionalised by their steady-state values, against the Wagner
    function, and save the figure to unsteadyLiftAndCirculation.png.
    (Free variables time, UinertialEarth, c, pitchSSrad come from the
    `parameters` star import.)'''
    print ("\nPlotting Unsteady Lift & Circulation Of Impulsively Started Flat Plate...")
    nonDimTimeList = []
    nonDimTotalPanelCirculationList = []
    nonDimTotalPanelLiftCoefficientList = []
    wagnerList = []
    # Steady-state thin-airfoil references: cl = 2*pi*alpha, Gamma = pi*alpha*c*U.
    totalLiftCoefficient_SS = 2*np.pi*pitchSSrad
    totalCirculation_SS = np.pi*pitchSSrad*c*abs(UinertialEarth)
    for i in range(1,len(time)):
        nonDimTimeList.append(2*time[i]*abs(UinertialEarth)/c)
        # Two-exponential (Jones-type) approximation of the Wagner function;
        # index i-1 is the non-dimensional time just appended above.
        wagnerList.append( 1 - (0.165*math.exp(-0.045*nonDimTimeList[i-1])) - (0.335*math.exp(-0.3*nonDimTimeList[i-1])) )
        nonDimTotalPanelCirculationList.append( wingSystemProperties[i].totalPanelCirculation / totalCirculation_SS )
        nonDimTotalPanelLiftCoefficientList.append( wingSystemProperties[i].totalPanelLiftCoefficient / totalLiftCoefficient_SS )
    fig = plt.figure(3)
    ax = fig.add_subplot(111)
    ax.plot(nonDimTimeList, wagnerList, 'k-', linewidth = 0.5, label = '$Wagner$ $Function$')
    ax.plot(nonDimTimeList, nonDimTotalPanelCirculationList, 'b-', linewidth = 0.5, label = '$\\frac{\Gamma}{\Gamma_{\infty}}$')
    ax.plot(nonDimTimeList, nonDimTotalPanelLiftCoefficientList, 'm-', linewidth = 0.5, label = '$\\frac{c_l}{c_{l,\infty}}$')
    gridlines()
    ax.set_xlabel("Non-Dimensional Time (2Ut/c)")
    ax.set_ylabel("Non-Dimensional Circulation $\\frac{\Gamma}{\Gamma_{\infty}}$ & Lift Coefficient $\\frac{c_l}{c_{l,\infty}}$")
    ax.set_title("Impulsively Started Airfoil")
    ax.legend()
    ax.set_xlim(0, nonDimTimeList[-1])
    ax.set_ylim(0,1.2)
    plt.savefig("unsteadyLiftAndCirculation.png", bbox_inches='tight', dpi = 250)
    print ("\nUnsteady Lift & Circulation Of Impulsively Started Flat Plate Plotted")
def harmonicOscillationPlot(wingKinematics, wingSystemProperties):
    '''Plot the unsteady lift of a harmonically pitching airfoil over its last
    oscillation period against the Theodorsen solution and the quasi-steady
    lift, and save the figure to unsteadyLift.png. (Free variables reducedFreq,
    rotationPt, c, pitchSSrad, pitchAmpRad, pitchFreqHz, pitchFreqRad, deltaT,
    time come from the `parameters` star import.)'''
    print ("\nPlotting Unsteady Lift Of Harmonically Oscillating Airfoil...")
    # Theodorsen's function C(k) built from Hankel functions of the 2nd kind.
    theodorsenFunc = scipy.special.hankel2(1,reducedFreq) / ( scipy.special.hankel2(1,reducedFreq) + ( 1j*scipy.special.hankel2(0,reducedFreq) ) )
    # Pitch-axis location non-dimensionalised by the half-chord.
    a = ( rotationPt - (c/2) ) / (c/2)
    clalpha_SS = 2*np.pi
    theoCoeff = ( 1j*np.pi*reducedFreq ) + (a*np.pi*(reducedFreq**2)) + (clalpha_SS*theodorsenFunc) + ( (clalpha_SS*theodorsenFunc)*(1j*reducedFreq*(0.5-a)) )
    aoaList = []
    totalPanelLiftCoefficientList = []
    theodorsenLiftReal = []
    theodorsenAOAreal= []
    totalLiftCoefficient_SS = []
    period = 1/pitchFreqHz
    # Negative index pointing at the start of the last full period of samples.
    lastPeriodIndexStart = -period/deltaT - 2
    for i in range(int(lastPeriodIndexStart),0):
        aoaList.append( math.degrees(wingKinematics[i].pitchDisp + pitchSSrad))
        totalPanelLiftCoefficientList.append( wingSystemProperties[i].totalPanelLiftCoefficient )
        # Complex-exponential Theodorsen lift and AoA; only the real parts are plotted.
        theodorsenLift = (clalpha_SS*pitchSSrad) + (theoCoeff*pitchAmpRad*np.exp(1j*pitchFreqRad*time[i]))
        theodorsenAOA = pitchSSrad + ( pitchAmpRad*np.exp(1j*pitchFreqRad*time[i]) )
        theodorsenLiftReal.append( np.real(theodorsenLift) )
        theodorsenAOAreal.append( math.degrees( np.real(theodorsenAOA) ) )
        totalLiftCoefficient_SS.append( 2*np.pi*(wingKinematics[i].pitchDisp + pitchSSrad))
    fig = plt.figure(4)
    ax = fig.add_subplot(111)
    ax.plot(aoaList, totalPanelLiftCoefficientList, 'm-', linewidth = 0.5, label = '$Unsteady$ $Lift$ $Coefficient$')
    ax.plot(theodorsenAOAreal, theodorsenLiftReal, 'r-', linewidth = 0.5, label = '$Unsteady$ $Theodorsen$ $Lift$ $Coefficient$')
    ax.plot(aoaList, totalLiftCoefficient_SS, 'b-', linewidth = 0.5, label = '$Steady$ $Lift$ $Coefficient$' )
    gridlines()
    ax.set_xlabel("Angle of Attack $\\theta$ (in degrees)")
    ax.set_ylabel("Lift Coefficient $c_l$ (-)")
    ax.set_title(f"Harmonically Oscillating Airfoil\nReduced Freq. $k={np.round(reducedFreq,3)}$, Rotation Pt $a={rotationPt}c$, Chord $c={c}m$")
    ax.legend()
    ax.set_xlim(np.round(min(aoaList)), np.round(max(aoaList)))
    #ax.set_ylim(0.0,0.25)
    plt.savefig("unsteadyLift.png", bbox_inches='tight', dpi = 250)
    print ("\nUnsteady Lift Of Harmonically Oscillating Airfoil Plotted")
def animationMovieKinematics(wingKinematics):
    """Render a GIF animation of the 2D wing motion and shed wake.

    For each frame the panel end points, bound-vortex points and evaluation
    points of the wing are redrawn, and the free (shed) vortex points of all
    frames up to the current one are accumulated to show the wake.
    Writes 'airfoilMotionAnimation.gif'.

    Parameters:
    wingKinematics -- indexable sequence of per-timestep kinematics objects;
        reads .panelEndPtsEarth, .panelVortexPtsEarth, .panelEvalPtsEarth
        (all Nx2 arrays of earth-frame x/z coords) and
        .unknownShedVortexPtEarth (a 2-element point).
        Indexing starts at 1, so element 0 is skipped — presumably the
        initial state before the first shed vortex; TODO confirm.
    """
    print ("\nCreating Animation...")
    # set up the figure and subplot
    fig = plt.figure(2,figsize=(32.0,12.0))
    # NOTE(review): set_window_title on the canvas is deprecated in newer
    # matplotlib (use fig.canvas.manager.set_window_title) — confirm the
    # pinned matplotlib version before changing.
    fig.canvas.set_window_title('Matplotlib Animation')
    ax = fig.add_subplot(111, aspect='equal', autoscale_on = False, xlim=(-15,2), ylim=(-2,2))
    ax.grid()
    ax.set_title('2D Wing Motion Animation', fontsize = 22)
    ax.set_xlabel('Distance (m)', fontsize = 18)
    # empty line artists, filled per-frame by animate()
    line, = ax.plot([],[], 'r|-', lw=1, label = 'Panel End Points')
    line2, = ax.plot([],[], 'b.', markersize=8, label = 'Bound Vortex Points')
    line3, = ax.plot([],[], 'c.', markersize=8, label = 'Evaluation Points')
    line4, = ax.plot([],[], 'g.', markersize=2, label = 'Free Vortex Points')
    ax.legend(fontsize = 18)
    def init():
        # blank all artists; required by FuncAnimation with blit=True
        line.set_data([],[])
        line2.set_data([],[])
        line3.set_data([],[])
        line4.set_data([],[])
        return line, line2, line3, line4,
    def animate(i):
        # draw frame i using the kinematic state at timestep i+1
        #print (i)
        x_points = wingKinematics[i+1].panelEndPtsEarth[:,0]
        z_points = wingKinematics[i+1].panelEndPtsEarth[:,1]
        line.set_data(x_points, z_points)
        x_points = wingKinematics[i+1].panelVortexPtsEarth[:,0]
        z_points = wingKinematics[i+1].panelVortexPtsEarth[:,1]
        line2.set_data(x_points, z_points)
        x_points = wingKinematics[i+1].panelEvalPtsEarth[:,0]
        z_points = wingKinematics[i+1].panelEvalPtsEarth[:,1]
        line3.set_data(x_points, z_points)
        # accumulate every shed vortex released so far (the wake)
        x_points = []
        z_points = []
        for j in range(i+1):
            x_points.append(wingKinematics[j+1].unknownShedVortexPtEarth[0])
            z_points.append(wingKinematics[j+1].unknownShedVortexPtEarth[1])
        line4.set_data(x_points, z_points)
        return line, line2, line3, line4,
    ani = animation.FuncAnimation(fig, animate, init_func=init, frames=len(wingKinematics)-1, interval=1, blit=True, repeat=False)
    ani.save('airfoilMotionAnimation.gif', fps = 30)
    print ("\nAnimation Created")
acfaf09346508e420aa336fbbab2a32edbf768ff | 15,126 | py | Python | nets/resnet_imagenet.py | wangjunxiao/unlearning | d34fdceb1a37ee6beb08747f45b5c0f3be1c5970 | [
"MIT"
] | null | null | null | nets/resnet_imagenet.py | wangjunxiao/unlearning | d34fdceb1a37ee6beb08747f45b5c0f3be1c5970 | [
"MIT"
] | null | null | null | nets/resnet_imagenet.py | wangjunxiao/unlearning | d34fdceb1a37ee6beb08747f45b5c0f3be1c5970 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from collections import OrderedDict
from nets.base_models import MyNetwork
def conv3x3(in_planes, out_planes, stride=1, groups=1, padding=1):
    """3x3 convolution with padding and no bias (BatchNorm supplies the shift).

    Fix: `groups` was previously accepted but silently ignored (never passed
    to nn.Conv2d); it is now forwarded. The default groups=1 preserves the
    behavior of every existing call site.
    """
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=padding, groups=groups, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    pointwise = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False)
    return pointwise
class BasicBlock(nn.Module):
    """Residual block of two 3x3 convolutions with pruning-aware widths.

    When `planes_2` is 0 the second convolution maps back to `inplanes`,
    so the residual addition works without a downsample branch.
    """

    def __init__(self, inplanes, planes_1, planes_2=0, stride=1, downsample=None, norm_layer=None):
        super(BasicBlock, self).__init__()
        bn_cls = nn.BatchNorm2d if norm_layer is None else norm_layer
        # width of the block output; 0 means "same as the input"
        out_planes = inplanes if planes_2 == 0 else planes_2
        act = nn.ReLU(inplace=True)
        self.relu = act
        self.conv1 = nn.Sequential(OrderedDict([
            ('conv', conv3x3(inplanes, planes_1, stride)),
            ('bn', bn_cls(planes_1)),
            ('relu', act),
        ]))
        self.conv2 = nn.Sequential(OrderedDict([
            ('conv', conv3x3(planes_1, out_planes)),
            ('bn', bn_cls(out_planes)),
        ]))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        # shortcut path: identity, or the provided downsample projection
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, pruning-aware.

    When `planes_3` is 0 the final 1x1 convolution maps back to `inplanes`,
    so the residual addition needs no downsample branch.
    """

    def __init__(self, inplanes, planes_1, planes_2, planes_3=0, stride=1, downsample=None, norm_layer=None):
        super(Bottleneck, self).__init__()
        bn_cls = nn.BatchNorm2d if norm_layer is None else norm_layer
        # width of the block output; 0 means "same as the input"
        out_planes = inplanes if planes_3 == 0 else planes_3
        act = nn.ReLU(inplace=True)
        self.relu = act
        self.conv1 = nn.Sequential(OrderedDict([
            ('conv', conv1x1(inplanes, planes_1)),
            ('bn', bn_cls(planes_1)),
            ('relu', act),
        ]))
        self.conv2 = nn.Sequential(OrderedDict([
            ('conv', conv3x3(planes_1, planes_2, stride)),
            ('bn', bn_cls(planes_2)),
            ('relu', act),
        ]))
        self.conv3 = nn.Sequential(OrderedDict([
            ('conv', conv1x1(planes_2, out_planes)),
            ('bn', bn_cls(out_planes)),
        ]))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.conv3(self.conv2(self.conv1(x)))
        # shortcut path: identity, or the provided downsample projection
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class ResNet_ImageNet(MyNetwork):
    """Channel-configurable ResNet (depths 18/34/50) for ImageNet.

    `cfg` is a flat list of per-convolution channel widths used for pruning
    experiments; when omitted, the unpruned widths from `cfgs_base` are
    used. `cfg2params` and `cfg2flops` estimate model size and compute for a
    candidate `cfg` without building the network, mirroring the exact cfg
    slicing done in `__init__`.
    """
    def __init__(self, cfg=None, depth=18, block=BasicBlock, num_classes=1000):
        """Build the network.

        Parameters:
        cfg -- flat list of channel widths (one entry per conv); None selects
            the default widths for `depth`
        depth -- one of 18, 34, 50 (any other value leaves `blocks`/`_cfg`
            unset and fails below)
        block -- nominally the block class, but it is overwritten by the
            depth branch below, so the argument is effectively ignored
        num_classes -- size of the final fully-connected layer
        """
        super(ResNet_ImageNet, self).__init__()
        # default (unpruned) per-conv channel widths for each supported depth
        self.cfgs_base = {18: [64, 64, 64, 64, 128, 128, 128, 256, 256, 256, 512, 512, 512],
                          34: [64, 64, 64, 64, 64, 128, 128, 128, 128, 128, 256, 256, 256, 256, 256, 256, 256, 512, 512, 512, 512],
                          50: [64, 64, 64, 256, 64, 64, 64, 64, 128, 128, 512, 128, 128, 128, 128, 128, 128, 256, 256, 1024, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 512, 512, 2048, 512, 512, 512, 512]}
        if depth==18:
            block = BasicBlock
            blocks = [2, 2, 2, 2]
            _cfg = self.cfgs_base[18]
        elif depth==34:
            block = BasicBlock
            blocks = [3, 4, 6, 3]
            _cfg = self.cfgs_base[34]
        elif depth==50:
            block = Bottleneck
            blocks = [3, 4, 6, 3]
            _cfg = self.cfgs_base[50]
        # NOTE(review): `== None` works but `is None` is the idiomatic test
        if cfg == None:
            cfg = _cfg
        norm_layer = nn.BatchNorm2d
        self.num_classes = num_classes
        self._norm_layer = norm_layer
        self.depth = depth
        self.cfg = cfg
        # self.inplanes tracks the current input width; mutated by _make_layer
        self.inplanes = cfg[0]
        self.blocks = blocks
        # stem: 7x7/2 conv + BN + ReLU, then 3x3/2 max pool
        self.conv1 = nn.Sequential(OrderedDict([('conv', nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)),
                                                ('bn', norm_layer(self.inplanes)),
                                                ('relu', nn.ReLU(inplace=True))]))
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # slice `cfg` into the four stages; BasicBlock stages consume
        # (1 + blocks[i]) widths each, Bottleneck stages (1 + 2*blocks[i])
        if depth!=50:
            self.layer1 = self._make_layer(block, cfg[1 : blocks[0]+2], blocks[0])
            self.layer2 = self._make_layer(block, cfg[blocks[0]+2 : blocks[0]+2+blocks[1]+1], blocks[1], stride=2,)
            self.layer3 = self._make_layer(block, cfg[blocks[0]+blocks[1]+3 : blocks[0]+blocks[1]+blocks[2]+4], blocks[2], stride=2,)
            self.layer4 = self._make_layer(block, cfg[blocks[0]+blocks[1]+blocks[2]+4: ], blocks[3], stride=2,)
            self.fc = nn.Linear(cfg[blocks[0]+blocks[1]+blocks[2]+5], num_classes)
        else:
            self.layer1 = self._make_layer(block, cfg[1 : 2*blocks[0]+2], blocks[0])
            self.layer2 = self._make_layer(block, cfg[2*blocks[0]+2 : 2*blocks[0]+2+2*blocks[1]+1], blocks[1], stride=2,)
            self.layer3 = self._make_layer(block, cfg[2*blocks[0]+2*blocks[1]+3 : 2*blocks[0]+2*blocks[1]+2*blocks[2]+4], blocks[2], stride=2,)
            self.layer4 = self._make_layer(block, cfg[2*blocks[0]+2*blocks[1]+2*blocks[2]+4: ], blocks[3], stride=2,)
            self.fc = nn.Linear(cfg[2*blocks[0]+2*blocks[1]+2*blocks[2]+6], num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage.

        Parameters:
        block -- BasicBlock or Bottleneck class
        planes -- the slice of `cfg` holding this stage's channel widths
        blocks -- number of residual blocks in the stage
        stride -- stride of the first block (2 for stages 2-4)

        Mutates self.inplanes to the stage's output width.
        """
        norm_layer = self._norm_layer
        downsample = None
        if self.depth == 50:
            first_planes = planes[0:3]
            # downsample at each 1'st layer, for pruning
            downsample = nn.Sequential(OrderedDict([('conv', conv1x1(self.inplanes, first_planes[-1], stride)),
                                                    ('bn', norm_layer(first_planes[-1]))]))
            layers = []
            layers.append(block(self.inplanes, first_planes[0], first_planes[1], first_planes[2], stride, downsample, norm_layer))
            self.inplanes = first_planes[-1]
            # two widths per remaining bottleneck block (1x1 and 3x3 widths)
            later_planes = planes[3:3+2*(blocks-1)]
            for i in range(1, blocks):
                layers.append(block(self.inplanes, later_planes[2*(i-1)], later_planes[2*(i-1)+1], norm_layer=norm_layer))
            return nn.Sequential(*layers)
        else:
            first_planes = planes[0:2]
            # downsample at each 1'st layer, for pruning
            downsample = nn.Sequential(OrderedDict([('conv', conv1x1(self.inplanes, first_planes[-1], stride)),
                                                    ('bn', norm_layer(first_planes[-1]))]))
            layers = []
            layers.append(block(self.inplanes, first_planes[0], first_planes[1], stride, downsample, norm_layer))
            self.inplanes = first_planes[-1]
            # one width per remaining basic block (its middle conv)
            later_planes = planes[2:2+blocks-1]
            for i in range(1, blocks):
                layers.append(block(self.inplanes, later_planes[i-1], norm_layer=norm_layer))
            return nn.Sequential(*layers)
    def cfg2params(self, cfg):
        """Estimate the parameter count for a candidate `cfg`.

        Counts conv weights plus 2 BN parameters (scale+shift) per channel,
        and the final FC layer (weights + biases). Mirrors the cfg slicing
        in __init__; does not build any modules.
        """
        blocks = self.blocks
        params = 0.
        params += (3 * 7 * 7 * cfg[0] + 2 * cfg[0]) # first layer
        inplanes = cfg[0]
        if self.depth != 50:
            sub_cfgs = [cfg[1 : blocks[0]+2],
                        cfg[blocks[0]+2 : blocks[0]+2+blocks[1]+1],
                        cfg[blocks[0]+blocks[1]+3 : blocks[0]+blocks[1]+blocks[2]+4],
                        cfg[blocks[0]+blocks[1]+blocks[2]+4: ]]
        else:
            sub_cfgs = [cfg[1 : 2*blocks[0]+2],
                        cfg[2*blocks[0]+2 : 2*blocks[0]+2+2*blocks[1]+1],
                        cfg[2*blocks[0]+2*blocks[1]+3 : 2*blocks[0]+2*blocks[1]+2*blocks[2]+4],
                        cfg[2*blocks[0]+2*blocks[1]+2*blocks[2]+4: ]]
        for i in range(4):
            planes = sub_cfgs[i]
            if self.depth != 50:
                first_planes = planes[0:2]
                later_planes = planes[2:2+blocks[i]-1]
            else:
                first_planes = planes[0:3]
                later_planes = planes[3:3+2*(blocks[i]-1)]
            params += (inplanes * 1 * 1 * first_planes[-1] + 2 * first_planes[-1]) # downsample layer
            if self.depth != 50:
                params += (inplanes * 3 * 3 * first_planes[0] + 2 * first_planes[0])
                params += (first_planes[0] * 3 * 3 * first_planes[1] + 2 * first_planes[1])
            else:
                params += (inplanes * 1 * 1 * first_planes[0] + 2 * first_planes[0])
                params += (first_planes[0] * 3 * 3 * first_planes[1] + 2 * first_planes[1])
                params += (first_planes[1] * 1 * 1 * first_planes[2] + 2 * first_planes[2])
            for j in range(1, self.blocks[i]):
                inplanes = first_planes[-1]
                if self.depth != 50:
                    params += (inplanes * 3 * 3 * later_planes[j-1] + 2 * later_planes[j-1])
                    params += (later_planes[j-1] * 3 * 3 * inplanes + 2 * inplanes)
                else:
                    params += (inplanes * 1 * 1 * later_planes[2*(j-1)] + 2 * later_planes[2*(j-1)])
                    params += (later_planes[2*(j-1)] * 3 * 3 * later_planes[2*(j-1)+1] + 2 * later_planes[2*(j-1)+1])
                    params += (later_planes[2*(j-1)+1] * 1 * 1 * inplanes + 2 * inplanes)
        # final fully-connected layer: weights + biases
        if self.depth==50:
            params += (cfg[2*blocks[0]+2*blocks[1]+2*blocks[2]+6] + 1) * self.num_classes
        else:
            params += (cfg[blocks[0]+blocks[1]+blocks[2]+5] + 1) * self.num_classes
        return params
    def cfg2flops(self, cfg): # to simplify, only count convolution flops
        """Estimate FLOPs for a candidate `cfg` at 224x224 input.

        Counts conv multiply-adds plus a nominal 5 ops/pixel/channel for
        BN+ReLU; `size` tracks the spatial resolution through the strides.
        Mirrors the cfg slicing in __init__.
        """
        blocks = self.blocks
        flops = 0.
        size = 224
        size /= 2 # first conv layer s=2
        flops += (3 * 7 * 7 * cfg[0] * size * size + 5 * cfg[0] * size * size) # first layer, conv+bn+relu
        inplanes = cfg[0]
        size /= 2 # pooling s=2
        flops += (3 * 3 * cfg[0] * size * size) # maxpooling
        if self.depth != 50:
            sub_cfgs = [cfg[1 : blocks[0]+2],
                        cfg[blocks[0]+2 : blocks[0]+2+blocks[1]+1],
                        cfg[blocks[0]+blocks[1]+3 : blocks[0]+blocks[1]+blocks[2]+4],
                        cfg[blocks[0]+blocks[1]+blocks[2]+4: ]]
        else:
            sub_cfgs = [cfg[1 : 2*blocks[0]+2],
                        cfg[2*blocks[0]+2 : 2*blocks[0]+2+2*blocks[1]+1],
                        cfg[2*blocks[0]+2*blocks[1]+3 : 2*blocks[0]+2*blocks[1]+2*blocks[2]+4],
                        cfg[2*blocks[0]+2*blocks[1]+2*blocks[2]+4: ]]
        for i in range(4): # each layer
            planes = sub_cfgs[i]
            if self.depth != 50:
                first_planes = planes[0:2]
                later_planes = planes[2:2+blocks[i]-1]
            else:
                first_planes = planes[0:3]
                later_planes = planes[3:3+2*(blocks[i]-1)]
            if i in [1, 2, 3]:
                size /= 2
            flops += (inplanes * 1 * 1 * first_planes[-1] + 5 * first_planes[-1]) * size * size # downsample layer
            if self.depth != 50:
                flops += (inplanes * 3 * 3 * first_planes[0] + 5 * first_planes[0]) * size * size
                flops += (first_planes[0] * 3 * 3 * first_planes[1] + 5 * first_planes[1]) * size * size
            else:
                # bottleneck's first 1x1 runs at the pre-stride resolution
                size *= 2
                flops += (inplanes * 1 * 1 * first_planes[0] + 5 * first_planes[0]) * size * size
                size /= 2
                flops += (first_planes[0] * 3 * 3 * first_planes[1] + 5 * first_planes[1]) * size * size
                flops += (first_planes[1] * 1 * 1 * first_planes[2] + 5 * first_planes[2]) * size * size
            for j in range(1, self.blocks[i]):
                inplanes = first_planes[-1]
                if self.depth != 50:
                    flops += (inplanes * 3 * 3 * later_planes[j-1] + 5 * later_planes[j-1]) * size * size
                    flops += (later_planes[j-1] * 3 * 3 * inplanes + 5 * inplanes) * size * size
                else:
                    flops += (inplanes * 1 * 1 * later_planes[2*(j-1)] + 5 * later_planes[2*(j-1)]) * size * size
                    flops += (later_planes[2*(j-1)] * 3 * 3 * later_planes[2*(j-1)+1] + 5 * later_planes[2*(j-1)+1]) * size * size
                    flops += (later_planes[2*(j-1)+1] * 1 * 1 * inplanes + 5 * inplanes) * size * size
        flops += (2 * cfg[-1] + 1) * self.num_classes
        return flops
    # --- dead exploratory code from an earlier flops formulation, kept as-is ---
    # flops += (inplanes * 1 * 1 * cfg[i+1] * self.expansion * size * size + 5 * cfg[i+1] * self.expansion * size * size) # downsample layer, conv+bn
    # if self.expansion == 1:
    #     flops += (inplanes * 3 * 3 * cfg[i+1] + 5 * cfg[i+1]) * size * size # conv+bn+relu
    #     flops += (cfg[i+1] * 3 * 3 * cfg[i+1] + 5 * cfg[i+1]) * size * size
    # elif self.expansion == 4:
    #     size *= 2
    #     flops += (inplanes * 1 * 1 * cfg[i+1] + 5 * cfg[i+1]) * size * size
    #     size /= 2
    #     flops += (cfg[i+1] * 3 * 3 * cfg[i+1] + 5 * cfg[i+1]) * size * size
    #     flops += (cfg[i+1] * 1 * 1 * cfg[i+1] * self.expansion + 5 * cfg[i+1] * self.expansion) * size * size
    # flops += cfg[i+1] * self.expansion * size * size * 2 # relu+add
    # for _ in range(1, self.blocks[i]):
    #     inplanes = self.expansion * cfg[i+1]
    #     if self.expansion == 1:
    #         flops += (inplanes * 3 * 3 * cfg[i+1] + 5 * cfg[i+1]) * size * size
    #         flops += (cfg[i+1] * 3 * 3 * cfg[i+1] + 5 * cfg[i+1]) * size * size
    #     elif self.expansion == 4:
    #         flops += (inplanes * 1 * 1 * cfg[i+1] + 5 * cfg[i+1]) * size * size
    #         flops += (cfg[i+1] * 3 * 3 * cfg[i+1] + 5 * cfg[i+1]) * size * size
    #         flops += (cfg[i+1] * 1 * 1 * cfg[i+1] * self.expansion + 5 * cfg[i+1] * self.expansion) * size * size
    #     flops += cfg[i+1] * self.expansion * size * size * 2
    # flops += (2 * cfg[-1] * self.expansion - 1) * self.num_classes
    # return flops
    def forward(self, x):
        """Standard ResNet forward pass: stem, four stages, pool, classify."""
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
    @property
    def config(self):
        # summary dict used for logging / serialization of the architecture
        return {
            'name': self.__class__.__name__,
            'depth': self.depth,
            'cfg': self.cfg,
            'cfg_base': self.cfgs_base[self.depth],
            'dataset': 'ImageNet',
        }
def ResNet18():
    """Construct a ResNet-18 for ImageNet with the default base config."""
    model = ResNet_ImageNet(depth=18)
    return model
def ResNet34():
    """Construct a ResNet-34 for ImageNet with the default base config."""
    model = ResNet_ImageNet(depth=34)
    return model
def ResNet50():
    """Construct a ResNet-50 for ImageNet with the default base config."""
    model = ResNet_ImageNet(depth=50)
    return model
| 48.951456 | 211 | 0.517784 |
acfaf0bb97239c2e603b06a7641ad8722fe144ef | 260 | py | Python | tests/artificial/transf_Logit/trend_PolyTrend/cycle_0/ar_12/test_artificial_128_Logit_PolyTrend_0_12_0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Logit/trend_PolyTrend/cycle_0/ar_12/test_artificial_128_Logit_PolyTrend_0_12_0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Logit/trend_PolyTrend/cycle_0/ar_12/test_artificial_128_Logit_PolyTrend_0_12_0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and evaluate one artificial series: 128 daily points, polynomial
# trend, no cycle, Logit transform, AR order 12, zero noise, no exogenous data.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "Logit", sigma = 0.0, exog_count = 0, ar_order = 12);
acfaf1803b2d88eb25a16ac3105ec97eebac2450 | 6,354 | py | Python | 4p-sequence-align.py | wezil/algorithmic-thinking | 002957de1cf7b63c941cfafc08db807f2a1aedec | [
"MIT"
] | null | null | null | 4p-sequence-align.py | wezil/algorithmic-thinking | 002957de1cf7b63c941cfafc08db807f2a1aedec | [
"MIT"
] | null | null | null | 4p-sequence-align.py | wezil/algorithmic-thinking | 002957de1cf7b63c941cfafc08db807f2a1aedec | [
"MIT"
] | null | null | null | """
Algorithmic Thinking Part 2
Project 4: Computing alignment of Sequences
Author: Weikang Sun
Date: 11/2/15
CodeSkulptor source:
http://www.codeskulptor.org/#user40_tbt1hSyQm6_25.py
"""
def build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):
    """
    Function to build a scoring matrix given the alphabet, diagonal score,
    off-diagonal score, and dash score. Any pairing that involves the dash
    character scores dash_score (including dash vs dash).
    Returns dictionary of dictionaries: matrix[row_char][col_char] -> score.

    Fix: the original compared strings with `is` (identity); that relies on
    CPython string interning and raises a SyntaxWarning on Python >= 3.8.
    Equality (`==`) is used instead.
    """
    alphabet_dash = list(alphabet) + ["-"]
    score_matrix = {}
    for entry_row in alphabet_dash:
        matrix_row = {}
        for entry_column in alphabet_dash:
            if entry_row == "-" or entry_column == "-":
                matrix_row[entry_column] = dash_score
            elif entry_column == entry_row:
                matrix_row[entry_column] = diag_score
            else:
                matrix_row[entry_column] = off_diag_score
        score_matrix[entry_row] = matrix_row
    return score_matrix
def print_scoring_matrix(scoring_matrix):
    """ Helper function to print scoring matrix nicely """
    # Fix: the original used the Python-2-only `print str(...)` statement;
    # the print() call form below produces the same output and is valid
    # under both Python 2 and 3.
    for row in scoring_matrix.keys():
        print(str(row) + ": " + str(scoring_matrix[row]))
def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag = True):
    """
    Build the dynamic-programming alignment matrix for two sequences.

    scoring_matrix is a dict-of-dicts as produced by build_scoring_matrix.
    With global_flag True a global (Needleman-Wunsch) matrix is computed,
    allowing negative scores; with global_flag False a local
    (Smith-Waterman) matrix is computed, clamping every entry at 0.
    Returns a (len(seq_x)+1) x (len(seq_y)+1) list of lists.
    """
    rows = len(seq_x) + 1
    cols = len(seq_y) + 1
    grid = [[0] * cols for _ in range(rows)]
    # boundary row/column accumulate gap penalties only in the global case;
    # the local variant leaves them at zero
    if global_flag:
        for ridx in range(1, rows):
            grid[ridx][0] = grid[ridx - 1][0] + scoring_matrix[seq_x[ridx - 1]]["-"]
        for cidx in range(1, cols):
            grid[0][cidx] = grid[0][cidx - 1] + scoring_matrix["-"][seq_y[cidx - 1]]
    for ridx in range(1, rows):
        for cidx in range(1, cols):
            match = grid[ridx - 1][cidx - 1] + scoring_matrix[seq_x[ridx - 1]][seq_y[cidx - 1]]
            gap_y = grid[ridx - 1][cidx] + scoring_matrix[seq_x[ridx - 1]]["-"]
            gap_x = grid[ridx][cidx - 1] + scoring_matrix["-"][seq_y[cidx - 1]]
            best = max(match, gap_y, gap_x)
            if not global_flag and best < 0:
                best = 0
            grid[ridx][cidx] = best
    return grid
def print_alignment_matrix(align_matrix):
    """ Helper function to print alignment matrix nicely"""
    # Fix: the original used the Python-2-only `print x` statement; the
    # print() call form below produces the same output and is valid under
    # both Python 2 and 3.
    for row in range(len(align_matrix)):
        print(align_matrix[row])
    return
def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):
    """
    Trace back a global alignment of two sequences from a precomputed
    global alignment matrix and scoring matrix.
    Returns a tuple (score, align_x, align_y), where score is the value in
    the bottom-right matrix cell. Ties are broken in the order
    diagonal (match/mismatch) > up (gap in y) > left (gap in x).
    """
    x_parts = []
    y_parts = []
    idx_x = len(seq_x)
    idx_y = len(seq_y)
    while idx_x > 0 and idx_y > 0:
        here = alignment_matrix[idx_x][idx_y]
        char_x = seq_x[idx_x - 1]
        char_y = seq_y[idx_y - 1]
        if here == alignment_matrix[idx_x - 1][idx_y - 1] + scoring_matrix[char_x][char_y]:
            # diagonal move: align the two characters
            x_parts.append(char_x)
            y_parts.append(char_y)
            idx_x -= 1
            idx_y -= 1
        elif here == alignment_matrix[idx_x - 1][idx_y] + scoring_matrix[char_x]["-"]:
            # vertical move: gap in seq_y
            x_parts.append(char_x)
            y_parts.append("-")
            idx_x -= 1
        else:
            # horizontal move: gap in seq_x
            x_parts.append("-")
            y_parts.append(char_y)
            idx_y -= 1
    # drain whichever sequence still has characters against gaps
    while idx_x > 0:
        x_parts.append(seq_x[idx_x - 1])
        y_parts.append("-")
        idx_x -= 1
    while idx_y > 0:
        x_parts.append("-")
        y_parts.append(seq_y[idx_y - 1])
        idx_y -= 1
    x_parts.reverse()
    y_parts.reverse()
    return (alignment_matrix[-1][-1], "".join(x_parts), "".join(y_parts))
def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):
    """
    Trace back a local alignment of two sequences from a precomputed local
    alignment matrix and scoring matrix.
    The traceback starts at the first maximum entry (row-major scan) and
    stops when a zero entry is reached or either index hits the boundary.
    Returns a tuple (score, align_x, align_y).
    """
    # locate the first occurrence of the maximum score (strict > keeps the
    # earliest cell in row-major order, matching the traceback start rule)
    best_score = 0
    best_row = 0
    best_col = 0
    for ridx in range(len(seq_x) + 1):
        for cidx in range(len(seq_y) + 1):
            if alignment_matrix[ridx][cidx] > best_score:
                best_score = alignment_matrix[ridx][cidx]
                best_row = ridx
                best_col = cidx
    x_parts = []
    y_parts = []
    row = best_row
    col = best_col
    while row > 0 and col > 0:
        here = alignment_matrix[row][col]
        char_x = seq_x[row - 1]
        char_y = seq_y[col - 1]
        if here == alignment_matrix[row - 1][col - 1] + scoring_matrix[char_x][char_y]:
            # diagonal move: align the two characters
            x_parts.append(char_x)
            y_parts.append(char_y)
            row -= 1
            col -= 1
        elif here == alignment_matrix[row - 1][col] + scoring_matrix[char_x]["-"]:
            # vertical move: gap in seq_y
            x_parts.append(char_x)
            y_parts.append("-")
            row -= 1
        else:
            # horizontal move: gap in seq_x
            x_parts.append("-")
            y_parts.append(char_y)
            col -= 1
        # a zero entry marks the start of the local alignment
        if alignment_matrix[row][col] == 0:
            break
    return (best_score, "".join(reversed(x_parts)), "".join(reversed(y_parts)))
| 34.16129 | 88 | 0.538558 |
acfaf22cc18cd01fa601fa3c910fa38410f91f31 | 6,961 | py | Python | fld_proj_ensmean.py | oet808/PDO | a1af01787f027ba953685fec0ffd9988d551d249 | [
"MIT"
] | null | null | null | fld_proj_ensmean.py | oet808/PDO | a1af01787f027ba953685fec0ffd9988d551d249 | [
"MIT"
] | null | null | null | fld_proj_ensmean.py | oet808/PDO | a1af01787f027ba953685fec0ffd9988d551d249 | [
"MIT"
] | null | null | null | #!/usr/bin/python
###############################################################################
# calculate projection index for a time-varying field
# using eofs (or any other pattern) as vectors for the projection
# This script works with the PCA results of the North Pacific domain.
# This script uses the ensemble members' mean eof projection
# vector (as the defaul PDO pattern).
###############################################################################
# Results: netcdf time series output
###############################################################################
import xarray
import numpy as np
import os
#import sys
#sys.path.append("./modules")
from clens import *
# for testing use the plot utilities
# for faster execution of script at startup
# exclude this matplotlib import and any plotting
import matplotlib.pylab as plt
def proj_field(x,e):
    """Vector dot product of field pattern with projection pattern.

    Projects field x onto field e over all grid points where BOTH fields
    are non-NaN (e.g. land points masked as NaN), normalized by the length
    of e restricted to the same points:
        proj = <x, e> / sqrt(<e, e>)
    Inputs are assumed to be arrays of identical size (typically 2-dim
    lat, lon). Currently no area (latitude weighting) supported.

    Fixes: removed the unused `ndim` variable and the redundant second
    `np.reshape(e, np.size(x))` that clobbered the already-flattened `ve`.
    """
    vx = np.ravel(x)
    ve = np.ravel(e)
    # keep only grid points that are valid (non-NaN) in both fields;
    # np.dot would return NaN if any NaN slipped through
    is_use = np.logical_and(~np.isnan(vx), ~np.isnan(ve))
    vx = vx[is_use]
    ve = ve[is_use]
    return np.dot(vx, ve) / np.sqrt(np.dot(ve, ve))
def save_result(x,time,copy_from_source,dflt_units='k'):
    """Save projection results as the netcdf time-series file 'proj.nc'.

    Input parameters:
    x: projection indices (1dim array with time)
    time: time coordinates taken from the input netcdf file
    copy_from_source: the field variable from the source netcdf file; used
        only to copy metadata (the 'units' attribute) onto the output
    dflt_units: fallback units string when the source has no 'units'

    Returns the xarray.Dataset that was written to 'proj.nc'.
    """
    ncsrc=copy_from_source # use shorter variable name
    xproj=xarray.DataArray(x,coords=[time],dims=['time'])
    xproj.name="proj"
    xproj.attrs["long_name"]="projection index"
    # Fix: narrowed the original bare `except:` to AttributeError so that
    # only a genuinely missing 'units' attribute triggers the fallback and
    # unrelated errors (including KeyboardInterrupt) are not swallowed.
    try:
        xproj.attrs['units']=ncsrc.units
    except AttributeError:
        print("save_result: could not find attribute 'units' for copying")
        print("assign default units to variable: "+dflt_units)
        xproj.attrs['units']=dflt_units # eigenvectors of unit length
    xproj.attrs['info']="projection onto ensemble mean EOF pattern in eof_ens_mean.nc"
    ds=xarray.Dataset({'proj':xproj})
    ds.to_netcdf('proj.nc',format="NETCDF4")
    return ds
# APPLIED OPERATION
# (used in output file name, added just before input file name '*.nc')
app="pdo_proj_ensmean"
###############################################################################
# If RESID is True then the input data is
# the linear regression residual
# (removed global mean trend)
# RESID=False uses anomaly data
# (global mean trend signal included)
###############################################################################
RESID=True
iscen=-1
# LOOP OVER SCENARIOS
# NOTE(review): SCENARIOLIST, ENSEMBLELIST, VARLIST, TRANSLATE, MODEL,
# OUTPATH, REGION_PDO and MODE_PDO all come from `from clens import *`
# — confirm against the clens module.
for scen in SCENARIOLIST:
    iscen=iscen+1
    nmodel=0
    i=-1
    for run in ENSEMBLELIST:
        for v in VARLIST:
            # 3-dim field
            # EOF projection eignevectors
            # The projection vector is loaded from eof_ens_mean.nc
            # eofscen is set as default to the historical scenario
            eofscen=TRANSLATE['historical']['scen']
            eoftime=TRANSLATE['historical']['time']
            cesmscen=TRANSLATE[scen]['scen']
            cesmtime=TRANSLATE[scen]['time']
            # EOF pattern file (ensemble-mean, annual anomalies, residuals)
            infile_eof=MODEL+"_"+eofscen+"_"+v+"_"+eoftime+"_ensmean_ann_ano_resid_eof.nc"
            if RESID:
                infile=MODEL+"_"+cesmscen+"_"+v+"_"+cesmtime+"_"+run+"_ann_ano_resid.nc"
                outfile=MODEL+"_"+cesmscen+"_"+v+"_"+cesmtime+"_"+run+"_ann_ano_resid_"+app+".nc"
            else:
                infile=MODEL+"_"+cesmscen+"_"+v+"_"+cesmtime+"_"+run+"_ann_ano.nc"
                outfile=MODEL+"_"+cesmscen+"_"+v+"_"+cesmtime+"_"+run+"_ann_ano_"+app+".nc"
            print("field data: "+OUTPATH+infile)
            print("eigenvectors from "+OUTPATH+infile_eof)
            print("output file: "+OUTPATH+outfile)
            print ("call function to read the netcdf files")
            ### open the data sets ###
            nc1=xarray.open_dataset(OUTPATH+infile)
            ntime1=nc1.time.size
            field=(nc1[v].values[:]).squeeze() # safe use for annual anomaly data
            nc2=xarray.open_dataset(OUTPATH+infile_eof)
            eof=(nc2['eofm'].values[:]) # eofm is variable name in netcdf file
            nmodes=1
            #######################################################################
            # select North Pacific Domain and apply PCA
            # to the residuals
            #######################################################################
            sellon=REGION_PDO[0:2]
            sellat=REGION_PDO[2:4]
            # boolean masks selecting the North Pacific lon/lat window
            is_lon1=np.logical_and(nc1.lon.values>=sellon[0],nc1.lon.values<=sellon[1])
            nlon1=np.sum(is_lon1)
            is_lat1=np.logical_and(nc1.lat.values>=sellat[0],nc1.lat.values<=sellat[1])
            nlat1=np.sum(is_lat1)
            buffer=field[:,:,is_lon1]
            field_npac=buffer[:,is_lat1,:]
            # make this check here, in case we combine with other domain
            # sizes
            is_lon2=np.logical_and(nc2.lon.values>=sellon[0],nc2.lon.values<=sellon[1])
            nlon2=np.sum(is_lon2)
            is_lat2=np.logical_and(nc2.lat.values>=sellat[0],nc2.lat.values<=sellat[1])
            nlat2=np.sum(is_lat2)
            buffer=eof[:,is_lon2]
            field_eof=buffer[is_lat2,:]
            #######################################################################
            # Projection of field data onto eigenvector
            # (1st EOF should represent PDO mode)
            #######################################################################
            t=0
            ntime=len(field_npac[:,0,0])
            proj=np.zeros(ntime)
            # one projection index per time step
            while t<ntime:
                proj[t]=proj_field(field_npac[t,:,:],field_eof[:,:])
                #print(t)
                t=t+1
            ds=save_result(proj,nc1.time,copy_from_source=nc1[v])
            # disabled debug plotting
            # NOTE(review): proj is 1-D here, so proj[:,MODE_PDO] would fail
            # if this branch were ever re-enabled — fix before enabling.
            if False:
                fig,ax=plt.subplots(2,2)
                ax[0,0].plot(nc1['time'],proj[:,MODE_PDO])
                ax[0,0].set_xlabel('PCA mode #')
                ax[0,0].set_ylabel('projection index')
                ax[1,0].contourf(nc2.lon[is_lon2],nc2.lat[is_lat2],field_eof[MODE_PDO,:,:],cmap=plt.cm.coolwarm)
                plt.show()
            i=i+1
            # rename the generic proj.nc written by save_result to the
            # scenario/run/variable-specific output file name
            os.system("mv proj.nc "+OUTPATH+outfile)
            print ("outfile: "+OUTPATH+outfile)
        nmodel+=1
    print ("done")
| 38.038251 | 112 | 0.557248 |
acfaf356c6d84d9e0eb55a014be5ea530c3e9b6c | 859 | py | Python | .github/update_docs.py | apjanco/projects | 2f8850140ba13ab18b9cf622e46e79013d41701f | [
"MIT"
] | 823 | 2019-11-22T17:08:39.000Z | 2022-03-31T03:03:23.000Z | .github/update_docs.py | apjanco/projects | 2f8850140ba13ab18b9cf622e46e79013d41701f | [
"MIT"
] | 46 | 2019-11-25T15:14:05.000Z | 2022-03-31T12:59:45.000Z | .github/update_docs.py | apjanco/projects | 2f8850140ba13ab18b9cf622e46e79013d41701f | [
"MIT"
] | 326 | 2019-11-24T01:31:27.000Z | 2022-03-27T19:48:04.000Z | from pathlib import Path
from spacy.cli.project.document import project_document
from spacy.cli._util import PROJECT_FILE
from wasabi import msg
import typer
def main(root: Path = typer.Argument(Path.cwd(), help="Root path to look in")):
    """
    Regenerate the auto-generated documentation across the repo. Every
    directory under *root* that contains a project file is treated as a
    project directory and its README.md is (re)written; when a README
    already holds an auto-generated section only that section is replaced,
    and READMEs carrying an ignore comment are skipped (e.g. to support
    projects without an auto-generated README).
    """
    msg.info(f"Updating auto-generated docs in {root}")
    # a directory counts as a project iff it contains the project file
    for project_file in root.glob(f"**/*/{PROJECT_FILE}"):
        project_dir = project_file.parent
        project_document(project_dir, project_dir / "README.md")
# Script entry point: expose `main` as a Typer CLI command.
if __name__ == "__main__":
    typer.run(main)
| 34.36 | 79 | 0.718277 |
acfaf6ca653c2e7ebde9b7ed7fa185eadfeaadfb | 696 | py | Python | rapid7_attackerkb/icon_rapid7_attackerkb/connection/connection.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | rapid7_attackerkb/icon_rapid7_attackerkb/connection/connection.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | rapid7_attackerkb/icon_rapid7_attackerkb/connection/connection.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | import komand
from .schema import ConnectionSchema, Input
# Custom imports below
from icon_rapid7_attackerkb.util.api import AttackerKB
class Connection(komand.Connection):
    """InsightConnect connection for the Rapid7 AttackerKB plugin."""

    def __init__(self):
        # Fix: use the explicit class in super() — the original
        # `super(self.__class__, self)` recurses infinitely if this class
        # is ever subclassed.
        super(Connection, self).__init__(input=ConnectionSchema())
        # API client; populated by connect()
        self.attackerKB_api = None

    def connect(self, params):
        """Build the AttackerKB API client from the supplied credentials."""
        self.logger.info("Connect: Connecting...")
        self.attackerKB_api = AttackerKB(params.get(Input.CREDENTIALS).get("secretKey"),
                                         self.logger,
                                         params.get(Input.MAX_PAGES, 100))

    def test(self):
        """Connection test: fetch the public API spec to validate connectivity."""
        self.attackerKB_api.call_api("api-docs/openapi_spec.json")
| 31.636364 | 88 | 0.642241 |
acfaf6d335a8593cd1597798e46c7506a9c9cd3f | 1,089 | py | Python | count_inversions.py | skebix/algorithms | c4f5d9a3e4088341817838fbf7ae0c03c32cea8a | [
"MIT"
] | null | null | null | count_inversions.py | skebix/algorithms | c4f5d9a3e4088341817838fbf7ae0c03c32cea8a | [
"MIT"
] | null | null | null | count_inversions.py | skebix/algorithms | c4f5d9a3e4088341817838fbf7ae0c03c32cea8a | [
"MIT"
] | null | null | null | import time
def count_inversions(a):
    """
    Count inversions in sequence `a` recursively, piggybacking on merge sort.
    Returns (sorted_sequence, inversion_count), where an inversion is a pair
    of positions i < j with a[i] > a[j].
    """
    n = len(a)
    if n == 0 or n == 1:
        return a, 0
    middle = n // 2
    left, x = count_inversions(a[:middle])
    right, y = count_inversions(a[middle:])
    ordered, z = split_inversions(left, right)
    return ordered, x + y + z


def split_inversions(left, right):
    """
    Merge two sorted sequences, counting the number of split inversions,
    i.e. pairs with the element from `left` strictly greater than the
    element from `right`.

    Fixes over the previous version:
    - equal elements are no longer counted as inversions (an inversion
      requires a[i] > a[j], strictly), which also makes the merge stable;
    - merging uses index pointers instead of list.pop(0), turning each
      merge from O(n^2) into O(n); the inputs are no longer mutated.
    """
    total = 0
    result = []
    i = 0
    j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
            # every element still waiting in `left` exceeds right[j-1]
            total += len(left) - i
    # at most one of these extends with anything
    result.extend(left[i:])
    result.extend(right[j:])
    return result, total
# Driver: read one integer per line from the test file, count inversions,
# and report the elapsed wall-clock time and the inversion count.
with open("count_inversion_test.txt", "r") as ins:
    array = []
    for line in ins:
        array.append(int(line.strip()))

start_time = time.time()
# l = sorted copy (unused), t = total number of inversions
l, t = count_inversions(array)
print("%s seconds" % (time.time() - start_time))
print(t)
| 20.942308 | 75 | 0.560147 |
acfafa7fb29baea77d152acbaab758c8abf25eb3 | 3,943 | py | Python | venv/lib/python3.8/site-packages/azureml/_restclient/operations/arm_template_operations.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_restclient/operations/arm_template_operations.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_restclient/operations/arm_template_operations.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class ArmTemplateOperations(object):
    """ArmTemplateOperations operations.

    AutoRest-generated client for the Azure ML workspace ARM-template API.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Version of Azure Machine Learning resource provider API. Constant value: "2020-06-01".
    """

    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
        # Pinned API version sent as the 'api-version' query parameter.
        self.api_version = "2020-06-01"
    def env_set_up(
            self, subscription_id, resource_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Create workspace with arm template API.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which
         the workspace is located.
        :type resource_group_name: str
        :param parameters: The object for creating a new workspace using arm
         template api
        :type parameters: ~_restclient.models.ArmTemplateDto
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`MachineLearningServiceErrorException<_restclient.models.MachineLearningServiceErrorException>`
        """
        # Construct URL from the URL template attached below as metadata.
        url = self.env_set_up.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers (caller-supplied headers may override defaults)
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct body: serialize the DTO per its swagger model name.
        body_content = self._serialize.body(parameters, 'ArmTemplateDto')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # Only HTTP 200 is a success for this operation.
        if response.status_code not in [200]:
            raise models.MachineLearningServiceErrorException(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    env_set_up.metadata = {'url': '/rp/armtemplates/envsetup/subscriptions/{subscriptionId}/resourceGroupName/{resourceGroupName}'}
| 42.397849 | 132 | 0.654071 |
acfafac3dbdf558a8fc00a941424d10155f94959 | 3,678 | py | Python | scrapers/scraper_primorskiVal.py | do5562/SLEDIMedO | 1abba7e5454b251244213abe3cd8cdadd1c94475 | [
"MIT"
] | null | null | null | scrapers/scraper_primorskiVal.py | do5562/SLEDIMedO | 1abba7e5454b251244213abe3cd8cdadd1c94475 | [
"MIT"
] | null | null | null | scrapers/scraper_primorskiVal.py | do5562/SLEDIMedO | 1abba7e5454b251244213abe3cd8cdadd1c94475 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup as bs
import hashlib
from database.dbExecutor import dbExecutor
import datetime
base_url = 'http://www.primorskival.si/'
full_url = 'http://www.primorskival.si/snovice.php?page=' #dodaj se stevilo strani - prva stran je 1
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}
meseci = {'januar': '1.', 'februar': '2.', 'marec': '3.', 'april': '4.', 'maj': '5.',
'junij': '6.', 'julij': '7.', 'avgust': '8.', 'september': '9.',
'oktober': '10.', 'november': '11.', 'december': '12.'}
def make_hash(title, date):
    """Return a stable SHA-1 hex digest identifying an article by title+date."""
    digest = hashlib.sha1()
    digest.update((title + date).encode('utf-8'))
    return digest.hexdigest()
def is_article_new(hash):
    """
    Check whether *hash* has already been recorded in article_list.txt.

    Appends the hash to the file when it is new, so a later call with the
    same hash returns False.

    :param hash: article fingerprint (hex digest string); the parameter name
        shadows the builtin but is kept for keyword-argument compatibility
    :return: True if the hash was not seen before, False otherwise
    """
    is_new = False
    try:
        f = open('article_list.txt', 'r+')
    except FileNotFoundError:
        # First run: create the file.
        f = open('article_list.txt', 'a+')
    # Context manager guarantees the handle is closed even if read/write fails
    # (the original leaked the handle on an exception between open and close).
    with f:
        if hash not in f.read().split():
            is_new = True
            # File position is at EOF after read(), so this appends.
            f.write(hash + '\n')
            print('new article found')
    return is_new
def get_title(soup):
    """Return the text of the first <h2> tag, or a fallback marker string."""
    heading = soup.find('h2')
    if not heading:
        # Site layout changed; the selector needs maintenance.
        print('title not found, update select() method')
        return 'title not found'
    return heading.text
def get_date(soup):
    """
    Parse the article date from the span.datum element.

    Expected text shape: "<weekday>, <day>. <month-name> <year>"; the Slovene
    month name is mapped to its numeric form via the `meseci` table.
    Returns the sentinel '1.1.1111' when no date element exists.
    """
    tag = soup.find('span', class_='datum')
    if not tag:
        print('date not found')
        return '1.1.1111' #code for date not found
    parts = tag.text.split()
    parts[2] = meseci[parts[2]]
    # Drop the weekday token and glue day/month/year back together.
    return formatDate(''.join(parts[1:]))
def get_link(soup):
    """Return an absolute article URL built from the first anchor tag."""
    anchor = soup.find('a')
    if not anchor:
        print('link not found')
        return base_url #return base url to avoid exceptions
    return base_url + anchor.get('href')
def get_content(soup):
    """Return the plain text of div.maincontent with <script> tags removed."""
    container = soup.find('div', class_='maincontent')
    if not container:
        print('content not found')
        return 'content not found'
    # Strip embedded scripts so only readable article text remains.
    for script_tag in container(['script']):
        script_tag.decompose()
    return container.text.strip()
def get_articles_on_pages(num_pages_to_check, session):
    """
    Fetch the first *num_pages_to_check* listing pages (pages are 1-based)
    and collect the article container <div> elements from each.
    """
    articles = []
    for page in range(1, num_pages_to_check + 1):
        response = session.get(full_url + str(page))
        page_soup = bs(response.text, 'html.parser')
        # Article teasers are the right-floated fixed-width divs.
        articles.extend(
            page_soup.find('div', class_='maincontent').find_all(
                'div', style='float: right; width:470px; padding: 2px;'))
    return articles
def formatDate(date):
    """
    Convert a 'D.M.YYYY' date string into ISO-style 'YYYY-MM-DD'.

    :param date: '.'-separated day, month, year (day/month may lack padding)
    :return: zero-padded 'YYYY-MM-DD' string for consistent database sorting
    """
    parts = date.split('.')
    # zfill pads 1-digit day/month to two digits; the 4-digit year is
    # already >= 2 characters and is left unchanged.
    return '-'.join(part.zfill(2) for part in reversed(parts))
def main():
    """Scrape the first pages of the site, store unseen articles in the DB,
    and print a summary of how many new articles were found."""
    num_pages_to_check = 2
    num_new_articles = 0
    articles_checked = 0
    with requests.Session() as session:
        session.headers.update(headers)
        articles = get_articles_on_pages(num_pages_to_check,session)
        articles_checked = len(articles)
        new_articles_tuples = []
        for x in articles:
            title = get_title(x)
            # NOTE: 'date' shadows datetime.date within this loop body.
            date = get_date(x)
            hash_str = make_hash(title, date)
            if is_article_new(hash_str):
                # Fetch the full article page only for unseen articles.
                link = get_link(x)
                r = requests.get(link)
                soup = bs(r.text, 'html.parser')
                content = get_content(soup)
                print(link + '\n')
                new_tup = (str(datetime.date.today()), title, content, date, hash_str, link, base_url)
                new_articles_tuples.append(new_tup)
                num_new_articles += 1
        #add new articles to database
        dbExecutor.insertMany(new_articles_tuples)
    print(num_new_articles, 'new articles found,', articles_checked,'articles checked')
if __name__ == '__main__':
main() | 29.902439 | 149 | 0.605492 |
acfafbab92def6d7214a01300e2300acda976272 | 3,925 | py | Python | src/backend/data/budget.py | akmadian/openfinance | 450b39023072e2eba0a70cd14d0abe7141e8a5a6 | [
"MIT"
] | 1 | 2022-03-11T02:36:55.000Z | 2022-03-11T02:36:55.000Z | src/backend/data/budget.py | akmadian/openfinance | 450b39023072e2eba0a70cd14d0abe7141e8a5a6 | [
"MIT"
] | null | null | null | src/backend/data/budget.py | akmadian/openfinance | 450b39023072e2eba0a70cd14d0abe7141e8a5a6 | [
"MIT"
] | null | null | null | from datetime import date, timedelta, datetime
from .dbmanager import TransactionsDB
"""
category
monthly amount
time period
category
transactions: [ids]
total to date
limit amount for month
"""
TIME_START = date.fromisoformat('2020-08-12')
WEEK_DELTA = timedelta(weeks=1)
MASTER_BUDGET = {}
time_periods_ends = []
time_periods_transactions = {
}
categories = {
"Disc/ Fun Stuff": 35,
"Disc/ Essentials": 10,
"Non-Disc/ Groceries": 75
}
def get_budget_info(dbinstance):
    """
    Build and return the budget summary for all stored transactions.

    :param dbinstance: object exposing read_transactions() (TransactionsDB)
    :return: the module-level MASTER_BUDGET dict, mapping each period-end
        date string to per-category totals
    """
    transactions = dbinstance.read_transactions()
    # Populates the module-level period tables, then computes the totals.
    order_transactions(transactions)
    calc_totals()
    return MASTER_BUDGET
def get_last_dt(asStr=False):
    """
    Return the most recent time-period end date.

    :param asStr: when True, return the ISO 'YYYY-MM-DD' string instead of a
        datetime.date (the original accepted this flag but silently ignored
        it; default behavior is unchanged)
    :return: datetime.date, or str when asStr is True
    """
    last = date.fromisoformat(str(time_periods_ends[-1]))
    return str(last) if asStr else last
def calc_totals():
    """
    Fill MASTER_BUDGET from the period tables built by order_transactions().

    For every time period and every budget category, sums the (absolute)
    amounts of matching transactions and records the configured limit.

    Transaction tuple layout (inferred from usage — TODO confirm against
    the DB schema): [5] skip-budget flag, [8] category string,
    [9] amount, [10] list of split dicts.
    """
    for period in time_periods_ends:
        transactions = time_periods_transactions[period]
        MASTER_BUDGET[period] = {}
        for category, limit in categories.items():
            time_period_total = 0
            added_transactions = []
            for transaction in transactions:
                if transaction[5]: # If flagged to not count in budget
                    continue
                if category in transaction[8]:
                    if len(transaction[10]) > 0: # Adjust for splits
                        split_total = split_mod_transaction_total(transaction)
                        # Negated when negative, so totals accumulate as
                        # absolute values in both branches.
                        if split_total < 0:
                            time_period_total -= split_total
                        else:
                            time_period_total += split_total
                        # NOTE(review): this appends the splits list
                        # (transaction[10]) though the name suggests
                        # transaction ids — confirm intended payload.
                        added_transactions.append(transaction[10])
                    else:
                        if transaction[9] < 0:
                            time_period_total -= transaction[9]
                        else:
                            time_period_total += transaction[9]
                        added_transactions.append(transaction[10])
            MASTER_BUDGET[period][category] = {
                'period_total': time_period_total,
                'period_limit': limit,
                'transactions': added_transactions
            }
def split_mod_transaction_total(transaction):
    """
    Return the transaction amount adjusted for its splits.

    Personal-account and partial-reimbursement splits subtract their amount;
    a complete reimbursement zeroes the running amount. A transaction with no
    splits is returned unchanged.
    """
    splits = transaction[10]
    if not splits:
        return transaction[9]
    amount = transaction[9]
    for split in splits:
        split_cats = split['categories']
        # Branch order matters: subtraction categories take precedence over
        # COMPLETE_REIMBURSEMENT when a split carries several categories.
        if ('SOURCE_PERSONAL_ACCOUNT' in split_cats
                or 'PARTIAL_REIMBURSEMENT' in split_cats):
            amount -= split['amount']
        elif 'COMPLETE_REIMBURSEMENT' in split_cats:
            amount = 0
    return amount
def order_transactions(transactions):
    """
    Bucket transactions (oldest first) into week-long periods, extending the
    module-level time_periods_ends / time_periods_transactions tables.

    :param transactions: DB rows, newest-first on input (reversed in place);
        row[2] is an ISO timestamp string (first 10 chars are the date)
    """
    # Seed the first period one week after TIME_START, or extend from the
    # last known period end on subsequent calls.
    if not bool(time_periods_transactions):
        time_periods_ends.append(str(TIME_START + WEEK_DELTA))
        time_periods_transactions[time_periods_ends[-1]] = []
    else:
        time_periods_ends.append(str(get_last_dt() + WEEK_DELTA))
        time_periods_transactions[time_periods_ends[-1]] = []
    transactions.reverse()
    for transaction in transactions:
        #print(str(transaction[2][:10]) + " " + str(transaction[1]))
        transaction_date = datetime.fromisoformat(transaction[2][:10])
        if transaction_date.date() > get_last_dt():
            # NOTE(review): only one new week is appended even when the gap
            # spans several weeks, so a far-future transaction still lands in
            # the very next period — confirm whether a while-loop was intended.
            time_periods_ends.append(str(get_last_dt() + WEEK_DELTA))
            time_periods_transactions[time_periods_ends[-1]] = [transaction]
        else:
            time_periods_transactions[str(get_last_dt())].append(transaction)
    #for key, value in time_periods_transactions.items():
    # print(key, value)
if __name__ == '__main__':
    # Ad-hoc smoke run: read everything from the DB, build the budget,
    # and dump each period's category totals.
    db = TransactionsDB()
    transactions = db.read_transactions()
    order_transactions(transactions)
    calc_totals()
    # The loop target intentionally reuses the name 'categories' here,
    # shadowing the module-level config dict for the duration of the loop.
    for time_period, categories in MASTER_BUDGET.items():
        print(time_period)
        for category, info in categories.items():
            print(" " + str(category))
            print(" " + str(info))
acfafc54128ab95a46b801e40508790c4bba1326 | 726 | py | Python | python/draw_rectangles_on_faces.py | symisc/pixlab | bf1d46a67f8e738b059fee4fc65d579f091ef0a9 | [
"BSD-2-Clause"
] | 96 | 2017-03-17T21:53:36.000Z | 2022-03-17T19:56:06.000Z | python/draw_rectangles_on_faces.py | symisc/pixlab | bf1d46a67f8e738b059fee4fc65d579f091ef0a9 | [
"BSD-2-Clause"
] | 6 | 2017-06-03T02:41:00.000Z | 2021-08-19T22:44:27.000Z | python/draw_rectangles_on_faces.py | symisc/pixlab | bf1d46a67f8e738b059fee4fc65d579f091ef0a9 | [
"BSD-2-Clause"
] | 33 | 2017-07-12T08:22:53.000Z | 2021-05-24T09:19:18.000Z | # Mark Jeremy's face by drawing a rectangle on it. The rectangle coordinates was obtained from the facedetect command and passed untouched to this command.
# Refer to the command page https://pixlab.io/#/cmd?id=drawrectangles for more info.
import requests
import json
req = requests.post('https://api.pixlab.io/drawrectangles',headers={'Content-Type':'application/json'},data=json.dumps({
'img': 'http://cf.broadsheet.ie/wp-content/uploads/2015/03/jeremy-clarkson_3090507b.jpg',
'key':'My_PixLab_Key',
'cord': [
{
"x":164,
"y":95,
"width":145,
"height":145,
#"color":"green"
}
]
}))
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Pic location: "+ reply['link'])
| 29.04 | 156 | 0.690083 |
acfafc6614a76e9c4ecd8d388d0ed2b17e59447f | 42,298 | py | Python | src/python/xrt_binding.py | venkatp36/XRT | 3d059992446fdfbc35f2e6b6882f79c212873fb1 | [
"Apache-2.0"
] | 1 | 2019-08-06T00:14:50.000Z | 2019-08-06T00:14:50.000Z | src/python/xrt_binding.py | venkatp36/XRT | 3d059992446fdfbc35f2e6b6882f79c212873fb1 | [
"Apache-2.0"
] | 20 | 2018-10-03T23:01:00.000Z | 2019-05-10T22:57:57.000Z | src/python/xrt_binding.py | venkatp36/XRT | 3d059992446fdfbc35f2e6b6882f79c212873fb1 | [
"Apache-2.0"
] | 1 | 2020-03-28T05:50:59.000Z | 2020-03-28T05:50:59.000Z | """
Copyright (C) 2018 Xilinx, Inc
Author(s): Ryan Radjabi
Shivangi Agarwal
Sonal Santan
ctypes based Python binding for XRT
Licensed under the Apache License, Version 2.0 (the "License"). You may
not use this file except in compliance with the License. A copy of the
License is located at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import os
import ctypes
from xclbin_binding import *
libc = ctypes.CDLL(os.environ['XILINX_XRT'] + "/lib/libxrt_core.so")
xclDeviceHandle = ctypes.c_void_p
class xclDeviceInfo2(ctypes.Structure):
    """ctypes mirror of the C struct xclDeviceInfo2 returned by
    xclGetDeviceInfo2(). Field order and types must match the C layout
    exactly; only names/formatting may differ from the header."""
    # "_fields_" is a required keyword
    _fields_ = [
        ("mMagic", ctypes.c_uint),
        ("mName", ctypes.c_char*256),
        ("mHALMajorVersion", ctypes.c_ushort),
        ("mHALMinorVersion", ctypes.c_ushort),
        ("mVendorId", ctypes.c_ushort),
        ("mDeviceId", ctypes.c_ushort),
        ("mSubsystemId", ctypes.c_ushort),
        ("mSubsystemVendorId", ctypes.c_ushort),
        ("mDeviceVersion", ctypes.c_ushort),
        ("mDDRSize", ctypes.c_size_t),
        ("mDataAlignment", ctypes.c_size_t),
        ("mDDRFreeSize", ctypes.c_size_t),
        ("mMinTransferSize", ctypes.c_size_t),
        ("mDDRBankCount", ctypes.c_ushort),
        ("mOCLFrequency", ctypes.c_ushort*4),
        ("mPCIeLinkWidth", ctypes.c_ushort),
        ("mPCIeLinkSpeed", ctypes.c_ushort),
        ("mDMAThreads", ctypes.c_ushort),
        ("mOnChipTemp", ctypes.c_short),
        ("mFanTemp", ctypes.c_short),
        ("mVInt", ctypes.c_ushort),
        ("mVAux", ctypes.c_ushort),
        ("mVBram", ctypes.c_ushort),
        ("mCurrent", ctypes.c_float),
        ("mNumClocks", ctypes.c_ushort),
        ("mFanSpeed", ctypes.c_ushort),
        ("mMigCalib", ctypes.c_bool),
        ("mXMCVersion", ctypes.c_ulonglong),
        ("mMBVersion", ctypes.c_ulonglong),
        ("m12VPex", ctypes.c_short),
        ("m12VAux", ctypes.c_short),
        ("mPexCurr", ctypes.c_ulonglong),
        ("mAuxCurr", ctypes.c_ulonglong),
        ("mFanRpm", ctypes.c_ushort),
        ("mDimmTemp", ctypes.c_ushort*4),
        ("mSE98Temp", ctypes.c_ushort*4),
        ("m3v3Pex", ctypes.c_ushort),
        ("m3v3Aux", ctypes.c_ushort),
        ("mDDRVppBottom", ctypes.c_ushort),
        ("mDDRVppTop", ctypes.c_ushort),
        ("mSys5v5", ctypes.c_ushort),
        ("m1v2Top", ctypes.c_ushort),
        ("m1v8Top", ctypes.c_ushort),
        ("m0v85", ctypes.c_ushort),
        ("mMgt0v9", ctypes.c_ushort),
        ("m12vSW", ctypes.c_ushort),
        ("mMgtVtt", ctypes.c_ushort),
        ("m1v2Bottom", ctypes.c_ushort),
        # Fixed: the original field name literal was "mDriverVersion, "
        # (with a stray comma and space), which made the attribute
        # unreachable as info.mDriverVersion.
        ("mDriverVersion", ctypes.c_ulonglong),
        ("mPciSlot", ctypes.c_uint),
        ("mIsXPR", ctypes.c_bool),
        ("mTimeStamp", ctypes.c_ulonglong),
        ("mFpga", ctypes.c_char*256),
        ("mPCIeLinkWidthMax", ctypes.c_ushort),
        ("mPCIeLinkSpeedMax", ctypes.c_ushort),
        ("mVccIntVol", ctypes.c_ushort),
        ("mVccIntCurr", ctypes.c_ushort),
        ("mNumCDMA", ctypes.c_ushort)
    ]
class xclMemoryDomains:
    """Enum-style constants mirroring the C enum xclMemoryDomains
    (memory domain selectors for buffer allocation)."""
    XCL_MEM_HOST_RAM = 0
    XCL_MEM_DEVICE_RAM = 1
    XCL_MEM_DEVICE_BRAM = 2
    XCL_MEM_SVM = 3
    XCL_MEM_CMA = 4
    XCL_MEM_DEVICE_REG = 5
class xclDDRFlags:
    """Bitmask constants selecting a DDR bank (mirrors the C enum;
    values are single-bit flags, so they can be OR-ed)."""
    XCL_DEVICE_RAM_BANK0 = 0
    XCL_DEVICE_RAM_BANK1 = 2
    XCL_DEVICE_RAM_BANK2 = 4
    XCL_DEVICE_RAM_BANK3 = 8
class xclBOKind:
    """Enum-style constants for buffer object kinds (mirrors C enum
    xclBOKind); passed as the 'domain' argument to xclAllocBO."""
    XCL_BO_SHARED_VIRTUAL = 0
    XCL_BO_SHARED_PHYSICAL = 1
    XCL_BO_MIRRORED_VIRTUAL = 2
    XCL_BO_DEVICE_RAM = 3
    XCL_BO_DEVICE_BRAM = 4
    XCL_BO_DEVICE_PREALLOCATED_BRAM = 5
class xclBOSyncDirection:
    """Direction selector for xclSyncBO (host-to-device or device-to-host)."""
    XCL_BO_SYNC_BO_TO_DEVICE = 0
    XCL_BO_SYNC_BO_FROM_DEVICE = 1
class xclAddressSpace:
    """Address-space selectors for the low-level xclRead/xclWrite APIs."""
    XCL_ADDR_SPACE_DEVICE_FLAT = 0 # Absolute address space
    XCL_ADDR_SPACE_DEVICE_RAM = 1 # Address space for the DDR memory
    XCL_ADDR_KERNEL_CTRL = 2 # Address space for the OCL Region control port
    XCL_ADDR_SPACE_DEVICE_PERFMON = 3 # Address space for the Performance monitors
    XCL_ADDR_SPACE_DEVICE_CHECKER = 5 # Address space for protocol checker
    XCL_ADDR_SPACE_MAX = 8
class xclVerbosityLevel:
    """Log severity levels accepted by xclOpen (higher = more severe)."""
    XCL_QUIET = 0
    XCL_INFO = 1
    XCL_WARN = 2
    XCL_ERROR = 3
class xclResetKind:
    """Reset scope selectors for xclResetDevice."""
    XCL_RESET_KERNEL = 0
    XCL_RESET_FULL = 1
    XCL_USER_RESET = 2
class xclDeviceUsage (ctypes.Structure):
    """ctypes mirror of the C struct xclDeviceUsage filled by
    xclGetUsageInfo() (DMA byte counts, per-bank allocation stats, etc.).
    Field order/types must match the C header exactly."""
    _fields_ = [
        ("h2c", ctypes.c_size_t*8),
        ("c2h", ctypes.c_size_t*8),
        # NOTE(review): field name looks like a typo for 'ddrMemUsed' but is
        # kept as-is to match existing attribute accesses — confirm vs header.
        ("ddeMemUsed", ctypes.c_size_t*8),
        ("ddrBOAllocated", ctypes.c_uint *8),
        ("totalContents", ctypes.c_uint),
        ("xclbinId", ctypes.c_ulonglong),
        ("dma_channel_cnt", ctypes.c_uint),
        ("mm_channel_cnt", ctypes.c_uint),
        ("memSize", ctypes.c_ulonglong*8)
    ]
class xclBOProperties (ctypes.Structure):
    """ctypes mirror of the C struct xclBOProperties filled by
    xclGetBOProperties() (handle, flags, size and device physical address)."""
    _fields_ = [
        ("handle", ctypes.c_uint),
        ("flags" , ctypes.c_uint),
        ("size", ctypes.c_ulonglong),
        ("paddr", ctypes.c_ulonglong),
        ("domain", ctypes.c_uint),
    ]
def xclProbe():
    """
    xclProbe() - Enumerate devices found in the system

    :return: count of devices found

    Thin ctypes wrapper; no prototype declaration is needed (int return,
    no arguments).
    """
    return libc.xclProbe()
def xclVersion():
    """
    Report the HAL ABI version implemented by the loaded library.

    :return: the version number. 1 => Hal1 ; 2 => Hal2
    """
    return libc.xclVersion()
def xclOpen(deviceIndex, logFileName, level):
    """
    xclOpen(): Open a device and obtain its handle

    :param deviceIndex: (unsigned int) Slot number of device 0 for first device, 1 for the second device...
    :param logFileName: (const char pointer) Log file to use for optional logging, bytes or None
    :param level: (int) Severity level of messages to log (see xclVerbosityLevel)
    :return: opaque device handle pointer, to be passed to every other call
    """
    # Declare the C prototype so ctypes marshals arguments correctly and
    # treats the return value as an opaque handle pointer.
    libc.xclOpen.restype = ctypes.POINTER(xclDeviceHandle)
    libc.xclOpen.argtypes = [ctypes.c_uint, ctypes.c_char_p, ctypes.c_int]
    return libc.xclOpen(deviceIndex, logFileName, level)
def xclClose(handle):
    """
    xclClose(): Close an opened device

    :param handle: (xclDeviceHandle) device handle returned by xclOpen
    :return: None (the C function returns void)
    """
    libc.xclClose.restype = None
    # Fixed: the original assigned 'argtype' (singular), an attribute ctypes
    # silently ignores, so the prototype was never actually declared.
    libc.xclClose.argtypes = [xclDeviceHandle]
    libc.xclClose(handle)
def xclResetDevice(handle, kind):
    """
    xclResetDevice() - Reset a device or its CL

    :param handle: Device handle
    :param kind: Reset kind (see xclResetKind)
    :return: 0 on success or appropriate error number
    """
    libc.xclResetDevice.restype = ctypes.c_int
    libc.xclResetDevice.argtypes = [xclDeviceHandle, ctypes.c_int]
    # Fixed: the original dropped the C return code, returning None and
    # contradicting the documented contract.
    return libc.xclResetDevice(handle, kind)
def xclGetDeviceInfo2 (handle, info):
    """
    xclGetDeviceInfo2() - Obtain various bits of information from the device

    :param handle: (xclDeviceHandle) device handle
    :param info: (xclDeviceInfo2) record filled in place; pass byref(info)
    :return: 0 on success or appropriate error number
    """
    libc.xclGetDeviceInfo2.restype = ctypes.c_int
    libc.xclGetDeviceInfo2.argtypes = [xclDeviceHandle, ctypes.POINTER(xclDeviceInfo2)]
    return libc.xclGetDeviceInfo2(handle, info)
def xclGetUsageInfo(handle, info):
    """
    xclGetUsageInfo() - Obtain usage information from the device

    :param handle: Device handle
    :param info: xclDeviceUsage record filled in place; pass byref(info)
    :return: 0 on success or appropriate error number
    """
    libc.xclGetUsageInfo.restype = ctypes.c_int
    # Fixed: the C API fills an xclDeviceUsage record; the original declared
    # POINTER(xclDeviceInfo2), which makes ctypes reject a byref(xclDeviceUsage())
    # argument with an ArgumentError.
    libc.xclGetUsageInfo.argtypes = [xclDeviceHandle, ctypes.POINTER(xclDeviceUsage)]
    return libc.xclGetUsageInfo(handle, info)
def xclGetErrorStatus(handle, info):
    """
    xclGetErrorStatus() - Obtain error information from the device

    :param handle: Device handle
    :param info: record filled in place; pass byref(info)
    :return: 0 on success or appropriate error number
    """
    libc.xclGetErrorStatus.restype = ctypes.c_int
    # NOTE(review): the C API takes an xclErrorStatus record, but no such
    # ctypes Structure is defined here and the prototype declares
    # xclDeviceInfo2* instead — confirm against xclhal2.h.
    libc.xclGetErrorStatus.argtypes = [xclDeviceHandle, ctypes.POINTER(xclDeviceInfo2)]
    return libc.xclGetErrorStatus(handle, info)
def xclLoadXclBin(handle, buf):
    """
    Download FPGA image (xclbin) to the device

    :param handle: (xclDeviceHandle) device handle
    :param buf: (void pointer) Pointer to device image (xclbin) in memory
    :return: 0 on success or appropriate error number

    Download FPGA image (AXLF) to the device. The PR bitstream is encapsulated inside
    xclbin as a section. xclbin may also contains other sections which are suitably
    handled by the driver
    """
    libc.xclLoadXclBin.restype = ctypes.c_int
    libc.xclLoadXclBin.argtypes = [xclDeviceHandle, ctypes.c_void_p]
    return libc.xclLoadXclBin(handle, buf)
def xclGetSectionInfo(handle, info, size, kind, index):
    """
    xclGetSectionInfo() - Get Information from sysfs about the downloaded xclbin sections

    :param handle: Device handle
    :param info: Pointer to preallocated memory which will store the return value.
    :param size: Pointer to a c_size_t which will store the return size.
    :param kind: axlf_section_kind for which info is being queried
    :param index: The (sub)section index for the "kind" type.
    :return: 0 on success or appropriate error number
    """
    libc.xclGetSectionInfo.restype = ctypes.c_int
    # Fixed: the original declared ctypes.POINTER(ctypes.sizeof(...)), which
    # raises TypeError because sizeof() returns an int, not a ctypes type.
    # 'info' is an opaque output buffer and 'size' is an in/out size_t*.
    libc.xclGetSectionInfo.argtypes = [xclDeviceHandle, ctypes.c_void_p,
                                       ctypes.POINTER(ctypes.c_size_t),
                                       ctypes.c_int, ctypes.c_int]
    return libc.xclGetSectionInfo(handle, info, size, kind, index)
def xclReClock2(handle, region, targetFreqMHz):
    """
    xclReClock2() - Configure PR region frequencies

    :param handle: Device handle
    :param region: PR region (always 0)
    :param targetFreqMHz: Array of target frequencies in order for the Clock Wizards driving the PR region
    :return: 0 on success or appropriate error number
    """
    libc.xclReClock2.restype = ctypes.c_int
    # NOTE(review): the C API takes a pointer to an array of frequencies,
    # but c_uint here marshals a single scalar — confirm against xclhal2.h.
    libc.xclReClock2.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_uint]
    return libc.xclReClock2(handle, region, targetFreqMHz)
def xclLockDevice(handle):
    """
    Get exclusive ownership of the device

    :param handle: (xclDeviceHandle) device handle
    :return: 0 on success or appropriate error number

    The lock is necessary before performing buffer migration, register
    access or bitstream downloads.
    """
    libc.xclLockDevice.restype = ctypes.c_int
    # Fixed: the original assigned 'argtype' (singular), which ctypes
    # silently ignores; the prototype was never actually declared.
    libc.xclLockDevice.argtypes = [xclDeviceHandle]
    return libc.xclLockDevice(handle)
def xclUnlockDevice(handle):
    """
    xclUnlockDevice() - Release exclusive ownership of the device

    :param handle: (xclDeviceHandle) device handle
    :return: 0 on success or appropriate error number
    """
    libc.xclUnlockDevice.restype = ctypes.c_int
    # Fixed: 'argtype' (singular) is ignored by ctypes; use 'argtypes'.
    libc.xclUnlockDevice.argtypes = [xclDeviceHandle]
    return libc.xclUnlockDevice(handle)
def xclOpenContext(handle, xclbinId, ipIndex, shared):
    """
    xclOpenContext() - Create shared/exclusive context on compute units

    :param handle: Device handle
    :param xclbinId: UUID of the xclbin image running on the device
        (a uuid.UUID — its .bytes are marshalled to the C char*)
    :param ipIndex: IP/CU index in the IP LAYOUT array
    :param shared: Shared access or exclusive access
    :return: 0 on success or appropriate error number

    The context is necessary before submitting execution jobs using xclExecBuf(). Contexts may be
    exclusive or shared. Allocation of exclusive contexts on a compute unit would succeed
    only if another client has not already setup up a context on that compute unit. Shared
    contexts can be concurrently allocated by many processes on the same compute units.
    """
    libc.xclOpenContext.restype = ctypes.c_int
    libc.xclOpenContext.argtypes = [xclDeviceHandle, ctypes.c_char_p, ctypes.c_uint, ctypes.c_bool]
    return libc.xclOpenContext(handle, xclbinId.bytes, ipIndex, shared)
def xclCloseContext(handle, xclbinId, ipIndex):
    """
    xclCloseContext() - Close previously opened context

    :param handle: Device handle
    :param xclbinId: UUID of the xclbin image running on the device
        (a uuid.UUID — its .bytes are marshalled to the C char*)
    :param ipIndex: IP/CU index in the IP LAYOUT array
    :return: 0 on success or appropriate error number

    Close a previously allocated shared/exclusive context for a compute unit.
    """
    libc.xclCloseContext.restype = ctypes.c_int
    libc.xclCloseContext.argtypes = [xclDeviceHandle, ctypes.c_char_p, ctypes.c_uint]
    return libc.xclCloseContext(handle, xclbinId.bytes, ipIndex)
def xclUpgradeFirmware(handle, fileName):
    """
    Update the device BPI PROM with new image

    :param handle: Device handle
    :param fileName: firmware image path, marshalled as a raw C pointer
    :return: 0 on success or appropriate error number
    """
    libc.xclUpgradeFirmware.restype = ctypes.c_int
    libc.xclUpgradeFirmware.argtypes = [xclDeviceHandle, ctypes.c_void_p]
    return libc.xclUpgradeFirmware(handle, fileName)
def xclUpgradeFirmware2(handle, file1, file2):
    """
    Update the device BPI PROM with new image with clearing bitstream

    :param handle: Device handle
    :param file1: primary firmware image path (raw C pointer)
    :param file2: clearing bitstream path (raw C pointer)
    :return: 0 on success or appropriate error number
    """
    libc.xclUpgradeFirmware2.restype = ctypes.c_int
    libc.xclUpgradeFirmware2.argtypes = [xclDeviceHandle, ctypes.c_void_p, ctypes.c_void_p]
    return libc.xclUpgradeFirmware2(handle, file1, file2)
def xclUpgradeFirmwareXSpi (handle, fileName, index):
    """
    Update the device SPI PROM with new image

    :param handle: Device handle
    :param fileName: firmware image path (raw C pointer)
    :param index: SPI PROM index to flash
    :return: 0 on success or appropriate error number
    """
    libc.xclUpgradeFirmwareXSpi.restype = ctypes.c_int
    libc.xclUpgradeFirmwareXSpi.argtypes = [xclDeviceHandle, ctypes.c_void_p, ctypes.c_int]
    return libc.xclUpgradeFirmwareXSpi(handle, fileName, index)
def xclBootFPGA(handle):
    """
    Boot the FPGA from PROM

    :param handle: Device handle
    :return: 0 on success or appropriate error number
    """
    libc.xclBootFPGA.restype = ctypes.c_int
    # Fixed: 'argtype' (singular) is ignored by ctypes; use 'argtypes'.
    libc.xclBootFPGA.argtypes = [xclDeviceHandle]
    return libc.xclBootFPGA(handle)
def xclRemoveAndScanFPGA():
    """
    Write to /sys/bus/pci/devices/<deviceHandle>/remove and initiate a pci rescan by
    writing to /sys/bus/pci/rescan.

    :return: 0 on success or appropriate error number
    """
    libc.xclRemoveAndScanFPGA.restype = ctypes.c_int
    return libc.xclRemoveAndScanFPGA()
def xclAllocBO(handle, size, domain, flags):
    """
    Allocate a BO of requested size with appropriate flags

    :param handle: (xclDeviceHandle) device handle
    :param size: (size_t) Size of buffer
    :param domain: (xclBOKind) Memory domain
    :param flags: (unsigned int) Specify bank information, etc
    :return: BO handle (unsigned int)
    """
    libc.xclAllocBO.restype = ctypes.c_uint
    libc.xclAllocBO.argtypes = [xclDeviceHandle, ctypes.c_size_t, ctypes.c_int, ctypes.c_uint]
    return libc.xclAllocBO(handle, size, domain, flags)
def xclAllocUserPtrBO(handle, userptr, size, flags):
    """
    Allocate a BO using userptr provided by the user

    :param handle: Device handle
    :param userptr: Pointer to 4K aligned user memory
    :param size: Size of buffer
    :param flags: Specify bank information, etc
    :return: BO handle (unsigned int)
    """
    libc.xclAllocUserPtrBO.restype = ctypes.c_uint
    libc.xclAllocUserPtrBO.argtypes = [xclDeviceHandle, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_uint]
    return libc.xclAllocUserPtrBO(handle, userptr, size, flags)
def xclFreeBO(handle, boHandle):
    """
    Free a previously allocated BO

    :param handle: device handle
    :param boHandle: BO handle
    :return: None (the C function returns void)
    """
    libc.xclFreeBO.restype = None
    libc.xclFreeBO.argtypes = [xclDeviceHandle, ctypes.c_uint]
    libc.xclFreeBO(handle, boHandle)
def xclWriteBO(handle, boHandle, src, size, seek):
    """
    Copy-in user data to host backing storage of BO

    :param handle: Device handle
    :param boHandle: BO handle
    :param src: Source data pointer
    :param size: Size of data to copy
    :param seek: Offset within the BO
    :return: 0 on success or appropriate error number
    """
    libc.xclWriteBO.restype = ctypes.c_int
    libc.xclWriteBO.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_size_t]
    return libc.xclWriteBO(handle, boHandle, src, size, seek)
def xclReadBO(handle, boHandle, dst, size, skip):
    """
    Copy-out user data from host backing storage of BO

    :param handle: Device handle
    :param boHandle: BO handle
    :param dst: Destination data pointer
    :param size: Size of data to copy
    :param skip: Offset within the BO
    :return: 0 on success or appropriate error number
    """
    libc.xclReadBO.restype = ctypes.c_int
    libc.xclReadBO.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_size_t]
    return libc.xclReadBO(handle, boHandle, dst, size, skip)
def xclMapBO(handle, boHandle, write, buf_type='char', buf_size=1):
    """
    Memory map BO into user's address space

    :param handle: (xclDeviceHandle) device handle
    :param boHandle: (unsigned int) BO handle
    :param write: (boolean) READ only or READ/WRITE mapping
    :param buf_type: element type of the mapped buffer ('char' or 'int')
    :param buf_size: number of elements (ignored for 'char': the BO's own
        size, queried via xclGetBOProperties, is used instead)
    :return: ctypes pointer to the memory mapped buffer

    Map the contents of the buffer object into host memory.
    To unmap the buffer call POSIX munmap() on the mapped pointer.
    Because a raw void* loses size/type information, the ctypes restype is
    declared from the requested element type and count.
    """
    # Fixed: the original compared strings/ints with 'is', which relies on
    # CPython interning and is not guaranteed to work; use '=='.
    if buf_type == 'char':
        # Size the mapping from the BO's actual size.
        prop = xclBOProperties()
        xclGetBOProperties(handle, boHandle, prop)
        libc.xclMapBO.restype = ctypes.POINTER(ctypes.c_char * prop.size)
    elif buf_type == 'int' and buf_size == 1:
        libc.xclMapBO.restype = ctypes.POINTER(ctypes.c_int)
    elif buf_type == 'int':
        libc.xclMapBO.restype = ctypes.POINTER(ctypes.c_int * buf_size)
    else:
        print("ERROR: This data type is not supported ")
    libc.xclMapBO.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_bool]
    ptr = libc.xclMapBO(handle, boHandle, write)
    return ptr
def xclSyncBO(handle, boHandle, direction, size, offset):
    """
    Synchronize buffer contents in requested direction

    :param handle: (xclDeviceHandle) device handle
    :param boHandle: (unsigned int) BO handle
    :param direction: (xclBOSyncDirection) To device or from device
    :param size: (size_t) Size of data to synchronize
    :param offset: (size_t) Offset within the BO
    :return: 0 on success or standard errno
    """
    libc.xclSyncBO.restype = ctypes.c_uint
    libc.xclSyncBO.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_int, ctypes.c_size_t, ctypes.c_size_t]
    return libc.xclSyncBO(handle, boHandle, direction, size, offset)
def xclCopyBO(handle, dstBoHandle, srcBoHandle, size, dst_offset, src_offset):
    """
    Copy device buffer contents to another buffer

    :param handle: Device handle
    :param dstBoHandle: Destination BO handle
    :param srcBoHandle: Source BO handle
    :param size: Size of data to synchronize
    :param dst_offset: dst Offset within the BO
    :param src_offset: src Offset within the BO
    :return: 0 on success or standard errno
    """
    libc.xclCopyBO.restype = ctypes.c_int
    # Fixed: src_offset is a size_t in the C API; the original declared it
    # as c_uint (truncating on 64-bit offsets).
    libc.xclCopyBO.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_uint,
                               ctypes.c_size_t, ctypes.c_size_t, ctypes.c_size_t]
    # Fixed: the original dropped the C return code despite the documented
    # "0 on success" contract.
    return libc.xclCopyBO(handle, dstBoHandle, srcBoHandle, size, dst_offset, src_offset)
def xclExportBO(handle, boHandle):
    """
    Obtain DMA-BUF file descriptor for a BO

    :param handle: Device handle
    :param boHandle: BO handle which needs to be exported
    :return: File handle to the BO or standard errno
    """
    libc.xclExportBO.restype = ctypes.c_int
    libc.xclExportBO.argtypes = [xclDeviceHandle, ctypes.c_uint]
    return libc.xclExportBO(handle, boHandle)
def xclImportBO(handle, fd, flags):
    """
    Obtain BO handle for a BO represented by DMA-BUF file descriptor

    :param handle: Device handle
    :param fd: File handle to foreign BO owned by another device which needs to be imported
    :param flags: Unused
    :return: BO handle of the imported BO

    Import a BO exported by another device.
    This operation is backed by Linux DMA-BUF framework
    """
    libc.xclImportBO.restype = ctypes.c_int
    libc.xclImportBO.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_uint]
    # Fixed: the original dropped the imported BO handle, returning None.
    return libc.xclImportBO(handle, fd, flags)
def xclGetBOProperties(handle, boHandle, properties):
    """
    Obtain xclBOProperties struct for a BO

    :param handle: (xclDeviceHandle) device handle
    :param boHandle: (unsigned int) BO handle
    :param properties: xclBOProperties record filled in place
    :return: 0 on success
    """
    libc.xclGetBOProperties.restype = ctypes.c_int
    libc.xclGetBOProperties.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.POINTER(xclBOProperties)]
    return libc.xclGetBOProperties(handle, boHandle, properties)
def xclUnmgdPread(handle, flags, buf, size, offeset):
    """
    Perform unmanaged device memory read operation

    :param handle: Device handle
    :param flags: Unused
    :param buf: Destination data pointer
    :param size: Size of data to copy
    :param offeset: Absolute offset inside device (parameter name keeps the
        original misspelling for keyword-argument compatibility)
    :return: size of bytes read or appropriate error number

    This API may be used to perform DMA operation from absolute location specified. Users
    may use this if they want to perform their own device memory management -- not using the buffer
    object (BO) framework defined before.
    """
    libc.xclUnmgdPread.restype = ctypes.c_size_t
    libc.xclUnmgdPread.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_uint64]
    return libc.xclUnmgdPread(handle, flags, buf, size, offeset)
def xclUnmgdPwrite(handle, flags, buf, size, offset):
    """
    Perform unmanaged device memory write operation

    :param handle: Device handle
    :param flags: Unused
    :param buf: Source data pointer
    :param size: Size of data to copy
    :param offset: Absolute offset inside device
    :return: size of bytes written or appropriate error number

    This API may be used to perform DMA operation to an absolute location specified. Users
    may use this if they want to perform their own device memory management -- not using the buffer
    object (BO) framework defined before.
    """
    libc.xclUnmgdPwrite.restype = ctypes.c_size_t
    libc.xclUnmgdPwrite.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_uint64]
    return libc.xclUnmgdPwrite(handle, flags, buf, size, offset)
def xclWrite(handle, space, offset, hostBuf, size):
    """
    Perform register write operation
    :param handle: Device handle
    :param space: Address space
    :param offset: Offset in the address space
    :param hostBuf: Source data pointer
    :param size: Size of data to copy
    :return: size of bytes written or appropriate error number
    This API may be used to write to device registers exposed on PCIe BAR. Offset is relative to the
    the address space. A device may have many address spaces.
    This API will be deprecated in future. Please use this API only for IP bringup/debugging. For
    execution management please use XRT Compute Unit Execution Management APIs defined below
    """
    write_fn = libc.xclWrite
    write_fn.restype = ctypes.c_size_t
    write_fn.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_uint64,
                         ctypes.c_void_p, ctypes.c_size_t]
    return write_fn(handle, space, offset, hostBuf, size)
def xclRead(handle, space, offset, hostBuf, size):
    """
    Perform register read operation
    :param handle: Device handle
    :param space: Address space
    :param offset: Offset in the address space
    :param hostBuf: Destination data pointer
    :param size: Size of data to copy
    :return: size of bytes read or appropriate error number
    This API may be used to read from device registers exposed on PCIe BAR. Offset is relative to the
    the address space. A device may have many address spaces.
    This API will be deprecated in future. Please use this API only for IP bringup/debugging. For
    execution management please use XRT Compute Unit Execution Management APIs defined below
    """
    libc.xclRead.restype = ctypes.c_size_t
    libc.xclRead.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_size_t]
    return libc.xclRead(handle, space, offset, hostBuf, size)
def xclExecBuf(handle, cmdBO):
    """
    Submit an execution request to the embedded (or software) scheduler.

    :param handle: Device handle
    :param cmdBO: BO handle containing command packet
    :return: 0 or standard error number

    The exec buffer layout is defined by struct ert_packet (see *ert.h*).
    The BO should have been allocated with the DRM_XOCL_BO_EXECBUF flag.
    """
    exec_fn = libc.xclExecBuf
    exec_fn.restype = ctypes.c_int
    exec_fn.argtypes = [xclDeviceHandle, ctypes.c_uint]
    return exec_fn(handle, cmdBO)
def xclExecBufWithWaitList(handle, cmdBO, num_bo_in_wait_list, bo_wait_list):
    """
    Submit an execution request to the embedded (or software) scheduler
    :param handle: Device handle
    :param cmdBO: BO handle containing command packet
    :param num_bo_in_wait_list: Number of BO handles in wait list
    :param bo_wait_list: BO handles that must complete execution before cmdBO is started
    :return: 0 or standard error number
    Submit an exec buffer for execution. The BO handles in the wait
    list must complete execution before cmdBO is started. The BO
    handles in the wait list must have been submitted prior to this
    call to xclExecBufWithWaitList.
    """
    libc.xclExecBufWithWaitList.restype = ctypes.c_int
    libc.xclExecBufWithWaitList.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_size_t, ctypes.POINTER(ctypes.c_uint)]
    return libc.xclExecBufWithWaitList(handle, cmdBO, num_bo_in_wait_list, bo_wait_list)
def xclExecWait(handle, timeoutMilliSec):
    """
    Wait for one or more execution events on the device.

    :param handle: Device handle
    :param timeoutMilliSec: How long to wait for
    :return: Same code as poll system call

    Waits for a notification from the hardware; essentially calls the "poll"
    system call on the driver file handle, so the return value has the same
    semantics. If the return value is > 0 the caller should check the status
    of submitted exec buffers.
    """
    wait_fn = libc.xclExecWait
    wait_fn.restype = ctypes.c_int
    wait_fn.argtypes = [xclDeviceHandle, ctypes.c_int]
    return wait_fn(handle, timeoutMilliSec)
def xclRegisterInterruptNotify(handle, userInterrupt, fd):
    """
    Register an *eventfd* file handle for a MSIX interrupt
    :param handle: Device handle
    :param userInterrupt: MSIX interrupt number
    :param fd: Eventfd handle
    :return: 0 on success or standard errno
    Support for non managed interrupts (interrupts from custom IPs). fd should be obtained from
    eventfd system call. Caller should use standard poll/read eventfd framework in order to wait for
    interrupts. The handles are automatically unregistered on process exit.
    """
    libc.xclRegisterInterruptNotify.restype = ctypes.c_int
    libc.xclRegisterInterruptNotify.argtypes = [xclDeviceHandle, ctypes.c_uint, ctypes.c_int]
    return libc.xclRegisterInterruptNotify(handle, userInterrupt, fd)
class xclStreamContextFlags:
    # Flag bits for xclQueueContext.flags.
    XRT_QUEUE_FLAG_POLLING = (1 << 2)  # queue operates in polling mode
class xclQueueContext(ctypes.Structure):
    # structure to describe a Queue; mirrors the native xclQueueContext
    # layout so it can be passed by pointer to xclCreateWriteQueue /
    # xclCreateReadQueue.
    _fields_ = [
        ("type", ctypes.c_uint32),       # queue type
        ("state", ctypes.c_uint32),      # queue state
        ("route", ctypes.c_uint64),      # route id
        ("flow", ctypes.c_uint64),       # flow id
        ("qsize", ctypes.c_uint32),      # queue size (number of descriptors)
        ("desc_size", ctypes.c_uint32),  # descriptor size
        ("flags", ctypes.c_uint64)       # see xclStreamContextFlags
    ]
def xclCreateWriteQueue(handle, q_ctx, q_hdl):
    """
    Create Write Queue
    :param handle: Device handle
    :param q_ctx: Queue Context
    :param q_hdl: Queue handle
    :return: 0 on success or standard error number
    This is used to create queue based on information provided in Queue context. Queue handle is generated if creation
    succeeds.
    This feature will be enabled in a future release.
    """
    # NOTE(review): q_hdl is declared as a plain c_uint64 here; if the native
    # API returns the handle through a pointer, callers must pass a value
    # ctypes can coerce -- confirm against the native signature.
    libc.xclCreateWriteQueue.restype = ctypes.c_int
    libc.xclCreateWriteQueue.argtypes = [xclDeviceHandle, ctypes.POINTER(xclQueueContext), ctypes.c_uint64]
    return libc.xclCreateWriteQueue(handle, q_ctx, q_hdl)
def xclCreateReadQueue(handle, q_ctx, q_hdl):
    """
    Create Read Queue
    :param handle: Device handle
    :param q_ctx: Queue Context
    :param q_hdl: Queue handle
    :return: 0 on success or standard error number
    This is used to create queue based on information provided in Queue context. Queue handle is generated if creation
    succeeds.
    This feature will be enabled in a future release.
    """
    # NOTE(review): same q_hdl typing caveat as xclCreateWriteQueue -- confirm
    # against the native signature.
    libc.xclCreateReadQueue.restype = ctypes.c_int
    libc.xclCreateReadQueue.argtypes = [xclDeviceHandle, ctypes.POINTER(xclQueueContext), ctypes.c_uint64]
    return libc.xclCreateReadQueue(handle, q_ctx, q_hdl)
def xclAllocQDMABuf(handle, size, buf_hdl):
    """
    Allocate a DMA buffer used for queue read and write.

    :param handle: Device handle
    :param size: Buffer size
    :param buf_hdl: Buffer handle
    :return: buffer pointer

    This feature will be enabled in a future release.
    """
    # Fix: the original assigned 'restypes' (a typo). ctypes silently stores
    # unknown attributes, so the result type stayed the default c_int, which
    # truncates a 64-bit pointer; 'restype' is the attribute ctypes honours.
    # (The docstring also had :param size:/:param buf_hdl: descriptions
    # swapped; corrected above.)
    libc.xclAllocQDMABuf.restype = ctypes.c_void_p
    libc.xclAllocQDMABuf.argtypes = [xclDeviceHandle, ctypes.c_size_t, ctypes.c_uint64]
    return libc.xclAllocQDMABuf(handle, size, buf_hdl)
def xclFreeQDMABuf(handle, buf_hdl):
    """
    Free a DMA buffer previously allocated with xclAllocQDMABuf.

    :param handle: Device handle
    :param buf_hdl: Buffer handle
    :return: 0 on success or standard error number

    This feature will be enabled in a future release.
    """
    # Fix: 'restypes' was a typo; only 'restype' is honoured by ctypes.
    # (The original docstring was also copy-pasted from xclAllocQDMABuf and
    # documented a nonexistent 'size' parameter; corrected above.)
    libc.xclFreeQDMABuf.restype = ctypes.c_int
    libc.xclFreeQDMABuf.argtypes = [xclDeviceHandle, ctypes.c_uint64]
    return libc.xclFreeQDMABuf(handle, buf_hdl)
def xclDestroyQueue(handle, q_hdl):
    """
    Destroy Queue

    :param handle: Device handle
    :param q_hdl: Queue handle
    :return: 0 on success; -EBUSY if the Queue is in running state

    This function destroys the Queue and releases all resources.
    This feature will be enabled in a future release.
    """
    # Fix: 'restypes' was a typo; only 'restype' is honoured by ctypes.
    libc.xclDestroyQueue.restype = ctypes.c_int
    libc.xclDestroyQueue.argtypes = [xclDeviceHandle, ctypes.c_uint64]
    return libc.xclDestroyQueue(handle, q_hdl)
def xclModifyQueue(handle, q_hdl):
    """
    Modify Queue

    :param handle: Device handle
    :param q_hdl: Queue handle
    :return: 0 on success or standard error number

    This function modifies the Queue context on the fly. Modifying rid implies
    to program hardware traffic manager to connect Queue to the kernel pipe.
    """
    # Fix: 'restypes' was a typo; only 'restype' is honoured by ctypes.
    libc.xclModifyQueue.restype = ctypes.c_int
    libc.xclModifyQueue.argtypes = [xclDeviceHandle, ctypes.c_uint64]
    return libc.xclModifyQueue(handle, q_hdl)
def xclStartQueue(handle, q_hdl):
    """
    Set Queue to running state.

    :param handle: Device handle
    :param q_hdl: Queue handle
    :return: 0 on success or standard error number

    A running queue starts to process Read and Write requests.
    """
    # Fix: 'restypes' was a typo; only 'restype' is honoured by ctypes.
    libc.xclStartQueue.restype = ctypes.c_int
    libc.xclStartQueue.argtypes = [xclDeviceHandle, ctypes.c_uint64]
    return libc.xclStartQueue(handle, q_hdl)
def xclStopQueue(handle, q_hdl):
    """
    Set Queue to init state.

    :param handle: Device handle
    :param q_hdl: Queue handle
    :return: 0 on success or standard error number

    All pending read and write requests will be flushed; wr_complete and
    rd_complete will be called with an error wbe for flushed requests.
    """
    # Fix: 'restypes' was a typo; only 'restype' is honoured by ctypes.
    libc.xclStopQueue.restype = ctypes.c_int
    libc.xclStopQueue.argtypes = [xclDeviceHandle, ctypes.c_uint64]
    return libc.xclStopQueue(handle, q_hdl)
class anonymous_union(ctypes.Union):
    # Union of the two ways a request buffer is addressed: a host pointer
    # ("buf") or a 64-bit value ("va"). Used as the first field of
    # xclReqBuffer below.
    _fields_ = [
        ("buf", ctypes.POINTER(ctypes.c_char)),
        ("va", ctypes.c_uint64)
    ]
class xclReqBuffer(ctypes.Structure):
    # One buffer segment of a queue read/write request (xclQueueRequest.bufs).
    _fields_ = [
        ("anonymous_union", anonymous_union),  # buffer address (pointer or va)
        ("len", ctypes.c_uint64),              # length of this segment
        ("buf_hdl", ctypes.c_uint64),          # handle from xclAllocQDMABuf
    ]
class xclQueueRequestKind:
    # Operation code for xclQueueRequest.op_code.
    XCL_QUEUE_WRITE = 0
    XCL_QUEUE_READ = 1
class xclQueueRequestFlag:
    # Bit flags for xclQueueRequest.flag.
    XCL_QUEUE_REQ_EOT = 1 << 0          # add end-of-transmit marker
    XCL_QUEUE_REQ_CDH = 1 << 1          # request carries a custom data header
    XCL_QUEUE_REQ_NONBLOCKING = 1 << 2  # return immediately
    XCL_QUEUE_REQ_SILENT = 1 << 3       # no completion event (non-blocking only)
class xclQueueRequest(ctypes.Structure):
    # Describes one queue read/write request passed to xclWriteQueue /
    # xclReadQueue.
    _fields_ = [
        ("op_code", ctypes.c_int),               # xclQueueRequestKind value
        ("bufs", ctypes.POINTER(xclReqBuffer)),  # array of buffer segments
        ("buf_num", ctypes.c_uint32),            # number of entries in bufs
        ("cdh", ctypes.POINTER(ctypes.c_char)),  # custom data header
        ("cdh_len", ctypes.c_uint32),            # custom data header length
        ("flag", ctypes.c_uint32),               # xclQueueRequestFlag bits
        ("priv_data", ctypes.c_void_p),          # caller data echoed back in completion
        ("timeout", ctypes.c_uint32)             # timeout in ms
    ]
class xclReqCompletion(ctypes.Structure):
    # Completion record returned by xclPollCompletion for non-blocking
    # requests.
    _fields_ = [
        ("resv", ctypes.c_char*64),      # reserved
        ("priv_data", ctypes.c_void_p),  # priv_data from the original request
        ("nbytes", ctypes.c_size_t),     # bytes transferred
        ("err_code", ctypes.c_int)       # 0 on success, negative errno otherwise
    ]
def xclWriteQueue(handle, q_hdl, wr_req):
    """
    Write data to queue.

    :param handle: Device handle
    :param q_hdl: Queue handle
    :param wr_req: write request (pointer to xclQueueRequest)
    :return: number of bytes written or error code

    This function moves data from host memory. Based on the Queue type, data
    is written as stream or packet.
    stream Queue:
        No flag marks the end of buffer; the bytes written should equal the
        bytes requested unless an error happens.
    Packet Queue:
        A flag marks end of buffer, so the kernel may recognize that a packet
        has been received.
    Supports blocking (return only when the entire buf has been written, or
    error) and non-blocking (return 0 immediately) writes.
    EOT: end of transmit signal will be added at last.
    silent (only used with non-blocking): no event generated after write
    completes.
    """
    # Fix: the original argtypes listed only two entries even though the call
    # passes three arguments (handle, q_hdl, wr_req); q_hdl was therefore
    # unchecked/unconverted. Declare all three.
    libc.xclWriteQueue.restype = ctypes.c_ssize_t
    libc.xclWriteQueue.argtypes = [xclDeviceHandle, ctypes.c_uint64, ctypes.POINTER(xclQueueRequest)]
    return libc.xclWriteQueue(handle, q_hdl, wr_req)
def xclReadQueue(handle, q_hdl, wr_req):
    """
    Read data from queue.

    :param handle: Device handle
    :param q_hdl: Queue handle
    :param wr_req: read request (pointer to xclQueueRequest)
    :return: number of bytes read or error code

    This function moves data to host memory. Based on the Queue type, data is
    read as stream or packet.
    stream Queue:
        read until all the requested bytes are read or an error happens.
    blocking:
        return only when the requested bytes are read (stream) or the entire
        packet is read (packet).
    non-blocking:
        return 0 immediately.
    """
    # Fix: the original argtypes listed only two entries even though the call
    # passes three arguments (handle, q_hdl, wr_req); q_hdl was therefore
    # unchecked/unconverted. Declare all three. (The docstring also said
    # "write data to queue"; corrected above.)
    libc.xclReadQueue.restype = ctypes.c_ssize_t
    libc.xclReadQueue.argtypes = [xclDeviceHandle, ctypes.c_uint64, ctypes.POINTER(xclQueueRequest)]
    return libc.xclReadQueue(handle, q_hdl, wr_req)
def xclPollCompletion(handle, min_compl, max_compl, comps, actual_compl, timeout):
    """
    For non-blocking read/write, check if any request has been completed
    :param handle: device handle
    :param min_compl: unblock only when receiving min_compl completions
    :param max_compl: Max number of completions with one poll
    :param comps: pointer to an array of xclReqCompletion, filled on return
    :param actual_compl: pointer to an int receiving the completion count
    :param timeout: timeout
    :return: 0 on success or standard error number
    """
    libc.xclPollCompletion.restype = ctypes.c_int
    libc.xclPollCompletion.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_int, ctypes.POINTER(xclReqCompletion),
                                       ctypes.POINTER(ctypes.c_int), ctypes.c_int]
    return libc.xclPollCompletion(handle, min_compl, max_compl, comps, actual_compl, timeout)
def xclWriteHostEvent(handle, type,id):
    """
    Write a host event to the device.
    :param handle: device handle
    :param type: event type (note: shadows the builtin; kept for API compat)
    :param id: event id (note: shadows the builtin; kept for API compat)
    :return: None (native call returns void)
    """
    libc.xclWriteHostEvent.restype = None
    libc.xclWriteHostEvent.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_int]
    return libc.xclWriteHostEvent(handle, type, id)
def xclGetDeviceTimestamp(handle):
    """
    Get the current device timestamp.

    :param handle: Device handle
    :return: device timestamp (size_t)
    """
    # Fix: 'argtype' (singular) is ignored by ctypes; 'argtypes' expects a
    # sequence, so the argument was previously unchecked.
    libc.xclGetDeviceTimestamp.restype = ctypes.c_size_t
    libc.xclGetDeviceTimestamp.argtypes = [xclDeviceHandle]
    return libc.xclGetDeviceTimestamp(handle)
def xclGetDeviceClockFreqMHz(handle):
    """
    Get the device clock frequency in MHz.

    :param handle: Device handle
    :return: clock frequency in MHz (double)
    """
    # Fix: 'argtype' (singular) is ignored by ctypes; use 'argtypes'.
    libc.xclGetDeviceClockFreqMHz.restype = ctypes.c_double
    libc.xclGetDeviceClockFreqMHz.argtypes = [xclDeviceHandle]
    return libc.xclGetDeviceClockFreqMHz(handle)
def xclGetReadMaxBandwidthMBps(handle):
    """
    Get the maximum read bandwidth in MB/s.

    :param handle: Device handle
    :return: maximum read bandwidth in MB/s (double)
    """
    # Fix: 'argtype' (singular) is ignored by ctypes; use 'argtypes'.
    libc.xclGetReadMaxBandwidthMBps.restype = ctypes.c_double
    libc.xclGetReadMaxBandwidthMBps.argtypes = [xclDeviceHandle]
    return libc.xclGetReadMaxBandwidthMBps(handle)
def xclGetWriteMaxBandwidthMBps(handle):
    """
    Get the maximum write bandwidth in MB/s.

    :param handle: Device handle
    :return: maximum write bandwidth in MB/s (double)
    """
    # Fix: 'argtype' (singular) is ignored by ctypes; use 'argtypes'.
    libc.xclGetWriteMaxBandwidthMBps.restype = ctypes.c_double
    libc.xclGetWriteMaxBandwidthMBps.argtypes = [xclDeviceHandle]
    return libc.xclGetWriteMaxBandwidthMBps(handle)
def xclSetProfilingNumberSlots(handle, type, numSlots):
    """
    Set the number of profiling slots for the given monitor type.
    :param handle: device handle
    :param type: profiling monitor type
    :param numSlots: number of slots
    :return: None (native call returns void; no explicit return here)
    """
    libc.xclSetProfilingNumberSlots.restype = None
    libc.xclSetProfilingNumberSlots.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_uint32]
    libc.xclSetProfilingNumberSlots(handle, type, numSlots)
def xclGetProfilingNumberSlots(handle, type):
    """
    Get the number of profiling slots for the given monitor type.

    :param handle: device handle
    :param type: profiling monitor type
    :return: number of slots (uint32)
    """
    slots_fn = libc.xclGetProfilingNumberSlots
    slots_fn.restype = ctypes.c_uint32
    slots_fn.argtypes = [xclDeviceHandle, ctypes.c_int]
    return slots_fn(handle, type)
def xclGetProfilingSlotName(handle, type, slotnum, slotName, length):
    """
    Get the name of a profiling slot.
    :param handle: device handle
    :param type: profiling monitor type
    :param slotnum: slot number
    :param slotName: output char buffer that receives the name
    :param length: size of slotName
    :return: None (native call returns void; the name comes back via slotName)
    """
    libc.xclGetProfilingSlotName.restype = None
    libc.xclGetProfilingSlotName.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_uint32,
                                             ctypes.POINTER(ctypes.c_char), ctypes.c_uint32]
    return libc.xclGetProfilingSlotName(handle, type, slotnum, slotName, length)
def xclGetProfilingSlotProperties(handle, type, slotnum):
    """
    Get the properties bitmask of a profiling slot.

    :param handle: device handle
    :param type: profiling monitor type
    :param slotnum: slot number
    :return: slot properties (uint32)
    """
    props_fn = libc.xclGetProfilingSlotProperties
    props_fn.restype = ctypes.c_uint32
    props_fn.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_uint32]
    return props_fn(handle, type, slotnum)
def xclPerfMonClockTraining(handle, type):
    """
    Run clock training for the given performance monitor type.

    :param handle: device handle
    :param type: performance monitor type
    :return: size_t status from the native call
    """
    train_fn = libc.xclPerfMonClockTraining
    train_fn.restype = ctypes.c_size_t
    train_fn.argtypes = [xclDeviceHandle, ctypes.c_int]
    return train_fn(handle, type)
def xclPerfMonStartCounters(handle, type):
    """
    Start the performance monitor counters.

    :param handle: device handle
    :param type: performance monitor type
    :return: size_t status from the native call
    """
    start_fn = libc.xclPerfMonStartCounters
    start_fn.restype = ctypes.c_size_t
    start_fn.argtypes = [xclDeviceHandle, ctypes.c_int]
    return start_fn(handle, type)
def xclPerfMonStopCounters(handle, type):
    """
    Stop the performance monitor counters.

    :param handle: device handle
    :param type: performance monitor type
    :return: size_t status from the native call
    """
    stop_fn = libc.xclPerfMonStopCounters
    stop_fn.restype = ctypes.c_size_t
    stop_fn.argtypes = [xclDeviceHandle, ctypes.c_int]
    return stop_fn(handle, type)
def xclPerfMonReadCounters(handle, type, counterResults):
    """
    Read the performance monitor counters.
    :param handle: device handle
    :param type: performance monitor type
    :param counterResults: pointer to a results struct filled by the call
    :return: size_t status from the native call
    """
    libc.xclPerfMonReadCounters.restype = ctypes.c_size_t
    libc.xclPerfMonReadCounters.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.POINTER(xclcounterResults)] # defined in xclperf.h not implemented in python yet
    return libc.xclPerfMonReadCounters(handle, type, counterResults)
def xclDebugReadIPStatus(handle, type, debugResults):
    """
    Read debug IP status.

    :param handle: device handle
    :param type: debug IP type
    :param debugResults: pointer to a results buffer filled by the call
    :return: size_t status from the native call
    """
    # Fix: the original read 'libc.xclDebugReadIPStatusrestype' (missing dot),
    # which merely created an unrelated attribute on libc and left the real
    # restype at the default c_int.
    libc.xclDebugReadIPStatus.restype = ctypes.c_size_t
    libc.xclDebugReadIPStatus.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_void_p]
    return libc.xclDebugReadIPStatus(handle, type, debugResults)
def xclPerfMonStartTrace(handle, type, startTrigger):
    """
    Start performance monitor tracing.

    :param handle: device handle
    :param type: performance monitor type
    :param startTrigger: trigger word to start tracing
    :return: size_t status from the native call
    """
    start_fn = libc.xclPerfMonStartTrace
    start_fn.restype = ctypes.c_size_t
    start_fn.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.c_uint32]
    return start_fn(handle, type, startTrigger)
def xclPerfMonStopTrace(handle, type):
    """
    Stop performance monitor tracing.

    :param handle: device handle
    :param type: performance monitor type
    :return: size_t status from the native call
    """
    stop_fn = libc.xclPerfMonStopTrace
    stop_fn.restype = ctypes.c_size_t
    stop_fn.argtypes = [xclDeviceHandle, ctypes.c_int]
    return stop_fn(handle, type)
def xclPerfMonGetTraceCount(handle, type):
    """
    Get the number of trace samples currently available.

    :param handle: device handle
    :param type: performance monitor type
    :return: trace sample count (size_t)
    """
    count_fn = libc.xclPerfMonGetTraceCount
    count_fn.restype = ctypes.c_size_t
    count_fn.argtypes = [xclDeviceHandle, ctypes.c_int]
    return count_fn(handle, type)
def xclPerfMonReadTrace(handle, type, traceVector):
    """
    Read trace samples from the performance monitor.
    :param handle: device handle
    :param type: performance monitor type
    :param traceVector: pointer to a results vector filled by the call
    :return: size_t status from the native call
    """
    libc.xclPerfMonReadTrace.restype = ctypes.c_size_t
    libc.xclPerfMonReadTrace.argtypes = [xclDeviceHandle, ctypes.c_int, ctypes.POINTER(xclTraceResultsVector)] # defined in xclperf.h not implemented in python yet
    return libc.xclPerfMonReadTrace(handle, type, traceVector)
def xclMapMgmt(handle):
    """
    Map the management device.

    :param handle: device handle
    :return: pointer to the mapped region (char pointer)
    """
    # Fix: 'argtype' (singular) is ignored by ctypes; use 'argtypes'.
    libc.xclMapMgmt.restype = ctypes.POINTER(ctypes.c_char)
    libc.xclMapMgmt.argtypes = [xclDeviceHandle]
    return libc.xclMapMgmt(handle)
def xclOpenMgmt(deviceIndex):
    """
    Open the management device at the given index.

    :param deviceIndex: index of the device to open
    :return: device handle (xclDeviceHandle)
    """
    # Fix: 'argtype' (singular) is ignored by ctypes; use 'argtypes'.
    libc.xclOpenMgmt.restype = xclDeviceHandle
    libc.xclOpenMgmt.argtypes = [ctypes.c_uint]
    return libc.xclOpenMgmt(deviceIndex)
| 36.338488 | 164 | 0.710199 |
acfafdbcae1f0d84d044d65d53fe7e2956fecb07 | 7,064 | py | Python | tests/providers/amazon/aws/sensors/test_emr_job_flow.py | harishmk/airflow | 5abce471e0690c6b8d06ca25685b0845c5fd270f | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2019-01-14T16:39:27.000Z | 2019-01-24T21:53:13.000Z | tests/providers/amazon/aws/sensors/test_emr_job_flow.py | harishmk/airflow | 5abce471e0690c6b8d06ca25685b0845c5fd270f | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2018-10-05T18:00:01.000Z | 2019-03-27T22:17:44.000Z | tests/providers/amazon/aws/sensors/test_emr_job_flow.py | harishmk/airflow | 5abce471e0690c6b8d06ca25685b0845c5fd270f | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2018-09-26T19:37:33.000Z | 2019-03-01T21:28:04.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from unittest.mock import MagicMock, patch
from dateutil.tz import tzlocal
from airflow import AirflowException
from airflow.providers.amazon.aws.sensors.emr_job_flow import EmrJobFlowSensor
# Canned EMR DescribeCluster response: cluster still in the non-terminal
# 'STARTING' state, so the sensor should keep poking.
DESCRIBE_CLUSTER_RUNNING_RETURN = {
    'Cluster': {
        'Applications': [
            {'Name': 'Spark', 'Version': '1.6.1'}
        ],
        'AutoTerminate': True,
        'Configurations': [],
        'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
        'Id': 'j-27ZY9GBEEU2GU',
        'LogUri': 's3n://some-location/',
        'Name': 'PiCalc',
        'NormalizedInstanceHours': 0,
        'ReleaseLabel': 'emr-4.6.0',
        'ServiceRole': 'EMR_DefaultRole',
        'Status': {
            'State': 'STARTING',
            'StateChangeReason': {},
            'Timeline': {
                'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
        },
        'Tags': [
            {'Key': 'app', 'Value': 'analytics'},
            {'Key': 'environment', 'Value': 'development'}
        ],
        'TerminationProtected': False,
        'VisibleToAllUsers': True
    },
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
        'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
    }
}
# Canned EMR DescribeCluster response: successful terminal 'TERMINATED'
# state, so the sensor should succeed.
DESCRIBE_CLUSTER_TERMINATED_RETURN = {
    'Cluster': {
        'Applications': [
            {'Name': 'Spark', 'Version': '1.6.1'}
        ],
        'AutoTerminate': True,
        'Configurations': [],
        'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
        'Id': 'j-27ZY9GBEEU2GU',
        'LogUri': 's3n://some-location/',
        'Name': 'PiCalc',
        'NormalizedInstanceHours': 0,
        'ReleaseLabel': 'emr-4.6.0',
        'ServiceRole': 'EMR_DefaultRole',
        'Status': {
            'State': 'TERMINATED',
            'StateChangeReason': {},
            'Timeline': {
                'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
        },
        'Tags': [
            {'Key': 'app', 'Value': 'analytics'},
            {'Key': 'environment', 'Value': 'development'}
        ],
        'TerminationProtected': False,
        'VisibleToAllUsers': True
    },
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
        'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
    }
}
# Canned EMR DescribeCluster response: failed terminal
# 'TERMINATED_WITH_ERRORS' state, so the sensor should raise.
DESCRIBE_CLUSTER_TERMINATED_WITH_ERRORS_RETURN = {
    'Cluster': {
        'Applications': [
            {'Name': 'Spark', 'Version': '1.6.1'}
        ],
        'AutoTerminate': True,
        'Configurations': [],
        'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
        'Id': 'j-27ZY9GBEEU2GU',
        'LogUri': 's3n://some-location/',
        'Name': 'PiCalc',
        'NormalizedInstanceHours': 0,
        'ReleaseLabel': 'emr-4.6.0',
        'ServiceRole': 'EMR_DefaultRole',
        'Status': {
            'State': 'TERMINATED_WITH_ERRORS',
            'StateChangeReason': {
                'Code': 'BOOTSTRAP_FAILURE',
                'Message': 'Master instance (i-0663047709b12345c) failed attempting to '
                           'download bootstrap action 1 file from S3'
            },
            'Timeline': {
                'CreationDateTime': datetime.datetime(2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
        },
        'Tags': [
            {'Key': 'app', 'Value': 'analytics'},
            {'Key': 'environment', 'Value': 'development'}
        ],
        'TerminationProtected': False,
        'VisibleToAllUsers': True
    },
    'ResponseMetadata': {
        'HTTPStatusCode': 200,
        'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
    }
}
class TestEmrJobFlowSensor(unittest.TestCase):
    """Tests for EmrJobFlowSensor using a mocked boto3 EMR client."""

    def setUp(self):
        # Mock out the emr_client (moto has incorrect response)
        self.mock_emr_client = MagicMock()
        self.mock_emr_client.describe_cluster.side_effect = [
            DESCRIBE_CLUSTER_RUNNING_RETURN,
            DESCRIBE_CLUSTER_TERMINATED_RETURN
        ]
        mock_emr_session = MagicMock()
        mock_emr_session.client.return_value = self.mock_emr_client
        # Mock out the emr_client creator
        self.boto3_session_mock = MagicMock(return_value=mock_emr_session)

    def test_execute_calls_with_the_job_flow_id_until_it_reaches_a_terminal_state(self):
        self.mock_emr_client.describe_cluster.side_effect = [
            DESCRIBE_CLUSTER_RUNNING_RETURN,
            DESCRIBE_CLUSTER_TERMINATED_RETURN
        ]
        with patch('boto3.session.Session', self.boto3_session_mock):
            operator = EmrJobFlowSensor(
                task_id='test_task',
                poke_interval=0,
                job_flow_id='j-8989898989',
                aws_conn_id='aws_default'
            )
            operator.execute(None)
            # make sure we called twice
            self.assertEqual(self.mock_emr_client.describe_cluster.call_count, 2)
            # make sure it was called with the job_flow_id
            calls = [
                unittest.mock.call(ClusterId='j-8989898989'),
                unittest.mock.call(ClusterId='j-8989898989')
            ]
            self.mock_emr_client.describe_cluster.assert_has_calls(calls)

    def test_execute_calls_with_the_job_flow_id_until_it_reaches_failed_state_with_exception(self):
        self.mock_emr_client.describe_cluster.side_effect = [
            DESCRIBE_CLUSTER_RUNNING_RETURN,
            DESCRIBE_CLUSTER_TERMINATED_WITH_ERRORS_RETURN
        ]
        with patch('boto3.session.Session', self.boto3_session_mock):
            operator = EmrJobFlowSensor(
                task_id='test_task',
                poke_interval=2,
                job_flow_id='j-8989898989',
                aws_conn_id='aws_default'
            )
            with self.assertRaises(AirflowException):
                operator.execute(None)
            # make sure we called twice
            self.assertEqual(self.mock_emr_client.describe_cluster.call_count, 2)
            # make sure it was called with the job_flow_id.
            # Fix: the original used assert_called_once_with here, which also
            # asserts call_count == 1 and therefore contradicts the
            # call_count == 2 assertion two lines above (it would always
            # fail); assert_has_calls checks the arguments of both calls,
            # matching the success-path test.
            calls = [
                unittest.mock.call(ClusterId='j-8989898989'),
                unittest.mock.call(ClusterId='j-8989898989')
            ]
            self.mock_emr_client.describe_cluster.assert_has_calls(calls)
# Allow the test module to be executed directly.
if __name__ == '__main__':
    unittest.main()
| 35.676768 | 103 | 0.604473 |
acfafde9e0f74d4e3ad6f2ee8ada9da3df94f5b9 | 21,057 | py | Python | tensorflow/python/kernel_tests/map_stage_op_test.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/python/kernel_tests/map_stage_op_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/python/kernel_tests/map_stage_op_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class MapStageTest(test.TestCase):
  def testSimple(self):
    # Stage a value keyed by `pi`, then get a previously staged key `gi`
    # and run some GPU math on the retrieved value.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)  # key to put
        gi = array_ops.placeholder(dtypes.int64)  # key to get
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea([dtypes.float32])
        stage = stager.put(pi, [v], [0])
        k, y = stager.get(gi)
        y = math_ops.reduce_max(math_ops.matmul(y, y))
      G.finalize()
      with self.test_session(use_gpu=True, graph=G) as sess:
        # Prime key 0 so the first get below can succeed.
        sess.run(stage, feed_dict={x: -1, pi: 0})
        for i in range(10):
          # Step i stages key i+1 (value from x=i) and consumes key i
          # (value staged from x=i-1), hence the (i - 1) in the expectation.
          _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
          self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
  def testMultiple(self):
    # Same pipelined put/get pattern as testSimple, but each staged entry
    # carries two tensors (a scalar and a matrix).
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)  # key to put
        gi = array_ops.placeholder(dtypes.int64)  # key to get
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32])
        stage = stager.put(pi, [x, v], [0, 1])
        k, (z, y) = stager.get(gi)
        y = math_ops.reduce_max(z * math_ops.matmul(y, y))
      G.finalize()
      with self.test_session(use_gpu=True, graph=G) as sess:
        sess.run(stage, feed_dict={x: -1, pi: 0})
        for i in range(10):
          # Consumes key i, which was staged from x=i-1; the extra (i - 1)
          # factor relative to testSimple comes from the scalar z.
          _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
          self.assertAllClose(
              4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
  def testDictionary(self):
    # Same as testMultiple, but the staging area is declared with named
    # tensors and put/get use dictionaries instead of positional lists.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        pi = array_ops.placeholder(dtypes.int64)  # key to put
        gi = array_ops.placeholder(dtypes.int64)  # key to get
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea(
            [dtypes.float32, dtypes.float32],
            shapes=[[], [128, 128]],
            names=['x', 'v'])
        stage = stager.put(pi, {'x': x, 'v': v})
        key, ret = stager.get(gi)
        z = ret['x']
        y = ret['v']
        y = math_ops.reduce_max(z * math_ops.matmul(y, y))
      G.finalize()
      with self.test_session(use_gpu=True, graph=G) as sess:
        sess.run(stage, feed_dict={x: -1, pi: 0})
        for i in range(10):
          # Key i was staged from x=i-1, hence the (i - 1) factors.
          _, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
          self.assertAllClose(
              4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
  def testColocation(self):
    # The put op should be placed on the staging area's device (GPU), while
    # get/peek ops follow the device scope they are created in (CPU).
    gpu_dev = test.gpu_device_name()
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(gpu_dev):
        stager = data_flow_ops.MapStagingArea([dtypes.float32])
        y = stager.put(1, [v], [0])
        # gpu_device_name() may return the bare name; normalize for compare.
        expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
        self.assertEqual(y.device, expected_name)
      with ops.device('/cpu:0'):
        _, x = stager.get(1)
        y = stager.peek(1)[0]
        _, z = stager.get()
        self.assertEqual(x[0].device, '/device:CPU:0')
        self.assertEqual(y.device, '/device:CPU:0')
        self.assertEqual(z[0].device, '/device:CPU:0')
      G.finalize()
def testPeek(self):
with ops.Graph().as_default() as G:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
p = array_ops.placeholder(dtypes.int32, name='p')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[
dtypes.int32,
], shapes=[[]])
stage = stager.put(pi, [x], [0])
peek = stager.peek(gi)
size = stager.size()
G.finalize()
n = 10
with self.test_session(use_gpu=True, graph=G) as sess:
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})
for i in range(n):
self.assertTrue(sess.run(peek, feed_dict={gi: i})[0] == i)
self.assertTrue(sess.run(size) == 10)
  def testSizeAndClear(self):
    # size() reflects the number of staged entries; clear() empties the area.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32, name='x')
        pi = array_ops.placeholder(dtypes.int64)
        gi = array_ops.placeholder(dtypes.int64)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea(
            [dtypes.float32, dtypes.float32],
            shapes=[[], [128, 128]],
            names=['x', 'v'])
        stage = stager.put(pi, {'x': x, 'v': v})
        size = stager.size()
        clear = stager.clear()
      G.finalize()
      with self.test_session(use_gpu=True, graph=G) as sess:
        sess.run(stage, feed_dict={x: -1, pi: 3})
        self.assertEqual(sess.run(size), 1)
        sess.run(stage, feed_dict={x: -1, pi: 1})
        self.assertEqual(sess.run(size), 2)
        sess.run(clear)
        self.assertEqual(sess.run(size), 0)
  def testCapacity(self):
    # put() must block once `capacity` elements are staged; a background
    # thread stages until it blocks and we detect the stall via a queue
    # timeout.
    capacity = 3
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.int32, name='x')
        pi = array_ops.placeholder(dtypes.int64, name='pi')
        gi = array_ops.placeholder(dtypes.int64, name='gi')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.MapStagingArea(
            [
                dtypes.int32,
            ], capacity=capacity, shapes=[[]])
        stage = stager.put(pi, [x], [0])
        get = stager.get()
        size = stager.size()
      G.finalize()
      from six.moves import queue as Queue
      import threading
      queue = Queue.Queue()
      n = 8
      with self.test_session(use_gpu=True, graph=G) as sess:
        # Stage data in a separate thread which will block
        # when it hits the staging area's capacity and thus
        # not fill the queue with n tokens
        def thread_run():
          for i in range(n):
            sess.run(stage, feed_dict={x: i, pi: i})
            queue.put(0)
        t = threading.Thread(target=thread_run)
        t.daemon = True
        t.start()
        # Get tokens from the queue until a timeout occurs
        try:
          for i in range(n):
            queue.get(timeout=TIMEOUT)
        except Queue.Empty:
          pass
        # Should've timed out on the iteration 'capacity'
        if not i == capacity:
          self.fail("Expected to timeout on iteration '{}' "
                    "but instead timed out on iteration '{}' "
                    "Staging Area size is '{}' and configured "
                    "capacity is '{}'.".format(capacity, i, sess.run(size),
                                               capacity))
        # Should have capacity elements in the staging area
        self.assertTrue(sess.run(size) == capacity)
        # Clear the staging area completely
        for i in range(n):
          sess.run(get)
        self.assertTrue(sess.run(size) == 0)
def testMemoryLimit(self):
  """Producer must block once staged bytes reach `memory_limit`."""
  memory_limit = 512 * 1024  # 512K
  chunk = 200 * 1024  # 200K per staged tensor (comment previously said 256K)
  capacity = memory_limit // chunk  # == 2 full chunks fit under the limit
  with ops.Graph().as_default() as G:
    with ops.device('/cpu:0'):
      x = array_ops.placeholder(dtypes.uint8, name='x')
      pi = array_ops.placeholder(dtypes.int64, name='pi')
      gi = array_ops.placeholder(dtypes.int64, name='gi')  # NOTE(review): unused in this test
    with ops.device(test.gpu_device_name()):
      stager = data_flow_ops.MapStagingArea(
          [dtypes.uint8], memory_limit=memory_limit, shapes=[[]])
      stage = stager.put(pi, [x], [0])
      get = stager.get()
      size = stager.size()
  G.finalize()
  from six.moves import queue as Queue
  import threading
  import numpy as np
  queue = Queue.Queue()
  n = 8
  with self.test_session(use_gpu=True, graph=G) as sess:
    # Stage data in a separate thread which will block
    # when it hits the staging area's memory limit and thus
    # not fill the queue with n tokens
    def thread_run():
      for i in range(n):
        data = np.full(chunk, i, dtype=np.uint8)
        sess.run(stage, feed_dict={x: data, pi: i})
        queue.put(0)
    t = threading.Thread(target=thread_run)
    t.daemon = True
    t.start()
    # Get tokens from the queue until a timeout occurs
    try:
      for i in range(n):
        queue.get(timeout=TIMEOUT)
    except Queue.Empty:
      pass
    # Should've timed out on the iteration 'capacity'
    # (relies on `i` leaking out of the for loop above).
    if not i == capacity:
      self.fail("Expected to timeout on iteration '{}' "
                "but instead timed out on iteration '{}' "
                "Staging Area size is '{}' and configured "
                "capacity is '{}'.".format(capacity, i, sess.run(size),
                                           capacity))
    # Should have capacity elements in the staging area
    self.assertTrue(sess.run(size) == capacity)
    # Clear the staging area completely; this unblocks the producer thread.
    for i in range(n):
      sess.run(get)
    self.assertTrue(sess.run(size) == 0)
def testOrdering(self):
  """An ordered MapStagingArea yields entries in ascending key order."""
  import six
  import random
  n = 10
  with ops.Graph().as_default() as graph:
    with ops.device('/cpu:0'):
      value_ph = array_ops.placeholder(dtypes.int32, name='x')
      put_key_ph = array_ops.placeholder(dtypes.int64, name='pi')
      get_key_ph = array_ops.placeholder(dtypes.int64, name='gi')
    with ops.device(test.gpu_device_name()):
      area = data_flow_ops.MapStagingArea(
          [
              dtypes.int32,
          ], shapes=[[]], ordered=True)
      put_op = area.put(put_key_ph, [value_ph], [0])
      get_op = area.get()
      size_op = area.size()
  graph.finalize()
  with self.test_session(use_gpu=True, graph=graph) as sess:
    # Insert keys in descending order: n-1 .. 0.
    descending = list(reversed(six.moves.range(n)))
    for key in descending:
      sess.run(put_op, feed_dict={put_key_ph: key, value_ph: key})
    self.assertTrue(sess.run(size_op) == n)
    # Retrieval must come back ascending regardless of insertion order.
    for expected, key in enumerate(reversed(descending)):
      got_key, got_value = sess.run(get_op)
      self.assertTrue(expected == key == got_key == got_value)
    self.assertTrue(sess.run(size_op) == 0)
def testPartialDictInsert(self):
  """A keyed entry becomes complete only once every named tensor is put."""
  with ops.Graph().as_default() as G:
    with ops.device('/cpu:0'):
      x = array_ops.placeholder(dtypes.float32)
      f = array_ops.placeholder(dtypes.float32)
      v = array_ops.placeholder(dtypes.float32)
      pi = array_ops.placeholder(dtypes.int64)
      gi = array_ops.placeholder(dtypes.int64)
    with ops.device(test.gpu_device_name()):
      # Test barrier with dictionary
      stager = data_flow_ops.MapStagingArea(
          [dtypes.float32, dtypes.float32, dtypes.float32],
          names=['x', 'v', 'f'])
      stage_xf = stager.put(pi, {'x': x, 'f': f})
      stage_v = stager.put(pi, {'v': v})
      key, ret = stager.get(gi)
      size = stager.size()
      isize = stager.incomplete_size()
  G.finalize()
  with self.test_session(use_gpu=True, graph=G) as sess:
    # 0 complete and incomplete entries
    self.assertTrue(sess.run([size, isize]) == [0, 0])
    # Stage key 0, x and f tuple entries
    sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
    self.assertTrue(sess.run([size, isize]) == [0, 1])
    # Stage key 1, x and f tuple entries
    sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
    self.assertTrue(sess.run([size, isize]) == [0, 2])
    # Now complete key 0 with tuple entry v
    sess.run(stage_v, feed_dict={pi: 0, v: 1})
    # 1 complete and 1 incomplete entry
    self.assertTrue(sess.run([size, isize]) == [1, 1])
    # We can now obtain tuple associated with key 0
    self.assertTrue(
        sess.run([key, ret], feed_dict={
            gi: 0
        }) == [0, {
            'x': 1,
            'f': 2,
            'v': 1
        }])
    # 0 complete and 1 incomplete entry
    self.assertTrue(sess.run([size, isize]) == [0, 1])
    # Now complete key 1 with tuple entry v
    sess.run(stage_v, feed_dict={pi: 1, v: 3})
    # We can now obtain tuple associated with key 1
    self.assertTrue(
        sess.run([key, ret], feed_dict={
            gi: 1
        }) == [1, {
            'x': 1,
            'f': 2,
            'v': 3
        }])
def testPartialIndexInsert(self):
  """Same barrier semantics as testPartialDictInsert, addressed by index."""
  with ops.Graph().as_default() as G:
    with ops.device('/cpu:0'):
      x = array_ops.placeholder(dtypes.float32)
      f = array_ops.placeholder(dtypes.float32)
      v = array_ops.placeholder(dtypes.float32)
      pi = array_ops.placeholder(dtypes.int64)
      gi = array_ops.placeholder(dtypes.int64)
    with ops.device(test.gpu_device_name()):
      stager = data_flow_ops.MapStagingArea(
          [dtypes.float32, dtypes.float32, dtypes.float32])
      stage_xf = stager.put(pi, [x, f], [0, 2])
      stage_v = stager.put(pi, [v], [1])
      key, ret = stager.get(gi)
      size = stager.size()
      isize = stager.incomplete_size()
  G.finalize()
  with self.test_session(use_gpu=True, graph=G) as sess:
    # 0 complete and incomplete entries
    self.assertTrue(sess.run([size, isize]) == [0, 0])
    # Stage key 0, x and f tuple entries
    sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
    self.assertTrue(sess.run([size, isize]) == [0, 1])
    # Stage key 1, x and f tuple entries
    sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
    self.assertTrue(sess.run([size, isize]) == [0, 2])
    # Now complete key 0 with tuple entry v
    sess.run(stage_v, feed_dict={pi: 0, v: 1})
    # 1 complete and 1 incomplete entry
    self.assertTrue(sess.run([size, isize]) == [1, 1])
    # We can now obtain tuple associated with key 0
    # (returned in index order 0, 1, 2 => x, v, f).
    self.assertTrue(sess.run([key, ret], feed_dict={gi: 0}) == [0, [1, 1, 2]])
    # 0 complete and 1 incomplete entry
    self.assertTrue(sess.run([size, isize]) == [0, 1])
    # Now complete key 1 with tuple entry v
    sess.run(stage_v, feed_dict={pi: 1, v: 3})
    # We can now obtain tuple associated with key 1
    self.assertTrue(sess.run([key, ret], feed_dict={gi: 1}) == [1, [1, 3, 2]])
def testPartialDictGetsAndPeeks(self):
  """Peeks never consume; partial gets consume only the requested names."""
  with ops.Graph().as_default() as G:
    with ops.device('/cpu:0'):
      x = array_ops.placeholder(dtypes.float32)
      f = array_ops.placeholder(dtypes.float32)
      v = array_ops.placeholder(dtypes.float32)
      pi = array_ops.placeholder(dtypes.int64)
      pei = array_ops.placeholder(dtypes.int64)
      gi = array_ops.placeholder(dtypes.int64)
    with ops.device(test.gpu_device_name()):
      # Test barrier with dictionary
      stager = data_flow_ops.MapStagingArea(
          [dtypes.float32, dtypes.float32, dtypes.float32],
          names=['x', 'v', 'f'])
      stage_xf = stager.put(pi, {'x': x, 'f': f})
      stage_v = stager.put(pi, {'v': v})
      peek_xf = stager.peek(pei, ['x', 'f'])
      peek_v = stager.peek(pei, ['v'])
      key_xf, get_xf = stager.get(gi, ['x', 'f'])
      key_v, get_v = stager.get(gi, ['v'])
      # Keyless pop: takes the first complete entry.
      pop_key_xf, pop_xf = stager.get(indices=['x', 'f'])
      pop_key_v, pop_v = stager.get(pi, ['v'])
      size = stager.size()
      isize = stager.incomplete_size()
  G.finalize()
  with self.test_session(use_gpu=True, graph=G) as sess:
    # 0 complete and incomplete entries
    self.assertTrue(sess.run([size, isize]) == [0, 0])
    # Stage key 0, x and f tuple entries
    sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
    self.assertTrue(sess.run([size, isize]) == [0, 1])
    # Stage key 1, x and f tuple entries
    sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
    self.assertTrue(sess.run([size, isize]) == [0, 2])
    # Now complete key 0 with tuple entry v
    sess.run(stage_v, feed_dict={pi: 0, v: 1})
    # 1 complete and 1 incomplete entry
    self.assertTrue(sess.run([size, isize]) == [1, 1])
    # We can now peek at 'x' and 'f' values associated with key 0
    self.assertTrue(sess.run(peek_xf, feed_dict={pei: 0}) == {'x': 1, 'f': 2})
    # Peek at 'v' value associated with key 0
    self.assertTrue(sess.run(peek_v, feed_dict={pei: 0}) == {'v': 1})
    # Still 1 complete and 1 incomplete entry: peeks do not consume
    self.assertTrue(sess.run([size, isize]) == [1, 1])
    # We can now obtain 'x' and 'f' values associated with key 0
    self.assertTrue(
        sess.run([key_xf, get_xf], feed_dict={
            gi: 0
        }) == [0, {
            'x': 1,
            'f': 2
        }])
    # Still have 1 complete and 1 incomplete entry
    self.assertTrue(sess.run([size, isize]) == [1, 1])
    # We can no longer get 'x' and 'f' from key 0
    with self.assertRaises(errors.InvalidArgumentError) as cm:
      sess.run([key_xf, get_xf], feed_dict={gi: 0})
    exc_str = ("Tensor at index '0' for key '0' " 'has already been removed.')
    self.assertTrue(exc_str in cm.exception.message)
    # Obtain 'v' value associated with key 0
    self.assertTrue(
        sess.run([key_v, get_v], feed_dict={
            gi: 0
        }) == [0, {
            'v': 1
        }])
    # 0 complete and 1 incomplete entry
    self.assertTrue(sess.run([size, isize]) == [0, 1])
    # Now complete key 1 with tuple entry v
    sess.run(stage_v, feed_dict={pi: 1, v: 1})
    # 1 complete and 0 incomplete entries (comment previously wrong)
    self.assertTrue(sess.run([size, isize]) == [1, 0])
    # Pop without key to obtain 'x' and 'f' values associated with key 1
    self.assertTrue(sess.run([pop_key_xf, pop_xf]) == [1, {'x': 1, 'f': 2}])
    # Still 1 complete, 0 incomplete: only part of key 1 was consumed
    self.assertTrue(sess.run([size, isize]) == [1, 0])
    # We can now obtain 'v' value associated with key 1
    self.assertTrue(
        sess.run([pop_key_v, pop_v], feed_dict={
            pi: 1
        }) == [1, {
            'v': 1
        }])
    # Nothing is left
    self.assertTrue(sess.run([size, isize]) == [0, 0])
def testPartialIndexGets(self):
  """A fully staged tuple can be drained piecewise via index-based gets."""
  with ops.Graph().as_default() as graph:
    with ops.device('/cpu:0'):
      x = array_ops.placeholder(dtypes.float32)
      f = array_ops.placeholder(dtypes.float32)
      v = array_ops.placeholder(dtypes.float32)
      pi = array_ops.placeholder(dtypes.int64)
      pei = array_ops.placeholder(dtypes.int64)
      gi = array_ops.placeholder(dtypes.int64)
    with ops.device(test.gpu_device_name()):
      # Same scenario as the dict variant, but addressed by tuple index.
      area = data_flow_ops.MapStagingArea(
          [dtypes.float32, dtypes.float32, dtypes.float32])
      stage_all = area.put(pi, [x, v, f], [0, 1, 2])
      key_xf, get_xf = area.get(gi, [0, 2])
      key_v, get_v = area.get(gi, [1])
      size = area.size()
      isize = area.incomplete_size()
  graph.finalize()
  with self.test_session(use_gpu=True, graph=graph) as sess:
    # Stage one complete tuple under key 0.
    sess.run(stage_all, feed_dict={pi: 0, x: 1, f: 2, v: 3})
    self.assertTrue(sess.run([size, isize]) == [1, 0])
    # First partial get: indices 0 and 2.
    partial = sess.run([key_xf, get_xf], feed_dict={gi: 0})
    self.assertTrue(partial == [0, [1, 2]])
    # Index 1 of key 0 is still present.
    self.assertTrue(sess.run([size, isize]) == [1, 0])
    # Second partial get drains the remaining index.
    self.assertTrue(sess.run([key_v, get_v], feed_dict={gi: 0}) == [0, [3]])
    # The staging area is now empty.
    self.assertTrue(sess.run([size, isize]) == [0, 0])
if __name__ == '__main__':
  # Entry point: delegate test discovery and execution to the TF test runner.
  test.main()
| 35.811224 | 80 | 0.582894 |
acfafe1ebf5cf6ccdcc4180d1c0f43e6397e992e | 7,361 | py | Python | warehouse/accounts/interfaces.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | [
"Apache-2.0"
] | 4 | 2018-03-29T10:42:56.000Z | 2021-11-17T10:21:43.000Z | warehouse/accounts/interfaces.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | [
"Apache-2.0"
] | 258 | 2021-11-29T18:29:38.000Z | 2022-03-31T18:34:18.000Z | warehouse/accounts/interfaces.py | Dithn/warehouse | 953b77ecfc7dade203db423307539ea9d6115657 | [
"Apache-2.0"
] | 1 | 2020-12-01T21:12:24.000Z | 2020-12-01T21:12:24.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zope.interface import Attribute, Interface
class RateLimiterException(Exception):
    """Base class for rate-limit violations.

    Carries ``resets_in``, telling the caller how long until the limiter
    will accept requests again.
    """

    def __init__(self, *args, resets_in, **kwargs):
        # Keyword-only: callers must name the reset interval explicitly.
        self.resets_in = resets_in
        # ``__init__`` must return None; call super() purely for its
        # side effect instead of ``return``-ing its (None) result.
        super().__init__(*args, **kwargs)
class TooManyFailedLogins(RateLimiterException):
    """Rate-limit violation: too many failed login attempts."""

    pass


class TooManyEmailsAdded(RateLimiterException):
    """Rate-limit violation: too many emails added."""

    pass


class TokenException(Exception):
    """Base class for token processing failures."""

    pass


class TokenExpired(TokenException):
    """The supplied token is past its validity window."""

    pass


class TokenInvalid(TokenException):
    """The supplied token failed validation."""

    pass


class TokenMissing(TokenException):
    """No token was supplied."""

    pass
class IUserService(Interface):
    """zope.interface contract for the user/account service.

    Per zope.interface convention, method signatures omit ``self``.
    """

    def get_user(user_id):
        """
        Return the user object that represents the given userid, or None if
        there is no user for that ID.
        """

    def get_user_by_username(username):
        """
        Return the user object corresponding with the given username, or None
        if there is no user with that username.
        """

    def get_user_by_email(email):
        """
        Return the user object corresponding with the given email, or None
        if there is no user with that email.
        """

    def find_userid(username):
        """
        Find the unique user identifier for the given username or None if there
        is no user with the given username.
        """

    def check_password(user_id, password, *, tags=None):
        """
        Returns a boolean representing whether the given password is valid for
        the given userid.

        May have an optional list of tags, which allows identifying the purpose
        of checking the password.
        """

    def create_user(username, name, password):
        """
        Accepts a user object, and attempts to create a user with those
        attributes.

        A UserAlreadyExists Exception is raised if the user already exists.
        """

    def add_email(
        user_id, email_address, ip_address, primary=False, verified=False, public=False
    ):
        """
        Adds an email for the provided user_id
        """

    def update_user(user_id, **changes):
        """
        Updates the user object
        """

    def disable_password(user_id, reason=None):
        """
        Disables the given user's password, preventing further login until the user
        resets their password. If a reason was given, this will be persisted and reset
        when the user is re-enabled.
        """

    def is_disabled(user_id):
        """
        Checks if a user has been disabled, and returns a tuple of
        (IsDisabled: bool, Reason: Optional[DisableReason])
        """

    def has_two_factor(user_id):
        """
        Returns True if the user has any form of two factor
        authentication and is allowed to use it.
        """

    def has_totp(user_id):
        """
        Returns True if the user has a TOTP device provisioned.
        """

    def has_webauthn(user_id):
        """
        Returns True if the user has a security key provisioned.
        """

    def has_recovery_codes(user_id):
        """
        Returns True if the user has at least one valid recovery code.
        """

    def get_recovery_codes(user_id):
        """
        Returns RecoveryCode objects associated with the user.
        """

    def get_totp_secret(user_id):
        """
        Returns the user's TOTP secret as bytes.

        If the user doesn't have a TOTP secret or is not
        allowed to use a second factor, returns None.
        """

    def check_totp_value(user_id, totp_value, *, tags=None):
        """
        Returns True if the given TOTP code is valid.
        """

    def add_webauthn(user_id, **kwargs):
        """
        Adds a WebAuthn credential to the given user.

        Returns None if the user already has this credential.
        """

    def get_webauthn_credential_options(user_id, *, challenge, rp_name, rp_id):
        """
        Returns a dictionary of credential options suitable for beginning the WebAuthn
        provisioning process for the given user.
        """

    def get_webauthn_assertion_options(user_id, *, challenge, rp_id):
        """
        Returns a dictionary of assertion options suitable for beginning the WebAuthn
        authentication process for the given user.
        """

    def verify_webauthn_credential(credential, *, challenge, rp_id, origin):
        """
        Checks whether the given credential is valid, i.e. suitable for generating
        assertions during authentication.

        Returns the validated credential on success, raises
        webauthn.RegistrationRejectedException on failure.
        """

    def verify_webauthn_assertion(user_id, assertion, *, challenge, origin, rp_id):
        """
        Checks whether the given assertion was produced by the given user's WebAuthn
        device.

        Returns the updated signage count on success, raises
        webauthn.AuthenticationRejectedException on failure.
        """

    def get_webauthn_by_label(user_id, label):
        """
        Returns a WebAuthn credential for the given user by its label,
        or None if no credential for the user has this label.
        """

    def get_webauthn_by_credential_id(user_id, credential_id):
        """
        Returns a WebAuthn credential for the given user by its credential ID,
        or None if the user doesn't have a credential with this ID.
        """

    def record_event(user_id, *, tag, ip_address, additional=None):
        """
        Creates a new UserEvent for the given user with the given
        tag, IP address, and additional metadata.

        Returns the event.
        """

    def generate_recovery_codes(user_id):
        """
        Generates RecoveryCode objects for the given user.

        Returns a list of plain-text codes.
        """

    def check_recovery_code(user_id, code):
        """
        Checks if supplied code matches a valid hashed recovery code for the given user.

        Returns True if supplied recovery code is valid, and destroys stored code.
        """
class ITokenService(Interface):
    """zope.interface contract for serializing/deserializing signed tokens."""

    def dumps(data):
        """
        Generates a unique token based on the data provided
        """

    def loads(token):
        """
        Gets the data corresponding to the token provided
        """
class IPasswordBreachedService(Interface):
    """zope.interface contract for checking passwords against breach data."""

    # User-facing messages shown when a password fails the breach check.
    failure_message = Attribute("The message to describe the failure that occurred")
    failure_message_plain = Attribute(
        "The message to describe the failure that occurred in plain text"
    )

    def check_password(password, *, tags=None):
        """
        Returns a boolean indicating if the given password has been involved in a breach
        or is otherwise insecure.

        May have an optional list of tags, which allows identifying the purpose of
        checking the password.
        """
acfaff802f5063625ab3f7b72c4617534d3627d5 | 29,376 | py | Python | sample_project/env/lib/python3.9/_collections_abc.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | sample_project/env/lib/python3.9/_collections_abc.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | sample_project/env/lib/python3.9/_collections_abc.py | Istiakmorsalin/ML-Data-Science | 681e68059b146343ef55b0671432dc946970730d | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
GenericAlias = type(list[int])
EllipsisType = type(...)
def _f(): pass
FunctionType = type(_f)
del _f
__all__ = ["Awaitable", "Coroutine",
"AsyncIterable", "AsyncIterator", "AsyncGenerator",
"Hashable", "Iterable", "Iterator", "Generator", "Reversible",
"Sized", "Container", "Callable", "Collection",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# This module has been renamed from collections.abc to _collections_abc to
# speed up interpreter startup. Some of the types such as MutableMapping are
# required early but collections module imports a lot of other modules.
# See issue #19218
__name__ = "collections.abc"
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types might not be distinct
# and they may have their own implementation specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
longrange_iterator = type(iter(range(1 << 1000)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
generator = type((lambda: (yield))())
## coroutine ##
async def _coro(): pass
_coro = _coro()
coroutine = type(_coro)
_coro.close() # Prevent ResourceWarning
del _coro
## asynchronous generator ##
async def _ag(): yield
_ag = _ag()
async_generator = type(_ag)
del _ag
### ONE-TRICK PONIES ###
def _check_methods(C, *methods):
    """Return True if class *C* (via its MRO) defines every name in *methods*.

    A name mapped to None in a class __dict__ (e.g. ``__hash__ = None``)
    counts as explicitly disabled; any missing or disabled name makes the
    check return NotImplemented, which __subclasshook__ callers propagate.
    """
    mro = C.__mro__
    for method in methods:
        for B in mro:
            if method in B.__dict__:
                if B.__dict__[method] is None:
                    # Explicitly blocked (e.g. __hash__ = None).
                    return NotImplemented
                break
        else:
            # Name not found anywhere in the MRO.
            return NotImplemented
    return True
class Hashable(metaclass=ABCMeta):
    """ABC for objects implementing ``__hash__`` (structural check)."""

    __slots__ = ()

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Hashable:
            return _check_methods(C, "__hash__")
        return NotImplemented


class Awaitable(metaclass=ABCMeta):
    """ABC for objects usable in ``await`` expressions (define __await__)."""

    __slots__ = ()

    @abstractmethod
    def __await__(self):
        yield

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Awaitable:
            return _check_methods(C, "__await__")
        return NotImplemented

    # Support Awaitable[T] subscription.
    __class_getitem__ = classmethod(GenericAlias)
class Coroutine(Awaitable):
    """ABC for coroutine objects: awaitables with send/throw/close."""

    __slots__ = ()

    @abstractmethod
    def send(self, value):
        """Send a value into the coroutine.

        Return next yielded value or raise StopIteration.
        """
        raise StopIteration

    @abstractmethod
    def throw(self, typ, val=None, tb=None):
        """Raise an exception in the coroutine.

        Return next yielded value or raise StopIteration.
        """
        # Default behavior: materialize the exception (attaching the
        # traceback when supplied) and raise it in the caller.
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val

    def close(self):
        """Raise GeneratorExit inside coroutine.
        """
        try:
            self.throw(GeneratorExit)
        except (GeneratorExit, StopIteration):
            pass
        else:
            # A well-behaved coroutine must stop once GeneratorExit is thrown.
            raise RuntimeError("coroutine ignored GeneratorExit")

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Coroutine:
            return _check_methods(C, '__await__', 'send', 'throw', 'close')
        return NotImplemented


Coroutine.register(coroutine)
class AsyncIterable(metaclass=ABCMeta):
    """ABC for objects providing ``__aiter__`` (usable in ``async for``)."""

    __slots__ = ()

    @abstractmethod
    def __aiter__(self):
        return AsyncIterator()

    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncIterable:
            return _check_methods(C, "__aiter__")
        return NotImplemented

    # Support AsyncIterable[T] subscription.
    __class_getitem__ = classmethod(GenericAlias)


class AsyncIterator(AsyncIterable):
    """ABC for asynchronous iterators: add ``__anext__`` to AsyncIterable."""

    __slots__ = ()

    @abstractmethod
    async def __anext__(self):
        """Return the next item or raise StopAsyncIteration when exhausted."""
        raise StopAsyncIteration

    def __aiter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncIterator:
            return _check_methods(C, "__anext__", "__aiter__")
        return NotImplemented


class AsyncGenerator(AsyncIterator):
    """ABC for async generators: async iterators with asend/athrow/aclose."""

    __slots__ = ()

    async def __anext__(self):
        """Return the next item from the asynchronous generator.

        When exhausted, raise StopAsyncIteration.
        """
        return await self.asend(None)

    @abstractmethod
    async def asend(self, value):
        """Send a value into the asynchronous generator.

        Return next yielded value or raise StopAsyncIteration.
        """
        raise StopAsyncIteration

    @abstractmethod
    async def athrow(self, typ, val=None, tb=None):
        """Raise an exception in the asynchronous generator.

        Return next yielded value or raise StopAsyncIteration.
        """
        # Mirrors Coroutine.throw: build the exception and raise it here.
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val

    async def aclose(self):
        """Raise GeneratorExit inside coroutine.
        """
        try:
            await self.athrow(GeneratorExit)
        except (GeneratorExit, StopAsyncIteration):
            pass
        else:
            raise RuntimeError("asynchronous generator ignored GeneratorExit")

    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsyncGenerator:
            return _check_methods(C, '__aiter__', '__anext__',
                                  'asend', 'athrow', 'aclose')
        return NotImplemented


AsyncGenerator.register(async_generator)
class Iterable(metaclass=ABCMeta):
    """ABC for objects providing ``__iter__``."""

    __slots__ = ()

    @abstractmethod
    def __iter__(self):
        # Generator body (never yields) so the abstract default is itself
        # a valid, empty iterator.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            return _check_methods(C, "__iter__")
        return NotImplemented

    # Support Iterable[T] subscription.
    __class_getitem__ = classmethod(GenericAlias)


class Iterator(Iterable):
    """ABC for iterators: ``__next__`` plus self-returning ``__iter__``."""

    __slots__ = ()

    @abstractmethod
    def __next__(self):
        'Return the next item from the iterator. When exhausted, raise StopIteration'
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterator:
            return _check_methods(C, '__iter__', '__next__')
        return NotImplemented


# Register the concrete C-level iterator types captured above so that
# isinstance(iter(...), Iterator) holds for the built-in containers.
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(longrange_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Reversible(Iterable):
    """ABC for iterables that also support ``reversed()``."""

    __slots__ = ()

    @abstractmethod
    def __reversed__(self):
        # Empty-generator default, matching Iterable.__iter__.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Reversible:
            return _check_methods(C, "__reversed__", "__iter__")
        return NotImplemented


class Generator(Iterator):
    """ABC for generator objects: iterators with send/throw/close."""

    __slots__ = ()

    def __next__(self):
        """Return the next item from the generator.

        When exhausted, raise StopIteration.
        """
        return self.send(None)

    @abstractmethod
    def send(self, value):
        """Send a value into the generator.

        Return next yielded value or raise StopIteration.
        """
        raise StopIteration

    @abstractmethod
    def throw(self, typ, val=None, tb=None):
        """Raise an exception in the generator.

        Return next yielded value or raise StopIteration.
        """
        # Mirrors Coroutine.throw: build the exception and raise it here.
        if val is None:
            if tb is None:
                raise typ
            val = typ()
        if tb is not None:
            val = val.with_traceback(tb)
        raise val

    def close(self):
        """Raise GeneratorExit inside generator.
        """
        try:
            self.throw(GeneratorExit)
        except (GeneratorExit, StopIteration):
            pass
        else:
            raise RuntimeError("generator ignored GeneratorExit")

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Generator:
            return _check_methods(C, '__iter__', '__next__',
                                  'send', 'throw', 'close')
        return NotImplemented


Generator.register(generator)
class Sized(metaclass=ABCMeta):
    """ABC for objects providing ``__len__``."""

    __slots__ = ()

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            return _check_methods(C, "__len__")
        return NotImplemented


class Container(metaclass=ABCMeta):
    """ABC for objects providing ``__contains__``."""

    __slots__ = ()

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            return _check_methods(C, "__contains__")
        return NotImplemented

    # Support Container[T] subscription.
    __class_getitem__ = classmethod(GenericAlias)


class Collection(Sized, Iterable, Container):
    """ABC combining Sized, Iterable and Container."""

    __slots__ = ()

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Collection:
            return _check_methods(C, "__len__", "__iter__", "__contains__")
        return NotImplemented
class _CallableGenericAlias(GenericAlias):
    """ Represent `Callable[argtypes, resulttype]`.

    This sets ``__args__`` to a tuple containing the flattened ``argtypes``
    followed by ``resulttype``.

    Example: ``Callable[[int, str], float]`` sets ``__args__`` to
    ``(int, str, float)``.
    """

    __slots__ = ()

    def __new__(cls, origin, args):
        try:
            return cls.__create_ga(origin, args)
        except TypeError as exc:
            # Malformed subscripts currently only warn and fall back to a
            # plain GenericAlias; this becomes a hard error in 3.10.
            import warnings
            warnings.warn(f'{str(exc)} '
                          f'(This will raise a TypeError in Python 3.10.)',
                          DeprecationWarning)
            return GenericAlias(origin, args)

    @classmethod
    def __create_ga(cls, origin, args):
        if not isinstance(args, tuple) or len(args) != 2:
            raise TypeError(
                "Callable must be used as Callable[[arg, ...], result].")
        t_args, t_result = args
        if isinstance(t_args, (list, tuple)):
            # Flatten [arg, ...] into the stored __args__ tuple.
            ga_args = tuple(t_args) + (t_result,)
        # This relaxes what t_args can be on purpose to allow things like
        # PEP 612 ParamSpec.  Responsibility for whether a user is using
        # Callable[...] properly is deferred to static type checkers.
        else:
            ga_args = args
        return super().__new__(cls, origin, ga_args)

    def __repr__(self):
        if len(self.__args__) == 2 and self.__args__[0] is Ellipsis:
            # Callable[..., X] keeps the default GenericAlias repr.
            return super().__repr__()
        return (f'collections.abc.Callable'
                f'[[{", ".join([_type_repr(a) for a in self.__args__[:-1]])}], '
                f'{_type_repr(self.__args__[-1])}]')

    def __reduce__(self):
        # Pickle via the public Callable origin so unpickling re-runs the
        # flattening constructor.
        args = self.__args__
        if not (len(args) == 2 and args[0] is Ellipsis):
            args = list(args[:-1]), args[-1]
        return _CallableGenericAlias, (Callable, args)

    def __getitem__(self, item):
        # Called during TypeVar substitution, returns the custom subclass
        # rather than the default types.GenericAlias object.
        ga = super().__getitem__(item)
        args = ga.__args__
        t_result = args[-1]
        t_args = args[:-1]
        args = (t_args, t_result)
        return _CallableGenericAlias(Callable, args)
def _type_repr(obj):
    """Return the repr() of an object, special-casing types (internal helper).

    Copied from :mod:`typing` since collections.abc
    shouldn't depend on that module.
    """
    if isinstance(obj, GenericAlias):
        return repr(obj)
    if isinstance(obj, type):
        if obj.__module__ == 'builtins':
            # Builtins read better unqualified ('int', not 'builtins.int').
            return obj.__qualname__
        return f'{obj.__module__}.{obj.__qualname__}'
    if obj is Ellipsis:
        return '...'
    if isinstance(obj, FunctionType):
        return obj.__name__
    return repr(obj)
class Callable(metaclass=ABCMeta):
    """ABC for objects providing ``__call__``."""

    __slots__ = ()

    @abstractmethod
    def __call__(self, *args, **kwds):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            return _check_methods(C, "__call__")
        return NotImplemented

    # Callable[...] subscripts build the argument-flattening alias above.
    __class_getitem__ = classmethod(_CallableGenericAlias)


### SETS ###
class Set(Collection):
    """A set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.

    To override the comparisons (presumably for speed, as the
    semantics are fixed), redefine __le__ and __ge__,
    then the other operations will automatically follow suit.
    """

    __slots__ = ()

    def __le__(self, other):
        # Subset test: every element of self is in other.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True

    def __lt__(self, other):
        # Proper subset: strictly smaller and a subset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)

    def __gt__(self, other):
        # Proper superset: strictly larger and a superset.
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) > len(other) and self.__ge__(other)

    def __ge__(self, other):
        # Superset test: every element of other is in self.
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) < len(other):
            return False
        for elem in other:
            if elem not in self:
                return False
        return True

    def __eq__(self, other):
        # Equal sets: same size and mutual containment (subset suffices).
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)

    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.

        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)

    def __and__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)

    __rand__ = __and__

    def isdisjoint(self, other):
        'Return True if two sets have a null intersection.'
        for value in other:
            if value in self:
                return False
        return True

    def __or__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)

    __ror__ = __or__

    def __sub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)

    def __rsub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in other
                                   if value not in self)

    def __xor__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)

    __rxor__ = __xor__

    def _hash(self):
        """Compute the hash value of a set.

        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.

        This must be compatible __eq__.

        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__.  We match the algorithm used
        by the built-in frozenset type.
        """
        MAX = sys.maxsize
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
            h &= MASK
        h ^= (h >> 11) ^ (h >> 25)
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            h -= MASK + 1
        if h == -1:
            h = 590923713
        return h


Set.register(frozenset)
class MutableSet(Set):
    """A mutable set is a finite, iterable container.
    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__, __len__,
    add(), and discard().
    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """
    __slots__ = ()
    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError
    @abstractmethod
    def discard(self, value):
        """Remove an element. Do not raise an exception if absent."""
        raise NotImplementedError
    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)
    def pop(self):
        """Return the popped value. Raise KeyError if empty."""
        # Take an arbitrary element (the first one iteration yields).
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            # Empty set: translate iterator exhaustion into KeyError.
            raise KeyError from None
        self.discard(value)
        return value
    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass
    def __ior__(self, it):
        # In-place union: absorb every element of *it*.
        for value in it:
            self.add(value)
        return self
    def __iand__(self, it):
        # In-place intersection: drop everything not in *it*.
        # (self - it) is computed first, so *it* may be any iterable Set.
        for value in (self - it):
            self.discard(value)
        return self
    def __ixor__(self, it):
        # In-place symmetric difference.
        if it is self:
            # s ^= s is always empty.
            self.clear()
        else:
            if not isinstance(it, Set):
                # Coerce so repeated membership tests below are well-defined.
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self
    def __isub__(self, it):
        # In-place difference.
        if it is self:
            # s -= s empties the set without iterating while mutating.
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self
# The built-in mutable set satisfies this interface.
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Collection):
    __slots__ = ()
    # NOTE(review): because __slots__ precedes it, the string below is a
    # no-op statement rather than the class docstring (Mapping.__doc__ is
    # inherited); this mirrors the layout in CPython's _collections_abc.
    """A Mapping is a generic container for associating key/value
    pairs.
    This class provides concrete generic implementations of all
    methods except for __getitem__, __iter__, and __len__.
    """
    @abstractmethod
    def __getitem__(self, key):
        # Stub: even the abstract form surfaces missing keys as KeyError.
        raise KeyError
    def get(self, key, default=None):
        'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
        try:
            return self[key]
        except KeyError:
            return default
    def __contains__(self, key):
        # Membership is defined by lookup success (EAFP).
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True
    def keys(self):
        "D.keys() -> a set-like object providing a view on D's keys"
        return KeysView(self)
    def items(self):
        "D.items() -> a set-like object providing a view on D's items"
        return ItemsView(self)
    def values(self):
        "D.values() -> an object providing a view on D's values"
        return ValuesView(self)
    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Compare full contents; materializing as dicts normalizes ordering.
        return dict(self.items()) == dict(other.items())
    # Generic mappings are not reversible.
    __reversed__ = None
# dict proxies (e.g. class __dict__) satisfy the read-only interface.
Mapping.register(mappingproxy)
class MappingView(Sized):
    """Shared base for the dynamic views produced by Mapping.keys(),
    Mapping.items() and Mapping.values().

    Keeps a reference to the underlying mapping and delegates sizing to it.
    """
    __slots__ = '_mapping',
    def __init__(self, mapping):
        self._mapping = mapping
    def __len__(self):
        return len(self._mapping)
    def __repr__(self):
        return f'{self.__class__.__name__}({self._mapping!r})'
    __class_getitem__ = classmethod(GenericAlias)
class KeysView(MappingView, Set):
    """Set-like dynamic view over a mapping's keys."""
    __slots__ = ()
    @classmethod
    def _from_iterable(self, it):
        # Set operations on key views materialize plain sets.
        return set(it)
    def __contains__(self, key):
        return key in self._mapping
    def __iter__(self):
        yield from self._mapping
# dict.keys() returns an object with this interface.
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
    """Set-like dynamic view over a mapping's (key, value) pairs."""
    __slots__ = ()
    @classmethod
    def _from_iterable(self, it):
        # Set operations on item views materialize plain sets.
        return set(it)
    def __contains__(self, item):
        key, value = item
        try:
            candidate = self._mapping[key]
        except KeyError:
            return False
        # Identity short-circuit keeps NaN-like values matching themselves.
        return candidate is value or candidate == value
    def __iter__(self):
        mapping = self._mapping
        for key in mapping:
            yield (key, mapping[key])
# dict.items() returns an object with this interface.
ItemsView.register(dict_items)
class ValuesView(MappingView, Collection):
    """Dynamic collection view over a mapping's values."""
    __slots__ = ()
    def __contains__(self, value):
        # Identity short-circuit keeps NaN-like values matching themselves.
        for candidate in iter(self):
            if candidate is value or candidate == value:
                return True
        return False
    def __iter__(self):
        mapping = self._mapping
        for key in mapping:
            yield mapping[key]
# dict.values() returns an object with this interface.
ValuesView.register(dict_values)
class MutableMapping(Mapping):
    __slots__ = ()
    # NOTE(review): this string follows __slots__, so it is a no-op
    # statement rather than the class docstring (same quirk as Mapping).
    """A MutableMapping is a generic container for associating
    key/value pairs.
    This class provides concrete generic implementations of all
    methods except for __getitem__, __setitem__, __delitem__,
    __iter__, and __len__.
    """
    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError
    @abstractmethod
    def __delitem__(self, key):
        raise KeyError
    # Private sentinel: distinguishes "no default supplied" from default=None.
    __marker = object()
    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value
    def popitem(self):
        '''D.popitem() -> (k, v), remove and return some (key, value) pair
        as a 2-tuple; but raise KeyError if D is empty.
        '''
        try:
            key = next(iter(self))
        except StopIteration:
            # Empty mapping: translate iterator exhaustion into KeyError.
            raise KeyError from None
        value = self[key]
        del self[key]
        return key, value
    def clear(self):
        'D.clear() -> None. Remove all items from D.'
        try:
            while True:
                self.popitem()
        except KeyError:
            pass
    def update(self, other=(), /, **kwds):
        ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
        If E present and has a .keys() method, does: for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
        In either case, this is followed by: for k, v in F.items(): D[k] = v
        '''
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            # Duck-typed mapping: anything exposing a .keys() method.
            for key in other.keys():
                self[key] = other[key]
        else:
            # Assume an iterable of (key, value) pairs.
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    def setdefault(self, key, default=None):
        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
# The built-in dict satisfies this interface.
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Reversible, Collection):
    """All the operations on a read-only sequence.
    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """
    __slots__ = ()
    @abstractmethod
    def __getitem__(self, index):
        raise IndexError
    def __iter__(self):
        # Iterate by successive indexing until __getitem__ raises IndexError.
        i = 0
        try:
            while True:
                v = self[i]
                yield v
                i += 1
        except IndexError:
            return
    def __contains__(self, value):
        # Identity short-circuit keeps NaN-like values matching themselves.
        for v in self:
            if v is value or v == value:
                return True
        return False
    def __reversed__(self):
        for i in reversed(range(len(self))):
            yield self[i]
    def index(self, value, start=0, stop=None):
        '''S.index(value, [start, [stop]]) -> integer -- return first index of value.
        Raises ValueError if the value is not present.
        Supporting start and stop arguments is optional, but
        recommended.
        '''
        # Negative bounds are interpreted relative to the current length.
        # NOTE(review): start defaults to 0, so the "is not None" guard only
        # matters when a caller passes start=None explicitly — which would
        # break at "i = start" below; confirm intended usage.
        if start is not None and start < 0:
            start = max(len(self) + start, 0)
        if stop is not None and stop < 0:
            stop += len(self)
        i = start
        while stop is None or i < stop:
            try:
                v = self[i]
                if v is value or v == value:
                    return i
            except IndexError:
                # The sequence may shrink while searching; stop at its end.
                break
            i += 1
        raise ValueError
    def count(self, value):
        'S.count(value) -> integer -- return number of occurrences of value'
        return sum(1 for v in self if v is value or v == value)
# Built-in immutable sequence types satisfy this interface.
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
Sequence.register(memoryview)
class ByteString(Sequence):
    """Common abstract base unifying the two built-in binary sequence
    types, bytes and bytearray.

    XXX Should add all their methods.
    """
    __slots__ = ()
# Register the built-in binary sequence types as virtual subclasses.
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
    __slots__ = ()
    # NOTE(review): this string follows __slots__, so it is a no-op
    # statement rather than the class docstring (same quirk as Mapping).
    """All the operations on a read-write sequence.
    Concrete subclasses must provide __new__ or __init__,
    __getitem__, __setitem__, __delitem__, __len__, and insert().
    """
    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError
    @abstractmethod
    def __delitem__(self, index):
        raise IndexError
    @abstractmethod
    def insert(self, index, value):
        'S.insert(index, value) -- insert value before index'
        raise IndexError
    def append(self, value):
        'S.append(value) -- append value to the end of the sequence'
        self.insert(len(self), value)
    def clear(self):
        'S.clear() -> None -- remove all items from S'
        # Pop from the tail until the sequence reports itself empty.
        try:
            while True:
                self.pop()
        except IndexError:
            pass
    def reverse(self):
        'S.reverse() -- reverse *IN PLACE*'
        n = len(self)
        # Swap symmetric pairs; the middle element (odd n) stays put.
        for i in range(n//2):
            self[i], self[n-i-1] = self[n-i-1], self[i]
    def extend(self, values):
        'S.extend(iterable) -- extend sequence by appending elements from the iterable'
        # Snapshot when extending with self, otherwise the loop never ends.
        if values is self:
            values = list(values)
        for v in values:
            self.append(v)
    def pop(self, index=-1):
        '''S.pop([index]) -> item -- remove and return item at index (default last).
        Raise IndexError if list is empty or index is out of range.
        '''
        v = self[index]
        del self[index]
        return v
    def remove(self, value):
        '''S.remove(value) -- remove first occurrence of value.
        Raise ValueError if the value is not present.
        '''
        del self[self.index(value)]
    def __iadd__(self, values):
        # In-place concatenation (s += iterable) reuses extend().
        self.extend(values)
        return self
# Built-in mutable sequence types satisfy this interface.
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
| 26.299015 | 87 | 0.596507 |
acfaff9eee1830963dd44197af7f57f751e33e26 | 2,007 | py | Python | mlmodels/model_tf/misc/tf_serving/13.text-classification-kafka/producer.py | gitter-badger/mlmodels | f08cc9b6ec202d4ad25ecdda2f44487da387569d | [
"MIT"
] | 1 | 2022-03-11T07:57:48.000Z | 2022-03-11T07:57:48.000Z | mlmodels/model_tf/misc/tf_serving/13.text-classification-kafka/producer.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | mlmodels/model_tf/misc/tf_serving/13.text-classification-kafka/producer.py | whitetiger1002/mlmodels | f70f1da7434e8855eed50adc67b49cc169f2ea24 | [
"MIT"
] | null | null | null | import json
import numpy as np
import tensorflow as tf
from kafka import KafkaProducer
def publish_message(producer_instance, topic_name, key, value):
try:
key_bytes = bytes(key, encoding="utf-8")
value_bytes = bytes(value, encoding="utf-8")
producer_instance.send(topic_name, key=key_bytes, value=value_bytes)
producer_instance.flush()
print("Message published successfully.")
except Exception as ex:
print("Exception in publishing message")
print(str(ex))
def connect_kafka_producer():
print("connecting to kafka")
_producer = None
try:
_producer = KafkaProducer(bootstrap_servers=["localhost:9092"], api_version=(0, 10))
except Exception as ex:
print("Exception while connecting Kafka")
print(str(ex))
finally:
print("successfully connected to kafka")
return _producer
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
g = load_graph("frozen_model.pb")
label = ["negative", "positive"]
X = g.get_tensor_by_name("import/Placeholder:0")
Y = g.get_tensor_by_name("import/logits:0")
sess = tf.InteractiveSession(graph=g)
maxlen = 50
UNK = 3
with open("dictionary-test.json", "r") as fopen:
dic = json.load(fopen)
with open("text.txt") as fopen:
sentences = fopen.read().split("\n")
kafka_producer = connect_kafka_producer()
for sentence in sentences:
x = np.zeros((1, maxlen))
for no, k in enumerate(sentence.split()[:maxlen][::-1]):
val = dic[k] if k in dic else UNK
x[0, -1 - no] = val
index = np.argmax(sess.run(Y, feed_dict={X: x})[0])
print("feeding " + sentence)
publish_message(kafka_producer, "polarities", "polarity", label[index])
if kafka_producer is not None:
kafka_producer.close()
| 28.267606 | 92 | 0.674141 |
acfb00609aef908e6fcd3b93828a6d2d4f1a5ec7 | 50,726 | py | Python | sensor.py | Jahismighty/maltrail | 9bc70430993b2140ceb4dbac4b487251a9254416 | [
"MIT"
] | null | null | null | sensor.py | Jahismighty/maltrail | 9bc70430993b2140ceb4dbac4b487251a9254416 | [
"MIT"
] | null | null | null | sensor.py | Jahismighty/maltrail | 9bc70430993b2140ceb4dbac4b487251a9254416 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Copyright (c) 2014-2018 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from __future__ import print_function # Requires: Python >= 2.6
import sys
sys.dont_write_bytecode = True
import core.versioncheck
import inspect
import math
import mmap
import optparse
import os
import platform
import re
import socket
import subprocess
import struct
import threading
import time
import traceback
import urllib
import urlparse
from core.addr import inet_ntoa6
from core.attribdict import AttribDict
from core.common import check_connection
from core.common import check_sudo
from core.common import check_whitelisted
from core.common import load_trails
from core.enums import BLOCK_MARKER
from core.enums import PROTO
from core.enums import TRAIL
from core.log import create_log_directory
from core.log import get_error_log_handle
from core.log import log_error
from core.log import log_event
from core.parallel import worker
from core.parallel import write_block
from core.settings import check_memory
from core.settings import config
from core.settings import CAPTURE_TIMEOUT
from core.settings import CHECK_CONNECTION_MAX_RETRIES
from core.settings import CONFIG_FILE
from core.settings import CONSONANTS
from core.settings import DAILY_SECS
from core.settings import DLT_OFFSETS
from core.settings import DNS_EXHAUSTION_THRESHOLD
from core.settings import HTTP_TIME_FORMAT
from core.settings import IGNORE_DNS_QUERY_SUFFIXES
from core.settings import IPPROTO_LUT
from core.settings import LOCALHOST_IP
from core.settings import MMAP_ZFILL_CHUNK_LENGTH
from core.settings import MAX_RESULT_CACHE_ENTRIES
from core.settings import NAME
from core.settings import NO_SUCH_NAME_COUNTERS
from core.settings import NO_SUCH_NAME_PER_HOUR_THRESHOLD
from core.settings import PORT_SCANNING_THRESHOLD
from core.settings import read_config
from core.settings import REGULAR_SENSOR_SLEEP_TIME
from core.settings import SNAP_LEN
from core.settings import SUSPICIOUS_CONTENT_TYPES
from core.settings import SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS
from core.settings import SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD
from core.settings import SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD
from core.settings import SUSPICIOUS_HTTP_PATH_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION
from core.settings import SUSPICIOUS_HTTP_REQUEST_REGEXES
from core.settings import SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS
from core.settings import SUSPICIOUS_PROXY_PROBE_PRE_CONDITION
from core.settings import SUSPICIOUS_UA_REGEX
from core.settings import trails
from core.settings import TRAILS_FILE
from core.settings import VALID_DNS_CHARS
from core.settings import VERSION
from core.settings import WEB_SHELLS
from core.settings import WHITELIST
from core.settings import WHITELIST_DIRECT_DOWNLOAD_KEYWORDS
from core.settings import WHITELIST_LONG_DOMAIN_NAME_KEYWORDS
from core.settings import WHITELIST_HTTP_REQUEST_PATHS
from core.settings import WHITELIST_UA_KEYWORDS
from core.update import update_ipcat
from core.update import update_trails
# Module-level mutable state shared by the capture loop and helper threads.
_buffer = None  # packet buffer (presumably the mmap written by write_block) — TODO confirm
_caps = []  # opened capture handles — populated outside this chunk
_connect_sec = 0  # last second in which SYN connects were tallied (port-scan heuristic)
_connect_src_dst = {}  # "src~dst" -> set of destination ports seen (port-scan heuristic)
_connect_src_details = {}  # "src~dst" -> set of (sec, usec, src_port, dst_port) tuples
_count = 0
_locks = AttribDict()  # holds e.g. _locks.connect_sec used around the heuristic counters
_multiprocessing = None  # presumably set to the multiprocessing module when enabled — verify
_n = None
_result_cache = {}  # memoized domain/UA check results; cleared past MAX_RESULT_CACHE_ENTRIES
_last_syn = None  # last seen SYN tuple, used to suppress bursts
_last_logged_syn = None
_last_udp = None  # last seen UDP tuple, used to suppress bursts
_last_logged_udp = None
_last_dns_exhaustion = None  # last second a DNS-exhaustion event was logged (rate limit)
_quit = threading.Event()
_subdomains = {}  # domain -> set of seen subdomain prefixes (DNS-exhaustion heuristic)
_subdomains_sec = None  # second when _subdomains was last reset (daily rollover)
_dns_exhausted_domains = set()  # domains already flagged for DNS exhaustion
# Pcapy is a hard requirement; on import failure print a platform-specific
# installation hint and exit.
try:
    import pcapy
except ImportError:
    if subprocess.mswindows:
        exit("[!] please install 'WinPcap' (e.g. 'http://www.winpcap.org/install/') and Pcapy (e.g. 'https://breakingcode.wordpress.com/?s=pcapy')")
    else:
        # NOTE(review): subprocess.mswindows and platform.linux_distribution()
        # are Python-2-era APIs (the file also uses xrange/urlparse) — this
        # guard will itself fail on Python 3.
        msg, _ = "[!] please install 'Pcapy'", platform.linux_distribution()[0].lower()
        for distro, install in {("fedora", "centos"): "sudo yum install pcapy", ("debian", "ubuntu"): "sudo apt-get install python-pcapy"}.items():
            if _ in distro:
                msg += " (e.g. '%s')" % install
                break
        exit(msg)
def _check_domain_member(query, domains):
parts = query.lower().split('.')
for i in xrange(0, len(parts)):
domain = '.'.join(parts[i:])
if domain in domains:
return True
return False
def _check_domain_whitelisted(query):
    """Return True if the leading domain token of *query* (text up to the
    first character that is not alphanumeric, '.', '_' or '-') matches the
    WHITELIST, directly or via a parent domain."""
    return _check_domain_member(re.split(r"(?i)[^A-Z0-9._-]", query or "")[0], WHITELIST)
def _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, proto, packet=None):
    """Match a DNS query name against known trails and heuristics.

    Logs an event (via log_event) when *query* or one of its parent domains
    appears in the loaded trails, or when heuristics flag it as a
    suspiciously long domain.  Negative results are memoized in
    _result_cache so repeated queries short-circuit at the top.
    """
    if query:
        query = query.lower()
        # Strip an optional ":port" suffix before matching.
        if ':' in query:
            query = query.split(':', 1)[0]
    if query.replace('.', "").isdigit(): # IP address
        return
    # Cached negative verdict from a previous call.
    if _result_cache.get(query) == False:
        return
    result = False
    if not _check_domain_whitelisted(query) and all(_ in VALID_DNS_CHARS for _ in query):
        parts = query.lower().split('.')
        # Walk suffixes from most to least specific: a.b.c, b.c, c.
        for i in xrange(0, len(parts)):
            domain = '.'.join(parts[i:])
            if domain in trails:
                if domain == query:
                    trail = domain
                else:
                    # Highlight the non-matching prefix in parentheses.
                    _ = ".%s" % domain
                    trail = "(%s)%s" % (query[:-len(_)], _)
                # Skip nameserver lookups of suspicious/sinkholed trails and
                # bare dynamic-DNS / free-hosting domains to cut false alarms.
                if not (re.search(r"(?i)\Ad?ns\d*\.", query) and any(_ in trails.get(domain, " ")[0] for _ in ("suspicious", "sinkhole"))): # e.g. ns2.nobel.su
                    if not ((query == trail) and any(_ in trails.get(domain, " ")[0] for _ in ("dynamic", "free web"))): # e.g. noip.com
                        result = True
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, trails[domain][0], trails[domain][1]), packet)
                break
        if not result and config.USE_HEURISTICS:
            # Heuristic: unusually long first label without hyphens.
            if len(parts[0]) > SUSPICIOUS_DOMAIN_LENGTH_THRESHOLD and '-' not in parts[0]:
                trail = None
                if len(parts) > 2:
                    trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
                elif len(parts) == 2:
                    trail = "(%s).%s" % (parts[0], parts[1])
                else:
                    trail = query
                if trail and not any(_ in trail for _ in WHITELIST_LONG_DOMAIN_NAME_KEYWORDS):
                    result = True
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, proto, TRAIL.DNS, trail, "long domain (suspicious)", "(heuristic)"), packet)
    # Memoize negatives only; positives must keep logging on recurrence.
    if result == False:
        _result_cache[query] = False
def _process_packet(packet, sec, usec, ip_offset):
"""
Processes single (raw) IP layer data
"""
global _connect_sec
global _last_syn
global _last_logged_syn
global _last_udp
global _last_logged_udp
global _last_dns_exhaustion
global _subdomains_sec
try:
if len(_result_cache) > MAX_RESULT_CACHE_ENTRIES:
_result_cache.clear()
if config.USE_HEURISTICS:
if _locks.connect_sec:
_locks.connect_sec.acquire()
connect_sec = _connect_sec
_connect_sec = sec
if _locks.connect_sec:
_locks.connect_sec.release()
if sec > connect_sec:
for key in _connect_src_dst:
if len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
_src_ip, _dst_ip = key.split('~')
if not check_whitelisted(_src_ip):
for _ in _connect_src_details[key]:
log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)
_connect_src_dst.clear()
_connect_src_details.clear()
ip_data = packet[ip_offset:]
ip_version = ord(ip_data[0]) >> 4
localhost_ip = LOCALHOST_IP[ip_version]
if ip_version == 0x04: # IPv4
ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
iph_length = (ip_header[0] & 0xf) << 2
protocol = ip_header[6]
src_ip = socket.inet_ntoa(ip_header[8])
dst_ip = socket.inet_ntoa(ip_header[9])
elif ip_version == 0x06: # IPv6
# Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
iph_length = 40
protocol = ip_header[4]
src_ip = inet_ntoa6(ip_header[6])
dst_ip = inet_ntoa6(ip_header[7])
else:
return
if protocol == socket.IPPROTO_TCP: # TCP
src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length+14])
if flags != 2 and config.plugin_functions:
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
elif src_ip in trails and dst_ip != localhost_ip:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)
if flags == 2: # SYN set (only)
_ = _last_syn
_last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_syn: # skip bursts
return
if dst_ip in trails or "%s:%s" % (dst_ip, dst_port) in trails:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = dst_ip if dst_ip in trails else "%s:%s" % (dst_ip, dst_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.ADDR, trail, trails[trail][0], trails[trail][1]), packet)
elif (src_ip in trails or "%s:%s" % (src_ip, src_port) in trails) and dst_ip != localhost_ip:
_ = _last_logged_syn
_last_logged_syn = _last_syn
if _ != _last_logged_syn:
trail = src_ip if src_ip in trails else "%s:%s" % (src_ip, src_port)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP if ':' not in trail else TRAIL.ADDR, trail, trails[trail][0], trails[trail][1]), packet)
if config.USE_HEURISTICS:
if dst_ip != localhost_ip:
key = "%s~%s" % (src_ip, dst_ip)
if key not in _connect_src_dst:
_connect_src_dst[key] = set()
_connect_src_details[key] = set()
_connect_src_dst[key].add(dst_port)
_connect_src_details[key].add((sec, usec, src_port, dst_port))
else:
tcph_length = doff_reserved >> 4
h_size = iph_length + (tcph_length << 2)
tcp_data = ip_data[h_size:]
if tcp_data.startswith("HTTP/"):
if any(_ in tcp_data[:tcp_data.find("\r\n\r\n")] for _ in ("X-Sinkhole:", "X-Malware-Sinkhole:", "Server: You got served", "Server: Apache 1.0/SinkSoft", "sinkdns.org")) or "\r\n\r\nsinkhole" in tcp_data:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, "sinkhole response (malware)", "(heuristic)"), packet)
else:
index = tcp_data.find("<title>")
if index >= 0:
title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
if all(_ in title.lower() for _ in ("this domain", "has been seized")):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, title, "seized domain (suspicious)", "(heuristic)"), packet)
content_type = None
first_index = tcp_data.find("\r\nContent-Type:")
if first_index >= 0:
first_index = first_index + len("\r\nContent-Type:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
content_type = tcp_data[first_index:last_index].strip().lower()
if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)
method, path = None, None
index = tcp_data.find("\r\n")
if index >= 0:
line = tcp_data[:index]
if line.count(' ') == 2 and " HTTP/" in line:
method, path, _ = line.split(' ')
if method and path:
post_data = None
host = dst_ip
first_index = tcp_data.find("\r\nHost:")
path = path.lower()
if first_index >= 0:
first_index = first_index + len("\r\nHost:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
host = tcp_data[first_index:last_index]
host = host.strip().lower()
if host.endswith(":80"):
host = host[:-3]
if host and host[0].isalpha() and dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif config.CHECK_HOST_DOMAINS:
_check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)
index = tcp_data.find("\r\n\r\n")
if index >= 0:
post_data = tcp_data[index + 4:]
if config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and any(_ in path for _ in SUSPICIOUS_PROXY_PROBE_PRE_CONDITION) and not _check_domain_whitelisted(path.split('/')[2]):
trail = re.sub(r"(http://[^/]+/)(.+)", r"\g<1>(\g<2>)", path)
trail = re.sub(r"(http://)([^/(]+)", lambda match: "%s%s" % (match.group(1), match.group(2).split(':')[0].rstrip('.')), trail)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "potential proxy probe (suspicious)", "(heuristic)"), packet)
return
elif "://" in path:
url = path.split("://", 1)[1]
if '/' not in url:
url = "%s/" % url
host, path = url.split('/', 1)
if host.endswith(":80"):
host = host[:-3]
path = "/%s" % path
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
elif method == "CONNECT":
if '/' in path:
host, path = path.split('/', 1)
path = "/%s" % path
else:
host, path = path, '/'
if host.endswith(":80"):
host = host[:-3]
url = "%s%s" % (host, path)
proxy_domain = host.split(':')[0]
_check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
else:
url = "%s%s" % (host, path)
if config.USE_HEURISTICS:
user_agent, result = None, None
first_index = tcp_data.find("\r\nUser-Agent:")
if first_index >= 0:
first_index = first_index + len("\r\nUser-Agent:")
last_index = tcp_data.find("\r\n", first_index)
if last_index >= 0:
user_agent = tcp_data[first_index:last_index]
user_agent = urllib.unquote(user_agent).strip()
if user_agent:
result = _result_cache.get(user_agent)
if result is None:
if not any(_ in user_agent for _ in WHITELIST_UA_KEYWORDS):
match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
if match:
def _(value):
return value.replace('(', "\\(").replace(')', "\\)")
parts = user_agent.split(match.group(0), 1)
if len(parts) > 1 and parts[0] and parts[-1]:
result = _result_cache[user_agent] = "%s (%s)" % (_(match.group(0)), _(user_agent))
else:
result = _result_cache[user_agent] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
if not result:
_result_cache[user_agent] = False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)
if not _check_domain_whitelisted(host):
checks = [path.rstrip('/')]
if '?' in path:
checks.append(path.split('?')[0].rstrip('/'))
_ = os.path.splitext(checks[-1])
if _[1]:
checks.append(_[0])
if checks[-1].count('/') > 1:
checks.append(checks[-1][:checks[-1].rfind('/')])
checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])
for check in filter(None, checks):
for _ in ("", host):
check = "%s%s" % (_, check)
if check in trails:
parts = url.split(check)
other = ("(%s)" % _ if _ else _ for _ in parts)
trail = check.join(other)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
return
if "%s/" % host in trails:
trail = "%s/" % host
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[trail][0], trails[trail][1]))
return
if config.USE_HEURISTICS:
unquoted_path = urllib.unquote(path)
unquoted_post_data = urllib.unquote(post_data or "")
for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
path = path.replace(char, replacement)
if post_data:
post_data = post_data.replace(char, replacement)
if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get(unquoted_path)
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_path, re.I | re.DOTALL):
found = desc
break
_result_cache[unquoted_path] = found or ""
if found:
trail = "%s(%s)" % (host, path)
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
found = _result_cache.get(unquoted_post_data)
if found is None:
for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
found = desc
break
_result_cache[unquoted_post_data] = found or ""
if found:
trail = "%s(%s \(%s %s\))" % (host, path, method, post_data.strip())
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
return
if '.' in path:
_ = urlparse.urlparse("http://%s" % url) # dummy scheme
path = path.lower()
filename = _.path.split('/')[-1]
name, extension = os.path.splitext(filename)
trail = "%s(%s)" % (host, path)
if extension and extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
elif filename in WEB_SHELLS:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "potential web shell (suspicious)", "(heuristic)"), packet)
else:
for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
if re.search(regex, filename, re.I):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
break
elif protocol == socket.IPPROTO_UDP: # UDP
_ = ip_data[iph_length:iph_length + 4]
if len(_) < 4:
return
src_port, dst_port = struct.unpack("!HH", _)
_ = _last_udp
_last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
if _ == _last_udp: # skip bursts
return
if src_port != 53 and dst_port != 53: # not DNS
if dst_ip in trails:
trail = dst_ip
elif src_ip in trails:
trail = src_ip
else:
trail = None
if trail:
_ = _last_logged_udp
_last_logged_udp = _last_udp
if _ != _last_logged_udp:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)
else:
dns_data = ip_data[iph_length + 8:]
# Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
if len(dns_data) > 6:
qdcount = struct.unpack("!H", dns_data[4:6])[0]
if qdcount > 0:
offset = 12
query = ""
while len(dns_data) > offset:
length = ord(dns_data[offset])
if not length:
query = query[:-1]
break
query += dns_data[offset + 1:offset + length + 1] + '.'
offset += length + 1
query = query.lower()
if not query or '.' not in query or not all(_ in VALID_DNS_CHARS for _ in query) or any(_ in query for _ in (".intranet.",)) or any(query.endswith(_) for _ in IGNORE_DNS_QUERY_SUFFIXES):
return
parts = query.split('.')
if ord(dns_data[2]) & 0xfe == 0x00: # standard query (both recursive and non-recursive)
type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])
if len(parts) > 2:
if len(parts) > 3 and len(parts[-2]) <= 3:
domain = '.'.join(parts[-3:])
else:
domain = '.'.join(parts[-2:])
if not _check_domain_whitelisted(domain): # e.g. <hash>.hashserver.cs.trendmicro.com
if (sec - (_subdomains_sec or 0)) > DAILY_SECS:
_subdomains.clear()
_dns_exhausted_domains.clear()
_subdomains_sec = sec
subdomains = _subdomains.get(domain)
if not subdomains:
subdomains = _subdomains[domain] = set()
if not re.search(r"\A\d+\-\d+\-\d+\-\d+\Z", parts[0]):
if len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
subdomains.add('.'.join(parts[:-2]))
else:
if (sec - (_last_dns_exhaustion or 0)) > 60:
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
_dns_exhausted_domains.add(domain)
_last_dns_exhaustion = sec
return
# Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
if type_ not in (12, 28) and class_ == 1: # Type not in (PTR, AAAA), Class IN
if dst_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
_check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)
elif config.USE_HEURISTICS:
if ord(dns_data[2]) & 0x80: # standard response
if ord(dns_data[3]) == 0x80: # recursion available, no error
_ = offset + 5
try:
while _ < len(dns_data):
if ord(dns_data[_]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01": # Type A
break
else:
_ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]
_ = dns_data[_ + 12:_ + 16]
if _:
answer = socket.inet_ntoa(_)
if answer in trails:
_ = trails[answer]
if "sinkhole" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet) # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
elif "parking" in _[0]:
trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
except IndexError:
pass
elif ord(dns_data[3]) == 0x83: # recursion available, no such name
if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
if parts[-1].isdigit():
return
if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])): # generic check for DNSBL IP lookups
for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec / 3600:
NO_SUCH_NAME_COUNTERS[_] = [sec / 3600, 1, set()]
else:
NO_SUCH_NAME_COUNTERS[_][1] += 1
NO_SUCH_NAME_COUNTERS[_][2].add(query)
if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
if _.startswith("*."):
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:]), "excessive no such domain (suspicious)", "(heuristic)"), packet)
for item in NO_SUCH_NAME_COUNTERS[_][2]:
try:
del NO_SUCH_NAME_COUNTERS[item]
except KeyError:
pass
else:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)
try:
del NO_SUCH_NAME_COUNTERS[_]
except KeyError:
pass
break
if len(parts) > 2:
part = parts[0] if parts[0] != "www" else parts[1]
trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
elif len(parts) == 2:
part = parts[0]
trail = "(%s).%s" % (parts[0], parts[1])
else:
part = query
trail = query
if part and '-' not in part:
result = _result_cache.get(part)
if result is None:
# Reference: https://github.com/exp0se/dga_detector
probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
result = "entropy threshold no such domain (suspicious)"
if not result:
if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
result = "consonant threshold no such domain (suspicious)"
_result_cache[part] = result or False
if result:
log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)
elif protocol in IPPROTO_LUT: # non-TCP/UDP (e.g. ICMP)
if protocol == socket.IPPROTO_ICMP:
if ord(ip_data[iph_length]) != 0x08: # Non-echo request
return
elif protocol == socket.IPPROTO_ICMPV6:
if ord(ip_data[iph_length]) != 0x80: # Non-echo request
return
if dst_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
elif src_ip in trails:
log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)
except struct.error:
pass
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
def init():
"""
Performs sensor initialization
"""
global _multiprocessing
try:
import multiprocessing
if config.PROCESS_COUNT > 1:
_multiprocessing = multiprocessing
except (ImportError, OSError, NotImplementedError):
pass
def update_timer():
retries = 0
if not config.no_updates:
while retries < CHECK_CONNECTION_MAX_RETRIES and not check_connection():
sys.stdout.write("[!] can't update because of lack of Internet connection (waiting..." if not retries else '.')
sys.stdout.flush()
time.sleep(10)
retries += 1
if retries:
print(")")
if config.no_updates or retries == CHECK_CONNECTION_MAX_RETRIES:
if retries == CHECK_CONNECTION_MAX_RETRIES:
print("[x] going to continue without online update")
_ = update_trails(offline=True)
else:
_ = update_trails(server=config.UPDATE_SERVER)
update_ipcat()
if _:
trails.clear()
trails.update(_)
elif not trails:
trails.update(load_trails())
thread = threading.Timer(config.UPDATE_PERIOD, update_timer)
thread.daemon = True
thread.start()
create_log_directory()
get_error_log_handle()
check_memory()
msg = "[i] using '%s' for trail storage" % TRAILS_FILE
if os.path.isfile(TRAILS_FILE):
mtime = time.gmtime(os.path.getmtime(TRAILS_FILE))
msg += " (last modification: '%s')" % time.strftime(HTTP_TIME_FORMAT, mtime)
print(msg)
update_timer()
if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)
if config.plugins:
config.plugin_functions = []
for plugin in re.split(r"[,;]", config.plugins):
plugin = plugin.strip()
found = False
for _ in (plugin, os.path.join("plugins", plugin), os.path.join("plugins", "%s.py" % plugin)):
if os.path.isfile(_):
plugin = _
found = True
break
if not found:
exit("[!] plugin script '%s' not found" % plugin)
else:
dirname, filename = os.path.split(plugin)
dirname = os.path.abspath(dirname)
if not os.path.exists(os.path.join(dirname, '__init__.py')):
exit("[!] empty file '__init__.py' required inside directory '%s'" % dirname)
if not filename.endswith(".py"):
exit("[!] plugin script '%s' should have an extension '.py'" % filename)
if dirname not in sys.path:
sys.path.insert(0, dirname)
try:
module = __import__(filename[:-3].encode(sys.getfilesystemencoding()))
except (ImportError, SyntaxError), msg:
exit("[!] unable to import plugin script '%s' (%s)" % (filename, msg))
found = False
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "plugin" and not set(inspect.getargspec(function).args) & set(("event_tuple', 'packet")):
found = True
config.plugin_functions.append(function)
function.func_name = module.__name__
if not found:
exit("[!] missing function 'plugin(event_tuple, packet)' in plugin script '%s'" % filename)
if config.pcap_file:
_caps.append(pcapy.open_offline(config.pcap_file))
else:
interfaces = set(_.strip() for _ in config.MONITOR_INTERFACE.split(','))
if (config.MONITOR_INTERFACE or "").lower() == "any":
if subprocess.mswindows or "any" not in pcapy.findalldevs():
print("[x] virtual interface 'any' missing. Replacing it with all interface names")
interfaces = pcapy.findalldevs()
else:
print("[?] in case of any problems with packet capture on virtual interface 'any', please put all monitoring interfaces to promiscuous mode manually (e.g. 'sudo ifconfig eth0 promisc')")
for interface in interfaces:
if interface.lower() != "any" and interface not in pcapy.findalldevs():
hint = "[?] available interfaces: '%s'" % ",".join(pcapy.findalldevs())
exit("[!] interface '%s' not found\n%s" % (interface, hint))
print("[i] opening interface '%s'" % interface)
try:
_caps.append(pcapy.open_live(interface, SNAP_LEN, True, CAPTURE_TIMEOUT))
except (socket.error, pcapy.PcapError):
if "permitted" in str(sys.exc_info()[1]):
exit("[!] permission problem occurred ('%s')" % sys.exc_info()[1])
elif "No such device" in str(sys.exc_info()[1]):
exit("[!] no such device '%s'" % interface)
else:
raise
if config.LOG_SERVER and not len(config.LOG_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'LOG_SERVER' ('%s')" % config.LOG_SERVER)
if config.SYSLOG_SERVER and not len(config.SYSLOG_SERVER.split(':')) == 2:
exit("[!] invalid configuration value for 'SYSLOG_SERVER' ('%s')" % config.SYSLOG_SERVER)
if config.CAPTURE_FILTER:
print("[i] setting capture filter '%s'" % config.CAPTURE_FILTER)
for _cap in _caps:
try:
_cap.setfilter(config.CAPTURE_FILTER)
except:
pass
if _multiprocessing:
_init_multiprocessing()
if not subprocess.mswindows and not config.DISABLE_CPU_AFFINITY:
try:
try:
mod = int(subprocess.check_output("grep -c ^processor /proc/cpuinfo", stderr=subprocess.STDOUT, shell=True).strip())
used = subprocess.check_output("for pid in $(ps aux | grep python | grep sensor.py | grep -E -o 'root[ ]*[0-9]*' | tr -d '[:alpha:] '); do schedtool $pid; done | grep -E -o 'AFFINITY .*' | cut -d ' ' -f 2 | grep -v 0xf", stderr=subprocess.STDOUT, shell=True).strip().split('\n')
max_used = max(int(_, 16) for _ in used)
affinity = max(1, (max_used << 1) % 2 ** mod)
except:
affinity = 1
p = subprocess.Popen("schedtool -n -2 -M 2 -p 10 -a 0x%02x %d" % (affinity, os.getpid()), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, stderr = p.communicate()
if "not found" in stderr:
msg, _ = "[?] please install 'schedtool' for better CPU scheduling", platform.linux_distribution()[0].lower()
for distro, install in {("fedora", "centos"): "sudo yum install schedtool", ("debian", "ubuntu"): "sudo apt-get install schedtool"}.items():
if _ in distro:
msg += " (e.g. '%s')" % install
break
print(msg)
except:
pass
def _init_multiprocessing():
    """
    Inits worker processes used in multiprocessing mode: allocates the shared
    mmap capture buffer, zero-fills it, and spawns PROCESS_COUNT - 1 daemon
    worker processes that consume packets from the buffer.

    NOTE: indentation was mangled in the source; structure reconstructed.
    """

    global _buffer
    global _n

    if _multiprocessing:
        print("[i] preparing capture buffer...")
        try:
            # anonymous shared memory; Reference: http://www.alexonlinux.com/direct-io-in-python
            _buffer = mmap.mmap(-1, config.CAPTURE_BUFFER)

            # zero-fill in chunks to force page allocation upfront
            _ = "\x00" * MMAP_ZFILL_CHUNK_LENGTH
            for i in xrange(config.CAPTURE_BUFFER / MMAP_ZFILL_CHUNK_LENGTH):
                _buffer.write(_)
            _buffer.seek(0)
        except KeyboardInterrupt:
            raise
        except:
            exit("[!] unable to allocate network capture buffer. Please adjust value of 'CAPTURE_BUFFER'")

        print("[i] creating %d more processes (out of total %d)" % (config.PROCESS_COUNT - 1, config.PROCESS_COUNT))

        # shared (unlocked) counter of blocks written into the buffer
        _n = _multiprocessing.Value('L', lock=False)

        for i in xrange(config.PROCESS_COUNT - 1):
            process = _multiprocessing.Process(target=worker, name=str(i), args=(_buffer, _n, i, config.PROCESS_COUNT - 1, _process_packet))
            process.daemon = True
            process.start()
def monitor():
"""
Sniffs/monitors given capturing interface
"""
print("[o] running...")
def packet_handler(datalink, header, packet):
global _count
ip_offset = None
dlt_offset = DLT_OFFSETS[datalink]
try:
if datalink == pcapy.DLT_RAW:
ip_offset = dlt_offset
elif datalink == pcapy.DLT_PPP:
if packet[2:4] in ("\x00\x21", "\x00\x57"): # (IPv4, IPv6)
ip_offset = dlt_offset
elif dlt_offset >= 2:
if packet[dlt_offset - 2:dlt_offset] == "\x81\x00": # VLAN
dlt_offset += 4
if packet[dlt_offset - 2:dlt_offset] in ("\x08\x00", "\x86\xdd"): # (IPv4, IPv6)
ip_offset = dlt_offset
except IndexError:
pass
if ip_offset is None:
return
try:
sec, usec = header.getts()
if _multiprocessing:
if _locks.count:
_locks.count.acquire()
write_block(_buffer, _count, struct.pack("=III", sec, usec, ip_offset) + packet)
_n.value = _count = _count + 1
if _locks.count:
_locks.count.release()
else:
_process_packet(packet, sec, usec, ip_offset)
except socket.timeout:
pass
try:
def _(_cap):
datalink = _cap.datalink()
while True:
success = False
try:
(header, packet) = _cap.next()
if header is not None:
success = True
packet_handler(datalink, header, packet)
elif config.pcap_file:
_quit.set()
break
except (pcapy.PcapError, socket.timeout):
pass
if not success:
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
if len(_caps) > 1:
if _multiprocessing:
_locks.count = threading.Lock()
_locks.connect_sec = threading.Lock()
for _cap in _caps:
threading.Thread(target=_, args=(_cap,)).start()
while _caps and not _quit.is_set():
time.sleep(1)
print("[i] all capturing interfaces closed")
except SystemError, ex:
if "error return without" in str(ex):
print("\r[x] stopping (Ctrl-C pressed)")
else:
raise
except KeyboardInterrupt:
print("\r[x] stopping (Ctrl-C pressed)")
finally:
print("\r[i] please wait...")
if _multiprocessing:
try:
for _ in xrange(config.PROCESS_COUNT - 1):
write_block(_buffer, _n.value, "", BLOCK_MARKER.END)
_n.value = _n.value + 1
while _multiprocessing.active_children():
time.sleep(REGULAR_SENSOR_SLEEP_TIME)
except KeyboardInterrupt:
pass
def main():
    """
    Parses command line options, merges them into the global config, validates
    the environment (pcap file, privileges) and starts the sensor.

    NOTE: indentation was mangled in the source; structure reconstructed.
    """

    print("%s (sensor) #v%s\n" % (NAME, VERSION))

    parser = optparse.OptionParser(version=VERSION)
    parser.add_option("-c", dest="config_file", default=CONFIG_FILE, help="configuration file (default: '%s')" % os.path.split(CONFIG_FILE)[-1])
    parser.add_option("-i", dest="pcap_file", help="open pcap file for offline analysis")
    parser.add_option("-p", dest="plugins", help="plugin(s) to be used per event")
    parser.add_option("--console", dest="console", action="store_true", help="print events to console (too)")
    parser.add_option("--no-updates", dest="no_updates", action="store_true", help="disable (online) trail updates")
    parser.add_option("--debug", dest="debug", action="store_true", help=optparse.SUPPRESS_HELP)
    options, _ = parser.parse_args()

    read_config(options.config_file)

    # propagate string/boolean command line options into the global config
    for option in dir(options):
        if isinstance(getattr(options, option), (basestring, bool)) and not option.startswith('_'):
            config[option] = getattr(options, option)

    if options.debug:
        # debug implies single-process console output with verbose tracing
        config.console = True
        config.PROCESS_COUNT = 1
        config.SHOW_DEBUG = True

    if options.pcap_file:
        if options.pcap_file == '-':
            print("[i] using STDIN")
        elif not os.path.isfile(options.pcap_file):
            exit("[!] missing pcap file '%s'" % options.pcap_file)
        else:
            print("[i] using pcap file '%s'" % options.pcap_file)

    if not config.DISABLE_CHECK_SUDO and not check_sudo():
        exit("[!] please run '%s' with sudo/Administrator privileges" % __file__)

    try:
        init()
        monitor()
    except KeyboardInterrupt:
        print("\r[x] stopping (Ctrl-C pressed)")
if __name__ == "__main__":
show_final = True
try:
main()
except SystemExit, ex:
show_final = False
if isinstance(getattr(ex, "message"), basestring):
print(ex)
os._exit(1)
except IOError:
show_final = False
log_error("\n\n[!] session abruptly terminated\n[?] (hint: \"https://stackoverflow.com/a/20997655\")")
except Exception:
msg = "\r[!] unhandled exception occurred ('%s')" % sys.exc_info()[1]
msg += "\n[x] please report the following details at 'https://github.com/stamparm/maltrail/issues':\n---\n'%s'\n---" % traceback.format_exc()
log_error("\n\n%s" % msg.replace("\r", ""))
print(msg)
finally:
if show_final:
print("[i] finished")
os._exit(0)
| 48.541627 | 306 | 0.500591 |
acfb00e8c7bf798b311ffa9e0534cf2307c75e43 | 436 | py | Python | configs/_base_/det_datasets/comics_speech_bubble_dataset.py | gsoykan/mmocr | e8ca58fc2faa1cf1d798dc440c39615ebc908558 | [
"Apache-2.0"
] | 1 | 2022-02-21T18:38:57.000Z | 2022-02-21T18:38:57.000Z | configs/_base_/det_datasets/comics_speech_bubble_dataset.py | gsoykan/mmocr | e8ca58fc2faa1cf1d798dc440c39615ebc908558 | [
"Apache-2.0"
] | null | null | null | configs/_base_/det_datasets/comics_speech_bubble_dataset.py | gsoykan/mmocr | e8ca58fc2faa1cf1d798dc440c39615ebc908558 | [
"Apache-2.0"
] | null | null | null | root = 'tests/data/comics_speech_bubble_dataset'
# dataset with type='IcdarDataset'
# Train/test dataset specs for the comics speech bubble data; `root` is the
# dataset directory defined just above. Values are identical to the original
# dict()/f-string form (mmcv-style configs are reflected over module globals,
# so no helper names are introduced).
train = {
    'type': 'IcdarDataset',
    'ann_file': '{}/train/instances_train.json'.format(root),
    'img_prefix': '{}/train/imgs'.format(root),
    'pipeline': None,
}

test = {
    'type': 'IcdarDataset',
    'ann_file': '{}/test/instances_test.json'.format(root),
    'img_prefix': '{}/test/imgs'.format(root),
    'pipeline': None,
    'test_mode': True,
}

train_list = [train]

test_list = [test]
| 21.8 | 50 | 0.690367 |
acfb03705c27649ad1f5865c957917038f62a92e | 2,872 | py | Python | setup.py | C0DK/lightbus | be5cc2771b1058f7c927cca870ed75d4cbbe61a3 | [
"Apache-2.0"
] | null | null | null | setup.py | C0DK/lightbus | be5cc2771b1058f7c927cca870ed75d4cbbe61a3 | [
"Apache-2.0"
] | null | null | null | setup.py | C0DK/lightbus | be5cc2771b1058f7c927cca870ed75d4cbbe61a3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
# (indentation was mangled in the source; structure reconstructed, content
# otherwise unchanged)

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

import os.path

# use README.rst (when present next to this file) as the long description
readme = ""
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, "README.rst")
if os.path.exists(readme_path):
    with open(readme_path, "rb") as stream:
        readme = stream.read().decode("utf8")

setup(
    long_description=readme,
    name="lightbus",
    version="1.1.0",
    description="RPC & event framework for Python 3",
    python_requires=">=3.7",
    project_urls={
        "documentation": "https://lightbus.org",
        "homepage": "https://lightbus.org",
        "repository": "https://github.com/adamcharnock/lightbus/",
    },
    author="Adam Charnock",
    author_email="adam@adamcharnock.com",
    keywords="python messaging redis bus queue",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: AsyncIO",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3",
        "Topic :: System :: Networking",
        "Topic :: Communications",
    ],
    entry_points={
        "console_scripts": ["lightbus = lightbus.commands:lightbus_entry_point"],
        "lightbus_event_transports": [
            "debug = lightbus:DebugEventTransport",
            "redis = lightbus:RedisEventTransport",
        ],
        "lightbus_plugins": [
            "internal_metrics = lightbus.plugins.metrics:MetricsPlugin",
            "internal_state = lightbus.plugins.state:StatePlugin",
        ],
        "lightbus_result_transports": [
            "debug = lightbus:DebugResultTransport",
            "redis = lightbus:RedisResultTransport",
        ],
        "lightbus_rpc_transports": [
            "debug = lightbus:DebugRpcTransport",
            "redis = lightbus:RedisRpcTransport",
        ],
        "lightbus_schema_transports": [
            "debug = lightbus:DebugSchemaTransport",
            "redis = lightbus:RedisSchemaTransport",
        ],
    },
    packages=[
        "lightbus",
        "lightbus.client",
        "lightbus.client.docks",
        "lightbus.client.internal_messaging",
        "lightbus.client.subclients",
        "lightbus.commands",
        "lightbus.config",
        "lightbus.plugins",
        "lightbus.schema",
        "lightbus.serializers",
        "lightbus.transports",
        "lightbus.transports.redis",
        "lightbus.utilities",
    ],
    package_dir={"": "."},
    package_data={},
    install_requires=["aioredis>=1.2.0", "jsonschema>=3.2", "pyyaml>=3.12"],
)
| 31.56044 | 81 | 0.607591 |
acfb038f5692fd27791af21767cb5171a2b850df | 10,025 | py | Python | docs/conf.py | rethore/waketor | 81a6688f27b5c718b98cf61e264ba9f127345ca6 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | rethore/waketor | 81a6688f27b5c718b98cf61e264ba9f127345ca6 | [
"Apache-2.0"
] | 3 | 2015-12-10T08:35:19.000Z | 2015-12-10T08:37:36.000Z | docs/conf.py | rethore/waketor | 81a6688f27b5c718b98cf61e264ba9f127345ca6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# waketor documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 8 11:57:58 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Stub out heavy scientific dependencies so autodoc can import the package on
# machines (e.g. Read the Docs builders) where they are not installed: every
# module named here resolves to a Mock object instead of the real package.
from mock import Mock
import sys
MOCK_MODULES = ['scipy', 'numpy']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# on_rtd is whether we are on readthedocs.org
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'waketor'
copyright = u'2015, Pierre-Elouan Rethore'
author = u'Pierre-Elouan Rethore'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'waketordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'waketor.tex', u'waketor Documentation',
u'Pierre-Elouan Rethore', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'waketor', u'waketor Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'waketor', u'waketor Documentation',
author, 'waketor', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 32.33871 | 82 | 0.718005 |
acfb045eb558c6280a18793b672ff8c35665a0c1 | 10,127 | py | Python | tests/test_style/test_palettes.py | TobiasHerr/yellowbrick-fork | 09c8aeafe1b9e9524167bee25380c40aed2ccd4b | [
"Apache-2.0"
] | null | null | null | tests/test_style/test_palettes.py | TobiasHerr/yellowbrick-fork | 09c8aeafe1b9e9524167bee25380c40aed2ccd4b | [
"Apache-2.0"
] | null | null | null | tests/test_style/test_palettes.py | TobiasHerr/yellowbrick-fork | 09c8aeafe1b9e9524167bee25380c40aed2ccd4b | [
"Apache-2.0"
] | null | null | null | # tests.test_style.test_palettes
# Tests the palettes module of the yellowbrick library.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Tue Oct 04 16:21:58 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_palettes.py [] benjamin@bengfort.com $
"""
Tests the palettes module of the yellowbrick library.
"""
##########################################################################
## Imports
##########################################################################
import warnings
import unittest
import numpy as np
import matplotlib as mpl
from yellowbrick.exceptions import *
from yellowbrick.style.palettes import *
from yellowbrick.style.colors import get_color_cycle
from yellowbrick.style.rcmod import set_aesthetic, set_palette
from yellowbrick.style.palettes import color_sequence, color_palette
from yellowbrick.style.palettes import ColorPalette, PALETTES, SEQUENCES
from tests.base import VisualTestCase
##########################################################################
## Color Palette Tests
##########################################################################
class ColorPaletteObjectTests(VisualTestCase):
    """
    Tests the ColorPalette object

    NOTE: indentation was mangled in the source; method bodies reconstructed.
    """

    def test_init_palette_by_name(self):
        """
        Test that a palette can be initialized by name
        """
        # Try all the names in the palettes
        for name, value in PALETTES.items():
            try:
                palette = ColorPalette(name)
            except YellowbrickValueError:
                self.fail(
                    "Could not instantiate {} color palette by name".format(name)
                )

            self.assertEqual(value, palette)

        # Sanity check performed *outside* the assertRaises context: if it ran
        # inside, its AssertionError would surface as a confusing
        # "wrong exception type" failure instead of a clear message.
        self.assertNotIn('foo', PALETTES, "Cannot test bad name 'foo' it is in PALETTES!")

        # Try a name not in PALETTES
        with self.assertRaises(YellowbrickValueError):
            palette = ColorPalette('foo')

    def test_init_palette_by_list(self):
        """
        Test that a palette can be initialized by a list
        """
        # Try all the values in the palettes (HEX)
        for value in PALETTES.values():
            palette = ColorPalette(value)
            self.assertEqual(len(value), len(palette))

        # Try all the values converted to RGB
        for value in PALETTES.values():
            palette = ColorPalette(map(mpl.colors.colorConverter.to_rgb, value))
            self.assertEqual(len(value), len(palette))

    def test_color_palette_context(self):
        """
        Test ColorPalette context management
        """
        default = color_palette()
        context = color_palette('dark')

        # entering the context switches the active color cycle ...
        with ColorPalette('dark') as palette:
            self.assertIsInstance(palette, ColorPalette)
            self.assertEqual(get_color_cycle(), context)

        # ... and leaving it restores the default
        self.assertEqual(get_color_cycle(), default)

    def test_as_hex_as_rgb(self):
        """
        Test the conversion of a ColorPalette to hex values and back to rgb
        """
        palette = color_palette('flatui')
        expected = PALETTES['flatui']

        # as_hex() returns a *new* ColorPalette of hex strings
        morgified = palette.as_hex()
        self.assertIsNot(morgified, palette)
        self.assertIsInstance(morgified, ColorPalette)
        self.assertEqual(morgified, expected)

        # round-trip back to RGB must equal the original palette
        remorgified = morgified.as_rgb()
        self.assertIsNot(remorgified, morgified)
        self.assertIsNot(remorgified, palette)
        self.assertEqual(remorgified, palette)

    @unittest.skip("not implemented yet")
    def test_plot_color_palette(self):
        """
        Test the plotting of a color palette for color visualization
        """
        raise NotImplementedError(
            "Not quite sure how to implement this yet"
        )
class ColorPaletteFunctionTests(VisualTestCase):
    """
    Tests the color_palette function.
    """
    def test_current_palette(self):
        """
        Test modifying the current palette with a simple palette
        """
        pal = color_palette(["red", "blue", "green"], 3)
        set_palette(pal, 3)
        self.assertEqual(pal, get_color_cycle())
        # Reset the palette
        set_aesthetic()
    def test_palette_context(self):
        """
        Test the context manager for the color_palette function
        """
        default_pal = color_palette()
        context_pal = color_palette("muted")
        with color_palette(context_pal):
            self.assertEqual(get_color_cycle(), context_pal)
        self.assertEqual(get_color_cycle(), default_pal)
    def test_big_palette_context(self):
        """
        Test that the context manager also resets the number of colors
        """
        original_pal = color_palette("accent", n_colors=8)
        context_pal = color_palette("bold", 10)
        set_palette(original_pal)
        with color_palette(context_pal, 10):
            self.assertEqual(get_color_cycle(), context_pal)
        self.assertEqual(get_color_cycle(), original_pal)
        # Reset default
        set_aesthetic()
    def test_yellowbrick_palettes(self):
        """
        Test the yellowbrick palettes have length 6 (bgrmyck)
        """
        pals = ["accent", "dark", "pastel", "bold", "muted"]
        for name in pals:
            pal_out = color_palette(name)
            self.assertEqual(len(pal_out), 6, "{} is not of len 6".format(name))
    def test_seaborn_palettes(self):
        """
        Test the seaborn palettes have length 6 (bgrmyck)
        """
        pals = ["sns_deep", "sns_muted", "sns_pastel",
                "sns_bright", "sns_dark", "sns_colorblind"]
        for name in pals:
            pal_out = color_palette(name)
            self.assertEqual(len(pal_out), 6)
    def test_bad_palette_name(self):
        """
        Test that a bad palette name raises an exception
        """
        # YellowbrickValueError subclasses ValueError, so both catches must work
        with self.assertRaises(ValueError):
            color_palette("IAmNotAPalette")
        with self.assertRaises(YellowbrickValueError):
            color_palette("IAmNotAPalette")
    def test_bad_palette_colors(self):
        """
        Test that bad color names raise an exception
        """
        pal = ["red", "blue", "iamnotacolor"]
        with self.assertRaises(ValueError):
            color_palette(pal)
        with self.assertRaises(YellowbrickValueError):
            color_palette(pal)
    def test_palette_is_list_of_tuples(self):
        """
        Assert that color_palette returns a list of RGB tuples
        """
        pal_in = np.array(["red", "blue", "green"])
        pal_out = color_palette(pal_in, 3)
        self.assertIsInstance(pal_out, list)
        self.assertIsInstance(pal_out[0], tuple)
        self.assertIsInstance(pal_out[0][0], float)
        self.assertEqual(len(pal_out[0]), 3)
    def test_palette_cycles(self):
        """
        Test that the color palette cycles for more colors
        """
        accent = color_palette("accent")
        double_accent = color_palette("accent", 12)
        self.assertEqual(double_accent, accent + accent)
    @unittest.skip("Discovered this commented out, don't know why")
    def test_cbrewer_qual(self):
        """
        Test colorbrewer qualitative palettes
        """
        # NOTE(review): relies on mpl_palette / palettes names coming in via the
        # star import of yellowbrick.style.palettes -- confirm before unskipping
        pal_short = mpl_palette("Set1", 4)
        pal_long = mpl_palette("Set1", 6)
        self.assertEqual(pal_short, pal_long[:4])
        pal_full = palettes.mpl_palette("Set2", 8)
        pal_long = palettes.mpl_palette("Set2", 10)
        self.assertEqual(pal_full, pal_long[:8])
    def test_color_codes(self):
        """
        Test the setting of color codes
        """
        set_color_codes("accent")
        colors = color_palette("accent") + ["0.06666666666666667"]
        for code, color in zip("bgrmyck", colors):
            rgb_want = mpl.colors.colorConverter.to_rgb(color)
            rgb_got = mpl.colors.colorConverter.to_rgb(code)
            self.assertEqual(rgb_want, rgb_got)
        set_color_codes("reset")
    def test_as_hex(self):
        """
        Test converting a color palette to hex and back to rgb.
        """
        pal = color_palette("accent")
        for rgb, hex in zip(pal, pal.as_hex()):
            self.assertEqual(mpl.colors.rgb2hex(rgb), hex)
        for rgb_e, rgb_v in zip(pal, pal.as_hex().as_rgb()):
            self.assertEqual(rgb_e, rgb_v)
    def test_get_color_cycle(self):
        """
        Test getting the default color cycle
        """
        # 'axes.color_cycle' is deprecated in matplotlib; warnings are silenced
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            result = get_color_cycle()
            expected = mpl.rcParams['axes.color_cycle']
            self.assertEqual(result, expected)
    def test_preserved_palette_length(self):
        """
        Test palette length is preserved when modified
        """
        pal_in = color_palette("Set1", 10)
        pal_out = color_palette(pal_in)
        self.assertEqual(pal_in, pal_out)
    def test_color_sequence(self):
        """
        Ensure the color sequence returns listed colors.
        """
        for name, ncols in SEQUENCES.items():
            for n in ncols.keys():
                cmap = color_sequence(name, n)
                self.assertEqual(name, cmap.name)
                self.assertEqual(n, cmap.N)
    def test_color_sequence_default(self):
        """
        Assert the default color sequence is RdBu
        """
        cmap = color_sequence()
        self.assertEqual(cmap.name, "RdBu")
        self.assertEqual(cmap.N, 11)
    def test_color_sequence_unrecocognized(self):
        """
        Test value errors for unrecognized sequences
        """
        with self.assertRaises(YellowbrickValueError):
            cmap = color_sequence('PepperBucks', 3)
    def test_color_sequence_bounds(self):
        """
        Test color sequence out of bounds value error
        """
        with self.assertRaises(YellowbrickValueError):
            cmap = color_sequence('RdBu', 18)
        with self.assertRaises(YellowbrickValueError):
            cmap = color_sequence('RdBu', 2)
# Run the palette test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 31.746082 | 94 | 0.604424 |
acfb05094c9ec4199f5835e8615a27d3712e1771 | 867 | py | Python | src/python/analyse.py | paulpatault/ChessApp | 45809de7d6a4b016e569f30258976778275203d9 | [
"MIT"
] | null | null | null | src/python/analyse.py | paulpatault/ChessApp | 45809de7d6a4b016e569f30258976778275203d9 | [
"MIT"
] | null | null | null | src/python/analyse.py | paulpatault/ChessApp | 45809de7d6a4b016e569f30258976778275203d9 | [
"MIT"
] | null | null | null | from tensorflow_chessbot import getFEN
import argparse
import sys
if __name__ == "__main__":
    # Build the CLI from a declarative table of (flag, options) pairs; the
    # resulting parser is identical to the original hand-rolled version.
    cli = argparse.ArgumentParser(
        description="Predict a chessboard FEN from supplied local image link or URL"
    )
    option_table = (
        ("--url", dict(
            default="http://imgur.com/u4zF5Hj.png",
            help="URL of image (ex. http://imgur.com/u4zF5Hj.png)",
        )),
        ("--filepath", dict(help="filepath to image (ex. u4zF5Hj.png)")),
        ("--unflip", dict(
            default=False,
            action="store_true",
            help="revert the image of a flipped chessboard",
        )),
        ("--active", dict(default="w")),
        ("--dir", dict(default=None)),
        ("--verbose", dict(default=True)),
    )
    for flag, options in option_table:
        cli.add_argument(flag, **options)
    parsed = cli.parse_args()
    # Predict the FEN for the supplied board image and emit it on stdout.
    print(getFEN(parsed))
    sys.stdout.flush()
| 27.967742 | 84 | 0.642445 |
acfb061930111305dac2845a747c7293d9a7af4c | 6,661 | py | Python | embyapi/models/public_system_info.py | stanionascu/python-embyapi | a3f7aa49aea4052277cc43605c0d89bc6ff21913 | [
"BSD-3-Clause"
] | null | null | null | embyapi/models/public_system_info.py | stanionascu/python-embyapi | a3f7aa49aea4052277cc43605c0d89bc6ff21913 | [
"BSD-3-Clause"
] | null | null | null | embyapi/models/public_system_info.py | stanionascu/python-embyapi | a3f7aa49aea4052277cc43605c0d89bc6ff21913 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PublicSystemInfo(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Public (unauthenticated) system information reported by an Emby server.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'local_address': 'str',
        'wan_address': 'str',
        'server_name': 'str',
        'version': 'str',
        'operating_system': 'str',
        'id': 'str'
    }
    attribute_map = {
        'local_address': 'LocalAddress',
        'wan_address': 'WanAddress',
        'server_name': 'ServerName',
        'version': 'Version',
        'operating_system': 'OperatingSystem',
        'id': 'Id'
    }

    def __init__(self, local_address=None, wan_address=None, server_name=None, version=None, operating_system=None, id=None):  # noqa: E501
        """PublicSystemInfo - a model defined in Swagger"""  # noqa: E501
        self._local_address = None
        self._wan_address = None
        self._server_name = None
        self._version = None
        self._operating_system = None
        self._id = None
        self.discriminator = None
        # Assign through the property setters, mirroring the generated code.
        if local_address is not None:
            self.local_address = local_address
        if wan_address is not None:
            self.wan_address = wan_address
        if server_name is not None:
            self.server_name = server_name
        if version is not None:
            self.version = version
        if operating_system is not None:
            self.operating_system = operating_system
        if id is not None:
            self.id = id

    @property
    def local_address(self):
        """str: The server's local address (``LocalAddress`` in JSON)."""
        return self._local_address

    @local_address.setter
    def local_address(self, local_address):
        """Sets the local_address of this PublicSystemInfo."""
        self._local_address = local_address

    @property
    def wan_address(self):
        """str: The server's WAN address (``WanAddress`` in JSON)."""
        return self._wan_address

    @wan_address.setter
    def wan_address(self, wan_address):
        """Sets the wan_address of this PublicSystemInfo."""
        self._wan_address = wan_address

    @property
    def server_name(self):
        """str: The server's display name (``ServerName`` in JSON)."""
        return self._server_name

    @server_name.setter
    def server_name(self, server_name):
        """Sets the server_name of this PublicSystemInfo."""
        self._server_name = server_name

    @property
    def version(self):
        """str: The server software version (``Version`` in JSON)."""
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this PublicSystemInfo."""
        self._version = version

    @property
    def operating_system(self):
        """str: The server's operating system (``OperatingSystem`` in JSON)."""
        return self._operating_system

    @operating_system.setter
    def operating_system(self, operating_system):
        """Sets the operating_system of this PublicSystemInfo."""
        self._operating_system = operating_system

    @property
    def id(self):
        """str: The server's unique id (``Id`` in JSON)."""
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this PublicSystemInfo."""
        self._id = id

    def to_dict(self):
        """Returns the model properties as a dict.

        Fix: iterate with ``dict.items()`` directly instead of the Python-2
        compatibility shim ``six.iteritems()``; behavior on Python 3 is
        identical and the block no longer depends on ``six``.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements inside lists.
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model values inside dicts.
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        if issubclass(PublicSystemInfo, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PublicSystemInfo):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.639004 | 139 | 0.585798 |
acfb07267673e31d61d1cc694f2fe29364429e42 | 1,178 | py | Python | src/server_common/mpwp_data_sender.py | Devin0xFFFFFF/multiplayer-web-pong | 58f4663d160770747c31bc8d42c028ced9df5684 | [
"MIT"
] | null | null | null | src/server_common/mpwp_data_sender.py | Devin0xFFFFFF/multiplayer-web-pong | 58f4663d160770747c31bc8d42c028ced9df5684 | [
"MIT"
] | null | null | null | src/server_common/mpwp_data_sender.py | Devin0xFFFFFF/multiplayer-web-pong | 58f4663d160770747c31bc8d42c028ced9df5684 | [
"MIT"
] | null | null | null | import zmq
import json
from server_common import mpwp_protocol, config
class MPWPDataSender(object):
    """Base class for components that build MPWP packets and ship log
    records to the central logger over ZeroMQ."""
    # Class-level defaults; instances overwrite these in __init__/assign_id.
    ID = None
    MSGNUM = 0
    context = None
    log_sock = None
    log_level = 0
    LOGNUM = 0

    def __init__(self, log_level=1):
        """Create the ZeroMQ context and, when logging is enabled
        (log_level > 0), a PUSH socket connected to the logger."""
        self.context = zmq.Context()
        self.log_level = log_level
        if self.log_level > 0:
            self.log_sock = self.context.socket(zmq.PUSH)
            self.log_sock.connect(config.LOGGER_ADDR)

    def close(self):
        """Close the logging socket, if one was opened.

        Bug fix: previously this called ``self.log_sock.close()``
        unconditionally, raising AttributeError when logging was disabled
        (log_level == 0, so log_sock was never created). Now a no-op in
        that case, and idempotent on repeated calls.
        NOTE(review): the zmq context is never terminated here -- confirm
        whether callers expect context.term() on shutdown.
        """
        if self.log_sock is not None:
            self.log_sock.close()
            self.log_sock = None

    def assign_id(self):
        """Assign this sender a fresh UUID used to address its packets."""
        self.ID = mpwp_protocol.get_uuid()

    def get_packet(self, TO, TYPE, CONTENT=None):
        """Build an MPWP content packet addressed to TO and bump the
        per-instance message counter."""
        packet = mpwp_protocol.get_mpwp_content_packet(TO, self.ID, str(self.MSGNUM).encode(), TYPE, CONTENT)
        self.MSGNUM += 1
        return packet

    def log(self, log_level, log_msg):
        """Send a log record to the logger when logging is enabled and the
        record's level is at or above this sender's threshold."""
        if self.log_level and log_level >= self.log_level:
            packet = mpwp_protocol.get_log_packet(self.ID, str(self.LOGNUM).encode(),
                                                  str(log_level).encode(), str(log_msg).encode())
            self.LOGNUM += 1
            # print(packet)
            self.log_sock.send_multipart(packet)
| 28.047619 | 109 | 0.60781 |
acfb07a1d93ac76a99b05a9b06136ead88141e28 | 2,454 | py | Python | wordmemory.py | AlEscher/HumanBenchmarkBot | 7f09720d62b6816160f9f3c9a5e4cafb1722dfe9 | [
"MIT"
] | 1 | 2019-12-29T14:56:38.000Z | 2019-12-29T14:56:38.000Z | wordmemory.py | AlEscher/HumanBenchmarkBot | 7f09720d62b6816160f9f3c9a5e4cafb1722dfe9 | [
"MIT"
] | null | null | null | wordmemory.py | AlEscher/HumanBenchmarkBot | 7f09720d62b6816160f9f3c9a5e4cafb1722dfe9 | [
"MIT"
] | 1 | 2021-05-06T11:32:55.000Z | 2021-05-06T11:32:55.000Z | from PIL import ImageGrab
from pynput.mouse import Button, Controller
import time
from win32api import GetSystemMetrics
import pytesseract
import sys
mouse = Controller()
screenWidth = GetSystemMetrics(0)
screenHeight = GetSystemMetrics(1)
# path to tesseract's executable, this is should be the standard path
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
alreadySeenWords = []
limit = 20
image = None
if (len(sys.argv) == 1):
print("No limit specified, using default of %i" % (limit))
print("Usage example: python %s 30" % (sys.argv[0]))
elif (sys.argv[1].isdigit()):
limit = int(sys.argv[1])
else:
print("Invalid argument.")
print("Usage example: python %s 30" % (sys.argv[0]))
sys.exit(1)
# can't use variables for width / height, as bbox apparently doesn't work well with variables...
if (screenWidth == 1920 and screenHeight == 1080):
# click to start the test
mouse.position = (956, 618)
elif (screenWidth == 2560 and screenHeight == 1440):
# click to start the test
mouse.position = (1263, 615)
else:
print("Sorry, your screen resolution isn't supported.")
sys.exit(1)
mouse.click(Button.left, 1)
time.sleep(0.1)
for x in range(0, limit):
alreadySeen = False
# read the current word from the screen
if (screenWidth == 1920 and screenHeight == 1080):
image = ImageGrab.grab(bbox=(766, 390, 1166, 452))
elif (screenWidth == 2560 and screenHeight == 1440):
image = ImageGrab.grab(bbox=(973, 373, 1582, 473))
if (image is None):
sys.exit(-1)
# imageName = "screen" + str(x) + ".jpg"
# image.save(imageName)
word = pytesseract.image_to_string(image)
print(word)
# check if we already saw this word
for i in range(0, len(alreadySeenWords)):
if (word == alreadySeenWords[i]):
alreadySeen = True
if (screenWidth == 1920 and screenHeight == 1080):
if (alreadySeen):
mouse.position = (871, 502)
else:
mouse.position = (1033, 502)
alreadySeenWords.append(word)
elif (screenWidth == 2560 and screenHeight == 1440):
if (alreadySeen):
mouse.position = (1193, 507)
else:
alreadySeenWords.append(word)
mouse.position = (1357, 507)
mouse.click(Button.left, 1)
time.sleep(0.2)
| 31.87013 | 97 | 0.627954 |
acfb086457b95a4e3f00873c16a7ce277dc1d485 | 1,596 | py | Python | rosidl_adapter/rosidl_adapter/srv/__init__.py | DongheeYe/rosidl | 36fac1e367bd98a493a7a0935b2c7b5ae86f5e7d | [
"Apache-2.0"
] | 1 | 2019-09-17T05:31:47.000Z | 2019-09-17T05:31:47.000Z | rosidl_adapter/rosidl_adapter/srv/__init__.py | DongheeYe/rosidl | 36fac1e367bd98a493a7a0935b2c7b5ae86f5e7d | [
"Apache-2.0"
] | null | null | null | rosidl_adapter/rosidl_adapter/srv/__init__.py | DongheeYe/rosidl | 36fac1e367bd98a493a7a0935b2c7b5ae86f5e7d | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rosidl_adapter.parser import parse_service_string
from rosidl_adapter.resource import expand_template
def convert_srv_to_idl(package_dir, package_name, input_file, output_dir):
    """Convert a single ROS ``.srv`` file into its ``.idl`` equivalent.

    :param package_dir: absolute Path of the package root
    :param package_name: name of the ROS package the service belongs to
    :param input_file: Path of the ``.srv`` file, relative to package_dir
    :param output_dir: Path of the directory the ``.idl`` file is written to
    :returns: Path of the generated ``.idl`` file
    """
    assert package_dir.is_absolute()
    assert not input_file.is_absolute()
    assert input_file.suffix == '.srv'
    # Fix: the original computed abs_input_file twice; assign it once.
    abs_input_file = package_dir / input_file
    print('Reading input file: {abs_input_file}'.format_map(locals()))
    content = abs_input_file.read_text(encoding='utf-8')
    srv = parse_service_string(package_name, input_file.stem, content)
    output_file = output_dir / input_file.with_suffix('.idl').name
    abs_output_file = output_file.absolute()
    print('Writing output file: {abs_output_file}'.format_map(locals()))
    data = {
        'pkg_name': package_name,
        'relative_input_file': input_file,
        'srv': srv,
    }
    expand_template('srv.idl.em', data, output_file, encoding='iso-8859-1')
    return output_file
| 38.926829 | 75 | 0.744987 |
acfb09463dbf35d7cf0dbfa05fc380208022353f | 4,299 | py | Python | dephell/config/scheme.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | 1 | 2020-04-08T01:06:51.000Z | 2020-04-08T01:06:51.000Z | dephell/config/scheme.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | null | null | null | dephell/config/scheme.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | null | null | null | # project
# external
from dephell_versioning import get_schemes
# app
from ..constants import FORMATS, LOG_FORMATTERS, LOG_LEVELS, REPOSITORIES, STRATEGIES
# Cerberus rule describing one conversion target: a supported dependency-file
# format plus the path it lives at. Reused by 'from', 'to' and 'and' below.
_TARGET = {
    'type': 'dict',
    'schema': {
        'format': {'type': 'string', 'required': True, 'allowed': FORMATS},
        'path': {'type': 'string', 'required': True},
    },
}
# + Scheme for DepHell config, validated by Cerberus:
# https://docs.python-cerberus.org/en/stable/validation-rules.html
# + All fields with default value (defaults.py) marked as required.
# + dict() for rules, {} for content.
# + Grouped in the same groups as builders (./builders.py)
SCHEME = {
    # conversion targets (source, destination, and additional targets)
    'from': dict(required=False, **_TARGET),
    'to': dict(required=False, **_TARGET),
    'and': dict(type='list', schema=_TARGET, required=False, empty=True),
    # sdist/wheel size ratio check
    'sdist': dict(
        type='dict',
        required=True,
        schema={'ratio': dict(type='float', required=True)},
    ),
    # per-host credentials for private repositories
    'auth': dict(
        type='list',
        valuesrules=dict(
            type='dict',
            schema={
                'hostname': dict(type='string', regex=r'[a-z0-9\.\-\_]+'),
                'username': dict(type='string', required=True),
                'password': dict(type='string', required=True),
            },
        ),
    ),
    # api
    'warehouse': dict(type='list', schema=dict(type='string'), required=False, empty=True),
    'bitbucket': dict(type='string', required=True),
    'repo': dict(type='string', required=False, allowed=REPOSITORIES),
    # resolver
    'strategy': dict(type='string', required=True, allowed=STRATEGIES),
    'prereleases': dict(type='boolean', required=True),
    'mutations': dict(type='integer', required=True),
    # output
    'silent': dict(type='boolean', required=True),
    'level': dict(type='string', required=True, allowed=LOG_LEVELS),
    'format': dict(type='string', required=True, allowed=LOG_FORMATTERS),
    'nocolors': dict(type='boolean', required=True),
    'filter': dict(type='string', required=False),
    'traceback': dict(type='boolean', required=True),
    'pdb': dict(type='boolean', required=True),
    'table': dict(type='boolean', required=True),
    # venv
    'venv': dict(type='string', required=True),
    'dotenv': dict(type='string', required=True),
    'python': dict(type='string', required=False),
    'vars': dict(
        type='dict',
        keyschema={'type': 'string'},
        valueschema={'type': 'string'},
        required=False,
    ),
    # docker
    'docker': dict(
        type='dict',
        required=True,
        schema={
            'repo': dict(type='string', regex=r'[a-zA-Z0-9\.\-\_\/]+', required=True),
            'tag': dict(type='string', required=True),
            'container': dict(type='string', required=False),
        },
    ),
    # project upload
    'upload': dict(
        type='dict',
        required=True,
        schema={
            'url': dict(type='string', required=True),
            'sign': dict(type='boolean', required=True),
            'identity': dict(type='string', required=False),
        },
    ),
    # other
    'owner': dict(type='string', required=False),
    'tag': dict(type='string', required=False),
    'cache': dict(
        type='dict',
        required=True,
        schema={
            'path': dict(type='string', required=True),
            'ttl': dict(type='integer', required=True),
        },
    ),
    'project': dict(type='string', required=True),
    'bin': dict(type='string', required=True),
    'envs': dict(type='list', schema=dict(type='string'), required=False, empty=False),
    'tests': dict(type='list', schema=dict(type='string'), required=True),
    'versioning': dict(type='string', required=True, allowed=get_schemes()),
    'command': dict(type='string', required=False),
    'vendor': dict(
        type='dict',
        required=True,
        schema={
            'exclude': dict(type='list', schema=dict(type='string'), required=True, empty=True),
            'path': dict(type='string', required=True),
        },
    ),
}
acfb0959babdc085bd4f1e642db05bb9670cf40a | 2,121 | py | Python | sdk/python/feast/infra/transformation_servers/app.py | tpvasconcelos/feast | 37971a455f955149db00644c49a3a0944ca24bc6 | [
"Apache-2.0"
] | 2,258 | 2020-05-17T02:41:07.000Z | 2022-03-31T22:30:57.000Z | sdk/python/feast/infra/transformation_servers/app.py | tpvasconcelos/feast | 37971a455f955149db00644c49a3a0944ca24bc6 | [
"Apache-2.0"
] | 1,768 | 2020-05-16T05:37:28.000Z | 2022-03-31T23:30:05.000Z | sdk/python/feast/infra/transformation_servers/app.py | tpvasconcelos/feast | 37971a455f955149db00644c49a3a0944ca24bc6 | [
"Apache-2.0"
] | 415 | 2020-05-16T18:21:27.000Z | 2022-03-31T09:59:10.000Z | import base64
import os
import tempfile
import threading
from pathlib import Path
import yaml
from feast import FeatureStore
from feast.constants import (
DEFAULT_FEATURE_TRANSFORMATION_SERVER_PORT,
FEATURE_STORE_YAML_ENV_NAME,
FEATURE_TRANSFORMATION_SERVER_PORT_ENV_NAME,
REGISTRY_ENV_NAME,
)
from feast.infra.local import LocalRegistryStore
from feast.registry import get_registry_store_class_from_scheme
# Load RepoConfig: the full feature_store.yaml is passed base64-encoded in an
# environment variable so the container needs no mounted config file.
config_base64 = os.environ[FEATURE_STORE_YAML_ENV_NAME]
config_bytes = base64.b64decode(config_base64)
# Create a new unique directory for writing feature_store.yaml
repo_path = Path(tempfile.mkdtemp())
with open(repo_path / "feature_store.yaml", "wb") as f:
    f.write(config_bytes)
# Write registry contents for local registries: a file-based registry also
# arrives base64-encoded via the environment and is materialized on disk.
config_string = config_bytes.decode("utf-8")
raw_config = yaml.safe_load(config_string)
registry = raw_config["registry"]
# 'registry' may be a plain path string or a dict with a 'path' key.
registry_path = registry["path"] if isinstance(registry, dict) else registry
registry_store_class = get_registry_store_class_from_scheme(registry_path)
if registry_store_class == LocalRegistryStore and not os.path.exists(registry_path):
    registry_base64 = os.environ[REGISTRY_ENV_NAME]
    registry_bytes = base64.b64decode(registry_base64)
    registry_dir = os.path.dirname(registry_path)
    if not os.path.exists(repo_path / registry_dir):
        os.makedirs(repo_path / registry_dir)
    with open(repo_path / registry_path, "wb") as f:
        f.write(registry_bytes)
# Initialize the feature store
store = FeatureStore(repo_path=str(repo_path.resolve()))
if isinstance(registry, dict) and registry.get("cache_ttl_seconds", 0) > 0:
    # disable synchronous refresh
    store.config.registry.cache_ttl_seconds = 0
    # enable asynchronous refresh: re-schedules itself on a Timer so the
    # registry cache is refreshed every cache_ttl_seconds in the background
    def async_refresh():
        store.refresh_registry()
        threading.Timer(registry["cache_ttl_seconds"], async_refresh).start()
    async_refresh()
# Start the feature transformation server (blocks serving requests).
port = (
    os.environ.get(FEATURE_TRANSFORMATION_SERVER_PORT_ENV_NAME)
    or DEFAULT_FEATURE_TRANSFORMATION_SERVER_PORT
)
store.serve_transformations(port)
| 33.140625 | 84 | 0.789722 |
acfb0a9f7408379bd6fd1d6886740b7630cd96ec | 773 | py | Python | tests/integration/graphics/test_immediate_drawing_indexed_data.py | AnantTiwari-Naman/pyglet | 4774f2889057da95a78785a69372112931e6a620 | [
"BSD-3-Clause"
] | 1,160 | 2019-06-13T11:51:40.000Z | 2022-03-31T01:55:32.000Z | tests/integration/graphics/test_immediate_drawing_indexed_data.py | AaronCWacker/pyglet | 63b1ece7043133d47eb898857876e4927d9759b2 | [
"BSD-3-Clause"
] | 491 | 2019-07-14T16:13:11.000Z | 2022-03-31T08:04:32.000Z | tests/integration/graphics/test_immediate_drawing_indexed_data.py | AaronCWacker/pyglet | 63b1ece7043133d47eb898857876e4927d9759b2 | [
"BSD-3-Clause"
] | 316 | 2019-06-14T13:56:48.000Z | 2022-03-30T19:26:58.000Z | """Tests immediate drawing using indexed data.
"""
import unittest
import pyglet
from tests.annotations import Platform, skip_platform
from .graphics_common import GraphicsIndexedGenericTestCase, get_feedback, GL_TRIANGLES
@skip_platform(Platform.OSX)  # TODO: Check whether OpenGL < 3.0 or compatibility profile is enabled
class ImmediateDrawingIndexedDataTestCase(GraphicsIndexedGenericTestCase, unittest.TestCase):
    def get_feedback(self, data):
        """Draw the indexed vertex data immediately and return the GL feedback."""
        def draw():
            return pyglet.graphics.draw_indexed(
                self.n_vertices, GL_TRIANGLES, self.index_data, *data)
        return get_feedback(draw)
| 40.684211 | 100 | 0.609314 |
acfb0bce02c3403297131352d15017842c7409d4 | 10,627 | py | Python | awx/main/dispatch/worker/callback.py | bhyunki/awx | ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c | [
"Apache-2.0"
] | 2 | 2020-03-19T20:49:37.000Z | 2020-05-04T14:36:11.000Z | awx/main/dispatch/worker/callback.py | bhyunki/awx | ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c | [
"Apache-2.0"
] | 35 | 2021-03-01T06:34:26.000Z | 2022-03-01T01:18:42.000Z | awx/main/dispatch/worker/callback.py | bhyunki/awx | ce588a6af5a5c7f71a5b176ffe53eda5ebc3492c | [
"Apache-2.0"
] | null | null | null | import json
import logging
import os
import signal
import time
import traceback
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
from django_guid.middleware import GuidMiddleware
import psutil
import redis
from awx.main.consumers import emit_channel_notification
from awx.main.models import JobEvent, AdHocCommandEvent, ProjectUpdateEvent, InventoryUpdateEvent, SystemJobEvent, UnifiedJob, Job
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from awx.main.utils.profiling import AWXProfiler
import awx.main.analytics.subsystem_metrics as s_metrics
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
    """
    A worker implementation that deserializes callback event data and persists
    it into the database.
    The code that *generates* these types of messages is found in the
    ansible-runner display callback plugin.
    """
    # How many times perform_work() re-attempts a failed flush.
    MAX_RETRIES = 2
    # Timestamps of the last statistics write and last DB flush.
    last_stats = time.time()
    last_flush = time.time()
    # Total events popped from Redis by this worker.
    total = 0
    # Human-readable description of the most recent event (for debug()).
    last_event = ''
    # AWXProfiler instance; created in __init__.
    prof = None
    def __init__(self):
        """Set up per-worker state, metrics, and the Redis connection."""
        self.buff = {}  # events buffered per model class, flushed in bulk
        self.pid = os.getpid()
        self.redis = redis.Redis.from_url(settings.BROKER_URL)
        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
        self.queue_pop = 0  # events popped since the last metrics push
        self.queue_name = settings.CALLBACK_QUEUE
        self.prof = AWXProfiler("CallbackBrokerWorker")
        # Drop stale per-worker statistics keys left over from earlier runs.
        for key in self.redis.keys('awx_callback_receiver_statistics_*'):
            self.redis.delete(key)
    def read(self, queue):
        """Pop one callback event from the Redis list and decode it.

        Returns the decoded event dict, or a synthetic ``{'event': 'FLUSH'}``
        sentinel on timeout or on any Redis/JSON failure, so the caller can
        use the idle cycle to flush buffered events.
        """
        try:
            res = self.redis.blpop(self.queue_name, timeout=1)
            if res is None:
                # timeout with no event available
                return {'event': 'FLUSH'}
            self.total += 1
            self.queue_pop += 1
            self.subsystem_metrics.inc('callback_receiver_events_popped_redis', 1)
            self.subsystem_metrics.inc('callback_receiver_events_in_memory', 1)
            return json.loads(res[1])
        except redis.exceptions.RedisError:
            logger.exception("encountered an error communicating with redis")
            time.sleep(1)
        except (json.JSONDecodeError, KeyError):
            logger.exception("failed to decode JSON message from redis")
        finally:
            # always record stats/metrics, whether or not an event was read
            self.record_statistics()
            self.record_read_metrics()
        return {'event': 'FLUSH'}
    def record_read_metrics(self):
        """Push queue-depth metrics once events have been popped and the
        metrics pipeline says it is time to execute."""
        if self.queue_pop == 0:
            return
        if self.subsystem_metrics.should_pipe_execute() is True:
            queue_size = self.redis.llen(self.queue_name)
            self.subsystem_metrics.set('callback_receiver_events_queue_size_redis', queue_size)
            self.subsystem_metrics.pipe_execute()
            self.queue_pop = 0
    def record_statistics(self):
        """Write this worker's debug summary to a per-pid Redis key."""
        # buffer stat recording to once per (by default) 5s
        if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
            try:
                self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
                self.last_stats = time.time()
            except Exception:
                logger.exception("encountered an error communicating with redis")
                # still advance the timestamp so a broken Redis is not retried
                # on every single read
                self.last_stats = time.time()
def debug(self):
return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'
    @property
    def mb(self):
        """Resident set size of this worker process in MB, as a string with
        three decimal places."""
        return '{:0.3f}'.format(psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0)
def toggle_profiling(self, *args):
if not self.prof.is_started():
self.prof.start()
logger.error('profiling is enabled')
else:
filepath = self.prof.stop()
logger.error(f'profiling is disabled, wrote {filepath}')
    def work_loop(self, *args, **kw):
        """Install the SIGUSR1 profiling toggle (when enabled by settings)
        and delegate to the BaseWorker loop."""
        if settings.AWX_CALLBACK_PROFILE:
            signal.signal(signal.SIGUSR1, self.toggle_profiling)
        return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
    def flush(self, force=False):
        """Bulk-insert buffered events into the database.

        A flush happens when forced, when the buffer is older than
        JOB_EVENT_BUFFER_SECONDS, or when any per-class buffer reaches 1000
        events. Falls back to one-by-one saves if a bulk insert fails.
        """
        now = tz_now()
        if force or (time.time() - self.last_flush) > settings.JOB_EVENT_BUFFER_SECONDS or any([len(events) >= 1000 for events in self.buff.values()]):
            bulk_events_saved = 0
            singular_events_saved = 0
            metrics_events_batch_save_errors = 0
            for cls, events in self.buff.items():
                logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
                # Stamp timestamps the DB would otherwise set per-row.
                for e in events:
                    if not e.created:
                        e.created = now
                    e.modified = now
                duration_to_save = time.perf_counter()
                try:
                    cls.objects.bulk_create(events)
                    bulk_events_saved += len(events)
                except Exception:
                    # if an exception occurs, we should re-attempt to save the
                    # events one-by-one, because something in the list is
                    # broken/stale
                    metrics_events_batch_save_errors += 1
                    for e in events:
                        try:
                            e.save()
                            singular_events_saved += 1
                        except Exception:
                            logger.exception('Database Error Saving Job Event')
                duration_to_save = time.perf_counter() - duration_to_save
                # Fan the persisted events out over websockets.
                for e in events:
                    emit_event_detail(e)
            self.buff = {}
            self.last_flush = time.time()
            # only update metrics if we saved events
            # NOTE(review): duration_to_save holds only the LAST model class's
            # save time, not the total across classes -- confirm intent
            if (bulk_events_saved + singular_events_saved) > 0:
                self.subsystem_metrics.inc('callback_receiver_batch_events_errors', metrics_events_batch_save_errors)
                self.subsystem_metrics.inc('callback_receiver_events_insert_db_seconds', duration_to_save)
                self.subsystem_metrics.inc('callback_receiver_events_insert_db', bulk_events_saved + singular_events_saved)
                self.subsystem_metrics.observe('callback_receiver_batch_events_insert_db', bulk_events_saved)
                self.subsystem_metrics.inc('callback_receiver_events_in_memory', -(bulk_events_saved + singular_events_saved))
            if self.subsystem_metrics.should_pipe_execute() is True:
                self.subsystem_metrics.pipe_execute()
def perform_work(self, body):
    """Handle one message from the callback queue.

    `body` is a dict describing either a control message ('FLUSH' or
    'EOF' events) or a single job event to buffer. Regular events are
    appended to self.buff keyed by their model class, then flushed via
    self.flush(); transient database errors are retried up to
    self.MAX_RETRIES times with an increasing delay. Any other exception
    is logged and swallowed so the worker loop keeps running.
    """
    try:
        flush = body.get('event') == 'FLUSH'
        if flush:
            self.last_event = ''
        if not flush:
            # Map the job-type foreign-key field present in `body` to the
            # corresponding event model class.
            event_map = {
                'job_id': JobEvent,
                'ad_hoc_command_id': AdHocCommandEvent,
                'project_update_id': ProjectUpdateEvent,
                'inventory_update_id': InventoryUpdateEvent,
                'system_job_id': SystemJobEvent,
            }
            job_identifier = 'unknown job'
            for key, cls in event_map.items():
                if key in body:
                    job_identifier = body[key]
                    break
            self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})'  # noqa
            if body.get('event') == 'EOF':
                try:
                    if 'guid' in body:
                        GuidMiddleware.set_guid(body['guid'])
                    final_counter = body.get('final_counter', 0)
                    logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
                    # EOF events are sent when stdout for the running task is
                    # closed. don't actually persist them to the database; we
                    # just use them to report `summary` websocket events as an
                    # approximation for when a job is "done"
                    emit_channel_notification('jobs-summary', dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter))
                    # Additionally, when we've processed all events, we should
                    # have all the data we need to send out success/failure
                    # notification templates
                    uj = UnifiedJob.objects.get(pk=job_identifier)
                    if isinstance(uj, Job):
                        # *actual playbooks* send their success/failure
                        # notifications in response to the playbook_on_stats
                        # event handling code in main.models.events
                        pass
                    elif hasattr(uj, 'send_notification_templates'):
                        handle_success_and_failure_notifications.apply_async([uj.id])
                except Exception:
                    logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
                finally:
                    self.subsystem_metrics.inc('callback_receiver_events_in_memory', -1)
                    GuidMiddleware.set_guid('')
                return
            event = cls.create_from_data(**body)
            self.buff.setdefault(cls, []).append(event)
        retries = 0
        while retries <= self.MAX_RETRIES:
            try:
                self.flush(force=flush)
                break
            except (OperationalError, InterfaceError, InternalError):
                if retries >= self.MAX_RETRIES:
                    logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
                    return
                # NOTE(review): first retry uses delay = 0 (60 * 0) — confirm
                # an immediate retry is intended before the backoff kicks in.
                delay = 60 * retries
                logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(i=retries + 1, delay=delay))
                django_connection.close()
                time.sleep(delay)
                retries += 1
            except DatabaseError:
                logger.exception('Database Error Saving Job Event')
                break
    except Exception as exc:
        tb = traceback.format_exc()
        logger.error('Callback Task Processor Raised Exception: %r', exc)
        logger.error('Detail: {}'.format(tb))
| 45.41453 | 151 | 0.58916 |
acfb0ced42062f6104d93523835ff0b0890d48fc | 1,537 | py | Python | python/oneflow/support/func_inspect_util.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/support/func_inspect_util.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/support/func_inspect_util.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import sys
if sys.version_info > (2, 7) and sys.version_info < (3, 0):

    def GetArgNameAndDefaultTuple(func):
        """Return ((arg_name, default), ...) for each parameter of `func`.

        Parameters without an explicit default are paired with None.
        """
        args, _varargs, _keywords, defaults = inspect.getargspec(func)
        defaults = list(defaults) if defaults is not None else []
        # Left-pad with None so defaults line up with the argument names.
        padded = [None] * (len(args) - len(defaults)) + defaults
        return tuple(zip(args, padded))


elif sys.version_info >= (3, 0):

    def GetArgNameAndDefaultTuple(func):
        """Return ((arg_name, default), ...) for each parameter of `func`.

        Parameters without an explicit default are paired with None.
        """
        empty = inspect.Parameter.empty
        params = inspect.signature(func).parameters.items()
        return tuple(
            (name, None if p.default is empty else p.default) for name, p in params
        )


else:
    raise NotImplementedError


def GetArgDefaults(func):
    """Return just the default values, in parameter order."""
    return tuple(entry[1] for entry in GetArgNameAndDefaultTuple(func))
| 30.74 | 84 | 0.685751 |
acfb0e0298154197d1a1a920c690ba29a6decfbb | 1,236 | py | Python | python_modules/libraries/dagster-papertrail/setup.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | python_modules/libraries/dagster-papertrail/setup.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | python_modules/libraries/dagster-papertrail/setup.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | from typing import Dict
from setuptools import find_packages, setup # type: ignore
def get_version() -> str:
    """Execute dagster_papertrail/version.py and return its __version__."""
    namespace: Dict[str, str] = {}
    with open("dagster_papertrail/version.py") as fp:
        exec(fp.read(), namespace)  # pylint: disable=W0122
    return namespace["__version__"]
if __name__ == "__main__":
    ver = get_version()
    # dont pin dev installs to avoid pip dep resolver issues
    pin = "" if ver == "dev" else f"=={ver}"
    # Standard setuptools metadata; the only computed piece is the dagster
    # version pin derived above.
    setup(
        name="dagster-papertrail",
        version=ver,
        author="Elementl",
        author_email="hello@elementl.com",
        license="Apache-2.0",
        description="Package for papertrail Dagster framework components.",
        url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-papertrail",
        classifiers=[
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "License :: OSI Approved :: Apache Software License",
            "Operating System :: OS Independent",
        ],
        packages=find_packages(exclude=["test"]),
        install_requires=[f"dagster{pin}"],
        zip_safe=False,
    )
| 33.405405 | 108 | 0.614887 |
acfb0e0ca95fd42533663dd60eab680c7964d1b3 | 44,137 | py | Python | misc/utils.py | srama2512/sidekicks | a5c487bb30540c98f04ece5e2c22ef95963afbdb | [
"MIT"
] | 26 | 2018-07-30T19:14:49.000Z | 2022-03-12T12:49:36.000Z | misc/utils.py | srama2512/sidekicks | a5c487bb30540c98f04ece5e2c22ef95963afbdb | [
"MIT"
] | 2 | 2018-12-10T17:12:27.000Z | 2019-07-15T22:47:28.000Z | misc/utils.py | srama2512/sidekicks | a5c487bb30540c98f04ece5e2c22ef95963afbdb | [
"MIT"
] | 8 | 2018-12-18T00:55:45.000Z | 2019-11-11T18:42:49.000Z | import sys
import pdb
import math
import json
import torch
import random
import argparse
import numpy as np
import torchvision
import tensorboardX
import torch.optim as optim
import torchvision.utils as vutils
from State import *
def str2bool(v):
    """Interpret a command-line string as a boolean flag (case-insensitive)."""
    return v.lower() in {"yes", "true", "t", "1"}
def set_random_seeds(seed):
    """
    Sets the random seeds for numpy, python, pytorch cpu and gpu
    """
    # Seed every RNG the project draws from so runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seeds all visible GPUs; effectively a no-op when CUDA is unavailable.
    torch.cuda.manual_seed_all(seed)
def load_module(agent, opts):
    """
    Given the agent, load a pre-trained model and other setup based on the
    training_setting

    Returns (best_val_error, train_history, val_history, epoch_start).
    NOTE: for training settings 2 and 3 the local `opts` is rebound to the
    saved checkpoint's opts, but the caller's object is not mutated.
    """
    # ---- Load the pre-trained model ----
    load_state = torch.load(opts.load_model)
    # strict=False ensures that only the modules common to loaded_dict and agent.policy's state_dict are loaded.
    # Could potentially lead to errors being masked. Tread carefully!
    if opts.actorType == 'actor' and opts.act_full_obs:
        # Don't load the actor module, since the full obs actor architecture is different.
        partial_state_dict = {k: v for k, v in load_state['state_dict'].items() if 'act' not in k}
        agent.policy.load_state_dict(partial_state_dict, strict=False)
    else:
        agent.policy.load_state_dict(load_state['state_dict'], strict=False)
    # ---- Other settings ----
    epoch_start = 0
    best_val_error = 100000
    train_history = []
    val_history = []
    # The bare triple-quoted strings in each branch below are no-op
    # statements used as scenario notes; they do not affect behavior.
    if opts.training_setting == 1:
        """
        Scenario: Model trained on one-view reconstruction. Needs to be
        finetuned for multi-view reconstruction.
        """
        # (1) Must fix sense, fuse modules
        for parameter in agent.policy.sense_im.parameters():
            parameter.requires_grad = False
        for parameter in agent.policy.sense_pro.parameters():
            parameter.requires_grad = False
        for parameter in agent.policy.fuse.parameters():
            parameter.requires_grad = False
        # (2) Fix decode module if requested
        if opts.fix_decode:
            for parameter in agent.policy.decode.parameters():
                parameter.requires_grad = False
        # (3) Re-create the optimizer with the above settings
        agent.create_optimizer(opts.lr, opts.weight_decay, opts.training_setting, opts.fix_decode)
    elif opts.training_setting == 2:
        """
        Scenario: Model trained on one-view reconstruction. Needs to be
        further trained on the same setting.
        """
        # (1) Keep a copy of the new number of epochs to run for
        epoch_total = opts.epochs
        # (2) Load the rest of the opts from saved model
        opts = load_state['opts']
        opts.epochs = epoch_total
        train_history = load_state['train_history']
        val_history = load_state['val_history']
        best_val_error = load_state['best_val_error']
        epoch_start = load_state['epoch']+1
        # (3) Create optimizer based on the new parameter settings
        agent.create_optimizer(opts.lr, opts.weight_decay, 2, opts.fix_decode)
        # (4) Load the optimizer state dict
        agent.optimizer.load_state_dict(load_state['optimizer'])
    elif opts.training_setting == 3:
        """
        Scenario: Model training on multi-view reconstruction. Needs to be
        further trained on the same setting.
        """
        # (1) Load opts from saved model and replace LR
        opts_copy = load_state['opts']
        opts_copy.lr = opts.lr
        train_history = load_state['train_history']
        val_history = load_state['val_history']
        best_val_error = load_state['best_val_error']
        epoch_start = load_state['epoch']+1
        opts_copy.training_setting = opts.training_setting
        opts = opts_copy
        # (2) Fix sense, fuse and decode (optionally) modules
        for parameter in agent.policy.sense_im.parameters():
            parameter.requires_grad = False
        for parameter in agent.policy.sense_pro.parameters():
            parameter.requires_grad = False
        for parameter in agent.policy.fuse.parameters():
            parameter.requires_grad = False
        if opts.fix_decode:
            for parameter in agent.policy.decode.parameters():
                parameter.requires_grad = False
        # (3) Re-create the optimizer with the above settings
        agent.create_optimizer(opts.lr, opts.weight_decay, 3, opts.fix_decode)
        # (4) Load the optimizer state dict
        agent.optimizer.load_state_dict(load_state['optimizer'])
    elif opts.training_setting == 4:
        """
        Scenario: Model trained on one-view reconstruction. Needs to be
        further trained on some other setting.
        """
        # (1) Load the train history, val history and best validation errors from the saved model.
        train_history = load_state['train_history']
        val_history = load_state['val_history']
        best_val_error = load_state['best_val_error']
        epoch_start = load_state['epoch']+1
        # (2) Create the optimizer according to the new settings
        agent.create_optimizer(opts.lr, opts.weight_decay, opts.training_setting, False)
    return best_val_error, train_history, val_history, epoch_start
def get_starts(N, M, batch_size, option):
    """Pick a starting (elevation, azimuth) grid cell for each panorama.

    Given the number of elevations (N), azimuths (M), the batch size and
    the start option, returns a list of [start_elev, start_azim] pairs,
    one per panorama: option 0 samples uniformly at random, any other
    option uses the fixed near-center cell [N//2, M//2 - 1].
    """
    if option == 0:
        return [[random.randint(0, N - 1), random.randint(0, M - 1)] for _ in range(batch_size)]
    return [[N // 2, M // 2 - 1] for _ in range(batch_size)]
def utility_function(utility_matrix, selected_views, threshold):
    """Score a set of selected views against the utility matrix.

    The per-cell coverage maps of all selected views are summed, clipped
    element-wise at `threshold`, and totalled over the NxM grid.

    utility_matrix : NxMxNxM array
    selected_views : list of (i, j) pairs indicating selected views
    """
    N, M = utility_matrix.shape[0], utility_matrix.shape[1]
    coverage = np.zeros((N, M))
    for i, j in selected_views:
        coverage = coverage + utility_matrix[i, j]
    return np.minimum(coverage, threshold).sum()
def utility_function_unique(utility_matrix, selected_views, threshold):
    """Score selected views like utility_function, counting each view once.

    Duplicate (i, j) entries in selected_views are collapsed before the
    coverage maps are summed, so re-selecting a view yields no extra score.

    utility_matrix : NxMxNxM array
    selected_views : list of (i, j) pairs indicating selected views
    """
    N, M = utility_matrix.shape[0], utility_matrix.shape[1]
    unique_views = {(v[0], v[1]) for v in selected_views}
    coverage = np.zeros((N, M))
    for i, j in unique_views:
        coverage = coverage + utility_matrix[i, j]
    return np.minimum(coverage, threshold).sum()
def get_submodular_views(utility_matrix, num_views):
    """Greedily select views to (approximately) maximize coverage utility.

    Uses greedy maximization of the submodular utility function: at each
    step the view with the largest marginal gain (threshold 1) is added.

    utility_matrix : NxMxNxM array
    num_views : number of views to select

    Returns (selected_views, total_utility).
    """
    N, M = utility_matrix.shape[0], utility_matrix.shape[1]
    sel_views = []
    total_utility = 0
    for _ in range(num_views):
        best_pos = [0, 0]
        best_gain = 0
        for i in range(N):
            for j in range(M):
                gain = utility_function(utility_matrix, sel_views + [[i, j]], 1) - total_utility
                # >= keeps the last maximizer, matching the original greedy
                # tie-breaking behavior.
                if gain >= best_gain:
                    best_gain = gain
                    best_pos = [i, j]
        sel_views.append(best_pos)
        total_utility += best_gain
    return sel_views, total_utility
def get_expert_trajectories(state, pano_maps_orig, selected_views, opts):
    """
    Get greedy trajectories based on utility for each panorama in batch
    opts must contain:
        T, delta_M, delta_N, wrap_elevation, wrap_azimuth, N, M

    state          : provides delta_to_act, mapping a (delta_elev, delta_azim)
                     tuple to a discrete action index
    pano_maps_orig : per-panorama utility maps; assumed B x N x M x N x M —
                     TODO confirm against caller
    selected_views : per-panorama list of already-selected [elev, azim] views;
                     mutated in place as new views are appended

    Returns (selected_views, selected_actions).
    """
    pano_maps = np.copy(pano_maps_orig)
    batch_size = pano_maps.shape[0]
    # Note: Assuming atleast one view has been selected initially
    t_start = len(selected_views[0])-1  # What t to start from, if some views have already been selected
    # Access pattern: selected_views[batch_size][time_step]
    selected_actions = np.zeros((batch_size, opts.T-t_start-1), np.int32)  # Access pattern: selected_actions[batch_size][time_step]
    for i in range(batch_size):
        curr_utility = utility_function_unique(pano_maps[i], selected_views[i], 1)
        # Given the first view, select T-1 more views
        t = t_start
        while t < opts.T-1:
            curr_pos = selected_views[i][t]
            max_gain = 0
            max_delta = None
            max_pos = None
            # Exhaustively score every move reachable within one camera step.
            for delta_ele in range(-(opts.delta_N//2), opts.delta_N//2 + 1):
                for delta_azi in range(-(opts.delta_M//2), opts.delta_M//2 + 1):
                    # Wrap or clamp at the grid boundary per opts.
                    if opts.wrap_elevation:
                        new_ele = (curr_pos[0] + delta_ele)%opts.N
                    else:
                        new_ele = max(min(curr_pos[0] + delta_ele, opts.N-1), 0)
                    if opts.wrap_azimuth:
                        new_azi = (curr_pos[1] + delta_azi)%opts.M
                    else:
                        new_azi = max(min(curr_pos[1] + delta_azi, opts.M-1), 0)
                    new_pos = [new_ele, new_azi]
                    curr_gain = utility_function_unique(pano_maps[i], selected_views[i] + [new_pos], 1) - curr_utility
                    # >= means later (larger-delta) candidates win ties.
                    if curr_gain >= max_gain:
                        max_gain = curr_gain
                        max_delta = (delta_ele, delta_azi)
                        max_pos = new_pos
            curr_utility += max_gain
            selected_views[i].append(max_pos)
            selected_actions[i][t-t_start] = state.delta_to_act[max_delta]
            t += 1
    return selected_views, selected_actions
def evaluate(loader, agent, split, opts):
    """
    Evaluation function - evaluates the agent over fixed grid locations as
    starting points and returns the overall average reconstruction error.

    split : one of 'val', 'test', 'test_unseen'; selects the loader method
            and whether panorama masks are available.
    Returns (overall_mean, overall_std, overall_std_err, decoded_images)
    where decoded_images are visualization tensors built from the first
    starting location of each batch.
    """
    # ---- Initial setup ----
    depleted = False
    agent.policy.eval()
    overall_err = 0
    overall_count = 0
    err_values = []
    decoded_images = []
    while not depleted:
        # ---- Sample batch of data ----
        # The loader's return signature varies with split/options; unused
        # side inputs are set to None.
        if split == 'val':
            if opts.expert_rewards and opts.expert_trajectories:
                pano, pano_maps, pano_rewards, depleted = loader.next_batch_val()
            elif opts.expert_trajectories or opts.actorType == 'demo_sidekick':
                pano, pano_maps, depleted = loader.next_batch_val()
                pano_rewards = None
            elif opts.expert_rewards:
                pano, pano_rewards, depleted = loader.next_batch_val()
                pano_maps = None
            else:
                pano, depleted = loader.next_batch_val()
                pano_rewards = None
                pano_maps = None
        elif split == 'test':
            if opts.actorType == 'demo_sidekick':
                pano, pano_masks, pano_maps, depleted = loader.next_batch_test()
            else:
                pano, pano_masks, depleted = loader.next_batch_test()
            pano_rewards = None
        elif split == 'test_unseen':
            if opts.actorType == 'demo_sidekick':
                pano, pano_masks, pano_maps, depleted = loader.next_batch_test_unseen()
            else:
                pano, pano_masks, depleted = loader.next_batch_test_unseen()
            pano_rewards = None
        # Initial setup for evaluating over a grid of views
        curr_err = 0
        curr_count = 0
        curr_err_batch = 0
        batch_size = pano.shape[0]
        # Compute the performance with the initial state
        # starting at fixed grid locations
        if opts.start_view == 0:
            # Randomly sample one location from grid
            elevations = [random.randint(0, opts.N-1)]
            azimuths = [random.randint(0, opts.M-1)]
        elif opts.start_view == 1:
            # Sample only the center location from grid
            elevations = [opts.N//2]
            azimuths = [opts.M//2-1]
        else:
            # Sample all the locations from grid
            elevations = range(0, opts.N, 2)
            azimuths = range(0, opts.M, 2)
        for i in elevations:
            for j in azimuths:
                start_idx = [[i, j] for _ in range(batch_size)]
                if split == 'test' or split == 'test_unseen':
                    state = State(pano, pano_rewards, start_idx, opts, pano_masks)
                else:
                    state = State(pano, pano_rewards, start_idx, opts)
                # Enable view memorization for testing by default
                if opts.actorType == 'demo_sidekick':  # Not enabling demo_sidekick training for AgentSupervised (that's not needed, doesn't make sense)
                    _, rec_errs, _, _, _, _, visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts={'greedy': opts.greedy, 'memorize_views': True}, pano_maps=pano_maps, opts=opts)
                else:
                    _, rec_errs, _, _, _, _, visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts={'greedy': opts.greedy, 'memorize_views': True})
                # For some random initial state, print the decoded images at all time steps
                if curr_count == 0:
                    curr_decoded_plus_true = None
                    for dec_idx in range(len(decoded_all)):
                        decoded = decoded_all[dec_idx].data.cpu()
                        curr_decoded = decoded.numpy()
                        # Rotate it forward by the start index
                        # Shifting all the images by equal amount since the start idx is same for all
                        if not opts.knownAzim:
                            curr_decoded = np.roll(curr_decoded, start_idx[0][1], axis=2)
                        if not opts.knownElev:
                            curr_decoded = np.roll(curr_decoded, start_idx[0][0], axis=1)
                        # Fill in the true views here
                        for jdx, jdx_v in enumerate(visited_idxes):
                            if jdx > dec_idx:
                                break
                            for idx in range(batch_size):
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :] = state.views_prepro[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :]
                        # Undo preprocessing: back to [0, 255] pixel range.
                        curr_decoded = curr_decoded * 255
                        for c in range(opts.num_channels):
                            #curr_decoded[:, :, :, , c, :, :] *= opts.std[c]
                            curr_decoded[:, :, :, c, :, :] += opts.mean[c]
                        # Replicate grayscale panoramas to 3 channels for display.
                        if opts.num_channels == 1:
                            curr_decoded_3chn = np.zeros((batch_size, opts.N, opts.M, 3, 32, 32))
                            for c in range(3):
                                curr_decoded_3chn[:, :, :, c, :, :] = curr_decoded[:, :, :, 0, :, :]
                            curr_decoded = curr_decoded_3chn
                        #for jdx, jdx_v in enumerate(visited_idxes):
                        #    if jdx > dec_idx:
                        #        break
                        jdx_v = visited_idxes[dec_idx]
                        #for idx in range(batch_size):
                            # Fill in some red margin
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, 0:3, :] = 0
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, -3:, :] = 0
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, 0:3] = 0
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, -3:] = 0
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], 0, 0:3, :] = 255
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], 0, -3:, :] = 255
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], 0, :, 0:3] = 255
                            #curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], 0, :, -3:] = 255
                        # Need to convert from B x N x M x C x 32 x 32 to B x 1 x C x N*32 x M*32
                        # Convert from B x N x M x C x 32 x 32 to B x C x N x 32 x M x 32 and then reshape
                        curr_decoded = curr_decoded.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, 3, opts.N*32, opts.M*32)
                        true_state = np.array(state.views)
                        start_idx = state.start_idx
                        if opts.num_channels == 1:
                            true_state_3chn = np.zeros((batch_size, opts.N, opts.M, 3, 32, 32))
                            for c in range(3):
                                true_state_3chn[:, :, :, c, :, :] = true_state[:, :, :, 0, :, :]
                            true_state = true_state_3chn
                        # Fill in red margin for starting states of each true panorama
                        #for idx in range(batch_size):
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], :, 0:3, :] = 0
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], :, -3:, :] = 0
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], :, :, 0:3] = 0
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], :, :, -3:] = 0
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], 0, 0:3, :] = 255
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], 0, -3:, :] = 255
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], 0, :, 0:3] = 255
                        #    true_state[idx, start_idx[idx][0], start_idx[idx][1], 0, :, -3:] = 255
                        true_state = true_state.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, 3, opts.N*32, opts.M*32)
                        if curr_decoded_plus_true is None:
                            curr_decoded_plus_true = curr_decoded
                        else:
                            curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, curr_decoded], axis=1)
                    # Prepend the ground-truth panorama to the time-step strip.
                    curr_decoded_plus_true = np.concatenate([true_state, curr_decoded_plus_true], axis=1)
                    if opts.expert_rewards:
                        # Append a grayscale visualization of the expert reward map.
                        reward_image = np.zeros_like(curr_decoded)
                        for iter_N in range(opts.N):
                            for iter_M in range(opts.M):
                                for bn in range(batch_size):
                                    reward_image[bn, :, :, (iter_N*32):((iter_N+1)*32), (iter_M*32):((iter_M+1)*32)] = pano_rewards[bn, iter_N, iter_M]/255.0
                        curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, reward_image], axis=1)
                    decoded_images.append(torch.Tensor(curr_decoded_plus_true/255.0))
                # Add error from the last step
                curr_err += rec_errs[-1].data.sum()
                curr_count += 1  # Count for the views
                curr_err_batch += rec_errs[-1].data.cpu().numpy()
        curr_err /= curr_count
        curr_err_batch /= curr_count
        for i in range(curr_err_batch.shape[0]):
            err_values.append(float(curr_err_batch[i]))
        overall_err += curr_err
        overall_count += batch_size
    err_values = np.array(err_values)
    overall_mean = float(np.mean(err_values))
    overall_std = float(np.std(err_values, ddof=1))
    overall_std_err = float(overall_std/math.sqrt(err_values.shape[0]))
    agent.policy.train()
    return overall_mean, overall_std, overall_std_err, decoded_images
def evaluate_adversarial_fixed(loader, agent, split, opts):
    """
    Evaluation function - evaluates the agent over the hardest starting points for
    a one-view model

    The adversarial starting views are read from opts.start_views_json
    (key '<split>_adversarial_views'), one [elev, azim] pair per test
    panorama, aligned with loader order.
    Returns (overall_mean, overall_std, overall_std_err, decoded_images).
    """
    # ---- Initial setup ----
    depleted = False
    agent.policy.eval()
    overall_err = 0
    overall_count = 0
    decoded_images = []
    start_views = json.load(open(opts.start_views_json))['%s_adversarial_views'%split]
    # Coerce view indices to ints (JSON may hold them as floats/strings).
    for i in range(len(start_views)):
        start_views[i][0] = int(start_views[i][0])
        start_views[i][1] = int(start_views[i][1])
    err_values = []
    while not depleted:
        # ---- Sample batch of data ----
        if split == 'test':
            pano, pano_masks, depleted = loader.next_batch_test()
            pano_rewards = None
            pano_maps = None
        elif split == 'test_unseen':
            pano, pano_masks, depleted = loader.next_batch_test_unseen()
            pano_rewards = None
            pano_maps = None
        # Initial setup for evaluating over a grid of views
        batch_size = pano.shape[0]
        # Get the adversarial start_idx
        start_idx = start_views[overall_count:(overall_count+batch_size)]
        state = State(pano, pano_rewards, start_idx, opts, pano_masks)
        # Enable view memorization for testing by default
        _, rec_errs, _, _, _, _, visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts={'greedy': opts.greedy, 'memorize_views': True})
        # For some random initial state, print the decoded images at all time steps
        curr_decoded_plus_true = None
        for dec_idx in range(len(decoded_all)):
            decoded = decoded_all[dec_idx].data.cpu()
            curr_decoded = decoded.numpy()
            # Rotate it forward by the start index
            # Shifting all the images by equal amount since the start idx is same for all
            if not opts.knownAzim:
                curr_decoded = np.roll(curr_decoded, start_idx[0][1], axis=2)
            if not opts.knownElev:
                curr_decoded = np.roll(curr_decoded, start_idx[0][0], axis=1)
            # Fill in the true views here
            for jdx, jdx_v in enumerate(visited_idxes):
                if jdx > dec_idx:
                    break
                for idx in range(batch_size):
                    curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :] = state.views_prepro[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :]
                    # Fill in some black margin
                    curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, 0:3, :] = 0
                    curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, -3:-1, :] = 0
                    curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, 0:3] = 0
                    curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, -3:-1] = 0
            # Need to convert from B x N x M x C x 32 x 32 to B x 1 x C x N*32 x M*32
            # Convert from B x N x M x C x 32 x 32 to B x C x N x 32 x M x 32 and then reshape
            curr_decoded = curr_decoded.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, opts.num_channels, opts.N*32, opts.M*32)*255.0
            true_state = state.views.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, opts.num_channels, opts.N*32, opts.M*32)
            for c in range(opts.num_channels):
                #curr_decoded[:, :, c, :, :] *= opts.std[c]
                curr_decoded[:, :, c, :, :] += opts.mean[c]
            if curr_decoded_plus_true is None:
                curr_decoded_plus_true = curr_decoded
            else:
                curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, curr_decoded], axis=1)
        # Append the ground-truth panorama after the per-step decodings.
        curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, true_state], axis=1)
        if opts.expert_rewards:
            reward_image = np.zeros_like(curr_decoded)
            for iter_N in range(opts.N):
                for iter_M in range(opts.M):
                    for bn in range(batch_size):
                        reward_image[bn, :, :, (iter_N*32):((iter_N+1)*32), (iter_M*32):((iter_M+1)*32)] = pano_rewards[bn, iter_N, iter_M]/255.0
            curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, reward_image], axis=1)
        decoded_images.append(torch.Tensor(curr_decoded_plus_true/255.0))
        err_values.append(rec_errs[-1].data.cpu().numpy())
        overall_err += np.sum(rec_errs[-1].data.cpu().numpy())
        overall_count += batch_size
    err_values = np.concatenate(err_values, axis=0)
    overall_mean = np.mean(err_values)
    overall_std = np.std(err_values, ddof=1)
    overall_std_err = overall_std/math.sqrt(err_values.shape[0])
    agent.policy.train()
    return overall_mean, overall_std, overall_std_err, decoded_images
def evaluate_adversarial(loader, agent, split, opts):
    """
    Evaluation function - evaluates the agent over all grid locations as
    starting points and returns the average of worst reconstruction error over different
    locations for the panoramas (average(max error over locations)).

    Returns (overall_mean, overall_std, overall_std_err, decoded_images),
    computed over the per-panorama worst-case errors.
    """
    # ---- Initial setup ----
    depleted = False
    agent.policy.eval()
    overall_err = 0
    overall_count = 0
    decoded_images = []
    err_values = []
    while not depleted:
        # ---- Sample batch of data ----
        if split == 'val':
            if opts.expert_trajectories or opts.actorType == 'demo_sidekick':
                pano, pano_maps, depleted = loader.next_batch_val()
                pano_rewards = None
            elif opts.expert_rewards:
                pano, pano_rewards, depleted = loader.next_batch_val()
                pano_maps = None
            else:
                pano, depleted = loader.next_batch_val()
                pano_rewards = None
                pano_maps = None
        elif split == 'test':
            if opts.actorType == 'demo_sidekick':
                pano, pano_masks, pano_maps, depleted = loader.next_batch_test()
            else:
                pano, pano_masks, depleted = loader.next_batch_test()
            pano_rewards = None
        elif split == 'test_unseen':
            if opts.actorType == 'demo_sidekick':
                pano, pano_masks, pano_maps, depleted = loader.next_batch_test_unseen()
            else:
                pano, pano_masks, depleted = loader.next_batch_test_unseen()
            pano_rewards = None
        # Initial setup for evaluating over a grid of views
        batch_size = pano.shape[0]
        # Compute the performance with the initial state
        # starting at fixed grid locations
        elevations = range(0, opts.N)
        azimuths = range(0, opts.M)
        errs_across_grid = np.zeros((batch_size, opts.N, opts.M))
        for i in elevations:
            for j in azimuths:
                start_idx = [[i, j] for _ in range(batch_size)]
                if split == 'test' or split == 'test_unseen':
                    state = State(pano, pano_rewards, start_idx, opts, pano_masks)
                else:
                    state = State(pano, pano_rewards, start_idx, opts)
                # Enable view memorization for testing by default
                if opts.actorType == 'demo_sidekick':  # Not enabling demo_sidekick training for AgentSupervised (that's not needed, doesn't make sense)
                    _, rec_errs, _, _, _, _, visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts={'greedy': opts.greedy, 'memorize_views': True}, pano_maps=pano_maps, opts=opts)
                else:
                    _, rec_errs, _, _, _, _, visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts={'greedy': opts.greedy, 'memorize_views': True})
                # For some random initial state, print the decoded images at all time steps
                if i == 0 and j == 0:
                    curr_decoded_plus_true = None
                    for dec_idx in range(len(decoded_all)):
                        decoded = decoded_all[dec_idx].data.cpu()
                        curr_decoded = decoded.numpy()
                        # Rotate it forward by the start index
                        # Shifting all the images by equal amount since the start idx is same for all
                        if not opts.knownAzim:
                            curr_decoded = np.roll(curr_decoded, start_idx[0][1], axis=2)
                        if not opts.knownElev:
                            curr_decoded = np.roll(curr_decoded, start_idx[0][0], axis=1)
                        # Fill in the true views here
                        for jdx, jdx_v in enumerate(visited_idxes):
                            if jdx > dec_idx:
                                break
                            for idx in range(batch_size):
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :] = state.views_prepro[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :]
                                # Fill in some black margin
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, 0:3, :] = 0
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, -3:-1, :] = 0
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, 0:3] = 0
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, -3:-1] = 0
                        # Need to convert from B x N x M x C x 32 x 32 to B x 1 x C x N*32 x M*32
                        # Convert from B x N x M x C x 32 x 32 to B x C x N x 32 x M x 32 and then reshape
                        curr_decoded = curr_decoded.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, opts.num_channels, opts.N*32, opts.M*32)*255.0
                        true_state = state.views.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, opts.num_channels, opts.N*32, opts.M*32)
                        for c in range(opts.num_channels):
                            #curr_decoded[:, :, c, :, :] *= opts.std[c]
                            curr_decoded[:, :, c, :, :] += opts.mean[c]
                        if curr_decoded_plus_true is None:
                            curr_decoded_plus_true = curr_decoded
                        else:
                            curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, curr_decoded], axis=1)
                    # Append the ground-truth panorama after the per-step decodings.
                    curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, true_state], axis=1)
                    if opts.expert_rewards:
                        reward_image = np.zeros_like(curr_decoded)
                        for iter_N in range(opts.N):
                            for iter_M in range(opts.M):
                                for bn in range(batch_size):
                                    reward_image[bn, :, :, (iter_N*32):((iter_N+1)*32), (iter_M*32):((iter_M+1)*32)] = pano_rewards[bn, iter_N, iter_M]/255.0
                        curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, reward_image], axis=1)
                    decoded_images.append(torch.Tensor(curr_decoded_plus_true/255.0))
                # endif
                errs_across_grid[:, i, j] = rec_errs[-1].data.cpu().numpy()
        # Worst-case error per panorama over all starting locations.
        errs_across_grid = errs_across_grid.reshape(batch_size, -1)
        overall_err += np.sum(np.max(errs_across_grid, axis=1))
        overall_count += batch_size
        err_values.append(np.max(errs_across_grid, axis=1))
    err_values = np.concatenate(err_values, axis=0)
    overall_mean = np.mean(err_values)
    overall_std = np.std(err_values, ddof=1)
    overall_std_err = overall_std/math.sqrt(err_values.shape[0])
    agent.policy.train()
    return overall_mean, overall_std, overall_std_err, decoded_images
def get_all_trajectories(loader, agent, split, opts):
    """
    Gathers trajectories from all starting positions and returns them.

    Iterates over the requested data split batch by batch and, for every
    grid location (elevation i, azimuth j), rolls out the agent's policy
    from that start point. Returns a dict mapping (i, j) -> torch Tensor
    of actions (total_batch x T) concatenated over all batches. The
    policy is switched to eval mode for the rollouts and restored to
    train mode before returning.
    """
    # ---- Initial setup ----
    depleted = False
    agent.policy.eval()
    trajectories = {}
    # Sample all the locations from grid
    elevations = range(0, opts.N)
    azimuths = range(0, opts.M)
    for i in elevations:
        for j in azimuths:
            trajectories[(i, j)] = []
    while not depleted:
        # ---- Sample batch of data ----
        # NOTE(review): the 'val' branch uses `if`, not `elif`; harmless
        # only while the split values are mutually exclusive.
        if split == 'train':
            pano, depleted = loader.next_batch_train()
            pano_rewards = None
            pano_maps = None
        if split == 'val':
            pano, depleted = loader.next_batch_val()
            pano_rewards = None
            pano_maps = None
        elif split == 'test':
            pano, pano_masks, depleted = loader.next_batch_test()
            pano_rewards = None
            pano_maps = None
        elif split == 'test_unseen':
            pano, pano_masks, depleted = loader.next_batch_test_unseen()
            pano_rewards = None
            pano_maps = None
        batch_size = pano.shape[0]
        # Gather agent trajectories from each starting location
        for i in elevations:
            for j in azimuths:
                # Same start index for every sample in the batch.
                start_idx = [[i, j] for _ in range(batch_size)]
                # Test splits additionally carry view masks.
                if split == 'test' or split == 'test_unseen':
                    state = State(pano, pano_rewards, start_idx, opts, pano_masks)
                else:
                    state = State(pano, pano_rewards, start_idx, opts)
                # Enable view memorization for testing by default
                _, _, _, _, _, _, _, _, actions_taken = agent.gather_trajectory(state, eval_opts={'greedy': opts.greedy, 'memorize_views': True})
                # actions_taken: B x T torch Tensor
                trajectories[(i, j)].append(actions_taken)
    # Concatenate the per-batch action tensors along the batch dimension.
    for i in elevations:
        for j in azimuths:
            trajectories[(i, j)] = torch.cat(trajectories[(i, j)], dim=0)
    agent.policy.train()
    return trajectories
def select_adversarial_views(loader, agent, split, opts):
    """
    Adversarial selection function - evaluates the agent over all grid locations as
    starting points and returns the indices of the worst reconstruction error over different
    locations for the panoramas.

    Returns a list of [n, m] pairs (one per panorama) giving the grid
    location that produced the largest final reconstruction error.
    """
    # ---- Initial setup ----
    depleted = False
    agent.policy.eval()
    decoded_images = []
    adversarial_views = []
    while not depleted:
        # ---- Sample batch of data ----
        if split == 'val':
            if opts.expert_trajectories:
                pano, pano_maps, depleted = loader.next_batch_val()
                pano_rewards = None
            elif opts.expert_rewards:
                pano, pano_rewards, depleted = loader.next_batch_val()
                pano_maps = None
            else:
                pano, depleted = loader.next_batch_val()
                pano_rewards = None
                pano_maps = None
        elif split == 'test':
            pano, pano_masks, depleted = loader.next_batch_test()
            pano_rewards = None
            pano_maps = None
        elif split == 'test_unseen':
            pano, pano_masks, depleted = loader.next_batch_test_unseen()
            pano_rewards = None
            pano_maps = None
        # Initial setup for evaluating over a grid of views
        batch_size = pano.shape[0]
        # Compute the performance with the initial state
        # starting at fixed grid locations
        elevations = range(0, opts.N)
        azimuths = range(0, opts.M)
        errs_across_grid = np.zeros((batch_size, opts.N, opts.M))
        for i in elevations:
            for j in azimuths:
                start_idx = [[i, j] for _ in range(batch_size)]
                if split == 'test' or split == 'test_unseen':
                    state = State(pano, pano_rewards, start_idx, opts, pano_masks)
                else:
                    state = State(pano, pano_rewards, start_idx, opts)
                # Enable view memorization for testing by default
                _, rec_errs, _, _, _, _, visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts={'greedy': opts.greedy, 'memorize_views': True})
                # For some random initial state, print the decoded images at all time steps
                if i == 0 and j == 0:
                    curr_decoded_plus_true = None
                    for dec_idx in range(len(decoded_all)):
                        decoded = decoded_all[dec_idx].data.cpu()
                        curr_decoded = decoded.numpy()
                        # Rotate it forward by the start index
                        # Shifting all the images by equal amount since the start idx is same for all
                        if not opts.knownAzim:
                            curr_decoded = np.roll(curr_decoded, start_idx[0][1], axis=2)
                        if not opts.knownElev:
                            curr_decoded = np.roll(curr_decoded, start_idx[0][0], axis=1)
                        # Fill in the true views here
                        for jdx, jdx_v in enumerate(visited_idxes):
                            if jdx > dec_idx:
                                break
                            for idx in range(batch_size):
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :] = state.views_prepro[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, :]
                                # Fill in some black margin
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, 0:3, :] = 0
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, -3:-1, :] = 0
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, 0:3] = 0
                                curr_decoded[idx, jdx_v[idx][0], jdx_v[idx][1], :, :, -3:-1] = 0
                        # Need to convert from B x N x M x C x 32 x 32 to B x 1 x C x N*32 x M*32
                        # Convert from B x N x M x C x 32 x 32 to B x C x N x 32 x M x 32 and then reshape
                        curr_decoded = curr_decoded.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, opts.num_channels, opts.N*32, opts.M*32)*255.0
                        true_state = state.views.transpose((0, 3, 1, 4, 2, 5)).reshape(batch_size, 1, opts.num_channels, opts.N*32, opts.M*32)
                        for c in range(opts.num_channels):
                            #curr_decoded[:, :, c, :, :] *= opts.std[c]
                            curr_decoded[:, :, c, :, :] += opts.mean[c]
                        if curr_decoded_plus_true is None:
                            curr_decoded_plus_true = curr_decoded
                        else:
                            curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, curr_decoded], axis=1)
                    # Append the ground-truth panorama as the final column.
                    curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, true_state], axis=1)
                    if opts.expert_rewards:
                        # Visualize expert rewards as a grayscale tile per grid cell.
                        reward_image = np.zeros_like(curr_decoded)
                        for iter_N in range(opts.N):
                            for iter_M in range(opts.M):
                                for bn in range(batch_size):
                                    reward_image[bn, :, :, (iter_N*32):((iter_N+1)*32), (iter_M*32):((iter_M+1)*32)] = pano_rewards[bn, iter_N, iter_M]/255.0
                        curr_decoded_plus_true = np.concatenate([curr_decoded_plus_true, reward_image], axis=1)
                    decoded_images.append(torch.Tensor(curr_decoded_plus_true/255.0))
                # endif
                # Final-step reconstruction error for this start location.
                errs_across_grid[:, i, j] = rec_errs[-1].data.cpu().numpy()
        errs_across_grid = errs_across_grid.reshape(batch_size, -1)
        adversarial_views.append(np.argmax(errs_across_grid, axis=1))
    # The indices are encoded in the row major format. Need to convert to (n, m) format.
    adversarial_views = np.concatenate(adversarial_views, axis=0)
    adversarial_views_n_m = np.zeros((adversarial_views.shape[0], 2))
    for i in range(adversarial_views.shape[0]):
        # adversarial_views[i] = n*M + m
        m = adversarial_views[i]%opts.M
        n = math.floor(adversarial_views[i]/opts.M)
        assert(n*opts.M + m == adversarial_views[i])
        adversarial_views_n_m[i][0] = n
        adversarial_views_n_m[i][1] = m
    return adversarial_views_n_m.tolist()
def iunf(input_layer, initunf=0.1):
    """Initialize *input_layer* in place from U(-initunf, initunf).

    LSTM layers get every per-stacked-layer weight/bias tensor initialized;
    batch-norm layers are left untouched; any other layer with ``weight`` /
    ``bias`` attributes is initialized directly.

    :param input_layer: nn.Module to initialize (mutated in place)
    :param initunf: half-width of the uniform distribution
    :return: the same (mutated) layer, for chaining
    """
    # isinstance replaces the brittle str(type(...)) comparison and also
    # covers subclasses of the respective modules.
    if isinstance(input_layer, nn.LSTM):
        # LSTM stores its parameters per stacked layer: weight_ih_l<k> etc.
        for i in range(input_layer.num_layers):
            nn.init.uniform_(getattr(input_layer, 'weight_ih_l%d' % i), -initunf, initunf)
            nn.init.uniform_(getattr(input_layer, 'weight_hh_l%d' % i), -initunf, initunf)
            nn.init.uniform_(getattr(input_layer, 'bias_ih_l%d' % i), -initunf, initunf)
            nn.init.uniform_(getattr(input_layer, 'bias_hh_l%d' % i), -initunf, initunf)
    # For all other layers except batch norm
    elif not isinstance(input_layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
        if hasattr(input_layer, 'weight'):
            nn.init.uniform_(input_layer.weight, -initunf, initunf)
        # Guard against layers constructed with bias=False (bias is None).
        if getattr(input_layer, 'bias', None) is not None:
            nn.init.uniform_(input_layer.bias, -initunf, initunf)
    return input_layer
def ixvr(input_layer, bias_val=0.01):
    """Xavier-normal initialize *input_layer* weights; set biases to *bias_val*.

    LSTM layers get per-stacked-layer treatment; batch-norm layers are left
    untouched; other layers with ``weight`` / ``bias`` are handled directly.

    :param input_layer: nn.Module to initialize (mutated in place)
    :param bias_val: constant value written into every bias tensor
    :return: the same (mutated) layer, for chaining
    """
    # isinstance replaces the brittle str(type(...)) comparison.
    if isinstance(input_layer, nn.LSTM):
        for i in range(input_layer.num_layers):
            nn.init.xavier_normal_(getattr(input_layer, 'weight_ih_l%d' % i))
            nn.init.xavier_normal_(getattr(input_layer, 'weight_hh_l%d' % i))
            nn.init.constant_(getattr(input_layer, 'bias_ih_l%d' % i), bias_val)
            nn.init.constant_(getattr(input_layer, 'bias_hh_l%d' % i), bias_val)
    # For all other layers except batch norm
    elif not isinstance(input_layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
        if hasattr(input_layer, 'weight'):
            nn.init.xavier_normal_(input_layer.weight)
        # Guard against layers constructed with bias=False (bias is None).
        if getattr(input_layer, 'bias', None) is not None:
            nn.init.constant_(input_layer.bias, bias_val)
    return input_layer
def inrml(input_layer, mean=0, std=0.001):
    """Normal-initialize *input_layer* weights from N(mean, std); biases to 0.01.

    LSTM layers get per-stacked-layer treatment; batch-norm layers are left
    untouched; other layers with ``weight`` / ``bias`` are handled directly.

    :param input_layer: nn.Module to initialize (mutated in place)
    :param mean: mean of the normal distribution
    :param std: standard deviation of the normal distribution
    :return: the same (mutated) layer, for chaining
    """
    # isinstance replaces the brittle str(type(...)) comparison.
    if isinstance(input_layer, nn.LSTM):
        for i in range(input_layer.num_layers):
            nn.init.normal_(getattr(input_layer, 'weight_ih_l%d' % i), mean, std)
            nn.init.normal_(getattr(input_layer, 'weight_hh_l%d' % i), mean, std)
            nn.init.constant_(getattr(input_layer, 'bias_ih_l%d' % i), 0.01)
            nn.init.constant_(getattr(input_layer, 'bias_hh_l%d' % i), 0.01)
    # For all other layers except batch norm
    elif not isinstance(input_layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
        if hasattr(input_layer, 'weight'):
            nn.init.normal_(input_layer.weight, mean, std)
        # Guard against layers constructed with bias=False (bias is None).
        if getattr(input_layer, 'bias', None) is not None:
            nn.init.constant_(input_layer.bias, 0.01)
    return input_layer
def initialize_sequential(var_sequential, init_method):
    """
    Apply *init_method* to every layer of *var_sequential* and wrap the
    results in a fresh ``nn.Sequential``.

    Note: the returned container is a new object holding whatever layers
    *init_method* returns (typically the same layers, mutated in place),
    so the input container itself is not reused.
    """
    initialized_layers = [init_method(layer) for layer in var_sequential]
    return nn.Sequential(*initialized_layers)
class View(nn.Module):
    """Module wrapper around ``Tensor.view`` so a reshape can participate
    in an ``nn.Sequential`` pipeline."""

    def __init__(self, *shape):
        super(View, self).__init__()
        # Target shape captured as the tuple of positional arguments.
        self.shape = shape

    def forward(self, input):
        target_shape = self.shape
        return input.view(*target_shape)
| 49.315084 | 199 | 0.573646 |
acfb0e89f37ca9a2508060c11d516a646750b2b7 | 2,388 | py | Python | examples/205_multivof/buoyancy_breakup/vis/contour.py | ChristopherKotthoff/Aphros-with-GraphContraction | 18af982a50e350a8bf6979ae5bd25b2ef4d3792a | [
"MIT"
] | 252 | 2020-06-03T16:01:59.000Z | 2022-03-30T14:06:32.000Z | examples/205_multivof/buoyancy_breakup/vis/contour.py | ChristopherKotthoff/Aphros-with-GraphContraction | 18af982a50e350a8bf6979ae5bd25b2ef4d3792a | [
"MIT"
] | 4 | 2021-03-13T11:13:55.000Z | 2022-03-31T15:11:22.000Z | examples/205_multivof/buoyancy_breakup/vis/contour.py | ChristopherKotthoff/Aphros-with-GraphContraction | 18af982a50e350a8bf6979ae5bd25b2ef4d3792a | [
"MIT"
] | 27 | 2020-09-18T04:12:03.000Z | 2022-03-30T04:22:42.000Z | #!/usr/bin/env pvbatch
import argparse
from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
import os
import sys
def printerr(m):
    """Write *m* to stderr as one line, flushing immediately."""
    print('{:}'.format(m), file=sys.stderr, flush=True)
parser = argparse.ArgumentParser()
parser.add_argument('--files0', nargs='*', help="Paths to sm_*.vtk files to render with color C0")
parser.add_argument('--files1', nargs='*', help="Paths to sm_*.vtk files to render with color C1")
parser.add_argument('--files2', nargs='*', help="Paths to sm_*.vtk files to render with color C2")
parser.add_argument('--files3', nargs='*', help="Paths to sm_*.vtk files to render with color C3")
parser.add_argument('--lw', default=4, help="Line width")
parser.add_argument('--force', action='store_true', help="Force overwrite")
parser.add_argument('--outdir', default='.', help="Path to output directory")
args = parser.parse_args()
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [1080, 1080]
renderView1.InteractionMode = '2D'
renderView1.OrientationAxesVisibility = 0
renderView1.CenterOfRotation = [0.5, 0.5, 0]
renderView1.CameraPosition = [0.5, 0.5, 3.0]
renderView1.CameraFocalPoint = [0.5, 0.5, 0.0]
renderView1.CameraParallelScale = 0.5
renderView1.CameraParallelProjection = 1
renderView1.Background = [1., 1., 1.]
renderView1.UseLight = 0
# https://github.com/OrdnanceSurvey/GeoDataViz-Toolkit/tree/master/Colours
colorscheme = [
"#FF1F5B", "#00CD6C", "#009ADE", "#AF58BA", "#FFC61E", "#F28522",
"#A0B1BA", "#A6761D", "#E9002D", "#FFAA00", "#00B000"
]
def rgb(h):
    """Convert a '#RRGGBB' colour string to an [r, g, b] list of floats in [0, 1]."""
    digits = h[1:]  # drop the leading '#'
    channels = []
    for start in (0, 2, 4):
        channels.append(int(digits[start:start + 2], 16) / 255.)
    return channels
for i in range(4):
files = eval("args.files" + str(i))
if not files:
continue
surf = LegacyVTKReader(FileNames=files)
surfDisplay = Show(surf, renderView1)
surfDisplay.Representation = 'Wireframe'
surfDisplay.AmbientColor = rgb(colorscheme[i])
surfDisplay.ColorArrayName = ['POINTS', '']
surfDisplay.DiffuseColor = rgb(colorscheme[i])
surfDisplay.LineWidth = args.lw
tk = GetTimeKeeper()
for i, f in enumerate(args.files0):
path = os.path.join(args.outdir,
os.path.splitext(os.path.basename(f))[0] + '.png')
if not args.force and os.path.isfile(path):
printerr("skip existing '{}'".format(path))
continue
tk.Time = i
printerr(path)
SaveScreenshot(path)
| 34.608696 | 98 | 0.687605 |
acfb0eb8a08f6b87749b61bba81c94320c5df45d | 1,619 | py | Python | viz/web/webserve.py | vivinastase/histwords | bb3117434e76679fb38f649e2dbf11b15f5ef03b | [
"Apache-2.0"
] | null | null | null | viz/web/webserve.py | vivinastase/histwords | bb3117434e76679fb38f649e2dbf11b15f5ef03b | [
"Apache-2.0"
] | null | null | null | viz/web/webserve.py | vivinastase/histwords | bb3117434e76679fb38f649e2dbf11b15f5ef03b | [
"Apache-2.0"
] | null | null | null | import sys
import os
import base64
import threading
import ssl
import socketserver
#import BaseHTTPServer
#from SimpleHTTPServer import SimpleHTTPRequestHandler
from http.server import SimpleHTTPRequestHandler
from importlib import reload
WEB_PORT=5000
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    # TCP server that handles each request in its own thread.
    # NOTE(review): log_request normally lives on the request *handler*,
    # not the server — confirm this override actually silences logging.
    def log_request(self, *args, **kwargs):
        pass
class WebHandler(SimpleHTTPRequestHandler):
    """HTTP handler that hot-reloads the local ``webhandle`` module on
    every request, so handler code can be edited without restarting
    the server."""

    def do_GET(self):
        import webhandle
        reload(webhandle)  # pick up on-disk edits to webhandle.py
        webhandle.do_get(self)

    def do_POST(self):
        import webhandle
        reload(webhandle)  # pick up on-disk edits to webhandle.py
        webhandle.do_post(self)
SERVER=None
def serve_http(https_port=80, HandlerClass=WebHandler):
    """Serve forever on *https_port*; blocks the calling thread.

    Stores the server in the module-global SERVER so stop()/restart()
    can reach it.
    NOTE(review): despite the parameter name this serves plain HTTP —
    no TLS wrapping is visible here.
    """
    global SERVER
    # Allow quick restarts without waiting for TIME_WAIT to clear.
    socketserver.TCPServer.allow_reuse_address = True
    httpd = ThreadedTCPServer(("", https_port), HandlerClass)
    debug("Serving HTTP on", https_port)
    SERVER = httpd
    SERVER.serve_forever()
def debug(*args):
    """Print all arguments to stdout, space-separated, on one line."""
    print(*args)
def start():
    """Start the web server on WEB_PORT in a daemon thread.

    :return: the started threading.Thread (daemon, so it won't block
             interpreter exit)
    """
    port = int(WEB_PORT)

    def run_webserve():
        serve_http(port)

    web_thread = threading.Thread(target=run_webserve)
    web_thread.daemon = True
    web_thread.start()
    return web_thread
def stop():
    """Shut down and close the module-global SERVER set by serve_http().

    NOTE(review): raises AttributeError if called before the server has
    started (SERVER is still None).
    """
    SERVER.shutdown()
    SERVER.server_close()
def restart():
    """Stop the running server and spawn a fresh server thread.

    NOTE(review): the thread object returned by start() is discarded
    here, so callers cannot join the restarted server.
    """
    stop()
    start()
def main():
    """Start the web server thread and block until it dies.

    Prompts for an embedding selection via the local ``helpers`` module,
    then polls the server thread twice a second so the process exits
    promptly if the server thread terminates.
    """
    t = start()
    import helpers
    # TODO: add argument parsing with argparse
    helpers.select_embedding()
    while True:
        t.join(0.5)
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported spelling on both Python 2.6+ and 3.x.
        if not t.is_alive():
            print("WEBSERVER DIED, EXITING")
            break
if __name__ == '__main__':
main()
| 18.397727 | 77 | 0.680667 |
acfb0ebd2dd15a9f023726184bdaa544dae7eaa7 | 4,619 | py | Python | Task 1 turtles/turtles.py | mansasha21/phys-math-modeling | fc8b63c2894676f6eb72896b06ae726b0aa506c5 | [
"MIT"
] | 1 | 2020-02-24T16:04:37.000Z | 2020-02-24T16:04:37.000Z | Task 1 turtles/turtles.py | mansasha21/phys-math-modeling | fc8b63c2894676f6eb72896b06ae726b0aa506c5 | [
"MIT"
] | null | null | null | Task 1 turtles/turtles.py | mansasha21/phys-math-modeling | fc8b63c2894676f6eb72896b06ae726b0aa506c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""turtles.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Wl6WD8ntb-XqJEb1m4CfYfHvWXR-99ok
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
import matplotlib
import random
matplotlib.rcParams["animation.embed_limit"] = 75
N = 4
R = 30
v = 2
vx = 0.03
frames = 1500
dt = 1
class finish:
    # Mutable module-level flags shared with the animation callback:
    # whether the first two turtles have met, and the frame index at
    # which they did (defaults to the full frame count, i.e. "never").
    finished = False
    real_time = frames
fig, ax = plt.subplots()
ax = plt.axis([-40, 40, -40, 40])
tracks = []
dots = []
curr_coord = [
[R * np.cos((2 * np.pi * i) / N), R * np.sin((2 * np.pi * i) / N)] for i in range(N)
]
rand_coord = [
[random.random()*R, random.random()*R] for i in range(N)
]
curr_coord1 = [list(i) for i in curr_coord]
#curr_coord = rand_coord
#curr_coord1 = [list(i) for i in rand_coord]
for x, y in curr_coord:
(dot,) = plt.plot([x], [y], "o")
dots.append(dot)
tracks.append([x])
tracks.append([y])
for i in range(frames):
x,y = 0, 0
for k in range(N):
if k != N - 1:
x = curr_coord1[k + 1][0] - curr_coord1[k][0]
y = curr_coord1[k + 1][1] - curr_coord1[k][1]
else:
x = curr_coord1[0][0] - curr_coord1[k][0]
y = curr_coord1[0][1] - curr_coord1[k][1]
norm = np.linalg.norm([x, y])
curr_coord1[k][0] += x / norm * vx * dt
curr_coord1[k][1] += y / norm * vx * dt
tracks[2 * k].append(curr_coord1[k][0])
tracks[2 * k + 1].append(curr_coord1[k][1])
for i in range(N):
plt.plot(tracks[2 * i], tracks[2 * i + 1])
def animate(i):
    """Animation callback for frame *i*: move every turtle dot to its
    precomputed track position and record when turtles 0 and 1 meet."""
    if i % 100 == 0:
        # Progress as a fraction of the total frame count (0.0-1.0).
        print("{}% prepared".format(1.*i/frames))
    for k, dot in zip(range(N), dots):
        # tracks stores x at even indices and y at odd indices per turtle.
        curr_coord[k][0] = tracks[2 * k][i]
        curr_coord[k][1] = tracks[2 * k + 1][i]
        dot.set_data(curr_coord[k][0], curr_coord[k][1])
    # Turtles 0 and 1 coincide (to 1 decimal place) -> record the meeting
    # frame exactly once.
    if round(curr_coord[0][0],1) == round(curr_coord[1][0],1) and round(curr_coord[0][1],1) == round(curr_coord[1][1],1) and not finish.finished:
        finish.finished = True
        finish.real_time = i
    return dots
myAnimation = animation.FuncAnimation(
fig, animate, frames=frames, blit=True, repeat=False
)
HTML(myAnimation.to_jshtml(embed_frames=frames))
print('real_time = ',finish.real_time)
theor_time = R/(vx*np.sin(np.pi/N))
print('theor_time = ',theor_time)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
import matplotlib
import random
matplotlib.rcParams["animation.embed_limit"] = 75
N = 6
R = 30
v = 2
vx = 0.03
frames = 2500
dt = 1
class finish:
finished = False
real_time = frames
fig, ax = plt.subplots()
ax = plt.axis([-40, 40, -40, 40])
tracks = []
dots = []
curr_coord = [
[R * np.cos((2 * np.pi * i) / N), R * np.sin((2 * np.pi * i) / N)] for i in range(N)
]
rand_coord = [
[random.random()*R, random.random()*R] for i in range(N)
]
curr_coord1 = [list(i) for i in curr_coord]
#curr_coord = rand_coord
#curr_coord1 = [list(i) for i in rand_coord]
for x, y in curr_coord:
(dot,) = plt.plot([x], [y], "o")
dots.append(dot)
tracks.append([x])
tracks.append([y])
for i in range(frames):
x,y = 0, 0
for k in range(N):
if k != N - 1:
x = curr_coord1[k + 1][0] - curr_coord1[k][0]
y = curr_coord1[k + 1][1] - curr_coord1[k][1]
else:
x = curr_coord1[0][0] - curr_coord1[k][0]
y = curr_coord1[0][1] - curr_coord1[k][1]
norm = np.linalg.norm([x, y])
curr_coord1[k][0] += x / norm * vx * dt
curr_coord1[k][1] += y / norm * vx * dt
tracks[2 * k].append(curr_coord1[k][0])
tracks[2 * k + 1].append(curr_coord1[k][1])
for i in range(N):
plt.plot(tracks[2 * i], tracks[2 * i + 1])
def animate(i):
if i % 100 == 0:
print("{}% prepared".format(1.*i/frames))
for k, dot in zip(range(N), dots):
curr_coord[k][0] = tracks[2 * k][i]
curr_coord[k][1] = tracks[2 * k + 1][i]
dot.set_data(curr_coord[k][0], curr_coord[k][1])
if round(curr_coord[0][0],1) == round(curr_coord[1][0],1) and round(curr_coord[0][1],1) == round(curr_coord[1][1],1) and not finish.finished:
finish.finished = True
finish.real_time = i
return dots
myAnimation = animation.FuncAnimation(
fig, animate, frames=frames, blit=True, repeat=False
)
HTML(myAnimation.to_jshtml(embed_frames=frames))
print('real_time = ',finish.real_time)
theor_time = R/(vx*np.sin(np.pi/N))
print('theor_time = ',theor_time)
| 25.103261 | 145 | 0.60013 |
acfb110dfa2a3d6c2bc6602626292d47cfb7c7d8 | 1,394 | py | Python | vilt/datamodules/vqav2_datamodule.py | kris927b/ViLT | db96f20ebc656f1995aa573cbcbca0fe31f55c42 | [
"Apache-2.0"
] | 587 | 2021-05-08T08:17:08.000Z | 2022-03-31T15:17:09.000Z | vilt/datamodules/vqav2_datamodule.py | kris927b/ViLT | db96f20ebc656f1995aa573cbcbca0fe31f55c42 | [
"Apache-2.0"
] | 54 | 2021-05-12T12:36:22.000Z | 2022-03-31T03:34:54.000Z | vilt/datamodules/vqav2_datamodule.py | kris927b/ViLT | db96f20ebc656f1995aa573cbcbca0fe31f55c42 | [
"Apache-2.0"
] | 107 | 2021-05-09T07:48:53.000Z | 2022-03-30T04:12:16.000Z | from vilt.datasets import VQAv2Dataset
from .datamodule_base import BaseDataModule
from collections import defaultdict
class VQAv2DataModule(BaseDataModule):
    """Data module for the VQA v2 dataset.

    Besides the datasets wired up by BaseDataModule, setup() derives the
    answer vocabulary (answer2id / id2answer) and the class count from
    the train and val answer tables.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def dataset_cls(self):
        # Concrete dataset class consumed by BaseDataModule.
        return VQAv2Dataset

    @property
    def dataset_name(self):
        return "vqa"

    def setup(self, stage):
        """Build the datasets, then derive the answer vocabulary.

        Flattens the doubly-nested answers / answer_labels columns,
        builds answer->label and label->answer mappings, and records
        num_class as (max label + 1). Labels without a known answer
        render as the string "unknown".
        """
        super().setup(stage)

        train_answers = self.train_dataset.table["answers"].to_pandas().tolist()
        val_answers = self.val_dataset.table["answers"].to_pandas().tolist()
        train_labels = self.train_dataset.table["answer_labels"].to_pandas().tolist()
        val_labels = self.val_dataset.table["answer_labels"].to_pandas().tolist()

        # Drop None rows, then flatten two levels of nesting.
        all_answers = [c for c in train_answers + val_answers if c is not None]
        all_answers = [l for lll in all_answers for ll in lll for l in ll]
        all_labels = [c for c in train_labels + val_labels if c is not None]
        all_labels = [l for lll in all_labels for ll in lll for l in ll]

        self.answer2id = {k: v for k, v in zip(all_answers, all_labels)}
        sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1])
        self.num_class = max(self.answer2id.values()) + 1

        # defaultdict so unseen label ids map to "unknown".
        self.id2answer = defaultdict(lambda: "unknown")
        for k, v in sorted_a2i:
            self.id2answer[v] = k
acfb111ed62d858f7bcadc7c68808fb65c04e417 | 645 | py | Python | app/loyalty/migrations/0009_auto_20190726_0123.py | S3Infosoft/s3-loyalty-webapp | 264b98a325ccfa683737e03623acc99fe3053a99 | [
"MIT"
] | null | null | null | app/loyalty/migrations/0009_auto_20190726_0123.py | S3Infosoft/s3-loyalty-webapp | 264b98a325ccfa683737e03623acc99fe3053a99 | [
"MIT"
] | 7 | 2019-06-17T04:11:38.000Z | 2019-08-01T06:23:46.000Z | app/loyalty/migrations/0009_auto_20190726_0123.py | S3Infosoft/mvr-loyalty | 264b98a325ccfa683737e03623acc99fe3053a99 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-07-25 19:53
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: alters Reservations.date and
    SpendPoints.date to CharField(max_length=50) defaulting to
    timezone.now.

    NOTE(review): a datetime-producing default on a CharField stores the
    stringified datetime — confirm a DateTimeField was not intended.
    """

    dependencies = [
        ('loyalty', '0008_auto_20190726_0019'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reservations',
            name='date',
            field=models.CharField(default=django.utils.timezone.now, max_length=50),
        ),
        migrations.AlterField(
            model_name='spendpoints',
            name='date',
            field=models.CharField(default=django.utils.timezone.now, max_length=50),
        ),
    ]
| 25.8 | 85 | 0.618605 |
acfb1227743e49efd04d3ebaad2f2ed1fbe4d9f7 | 3,179 | py | Python | mp_sort/app/static/library.py | seancze/d2w_mini_project_mp_sort | aa472b168a7e1d238bf074e74773c3c71b0ba7af | [
"MIT"
] | null | null | null | mp_sort/app/static/library.py | seancze/d2w_mini_project_mp_sort | aa472b168a7e1d238bf074e74773c3c71b0ba7af | [
"MIT"
] | null | null | null | mp_sort/app/static/library.py | seancze/d2w_mini_project_mp_sort | aa472b168a7e1d238bf074e74773c3c71b0ba7af | [
"MIT"
] | null | null | null | from org.transcrypt.stubs.browser import *
import random
def gen_random_int(number, seed):
    """Return the ints 0..number-1 in a pseudo-random order that is
    fully determined by *seed*."""
    random.seed(seed)
    values = [n for n in range(number)]
    random.shuffle(values)
    return values
def generate():
    """Generate a shuffled list of 10 ints and display it in the
    "generate" div.

    The numbers are rendered comma-separated and terminated with a full
    stop (e.g. "3,1,...,7."), the exact format that sortnumber1() parses.
    """
    number = 10
    seed = 200
    # call gen_random_int() with the given number and seed
    # store it to the variable array
    array = gen_random_int(number, seed)
    # Join as "n1,n2,...,nk." — Python lists have no .join method (the old
    # array.join(",") only worked under Transcrypt's JS arrays), so use
    # str.join over the stringified items instead.
    array_str = ",".join(str(item) for item in array) + "."
    # This line is to placed the string into the HTML
    # under div section with the id called "generate"
    document.getElementById("generate").innerHTML = array_str
def bubble_sort(array):
    """Sort *array* in place with an optimized bubble sort (the scan
    shrinks to the last swap position) and print the sorted array along
    with the number of comparisons performed."""
    limit = len(array)
    comparisons = 0
    dirty = True
    while dirty:
        dirty = False
        last_swap = 0
        for pos in range(1, limit):
            comparisons += 1
            left, right = array[pos - 1], array[pos]
            if right < left:
                array[pos - 1], array[pos] = right, left
                dirty = True
                last_swap = pos
        # No element beyond the last swap can still be out of order.
        limit = last_swap
    print(f"Array in bubble_sort: {array} Count: {comparisons}")
def insertion_sort(array):
    """Sort *array* in place with insertion sort and print the sorted
    array along with the number of element shifts performed."""
    shifts = 0
    for right in range(1, len(array)):
        key = array[right]
        slot = right
        # Shift larger elements one position right until key's slot is found.
        # Compare against key itself: it has not been written back yet.
        while slot > 0 and key < array[slot - 1]:
            shifts += 1
            array[slot] = array[slot - 1]
            slot -= 1
        # Drop the saved element into its final position.
        array[slot] = key
    print(f"Array in insertion_sort: {array} Count: {shifts}")
def sortnumber1():
    ''' This function is used in Exercise 1.
    The function is called when the sort button is clicked.
    You need to do the following:
    - get the list of numbers from the "generate" HTML id, use document.getElementById(id).innerHTML
    - create a list of integers from the string of numbers
    - call your sort function, either bubble sort or insertion sort
    - create a string of the sorted numbers and store it in array_str
    '''
    array = document.getElementById("generate").innerHTML
    # Strip the trailing full stop written by generate(), then parse ints.
    array_int = [int(i) for i in array[:-1].split(",")]
    insertion_sort(array_int)
    # Build an actual string as the docstring requires (the old code stored
    # a list in array_str); comma-joined, matching the previous rendering.
    array_str = ",".join(str(i) for i in array_int)
    document.getElementById("sorted").innerHTML = array_str
def sortnumber2():
    ''' This function is used in Exercise 2.
    The function is called when the sort button is clicked.
    You need to do the following:
    - Get the numbers from a string variable "value".
    - Split the string using comma as the separator and convert them to
      a list of numbers
    - call your sort function, either bubble sort or insertion sort
    - create a string of the sorted numbers and store it in array_str
    '''
    # The following line get the value of the text input called "numbers"
    value = document.getElementsByName("numbers")[0].value

    # Throw alert and stop if nothing in the text input
    if value == "":
        window.alert("Your textbox is empty")
        return

    # Split on commas, convert to ints, and sort in place.
    array_int = [int(i) for i in value.split(",")]
    bubble_sort(array_int)
    # Build an actual string as the docstring requires (the old code stored
    # a list in array_str); comma-joined, matching the previous rendering.
    array_str = ",".join(str(i) for i in array_int)
    document.getElementById("sorted").innerHTML = array_str
| 25.845528 | 98 | 0.702422 |
acfb1299c79974fb1301f71d998802eb2e5d6e77 | 213 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/learn-python3/debug/do_try.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/learn-python3/debug/do_try.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/learn-python3/debug/do_try.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Demonstrates try/except/finally ordering: the division raises
# ZeroDivisionError, so the success print is skipped, the except branch
# runs, and finally always executes before the trailing END print.
try:
    print("try...")
    r = 10 / 0  # deliberately raises ZeroDivisionError
    print("result:", r)  # never reached
except ZeroDivisionError as e:
    print("except:", e)
finally:
    print("finally...")
print("END")
| 16.384615 | 30 | 0.553991 |
acfb1308386405470d39d6a3a4c7b46b508f4db3 | 353 | py | Python | opencv-examples/color_range_filter.py | MeneDev/pigeon-tracker | 472dcbeb924131cdea34a37c4f787a67b37fea84 | [
"MIT"
] | 3 | 2018-11-02T09:38:56.000Z | 2019-03-09T15:58:58.000Z | opencv-examples/color_range_filter.py | MeneDev/pigeon-tracker | 472dcbeb924131cdea34a37c4f787a67b37fea84 | [
"MIT"
] | 2 | 2018-11-03T18:09:16.000Z | 2019-02-10T16:40:58.000Z | opencv-examples/color_range_filter.py | MeneDev/pigeon-tracker | 472dcbeb924131cdea34a37c4f787a67b37fea84 | [
"MIT"
] | 1 | 2018-11-02T10:15:56.000Z | 2018-11-02T10:15:56.000Z | import cv2
# Open the default camera (device index 0).
cam = cv2.VideoCapture(0)

# Colour bounds for the mask, applied to the HSV-converted frame.
# NOTE(review): tuples look like (H, S, V); the 65-70 window on the last
# component is very narrow — confirm these thresholds are intended.
lower_green = (55, 130, 65)
upper_green = (75, 175, 70)

while cam.isOpened():
    ret, frame = cam.read()
    # Convert the BGR capture to HSV so the range test is hue-based.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Binary mask: 255 where the pixel lies inside the colour range.
    mask = cv2.inRange(frame, lower_green, upper_green)

    cv2.imshow("threshold", mask)

    # Exit on ESC (key code 27).
    key = cv2.waitKey(1) & 0xff
    if key == 27:
        break
acfb13c2316ffe62155d1dbe9ddfc08fa646b39a | 3,824 | py | Python | jutil/format.py | kutera/django-jutil | 9ad85caeb04a0a45be4d139e32a16cf9a9ec6c12 | [
"MIT"
] | null | null | null | jutil/format.py | kutera/django-jutil | 9ad85caeb04a0a45be4d139e32a16cf9a9ec6c12 | [
"MIT"
] | null | null | null | jutil/format.py | kutera/django-jutil | 9ad85caeb04a0a45be4d139e32a16cf9a9ec6c12 | [
"MIT"
] | null | null | null | import re
from datetime import timedelta
from decimal import Decimal
def format_full_name(first_name: str, last_name: str, max_length: int = 20) -> str:
    """
    Limits name length to specified length. Tries to keep name as human-readable
    and natural as possible, shortening progressively: drop middle names, then
    the latter parts of compound first/last names, and finally reduce the last
    name to an initial.
    :param first_name: First name
    :param last_name: Last name
    :param max_length: Maximum length
    :return: Full name or shortened version depending on length
    :raises ValueError: If the name cannot be shortened to max_length
    """
    # dont allow commas in limited names
    first_name = first_name.replace(',', ' ')
    last_name = last_name.replace(',', ' ')
    # accept short full names as is
    original_full_name = first_name + ' ' + last_name
    if len(original_full_name) <= max_length:
        return original_full_name
    # drop middle names
    first_name = first_name.split(' ')[0]
    full_name = first_name + ' ' + last_name
    if len(full_name) <= max_length:
        return full_name
    # drop latter parts of combined (hyphenated/spaced) first names
    first_name = re.split(r'[\s\-]', first_name)[0]
    full_name = first_name + ' ' + last_name
    if len(full_name) <= max_length:
        return full_name
    # drop latter parts of multi part last names
    last_name = re.split(r'[\s\-]', last_name)[0]
    full_name = first_name + ' ' + last_name
    if len(full_name) <= max_length:
        return full_name
    # shorten last name to one letter
    last_name = last_name[:1]
    full_name = first_name + ' ' + last_name
    if len(full_name) > max_length:
        # ValueError is more precise than a bare Exception and remains
        # backward-compatible with callers that catch Exception.
        raise ValueError('Failed to shorten name {}'.format(original_full_name))
    return full_name
def format_timedelta(dt: timedelta) -> str:
    """
    Formats timedelta to readable format, e.g. 1h30min.
    Whole seconds below a minute are dropped; a zero-length delta
    renders as "0min".
    :param dt: timedelta
    :return: str
    """
    total = int(dt.total_seconds())
    days, rest = divmod(total, 86400)
    hours, rest = divmod(rest, 3600)
    minutes = rest // 60
    parts = []
    for value, unit in ((days, "d"), (hours, "h"), (minutes, "min")):
        if value > 0:
            parts.append("{}{}".format(value, unit))
    return "".join(parts) or "0min"
def format_xml(xml_str: str, exceptions: bool = False):
    """
    Formats XML document as human-readable plain text.
    :param xml_str: str (Input XML str)
    :param exceptions: Raise exceptions on error
    :return: str (Formatted XML str)
    """
    try:
        from xml.dom import minidom
        pretty = minidom.parseString(xml_str).toprettyxml()
    except Exception:
        if exceptions:
            raise
        # Best effort: fall back to the unformatted input.
        return xml_str
    return pretty
def _quantized(value, exponent: str) -> Decimal:
    """Shared helper: convert *value* to Decimal rounded to *exponent*
    (default Decimal rounding, ROUND_HALF_EVEN)."""
    return Decimal(value).quantize(Decimal(exponent))


def dec1(a) -> Decimal:
    """
    Converts number to Decimal with 1 decimal digit.
    :param a: Number
    :return: Decimal with 1 decimal digit
    """
    return _quantized(a, '1.0')


def dec2(a) -> Decimal:
    """
    Converts number to Decimal with 2 decimal digits.
    :param a: Number
    :return: Decimal with 2 decimal digits
    """
    return _quantized(a, '1.00')


def dec3(a) -> Decimal:
    """
    Converts number to Decimal with 3 decimal digits.
    :param a: Number
    :return: Decimal with 3 decimal digits
    """
    return _quantized(a, '1.000')


def dec4(a) -> Decimal:
    """
    Converts number to Decimal with 4 decimal digits.
    :param a: Number
    :return: Decimal with 4 decimal digits
    """
    return _quantized(a, '1.0000')


def dec5(a) -> Decimal:
    """
    Converts number to Decimal with 5 decimal digits.
    :param a: Number
    :return: Decimal with 5 decimal digits
    """
    return _quantized(a, '1.00000')


def dec6(a) -> Decimal:
    """
    Converts number to Decimal with 6 decimal digits.
    :param a: Number
    :return: Decimal with 6 decimal digits
    """
    return _quantized(a, '1.000000')
| 27.314286 | 104 | 0.636768 |
acfb140bf6c022f442c334a8adf0c94ac33f2dc7 | 7,334 | py | Python | pyexcel_io/book.py | Glose/pyexcel-io | 3408497c81ae4652a0c1a1f41ed4679fbc2cb416 | [
"BSD-3-Clause"
] | null | null | null | pyexcel_io/book.py | Glose/pyexcel-io | 3408497c81ae4652a0c1a1f41ed4679fbc2cb416 | [
"BSD-3-Clause"
] | null | null | null | pyexcel_io/book.py | Glose/pyexcel-io | 3408497c81ae4652a0c1a1f41ed4679fbc2cb416 | [
"BSD-3-Clause"
] | null | null | null | """
pyexcel_io.book
~~~~~~~~~~~~~~~~~~~
The io interface to file extensions
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import pyexcel_io.manager as manager
from pyexcel_io._compact import PY2, OrderedDict, isstream
from .constants import MESSAGE_ERROR_03, MESSAGE_WRONG_IO_INSTANCE
class RWInterface(object):
    """
    The common methods for book reader and writer.

    Subclasses implement the open* hooks for their backend; close() is
    optional. Instances also work as context managers, closing on exit.
    """
    # Stream flavour expected by the concrete backend.
    stream_type = None

    def __init__(self):
        # File type key; populated later via set_type().
        self._file_type = None

    def open(self, file_name, **keywords):
        """open a file for read or write"""
        raise NotImplementedError("Please implement this method")

    def open_stream(self, file_stream, **keywords):
        """open a file stream for read or write"""
        raise NotImplementedError("Please implement this method")

    def open_content(self, file_stream, **keywords):
        """open a file content for read or write"""
        raise NotImplementedError("Please implement this method")

    def set_type(self, file_type):
        """
        set the file type for the instance

        file type is needed when a third party library could
        handle more than one file type"""
        self._file_type = file_type

    def close(self):
        """
        close the file handle if necessary
        """
        pass

    # implement context manager
    def __enter__(self):
        return self

    def __exit__(self, a_type, value, traceback):
        self.close()
class BookReader(RWInterface):
    """
    Standard book reader: reads a multi-sheet excel book from a file
    name, stream or raw content, returning sheets as ordered dicts.
    """
    def __init__(self):
        super(BookReader, self).__init__()
        self._file_name = None
        self._file_stream = None
        self._keywords = None
        # Backend-specific book object; set by concrete readers before
        # the read_* methods are used.
        self._native_book = None
    def open(self, file_name, **keywords):
        """
        open a file with unlimited keywords
        keywords are passed on to individual readers
        """
        self._file_name = file_name
        self._keywords = keywords
    def open_stream(self, file_stream, **keywords):
        """
        open a file stream with unlimited keywords for reading

        The stream is rewound to its start; non-seekable streams are
        drained into a fresh in-memory stream instead.
        keywords are passed on to individual readers
        """
        if isstream(file_stream):
            if PY2:
                if hasattr(file_stream, "seek"):
                    file_stream.seek(0)
                else:
                    # python 2
                    # e.g. zipfile in odfpy would do a seek,
                    # but a stream from urllib cannot seek, so buffer it
                    file_stream = _convert_content_to_stream(
                        file_stream.read(), self._file_type
                    )
            else:
                from io import UnsupportedOperation
                try:
                    file_stream.seek(0)
                except UnsupportedOperation:
                    # python 3: non-seekable stream, buffer it in memory
                    file_stream = _convert_content_to_stream(
                        file_stream.read(), self._file_type
                    )
            self._file_stream = file_stream
            self._keywords = keywords
        else:
            raise IOError(MESSAGE_WRONG_IO_INSTANCE)
    def open_content(self, file_content, **keywords):
        """
        read file content as if it is a file stream with
        unlimited keywords for reading
        keywords are passed on to individual readers
        """
        file_stream = _convert_content_to_stream(file_content, self._file_type)
        self.open_stream(file_stream, **keywords)
    def read_sheet_by_name(self, sheet_name):
        """
        read a named sheet from a excel data book

        :raises ValueError: if no sheet carries the given name
        """
        named_contents = [
            content
            for content in self._native_book
            if content.name == sheet_name
        ]
        if len(named_contents) == 1:
            return {named_contents[0].name: self.read_sheet(named_contents[0])}
        else:
            raise ValueError("Cannot find sheet %s" % sheet_name)
    def read_sheet_by_index(self, sheet_index):
        """
        read an indexed sheet from a excel data book

        :raises IndexError: if the index is out of range (the reader is
            closed before re-raising)
        """
        try:
            sheet = self._native_book[sheet_index]
            return {sheet.name: self.read_sheet(sheet)}
        except IndexError:
            self.close()
            raise
    def read_all(self):
        """
        read everything from a excel data book
        """
        result = OrderedDict()
        for sheet in self._native_book:
            result[sheet.name] = self.read_sheet(sheet)
        return result
    def read_many(self, sheets):
        """
        read the given sheets (by index or by name) from a excel data book
        """
        result = OrderedDict()
        for sheet in sheets:
            # ints select by position, anything else by sheet name
            if isinstance(sheet, int):
                result.update(self.read_sheet_by_index(sheet))
            else:
                result.update(self.read_sheet_by_name(sheet))
        return result
    def read_sheet(self, native_sheet):
        """
        Return a context specific sheet from a native sheet
        """
        raise NotImplementedError("Please implement this method")
class BookWriter(RWInterface):
    """
    Standard book writer: writes a dict of sheets into an excel book
    via per-sheet writers created by concrete subclasses.
    """
    def __init__(self):
        super(BookWriter, self).__init__()
        self._file_alike_object = None
        self._keywords = None
    def open(self, file_name, **keywords):
        """
        open a file with unlimited keywords for writing
        keywords are passed on to individual writers
        """
        self._file_alike_object = file_name
        self._keywords = keywords
    def open_stream(self, file_stream, **keywords):
        """
        open a file stream with unlimited keywords for writing
        keywords are passed on to individual writers
        """
        if not isstream(file_stream):
            raise IOError(MESSAGE_ERROR_03)
        self.open(file_stream, **keywords)
    def open_content(self, file_stream, **keywords):
        """open a file content for read or write"""
        raise Exception("Normal writer would not need this interface")
    def write(self, incoming_dict):
        """
        write a dictionary of {sheet name: 2D array} into an excel file
        """
        for name in incoming_dict:
            sheet_writer = self.create_sheet(name)
            if not sheet_writer:
                raise Exception("Cannot create a sheet writer!")
            sheet_writer.write_array(incoming_dict[name])
            sheet_writer.close()
    def create_sheet(self, sheet_name):
        """
        implement this method for easy extension
        """
        raise NotImplementedError("Please implement create_sheet()")
def _convert_content_to_stream(file_content, file_type):
    """Wrap raw file content in a rewound stream of the io type that
    matches *file_type*, encoding/decoding utf-8 as needed on python 3."""
    stream = manager.get_io(file_type)
    if not PY2:
        expected_type = manager.get_io_type(file_type)
        # Align the payload with the stream's expected content type.
        if expected_type == 'bytes' and not isinstance(file_content, bytes):
            file_content = file_content.encode('utf-8')
        elif expected_type == 'string' and isinstance(file_content, bytes):
            file_content = file_content.decode('utf-8')
    stream.write(file_content)
    stream.seek(0)
    return stream
| 29.572581 | 79 | 0.593128 |
acfb14cdaa98e3f1741584295a6a6fca4e1cd649 | 1,327 | py | Python | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/UpgradeClientRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/UpgradeClientRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ehpc/aliyunsdkehpc/request/v20180412/UpgradeClientRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class UpgradeClientRequest(RpcRequest):
	# RPC request wrapper for the EHPC "UpgradeClient" API
	# (product 'EHPC', API version 2018-04-12, endpoint type 'ehs').

	def __init__(self):
		RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'UpgradeClient','ehs')

	def get_ClientVersion(self):
		# Target client version to upgrade the cluster's client to.
		return self.get_query_params().get('ClientVersion')

	def set_ClientVersion(self,ClientVersion):
		self.add_query_param('ClientVersion',ClientVersion)

	def get_ClusterId(self):
		# Identifier of the cluster whose client is upgraded.
		return self.get_query_params().get('ClusterId')

	def set_ClusterId(self,ClusterId):
		self.add_query_param('ClusterId',ClusterId)
acfb152fb704304b95394cf122491b382eedb35c | 3,015 | py | Python | plenum/test/node_request/test_apply_stashed_partially_ordered.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | null | null | null | plenum/test/node_request/test_apply_stashed_partially_ordered.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | null | null | null | plenum/test/node_request/test_apply_stashed_partially_ordered.py | andkononykhin/indy-plenum-copy | 46c48feaf75e5578c9dceb76d4b6d09f7e63add5 | [
"Apache-2.0"
] | null | null | null | import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.startable import Mode
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, assertExp
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import getNonPrimaryReplicas
from stp_core.loop.eventually import eventually
TOTAL_REQUESTS = 10
@pytest.fixture(scope="module")
def tconf(tconf):
    """Module-scoped config override forcing every request in the test
    into a single 3PC batch; restores the original limits afterwards."""
    saved_wait = tconf.Max3PCBatchWait
    saved_size = tconf.Max3PCBatchSize
    # Make sure that all requests in test will end up in one batch
    tconf.Max3PCBatchWait = 1000
    tconf.Max3PCBatchSize = TOTAL_REQUESTS
    yield tconf
    tconf.Max3PCBatchWait = saved_wait
    tconf.Max3PCBatchSize = saved_size
def test_apply_stashed_partially_ordered(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client):
    """Check that a node which stashed ORDERED messages while catching up
    can later apply them, even when some of the ordered txns were already
    added to its ledger by catchup (i.e. the batch is partially ordered).
    """
    # Pick a node that is not the master primary to play the lagging node.
    test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
    test_stasher = test_node.nodeIbStasher
    ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)
    def check_pool_ordered_some_requests():
        assert max(node.domainLedger.size for node in txnPoolNodeSet) > ledger_size
    def check_test_node_has_stashed_ordered_requests():
        assert len(test_node.stashedOrderedReqs) > 0
    # Delay COMMITs so requests are not ordered on test node
    with delay_rules(test_stasher, cDelay()):
        reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)
        looper.run(eventually(check_pool_ordered_some_requests))
        # Get some of txns that need to be ordered
        ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)
        txns = ledger_info.ledger.uncommittedTxns
        txns = txns[:len(txns) // 2]
        assert len(txns) > 1
    # Emulate incomplete catchup simultaneous with generation of ORDERED message
    # by intercepting try_processing_ordered and reverting unordered batches.
    origin_fun = test_node.try_processing_ordered
    ordered_msgs = []
    test_node.try_processing_ordered = lambda msg: ordered_msgs.append(msg)
    test_node.master_replica.revert_unordered_batches()
    looper.run(eventually(lambda: assertExp(len(ordered_msgs) > 0)))
    # While "synced" (catchup not finished) the replayed ORDERED messages
    # must be stashed rather than applied.
    test_node.mode = Mode.synced
    test_node.try_processing_ordered = origin_fun
    for msg in ordered_msgs:
        test_node.try_processing_ordered(msg)
    looper.run(eventually(check_test_node_has_stashed_ordered_requests))
    # Simulate catchup delivering half of the txns into the ledger.
    for txn in txns:
        ledger_info.ledger.add(txn)
        ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
    # Finish catchup; the stashed (partially ordered) requests must now
    # be processed without breaking pool consistency.
    test_node.mode = Mode.participating
    test_node.processStashedOrderedReqs()
    for r in test_node.replicas.values():
        r.stasher.unstash_catchup()
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    sdk_get_and_check_replies(looper, reqs)
| 39.671053 | 99 | 0.75257 |
acfb153cedb0ea6b3c4ddc532218b29d0e435520 | 4,206 | py | Python | geomet/util.py | tomplex/geomet | f57a2302d738ef8af694c8dde09e95d419457d9e | [
"Apache-2.0"
] | null | null | null | geomet/util.py | tomplex/geomet | f57a2302d738ef8af694c8dde09e95d419457d9e | [
"Apache-2.0"
] | null | null | null | geomet/util.py | tomplex/geomet | f57a2302d738ef8af694c8dde09e95d419457d9e | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Lars Butler & individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import six
if six.PY2:
import collections
else:
import collections.abc as collections
def block_splitter(data, block_size):
    """
    Lazily slice ``data`` into chunks of at most ``block_size`` items.

    >>> data = range(10)
    >>> list(block_splitter(data, 2))
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]

    A trailing partial chunk is yielded as-is:

    >>> data = range(10)
    >>> list(block_splitter(data, 3))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

    If ``block_size`` exceeds the length of ``data``, a single chunk is
    produced:

    >>> data = range(3)
    >>> list(block_splitter(data, 4))
    [[0, 1, 2]]

    :param data:
        Any iterable. If ``data`` is a generator, it will be exhausted,
        obviously.
    :param int block_size:
        Desired (maximum) block size.
    """
    iterator = iter(data)
    while True:
        chunk = list(itertools.islice(iterator, block_size))
        if not chunk:
            return
        yield chunk
def take(n, iterable):
    """
    Return the first n items of the iterable as a list.

    Copied shamelessly from
    http://docs.python.org/2/library/itertools.html#recipes.
    """
    first_n = itertools.islice(iterable, n)
    return list(first_n)
def as_bin_str(a_list):
    """Return the given sequence of byte values as a binary string."""
    if six.PY2:
        return b''.join(a_list)
    return bytes(a_list)
def round_geom(geom, precision=None):
    """Round coordinates of a geometric object to given precision.

    :param geom:
        GeoJSON-like mapping with ``'type'`` and ``'coordinates'`` keys.
    :param int precision:
        Number of decimal digits to keep; ``None`` leaves values
        unrounded (coordinates are still normalized into tuples).
    :returns:
        A new geometry dict of the same type with rounded coordinates.
    :raises ValueError:
        If the geometry type is not one of Point, LineString,
        MultiPoint, Polygon, MultiLineString or MultiPolygon.
    """
    def _round_ring(xs, ys):
        # Round parallel x/y sequences and zip them back into
        # a tuple of (x, y) pairs.
        if precision is not None:
            xs = [round(v, precision) for v in xs]
            ys = [round(v, precision) for v in ys]
        return tuple(zip(xs, ys))

    geom_type = geom['type']
    if geom_type == 'Point':
        x, y = geom['coordinates']
        new_coords = _round_ring([x], [y])[0]
    elif geom_type in ('LineString', 'MultiPoint'):
        new_coords = _round_ring(*zip(*geom['coordinates']))
    elif geom_type in ('Polygon', 'MultiLineString'):
        new_coords = [_round_ring(*zip(*ring)) for ring in geom['coordinates']]
    elif geom_type == 'MultiPolygon':
        new_coords = [
            [_round_ring(*zip(*ring)) for ring in polygon]
            for polygon in geom['coordinates']
        ]
    else:
        # Previously an unsupported type fell through to an
        # UnboundLocalError on new_coords; fail with a clear message.
        raise ValueError('Unsupported geometry type: %s' % geom_type)
    return {'type': geom['type'], 'coordinates': new_coords}
def flatten_multi_dim(sequence):
    """Lazily yield the scalar leaves of an arbitrarily nested
    array-like, treating strings as scalars rather than iterables.
    """
    for element in sequence:
        nested = (isinstance(element, collections.Iterable)
                  and not isinstance(element, six.string_types))
        if nested:
            for leaf in flatten_multi_dim(element):
                yield leaf
        else:
            yield element
def endian_token(is_little_endian):
    """Return the struct byte-order prefix for the given endianness."""
    return '<' if is_little_endian else '>'
acfb16245fea19afe596470f3c929c4fbf34c116 | 802 | py | Python | atomisticparsers/gulp/metainfo/__init__.py | nomad-coe/atomistic-parsers | 7be55968fbf45e8e49377b58e745548c55c06788 | [
"Apache-2.0"
] | null | null | null | atomisticparsers/gulp/metainfo/__init__.py | nomad-coe/atomistic-parsers | 7be55968fbf45e8e49377b58e745548c55c06788 | [
"Apache-2.0"
] | null | null | null | atomisticparsers/gulp/metainfo/__init__.py | nomad-coe/atomistic-parsers | 7be55968fbf45e8e49377b58e745548c55c06788 | [
"Apache-2.0"
] | null | null | null | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nomad.metainfo import Environment
from . import gulp
# Module-wide metainfo environment; registering the GULP metainfo
# package here makes its definitions available to consumers of m_env.
m_env = Environment()
m_env.m_add_sub_section(Environment.packages, gulp.m_package)
| 32.08 | 74 | 0.761845 |
acfb1630fca7b7b127be1bc1a1d6025ce3b4b4ac | 1,794 | py | Python | PyQuM/ver(0.0)/QuApp/tools/AgilentDRV.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | PyQuM/ver(0.0)/QuApp/tools/AgilentDRV.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | PyQuM/ver(0.0)/QuApp/tools/AgilentDRV.py | takehuge/PYQUM | bfc9d9b1c2f4246c7aac3a371baaf587c99f8069 | [
"MIT"
] | null | null | null | # This is a collection of low-level VIs for Agilent Drivers
from QuApp.tools.Callout import Call_VI
# from Callout import Call_VI
# Root locations of the Agilent LabVIEW driver VIs on the host machine.
LOC01 = "C:\\Program Files (x86)\\Agilent\\M9392\\LabVIEW Driver\\20xx\\Agilent M9392"  # VSA (M9392A)
LOC02 = "C:\\Program Files (x86)\\Agilent\\M933x\\LabVIEW Driver\\20xx\\Agilent M933x"  # AWG (M933x)
@Call_VI
def InitializeVSA(Parameters):
    """Build the call package for the VSA 'Initialize With Options' VI."""
    return {
        'VIPath': LOC01 + "\\Initialize With Options.vi",
        'ParameterNames': ["resource string", "option string", "id query (Off)", "reset device (Off)"],
        'Parameters': Parameters,
        'Indicators': ["instrument handle out", "error out"],
    }
@Call_VI
def CloseVSA(handle):
    """Build the call package for the VSA 'Close' VI."""
    return {
        'VIPath': LOC01 + "\\Close.vi",
        'ParameterNames': ["instrument handle"],
        'Parameters': handle,
        'Indicators': ["error out"],
    }
@Call_VI
def InitializeAWG(Parameters):
    """Build the call package for the AWG 'Initialize With Options' VI."""
    return {
        'VIPath': LOC02 + "\\Initialize With Options.vi",
        'ParameterNames': ["resource string", "option string", "id query (Off)", "reset device (Off)"],
        'Parameters': Parameters,
        'Indicators': ["instrument handle out", "error out"],
    }
@Call_VI
def CloseAWG(handle):
    """Build the call package for the AWG 'Close' VI."""
    return {
        'VIPath': LOC02 + "\\Close.vi",
        'ParameterNames': ["instrument handle"],
        'Parameters': handle,
        'Indicators': ["error out"],
    }
# @Call_VI
# def ConfigAcquisVSA(Parameters):
# pack = dict()
# pack['VIPath'] = LOC01 + "\\Public\\Configure\\Configure Acquisition.vi"
# pack['ParameterNames'] = ["instrument handle"]
# pack['Parameters'] = Parameters
# pack['Indicators'] = ["error out"]
# return pack
| 32.618182 | 105 | 0.654961 |
acfb174abd8fe7fb0f4700ff2027eba3a6c2916b | 4,918 | py | Python | app/routers/inception.py | ephraimberkovitch/cadet | 40ff288bfa96a3a0615fdf0b4d79246bc0fb0011 | [
"MIT"
] | 2 | 2021-06-23T14:03:09.000Z | 2021-11-21T01:06:03.000Z | app/routers/inception.py | ephraimberkovitch/cadet | 40ff288bfa96a3a0615fdf0b4d79246bc0fb0011 | [
"MIT"
] | 13 | 2021-06-23T16:07:57.000Z | 2021-07-09T20:51:09.000Z | app/routers/inception.py | ephraimberkovitch/cadet | 40ff288bfa96a3a0615fdf0b4d79246bc0fb0011 | [
"MIT"
] | 2 | 2021-06-23T16:09:32.000Z | 2022-03-18T12:44:25.000Z | from fastapi import APIRouter, Depends, HTTPException
from collections import namedtuple
from typing import Any, Dict

import spacy
from cassis import Cas
from cassis import TypeSystem, load_typesystem, load_cas_from_xmi
from fastapi import Request, Form, File, UploadFile
from fastapi.templating import Jinja2Templates
from spacy.tokens import Doc

from app.util.login import get_current_username
# Load the spaCy pipeline once at import time; the parser component is
# disabled since only tagging is used below.
nlp = spacy.load("en_core_web_sm", disable=["parser"])
# Types
JsonDict = Dict[str, Any]
# Shapes of the payloads exchanged with INCEpTION's external recommender API.
PredictionRequest = namedtuple(
    "PredictionRequest", ["layer", "feature", "projectId", "document", "typeSystem"]
)
PredictionResponse = namedtuple("PredictionResponse", ["document"])
Document = namedtuple("Document", ["xmi", "documentId", "userId"])
# Constants
# Fully-qualified UIMA type names used to select spans from the CAS.
SENTENCE_TYPE = "de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence"
TOKEN_TYPE = "de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token"
# Feature flag marking annotations as machine predictions inside INCEpTION.
IS_PREDICTION = "inception_internal_predicted"
# Util functions
def parse_prediction_request(json_object: JsonDict) -> PredictionRequest:
    """Unpack an INCEpTION prediction payload into a PredictionRequest."""
    metadata = json_object["metadata"]
    doc_json = json_object["document"]
    document = Document(
        doc_json["xmi"], doc_json["documentId"], doc_json["userId"]
    )
    return PredictionRequest(
        metadata["layer"],
        metadata["feature"],
        metadata["projectId"],
        document,
        json_object["typeSystem"],
    )
# Router
# Every route below requires an authenticated user (HTTP basic auth).
router = APIRouter(dependencies=[Depends(get_current_username)])
# INCEpTION posts a request to the endpoint
# the request includes cas/xml serialized as xmi (this is the text and existing annotation data)
# the app adds annotations to the cas
# the app returns the Document
# https://github.com/inception-project/inception-external-recommender/blob/master/ariadne/server.py
# Chunk, Lemma, Morphological Features, Named Entity, Orthography Correction, Part of Speech,
@router.get("/pos/predict")
async def pos1_predict(request: Request):
    """Simple GET probe for the POS recommender endpoint."""
    payload = {"hi there": "I'm pos"}
    return payload
# POS 👩🚀🧑🚀👨🚀
@router.post("/pos/predict")
async def pos_predict(request: Request):
    """Annotate the posted CAS document with POS tags and return it.

    Bug fix: ``Request.json()`` is a coroutine in Starlette/FastAPI, so
    the handler must be ``async`` and ``await`` it — the previous sync
    version passed an un-awaited coroutine into the parser.
    """
    json_data = await request.json()
    prediction_request = parse_prediction_request(json_data)
    prediction_response = predict_pos(prediction_request)
    return prediction_response.document
# @router.post("/pos/train")
# async def pos_train(cas: Cas, layer: str, feature: str, project_id: str, document_id: str, user_id: str):
# pass check that <class 'cassis.cas.Cas'> is a valid pydantic field type
def predict_pos(prediction_request: PredictionRequest) -> PredictionResponse:
    """Run spaCy POS tagging over the request's CAS and return new XMI."""
    # Load the CAS and type system from the request
    typesystem = load_typesystem(prediction_request.typeSystem)
    cas = load_cas_from_xmi(prediction_request.document.xmi, typesystem=typesystem)
    AnnotationType = typesystem.get_type(prediction_request.layer)

    # Build a spacy doc from the token texts already present in the CAS
    cas_tokens = list(cas.select(TOKEN_TYPE))
    doc = Doc(nlp.vocab, words=[cas.get_covered_text(t) for t in cas_tokens])

    # Do the tagging
    nlp.tagger(doc)

    # Create one prediction annotation per tagged token
    for spacy_token in doc:
        source = cas_tokens[spacy_token.i]
        fields = {
            "begin": source.begin,
            "end": source.end,
            IS_PREDICTION: True,
            prediction_request.feature: spacy_token.pos_,
        }
        cas.add_annotation(AnnotationType(**fields))

    return PredictionResponse(cas.to_xmi())
# 👩🚀 LEMMA 👨🚀
@router.post("/lemma/predict")
async def lemma_predict(request: Request):
    """Annotate the posted CAS document with lemmas and return it.

    Bug fix: ``Request.json()`` is a coroutine and was previously not
    awaited, so the raw coroutine object was handed to the parser.
    """
    json_data = await request.json()
    prediction_request = parse_prediction_request(json_data)
    prediction_response = predict_lemma(prediction_request)
    return prediction_response.document
def predict_lemma(prediction_request: PredictionRequest) -> PredictionResponse:
    """Run spaCy lemmatization over the request's CAS and return new XMI."""
    # Load the CAS and type system from the request
    typesystem = load_typesystem(prediction_request.typeSystem)
    cas = load_cas_from_xmi(prediction_request.document.xmi, typesystem=typesystem)
    AnnotationType = typesystem.get_type(prediction_request.layer)

    # Build a spacy doc from the token texts already present in the CAS
    cas_tokens = list(cas.select(TOKEN_TYPE))
    doc = Doc(nlp.vocab, words=[cas.get_covered_text(t) for t in cas_tokens])

    # Do the tagging
    nlp.tagger(doc)

    # Create one prediction annotation per lemmatized token
    for spacy_token in doc:
        source = cas_tokens[spacy_token.i]
        fields = {
            "begin": source.begin,
            "end": source.end,
            IS_PREDICTION: True,
            prediction_request.feature: spacy_token.lemma_,
        }
        cas.add_annotation(AnnotationType(**fields))

    return PredictionResponse(cas.to_xmi())
| 34.152778 | 107 | 0.725092 |
acfb176ba0a619cf9ced560baad83f256a9971d1 | 10,676 | py | Python | loopy/target/cuda.py | cmsquared/loopy | baef6e7603b2bba683327fd43cb006864c225aa6 | [
"MIT"
] | null | null | null | loopy/target/cuda.py | cmsquared/loopy | baef6e7603b2bba683327fd43cb006864c225aa6 | [
"MIT"
] | null | null | null | loopy/target/cuda.py | cmsquared/loopy | baef6e7603b2bba683327fd43cb006864c225aa6 | [
"MIT"
] | 1 | 2021-03-09T15:55:33.000Z | 2021-03-09T15:55:33.000Z | """CUDA target independent of PyCUDA."""
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2015 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
from pytools import memoize_method
from loopy.target.c import CTarget, CASTBuilder
from loopy.target.c.codegen.expression import ExpressionToCMapper
from loopy.diagnostic import LoopyError
from loopy.types import NumpyType
from loopy.kernel.data import temp_var_scope
# {{{ vector types
class vec: # noqa
    """Namespace populated by _create_vector_types() with CUDA vector
    dtypes (vec.float2, vec.float4, ...) and the lookup tables
    vec.types, vec.names_and_dtypes and vec.type_to_scalar_and_count."""
    pass
def _create_vector_types():
    """Populate the :class:`vec` namespace with numpy structured dtypes
    mirroring CUDA's built-in vector types (char2, float4, ...)."""
    field_names = ["x", "y", "z", "w"]
    # 'long' follows the native pointer width of the interpreter.
    if tuple.__itemsize__ * 8 == 32:
        long_dtype = np.int32
        ulong_dtype = np.uint32
    else:
        long_dtype = np.int64
        ulong_dtype = np.uint64
    vec.types = {}
    vec.names_and_dtypes = []
    vec.type_to_scalar_and_count = {}
    for base_name, base_type, counts in [
            ('char', np.int8, [1, 2, 3, 4]),
            ('uchar', np.uint8, [1, 2, 3, 4]),
            ('short', np.int16, [1, 2, 3, 4]),
            ('ushort', np.uint16, [1, 2, 3, 4]),
            ('int', np.int32, [1, 2, 3, 4]),
            ('uint', np.uint32, [1, 2, 3, 4]),
            ('long', long_dtype, [1, 2, 3, 4]),
            ('ulong', ulong_dtype, [1, 2, 3, 4]),
            ('longlong', np.int64, [1, 2]),
            ('ulonglong', np.uint64, [1, 2]),
            ('float', np.float32, [1, 2, 3, 4]),
            ('double', np.float64, [1, 2]),
            ]:
        for count in counts:
            name = "%s%d" % (base_name, count)
            # Fields are named s0..s(count-1) with titles x/y/z/w.
            titles = field_names[:count]
            names = ["s%d" % i for i in range(count)]
            if len(titles) < len(names):
                # NOTE(review): dead for counts <= 4 since field_names
                # has 4 entries; kept for safety with larger counts.
                titles.extend((len(names)-len(titles))*[None])
            # Fall back through numpy structured-dtype constructors for
            # compatibility across numpy versions (titles support varies).
            try:
                dtype = np.dtype(dict(
                    names=names,
                    formats=[base_type]*count,
                    titles=titles))
            except NotImplementedError:
                try:
                    dtype = np.dtype([((n, title), base_type)
                                      for (n, title) in zip(names, titles)])
                except TypeError:
                    dtype = np.dtype([(n, base_type) for (n, title)
                                      in zip(names, titles)])
            setattr(vec, name, dtype)
            vec.names_and_dtypes.append((name, dtype))
            vec.types[np.dtype(base_type), count] = dtype
            vec.type_to_scalar_and_count[dtype] = np.dtype(base_type), count
_create_vector_types()
def _register_vector_types(dtype_registry):
    """Register every CUDA vector type with the given dtype registry."""
    for type_name, numpy_dtype in vec.names_and_dtypes:
        dtype_registry.get_or_register_dtype(type_name, numpy_dtype)
# }}}
# {{{ function mangler
def cuda_function_mangler(kernel, name, arg_dtypes):
    """Map a scalar function name and argument dtypes to its CUDA form.

    :returns: a ``(result_dtype, c_name)`` tuple, or *None* if the
        function is not handled by this mangler.
    :raises RuntimeError: for min/max on complex dtypes.
    """
    if not isinstance(name, str):
        return None

    if name in ["max", "min"] and len(arg_dtypes) == 2:
        dtype = np.find_common_type([], arg_dtypes)

        if dtype.kind == "c":
            raise RuntimeError("min/max do not support complex numbers")

        if dtype.kind == "f":
            # CUDA's floating-point variants are fmin/fmax.
            name = "f" + name

        return dtype, name

    # Bug fix: this used to be `name in "atan2"`, a substring test that
    # also matched names like "a", "an" and "tan".
    if name == "atan2" and len(arg_dtypes) == 2:
        return arg_dtypes[0], name

    if name == "dot":
        # Result is the scalar component type of the vector argument.
        scalar_dtype, offset, field_name = arg_dtypes[0].fields["x"]
        return scalar_dtype, name

    return None
# }}}
# {{{ expression mapper
class ExpressionToCudaCMapper(ExpressionToCMapper):
    """Expression-to-C mapper that renders loopy hardware axes as CUDA
    ``blockIdx``/``threadIdx`` accesses, cast to the kernel index type."""

    _GRID_AXES = "xyz"

    @staticmethod
    def _get_index_ctype(kernel):
        """Return the C type name matching the kernel's index dtype."""
        if kernel.index_dtype.numpy_dtype == np.int32:
            return "int32_t"
        elif kernel.index_dtype.numpy_dtype == np.int64:
            # Bug fix: this branch previously re-tested np.int32,
            # making it unreachable and raising LoopyError for
            # kernels with 64-bit index types.
            return "int64_t"
        else:
            raise LoopyError("unexpected index type")

    def map_group_hw_index(self, expr, enclosing_prec, type_context):
        # Group (block) index along the requested axis.
        return "((%s) blockIdx.%s)" % (
            self._get_index_ctype(self.kernel),
            self._GRID_AXES[expr.axis])

    def map_local_hw_index(self, expr, enclosing_prec, type_context):
        # Local (thread) index along the requested axis.
        return "((%s) threadIdx.%s)" % (
            self._get_index_ctype(self.kernel),
            self._GRID_AXES[expr.axis])
# }}}
# {{{ target
class CudaTarget(CTarget):
    """A target for Nvidia's CUDA GPU programming language."""
    def __init__(self, extern_c=True):
        """
        :arg extern_c: If *True*, declare kernels using "extern C" to
            avoid name mangling.
        """
        self.extern_c = extern_c
        super(CudaTarget, self).__init__()
    def get_device_ast_builder(self):
        # AST builder that emits CUDA-flavored C (see CUDACASTBuilder).
        return CUDACASTBuilder(self)
    # {{{ types
    @memoize_method
    def get_dtype_registry(self):
        """Return a (cached) registry of C scalar and CUDA vector types."""
        from loopy.target.c.compyte.dtypes import (DTypeRegistry,
                fill_registry_with_opencl_c_types)
        result = DTypeRegistry()
        fill_registry_with_opencl_c_types(result)
        # no complex number support--needs PyOpenCLTarget
        _register_vector_types(result)
        return result
    def is_vector_dtype(self, dtype):
        # True if dtype is one of the numpy structured vector dtypes
        # created by _create_vector_types().
        return (isinstance(dtype, NumpyType)
                and dtype.numpy_dtype in list(vec.types.values()))
    def vector_dtype(self, base, count):
        # Look up e.g. (float32, 4) -> float4.
        return NumpyType(
            vec.types[base.numpy_dtype, count],
            target=self)
# }}}
# }}}
# {{{ ast builder
class CUDACASTBuilder(CASTBuilder):
    """C AST builder emitting CUDA-specific declarations, barriers and
    address-space qualifiers."""

    # {{{ library

    def function_manglers(self):
        return (
                super(CUDACASTBuilder, self).function_manglers() + [
                    cuda_function_mangler
                    ])

    # }}}

    # {{{ top-level codegen

    def get_function_declaration(self, codegen_state, codegen_result,
            schedule_index):
        """Wrap the base declaration as a __global__ (optionally
        extern "C") kernel, adding __launch_bounds__ when the block
        size is statically known."""
        fdecl = super(CUDACASTBuilder, self).get_function_declaration(
                codegen_state, codegen_result, schedule_index)

        from cgen.cuda import CudaGlobal, CudaLaunchBounds
        fdecl = CudaGlobal(fdecl)

        if self.target.extern_c:
            from cgen import Extern
            fdecl = Extern("C", fdecl)

        from loopy.schedule import get_insn_ids_for_block_at
        _, local_grid_size = \
                codegen_state.kernel.get_grid_sizes_for_insn_ids_as_exprs(
                        get_insn_ids_for_block_at(
                            codegen_state.kernel.schedule, schedule_index))

        from loopy.symbolic import get_dependencies
        if not get_dependencies(local_grid_size):
            # Sizes can't have parameter dependencies if they are
            # to be used in static thread block size.
            from pytools import product
            nthreads = product(local_grid_size)

            fdecl = CudaLaunchBounds(nthreads, fdecl)

        return fdecl

    def generate_code(self, kernel, codegen_state, impl_arg_info):
        # Bug fix: super() previously named CudaTarget, which is not in
        # this class's MRO and raised TypeError at runtime.
        code, implemented_domains = (
                super(CUDACASTBuilder, self).generate_code(
                    kernel, codegen_state, impl_arg_info))

        return code, implemented_domains

    def generate_body(self, kernel, codegen_state):
        # Bug fix: same wrong super() target as generate_code.
        body, implemented_domains = (
                super(CUDACASTBuilder, self).generate_body(
                    kernel, codegen_state))

        from loopy.kernel.data import ImageArg

        if any(isinstance(arg, ImageArg) for arg in kernel.args):
            raise NotImplementedError("not yet: texture arguments in CUDA")

        return body, implemented_domains

    # }}}

    # {{{ code generation guts

    def get_expression_to_code_mapper(self, codegen_state):
        return ExpressionToCudaCMapper(codegen_state)

    _VEC_AXES = "xyzw"

    def add_vector_access(self, access_str, index):
        # e.g. v -> v.x for index 0
        return "(%s).%s" % (access_str, self._VEC_AXES[int(index)])

    def emit_barrier(self, kind, comment):
        """
        :arg kind: ``"local"`` or ``"global"``
        :return: a :class:`loopy.codegen.GeneratedInstruction`.
        """
        if kind == "local":
            if comment:
                comment = " /* %s */" % comment

            from cgen import Statement
            return Statement("__syncthreads()%s" % comment)
        elif kind == "global":
            raise LoopyError("CUDA does not have global barriers")
        else:
            raise LoopyError("unknown barrier kind")

    def wrap_temporary_decl(self, decl, scope):
        if scope == temp_var_scope.LOCAL:
            from cgen.cuda import CudaShared
            return CudaShared(decl)
        elif scope == temp_var_scope.PRIVATE:
            return decl
        else:
            raise ValueError("unexpected temporary variable scope: %s"
                    % scope)

    def wrap_global_constant(self, decl):
        # Bug fix: CudaConstant lives in cgen.cuda, not cgen.opencl
        # (consistent with get_constant_arg_decl below).
        from cgen.cuda import CudaConstant
        return CudaConstant(decl)

    def get_global_arg_decl(self, name, shape, dtype, is_written):
        from loopy.target.c import POD  # uses the correct complex type
        from cgen import Const
        from cgen.cuda import CudaRestrictPointer

        arg_decl = CudaRestrictPointer(POD(self, dtype, name))

        if not is_written:
            arg_decl = Const(arg_decl)

        return arg_decl

    def get_image_arg_decl(self, name, shape, dtype, is_written):
        raise NotImplementedError("not yet: texture arguments in CUDA")

    def get_constant_arg_decl(self, name, shape, dtype, is_written):
        from loopy.target.c import POD  # uses the correct complex type
        from cgen import RestrictPointer, Const
        from cgen.cuda import CudaConstant

        # Bug fix: POD takes (target, dtype, name); the target argument
        # was previously missing here (cf. get_global_arg_decl).
        arg_decl = RestrictPointer(POD(self, dtype, name))

        if not is_written:
            arg_decl = Const(arg_decl)

        return CudaConstant(arg_decl)

    # }}}
# }}}
# vim: foldmethod=marker
| 30.502857 | 77 | 0.615774 |
acfb17dfc489ced9ae58a4a0c07d7c9a9a2b1366 | 43,992 | py | Python | osf/models/registrations.py | tsukaeru/RDM-osf.io | 2dc3e539322b6110e51772f8bd25ebdeb8e12d0e | [
"Apache-2.0"
] | 11 | 2018-12-11T16:39:40.000Z | 2022-02-26T09:51:32.000Z | osf/models/registrations.py | tsukaeru/RDM-osf.io | 2dc3e539322b6110e51772f8bd25ebdeb8e12d0e | [
"Apache-2.0"
] | 52 | 2018-04-13T05:03:21.000Z | 2022-03-22T02:56:19.000Z | osf/models/registrations.py | tsukaeru/RDM-osf.io | 2dc3e539322b6110e51772f8bd25ebdeb8e12d0e | [
"Apache-2.0"
] | 16 | 2018-07-09T01:44:51.000Z | 2021-06-30T01:57:16.000Z | import logging
import datetime
import html
from future.moves.urllib.parse import urljoin
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from guardian.models import (
GroupObjectPermissionBase,
UserObjectPermissionBase,
)
from dirtyfields import DirtyFieldsMixin
from framework.auth import Auth
from framework.exceptions import PermissionsError
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils.permissions import ADMIN, READ, WRITE
from osf.exceptions import NodeStateError, DraftRegistrationStateError
from website.util import api_v2_url
from website import settings
from website.archiver import ARCHIVER_INITIATED
from osf.models import (
Node,
OSFUser,
Embargo,
Retraction,
RegistrationSchema,
DraftRegistrationApproval,
EmbargoTerminationApproval,
DraftRegistrationContributor,
)
from osf.models.archive import ArchiveJob
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.draft_node import DraftNode
from osf.models.node import AbstractNode
from osf.models.mixins import (
EditableFieldsMixin,
Loggable,
GuardianMixin,
)
from osf.models.nodelog import NodeLog
from osf.models.provider import RegistrationProvider
from osf.models.mixins import RegistrationResponseMixin
from osf.models.tag import Tag
from osf.models.validators import validate_title
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
logger = logging.getLogger(__name__)
class Registration(AbstractNode):
WRITABLE_WHITELIST = [
'article_doi',
'description',
'is_public',
'node_license',
'category',
]
provider = models.ForeignKey(
'RegistrationProvider',
related_name='registrations',
null=True,
on_delete=models.SET_NULL
)
registered_date = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# This is a NullBooleanField because of inheritance issues with using a BooleanField
# TODO: Update to BooleanField(default=False, null=True) when Django is updated to >=2.1
external_registration = models.NullBooleanField(default=False)
registered_user = models.ForeignKey(OSFUser,
related_name='related_to',
on_delete=models.SET_NULL,
null=True, blank=True)
# TODO: Consider making this a FK, as there can be one per Registration
registered_schema = models.ManyToManyField(RegistrationSchema)
registered_meta = DateTimeAwareJSONField(default=dict, blank=True)
registered_from = models.ForeignKey('self',
related_name='registrations',
on_delete=models.SET_NULL,
null=True, blank=True)
# Sanctions
registration_approval = models.ForeignKey('RegistrationApproval',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
retraction = models.ForeignKey('Retraction',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
embargo = models.ForeignKey('Embargo',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
embargo_termination_approval = models.ForeignKey('EmbargoTerminationApproval',
related_name='registrations',
null=True, blank=True,
on_delete=models.SET_NULL)
files_count = models.PositiveIntegerField(blank=True, null=True)
@staticmethod
def find_failed_registrations():
expired_if_before = timezone.now() - settings.ARCHIVE_TIMEOUT_TIMEDELTA
node_id_list = ArchiveJob.objects.filter(sent=False, datetime_initiated__lt=expired_if_before, status=ARCHIVER_INITIATED).values_list('dst_node', flat=True)
root_nodes_id = AbstractNode.objects.filter(id__in=node_id_list).values_list('root', flat=True).distinct()
stuck_regs = AbstractNode.objects.filter(id__in=root_nodes_id, is_deleted=False)
return stuck_regs
@property
def registration_schema(self):
# For use in RegistrationResponseMixin
if self.registered_schema.exists():
return self.registered_schema.first()
return None
def get_registration_metadata(self, schema):
# Overrides RegistrationResponseMixin
registered_meta = self.registered_meta or {}
return registered_meta.get(schema._id, None)
@property
def file_storage_resource(self):
# Overrides RegistrationResponseMixin
return self.registered_from
@property
def registered_schema_id(self):
schema = self.registration_schema
return schema._id if schema else None
@property
def is_registration(self):
"""For v1 compat."""
return True
@property
def is_stuck_registration(self):
return self in self.find_failed_registrations()
@property
def is_collection(self):
"""For v1 compat."""
return False
@property
def archive_job(self):
return self.archive_jobs.first() if self.archive_jobs.count() else None
@property
def sanction(self):
root = self._dirty_root
sanction = (
root.embargo_termination_approval or
root.retraction or
root.embargo or
root.registration_approval
)
if sanction:
return sanction
else:
return None
@property
def is_registration_approved(self):
root = self._dirty_root
if root.registration_approval is None:
return False
return root.registration_approval.is_approved
@property
def is_pending_embargo(self):
root = self._dirty_root
if root.embargo is None:
return False
return root.embargo.is_pending_approval
@property
def is_pending_embargo_for_existing_registration(self):
""" Returns True if Node has an Embargo pending approval for an
existing registrations. This is used specifically to ensure
registrations pre-dating the Embargo feature do not get deleted if
their respective Embargo request is rejected.
"""
root = self._dirty_root
if root.embargo is None:
return False
return root.embargo.pending_registration
@property
def is_retracted(self):
root = self._dirty_root
if root.retraction is None:
return False
return root.retraction.is_approved
@property
def is_pending_registration(self):
root = self._dirty_root
if root.registration_approval is None:
return False
return root.registration_approval.is_pending_approval
@property
def is_pending_retraction(self):
root = self._dirty_root
if root.retraction is None:
return False
return root.retraction.is_pending_approval
@property
def is_pending_embargo_termination(self):
root = self._dirty_root
if root.embargo_termination_approval is None:
return False
return root.embargo_termination_approval.is_pending_approval
@property
def is_embargoed(self):
"""A Node is embargoed if:
- it has an associated Embargo record
- that record has been approved
- the node is not public (embargo not yet lifted)
"""
root = self._dirty_root
if root.is_public or root.embargo is None:
return False
return root.embargo.is_approved
@property
def embargo_end_date(self):
root = self._dirty_root
if root.embargo is None:
return False
return root.embargo.embargo_end_date
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def _dirty_root(self):
"""Equivalent to `self.root`, but don't let Django fetch a clean copy
when `self == self.root`. Use when it's important to reflect unsaved
state rather than database state.
"""
if self.id == self.root_id:
return self
return self.root
    # NOTE(review): unlike withdrawal_justification just below, this is a
    # plain method rather than a property -- possibly a missing @property
    # decorator; confirm against callers before changing.
    def date_withdrawn(self):
        """Return the root registration's retraction date, or None."""
        return getattr(self.root.retraction, 'date_retracted', None)
    @property
    def withdrawal_justification(self):
        """Justification text of the root's retraction, or None."""
        return getattr(self.root.retraction, 'justification', None)
def _initiate_embargo(self, user, end_date, for_existing_registration=False,
notify_initiator_on_complete=False):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param end_date: Date when the registration should be made public
"""
end_date_midnight = datetime.datetime.combine(
end_date,
datetime.datetime.min.time()
).replace(tzinfo=end_date.tzinfo)
self.embargo = Embargo.objects.create(
initiated_by=user,
end_date=end_date_midnight,
for_existing_registration=for_existing_registration,
notify_initiator_on_complete=notify_initiator_on_complete
)
self.save() # Set foreign field reference Node.embargo
admins = self.get_admin_contributors_recursive(unique_users=True)
for (admin, node) in admins:
self.embargo.add_authorizer(admin, node)
self.embargo.save() # Save embargo's approval_state
return self.embargo
def embargo_registration(self, user, end_date, for_existing_registration=False,
notify_initiator_on_complete=False):
"""Enter registration into an embargo period at end of which, it will
be made public
:param user: User initiating the embargo
:param end_date: Date when the registration should be made public
:raises: NodeStateError if Node is not a registration
:raises: PermissionsError if user is not an admin for the Node
:raises: ValidationError if end_date is not within time constraints
"""
if not self.is_admin_contributor(user):
raise PermissionsError('Only admins may embargo a registration')
if not self._is_embargo_date_valid(end_date):
if (end_date - timezone.now()) >= settings.EMBARGO_END_DATE_MIN:
raise ValidationError('Registrations can only be embargoed for up to four years.')
raise ValidationError('Embargo end date must be at least three days in the future.')
embargo = self._initiate_embargo(user, end_date,
for_existing_registration=for_existing_registration,
notify_initiator_on_complete=notify_initiator_on_complete)
self.registered_from.add_log(
action=NodeLog.EMBARGO_INITIATED,
params={
'node': self.registered_from._id,
'registration': self._id,
'embargo_id': embargo._id,
},
auth=Auth(user),
save=True,
)
if self.is_public:
self.set_privacy('private', Auth(user))
def request_embargo_termination(self, auth):
"""Initiates an EmbargoTerminationApproval to lift this Embargoed Registration's
embargo early."""
if not self.is_embargoed:
raise NodeStateError('This node is not under active embargo')
if not self.root == self:
raise NodeStateError('Only the root of an embargoed registration can request termination')
approval = EmbargoTerminationApproval(
initiated_by=auth.user,
embargoed_registration=self,
)
admins = [admin for admin in self.root.get_admin_contributors_recursive(unique_users=True)]
for (admin, node) in admins:
approval.add_authorizer(admin, node=node)
approval.save()
approval.ask(admins)
self.embargo_termination_approval = approval
self.save()
return approval
def terminate_embargo(self, auth):
"""Handles the actual early termination of an Embargoed registration.
Adds a log to the registered_from Node.
"""
if not self.is_embargoed:
raise NodeStateError('This node is not under active embargo')
self.registered_from.add_log(
action=NodeLog.EMBARGO_TERMINATED,
params={
'project': self._id,
'node': self.registered_from._id,
'registration': self._id,
},
auth=None,
save=True
)
self.embargo.mark_as_completed()
for node in self.node_and_primary_descendants():
node.set_privacy(
self.PUBLIC,
auth=None,
log=False,
save=True
)
return True
def get_contributor_registration_response_keys(self):
"""
Returns the keys of the supplemental responses whose answers
contain author information
:returns QuerySet
"""
return self.registration_schema.schema_blocks.filter(
block_type='contributors-input', registration_response_key__isnull=False,
).values_list('registration_response_key', flat=True)
def copy_registered_meta_and_registration_responses(self, draft, save=True):
"""
Sets the registration's registered_meta and registration_responses from the draft.
If contributor information is in a question, build an accurate bibliographic
contributors list on the registration
"""
if not self.registered_meta:
self.registered_meta = {}
registration_metadata = draft.registration_metadata
registration_responses = draft.registration_responses
bibliographic_contributors = ', '.join(
draft.branched_from.visible_contributors.values_list('fullname', flat=True)
)
contributor_keys = self.get_contributor_registration_response_keys()
for key in contributor_keys:
if key in registration_metadata:
registration_metadata[key]['value'] = bibliographic_contributors
if key in registration_responses:
registration_responses[key] = bibliographic_contributors
self.registered_meta[self.registration_schema._id] = registration_metadata
self.registration_responses = registration_responses
if save:
self.save()
def _initiate_retraction(self, user, justification=None):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param justification: Justification, if given, for retraction
"""
self.retraction = Retraction.objects.create(
initiated_by=user,
justification=justification or None, # make empty strings None
state=Retraction.UNAPPROVED
)
self.save()
admins = self.get_admin_contributors_recursive(unique_users=True)
for (admin, node) in admins:
self.retraction.add_authorizer(admin, node)
self.retraction.save() # Save retraction approval state
return self.retraction
def retract_registration(self, user, justification=None, save=True):
"""Retract public registration. Instantiate new Retraction object
and associate it with the respective registration.
"""
if not self.is_public and not (self.embargo_end_date or self.is_pending_embargo):
raise NodeStateError('Only public or embargoed registrations may be withdrawn.')
if self.root_id != self.id:
raise NodeStateError('Withdrawal of non-parent registrations is not permitted.')
retraction = self._initiate_retraction(user, justification)
self.registered_from.add_log(
action=NodeLog.RETRACTION_INITIATED,
params={
'node': self.registered_from._id,
'registration': self._id,
'retraction_id': retraction._id,
},
auth=Auth(user),
)
self.retraction = retraction
if save:
self.save()
return retraction
def delete_registration_tree(self, save=False):
logger.debug('Marking registration {} as deleted'.format(self._id))
self.is_deleted = True
self.deleted = timezone.now()
for draft_registration in DraftRegistration.objects.filter(registered_node=self):
# Allow draft registration to be submitted
if draft_registration.approval:
draft_registration.approval = None
draft_registration.save()
if not getattr(self.embargo, 'for_existing_registration', False):
self.registered_from = None
if save:
self.save()
self.update_search()
for child in self.nodes_primary:
child.delete_registration_tree(save=save)
def update_files_count(self):
# Updates registration files_count at archival success or
# at the end of forced (manual) archive for restarted (stuck or failed) registrations.
field = AbstractNode._meta.get_field('modified')
field.auto_now = False
self.files_count = self.files.filter(deleted_on__isnull=True).count()
self.save()
field.auto_now = True
def add_tag(self, tag, auth=None, save=True, log=True, system=False):
if self.retraction is None:
super(Registration, self).add_tag(tag, auth, save, log, system)
else:
raise NodeStateError('Cannot add tags to withdrawn registrations.')
def add_tags(self, tags, auth=None, save=True, log=True, system=False):
if self.retraction is None:
super(Registration, self).add_tags(tags, auth, save, log, system)
else:
raise NodeStateError('Cannot add tags to withdrawn registrations.')
def remove_tag(self, tag, auth, save=True):
if self.retraction is None:
super(Registration, self).remove_tag(tag, auth, save)
else:
raise NodeStateError('Cannot remove tags of withdrawn registrations.')
def remove_tags(self, tags, auth, save=True):
if self.retraction is None:
super(Registration, self).remove_tags(tags, auth, save)
else:
raise NodeStateError('Cannot remove tags of withdrawn registrations.')
class Meta:
# custom permissions for use in the GakuNin RDM Admin App
permissions = (
('view_registration', 'Can view registration details'),
)
class DraftRegistrationLog(ObjectIDMixin, BaseModel):
    """ Simple log to show status changes for DraftRegistrations
    Also, editable fields on registrations are logged.
    field - _id - primary key
    field - date - date of the action took place
    field - action - simple action to track what happened
    field - user - user who did the action
    """
    # When the logged action happened (defaults to creation time).
    date = NonNaiveDateTimeField(default=timezone.now)
    # One of the action constants below.
    action = models.CharField(max_length=255)
    draft = models.ForeignKey('DraftRegistration', related_name='logs',
                              null=True, blank=True, on_delete=models.CASCADE)
    user = models.ForeignKey('OSFUser', db_index=True, null=True, blank=True, on_delete=models.CASCADE)
    # Free-form JSON payload describing the action (e.g. draft id, tag name).
    params = DateTimeAwareJSONField(default=dict)
    # Action-name constants stored in ``action``:
    SUBMITTED = 'submitted'
    REGISTERED = 'registered'
    APPROVED = 'approved'
    REJECTED = 'rejected'
    EDITED_TITLE = 'edit_title'
    EDITED_DESCRIPTION = 'edit_description'
    CATEGORY_UPDATED = 'category_updated'
    CONTRIB_ADDED = 'contributor_added'
    CONTRIB_REMOVED = 'contributor_removed'
    CONTRIB_REORDERED = 'contributors_reordered'
    PERMISSIONS_UPDATED = 'permissions_updated'
    MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
    MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
    AFFILIATED_INSTITUTION_ADDED = 'affiliated_institution_added'
    AFFILIATED_INSTITUTION_REMOVED = 'affiliated_institution_removed'
    CHANGED_LICENSE = 'license_changed'
    TAG_ADDED = 'tag_added'
    TAG_REMOVED = 'tag_removed'
    def __repr__(self):
        return ('<DraftRegistrationLog({self.action!r}, date={self.date!r}), '
                'user={self.user!r} '
                'with id {self._id!r}>').format(self=self)
    class Meta:
        # Newest first; ``created`` comes from BaseModel.
        ordering = ['-created']
        get_latest_by = 'created'
class DraftRegistration(ObjectIDMixin, RegistrationResponseMixin, DirtyFieldsMixin,
BaseModel, Loggable, EditableFieldsMixin, GuardianMixin):
# Fields that are writable by DraftRegistration.update
WRITABLE_WHITELIST = [
'title',
'description',
'category',
'node_license',
]
URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/drafts/{draft_id}'
# Overrides EditableFieldsMixin to make title not required
title = models.TextField(validators=[validate_title], blank=True, default='')
_contributors = models.ManyToManyField(OSFUser,
through=DraftRegistrationContributor,
related_name='draft_registrations')
affiliated_institutions = models.ManyToManyField('Institution', related_name='draft_registrations')
node_license = models.ForeignKey('NodeLicenseRecord', related_name='draft_registrations',
on_delete=models.SET_NULL, null=True, blank=True)
datetime_initiated = NonNaiveDateTimeField(auto_now_add=True)
datetime_updated = NonNaiveDateTimeField(auto_now=True)
deleted = NonNaiveDateTimeField(null=True, blank=True)
# Original Node a draft registration is associated with
branched_from = models.ForeignKey('AbstractNode', related_name='registered_draft',
null=True, on_delete=models.CASCADE)
initiator = models.ForeignKey('OSFUser', null=True, on_delete=models.CASCADE)
provider = models.ForeignKey(
'RegistrationProvider',
related_name='draft_registrations',
null=True,
on_delete=models.CASCADE,
)
# Dictionary field mapping question id to a question's comments and answer
# {
# <qid>: {
# 'comments': [{
# 'user': {
# 'id': <uid>,
# 'name': <name>
# },
# value: <value>,
# lastModified: <datetime>
# }],
# 'value': <value>
# }
# }
registration_metadata = DateTimeAwareJSONField(default=dict, blank=True)
registration_schema = models.ForeignKey('RegistrationSchema', null=True, on_delete=models.CASCADE)
registered_node = models.ForeignKey('Registration', null=True, blank=True,
related_name='draft_registration', on_delete=models.CASCADE)
approval = models.ForeignKey('DraftRegistrationApproval', null=True, blank=True, on_delete=models.CASCADE)
# Dictionary field mapping extra fields defined in the RegistrationSchema.schema to their
# values. Defaults should be provided in the schema (e.g. 'paymentSent': false),
# and these values are added to the DraftRegistration
# TODO: Use "FIELD_ALIASES"?
_metaschema_flags = DateTimeAwareJSONField(default=dict, blank=True)
notes = models.TextField(blank=True)
# For ContributorMixin
guardian_object_type = 'draft_registration'
READ_DRAFT_REGISTRATION = 'read_{}'.format(guardian_object_type)
WRITE_DRAFT_REGISTRATION = 'write_{}'.format(guardian_object_type)
ADMIN_DRAFT_REGISTRATION = 'admin_{}'.format(guardian_object_type)
# For ContributorMixin
base_perms = [READ_DRAFT_REGISTRATION, WRITE_DRAFT_REGISTRATION, ADMIN_DRAFT_REGISTRATION]
groups = {
'read': (READ_DRAFT_REGISTRATION,),
'write': (READ_DRAFT_REGISTRATION, WRITE_DRAFT_REGISTRATION,),
'admin': (READ_DRAFT_REGISTRATION, WRITE_DRAFT_REGISTRATION, ADMIN_DRAFT_REGISTRATION,)
}
group_format = 'draft_registration_{self.id}_{group}'
class Meta:
permissions = (
('read_draft_registration', 'Can read the draft registration'),
('write_draft_registration', 'Can edit the draft registration'),
('admin_draft_registration', 'Can manage the draft registration'),
)
def __repr__(self):
return ('<DraftRegistration(branched_from={self.branched_from!r}) '
'with id {self._id!r}>').format(self=self)
def get_registration_metadata(self, schema):
# Overrides RegistrationResponseMixin
return self.registration_metadata
@property
def file_storage_resource(self):
# Overrides RegistrationResponseMixin
return self.branched_from
# lazily set flags
@property
def flags(self):
if not self._metaschema_flags:
self._metaschema_flags = {}
meta_schema = self.registration_schema
if meta_schema:
schema = meta_schema.schema
flags = schema.get('flags', {})
dirty = False
for flag, value in flags.items():
if flag not in self._metaschema_flags:
self._metaschema_flags[flag] = value
dirty = True
if dirty:
self.save()
return self._metaschema_flags
@flags.setter
def flags(self, flags):
self._metaschema_flags.update(flags)
@property
def branched_from_type(self):
if isinstance(self.branched_from, (DraftNode, Node)):
return self.branched_from.__class__.__name__
else:
raise DraftRegistrationStateError
@property
def url(self):
return self.URL_TEMPLATE.format(
node_id=self.branched_from._id,
draft_id=self._id
)
@property
def _primary_key(self):
return self._id
@property
def absolute_url(self):
return urljoin(settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
# Old draft registration URL - user new endpoints, through draft registration
node = self.branched_from
branched_type = self.branched_from_type
if branched_type == 'DraftNode':
path = '/draft_registrations/{}/'.format(self._id)
elif branched_type == 'Node':
path = '/nodes/{}/draft_registrations/{}/'.format(node._id, self._id)
return api_v2_url(path)
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def requires_approval(self):
return self.registration_schema.requires_approval
@property
def is_pending_review(self):
return self.approval.is_pending_approval if (self.requires_approval and self.approval) else False
@property
def is_approved(self):
if self.requires_approval:
if not self.approval:
return bool(self.registered_node)
else:
return self.approval.is_approved
else:
return False
@property
def is_rejected(self):
if self.requires_approval:
if not self.approval:
return False
else:
return self.approval.is_rejected
else:
return False
@property
def status_logs(self):
""" List of logs associated with this node"""
return self.logs.all().order_by('date')
@property
def log_class(self):
# Override for EditableFieldsMixin
return DraftRegistrationLog
@property
def state_error(self):
# Override for ContributorMixin
return DraftRegistrationStateError
@property
def contributor_class(self):
# Override for ContributorMixin
return DraftRegistrationContributor
def get_contributor_order(self):
# Method needed for ContributorMixin
return self.get_draftregistrationcontributor_order()
def set_contributor_order(self, contributor_ids):
# Method needed for ContributorMixin
return self.set_draftregistrationcontributor_order(contributor_ids)
@property
def contributor_kwargs(self):
# Override for ContributorMixin
return {'draft_registration': self}
@property
def contributor_set(self):
# Override for ContributorMixin
return self.draftregistrationcontributor_set
@property
def order_by_contributor_field(self):
# Property needed for ContributorMixin
return 'draftregistrationcontributor___order'
@property
def admin_contributor_or_group_member_ids(self):
# Overrides ContributorMixin
# Draft Registrations don't have parents or group members at the moment, so this is just admin group member ids
# Called when removing project subscriptions
return self.get_group(ADMIN).user_set.filter(is_active=True).values_list('guids___id', flat=True)
@property
def creator(self):
# Convenience property for testing contributor methods, which are
# shared with other items that have creators
return self.initiator
@property
def is_public(self):
# Convenience property for sharing code with nodes
return False
@property
def log_params(self):
# Override for EditableFieldsMixin
return {
'draft_registration': self._id,
}
@property
def visible_contributors(self):
# Override for ContributorMixin
return OSFUser.objects.filter(
draftregistrationcontributor__draft_registration=self,
draftregistrationcontributor__visible=True
).order_by(self.order_by_contributor_field)
@property
def contributor_email_template(self):
# Override for ContributorMixin
return 'draft_registration'
@property
def institutions_url(self):
# For NodeInstitutionsRelationshipSerializer
path = '/draft_registrations/{}/institutions/'.format(self._id)
return api_v2_url(path)
@property
def institutions_relationship_url(self):
# For NodeInstitutionsRelationshipSerializer
path = '/draft_registrations/{}/relationships/institutions/'.format(self._id)
return api_v2_url(path)
def update_search(self):
# Override for AffiliatedInstitutionMixin, not sending DraftRegs to search
pass
def can_view(self, auth):
"""Does the user have permission to view the draft registration?
Checking permissions directly on the draft, not the node.
"""
if not auth:
return False
return auth.user and self.has_permission(auth.user, READ)
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this draft_registration.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this draft_registration.
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
return (user and self.has_permission(user, WRITE))
def get_addons(self):
# Override for ContributorMixin, Draft Registrations don't have addons
return []
# Override Taggable
def add_tag_log(self, tag, auth):
self.add_log(
action=DraftRegistrationLog.TAG_ADDED,
params={
'draft_registration': self._id,
'tag': tag.name
},
auth=auth,
save=False
)
@property
def license(self):
if self.node_license_id:
return self.node_license
return None
@property
def all_tags(self):
"""Return a queryset containing all of this draft's tags (incl. system tags)."""
# Tag's default manager only returns non-system tags, so we can't use self.tags
return Tag.all_tags.filter(draftregistration_tagged=self)
@property
def system_tags(self):
"""The system tags associated with this draft registration. This currently returns a list of string
names for the tags, for compatibility with v1. Eventually, we can just return the
QuerySet.
"""
return self.all_tags.filter(system=True).values_list('name', flat=True)
@classmethod
def create_from_node(cls, user, schema, node=None, data=None, provider=None):
if not provider:
provider = RegistrationProvider.load('osf')
if not node:
# If no node provided, a DraftNode is created for you
node = DraftNode.objects.create(creator=user, title='Untitled')
if not (isinstance(node, Node) or isinstance(node, DraftNode)):
raise DraftRegistrationStateError()
draft = cls(
initiator=user,
branched_from=node,
registration_schema=schema,
registration_metadata=data or {},
provider=provider,
)
draft.save()
draft.copy_editable_fields(node, Auth(user), save=True, contributors=False)
draft.update(data)
return draft
def get_root(self):
return self
def copy_contributors_from(self, resource):
"""
Copies the contibutors from the resource (including permissions and visibility)
into this draft registration.
Visibility, order, draft, and user are stored in DraftRegistrationContributor table.
Permissions are stored in guardian tables (use add_permission)
"""
contribs = []
current_contributors = self.contributor_set.values_list('user_id', flat=True)
for contrib in resource.contributor_set.all():
if contrib.user.id not in current_contributors:
permission = contrib.permission
new_contrib = DraftRegistrationContributor(
draft_registration=self,
_order=contrib._order,
visible=contrib.visible,
user=contrib.user
)
contribs.append(new_contrib)
self.add_permission(contrib.user, permission, save=True)
DraftRegistrationContributor.objects.bulk_create(contribs)
    def update_metadata(self, metadata):
        """Merge *metadata* into ``registration_metadata`` and return the
        list of question ids whose answers changed.

        Per-question comments are merged by their ``created`` timestamp so
        that new comments are added without dropping existing ones. No
        merging happens on approved drafts (comments are frozen), but the
        final ``update`` below still applies the raw *metadata*.
        Also mirrors the result into ``registration_responses`` (the new,
        flattened workflow representation).
        """
        changes = []
        # Prevent comments on approved drafts
        if not self.is_approved:
            for question_id, value in metadata.items():
                old_value = self.registration_metadata.get(question_id)
                if old_value:
                    # Index both comment lists by creation time, then merge;
                    # incoming comments win on timestamp collisions.
                    old_comments = {
                        comment['created']: comment
                        for comment in old_value.get('comments', [])
                    }
                    new_comments = {
                        comment['created']: comment
                        for comment in value.get('comments', [])
                    }
                    old_comments.update(new_comments)
                    metadata[question_id]['comments'] = sorted(
                        old_comments.values(),
                        key=lambda c: c['created']
                    )
                    if old_value.get('value') != value.get('value'):
                        changes.append(question_id)
                else:
                    # Brand-new question: always counts as a change.
                    changes.append(question_id)
        self.registration_metadata.update(metadata)
        # Write to registration_responses also (new workflow)
        registration_responses = self.flatten_registration_metadata()
        self.registration_responses.update(registration_responses)
        return changes
def update_registration_responses(self, registration_responses):
"""
New workflow - update_registration_responses. This should have been
validated before this method is called. If writing to registration_responses
field, persist the expanded version of this to Draft.registration_metadata.
"""
registration_responses = self.unescape_registration_file_names(registration_responses)
self.registration_responses.update(registration_responses)
registration_metadata = self.expand_registration_responses()
self.registration_metadata = registration_metadata
return
def unescape_registration_file_names(self, registration_responses):
if registration_responses.get('uploader', []):
for upload in registration_responses.get('uploader', []):
upload['file_name'] = html.unescape(upload['file_name'])
return registration_responses
    def submit_for_review(self, initiated_by, meta, save=False):
        """Attach a new DraftRegistrationApproval to this draft and log it.

        :param initiated_by: user submitting the draft for review
        :param meta: dict stored on the approval record
        :param save: whether to save the draft after attaching the approval
        """
        approval = DraftRegistrationApproval(
            meta=meta
        )
        approval.save()
        self.approval = approval
        self.add_status_log(initiated_by, DraftRegistrationLog.SUBMITTED)
        if save:
            self.save()
    def register(self, auth, save=False, child_ids=None):
        """Create a Registration from this draft via its source node.

        :param Auth auth: Auth of the registering user
        :param bool save: whether to save the draft after registering
        :param child_ids: optional subset of child node ids to register
        :raises NodeStateError: if the draft has no title
        :returns: the new Registration
        """
        node = self.branched_from
        if not self.title:
            raise NodeStateError('Draft Registration must have title to be registered')
        # Create the registration
        register = node.register_node(
            schema=self.registration_schema,
            auth=auth,
            draft_registration=self,
            child_ids=child_ids,
            provider=self.provider
        )
        self.registered_node = register
        self.add_status_log(auth.user, DraftRegistrationLog.REGISTERED)
        # Mirror the node's contributor list (permissions/visibility/order)
        # onto the draft itself.
        self.copy_contributors_from(node)
        if save:
            self.save()
        return register
    def approve(self, user):
        """Approve the attached DraftRegistrationApproval as *user* and log it."""
        self.approval.approve(user)
        # Reload so state changed by approve() is reflected before logging.
        self.refresh_from_db()
        self.add_status_log(user, DraftRegistrationLog.APPROVED)
        self.approval.save()
    def reject(self, user):
        """Reject the attached DraftRegistrationApproval as *user* and log it."""
        self.approval.reject(user)
        self.add_status_log(user, DraftRegistrationLog.REJECTED)
        self.approval.save()
def add_status_log(self, user, action):
params = {
'draft_registration': self._id,
},
log = DraftRegistrationLog(action=action, user=user, draft=self, params=params)
log.save()
def validate_metadata(self, *args, **kwargs):
    """Validate the draft's metadata by delegating to its registration schema."""
    schema = self.registration_schema
    return schema.validate_metadata(*args, **kwargs)
def validate_registration_responses(self, *args, **kwargs):
    """Validate the draft's registration_responses via its registration schema."""
    schema = self.registration_schema
    return schema.validate_registration_responses(*args, **kwargs)
def add_log(self, action, params, auth, save=True):
    """Create and persist a DraftRegistrationLog entry for this draft.

    Tentative - probably need to combine with add_status_log.

    :param action: log action constant
    :param dict params: log params; 'draft_registration' defaults to this draft
    :param auth: optional Auth object (its user is recorded on the log)
    :param bool save: unused, kept for interface compatibility
    """
    actor = auth.user if auth else None
    # Fall back to this draft's id when the caller didn't provide one
    # (or provided a falsy one) — matches the original `or` semantics.
    if not params.get('draft_registration'):
        params['draft_registration'] = self._id
    entry = DraftRegistrationLog(
        action=action,
        user=actor,
        params=params,
        draft=self,
    )
    entry.save()
    return entry
# Overrides ContributorMixin
def _add_related_source_tags(self, contributor):
    """Intentional no-op override (see ContributorMixin)."""
    # The related source tag behavior for draft registration is currently undefined
    # Therefore we don't add any source tags to it
    pass
def save(self, *args, **kwargs):
    """Save the draft, dropping the 'old_subjects' kwarg.

    'old_subjects' is passed by the subjects machinery but is not accepted
    by the base model's save(); pop() with a default replaces the original
    membership-test-then-pop pair in a single call.
    """
    kwargs.pop('old_subjects', None)
    return super(DraftRegistration, self).save(*args, **kwargs)
def update(self, fields, auth=None, save=True):
    """Update the draft registration with the given fields.
    :param dict fields: Dictionary of field_name:value pairs.
    :param Auth auth: Auth object for the user making the update.
    :param bool save: Whether to save after updating the object.
    """
    if not fields:  # Bail out early if there are no fields to update
        return False
    for key, value in fields.items():
        # Fields not on the write whitelist are silently skipped.
        if key not in self.WRITABLE_WHITELIST:
            continue
        if key == 'title':
            self.set_title(title=value, auth=auth, save=False, allow_blank=True)
        elif key == 'description':
            self.set_description(description=value, auth=auth, save=False)
        elif key == 'category':
            self.set_category(category=value, auth=auth, save=False)
        elif key == 'node_license':
            self.set_node_license(
                {
                    'id': value.get('id'),
                    'year': value.get('year'),
                    # Accept both camelCase and snake_case keys from callers.
                    'copyrightHolders': value.get('copyrightHolders') or value.get('copyright_holders', [])
                },
                auth,
                # NOTE(review): unlike the setters above (save=False), this
                # forwards the caller's `save` flag — confirm whether the
                # inconsistency is intentional.
                save=save
            )
    if save:
        # get_dirty_fields() before save() captures what actually changed.
        updated = self.get_dirty_fields()
        self.save()
        return updated
class DraftRegistrationUserObjectPermission(UserObjectPermissionBase):
    """
    Direct Foreign Key Table for guardian - User models - we typically add object
    perms directly to Django groups instead of users, so this will be used infrequently
    """
    # Row is removed automatically when the draft registration is deleted.
    content_object = models.ForeignKey(DraftRegistration, on_delete=models.CASCADE)
class DraftRegistrationGroupObjectPermission(GroupObjectPermissionBase):
    """
    Direct Foreign Key Table for guardian - Group models. Makes permission checks faster.
    This table gives a Django group a particular permission to a DraftRegistration.
    For example, every time a draft reg is created, an admin, write, and read Django group
    are created for the draft reg. The "write" group has write/read perms to the draft reg.
    Those links are stored here: content_object_id (draft_registration_id), group_id, permission_id
    """
    # Row is removed automatically when the draft registration is deleted.
    content_object = models.ForeignKey(DraftRegistration, on_delete=models.CASCADE)
@receiver(post_save, sender='osf.DraftRegistration')
def create_django_groups_for_draft_registration(sender, instance, created, **kwargs):
    """post_save hook: set up permission groups and the initiator contributor.

    Runs only on creation. The initiator is added as a contributor, copying
    visibility/order from the branched-from node when they are already a
    contributor there, otherwise defaulting to visible.
    """
    if not created:
        return
    instance.update_group_permissions()
    initiator = instance.initiator
    node_contributors = instance.branched_from.contributor_set
    if node_contributors.filter(user=initiator).exists():
        # Mirror the initiator's visibility and ordering from the source node.
        node_contributor = node_contributors.get(user=initiator)
        DraftRegistrationContributor.objects.get_or_create(
            user=initiator,
            draft_registration=instance,
            visible=node_contributor.visible,
            _order=node_contributor._order,
        )
    else:
        DraftRegistrationContributor.objects.get_or_create(
            user=initiator,
            draft_registration=instance,
            visible=True,
        )
    instance.add_permission(initiator, ADMIN)
| 37.567891 | 164 | 0.645936 |
acfb1839931eabe9e27d766ec391f61a036b5dc4 | 14,398 | py | Python | faebryk/library/library/components.py | ruben-iteng/faebryk | 58810da4cb24581f421c39784ccf61e1a4ea8ae5 | [
"MIT"
] | 7 | 2021-11-22T20:02:14.000Z | 2022-03-04T19:35:04.000Z | faebryk/library/library/components.py | ruben-iteng/faebryk | 58810da4cb24581f421c39784ccf61e1a4ea8ae5 | [
"MIT"
] | 45 | 2021-11-22T20:24:40.000Z | 2022-03-25T11:01:28.000Z | faebryk/library/library/components.py | ruben-iteng/faebryk | 58810da4cb24581f421c39784ccf61e1a4ea8ae5 | [
"MIT"
] | 3 | 2021-11-22T19:58:08.000Z | 2021-12-17T16:14:08.000Z | # This file is part of the faebryk project
# SPDX-License-Identifier: MIT
import logging
from faebryk.library.traits import component
from faebryk.library.traits.component import (
contructable_from_component,
has_defined_footprint,
has_defined_footprint_pinmap,
has_defined_type_description,
has_footprint_pinmap,
has_interfaces,
has_interfaces_list,
has_symmetric_footprint_pinmap,
has_type_description,
)
from faebryk.library.traits.interface import contructable_from_interface_list
logger = logging.getLogger("library")
from faebryk.library.core import Component, ComponentTrait, Interface, Parameter
from faebryk.library.library.interfaces import Electrical, Power
from faebryk.library.library.parameters import Constant
from faebryk.library.traits import *
from faebryk.library.util import get_all_interfaces, times, unit_map
class Resistor(Component):
    """Two-terminal resistor with a parametric resistance value."""

    def _setup_traits(self):
        class _contructable_from_component(contructable_from_component):
            @staticmethod
            def from_component(comp: Component, resistance: Parameter) -> Resistor:
                assert comp.has_trait(has_interfaces)
                interfaces = comp.get_trait(has_interfaces).get_interfaces()
                assert len(interfaces) == 2
                assert len([i for i in interfaces if type(i) is not Electrical]) == 0

                r = Resistor.__new__(Resistor)
                # Bug fix: the original called the non-existent
                # r._setup_resistance(); the method is set_resistance().
                r.set_resistance(resistance)
                r.interfaces = interfaces
                r.get_trait(has_interfaces).set_interface_comp()
                return r

        self.add_trait(has_interfaces_list())
        self.add_trait(_contructable_from_component())

    def _setup_interfaces(self):
        # Two interchangeable electrical terminals.
        self.interfaces = times(2, Electrical)
        self.get_trait(has_interfaces).set_interface_comp()

    def __new__(cls, *args, **kwargs):
        self = super().__new__(cls)
        self._setup_traits()
        return self

    def __init__(self, resistance: Parameter):
        super().__init__()
        self._setup_interfaces()
        self.set_resistance(resistance)

    def set_resistance(self, resistance: Parameter):
        """Set the resistance; a Constant value also yields a type description."""
        self.resistance = resistance

        if type(resistance) is not Constant:
            # TODO this is a bit ugly
            # it might be that there was another more abstract valid trait
            # but this challenges the whole trait overriding mechanism
            # might have to make a trait stack thats popped or so
            self.del_trait(has_type_description)
            return

        class _has_type_description(has_type_description):
            @staticmethod
            def get_type_description():
                resistance = self.resistance
                return unit_map(
                    resistance.value, ["µΩ", "mΩ", "Ω", "KΩ", "MΩ", "GΩ"], start="Ω"
                )

        self.add_trait(_has_type_description())
class Capacitor(Component):
    """Two-terminal capacitor with a parametric capacitance value."""

    def _setup_traits(self):
        class _has_interfaces(has_interfaces):
            @staticmethod
            # Bug fix: the original annotation was `list(Interface)`, which
            # *calls* list() on the Interface class and raises TypeError the
            # moment this def is evaluated; `list[Interface]` is the
            # subscripted generic used elsewhere in this file.
            def get_interfaces() -> list[Interface]:
                return self.interfaces

        class _contructable_from_component(contructable_from_component):
            @staticmethod
            def from_component(comp: Component, capacitance: Parameter) -> Capacitor:
                assert comp.has_trait(has_interfaces)
                interfaces = comp.get_trait(has_interfaces).get_interfaces()
                assert len(interfaces) == 2
                assert len([i for i in interfaces if type(i) is not Electrical]) == 0

                c = Capacitor.__new__(Capacitor)
                # Bug fix: the original called the non-existent
                # c._setup_capacitance(); the method is set_capacitance().
                c.set_capacitance(capacitance)
                c.interfaces = interfaces
                return c

        self.add_trait(_has_interfaces())
        self.add_trait(_contructable_from_component())

    def _setup_interfaces(self):
        self.interfaces = [Electrical(), Electrical()]

    def __new__(cls, *args, **kwargs):
        self = super().__new__(cls)
        self._setup_traits()
        return self

    def __init__(self, capacitance: Parameter):
        super().__init__()
        self._setup_interfaces()
        self.set_capacitance(capacitance)

    def set_capacitance(self, capacitance: Parameter):
        """Set the capacitance; a Constant value also yields a type description."""
        self.capacitance = capacitance

        if type(capacitance) is not Constant:
            # NOTE(review): unlike Resistor.set_resistance, this does not
            # del_trait(has_type_description) — confirm whether intentional.
            return

        class _has_type_description(has_type_description):
            @staticmethod
            def get_type_description():
                capacitance = self.capacitance
                return unit_map(
                    capacitance.value, ["µF", "mF", "F", "KF", "MF", "GF"], start="F"
                )

        self.add_trait(_has_type_description())
class LED(Component):
    """Light-emitting diode with anode/cathode terminals and an optional
    trait for computing the required series resistance."""

    class has_calculatable_needed_series_resistance(ComponentTrait):
        @staticmethod
        def get_needed_series_resistance_ohm(input_voltage_V) -> int:
            # Bug fix: `raise NotImplemented` raises the NotImplemented
            # *constant* (a TypeError in Python 3); the exception class is
            # NotImplementedError.
            raise NotImplementedError

    def _setup_traits(self):
        class _has_interfaces(has_interfaces):
            @staticmethod
            def get_interfaces() -> list[Interface]:
                return [self.anode, self.cathode]

        self.add_trait(has_defined_type_description("LED"))
        self.add_trait(_has_interfaces())

    def _setup_interfaces(self):
        self.anode = Electrical()
        self.cathode = Electrical()
        self.get_trait(has_interfaces).set_interface_comp()

    def __new__(cls):
        self = super().__new__(cls)
        self._setup_traits()
        return self

    def __init__(self) -> None:
        super().__init__()
        self._setup_interfaces()

    def set_forward_parameters(self, voltage_V: Parameter, current_A: Parameter):
        """When both forward voltage and current are Constant, expose the
        series-resistance calculation as a trait."""
        if type(voltage_V) is Constant and type(current_A) is Constant:

            class _(self.has_calculatable_needed_series_resistance):
                @staticmethod
                def get_needed_series_resistance_ohm(input_voltage_V) -> int:
                    return LED.needed_series_resistance_ohm(
                        input_voltage_V, voltage_V.value, current_A.value
                    )

            self.add_trait(_())

    @staticmethod
    def needed_series_resistance_ohm(
        input_voltage_V, forward_voltage_V, forward_current_A
    ) -> Constant:
        """R = (V_in - V_f) / I_f, wrapped in a Constant parameter."""
        return Constant((input_voltage_V - forward_voltage_V) / forward_current_A)
class Switch(Component):
    """Simple two-terminal switch (type description "SW")."""

    def _setup_traits(self):
        # Same traits, same order, as before: fixed type description plus
        # the generic interface-list trait.
        for trait in (has_defined_type_description("SW"), has_interfaces_list()):
            self.add_trait(trait)

    def _setup_interfaces(self):
        # Two interchangeable electrical terminals, registered on this component.
        self.interfaces = times(2, Electrical)
        self.get_trait(has_interfaces).set_interface_comp()

    def __new__(cls):
        instance = super().__new__(cls)
        instance._setup_traits()
        return instance

    def __init__(self) -> None:
        super().__init__()
        self._setup_interfaces()
class NAND(Component):
    """N-input NAND gate component with power, inputs and one output."""

    def _setup_traits(self):
        class _has_interfaces(has_interfaces):
            @staticmethod
            def get_interfaces():
                return get_all_interfaces([self.power, self.output, *self.inputs])

        class _constructable_from_component(contructable_from_component):
            @staticmethod
            def from_comp(comp: Component) -> NAND:
                n = NAND.__new__(NAND)
                n._init_from_comp(comp)
                return n

        self.add_trait(_has_interfaces())
        self.add_trait(_constructable_from_component())

    def _setup_power(self):
        self.power = Power()

    def _setup_inouts(self, input_cnt):
        self.output = Electrical()
        self.inputs = times(input_cnt, Electrical)
        self._set_interface_comp()

    def _set_interface_comp(self):
        self.get_trait(has_interfaces).set_interface_comp()

    def __new__(cls, *args, **kwargs):
        self = super().__new__(cls)
        self._setup_traits()
        return self

    def __init__(self, input_cnt: int):
        super().__init__()
        self._setup_power()
        self._setup_inouts(input_cnt)
        self.input_cnt = input_cnt

    # Bug fix: this was named __init_from_comp (double underscore). The call
    # site lives inside the nested _constructable_from_component class, so
    # private name mangling rewrote it to
    # n._constructable_from_component__init_from_comp -> AttributeError.
    # A single leading underscore avoids mangling and matches CD4011's
    # _init_from_comp naming.
    def _init_from_comp(self, comp: Component):
        dummy = NAND(2)
        base_cnt = len(get_all_interfaces(dummy))

        assert comp.has_trait(has_interfaces)
        interfaces = comp.get_trait(has_interfaces).get_interfaces()
        assert len(interfaces) >= base_cnt
        assert len([i for i in interfaces if type(i) is not Electrical]) == 0

        it = iter(interfaces)
        self.power = (
            Power().get_trait(contructable_from_interface_list).from_interfaces(it)
        )
        self.output = (
            Electrical().get_trait(contructable_from_interface_list).from_interfaces(it)
        )
        # Bug fix: the original iterated `self.inputs`, which does not exist
        # yet on an instance built via __new__. Use the 2-input template this
        # method is written against (dummy = NAND(2) above).
        self.inputs = [
            Electrical().get_trait(contructable_from_interface_list).from_interfaces(it)
            for _ in dummy.inputs
        ]
        self.input_cnt = len(self.inputs)
        self._set_interface_comp()
class CD4011(Component):
    """Quad 2-input NAND gate IC, modeled as four internal NAND components
    whose inputs/outputs are exposed through the in_outs interfaces."""

    class constructable_from_nands(ComponentTrait):
        @staticmethod
        def from_comp(comp: Component):
            # Bug fix: `raise NotImplemented` raises the NotImplemented
            # *constant* (a TypeError in Python 3); the exception class is
            # NotImplementedError.
            raise NotImplementedError

    def _setup_traits(self):
        class _has_interfaces(has_interfaces):
            @staticmethod
            def get_interfaces():
                return get_all_interfaces([self.power, *self.in_outs])

        class _constructable_from_component(contructable_from_component):
            @staticmethod
            def from_comp(comp: Component) -> CD4011:
                c = CD4011.__new__(CD4011)
                c._init_from_comp(comp)
                return c

        class _constructable_from_nands(self.constructable_from_nands):
            @staticmethod
            def from_nands(nands: list[NAND]) -> CD4011:
                c = CD4011.__new__(CD4011)
                c._init_from_nands(nands)
                return c

        self.add_trait(_has_interfaces())
        self.add_trait(_constructable_from_component())
        self.add_trait(_constructable_from_nands())
        self.add_trait(has_defined_type_description("cd4011"))

    def _setup_power(self):
        self.power = Power()

    def _setup_nands(self):
        # Four 2-input NAND gates; pin mapping within a gate is symmetric.
        self.nands = times(4, lambda: NAND(input_cnt=2))
        for n in self.nands:
            n.add_trait(has_symmetric_footprint_pinmap())

    def _setup_inouts(self):
        nand_inout_interfaces = [
            i for n in self.nands for i in get_all_interfaces([n.output, *n.inputs])
        ]
        self.in_outs = times(len(nand_inout_interfaces), Electrical)

    def _setup_internal_connections(self):
        """Wire each internal NAND's power and I/O to this IC's interfaces,
        recording the mapping in self.connection_map."""
        self.get_trait(has_interfaces).set_interface_comp()

        self.connection_map = {}
        it = iter(self.in_outs)
        for n in self.nands:
            n.power.connect(self.power)
            target = next(it)
            target.connect(n.output)
            self.connection_map[n.output] = target
            for i in n.inputs:
                target = next(it)
                target.connect(i)
                self.connection_map[i] = target

        # TODO
        # assert(len(self.interfaces) == 14)

    def __new__(cls):
        self = super().__new__(cls)
        CD4011._setup_traits(self)
        return self

    def __init__(self):
        super().__init__()

        # setup
        self._setup_power()
        self._setup_nands()
        self._setup_inouts()
        self._setup_internal_connections()

    def _init_from_comp(self, comp: Component):
        # checks
        assert comp.has_trait(has_interfaces)
        interfaces = comp.get_trait(has_interfaces).get_interfaces()
        assert len(interfaces) == len(self.get_trait(has_interfaces).get_interfaces())
        assert len([i for i in interfaces if type(i) is not Electrical]) == 0

        it = iter(interfaces)

        # setup
        self.power = (
            Power().get_trait(contructable_from_interface_list).from_interfaces(it)
        )
        self._setup_nands()
        # NOTE(review): this passes each single interface `i` to
        # from_interfaces(), whereas NAND._init_from_comp passes the iterator
        # `it` — confirm which signature contructable_from_interface_list
        # actually expects.
        self.in_outs = [
            Electrical().get_trait(contructable_from_interface_list).from_interfaces(i)
            for i in it
        ]
        self._setup_internal_connections()

    def _init_from_nands(self, nands: list[NAND]):
        # checks
        assert len(nands) <= 4
        cd_nands = list(nands)
        # Pad with fresh 2-input NANDs so the IC always models four gates.
        cd_nands += times(4 - len(cd_nands), lambda: NAND(input_cnt=2))

        for nand in cd_nands:
            assert nand.input_cnt == 2

        # setup
        self._setup_power()
        self.nands = cd_nands
        self._setup_inouts()
        self._setup_internal_connections()
class TI_CD4011BE(CD4011):
    """Texas Instruments CD4011BE: a concrete CD4011 in a 14-pin DIP
    package, adding footprint and pin-map traits to the generic model."""

    def __init__(self):
        super().__init__()

    def __new__(cls):
        self = super().__new__(cls)
        TI_CD4011BE._setup_traits(self)
        return self

    def _setup_traits(self):
        # Imported here, not at module top — presumably to avoid a circular
        # import with the footprints module; TODO confirm.
        from faebryk.library.library.footprints import DIP

        self.add_trait(
            has_defined_footprint(DIP(pin_cnt=14, spacing_mm=7.62, long_pads=False))
        )

        class _has_footprint_pinmap(has_footprint_pinmap):
            def __init__(self, component: Component) -> None:
                super().__init__()
                self.component = component

            def get_pin_map(self):
                """Map physical DIP-14 pin numbers to the IC's external
                interfaces (via the CD4011 connection_map)."""
                component = self.component
                return {
                    7: component.power.lv,
                    14: component.power.hv,
                    3: component.connection_map[component.nands[0].output],
                    4: component.connection_map[component.nands[1].output],
                    11: component.connection_map[component.nands[2].output],
                    10: component.connection_map[component.nands[3].output],
                    1: component.connection_map[component.nands[0].inputs[0]],
                    2: component.connection_map[component.nands[0].inputs[1]],
                    5: component.connection_map[component.nands[1].inputs[0]],
                    6: component.connection_map[component.nands[1].inputs[1]],
                    12: component.connection_map[component.nands[2].inputs[0]],
                    13: component.connection_map[component.nands[2].inputs[1]],
                    9: component.connection_map[component.nands[3].inputs[0]],
                    8: component.connection_map[component.nands[3].inputs[1]],
                }

        self.add_trait(_has_footprint_pinmap(self))
| 33.098851 | 88 | 0.624323 |
acfb1879f4a3600bebfcbb5c58fe5563f4d92971 | 17,661 | py | Python | server/models/model_base.py | jideobs/flask-gae-ndb-starter | 776a9ea967524f4a88debb6f00e4d39f15b4e799 | [
"MIT"
] | 2 | 2017-08-13T09:20:17.000Z | 2017-08-13T18:19:09.000Z | server/models/model_base.py | jideobs/flask-gae-ndb-starter | 776a9ea967524f4a88debb6f00e4d39f15b4e799 | [
"MIT"
] | null | null | null | server/models/model_base.py | jideobs/flask-gae-ndb-starter | 776a9ea967524f4a88debb6f00e4d39f15b4e799 | [
"MIT"
] | null | null | null | from google.appengine.ext import ndb
from flask import request
from flask_restful import abort
from flask_login import current_user
import datetime as main_datetime
import functools
from server import utils
from google.appengine.datastore import datastore_query
from google.appengine.datastore.datastore_query import datastore_errors
from server.commons import exceptions
DEFAULT_FETCH_LIMIT = 10
UNIQUE_ID = 'id'
QUERY_FIELDS = 'query_fields'
NEXT_PAGE = 'next_page'
PROPERTY_COLLISION_TEMPLATE = ('Name conflict: %s set as an NDB property and '
'an Endpoints alias property.')
def _verify_property(modelclass, attr_name):
"""Return a property if set on a model class, otherwise raises an exception.
Args:
modelclass: A subclass of EndpointsModel which has a
_GetEndpointsProperty method.
attr_name: String; the name of the property.
Returns:
The property set at the attribute name.
Raises:
AttributeError: if the property is not set on the class.
"""
prop = modelclass._GetEndpointsProperty(attr_name)
if prop is None:
error_msg = ('The attribute %s is not an accepted field. Accepted fields '
'are limited to NDB properties and Endpoints alias '
'properties.' % (attr_name,))
raise AttributeError(error_msg)
return prop
# Code adapted from endpoints_proto_datastore lib.
# Code adapted from endpoints_proto_datastore lib.
class _EndpointsQueryInfo(object):
    """A custom container for query information.

    This will be set on an EndpointsModel (or subclass) instance, and can be used
    in conjunction with alias properties to store query information, simple
    filters, ordering and ancestor.

    Uses an entity to construct simple filters, to validate ordering, to validate
    ancestor and finally to construct a query from these filters, ordering and/or
    ancestor.

    Attributes:
      _entity: An instance of EndpointsModel or a subclass. The values from this
          will be used to create filters for a query.
      _filters: A set of simple equality filters (ndb.FilterNode). Utilizes the
          fact that FilterNodes are hashable and respect equality.
      _ancestor: An ndb Key to be used as an ancestor for a query.
      _cursor: A datastore_query.Cursor, to be used for resuming a query.
      _limit: A positive integer, to be used in a fetch.
      _order: String; comma separated list of property names or property names
          preceded by a minus sign. Used to define an order of query results.
      _order_attrs: The attributes (or negation of attributes) parsed from
          _order. If these can't be parsed from the attributes in _entity, will
          throw an exception.
      _query_final: A final query created using the orders (_order_attrs), filters
          (_filters) and class definition (_entity) in the query info. If this is
          not null, setting attributes on the query info object will fail.
    """

    def __init__(self, entity):
        """Sets all internal variables to the default values and verifies entity.

        Args:
          entity: An instance of EndpointsModel or a subclass.

        Raises:
          TypeError: if entity is not an instance of EndpointsModel or a subclass.
        """
        if not isinstance(entity, ModelBase):
            raise TypeError('Query info can only be used with an instance of an '
                            'EndpointsModel subclass. Received: instance of %s.' %
                            (entity.__class__.__name__,))
        self._entity = entity

        self._filters = set()
        self._ancestor = None
        self._cursor = None
        self._limit = None

        self._order = None
        self._order_attrs = ()

        self._query_final = None

    def _PopulateFilters(self):
        """Populates filters in query info by using values set on the entity."""
        entity = self._entity
        for prop in entity._properties.itervalues():
            # _retrieve_value returns None for properties never set on the entity.
            current_value = prop._retrieve_value(entity)
            if prop._repeated:
                if current_value is not None:
                    raise ValueError('No queries on repeated values are allowed.')
                continue

            # Only filter for non-null values
            if current_value is not None:
                self._AddFilter(prop == current_value)

    def SetQuery(self):
        """Sets the final query on the query info object.

        Uses the filters and orders in the query info to refine the query. If the
        final query is already set, does nothing.
        """
        if self._query_final is not None:
            return

        self._PopulateFilters()

        # _entity.query calls the classmethod for the entity
        if self.ancestor is not None:
            query = self._entity.query(ancestor=self.ancestor)
        else:
            query = self._entity.query()

        for simple_filter in self._filters:
            query = query.filter(simple_filter)
        for order_attr in self._order_attrs:
            query = query.order(order_attr)

        self._query_final = query

    def _AddFilter(self, candidate_filter):
        """Checks a filter and sets it in the filter set.

        Args:
          candidate_filter: An NDB filter which may be added to the query info.

        Raises:
          AttributeError: if query on the object is already final.
          TypeError: if the filter is not a simple filter (FilterNode).
          ValueError: if the operator symbol in the filter is not equality.
        """
        if self._query_final is not None:
            raise AttributeError('Can\'t add more filters. Query info is final.')

        if not isinstance(candidate_filter, ndb.FilterNode):
            raise TypeError('Only simple filters can be used. Received: %s.' %
                            (candidate_filter,))
        # Reaching into the name-mangled attribute is the only way to inspect
        # a FilterNode's operator in ndb.
        opsymbol = candidate_filter._FilterNode__opsymbol
        if opsymbol != '=':
            raise ValueError('Only equality filters allowed. Received: %s.' %
                             (opsymbol,))

        self._filters.add(candidate_filter)

    @property
    def query(self):
        """Public getter for the final query on query info."""
        return self._query_final

    def _GetAncestor(self):
        """Getter to be used for public ancestor property on query info."""
        return self._ancestor

    def _SetAncestor(self, value):
        """Setter to be used for public ancestor property on query info.

        Args:
          value: A potential value for an ancestor.

        Raises:
          AttributeError: if query on the object is already final.
          AttributeError: if the ancestor has already been set.
          TypeError: if the value to be set is not an instance of ndb.Key.
        """
        if self._query_final is not None:
            raise AttributeError('Can\'t set ancestor. Query info is final.')

        if self._ancestor is not None:
            raise AttributeError('Ancestor can\'t be set twice.')
        if not isinstance(value, ndb.Key):
            raise TypeError('Ancestor must be an instance of ndb.Key.')
        self._ancestor = value

    ancestor = property(fget=_GetAncestor, fset=_SetAncestor)

    def _GetCursor(self):
        """Getter to be used for public cursor property on query info."""
        return self._cursor

    def _SetCursor(self, value):
        """Setter to be used for public cursor property on query info.

        Args:
          value: A potential value for a cursor.

        Raises:
          AttributeError: if query on the object is already final.
          AttributeError: if the cursor has already been set.
          TypeError: if the value to be set is not an instance of
              datastore_query.Cursor.
        """
        if self._query_final is not None:
            raise AttributeError('Can\'t set cursor. Query info is final.')

        if self._cursor is not None:
            raise AttributeError('Cursor can\'t be set twice.')
        if not isinstance(value, datastore_query.Cursor):
            raise TypeError('Cursor must be an instance of datastore_query.Cursor.')
        self._cursor = value

    cursor = property(fget=_GetCursor, fset=_SetCursor)

    def _GetLimit(self):
        """Getter to be used for public limit property on query info."""
        return self._limit

    def _SetLimit(self, value):
        """Setter to be used for public limit property on query info.

        Args:
          value: A potential value for a limit.

        Raises:
          AttributeError: if query on the object is already final.
          AttributeError: if the limit has already been set.
          TypeError: if the value to be set is not a positive integer.
        """
        if self._query_final is not None:
            raise AttributeError('Can\'t set limit. Query info is final.')

        if self._limit is not None:
            raise AttributeError('Limit can\'t be set twice.')
        if not isinstance(value, (int, long)) or value < 1:
            raise TypeError('Limit must be a positive integer.')
        self._limit = value

    limit = property(fget=_GetLimit, fset=_SetLimit)

    def _GetOrder(self):
        """Getter to be used for public order property on query info."""
        return self._order

    def _SetOrderAttrs(self):
        """Helper method to set _order_attrs using the value of _order.

        If _order is not set, simply returns, else splits _order by commas and then
        looks up each value (or its negation) in the _properties of the entity on
        the query info object.

        We look up directly in _properties rather than using the attribute names
        on the object since only NDB property names will be used for field names.

        Raises:
          AttributeError: if one of the attributes in the order is not a property
              on the entity.
        """
        if self._order is None:
            return

        unclean_attr_names = self._order.strip().split(',')
        result = []
        for attr_name in unclean_attr_names:
            ascending = True
            if attr_name.startswith('-'):
                ascending = False
                attr_name = attr_name[1:]

            attr = self._entity._properties.get(attr_name)
            if attr is None:
                raise AttributeError('Order attribute %s not defined.' % (attr_name,))

            # Unary +/- on an NDB property produces an ascending/descending
            # order object for Query.order().
            if ascending:
                result.append(+attr)
            else:
                result.append(-attr)

        self._order_attrs = tuple(result)

    def _SetOrder(self, value):
        """Setter to be used for public order property on query info.

        Sets the value of _order and attempts to set _order_attrs as well
        by valling _SetOrderAttrs, which uses the value of _order.

        If the passed in value is None, but the query is not final and the
        order has not already been set, the method will return without any
        errors or data changed.

        Args:
          value: A potential value for an order.

        Raises:
          AttributeError: if query on the object is already final.
          AttributeError: if the order has already been set.
          TypeError: if the order to be set is not a string.
        """
        if self._query_final is not None:
            raise AttributeError('Can\'t set order. Query info is final.')

        if self._order is not None:
            raise AttributeError('Order can\'t be set twice.')

        if value is None:
            return
        elif not isinstance(value, basestring):
            raise TypeError('Order must be a string.')

        self._order = value
        self._SetOrderAttrs()

    order = property(fget=_GetOrder, fset=_SetOrder)
class ModelBase(ndb.Model):
_alias_properties = None
def __init__(self, *args, **kwargs):
super(ModelBase, self).__init__(*args, **kwargs)
self._endpoints_query_info = _EndpointsQueryInfo(self)
self._from_datastore = False
@property
def from_datastore(self):
return self._from_datastore
@classmethod
def _GetEndpointsProperty(cls, attr_name):
"""Return a property if set on a model class.
Attempts to retrieve both the NDB version of the property.
Args:
attr_name: String; the name of the property.
Returns:
The property set at the attribute name.
"""
return cls._properties.get(attr_name)
@classmethod
def from_filter_data(cls, filter_data):
url_string = filter_data.get(UNIQUE_ID)
if url_string:
entity_key = ndb.Key(urlsafe=url_string)
if entity_key:
filter_data.pop(UNIQUE_ID)
entity = entity_key.get()
for field_name, value in filter_data.iteritems():
if getattr(entity, field_name) != value:
return None
return entity
else:
return None
else:
entity_query = cls.query()
for field_name, value in filter_data.iteritems():
value_property = _verify_property(cls, field_name)
entity_query = entity_query.filter(value_property == value)
return entity_query.fetch()
@staticmethod
def to_json_data(value):
property_value = value
if isinstance(value, (main_datetime.date, main_datetime.datetime, main_datetime.time)):
property_value = utils.date_to_str(value)
elif isinstance(value, ndb.Key):
property_value = value.urlsafe()
return property_value
def to_json(self):
"""
Transforms entity property values to json format.
Watch for data that cannot be serialized by jsonify function, then convert data into an acceptable format.
:return: Dictionary containing entity data.
"""
data = self._to_dict()
for property, value in data.iteritems():
if isinstance(value, ModelBase):
property_value = value.to_json()
else:
property_value = self.to_json_data(value)
data[property] = property_value
data.update({'id': self.key.urlsafe()})
return data
@classmethod
def to_json_collection(cls, items, next_cursor=None):
output = {NEXT_PAGE: next_cursor, 'data': []}
for item in items:
output['data'].append(item.to_json())
return output
def from_json(self, request_data):
"""
Update entity with new data from json.
check for data to transform, if needed, perform operations and assign values to respective properties in entity.
:param request_data:
:return:
"""
for property, value in request_data.iteritems():
prop_type = self._properties.get(property)
if prop_type:
prop_value = value
if isinstance(prop_type, (ndb.DateProperty, ndb.DateTimeProperty, ndb.TimeProperty)):
prop_value = utils.date_from_str(prop_type, prop_value)
elif isinstance(prop_type, ndb.KeyProperty):
prop_value = ndb.Key(urlsafe=prop_value)
setattr(self, property, prop_value)
@classmethod
def method(cls, transform_response=False, transform_fields=None, user_required=False):
"""Creates an API method decorator.
:param transform_request: Boolean; indicates whether or not
a response data's ndb.Key value are to be returned,
if True all ndb.Key values are used to get respective entity data,
if False all ndb.Key are returned as urlsafe strings.
:param transform_fields: An (optional) list or tuple that defines
returned fields for ndb.Key value type in response data.
:param user_required: Boolean; indicates whether or not a user is required on any incoming request.
:return: A decorator that takes the metadata passed in and augments an API method.
"""
def request_to_entity_decorator(api_method):
@functools.wraps(api_method)
def entity_to_request_method(service_instance, **filter_data):
if user_required and not current_user.is_authenticated:
raise exceptions.AuthenticationError
entity = None
if filter_data:
entity = cls.from_filter_data(filter_data)
if entity:
if type(entity) is list:
entity = entity[0]
entity._from_datastore = True
if not entity:
entity = cls()
request_data = request.get_json()
request_data and entity.from_json(request_data)
try:
response = api_method(service_instance, entity)
except datastore_errors.BadValueError, e:
raise exceptions.RequiredInputError(e.message)
if transform_response:
response_data = response.transform_response(transform_fields)
else:
response_data = response.to_json()
return response_data
return entity_to_request_method
return request_to_entity_decorator
@classmethod
def query_method(cls, transform_response=False, transform_fields=None, user_required=False):
"""Creates an API method decorator.
:param transform_request:
:param transform_fields:
:param user_required:
:return:
"""
def request_to_query_decorator(api_method):
@functools.wraps(api_method)
def query_from_request_method(service_instance, **filter_data):
if user_required and not current_user.is_authenticated:
abort(401, message='Invalid user.')
if UNIQUE_ID in filter_data:
entity_key = ndb.Key(urlsafe=filter_data.get(UNIQUE_ID))
request_entity = (entity_key and entity_key.get()) or cls()
filter_data.pop(UNIQUE_ID)
return (transform_fields and request_entity.transform_response()) or request_entity.to_json()
else:
request_entity = cls()
request_entity.from_json(filter_data)
query_info = request_entity._endpoints_query_info
next_page = request.args.get(NEXT_PAGE)
if next_page:
query_info.cursor = datastore_query.Cursor(urlsafe=next_page)
query_info.SetQuery()
query = api_method(service_instance, query_info.query)
query_options = {'start_cursor': query_info.cursor}
items, next_cursor, more_results = query.fetch_page(DEFAULT_FETCH_LIMIT, **query_options)
if not more_results:
next_cursor = None
else:
next_cursor = next_cursor.urlsafe()
if transform_response:
return cls.transform_response_collection(items, next_cursor=next_cursor)
else:
return cls.to_json_collection(items, next_cursor=next_cursor)
return query_from_request_method
return request_to_query_decorator
def transform_response(self, transform_fields=None):
"""
Select ndb.Key property types for their respective data response.
:param transform_fields: optional list or tuple which is used to specify returned properties for a
ndb.Key property.
:return:
"""
data = self._to_dict()
for property_name, value in data.iteritems():
if isinstance(value, ndb.Key):
property_value = value.get()
if property_value:
property_value = property_value.to_json()
else:
property_value = self.to_json_data(value)
data[property_name] = property_value
data['id'] = self.key.urlsafe()
return data
@classmethod
def transform_response_collection(cls, items, next_cursor=None, transform_fields=None):
    """Serialize a collection of entities for an API response.

    Each entity is rendered via its transform_response(); the paging cursor
    is exposed under the NEXT_PAGE key and the rendered entities under 'data'.

    :param items: iterable of entities to serialize
    :param next_cursor: urlsafe paging cursor for the next page, or None
    :param transform_fields: reserved for field filtering (currently unused)
    :return: dict with the NEXT_PAGE cursor and a 'data' list
    """
    rendered = [entity.transform_response() for entity in items]
    return {NEXT_PAGE: next_cursor, 'data': rendered}
| 32.465074 | 114 | 0.730706 |
acfb18bacc1e78f80c4a1d97a6b95c36cd34bdff | 5,190 | py | Python | repos/system_upgrade/el7toel8/files/rhel_upgrade.py | brammittendorff/leapp-repository | 2b04640fd00fb1402e952a0bae13d4002b299345 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/files/rhel_upgrade.py | brammittendorff/leapp-repository | 2b04640fd00fb1402e952a0bae13d4002b299345 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/files/rhel_upgrade.py | brammittendorff/leapp-repository | 2b04640fd00fb1402e952a0bae13d4002b299345 | [
"Apache-2.0"
] | null | null | null | # plugin inspired by "system_upgrade.py" from rpm-software-management
from __future__ import print_function
import json
import sys
import dnf
import dnf.cli
CMDS = ['download', 'upgrade', 'check']
class DoNotDownload(Exception):
    """Sentinel exception raised to abort the transaction before any
    packages are actually downloaded (used by the 'check' phase)."""
def _do_not_download_packages(packages, progress=None, total=None):
    # Replacement for base.download_packages that unconditionally raises
    # DoNotDownload, so the 'check' phase can run the transaction machinery
    # without fetching anything. The signature mirrors the dnf callback.
    raise DoNotDownload()
class RhelUpgradeCommand(dnf.cli.Command):
    """DNF CLI command driving the RHEL 7 -> 8 in-place upgrade.

    Supports three sub-actions (see CMDS): 'check' resolves the transaction
    without downloading, 'download' fetches packages, 'upgrade' applies the
    transaction from cache. Configuration comes from a JSON file produced by
    Leapp (self.plugin_data).
    """
    aliases = ('rhel-upgrade',)
    summary = ("Plugin for upgrading to the next RHEL major release")

    def __init__(self, cli):
        super(RhelUpgradeCommand, self).__init__(cli)
        self.plugin_data = {}
        # Accumulates every package spec that could not be marked, across calls.
        self.pkgs_notfound = []

    @staticmethod
    def set_argparser(parser):
        # First positional arg selects the sub-action; second is the JSON data file.
        parser.add_argument('tid', nargs=1, choices=CMDS,
                            metavar="[%s]" % "|".join(CMDS))
        parser.add_argument('filename')

    def _process_packages(self, pkg_set, op):
        '''
        Adds list of packages for given operation to the transaction.

        Raises dnf.exceptions.MarkingError listing every package spec from
        pkg_set that could not be found in repository metadata.
        '''
        pkgs_notfound = []
        for pkg_spec in pkg_set:
            try:
                op(pkg_spec)
            except dnf.exceptions.MarkingError:
                # BUG FIX: failures were appended to self.pkgs_notfound while the
                # raise below checked the always-empty local list, so the error
                # was silently swallowed. Collect locally (and mirror on self for
                # callers inspecting the attribute), then raise.
                pkgs_notfound.append(pkg_spec)
        if pkgs_notfound:
            self.pkgs_notfound.extend(pkgs_notfound)
            err_str = ('Packages marked by Leapp for installation/removal/upgrade not found '
                       'in repository metadata: ') + ' '.join(pkgs_notfound)
            raise dnf.exceptions.MarkingError(err_str)

    def pre_configure(self):
        # Load the Leapp-provided JSON payload before DNF configures itself.
        with open(self.opts.filename) as fo:
            self.plugin_data = json.load(fo)
        # There is an issue that ignores releasever value if it is set at configure
        self.base.conf.releasever = self.plugin_data['dnf_conf']['releasever']

    def configure(self):
        self.cli.demands.root_user = True
        # 'check' resolves explicitly in run(); the other actions let the CLI resolve.
        self.cli.demands.resolving = self.opts.tid[0] != 'check'
        self.cli.demands.available_repos = True
        self.cli.demands.sack_activation = True
        # The actual upgrade must run purely from previously downloaded cache.
        self.cli.demands.cacheonly = self.opts.tid[0] == 'upgrade'
        self.cli.demands.allow_erasing = self.plugin_data['dnf_conf']['allow_erasing']
        # Nothing is protected during a major-version upgrade.
        self.base.conf.protected_packages = []
        self.base.conf.best = self.plugin_data['dnf_conf']['best']
        self.base.conf.assumeyes = True
        self.base.conf.gpgcheck = self.plugin_data['dnf_conf']['gpgcheck']
        self.base.conf.debug_solver = self.plugin_data['dnf_conf']['debugsolver']
        self.base.conf.module_platform_id = self.plugin_data['dnf_conf']['platform_id']
        installroot = self.plugin_data['dnf_conf'].get('installroot')
        if installroot:
            self.base.conf.installroot = installroot
        if self.plugin_data['dnf_conf']['test_flag'] and self.opts.tid[0] == 'download':
            # Dry-run the transaction during download when requested.
            self.base.conf.tsflags.append("test")
        # Enable exactly the repositories Leapp selected; everything else stays off.
        enabled_repos = self.plugin_data['dnf_conf']['enable_repos']
        self.base.repos.all().disable()
        for repo in self.base.repos.all():
            if repo.id in enabled_repos:
                repo.skip_if_unavailable = False
                if not self.base.conf.gpgcheck:
                    repo.gpgcheck = False
                repo.enable()

    def run(self):
        # takes local rpms, creates Package objects from them, and then adds them
        # to the sack as a virtual repository
        local_rpm_objects = self.base.add_remote_rpms(self.plugin_data['pkgs_info']['local_rpms'])
        for pkg in local_rpm_objects:
            self.base.package_install(pkg)

        to_install_local = self.plugin_data['pkgs_info']['local_rpms']
        to_install = self.plugin_data['pkgs_info']['to_install']
        to_remove = self.plugin_data['pkgs_info']['to_remove']
        to_upgrade = self.plugin_data['pkgs_info']['to_upgrade']

        # Local (on filesystem) packages to be installed.
        # NOTE(review): these paths were already added to the sack above;
        # this second add_remote_rpms call looks redundant — confirm before removing.
        self.base.add_remote_rpms(to_install_local)
        # Packages to be removed
        self._process_packages(to_remove, self.base.remove)
        # Packages to be installed
        self._process_packages(to_install, self.base.install)
        # Packages to be upgraded
        self._process_packages(to_upgrade, self.base.upgrade)
        self.base.distro_sync()

        if self.opts.tid[0] == 'check':
            try:
                self.base.resolve(allow_erasing=self.cli.demands.allow_erasing)
            except dnf.exceptions.DepsolveError as e:
                print(str(e), file=sys.stderr)
                raise
            # We are doing this to avoid downloading the packages in the check phase
            self.base.download_packages = _do_not_download_packages
            try:
                displays = []
                if self.cli.demands.transaction_display is not None:
                    displays.append(self.cli.demands.transaction_display)
                self.base.do_transaction(display=displays)
            except DoNotDownload:
                print('Check completed.')
class RhelUpgradePlugin(dnf.Plugin):
    # DNF plugin shell whose only job is registering RhelUpgradeCommand
    # with the CLI (cli is None when DNF runs without a command line).
    name = 'rhel-upgrade'

    def __init__(self, base, cli):
        super(RhelUpgradePlugin, self).__init__(base, cli)
        if cli:
            cli.register_command(RhelUpgradeCommand)
| 37.883212 | 115 | 0.637765 |
acfb19434f8fa92b6320b48e30b9af7b575b7de3 | 65 | py | Python | app/core.py | BlaShadow/Hestia-Mongo | 91cca65ee246f035ee15aad359aa33dd33a404dc | [
"MIT"
] | 3 | 2015-07-01T19:52:12.000Z | 2015-07-01T20:04:50.000Z | app/core.py | BlaShadow/Hestia-Mongo | 91cca65ee246f035ee15aad359aa33dd33a404dc | [
"MIT"
] | null | null | null | app/core.py | BlaShadow/Hestia-Mongo | 91cca65ee246f035ee15aad359aa33dd33a404dc | [
"MIT"
] | null | null | null | #core
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
| 13 | 39 | 0.8 |
acfb19ed9621ebbb04e9d1872f1872529cfcaf0f | 8,559 | py | Python | configerus/test/test_6_instances.py | james-nesbitt/configerus | ff11e8cb0f1ee0ca078a8fc2475f16a1c52f1271 | [
"MIT"
] | 1 | 2021-02-07T18:47:58.000Z | 2021-02-07T18:47:58.000Z | configerus/test/test_6_instances.py | james-nesbitt/configerus | ff11e8cb0f1ee0ca078a8fc2475f16a1c52f1271 | [
"MIT"
] | null | null | null | configerus/test/test_6_instances.py | james-nesbitt/configerus | ff11e8cb0f1ee0ca078a8fc2475f16a1c52f1271 | [
"MIT"
] | null | null | null | """
test the plugin instance functionality
"""
import logging
import unittest
from configerus.config import Config
from configerus.plugin import (
SourceFactory,
FormatFactory,
ValidatorFactory,
Type,
)
from configerus.instances import PluginInstance, PluginInstances
logger = logging.getLogger("test_instances")
""" register a bunch of dummy plugins which do nothing but can be used for testing """
@SourceFactory(plugin_id="dummy_1")
def plugin_factory_source_1(config: Config, instance_id: str = ""):
"""create a dummy configsource plugin"""
return DummyPlugin(config, instance_id)
@SourceFactory(plugin_id="dummy_2")
def plugin_factory_source_2(config: Config, instance_id: str = ""):
"""create a dummy configsource plugin"""
return DummyPlugin(config, instance_id)
@FormatFactory(plugin_id="dummy_1")
def plugin_factory_format_1(config: Config, instance_id: str = ""):
"""create a dummy formatter plugin"""
return DummyPlugin(config, instance_id)
@FormatFactory(plugin_id="dummy_2")
def plugin_factory_format_2(config: Config, instance_id: str = ""):
"""create a dummy formatter plugin"""
return DummyPlugin(config, instance_id)
@ValidatorFactory(plugin_id="dummy_1")
def plugin_factory_validate_1(config: Config, instance_id: str = ""):
"""create a dummy formatter plugin"""
return DummyPlugin(config, instance_id)
@ValidatorFactory(plugin_id="dummy_2")
def plugin_factory_validate_2(config: Config, instance_id: str = ""):
"""create a dummy formatter plugin"""
return DummyPlugin(config, instance_id)
class DummyPlugin:
    """Just a placeholder plugin object"""

    def __init__(self, config: Config, instance_id: str):
        # Store only what the instance-registry tests inspect.
        self.config = config
        self.instance_id = instance_id
class ConfigTemplating(unittest.TestCase):
    """Exercise PluginInstance / PluginInstances bookkeeping:
    construction, lookup, filtered retrieval and priority ordering."""

    def test_instance_struct_sanity(self):
        """just test that the Instance struct has the properties that we use"""
        config = Config()
        instance = PluginInstance(
            plugin_id="dummy",
            instance_id="dummy",
            type=Type.SOURCE,
            priority=60,
            plugin=DummyPlugin(config, "dummy"),
        )
        self.assertEqual(instance.plugin_id, "dummy")
        self.assertEqual(instance.priority, 60)

    def test_instances_sanity(self):
        """Test some construction sanity on the instances object"""
        config = Config()
        instances = PluginInstances(config.make_plugin)
        self.assertEqual(len(instances), 0)
        instances.add_plugin(Type.SOURCE, "dummy_1", "dummy_instance_1", config.default_priority())
        instances.add_plugin(Type.SOURCE, "dummy_1", "dummy_instance_2", config.default_priority())
        self.assertEqual(len(instances), 2)

    def test_instances_plugin_get_simple(self):
        """test that we can add plugins and then retrieve them"""
        config = Config()
        instances = PluginInstances(config.make_plugin)
        # same mix of plugin ids as the original hand-written calls
        plugin_ids = ["dummy_1", "dummy_1", "dummy_2", "dummy_2", "dummy_1", "dummy_2", "dummy_1"]
        for index, plugin_id in enumerate(plugin_ids, start=1):
            instances.add_plugin(
                Type.SOURCE,
                plugin_id,
                "dummy_instance_{}".format(index),
                config.default_priority(),
            )

        # BUG FIX: the original had a bare `len(instances) == 7` expression whose
        # boolean result was silently discarded — assert it properly.
        self.assertEqual(len(instances), 7)

        get_4 = instances["dummy_instance_4"]
        self.assertTrue(get_4)
        self.assertEqual(get_4.instance_id, "dummy_instance_4")
        self.assertEqual(
            instances.get_plugin(instance_id="dummy_instance_4").instance_id,
            "dummy_instance_4",
        )
        with self.assertRaises(KeyError):
            instances.get_plugin(instance_id="no_such_instance")

    def test_instances_get_plugins(self):
        """get plugins based on multiple search parameters"""
        config = Config()
        instances = PluginInstances(config.make_plugin)
        registrations = [
            (Type.SOURCE, "dummy_1", "dummy_source_1"),
            (Type.SOURCE, "dummy_1", "dummy_source_2"),
            (Type.SOURCE, "dummy_2", "dummy_instance"),
            (Type.FORMATTER, "dummy_2", "dummy_formatter_4"),
            (Type.FORMATTER, "dummy_1", "dummy_formatter_5"),
            (Type.FORMATTER, "dummy_2", "dummy_formatter_6"),
            (Type.VALIDATOR, "dummy_1", "dummy_instance"),
            (Type.VALIDATOR, "dummy_2", "dummy_validator_8"),
        ]
        for plugin_type, plugin_id, instance_id in registrations:
            instances.add_plugin(plugin_type, plugin_id, instance_id, config.default_priority())

        self.assertTrue(instances.has_plugin(instance_id="dummy_formatter_4"))
        self.assertTrue(instances.has_plugin(instance_id="dummy_validator_8"))
        self.assertTrue(instances.has_plugin(instance_id="dummy_source_2"))

        self.assertEqual(len(instances.get_instances(type=Type.SOURCE)), 3)
        self.assertEqual(len(instances.get_instances(type=Type.FORMATTER)), 3)
        self.assertEqual(len(instances.get_instances(type=Type.VALIDATOR)), 2)
        self.assertEqual(len(instances.get_instances(type=Type.SOURCE, plugin_id="dummy_1")), 2)
        self.assertEqual(len(instances.get_instances(type=Type.VALIDATOR, plugin_id="dummy_1")), 1)
        # Plugin ID only search should cross type
        self.assertEqual(len(instances.get_instances(plugin_id="dummy_1")), 4)
        # There are no rules against repeated instance_ids
        self.assertEqual(len(instances.get_instances(instance_id="dummy_instance")), 2)
        # no rule against empty filtering (gets sorted instances)
        self.assertEqual(len(instances.get_instances()), len(instances))

    def test_instance_priority_simple(self):
        """test that retrieving instances sorts (highest priority first)"""
        config = Config()
        instances = PluginInstances(config.make_plugin)
        starting_range = range(30, 71, 10)
        priority_list = range(70, 29, -10)
        for priority in starting_range:
            instances.add_plugin(Type.SOURCE, "dummy_1", "instance_{}".format(priority), priority)

        instance_list = instances.get_instances(type=Type.SOURCE)
        self.assertEqual(len(instance_list), len(starting_range))
        for index, priority in enumerate(priority_list):
            self.assertEqual(instance_list[index].priority, priority)

        # a newly added higher-priority plugin must sort to the front
        instances.add_plugin(Type.SOURCE, "dummy_1", "instance_{}".format(90), 90)
        instance_list = instances.get_instances(type=Type.SOURCE)
        self.assertEqual(len(instance_list), len(starting_range) + 1)
        self.assertEqual(instance_list[0].priority, 90)
        self.assertEqual(instance_list[1].priority, 70)
        self.assertEqual(instance_list[len(starting_range)].priority, 30)
| 31.123636 | 86 | 0.611286 |
acfb1a58ffdfe04c8b0bc5fb92b25f0e280d0248 | 1,518 | py | Python | models.py | We-Vote/BE | 7c6317b911d9a8bfe55c47f7e4b565feef7edbd6 | [
"MIT"
] | null | null | null | models.py | We-Vote/BE | 7c6317b911d9a8bfe55c47f7e4b565feef7edbd6 | [
"MIT"
] | 6 | 2020-03-24T18:06:56.000Z | 2021-12-13T20:31:19.000Z | models.py | We-Vote/BE | 7c6317b911d9a8bfe55c47f7e4b565feef7edbd6 | [
"MIT"
] | null | null | null | from app import db
from sqlalchemy.dialects.postgresql import JSON
class User(db.Model):
    """A registered account; owns polls and casts votes."""
    # BUG FIX: was `db.model` (lowercase), which is not an attribute of the
    # Flask-SQLAlchemy object — the declarative base is `db.Model`.
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(), nullable=False, unique=True)
    # NOTE(review): stored as-is; should hold a password *hash*, never plaintext.
    password = db.Column(db.String(), nullable=False)

    # backrefs
    polls = db.relationship('Poll', backref='user', order_by="Poll.created_at")
    votes = db.relationship('Vote', backref='user')
class Poll(db.Model):
    """A question created by a user, offering one or more options to vote on."""
    __tablename__ = 'polls'

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(), nullable=False)
    description = db.Column(db.String())
    created_at = db.Column(db.DateTime, nullable=False)
    # BUG FIX: ForeignKey takes "<tablename>.<column>"; the users table is
    # declared as __tablename__ = 'users', so 'user.id' would fail to resolve.
    creator_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)

    options = db.relationship('Option', backref='poll', order_by="Option.created_at")
class Option(db.Model):
    """A single selectable answer belonging to a poll."""
    __tablename__ = 'options'

    id = db.Column(db.Integer, primary_key=True)
    description = db.Column(db.String(), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False)
    # BUG FIX: the polls table is declared as __tablename__ = 'polls';
    # ForeignKey must reference the table name, not the model name.
    poll_id = db.Column(db.Integer, db.ForeignKey('polls.id'), nullable=False)

    votes = db.relationship('Vote', backref='option')
class Vote(db.Model):
    """One user's choice of one option."""
    __tablename__ = 'votes'

    id = db.Column(db.Integer, primary_key=True)
    # BUG FIX: ForeignKey targets are table names; the referenced tables are
    # declared as 'users' and 'options' in this module.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    option_id = db.Column(db.Integer, db.ForeignKey('options.id'), nullable=False)
acfb1a69309e0680f231ae3976b4ddb4d57c1d08 | 7,645 | py | Python | utils/data_loader.py | volflow/neural-image-assessment | 0d6ff06ddffda531efd0da9e8a77a6ea9528c473 | [
"MIT"
] | null | null | null | utils/data_loader.py | volflow/neural-image-assessment | 0d6ff06ddffda531efd0da9e8a77a6ea9528c473 | [
"MIT"
] | null | null | null | utils/data_loader.py | volflow/neural-image-assessment | 0d6ff06ddffda531efd0da9e8a77a6ea9528c473 | [
"MIT"
] | null | null | null | import numpy as np
import os
import glob
import tensorflow as tf
# path to the images and the text file which holds the scores and ids
base_images_path = r'/Users/valentinwolf/data/AVA_dataset/images/'
ava_dataset_path = r'/Users/valentinwolf/data/AVA_dataset/AVA.txt'
def import_dataset(base_images_path, ava_dataset_path, IMAGE_SIZE=224):
    """Parse the AVA metadata file and collect (image path, score) pairs.

    Each line of the AVA file looks like: "<idx> <image_id> <10 vote counts> ...".
    The 10 vote counts are normalized into a probability distribution. Only
    images that actually exist under base_images_path are kept.

    Args:
        base_images_path: directory (with trailing separator) containing <id>.jpg files
        ava_dataset_path: path to the AVA.txt metadata file
        IMAGE_SIZE: kept for interface compatibility (unused here)

    Returns:
        (paths, scores): ndarray of file paths and float32 ndarray of shape (N, 10)
    """
    # BUG FIX: the original built and sorted a glob of every *.jpg in the image
    # directory into a local `files` variable that was never used — a potentially
    # expensive directory scan with no effect. Removed.
    train_image_paths = []
    train_scores = []

    with open(ava_dataset_path, mode='r') as f:
        lines = f.readlines()
        for i, line in enumerate(lines):
            token = line.split()
            id = int(token[1])

            values = np.array(token[2:12], dtype='float32')
            values /= values.sum()  # normalize vote histogram to a distribution

            file_path = base_images_path + str(id) + '.jpg'
            if os.path.exists(file_path):
                train_image_paths.append(file_path)
                train_scores.append(values)

            # crude progress report, assuming the full ~255k-line AVA file
            count = 255000 // 100
            if i % count == 0 and i != 0:
                print('\rLoaded %d%% of the dataset' % (i / 255000. * 100), end='')

    train_image_paths = np.array(train_image_paths)
    train_scores = np.array(train_scores, dtype='float32')
    return train_image_paths, train_scores
def parse_data(filename, scores, image_size=224):
    '''
    Loads the image file, and randomly applies crops and flips to each image.

    Args:
        filename: the filename from the record
        scores: the scores from the record
        image_size: side length of the square random crop. BUG FIX: the body
            previously read a module-level IMAGE_SIZE that is never defined in
            this module, which would raise NameError at graph-build time; the
            default matches import_dataset's IMAGE_SIZE default.

    Returns:
        an image referred to by the filename and its scores
    '''
    image = tf.read_file(filename)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize_images(image, (256, 256))
    image = tf.random_crop(image, size=(image_size, image_size, 3))
    image = tf.image.random_flip_left_right(image)
    # scale pixel values from [0, 255] to [-1, 1]
    image = (tf.cast(image, tf.float32) - 127.5) / 127.5
    return image, scores
def parse_data_without_augmentation(filename, scores, image_size=224):
    '''
    Loads the image file without any augmentation. Used for validation set.

    Args:
        filename: the filename from the record
        scores: the scores from the record
        image_size: target side length of the resized image. BUG FIX: the body
            previously read a module-level IMAGE_SIZE that is never defined in
            this module (NameError at graph-build time).

    Returns:
        an image referred to by the filename and its scores
    '''
    image = tf.read_file(filename)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize_images(image, (image_size, image_size))
    # scale pixel values from [0, 255] to [-1, 1]
    image = (tf.cast(image, tf.float32) - 127.5) / 127.5
    return image, scores
def train_generator(batchsize, shuffle=True):
    '''
    Creates a python generator that loads the AVA dataset images with random data
    augmentation and generates numpy arrays to feed into the Keras model for training.

    Args:
        batchsize: batchsize for training
        shuffle: whether to shuffle the dataset

    Returns:
        a batch of samples (X_images, y_scores)
    '''
    # NOTE(review): relies on module-level train_image_paths / train_scores;
    # they are only produced by import_dataset(), so a caller must bind them
    # into this module before using the generator — confirm wiring.
    with tf.Session() as sess:
        # create a dataset
        train_dataset = tf.data.Dataset().from_tensor_slices((train_image_paths, train_scores))
        train_dataset = train_dataset.map(parse_data, num_parallel_calls=2)
        train_dataset = train_dataset.batch(batchsize)
        train_dataset = train_dataset.repeat()
        if shuffle:
            train_dataset = train_dataset.shuffle(buffer_size=4)
        train_iterator = train_dataset.make_initializable_iterator()
        train_batch = train_iterator.get_next()
        sess.run(train_iterator.initializer)

        while True:
            try:
                X_batch, y_batch = sess.run(train_batch)
            # BUG FIX: was a bare `except:` that also swallowed KeyboardInterrupt
            # and real errors; only iterator exhaustion should trigger a reset.
            except tf.errors.OutOfRangeError:
                train_iterator = train_dataset.make_initializable_iterator()
                sess.run(train_iterator.initializer)
                train_batch = train_iterator.get_next()
                X_batch, y_batch = sess.run(train_batch)
            yield (X_batch, y_batch)
def val_generator(batchsize):
    '''
    Creates a python generator that loads the AVA dataset images without random data
    augmentation and generates numpy arrays to feed into the Keras model for training.

    Args:
        batchsize: batchsize for validation set

    Returns:
        a batch of samples (X_images, y_scores)
    '''
    # NOTE(review): relies on module-level val_image_paths / val_scores being
    # bound by the caller (see the commented-out split in import_dataset).
    with tf.Session() as sess:
        val_dataset = tf.data.Dataset().from_tensor_slices((val_image_paths, val_scores))
        val_dataset = val_dataset.map(parse_data_without_augmentation)
        val_dataset = val_dataset.batch(batchsize)
        val_dataset = val_dataset.repeat()
        val_iterator = val_dataset.make_initializable_iterator()
        val_batch = val_iterator.get_next()
        sess.run(val_iterator.initializer)

        while True:
            try:
                X_batch, y_batch = sess.run(val_batch)
            # BUG FIX: narrowed from a bare `except:`; only iterator exhaustion
            # should trigger a reset.
            except tf.errors.OutOfRangeError:
                val_iterator = val_dataset.make_initializable_iterator()
                sess.run(val_iterator.initializer)
                val_batch = val_iterator.get_next()
                X_batch, y_batch = sess.run(val_batch)
            yield (X_batch, y_batch)
def features_generator(record_path, faeture_size, batchsize, shuffle=True):
    '''
    Creates a python generator that loads pre-extracted features from a model
    and serves it to Keras for pre-training.

    Args:
        record_path: path to the TF Record file
        faeture_size: the number of features in each record. Depends on the base model.
            (parameter name kept as-is, typo included, for caller compatibility)
        batchsize: batchsize for training
        shuffle: whether to shuffle the records

    Returns:
        a batch of samples (X_features, y_scores)
    '''
    with tf.Session() as sess:
        # maps record examples to numpy arrays
        def parse_single_record(serialized_example):
            # parse a single record
            example = tf.parse_single_example(
                serialized_example,
                features={
                    'features': tf.FixedLenFeature([faeture_size], tf.float32),
                    'scores': tf.FixedLenFeature([10], tf.float32),
                })
            features = example['features']
            scores = example['scores']
            return features, scores

        # Loads the TF dataset
        train_dataset = tf.data.TFRecordDataset([record_path])
        train_dataset = train_dataset.map(parse_single_record, num_parallel_calls=4)
        train_dataset = train_dataset.batch(batchsize)
        train_dataset = train_dataset.repeat()
        if shuffle:
            train_dataset = train_dataset.shuffle(buffer_size=5)
        train_iterator = train_dataset.make_initializable_iterator()
        train_batch = train_iterator.get_next()
        sess.run(train_iterator.initializer)

        # indefinitely extract batches
        while True:
            try:
                X_batch, y_batch = sess.run(train_batch)
            # BUG FIX: narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt and genuine runtime errors.
            except tf.errors.OutOfRangeError:
                train_iterator = train_dataset.make_initializable_iterator()
                sess.run(train_iterator.initializer)
                train_batch = train_iterator.get_next()
                X_batch, y_batch = sess.run(train_batch)
            yield (X_batch, y_batch)
| 35.230415 | 95 | 0.646174 |
acfb1a83001f20a03d7855abeb4c02406325b73c | 7,116 | py | Python | solver.py | kondrak/graph_bfs_solver | 2f44161c09e9cdb0b0433a53aeee198540f1b744 | [
"MIT"
] | null | null | null | solver.py | kondrak/graph_bfs_solver | 2f44161c09e9cdb0b0433a53aeee198540f1b744 | [
"MIT"
] | null | null | null | solver.py | kondrak/graph_bfs_solver | 2f44161c09e9cdb0b0433a53aeee198540f1b744 | [
"MIT"
] | null | null | null | # *** BFS Graph Solver ***
# (c) Krzysztof Kondrak (at) gmail (dot) com
import sys
import os
import itertools
import getopt
import pickle
from graph import *
from pathFinding import *
from tools import ProgressBar, usage, processGraphFile
sys.path.append(os.getcwd())
# Global quiet flag; flipped to True by the -s/--silent option in main().
SILENT_MODE = False

def Message(msg):
    # Print an informational message to stdout unless silent mode is on.
    # (Python 2 print statement — this file targets Python 2.)
    if not SILENT_MODE:
        print msg
def Warning(msg):
    # Emit a warning line to stderr (flushed immediately) unless silent mode is on.
    # NOTE(review): shadows the built-in `Warning` exception class.
    if not SILENT_MODE:
        sys.stderr.write(msg + "\n")
        sys.stderr.flush()
def parseGraph(fileName):
    # Parse the graph definition file via tools.processGraphFile.
    # On I/O failure: print an error (even in silent mode) and exit with status 2.
    try:
        Message("\n* Parsing graph file: " + fileName)
        return processGraphFile(fileName)
    except IOError:
        print "*** ERROR: Could not open " + fileName + ". Aborting. (Run with -? for help) ***"
        print " "
        sys.exit(2)
def main(argv):
    # Entry point: parse CLI options, enumerate candidate paths between all
    # pairs of home nodes, test every combination of `numTravellers` paths for
    # solvability, then print/pickle the surviving solutions.
    pathFinder = PathFinder()
    numTravellers = 2
    combinationLimit = 1000000   # max path combinations to consider (0/neg = unlimited)
    minPathLength = -1           # -1 means "no bound"
    maxPathLength = -1
    maxEdgeRedundancy = -1       # max unused edges allowed when listing solutions
    guiFormat = False            # NOTE(review): set by -g but never read below
    fileName = ""

    try:
        opts, args = getopt.getopt(argv, "?gst:l:f:i:a:chr:", ["help", "guiFormat", "silent", "travellers=", "limit=", "filename=", "min=", "max=", "cyclic", "allowHomes", "allowhomes", "redundancy="])
        for opt, arg in opts:
            if opt in ("-?", "--help"):
                usage()
                sys.exit(1)
            if opt in ("-t", "--travellers"):
                numTravellers = int(arg)
            if opt in ("-l", "--limit"):
                # user supplies millions; stored as an absolute count
                combinationLimit = int(arg) * 1000000
            if opt in ("-f", "--filename"):
                fileName = arg
            if opt in ("-i", "--min"):
                # +1 converts edge count to node count (a path of N nodes has N-1 moves)
                minPathLength = int(arg) + 1
            if opt in ("-a", "--max"):
                maxPathLength = int(arg) + 1
            if opt in ("-r", "--redundancy"):
                maxEdgeRedundancy = int(arg)
            if opt in ("-c", "--cyclic"):
                pathFinder.useCyclicBFS = True
            if opt in ("-h", "--allowHomes", "--allowhomes"):
                pathFinder.canPassHomeNodes = True
            if opt in ("-g", "--guiFormat", "--guiformat"):
                guiFormat = True
            if opt in ("-s", "--silent"):
                global SILENT_MODE
                SILENT_MODE = True
        if len(fileName) == 0:
            usage()
            sys.exit(2)
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    progressBar = ProgressBar()
    testGraph = parseGraph(fileName)
    Message("\n* Solving for " + str(numTravellers) + " traveller(s)")
    if combinationLimit > 0:
        Message("* Considering at most " + str(combinationLimit) + " combinations.")
    else:
        Message("* Attempting to solve all combinations.")

    # Enumerate every BFS path between each unordered pair of home nodes,
    # keeping only paths within the configured length bounds.
    homeNodeIds = testGraph.GetHomeNodeIds()
    homeNodePairs = itertools.combinations(homeNodeIds, 2)
    solutions = []
    # FindAllPaths dla wszystkich par domkow (FindAllPaths for all home-node pairs)
    for p in homeNodePairs:
        for s in pathFinder.FindAllPaths(testGraph, p[0], p[1]):
            if(minPathLength == -1 or len(s) >= minPathLength) and (maxPathLength == -1 or len(s) <= maxPathLength):
                solutions.append(s)

    #generate solution sets
    solutions.sort()
    Message("Discovered " + str(len(solutions)) + " paths for all home nodes.")
    combinations = itertools.combinations(solutions, numTravellers)
    solutionSets = []
    numMillions = 1
    # if combinationLimit > 0:
    # Materialize combinations up to the configured limit, warning每 million.
    currentCombination = 0
    for c in combinations:
        if currentCombination == combinationLimit and combinationLimit > 0:
            break
        if currentCombination > numMillions*1000000:
            Warning("** WARNING: over " + str(numMillions) + " million combinations.")
            numMillions = numMillions + 1
        solutionSets.append(c)
        currentCombination = currentCombination + 1
    # else:
    #    solutionSets = list(combinations)
    Message("* Spawned " + str(len(solutionSets)) + " combinations.")

    # get rid of gazillions duplicate entries
    Message("* Filtering combinations, this may take a while...")
    solutionSets.sort()
    # groupby on a sorted list keeps one representative per duplicate run
    solutionSets = list(solutionSets for solutionSets,_ in itertools.groupby(solutionSets))
    totalNumSets = len(solutionSets)
    Message("* Will check " + str(totalNumSets) + " unique sets")

    # Test each unique set for solvability on a freshly reset graph.
    possibleSolutions = []
    currentSetNum = 0
    solutionNum = 1
    for s in solutionSets:
        if not SILENT_MODE:
            progressBar.draw(currentSetNum, totalNumSets)
        currentSetNum = currentSetNum + 1
        testGraph.Reset()
        possibleSolution = testGraph.IsSolvableForSet(s)
        if possibleSolution is not None:
            Message("\rSolution " + str(solutionNum) + " " + str(possibleSolution))
            # check how many edges are left unused, the less the better
            unusedEdges = testGraph.GetFreeEdges()
            possibleSolutions.append((possibleSolution, unusedEdges))
            solutionNum = solutionNum + 1
    if not SILENT_MODE:
        progressBar.draw(currentSetNum, totalNumSets)
    Message("\n")

    # sort solutions by number of unused edges
    possibleSolutions.sort(key=lambda possibleSolutions: len(possibleSolutions[1]))
    # Build both the human-readable listing and the GUI pickle payload.
    numSolutionsListed = 0
    guiFormatDataList = [] # container of guiFormatData
    for s in possibleSolutions:
        solutionString = str(s[0]) + " "
        guiFormatData = dict()
        guiFormatData['Paths'] = s[0]
        guiFormatData['PathEndNodes'] = []
        guiFormatData['MoveLimits'] = []
        for element in s[0]:
            # SP = start/end points of the path, ML = move limit (path length - 1)
            startPoint = "(SP: " + str(element[0]) + "|" + str(element[len(element)-1]) + " ML: " + str(len(element)-1) + ") "
            solutionString += startPoint
            guiFormatData['PathEndNodes'].append((element[0], element[len(element)-1]))
            guiFormatData['MoveLimits'].append(len(element)-1)
        # RE = redundant (unused) edge count
        solutionString += "RE: " + str(len(s[1])) + " "
        redundantEdgeIdList = []
        for e in s[1]:
            redundantEdgeIdList.append(e.id)
        guiFormatData['RedundantEdgeIds'] = redundantEdgeIdList
        if len(s[1]) > 0:
            unusedEdgesStr = ""
            for ue in s[1]:
                unusedEdgesStr += "(" + str(ue.connectedNodes[0].id) + "-" + str(ue.connectedNodes[1].id) + ")"
            solutionString += "[" + unusedEdgesStr + "]"
        # Only list/keep solutions within the redundancy budget.
        if maxEdgeRedundancy < 0 or len(s[1]) <= maxEdgeRedundancy:
            numSolutionsListed = numSolutionsListed + 1
            guiFormatDataList.append(guiFormatData)
            print solutionString

    # Persist GUI data for the viewer (pickle protocol -1 = highest available).
    guiDataOutput = open('output.txt', 'wb')
    pickle.dump(guiFormatDataList, guiDataOutput, -1)
    guiDataOutput.close()

    if len(possibleSolutions) == 0:
        Warning("*** NO SOLUTIONS FOUND. ***\n")
        sys.exit(1)
    else:
        Message("\nFound " + str(len(possibleSolutions)) + " solutions. ")
        Message("\nListed " + str(numSolutionsListed) + " solutions. ")

if __name__ == '__main__':
    main(sys.argv[1:])
| 30.152542 | 201 | 0.575464 |
acfb1a9c80bdbdbad7e2dec4d2e633ce63c9b6c8 | 14,125 | py | Python | tests/test_rfc7191.py | pysnmp/pyasn1-modules | 93f5699988fbb090be13aaa339498c128ba7dedb | [
"BSD-2-Clause"
] | null | null | null | tests/test_rfc7191.py | pysnmp/pyasn1-modules | 93f5699988fbb090be13aaa339498c128ba7dedb | [
"BSD-2-Clause"
] | null | null | null | tests/test_rfc7191.py | pysnmp/pyasn1-modules | 93f5699988fbb090be13aaa339498c128ba7dedb | [
"BSD-2-Clause"
] | null | null | null | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem, rfc5652, rfc7191
class ReceiptRequestTestCase(unittest.TestCase):
    # CMS SignedData (RFC 5652) message whose signed attributes include the
    # RFC 7191 key-package-identifier-and-receipt-request attribute.
    message1_pem_text = """\
MIIGfAYJKoZIhvcNAQcCoIIGbTCCBmkCAQMxDTALBglghkgBZQMEAgIwgb4GCyqGSIb3DQEJ
EAEZoIGuBIGrMIGooEQwIwYLKoZIhvcNAQkQDAExFAwSVmlnaWwgU2VjdXJpdHkgTExDMB0G
CyqGSIb3DQEJEAwDMQ4MDFByZXRlbmQgMDQ4QTBgMF4wVjAbBgsqhkiG9w0BCRAMGzEMDApl
eGFtcGxlSUQxMBUGCyqGSIb3DQEJEAwKMQYMBEhPVFAwIAYLKoZIhvcNAQkQDAsxEQwPa3Rh
LmV4YW1wbGUuY29tBAQxMjM0oIIChzCCAoMwggIKoAMCAQICCQCls1QoG7BuPTAKBggqhkjO
PQQDAzA/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAP
BgNVBAoMCEJvZ3VzIENBMB4XDTE5MDYxMjE0MzEwNFoXDTIwMDYxMTE0MzEwNFowfDELMAkG
A1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRswGQYDVQQKExJWaWdp
bCBTZWN1cml0eSBMTEMxFzAVBgNVBAsTDktleSBNYW5hZ2VtZW50MRgwFgYDVQQDEw9rdGEu
ZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASX9l7E3VS3GAEiiRrVozgCBQfL
F67IhOxtbQviD/ojhHSQmflLyfRJ8e7+nbWlOLstRc7lgmq+OQVaSlStkzVk/BO1wE5BgUyF
xje+sieUtPRXVqfoVZCJJsgiSbo181ejgZQwgZEwCwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIB
DQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0cnVzdGVkIGZvciBhbnkgcHVycG9z
ZS4wHQYDVR0OBBYEFG2bXP0Dr7W51YvxZJ8aVuC1rU0PMB8GA1UdIwQYMBaAFPI12zQE2qVV
8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMAZ4lqTtdbaDLFfHywaQYwOWBkL3d0wH
EsNZTW1qQKy/oY3tXc0O6cbJZ5JJb9wk8QIwblXm8+JjdEJHsNjSv4rcJZou4vkMT7PzEme2
BbMkwOWeIdhmy1vszd8TQgvdb36XMYIDBzCCAwMCAQOAFG2bXP0Dr7W51YvxZJ8aVuC1rU0P
MAsGCWCGSAFlAwQCAqCCAmUwGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEZMBwGCSqGSIb3
DQEJBTEPFw0xOTA2MTIxOTM1NTFaMCUGCyqGSIb3DQEJEAIHMRYEFCe4nFY7FiJRnReHHHm/
rIht3/g9MD8GCSqGSIb3DQEJBDEyBDA3gzQlzfvylOn9Rf59kMSa1K2IyOBA5Eoeiyp83Bmj
KasomGorn9htte1iFPbxPRUwggG/BglghkgBZQIBBUExggGwMIIBrAQUJ7icVjsWIlGdF4cc
eb+siG3f+D0wggGSoIH+MH8GCWCGSAFlAgEQAARyMHAxCzAJBgNVBAYTAlVTMQswCQYDVQQI
EwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEOMAwGA1UEAxMFQWxp
Y2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29tMHsGCWCGSAFlAgEQAARuMGwx
CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMH
RXhhbXBsZTEMMAoGA1UEAxMDQm9iMR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20w
gY4wgYsGCWCGSAFlAgEQAAR+MHwxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UE
BxMHSGVybmRvbjEbMBkGA1UEChMSVmlnaWwgU2VjdXJpdHkgTExDMRcwFQYDVQQLEw5LZXkg
TWFuYWdlbWVudDEYMBYGA1UEAxMPa3RhLmV4YW1wbGUuY29tMAoGCCqGSM49BAMDBGYwZAIw
Z7DXliUb8FDKs+BadyCY+IJobPnQ6UoLldMj3pKEowONPifqrbWBJJ5cQQNgW6YuAjBbjSlY
goRV+bq4fdgOOj25JFqa80xnXGtQqjm/7NSII5SbdJk+DT7KCkSbkElkbgQ=
"""

    def setUp(self):
        # Every test decodes the message as a CMS ContentInfo.
        self.asn1Spec = rfc5652.ContentInfo()

    def testDerCodec(self):
        # DER round-trip the outer ContentInfo, then locate the RFC 7191
        # signed attribute and check its pkgID payload.
        substrate = pem.readBase64fromText(self.message1_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)

        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        self.assertEqual(rfc5652.id_signedData, asn1Object["contentType"])

        sd, rest = der_decoder(asn1Object["content"], asn1Spec=rfc5652.SignedData())

        for sa in sd["signerInfos"][0]["signedAttrs"]:
            sat = sa["attrType"]
            sav0 = sa["attrValues"][0]

            if sat == rfc7191.id_aa_KP_keyPkgIdAndReceiptReq:
                sav, rest = der_decoder(
                    sav0, asn1Spec=rfc7191.KeyPkgIdentifierAndReceiptReq()
                )
                self.assertFalse(rest)
                self.assertTrue(sav.prettyPrint())
                self.assertEqual(sav0, der_encoder(sav))

                # expected pkgID from the fixture message
                package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
                package_id = pem.readBase64fromText(package_id_pem_text)
                self.assertEqual(package_id, sav["pkgID"])

    def testOpenTypes(self):
        # Same message, but let the decoder resolve open types automatically,
        # so the attribute values are already structured objects.
        substrate = pem.readBase64fromText(self.message1_pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True
        )
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        self.assertEqual(rfc5652.id_signedData, asn1Object["contentType"])

        v3 = rfc5652.CMSVersion().subtype(value="v3")
        self.assertEqual(v3, asn1Object["content"]["version"])

        for sa in asn1Object["content"]["signerInfos"][0]["signedAttrs"]:
            if sa["attrType"] == rfc7191.id_aa_KP_keyPkgIdAndReceiptReq:
                package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
                package_id = pem.readBase64fromText(package_id_pem_text)
                self.assertEqual(package_id, sa["attrValues"][0]["pkgID"])
class ReceiptTestCase(unittest.TestCase):
    """RFC 7191 key-package-receipt decoding tests."""
    # CMS SignedData whose eContent is an id-ct-KP-keyPackageReceipt.
    message2_pem_text = """\
MIIEdAYJKoZIhvcNAQcCoIIEZTCCBGECAQMxDTALBglghkgBZQMEAgIwgawGCmCGSAFlAgEC
TgOggZ0EgZowgZcEFCe4nFY7FiJRnReHHHm/rIht3/g9MH8GCWCGSAFlAgEQAARyMHAxCzAJ
BgNVBAYTAlVTMQswCQYDVQQIEwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhh
bXBsZTEOMAwGA1UEAxMFQWxpY2UxIDAeBgkqhkiG9w0BCQEWEWFsaWNlQGV4YW1wbGUuY29t
oIICfDCCAngwggH+oAMCAQICCQCls1QoG7BuOzAKBggqhkjOPQQDAzA/MQswCQYDVQQGEwJV
UzELMAkGA1UECAwCVkExEDAOBgNVBAcMB0hlcm5kb24xETAPBgNVBAoMCEJvZ3VzIENBMB4X
DTE5MDUyOTE0NDU0MVoXDTIwMDUyODE0NDU0MVowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
AlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAwDgYDVQQKEwdFeGFtcGxlMQ4wDAYDVQQDEwVBbGlj
ZTEgMB4GCSqGSIb3DQEJARYRYWxpY2VAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQA
IgNiAAT4zZ8HL+xEDpXWkoWp5xFMTz4u4Ae1nF6zXCYlmsEGD5vPu5hl9hDEjd1UHRgJIPoy
3fJcWWeZ8FHCirICtuMgFisNscG/aTwKyDYOFDuqz/C2jyEwqgWCRyxyohuJXtmjgZQwgZEw
CwYDVR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBi
ZSB0cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFMS6Wg4+euM8gbD0Aqpouxbg
lg41MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2gAMGUC
MGO5H9E1uAveRGGaf48lN4pov2yH+hCAc5hOAuZKe/f40MKSF8q4w2ij+0euSaKFiAIxAL3g
xp6sMitCmLQgOH6/RBIC/2syJ97y0KVp9da0PDAvwxLugCHTKZPjjpSLPHHc9TGCARwwggEY
AgEDgBTEuloOPnrjPIGw9AKqaLsW4JYONTALBglghkgBZQMEAgKgejAZBgkqhkiG9w0BCQMx
DAYKYIZIAWUCAQJOAzAcBgkqhkiG9w0BCQUxDxcNMTkwNjEzMTYxNjA4WjA/BgkqhkiG9w0B
CQQxMgQwQSWYpq4jwhMkmS0as0JL3gjYxKLgDfzP2ndTNsAY0m9p8Igp8ZcK4+5n9fXJ43vU
MAoGCCqGSM49BAMDBGgwZgIxAMfq2EJ5pSl9tGOEVJEgZitc266ljrOg5GDjkd2d089qw1A3
bUcOYuCdivgxVuhlAgIxAPR9JavxziwCbVyBUWOAiKKYfglTgG3AwNmrKDj0NtXUQ9qDmGAc
6L+EAY2P5OVB8Q==
"""
    def setUp(self):
        # Every test decodes a full CMS ContentInfo structure.
        self.asn1Spec = rfc5652.ContentInfo()
    def testDerCodec(self):
        """Round-trip the message and decode the KeyPackageReceipt eContent."""
        substrate = pem.readBase64fromText(self.message2_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        # Re-encoding must reproduce the original DER bytes exactly.
        self.assertEqual(substrate, der_encoder(asn1Object))
        self.assertEqual(rfc5652.id_signedData, asn1Object["contentType"])
        sd, rest = der_decoder(asn1Object["content"], asn1Spec=rfc5652.SignedData())
        self.assertFalse(rest)
        self.assertTrue(sd.prettyPrint())
        self.assertEqual(asn1Object["content"], der_encoder(sd))
        oid = sd["encapContentInfo"]["eContentType"]
        self.assertEqual(rfc7191.id_ct_KP_keyPackageReceipt, oid)
        receipt, rest = der_decoder(
            sd["encapContentInfo"]["eContent"], asn1Spec=rfc7191.KeyPackageReceipt()
        )
        self.assertFalse(rest)
        self.assertTrue(receipt.prettyPrint())
        self.assertEqual(sd["encapContentInfo"]["eContent"], der_encoder(receipt))
        # Known package id embedded in the fixture message.
        package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
        package_id = pem.readBase64fromText(package_id_pem_text)
        self.assertEqual(package_id, receipt["receiptOf"]["pkgID"])
    def testOpenTypes(self):
        """Decode with ``decodeOpenTypes=True``; the eContent still needs an
        explicit second decode because it is wrapped in an OCTET STRING."""
        substrate = pem.readBase64fromText(self.message2_pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True
        )
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        self.assertEqual(rfc5652.id_signedData, asn1Object["contentType"])
        v3 = rfc5652.CMSVersion().subtype(value="v3")
        self.assertEqual(v3, asn1Object["content"]["version"])
        for sa in asn1Object["content"]["signerInfos"][0]["signedAttrs"]:
            self.assertIn(sa["attrType"], rfc5652.cmsAttributesMap)
            if sa["attrType"] == rfc5652.id_messageDigest:
                self.assertIn("0x412598a6ae2", sa["attrValues"][0].prettyPrint())
        ct_oid = asn1Object["content"]["encapContentInfo"]["eContentType"]
        self.assertIn(ct_oid, rfc5652.cmsContentTypesMap)
        self.assertEqual(ct_oid, rfc7191.id_ct_KP_keyPackageReceipt)
        # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
        # automatically decode it
        sd_eci = asn1Object["content"]["encapContentInfo"]
        receipt, rest = der_decoder(
            sd_eci["eContent"],
            asn1Spec=rfc5652.cmsContentTypesMap[sd_eci["eContentType"]],
        )
        package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
        package_id = pem.readBase64fromText(package_id_pem_text)
        self.assertEqual(package_id, receipt["receiptOf"]["pkgID"])
class ErrorTestCase(unittest.TestCase):
    """RFC 7191 key-package-error decoding tests."""
    # CMS SignedData whose eContent is an id-ct-KP-keyPackageError.
    message3_pem_text = """\
MIIEbwYJKoZIhvcNAQcCoIIEYDCCBFwCAQMxDTALBglghkgBZQMEAgIwga0GCmCGSAFlAgEC
TgaggZ4EgZswgZigFgQUJ7icVjsWIlGdF4cceb+siG3f+D0wewYJYIZIAWUCARAABG4wbDEL
MAkGA1UEBhMCVVMxCzAJBgNVBAgTAlZBMRAwDgYDVQQHEwdIZXJuZG9uMRAwDgYDVQQKEwdF
eGFtcGxlMQwwCgYDVQQDEwNCb2IxHjAcBgkqhkiG9w0BCQEWD2JvYkBleGFtcGxlLmNvbQoB
CqCCAncwggJzMIIB+qADAgECAgkApbNUKBuwbjwwCgYIKoZIzj0EAwMwPzELMAkGA1UEBhMC
VVMxCzAJBgNVBAgMAlZBMRAwDgYDVQQHDAdIZXJuZG9uMREwDwYDVQQKDAhCb2d1cyBDQTAe
Fw0xOTA1MjkxOTIwMTNaFw0yMDA1MjgxOTIwMTNaMGwxCzAJBgNVBAYTAlVTMQswCQYDVQQI
EwJWQTEQMA4GA1UEBxMHSGVybmRvbjEQMA4GA1UEChMHRXhhbXBsZTEMMAoGA1UEAxMDQm9i
MR4wHAYJKoZIhvcNAQkBFg9ib2JAZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNi
AAQxpGJVLxa83xhyal+rvmMFs4xS6Q19cCDoAvQkkFe0gUC4glxlWWQuf/FvLCRwwscr877D
1FZRBrYKPD6Hxv/UKX6Aimou0TnnxsPk98zZpikn9gTrJn2cF9NCzvPVMfmjgZQwgZEwCwYD
VR0PBAQDAgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0
cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFMprZnLeLJtXf5iO4sMq02aOwhql
MB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMAoGCCqGSM49BAMDA2cAMGQCMBVu
hLo58RhCiYsOLZFSR3vWHPDCJBnO1vE1uixqEjONHxlBoeGN2MmWs/9PppcHCwIwN9HB5jPc
J7gTjA9+ipCe+qkztmV+Gy2NBAY6xYC0gh+pb+X5OAI7y7HdctXp+PfrMYIBGzCCARcCAQOA
FMprZnLeLJtXf5iO4sMq02aOwhqlMAsGCWCGSAFlAwQCAqB6MBkGCSqGSIb3DQEJAzEMBgpg
hkgBZQIBAk4GMBwGCSqGSIb3DQEJBTEPFw0xOTA2MTMxNjE2MDhaMD8GCSqGSIb3DQEJBDEy
BDCgXFTUc3ZInjt+MWYkYmXYERk4FgErEZNILlWgVl7Z9pImgLObIpdrGqGPt06/VkwwCgYI
KoZIzj0EAwMEZzBlAjEAsjJ3iWRUteMKBVsjaYeN6TG9NITRTOpRVkSVq55DcnhwS9g9lu8D
iNF8uKtW/lk0AjA7z2q40N0lamXkSU7ECasiWOYV1X4cWGiQwMZDKknBPDqXqB6Es6p4J+qe
0V6+BtY=
"""
    def setUp(self):
        # Every test decodes a full CMS ContentInfo structure.
        self.asn1Spec = rfc5652.ContentInfo()
    def testDerCodec(self):
        """Round-trip the message and decode the KeyPackageError eContent."""
        substrate = pem.readBase64fromText(self.message3_pem_text)
        asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        # Re-encoding must reproduce the original DER bytes exactly.
        self.assertEqual(substrate, der_encoder(asn1Object))
        self.assertEqual(rfc5652.id_signedData, asn1Object["contentType"])
        sd, rest = der_decoder(asn1Object["content"], asn1Spec=rfc5652.SignedData())
        self.assertFalse(rest)
        self.assertTrue(sd.prettyPrint())
        self.assertEqual(asn1Object["content"], der_encoder(sd))
        oid = sd["encapContentInfo"]["eContentType"]
        self.assertEqual(rfc7191.id_ct_KP_keyPackageError, oid)
        kpe, rest = der_decoder(
            sd["encapContentInfo"]["eContent"], asn1Spec=rfc7191.KeyPackageError()
        )
        self.assertFalse(rest)
        self.assertTrue(kpe.prettyPrint())
        self.assertEqual(sd["encapContentInfo"]["eContent"], der_encoder(kpe))
        # Known package id embedded in the fixture message.
        package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
        package_id = pem.readBase64fromText(package_id_pem_text)
        self.assertEqual(package_id, kpe["errorOf"]["pkgID"])
        self.assertEqual(rfc7191.EnumeratedErrorCode(value=10), kpe["errorCode"])
    def testOpenTypes(self):
        """Decode with ``decodeOpenTypes=True``; the eContent still needs an
        explicit second decode because it is wrapped in an OCTET STRING."""
        substrate = pem.readBase64fromText(self.message3_pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True
        )
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))
        self.assertEqual(rfc5652.id_signedData, asn1Object["contentType"])
        v3 = rfc5652.CMSVersion().subtype(value="v3")
        self.assertEqual(v3, asn1Object["content"]["version"])
        for sa in asn1Object["content"]["signerInfos"][0]["signedAttrs"]:
            self.assertIn(sa["attrType"], rfc5652.cmsAttributesMap)
            if sa["attrType"] == rfc5652.id_messageDigest:
                self.assertIn("0xa05c54d4737", sa["attrValues"][0].prettyPrint())
        ct_oid = asn1Object["content"]["encapContentInfo"]["eContentType"]
        self.assertIn(ct_oid, rfc5652.cmsContentTypesMap)
        self.assertEqual(rfc7191.id_ct_KP_keyPackageError, ct_oid)
        # Since receipt is inside an OCTET STRING, decodeOpenTypes=True cannot
        # automatically decode it
        sd_eci = asn1Object["content"]["encapContentInfo"]
        kpe, rest = der_decoder(
            sd_eci["eContent"],
            asn1Spec=rfc5652.cmsContentTypesMap[sd_eci["eContentType"]],
        )
        package_id_pem_text = "J7icVjsWIlGdF4cceb+siG3f+D0="
        package_id = pem.readBase64fromText(package_id_pem_text)
        self.assertEqual(package_id, kpe["errorOf"]["pkgID"])
        self.assertEqual(rfc7191.EnumeratedErrorCode(value=10), kpe["errorCode"])
# Collect every TestCase defined in this module into a single suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=2)
    outcome = runner.run(suite)
    sys.exit(not outcome.wasSuccessful())
| 45.272436 | 84 | 0.787044 |
acfb1aad374a9bc52d3bdad235b3d045fe5e2047 | 5,493 | py | Python | docs/source/conf.py | jethornton/pman | c26e526478fc52dd097034f8451db6ef45df08b2 | [
"MIT"
] | null | null | null | docs/source/conf.py | jethornton/pman | c26e526478fc52dd097034f8451db6ef45df08b2 | [
"MIT"
] | null | null | null | docs/source/conf.py | jethornton/pman | c26e526478fc52dd097034f8451db6ef45df08b2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'Pandoc Man Pages'
copyright = u'2021, John Thornton'
author = u'John Thornton'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on ``language = None``; the
# explicit equivalent is 'en'.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# NOTE(review): 'sphinx_rtd_theme' is a third-party theme; confirm it is
# listed in the docs build requirements.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PandocManPagesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PandocManPages.tex', u'Pandoc Man Pages Documentation',
     u'John Thornton', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pandocmanpages', u'Pandoc Man Pages Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'PandocManPages', u'Pandoc Man Pages Documentation',
     author, 'PandocManPages', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 29.853261 | 79 | 0.646823 |
acfb1ad83074d4f4416976a15663e0d26d6d7a74 | 26,322 | py | Python | workspace/module/python-2.7/LxBasic/bscMethods/_bscMtdRaw.py | no7hings/Lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | 2 | 2018-03-06T03:33:55.000Z | 2019-03-26T03:25:11.000Z | workspace/module/python-2.7/LxBasic/bscMethods/_bscMtdRaw.py | no7hings/lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | null | null | null | workspace/module/python-2.7/LxBasic/bscMethods/_bscMtdRaw.py | no7hings/lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | null | null | null | # coding:utf-8
from LxBasic import bscMtdCore, bscCfg, bscMtdCore
from LxBasic.bscMethods import _bscMtdPath
class Raw(bscMtdCore.Mtd_BscUtility):
    """Helpers for arbitrary raw values."""
    @classmethod
    def toHash(cls, raw):
        # Stringify first (python-2 ``unicode``) so any value can be fed to
        # the shared string-hash helper.
        return cls._stringToHash(unicode(raw))
class String(bscMtdCore.Mtd_BscUtility):
    """String parsing/conversion helpers (python-2.7: relies on ``unicode``)."""
    @classmethod
    def toNumberEmbeddedList(cls, string):
        """Split into alternating text/number pieces, e.g. 'a10b' -> ['a', 10, 'b']."""
        pieces = cls.MOD_re.compile(r'(\d+)').split(unicode(string))
        # Odd slots hold the digit groups captured by the pattern; make them ints.
        pieces[1::2] = map(int, pieces[1::2])
        return pieces
    @classmethod
    def toVariantCommand(cls, varName, string):
        """Turn a template such as ``"a<x>b"`` into a python assignment
        command string like ``varName = u'%s%s%s' % ("a", x, "b")`` where
        ``<x>`` pieces stay as variable references and the rest are quoted."""
        def getStringLis():
            # noinspection RegExpSingleCharAlternation
            return [i for i in cls.MOD_re.split("<|>", string) if i]
        #
        def getVariantLis():
            # Names appearing between angle brackets are "variants".
            varPattern = cls.MOD_re.compile(r'[<](.*?)[>]', cls.MOD_re.S)
            return cls.MOD_re.findall(varPattern, string)
        #
        def getVarStringLis():
            # Quote literal pieces; leave variant names bare.
            lis = []
            for i in strings:
                if i in variants:
                    lis.append(i)
                else:
                    v = '''"%s"''' % i
                    lis.append(v)
            return lis
        #
        strings = getStringLis()
        variants = getVariantLis()
        #
        varStrings = getVarStringLis()
        #
        command = '''{0} = u'{1}' % ({2})'''.format(varName, '%s' * len(strings), ', '.join(varStrings))
        return command
    @classmethod
    def toList(cls, string, includes=None):
        """Normalize *string* (single string or sequence) to a list,
        optionally keeping only items present in *includes*."""
        lis = []
        if isinstance(string, (str, unicode)):
            if includes:
                if string in includes:
                    lis = [string]
            else:
                lis = [string]
        elif isinstance(string, (tuple, list)):
            for i in string:
                if includes:
                    if i in includes:
                        lis.append(i)
                else:
                    lis.append(i)
        return lis
    @staticmethod
    def toRgb(string, maximum=255):
        """Derive a deterministic pseudo-random RGB color from *string*,
        scaled to 0..*maximum* per channel."""
        # Concatenate zero-padded character codes into one big integer seed.
        a = int(''.join([str(ord(i)).zfill(3) for i in string]))
        b = a % 3
        i = int(a / 256) % 3
        n = int(a % 256)
        # Pick which channel gets the dominant 64/128/192 component based on
        # the seed's parity and residues.
        if a % 2:
            if i == 0:
                r, g, b = 64 + 64 * b, n, 0
            elif i == 1:
                r, g, b = 0, 64 + 64 * b, n
            else:
                r, g, b = 0, n, 64 + 64 * b
        else:
            if i == 0:
                r, g, b = 0, n, 64 + 64 * b
            elif i == 1:
                r, g, b = 64 + 64 * b, 0, n
            else:
                r, g, b = 64 + 64 * b, n, 0
        #
        return r / 255.0 * maximum, g / 255.0 * maximum, b / 255.0 * maximum
    @classmethod
    def toUniqueId(cls, string):
        """Derive a deterministic unique id from *string* (shared helper)."""
        return cls._stringToUniqueId(string)
    @classmethod
    def findSpans(cls, contentStr, keywordStr, matchCaseFlag=False, matchWordFlag=False):
        """Return (start, end) spans of every *keywordStr* occurrence.

        NOTE(review): *keywordStr* is interpolated into the pattern without
        ``re.escape``, so regex metacharacters in the keyword are interpreted
        as regex syntax — confirm that is intended.
        """
        lis = []
        if contentStr and keywordStr:
            if matchWordFlag is True:
                p = r'\b{}\b'.format(keywordStr)
            else:
                p = keywordStr
            if matchCaseFlag is True:
                r = cls.MOD_re.finditer(p, contentStr)
            else:
                r = cls.MOD_re.finditer(p, contentStr, cls.MOD_re.I)
            for i in r:
                lis.append(i.span())
        return lis
class Variant(bscMtdCore.Mtd_BscUtility):
    """Variant-substitution helpers."""
    @classmethod
    def covertTo(cls, varName, string):
        """Build a python assignment command from a ``"a<x>b"`` template.

        Deduplicated: the previous body was a line-for-line copy of
        ``String.toVariantCommand``; it now delegates to that single
        implementation (same base class, so ``MOD_re`` resolution is
        identical).  The name ``covertTo`` (sic) is kept for callers.

        :param varName: left-hand-side variable name of the generated command.
        :param string: template whose ``<name>`` pieces stay as variable
            references while the rest are quoted literals.
        :return: command string, e.g. ``v = u'%s%s' % ("a", x)``.
        """
        return String.toVariantCommand(varName, string)
class StrUnderline(bscMtdCore.Mtd_BscUtility):
    """Conversions from underscore-separated words."""
    @classmethod
    def toLabel(cls, *labels):
        """Join words camelCase-style: first word kept as-is, the rest capitalized."""
        head = labels[0]
        tail = [word.capitalize() for word in labels[1:]]
        return head + ''.join(tail)
    @classmethod
    def toCamelcase(cls, string):
        """Convert ``snake_case`` to ``camelCase`` by uppercasing the character after each underscore."""
        def _upperAfterUnderscore(match):
            return match.group(1).upper()
        return cls.MOD_re.sub(r'_(\w)', _upperAfterUnderscore, string)
class StrCamelcase(bscMtdCore.Mtd_BscUtility):
    """Conversions from camelCase identifiers."""
    @classmethod
    def toPrettify(cls, string):
        """Split camelCase words and capitalize each, e.g. 'fooBar1' -> 'Foo Bar1'."""
        return ' '.join([str(x).capitalize() for x in cls.MOD_re.findall(r'[a-zA-Z][a-z]*[0-9]*', string)])
    @classmethod
    def toUnderline(cls, string):
        """Split camelCase words and join lowercase with '_', e.g. 'fooBar' -> 'foo_bar'."""
        return '_'.join([str(x).lower() for x in cls.MOD_re.findall(r'[a-zA-Z][a-z]*[0-9]*', string)])
    @classmethod
    def toUiPath(cls, strings, isPrettify=False):
        """Join *strings* into a UI path separated by '>'.

        Only prettifies the components when *isPrettify* is True; otherwise
        they are passed through unchanged.
        """
        if isPrettify is True:
            strings = [cls.toPrettify(i) for i in cls._string2list(strings)]
        return cls._toPathString(strings, '>')
class TxtHtml(bscMtdCore.Mtd_BscUtility):
    """Builders for rich-text/HTML snippets; palette and fonts come from ``bscCfg.Ui()``."""
    # Shared UI palette: indexed color list, name->color dict, font families.
    color_html_lis = bscCfg.Ui().htmlColors
    color_html_dic = bscCfg.Ui().htmlColorDict
    family_lis = bscCfg.Ui().families
    @classmethod
    def _getHtmlColor(cls, *args):
        """Resolve a color spec to '#rrggbb': a number indexes the palette
        list, a string looks up the palette dict; anything else falls back
        to '#dfdfdf'."""
        arg = args[0]
        if isinstance(arg, (float, int)):
            return cls.color_html_lis[int(arg)]
        elif isinstance(arg, (str, unicode)):
            return cls.color_html_dic.get(arg, '#dfdfdf')
        return '#dfdfdf'
    @classmethod
    def toHtml(cls, string, fontColor=u'white', fontSize=10, lineHeight=12):
        """Wrap *string* in a full <html> document with font/color styling."""
        htmlColor = cls._getHtmlColor(fontColor)
        #
        html = u'''
        <html>
        <style type="text/css">p{{line-height:{4}px}}</style>
        <span style="font-family:'{2}';font-size:{1}pt;color:{3};">{0}</span>
        </html>
        '''.format(string, fontSize, cls.family_lis[0], htmlColor, lineHeight)
        return html
    @classmethod
    def getHtmls(cls, string, fontColor=u'white', fontSize=10, lineHeight=12):
        """Like :meth:`toHtml`, but splits CRLF-separated text into <p> blocks."""
        htmlColor = cls._getHtmlColor(fontColor)
        #
        stringLis = string.split('\r\n')
        if len(stringLis) > 1:
            s = ''.join([u'<p>{}</p>'.format(i) for i in stringLis])
        else:
            s = string
        #
        html = u'''
        <html>
        <style>p{{line-height:{4}px}}</style>
        <span style="font-family:'{2}';font-size:{1}pt;color:{3};">{0}</span>
        </html>
        '''.format(s, fontSize, cls.family_lis[0], htmlColor, lineHeight)
        return html
    @classmethod
    def toHtmlSpan(cls, string, fontColor=u'white', fontSize=10):
        """Wrap *string* in a single styled <span> fragment."""
        htmlColor = cls._getHtmlColor(fontColor)
        #
        viewExplain = u'''
        <span style="font-family:'{2}';font-size:{1}pt;color:{3};">{0}</span>
        '''.format(string, fontSize, cls.family_lis[0], htmlColor)
        return viewExplain
    @classmethod
    def toHtmlSpanTime(cls, lString='', fontColor=u'gray', fontSize=10):
        """Styled <span> of '<timestamp>' (from the shared pretty-time helper),
        optionally prefixed with *lString*."""
        htmlColor = cls._getHtmlColor(fontColor)
        #
        string = cls._getActivePrettifyTime()
        htmlString = u'''
        <span style="font-family:'{3}';font-size:{2}pt;color:{4};">{1}<{0}></span>
        '''.format(string, lString, fontSize, cls.family_lis[0], htmlColor)
        return htmlString
    @classmethod
    def toHtmlSpanSuper(cls, string, fontColor=u'orange', fontSize=10):
        """Styled superscript <span> fragment."""
        htmlColor = cls._getHtmlColor(fontColor)
        viewSuper = u'''
        <span style="vertical-align:super;font-family:'{2}';font-size:{1}pt;color:{3};">{0}</span>
        '''.format(string, fontSize, cls.family_lis[0], htmlColor)
        return viewSuper
    @classmethod
    def toHtmlSpanSub(cls, string, fontColor=u'orange', fontSize=10):
        """Styled subscript <span> fragment."""
        htmlColor = cls._getHtmlColor(fontColor)
        viewSuper = u'''
        <span style="vertical-align:sub;font-family:'{2}';font-size:{1}pt;color:{3};">{0}</span>
        '''.format(string, fontSize, cls.family_lis[0], htmlColor)
        return viewSuper
    @classmethod
    def toHtmlMayaConnection(cls, sourceAttr, targetAttr, namespaceFilter):
        """Render a 'source >> target' Maya attribute connection as HTML.

        Integer arguments to ``toHtmlSpan`` are palette indices (see
        ``_getHtmlColor``); nodes matching *namespaceFilter* get a
        highlighted color index.
        """
        def getBranch(attr):
            namespace = _bscMtdPath.MaNodeString.namespace(attr)
            name = _bscMtdPath.MaNodeString.nodename(attr)
            attrName = _bscMtdPath.MaAttrpath.portpathString(attr)
            #
            namespacesep = _bscMtdPath.MaAttrpath.portsep()
            #
            if namespace:
                namespaceHtml = cls.toHtmlSpan(namespace, 7, 10) + cls.toHtmlSpan(namespacesep, 3, 10)
            else:
                namespaceHtml = ''
            #
            # Strip the attribute suffix from the node name before styling.
            if attr.startswith(namespaceFilter):
                html = namespaceHtml + cls.toHtmlSpan(name[:-len(attrName)], 4, 10) + cls.toHtmlSpan(attrName, 6, 10)
            else:
                html = namespaceHtml + cls.toHtmlSpan(name[:-len(attrName)], 1, 10) + cls.toHtmlSpan(attrName, 6, 10)
            #
            return html
        #
        sourceHtml = getBranch(sourceAttr)
        targetHtml = getBranch(targetAttr)
        #
        string = sourceHtml + cls.toHtmlSpan('>>', 3, 10) + targetHtml
        return string
    @classmethod
    def toHtmlMayaRenderImage(cls, prefix, string, fontSize=8, lineHeight=10):
        """Color-code the '/'-separated pieces of a render-image path by the
        token kind declared at the same position in *prefix*.

        NOTE(review): an unknown token in *prefix* raises KeyError from
        ``colorDic`` — confirm prefixes are always validated upstream.
        """
        htmls = []
        #
        colorDic = {
            '<Scene>': '#ff0000',
            '<Camera>': '#ffaa00',
            '<RenderLayer>': '#aaff00',
            '<Version>': '#00ff00',
            '<Extension>': '#00ffaa',
            '<RenderPass>': '#00aaff',
            '<RenderPassFileGroup>': '#0000ff'
        }
        colorIndexDic = {}
        if prefix and string:
            splitPrefix = prefix.split('/')
            for seq, i in enumerate(splitPrefix):
                colorIndexDic[seq] = colorDic[i]
            #
            splitString = string.split('/')
            for seq, s in enumerate(splitString):
                if s:
                    htmlColor = colorIndexDic[seq]
                    #
                    html = u'''<span style="font-family:'{2}';font-size:{1}pt;color:{3};">{0}</span>'''.format(
                        s, fontSize, cls.family_lis[0], htmlColor
                    )
                    htmls.append(html)
        #
        htmlSep = u'''<span style="font-family:'{2}';font-size:{1}pt;color:{3};">{0}</span>'''.format(u'>', fontSize, cls.family_lis[0], cls.color_html_lis[6]
        )
        #
        htmlString = u'''<html><style>p{{line-height:{1}px}}</style>{0}</html>'''.format(htmlSep.join(htmls), lineHeight)
        return htmlString
class Value(object):
    """Numeric helpers for UI values (stepping, remapping, pretty labels)."""
    @classmethod
    def stepTo(cls, value, delta, step, valueRange):
        """Move *value* by *step* in the direction of *delta*, clamped to *valueRange*.

        :param delta: sign decides direction (e.g. a wheel delta).
        :param valueRange: (minimum, maximum) inclusive bounds.
        """
        min0, max0 = valueRange
        min1, max1 = min0 + step, max0 - step
        if value < min1:
            if 0 < delta:
                value += step
            else:
                value = min0  # clamp to the lower bound
        elif min1 <= value <= max1:
            value += [-step, step][delta > 0]
        elif max1 < value:
            if delta < 0:
                value -= step
            else:
                value = max0  # clamp to the upper bound
        return value
    @classmethod
    def mapTo(cls, value, sourceValueRange, targetValueRange):
        """Linearly remap *value* from *sourceValueRange* into *targetValueRange*.

        A degenerate (zero or negative width) source range maps everything to
        the target minimum.
        """
        assert isinstance(sourceValueRange, (tuple, list)), 'Argument Error, "sourceValueRange" Must "tuple" or "list".'
        assert isinstance(targetValueRange, (tuple, list)), 'Argument Error, "targetValueRange" Must "tuple" or "list".'
        min0, max0 = sourceValueRange
        min1, max1 = targetValueRange
        if max0 - min0 > 0:
            percent = float(value - min0) / float(max0 - min0)
            return (max1 - min1) * percent + min1
        else:
            return min1
    @classmethod
    def toSizePrettify(cls, value):
        """Format an integer count with K/M/B/T suffixes (decimal, base 1000).

        Bug fix: the unit scan relied on python-2 integer ``/`` (floor
        division); ``//`` keeps that behavior and is also correct under
        python 3, where ``/`` would yield a truthy float for any non-zero
        value and pick the wrong (largest) unit.
        """
        string = value
        dv = 1000
        lis = [(dv ** 4, 'T'), (dv ** 3, 'B'), (dv ** 2, 'M'), (dv ** 1, 'K')]
        if value >= dv:
            for i in lis:
                s = int(abs(value)) // i[0]  # was '/': py2 floor division
                if s:
                    string = str(round(float(value) / float(i[0]), 2)) + i[1]
                    break
        else:
            string = value
        return str(string)
    @classmethod
    def toFileSizePrettify(cls, value):
        """Format a byte count with K/M/G/T suffixes (binary, base 1024).

        Same ``//`` fix as :meth:`toSizePrettify`; assumes integer byte
        counts.
        """
        string = value
        dv = 1024
        lis = [(dv ** 4, 'T'), (dv ** 3, 'G'), (dv ** 2, 'M'), (dv ** 1, 'K')]
        for i in lis:
            s = int(abs(value)) // i[0]  # was '/': py2 floor division
            if s:
                string = str(round(float(value) / float(i[0]), 2)) + i[1]
                break
        return str(string)
    @classmethod
    def toPrettify(cls, value, useMode):
        """Dispatch: *useMode* 0 — decimal (base 1000); otherwise binary (base 1024)."""
        if useMode == 0:
            return cls.toSizePrettify(value)
        else:
            return cls.toFileSizePrettify(value)
    @classmethod
    def toPercentPrettify(cls, value, maximumValue, roundCount=3):
        """Return value/maximumValue as a percentage rounded to *roundCount*
        fractional digits (before scaling); +/-inf when the maximum is 0."""
        valueRange = 100
        if maximumValue > 0:
            percent = round(float(value) / float(maximumValue), roundCount) * valueRange
        else:
            if value > 0:
                percent = float(u'inf')
            elif value < 0:
                percent = float('-inf')
            else:
                percent = 0
        return percent
class Range(object):
    # Placeholder namespace — no range helpers implemented yet.
    pass
class List(object):
    """Generic helpers for plain python lists."""
    @classmethod
    def splitTo(cls, lis, splitCount):
        """Split *lis* into consecutive chunks of at most *splitCount* items."""
        chunks = []
        total = len(lis)
        for begin in range(0, total, splitCount):
            chunks.append(lis[begin:begin + splitCount])
        return chunks
    @classmethod
    def cleanupTo(cls, lis):
        """Return the unique, truthy items of *lis*, ordered by first occurrence."""
        uniques = [item for item in set(lis) if item]
        uniques.sort(key=lis.index)
        return uniques
    @classmethod
    def extendFrom(cls, lis, subLis):
        """Append (in place) each item of *subLis* not already present in *lis*."""
        for item in subLis:
            if item not in lis:
                lis.append(item)
    @staticmethod
    def toFrameRange(array):
        """Collapse integers into singletons and (start, end) pairs for runs.

        E.g. [1, 2, 3, 5] -> [(1, 3), 5].  NOTE: *array* is sorted in place
        as a side effect, and empty input raises ValueError (max/min).
        """
        ranges = []
        highest, lowest = max(array), min(array)
        rangeStart, rangeEnd = None, None
        total = len(array)
        array.sort()
        for position, current in enumerate(array):
            previous = array[position - 1] if position > 0 else None
            following = array[position + 1] if position < (total - 1) else None
            if previous is None and following is not None:
                # First element: a run may start here.
                rangeStart = lowest
                if current - following != -1:
                    ranges.append(rangeStart)
            elif previous is not None and following is None:
                # Last element: close a run or emit a singleton.
                rangeEnd = highest
                if current - previous == 1:
                    ranges.append((rangeStart, rangeEnd))
                else:
                    ranges.append(rangeEnd)
            elif previous is not None and following is not None:
                if current - previous != 1 and current - following != -1:
                    ranges.append(current)
                elif current - previous == 1 and current - following != -1:
                    rangeEnd = current
                    ranges.append((rangeStart, rangeEnd))
                elif current - previous != 1 and current - following == -1:
                    rangeStart = current
        return ranges
class Dict(object):
    """Lookup helpers for plain dictionaries."""
    @classmethod
    def getValue(cls, dic, key, failobj=None):
        """Return ``dic[key]``, or *failobj* when *key* is absent.

        Bug fix: the previous implementation only called ``get`` after an
        ``in`` test, so a missing key always yielded ``None`` and *failobj*
        was silently ignored.
        """
        return dic.get(key, failobj)
    @classmethod
    def getAsBoolean(cls, dic, key, failobj=False):
        """Return ``dic[key]`` (expected to be a boolean), or *failobj* when absent.

        Bug fix: previously a missing key always returned ``False`` even when
        the caller supplied a different *failobj*.
        """
        return dic.get(key, failobj)
class NestedArray(object):
    """Helpers for lists of lists."""
    @classmethod
    def mapTo(cls, nestedArray):
        """Cartesian product of the sub-lists, depth-first.

        :param nestedArray: etc.[[1, 2], [1, 2]]
        :return: etc.[[1, 1], [1, 2], [2, 1], [2, 2]]
        """
        def rcsFnc_(index_):
            # Recurse one level per sub-list; snapshot the combination when
            # every slot of ``c`` has been filled.
            if index_ < count:
                _array = nestedArray[index_]
                for _i in _array:
                    c[index_] = _i
                    rcsFnc_(index_ + 1)
            else:
                # Deep copy so later mutations of ``c`` do not alias results.
                lis.append(
                    bscCfg.BscUtility.MOD_copy.deepcopy(c)
                )
        lis = []
        count = len(nestedArray)
        c = [None]*count
        rcsFnc_(0)
        return lis
class Array(List):
    """Integer-array helpers (extends the generic ``List`` utilities)."""
    @classmethod
    def getDefects(cls, lis, useMode=0):
        """Return the integers missing from *lis* within its covered span.

        :param useMode: 0 — scan from ``min(lis)``; 1 — scan from 0.

        Improvements: ``i not in lis`` idiom, and membership is now tested
        against a ``set`` (O(1)) instead of the list (O(n) per probe).
        Assumes hashable integer items, which ``range``/``min``/``max``
        already require.
        """
        missing = []
        if lis:
            maxiNumber = max(lis)
            miniNumber = 0 if useMode == 1 else min(lis)
            present = set(lis)
            for i in range(miniNumber, maxiNumber + 1):
                if i not in present:
                    missing.append(i)
        return missing
    @classmethod
    def toRangecase(cls, lis):
        """Collapse *lis* into singletons and (start, end) pairs for runs.

        Deduplicated: the previous body was a line-for-line copy of the
        inherited ``List.toFrameRange``; it now delegates to it.  Unlike the
        parent, empty input returns [] (preserved behavior).  NOTE: *lis* is
        still sorted in place, as before.
        """
        if lis:
            return cls.toFrameRange(lis)
        return []
class Position2d(bscMtdCore.Mtd_BscUtility):
    """2D point helpers (screen coordinates: y grows downward is assumed —
    TODO confirm against callers)."""
    @classmethod
    def toRegion(cls, position, size):
        """Return which quadrant of a *size* area contains *position*:
        0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right.

        NOTE(review): ``width / 2`` is python-2 integer division for int
        inputs — confirm int coordinates are intended.
        """
        x, y = position
        width, height = size
        if 0 <= x < width / 2 and 0 <= y < height / 2:
            value = 0
        elif width / 2 <= x < width and 0 <= y < height / 2:
            value = 1
        elif 0 <= x < width / 2 and height / 2 <= y < height:
            value = 2
        else:
            value = 3
        return value
    @classmethod
    def regionTo(cls, position, size, maximumSize, offset):
        """Offset a *size* box from *position* so it stays inside
        *maximumSize*, flipping to the opposite side per quadrant (e.g. for
        tooltip/popup placement).  Returns (x, y, region)."""
        x, y = position
        width, height = size
        maxWidth, maxHeight = maximumSize
        xOffset, yOffset = offset
        region = cls.toRegion(
            position=position,
            size=(maxWidth, maxHeight)
        )
        if region == 0:
            x_ = x + xOffset
            y_ = y + yOffset
        elif region == 1:
            x_ = x - width - xOffset
            y_ = y + yOffset
        elif region == 2:
            x_ = x + xOffset
            y_ = y - height - yOffset
        else:
            x_ = x - width - xOffset
            y_ = y - height - yOffset
        return x_, y_, region
    @classmethod
    def toLength(cls, position0, position1):
        """Euclidean distance between two points."""
        x0, y0 = position0
        x1, y1 = position1
        return cls.MOD_math.sqrt(((x0 - x1) ** 2) + ((y0 - y1) ** 2))
    @classmethod
    def toAngle(cls, position0, position1):
        """Angle in degrees [0, 360) from *position0* to *position1*,
        measured clockwise from the +y axis (0 when the target is straight
        'down' the y axis, 90 along +x)."""
        x0, y0 = position0
        x1, y1 = position1
        radian = 0.0
        #
        r0 = 0.0
        r90 = cls.MOD_math.pi / 2.0
        r180 = cls.MOD_math.pi
        r270 = 3.0 * cls.MOD_math.pi / 2.0
        if x0 == x1:
            if y0 < y1:
                radian = r0
            elif y0 > y1:
                radian = r180
        elif y0 == y1:
            if x0 < x1:
                radian = r90
            elif x0 > x1:
                radian = r270
        elif x0 < x1 and y0 < y1:
            radian = cls.MOD_math.atan2((-x0 + x1), (-y0 + y1))
        elif x0 < x1 and y0 > y1:
            radian = r90 + cls.MOD_math.atan2((y0 - y1), (-x0 + x1))
        elif x0 > x1 and y0 > y1:
            radian = r180 + cls.MOD_math.atan2((x0 - x1), (y0 - y1))
        elif x0 > x1 and y0 < y1:
            radian = r270 + cls.MOD_math.atan2((-y0 + y1), (x0 - x1))
        return radian * 180 / cls.MOD_math.pi
class Rect2d(object):
    """Helpers for (x, y, width, height) rectangles."""
    @classmethod
    def isContainPos(cls, rect, position):
        """Return True when *position* lies inside *rect* (edges inclusive).

        Bug fix: the ``rect is not None`` guard used to run AFTER the tuple
        unpack, so passing ``None`` raised TypeError instead of returning
        False; the guard now runs first.
        """
        if rect is None:
            return False
        x0, y0, width, height = rect
        x1, y1 = position
        return x0 <= x1 <= x0 + width and y0 <= y1 <= y0 + height
class Size2d(object):
    """Helpers for (width, height) sizes."""
    @classmethod
    def remapTo(cls, width, height, maximum):
        """Scale (width, height) down, preserving aspect, so the larger side
        equals *maximum*; sizes already within *maximum* pass through.

        NOTE(review): a square size larger than *maximum* also passes through
        unscaled (neither ``>`` branch matches) — preserved as-is, confirm
        whether that is intended.
        """
        maxValue = max([width, height])
        if maxValue > maximum:
            if width > height:
                return maximum, maximum*(float(height)/float(width))
            elif width < height:
                return maximum*(float(width)/float(height)), maximum
        return width, height
    @classmethod
    def mapToRect(cls, size0, size1):
        """Fit an aspect-preserving box of *size0*'s ratio into *size1*,
        centered; returns (x, y, w, h).  Degenerate heights fall back to
        (0, 0, w0, h0).

        Improvement: removed the unused ``pr1``/``smax1`` locals.
        """
        w0, h0 = size0
        w1, h1 = size1
        if h0 > 0 and h1 > 0:
            pr0 = float(w0) / float(h0)
            smin1 = min(w1, h1)
            if pr0 > 1:
                w, h = smin1, smin1 / pr0
            elif pr0 < 1:
                w, h = smin1, smin1 * pr0
            else:
                w, h = smin1, smin1
            # Center the fitted box inside the target rect.
            x, y = (w1 - w) / 2, (h1 - h) / 2
            return x, y, w, h
        else:
            return 0, 0, w0, h0
class Ellipse2d(bscMtdCore.Mtd_BscUtility):
    """Circle/ellipse geometry helpers."""
    @classmethod
    def positionAtAngle(cls, center, radius, angle):
        """Point on a circle of diameter *radius* at *angle* (degrees).

        NOTE(review): despite the name, *center* behaves as the top-left of
        the circle's bounding box (a ``radius / 2`` offset is added to both
        axes) and *radius* is used as a diameter — confirm against callers.
        """
        x, y = center
        xp = cls.MOD_math.sin(cls.MOD_math.radians(angle)) * radius / 2 + x + radius / 2
        yp = cls.MOD_math.cos(cls.MOD_math.radians(angle)) * radius / 2 + y + radius / 2
        return xp, yp
class Frame(object):
    """Frame-number to wall-clock conversions."""
    @classmethod
    def toTime(cls, frameValue, fpsValue=24):
        """Convert a frame number to an (hours, minutes, seconds) tuple.

        Bug fix: uses ``//`` instead of python-2 integer ``/`` so the result
        stays integral under python 3 as well (``/`` would yield floats and a
        '1.0:...' time string downstream).
        """
        second = int(frameValue) // fpsValue
        h = second // 3600
        m = second // 60 - 60 * h
        s = second - 3600 * h - 60 * m
        return h, m, s
    @classmethod
    def toTimeString(cls, frameValue, fpsValue=24):
        """Convert a frame number to a zero-padded 'HH:MM:SS' string."""
        h, m, s = cls.toTime(frameValue, fpsValue)
        return '%s:%s:%s' % (str(h).zfill(2), str(m).zfill(2), str(s).zfill(2))
class Math2D(bscMtdCore.Mtd_BscUtility):
    """2D math helpers taking coordinates as separate scalars.

    NOTE(review): duplicates ``Position2d.toAngle``/``toLength`` (which take
    point tuples) — consider delegating to a single implementation.
    """
    @classmethod
    def getAngleByCoord(cls, x1, y1, x2, y2):
        """Angle in degrees [0, 360) from (x1, y1) to (x2, y2), measured
        clockwise from the +y axis."""
        radian = 0.0
        #
        r0 = 0.0
        r90 = cls.MOD_math.pi / 2.0
        r180 = cls.MOD_math.pi
        r270 = 3.0 * cls.MOD_math.pi / 2.0
        #
        if x1 == x2:
            if y1 < y2:
                radian = r0
            elif y1 > y2:
                radian = r180
        elif y1 == y2:
            if x1 < x2:
                radian = r90
            elif x1 > x2:
                radian = r270
        elif x1 < x2 and y1 < y2:
            radian = cls.MOD_math.atan2((-x1 + x2), (-y1 + y2))
        elif x1 < x2 and y1 > y2:
            radian = r90 + cls.MOD_math.atan2((y1 - y2), (-x1 + x2))
        elif x1 > x2 and y1 > y2:
            radian = r180 + cls.MOD_math.atan2((x1 - x2), (y1 - y2))
        elif x1 > x2 and y1 < y2:
            radian = r270 + cls.MOD_math.atan2((-y1 + y2), (x1 - x2))
        #
        return radian * 180 / cls.MOD_math.pi
    @classmethod
    def getLengthByCoord(cls, x1, y1, x2, y2):
        """Euclidean distance between (x1, y1) and (x2, y2)."""
        return cls.MOD_math.sqrt(((x1 - x2) ** 2) + ((y1 - y2) ** 2))
class Color(bscMtdCore.Mtd_BscUtility):
    """Color space conversion helpers."""
    @classmethod
    def mapToFloat(cls, r, g, b):
        """Map 0-255 channel values to 0.0-1.0 floats."""
        def mapFnc_(v):
            return float(v) / float(255)
        return mapFnc_(r), mapFnc_(g), mapFnc_(b)
    @classmethod
    def mapTo256(cls, r, g, b):
        """Map 0.0-1.0 channel floats back to 0-255 ints.

        Bug fix: the scale factor was 256, so v == 1.0 produced the
        out-of-range value 256 and the conversion was not the inverse of
        :meth:`mapToFloat` (which divides by 255).  Now scales by 255 with
        rounding, matching the 0-255 scaling used by :meth:`hsv2rgb`.
        (Method name kept for callers.)
        """
        def mapFnc_(v):
            return int(round(v * 255))
        return mapFnc_(r), mapFnc_(g), mapFnc_(b)
    @classmethod
    def hsv2rgb(cls, h, s, v, maximum=255):
        """Convert HSV (h in degrees, s/v in 0-1) to RGB.

        :param maximum: 255 — rounded int channels; any other value — raw
            0-1 floats (the *maximum* itself is not applied in that branch).
        """
        # Clamp saturation/value and wrap hue into [0, 360).
        h = float(h % 360.0)
        s = float(max(min(s, 1.0), 0.0))
        v = float(max(min(v, 1.0), 0.0))
        # Standard HSV -> RGB sector decomposition.
        c = v * s
        x = c * (1 - abs((h / 60.0) % 2 - 1))
        m = v - c
        if 0 <= h < 60:
            r_, g_, b_ = c, x, 0
        elif 60 <= h < 120:
            r_, g_, b_ = x, c, 0
        elif 120 <= h < 180:
            r_, g_, b_ = 0, c, x
        elif 180 <= h < 240:
            r_, g_, b_ = 0, x, c
        elif 240 <= h < 300:
            r_, g_, b_ = x, 0, c
        else:
            r_, g_, b_ = c, 0, x
        #
        if maximum == 255:
            r, g, b = int(round((r_ + m) * maximum)), int(round((g_ + m) * maximum)), int(round((b_ + m) * maximum))
        else:
            r, g, b = float((r_ + m)), float((g_ + m)), float((b_ + m))
        return r, g, b
class UniqueId(bscMtdCore.Mtd_BscUtility):
    """UUID helpers; ``MOD_re`` and the ``_*UniqueId`` helpers come from the base class.

    Python 2 code (uses the ``unicode`` builtin).
    """

    @classmethod
    def getByString(cls, string):
        """Return the id derived from *string* (delegates to the base class)."""
        return cls._stringToUniqueId(string)

    @classmethod
    def getByStrings(cls, *args):
        """Return the id derived from several strings (delegates to the base class)."""
        return cls._stringsToUniqueId(*args)

    @classmethod
    def new(cls):
        """Return a freshly generated unique id (delegates to the base class)."""
        return cls._getUniqueId()

    @classmethod
    def isUsable(cls, string):
        """Return True if *string* starts with an upper-case hex UUID (8-4-4-4-12).

        NOTE(review): the pattern is not anchored at the end and accepts only
        upper-case hex digits -- confirm this is intended for the ids used here.
        """
        boolean = False
        if string is not None:
            pattern = cls.MOD_re.compile(r'[0-9A-F]' * 8 + '-' + (r'[0-9A-F]' * 4 + '-') * 3 + r'[0-9A-F]' * 12)
            match = pattern.match(string)
            if match:
                boolean = True
        return boolean

    @classmethod
    def toList(cls, uniqueId):
        """Normalize *uniqueId* (string or sequence) into a list of usable ids.

        Unusable entries are silently dropped; any other input yields [].
        """
        lis = []
        if isinstance(uniqueId, str) or isinstance(uniqueId, unicode):
            if cls.isUsable(uniqueId):
                lis = [uniqueId]
        elif isinstance(uniqueId, tuple) or isinstance(uniqueId, list):
            for i in uniqueId:
                if cls.isUsable(i):
                    lis.append(i)
        return lis
class IconKeyword(object):
    """Build icon-resource keys for maya node types."""

    @staticmethod
    def mayaPng(nodeTypeString):
        """Return the PNG icon key, e.g. 'maya/out_transform'."""
        return 'maya/out_' + nodeTypeString

    @staticmethod
    def mayaSvg(nodeTypeString):
        """Return the SVG icon key, e.g. 'maya/transform'."""
        return 'maya/' + nodeTypeString
| 31.003534 | 158 | 0.488109 |
acfb1b65a81240ea06749bd183f9df25c4dd2187 | 2,494 | py | Python | PopulationGenerator.py | HoSzyk/Genetic_algorithm_board | edcd2307761489a6e9b65424726039b89bff98ff | [
"MIT"
] | null | null | null | PopulationGenerator.py | HoSzyk/Genetic_algorithm_board | edcd2307761489a6e9b65424726039b89bff98ff | [
"MIT"
] | null | null | null | PopulationGenerator.py | HoSzyk/Genetic_algorithm_board | edcd2307761489a6e9b65424726039b89bff98ff | [
"MIT"
] | null | null | null | import random
from Board import Board
from Path import Path
from Point import Point
from Segment import Segment
from Solution import Solution
def generate_random_path_list(board: Board):
    """Build a random Solution connecting every point pair on *board*.

    Each path is grown segment by segment, alternating between horizontal
    and vertical moves, until the pair's end point is reached.
    """
    result_solution = Solution()
    for start_point, end_point in board.point_pairs:
        cur_point = start_point
        cur_path = Path()
        # start each path with a random orientation
        horizontal = random.random() < 0.5
        while cur_point != end_point:
            cur_segment = get_random_segment(cur_point, end_point, board, horizontal)
            cur_path.append_segment(cur_segment)
            cur_point = cur_segment.get_end_point(cur_point)
            # alternate orientation after every segment
            horizontal = not horizontal
        result_solution.append_path(cur_path)
    return result_solution
def get_random_segment(start_point: Point, end_point: Point, board: Board, horizontal: bool):
    """Return a random Segment moving from *start_point* towards *end_point*.

    The direction is chosen along the requested axis so the segment heads
    towards the end point (sign checks below imply 'W' decreases x, 'N'
    decreases y); when start and end already share that coordinate a random
    direction of length 1 is picked instead. The final length is drawn
    uniformly from 1..distance, so the segment may fall short of the target.
    """
    result_segment = Segment()
    if horizontal:
        result_segment.length = start_point.x - end_point.x
        if result_segment.length > 0:
            result_segment.direction = 'W'
        elif result_segment.length < 0:
            result_segment.direction = 'E'
            result_segment.length = abs(result_segment.length)
        else:
            # no x distance left: take a unit step in a random direction
            result_segment.direction = 'E' if random.random() < 0.5 else 'W'
            result_segment.length = 1
    else:
        result_segment.length = start_point.y - end_point.y
        if result_segment.length > 0:
            result_segment.direction = 'N'
        elif result_segment.length < 0:
            result_segment.direction = 'S'
            result_segment.length = abs(result_segment.length)
        else:
            # no y distance left: take a unit step in a random direction
            result_segment.direction = 'N' if random.random() < 0.5 else 'S'
            result_segment.length = 1
    # force the direction inwards when sitting on a board border
    result_segment.direction = set_direction_within_bounds(start_point,
                                                           horizontal,
                                                           board,
                                                           result_segment.direction)
    # randomly shorten the segment (at least one step is always taken)
    result_segment.length = random.randint(1, result_segment.length)
    return result_segment
def set_direction_within_bounds(point: Point, horizontal: bool, board: Board, direction):
    """Return *direction*, overridden to point inwards when *point* is on a board edge.

    NOTE(review): the vertical branch compares point.y against
    ``board.width - 1``; for non-square boards this looks like it should be
    ``board.height - 1`` -- confirm against the Board definition.
    """
    if horizontal:
        if point.x == 0:
            direction = 'E'
        elif point.x == board.width - 1:
            direction = 'W'
    else:
        if point.y == 0:
            direction = 'S'
        elif point.y == board.width - 1:
            direction = 'N'
    return direction
| 33.702703 | 93 | 0.60425 |
acfb1d2ab4132e9557c2e5745a18c32f4153f841 | 1,369 | py | Python | src/InvictusService.py | elijah-rou/InvictusMicroservice | 2f200dccdd28d32c8fb3cc524e128ba13583b7f1 | [
"MIT"
] | null | null | null | src/InvictusService.py | elijah-rou/InvictusMicroservice | 2f200dccdd28d32c8fb3cc524e128ba13583b7f1 | [
"MIT"
] | null | null | null | src/InvictusService.py | elijah-rou/InvictusMicroservice | 2f200dccdd28d32c8fb3cc524e128ba13583b7f1 | [
"MIT"
] | null | null | null | '''
Microservice that performs a variety of functions:
1) Squares every odd number in a vector of integers
2) Generate string:encoding key store from a list of strings
3) Decode an encoded string
'''
# Nameko import
from nameko.rpc import rpc
# Huffman encoder/decoder
from dahuffman import HuffmanCodec
### Use NLTK Gutenberg corpus to create a frequency distribution of letters
### Use that to perform static Huffman encoding
from nltk.corpus import gutenberg
codec = HuffmanCodec.from_data(gutenberg.raw())
# Define the service
class InvictusService():
    """Nameko microservice exposing number-squaring and Huffman-coding RPCs."""

    name = "invictus_service"

    def odd_square(self, number):
        """Square *number* when it is odd; return it unchanged otherwise."""
        return number * number if number % 2 != 0 else number

    @rpc
    def apply_odd_square(self, array):
        """RPC: return *array* with every odd entry squared."""
        return [self.odd_square(value) for value in array]

    def to_huffman(self, string):
        """Return the pair (string, huffman encoding of string)."""
        return (string, codec.encode(string))

    @rpc
    def apply_to_huffman(self, array):
        """RPC: return a mapping of each string in *array* to its Huffman encoding."""
        return {key: encoded for key, encoded in map(self.to_huffman, array)}

    @rpc
    def decode_huffman(self, code):
        """RPC: decode a Huffman-encoded value back to its original string."""
        return codec.decode(code)
| 29.76087 | 75 | 0.706355 |
acfb1e46645b200ef47632ee3b7d0963ede484a6 | 8,411 | py | Python | tests/server/rest/jobs_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-01-23T17:12:41.000Z | 2019-01-14T13:38:17.000Z | tests/server/rest/jobs_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 242 | 2016-05-09T18:46:51.000Z | 2022-03-31T22:02:29.000Z | tests/server/rest/jobs_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-03-27T09:13:40.000Z | 2019-01-27T10:55:30.000Z | """
Test script for REST/jobs
"""
import logging
logger = logging.getLogger('rest_jobs_test')
import os
import sys
import time
import random
import shutil
import tempfile
import unittest
import subprocess
import json
from functools import partial
from unittest.mock import patch, MagicMock
from tests.util import unittest_reporter, glob_tests
import ldap3
import tornado.web
import tornado.ioloop
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.testing import AsyncTestCase
from rest_tools.server import Auth, RestServer
from iceprod.server.modules.rest_api import setup_rest
from . import RestTestCase
class rest_jobs_test(RestTestCase):
    """Integration tests for the IceProd REST /jobs endpoints.

    RestTestCase.setUp starts a real REST server; each test talks to it over
    HTTP with a bearer token. The bare ``yield`` statements rely on the
    coroutine test machinery provided by the ``unittest_reporter`` decorator
    and the tornado AsyncTestCase base.
    """
    def setUp(self):
        # enable only the "jobs" REST module for these tests
        config = {'rest':{'jobs':{}}}
        super(rest_jobs_test,self).setUp(config=config)

    @unittest_reporter(name='REST POST /jobs')
    def test_105_jobs(self):
        # creating a job returns 201 with the new job_id under "result"
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        self.assertIn('result', ret)

    @unittest_reporter(name='REST GET /jobs/<job_id>')
    def test_110_jobs(self):
        # a created job can be fetched back; new jobs start as "processing"
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        r = yield client.fetch('http://localhost:%d/jobs/%s'%(self.port,job_id),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        for k in data:
            self.assertIn(k, ret)
            self.assertEqual(data[k], ret[k])
        for k in ('status','status_changed'):
            self.assertIn(k, ret)
        self.assertEqual(ret['status'], 'processing')

    @unittest_reporter(name='REST PATCH /jobs/<job_id>')
    def test_120_jobs(self):
        # PATCH updates fields and echoes the new values back
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        new_data = {
            'status': 'processing',
        }
        r = yield client.fetch('http://localhost:%d/jobs/%s'%(self.port,job_id),
                method='PATCH', body=json.dumps(new_data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        for k in new_data:
            self.assertIn(k, ret)
            self.assertEqual(new_data[k], ret[k])

    @unittest_reporter(name='REST GET /datasets/<dataset_id>/jobs')
    def test_200_jobs(self):
        # the dataset-scoped listing contains the created job keyed by job_id
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs'%(self.port,data['dataset_id']),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertIn(job_id, ret)
        for k in data:
            self.assertIn(k, ret[job_id])
            self.assertEqual(data[k], ret[job_id][k])

    @unittest_reporter(name='REST GET /datasets/<dataset_id>/jobs/<job_id>')
    def test_210_jobs(self):
        # dataset-scoped single-job fetch mirrors GET /jobs/<job_id>
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs/%s'%(self.port,data['dataset_id'],job_id),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        for k in data:
            self.assertIn(k, ret)
            self.assertEqual(data[k], ret[k])
        for k in ('status','status_changed'):
            self.assertIn(k, ret)
        self.assertEqual(ret['status'], 'processing')

    @unittest_reporter(name='REST PUT /datasets/<dataset_id>/jobs/<job_id>/status')
    def test_220_jobs(self):
        # PUT .../status changes the job status; verified via a follow-up GET
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        data2 = {'status':'failed'}
        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs/%s/status'%(self.port,data['dataset_id'],job_id),
                method='PUT', body=json.dumps(data2),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)

        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs/%s'%(self.port,data['dataset_id'],job_id),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertIn('status', ret)
        self.assertEqual(ret['status'], 'failed')

    @unittest_reporter(name='REST GET /datasets/<dataset_id>/job_summaries/status')
    def test_300_jobs(self):
        # summaries group job_ids by status
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        r = yield client.fetch('http://localhost:%d/datasets/%s/job_summaries/status'%(self.port,data['dataset_id']),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertEqual(ret, {'processing': [job_id]})

    @unittest_reporter(name='REST GET /datasets/<dataset_id>/job_counts/status')
    def test_400_jobs(self):
        # counts aggregate the number of jobs per status
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        r = yield client.fetch('http://localhost:%d/datasets/%s/job_counts/status'%(self.port,data['dataset_id']),
                headers={'Authorization': 'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertEqual(ret, {'processing': 1})
def load_tests(loader, tests, pattern):
    """Build the unittest suite, honoring any test-name glob filter."""
    suite = unittest.TestSuite()
    selected = glob_tests(loader.getTestCaseNames(rest_jobs_test))
    suite.addTests(loader.loadTestsFromNames(selected, rest_jobs_test))
    return suite
| 37.549107 | 119 | 0.567947 |
acfb1e5a4b66152a2df11ad431df7d017f011791 | 873 | py | Python | mongodb/MongoDB_BenchMark/WordsFilter.py | skihyy/GT-CS6675 | e2c86072f479ac3f6a334bbfcedc633e4a711421 | [
"MIT"
] | null | null | null | mongodb/MongoDB_BenchMark/WordsFilter.py | skihyy/GT-CS6675 | e2c86072f479ac3f6a334bbfcedc633e4a711421 | [
"MIT"
] | null | null | null | mongodb/MongoDB_BenchMark/WordsFilter.py | skihyy/GT-CS6675 | e2c86072f479ac3f6a334bbfcedc633e4a711421 | [
"MIT"
] | null | null | null | import csv
# Convert the wikipedia multistream index (colon-separated fields) into a CSV
# of 15 tokens per row.
# NOTE(review): the indentation below was reconstructed from a
# whitespace-mangled dump -- verify the placement of ``lines += 1``, the
# final ``print`` and the ``close()`` calls against the original file.
with open('sample.csv', 'w') as wf:
    writer = csv.writer(wf)
    with open('enwiki-20170120-pages-articles-multistream-index.txt', 'r') as rf:
        lines = 0
        ct = 0;
        row = []
        for line in rf:
            # split each index line on ':' and then on spaces
            words = line.split(":")
            for word in words:
                if 0 < len(word):
                    final_words = word.split(" ")
                    for fw in final_words:
                        if 0 < len(fw):
                            w = str(fw).strip()
                            row.append(w)
                            ct += 1;
                            # emit a row after every 15 collected tokens;
                            # a trailing partial row is silently dropped
                            if 15 == ct:
                                writer.writerow(row)
                                ct = 0
                                row = []
            lines += 1
        print('lines: ' + str(lines))
        rf.close()  # redundant: the "with" block already closes rf
    wf.close()  # redundant: the "with" block already closes wf
acfb1fddec810dcd3fd682dea58cdc50b5cd1173 | 45,405 | py | Python | venv/lib/python2.7/dist-packages/pyx/path.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | venv/lib/python2.7/dist-packages/pyx/path.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | venv/lib/python2.7/dist-packages/pyx/path.py | pengwu/scapy_env | 3db9c5dea2e219048a2387649d6d89be342903d9 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
#
#
# Copyright (C) 2002-2006 Jörg Lehmann <joergl@users.sourceforge.net>
# Copyright (C) 2003-2005 Michael Schindler <m-schindler@users.sourceforge.net>
# Copyright (C) 2002-2011 André Wobst <wobsta@users.sourceforge.net>
#
# This file is part of PyX (http://pyx.sourceforge.net/).
#
# PyX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import math
from math import cos, sin, tan, acos, pi, radians, degrees
import trafo, unit
from normpath import NormpathException, normpath, normsubpath, normline_pt, normcurve_pt
import bbox as bboxmodule
# set is available as an external interface to the normpath.set method
from normpath import set
# normpath's invalid is available as an external interface
from normpath import invalid
# use new style classes when possible
__metaclass__ = type
class _marker: pass  # unique sentinel for "argument not supplied" (distinct from None, which is a valid value)
################################################################################
# specific exception for path-related problems
class PathException(Exception): pass  # raised for path-construction problems (e.g. a path not starting with a moveto)
################################################################################
# Bezier helper functions
################################################################################
def _bezierpolyrange(x0, x1, x2, x3):
    """Return (min, max) of the cubic Bezier polynomial with control values
    x0..x3 over the parameter interval [0, 1]."""
    # Coefficients chosen so the derivative is proportional to a*t^2 + b*t + c.
    a = x3 - 3*x2 + 3*x1 - x0
    b = 2*x0 - 4*x1 + 2*x2
    c = x1 - x0

    # candidate parameters: the interval ends plus interior derivative roots
    candidates = [0, 1]
    disc = b*b - 4*a*c
    if disc >= 0:
        # numerically stable quadratic solution: roots are q/a and c/q
        if b >= 0:
            q = -0.5*(b + math.sqrt(disc))
        else:
            q = -0.5*(b - math.sqrt(disc))
        for num, den in ((q, a), (c, q)):
            try:
                t = num*1.0/den
            except ZeroDivisionError:
                pass
            else:
                if 0 < t < 1:
                    candidates.append(t)

    values = [(((a*t + 1.5*b)*t + 3*c)*t + x0) for t in candidates]
    return min(values), max(values)
def _arctobcurve(x_pt, y_pt, r_pt, phi1, phi2):
    """Return the cubic Bezier segment best approximating the arc around
    (x_pt, y_pt) with radius r_pt from angle phi1 to phi2 (radians).

    Returns None for a zero-length arc.
    """
    dphi = phi2 - phi1
    if dphi == 0:
        return None

    sin1, cos1 = sin(phi1), cos(phi1)
    sin2, cos2 = sin(phi2), cos(phi2)

    # arc end points
    x0_pt, y0_pt = x_pt + r_pt*cos1, y_pt + r_pt*sin1
    x3_pt, y3_pt = x_pt + r_pt*cos2, y_pt + r_pt*sin2

    # optimal distance of the inner control points along the end tangents
    l = r_pt*4*(1 - cos(dphi/2))/(3*sin(dphi/2))

    return normcurve_pt(x0_pt, y0_pt,
                        x0_pt - l*sin1, y0_pt + l*cos1,
                        x3_pt + l*sin2, y3_pt - l*cos2,
                        x3_pt, y3_pt)
def _arctobezierpath(x_pt, y_pt, r_pt, phi1, phi2, dphimax=45):
    """Approximate the counterclockwise arc from phi1 to phi2 (degrees) by a
    list of Bezier segments of at most *dphimax* degrees each."""
    apath = []
    phi1 = radians(phi1)
    phi2 = radians(phi2)
    dphimax = radians(dphimax)
    if phi2<phi1:
        # guarantee that phi2>phi1 ...
        phi2 = phi2 + (math.floor((phi1-phi2)/(2*pi))+1)*2*pi
    elif phi2>phi1+2*pi:
        # ... or remove unnecessary multiples of 2*pi
        phi2 = phi2 - (math.floor((phi2-phi1)/(2*pi))-1)*2*pi
    if r_pt == 0 or phi1-phi2 == 0: return []
    # enough pieces that each spans no more than dphimax
    subdivisions = abs(int((1.0*(phi1-phi2))/dphimax))+1
    dphi = (1.0*(phi2-phi1))/subdivisions
    for i in range(subdivisions):
        apath.append(_arctobcurve(x_pt, y_pt, r_pt, phi1+i*dphi, phi1+(i+1)*dphi))
    return apath
def _arcpoint(x_pt, y_pt, r_pt, angle):
    """Return the point on the circle around (x_pt, y_pt) with radius r_pt at
    *angle* degrees."""
    phi = radians(angle)
    return x_pt + r_pt*cos(phi), y_pt + r_pt*sin(phi)
def _arcbboxdata(x_pt, y_pt, r_pt, angle1, angle2):
    """Return (minx, miny, maxx, maxy) in pts of the counterclockwise arc
    around (x_pt, y_pt) with radius r_pt from angle1 to angle2 (degrees)."""
    phi1 = radians(angle1)
    phi2 = radians(angle2)

    # starting end end point of arc segment
    sarcx_pt, sarcy_pt = _arcpoint(x_pt, y_pt, r_pt, angle1)
    earcx_pt, earcy_pt = _arcpoint(x_pt, y_pt, r_pt, angle2)

    # Now, we have to determine the corners of the bbox for the
    # arc segment, i.e. global maxima/mimima of cos(phi) and sin(phi)
    # in the interval [phi1, phi2]. These can either be located
    # on the borders of this interval or in the interior.

    if phi2 < phi1:
        # guarantee that phi2>phi1
        phi2 = phi2 + (math.floor((phi1-phi2)/(2*pi))+1)*2*pi

    # For each of the four extremum tests: if the first extremum of the
    # respective trig function past phi1 lies beyond phi2, the extreme value
    # is attained at an end point; otherwise the full +-r_pt is reached.

    # next minimum of cos(phi) looking from phi1 in counterclockwise
    # direction: 2*pi*floor((phi1-pi)/(2*pi)) + 3*pi

    if phi2 < (2*math.floor((phi1-pi)/(2*pi))+3)*pi:
        minarcx_pt = min(sarcx_pt, earcx_pt)
    else:
        minarcx_pt = x_pt-r_pt

    # next minimum of sin(phi) looking from phi1 in counterclockwise
    # direction: 2*pi*floor((phi1-3*pi/2)/(2*pi)) + 7/2*pi

    if phi2 < (2*math.floor((phi1-3.0*pi/2)/(2*pi))+7.0/2)*pi:
        minarcy_pt = min(sarcy_pt, earcy_pt)
    else:
        minarcy_pt = y_pt-r_pt

    # next maximum of cos(phi) looking from phi1 in counterclockwise
    # direction: 2*pi*floor((phi1)/(2*pi))+2*pi

    if phi2 < (2*math.floor((phi1)/(2*pi))+2)*pi:
        maxarcx_pt = max(sarcx_pt, earcx_pt)
    else:
        maxarcx_pt = x_pt+r_pt

    # next maximum of sin(phi) looking from phi1 in counterclockwise
    # direction: 2*pi*floor((phi1-pi/2)/(2*pi)) + 1/2*pi

    if phi2 < (2*math.floor((phi1-pi/2)/(2*pi))+5.0/2)*pi:
        maxarcy_pt = max(sarcy_pt, earcy_pt)
    else:
        maxarcy_pt = y_pt+r_pt

    return minarcx_pt, minarcy_pt, maxarcx_pt, maxarcy_pt
################################################################################
# path context and pathitem base class
################################################################################
class context:
    """Mutable per-pathitem state used while walking a path."""

    def __init__(self, x_pt, y_pt, subfirstx_pt, subfirsty_pt):
        """Store the current point (x_pt, y_pt) and the starting point
        (subfirstx_pt, subfirsty_pt) of the current subpath.

        All four values must be plain numbers; there is no notion of an
        invalid context.
        """
        self.subfirstx_pt = subfirstx_pt
        self.subfirsty_pt = subfirsty_pt
        self.x_pt = x_pt
        self.y_pt = y_pt
class pathitem:
    """element of a PS style path

    Abstract base class; concrete pathitems implement the update* methods
    and, where they may start a path, the create* methods.
    """

    def __str__(self):
        raise NotImplementedError()

    def createcontext(self):
        """creates a context from the current pathitem

        Returns a context instance. Is called, when no context has yet
        been defined, i.e. for the very first pathitem. Most of the
        pathitems do not provide this method. Note, that you should pass
        the context created by createcontext to updatebbox and updatenormpath
        of successive pathitems only; use the context-free createbbox and
        createnormpath for the first pathitem instead.
        """
        raise PathException("path must start with moveto or the like (%r)" % self)

    def createbbox(self):
        """creates a bbox from the current pathitem

        Returns a bbox instance. Is called, when a bbox has to be
        created instead of updating it, i.e. for the very first
        pathitem. Most pathitems do not provide this method.
        updatebbox must not be called for the created instance and the
        same pathitem.
        """
        raise PathException("path must start with moveto or the like (%r)" % self)

    def createnormpath(self, epsilon=_marker):
        """create a normpath from the current pathitem

        Return a normpath instance. Is called, when a normpath has to
        be created instead of updating it, i.e. for the very first
        pathitem. Most pathitems do not provide this method.
        updatenormpath must not be called for the created instance and
        the same pathitem.
        """
        raise PathException("path must start with moveto or the like (%r)" % self)

    def updatebbox(self, bbox, context):
        """updates the bbox to contain the pathitem for the given
        context

        Is called for all subsequent pathitems in a path to complete
        the bbox information. Both, the bbox and context are updated
        inplace. Does not return anything.
        """
        raise NotImplementedError()

    def updatenormpath(self, normpath, context):
        """update the normpath to contain the pathitem for the given
        context

        Is called for all subsequent pathitems in a path to complete
        the normpath. Both the normpath and the context are updated
        inplace. Most pathitem implementations will use
        normpath.normsubpath[-1].append to add normsubpathitem(s).
        Does not return anything.
        """
        raise NotImplementedError()

    def outputPS(self, file, writer):
        """write PS representation of pathitem to file

        The default implementation intentionally writes nothing.
        """
################################################################################
# various pathitems
################################################################################
# Each one comes in two variants:
# - one with suffix _pt. This one requires the coordinates
# to be already in pts (mainly used for internal purposes)
# - another which accepts arbitrary units
class closepath(pathitem):
    """Close the current subpath, connecting it back to its starting point."""

    __slots__ = ()

    def __str__(self):
        return "closepath()"

    def updatebbox(self, bbox, context):
        # Closing introduces no new points; only the current point jumps
        # back to the start of the subpath.
        context.x_pt, context.y_pt = context.subfirstx_pt, context.subfirsty_pt

    def updatenormpath(self, normpath, context):
        normpath.normsubpaths[-1].close()
        context.x_pt, context.y_pt = context.subfirstx_pt, context.subfirsty_pt

    def outputPS(self, file, writer):
        file.write("closepath\n")
class pdfmoveto_pt(normline_pt):
    """Degenerate zero-length line marking a subpath start for PDF output."""
    def outputPDF(self, file, writer):
        # intentionally a no-op: the point serves only as a start marker
        pass
class moveto_pt(pathitem):
    """Start a new subpath and set current point to (x_pt, y_pt) (coordinates in pts)"""

    __slots__ = "x_pt", "y_pt"

    def __init__(self, x_pt, y_pt):
        self.x_pt = x_pt
        self.y_pt = y_pt

    def __str__(self):
        return "moveto_pt(%g, %g)" % (self.x_pt, self.y_pt)

    def createcontext(self):
        # current point and subpath start coincide for a moveto
        return context(self.x_pt, self.y_pt, self.x_pt, self.y_pt)

    def createbbox(self):
        return bboxmodule.bbox_pt(self.x_pt, self.y_pt, self.x_pt, self.y_pt)

    def createnormpath(self, epsilon=_marker):
        # The subpath start is represented by a degenerate zero-length line;
        # with epsilon=None a pdfmoveto_pt is used so PDF output stays clean.
        if epsilon is _marker:
            return normpath([normsubpath([normline_pt(self.x_pt, self.y_pt, self.x_pt, self.y_pt)])])
        elif epsilon is None:
            return normpath([normsubpath([pdfmoveto_pt(self.x_pt, self.y_pt, self.x_pt, self.y_pt)],
                                         epsilon=epsilon)])
        else:
            return normpath([normsubpath([normline_pt(self.x_pt, self.y_pt, self.x_pt, self.y_pt)],
                                         epsilon=epsilon)])

    def updatebbox(self, bbox, context):
        bbox.includepoint_pt(self.x_pt, self.y_pt)
        context.x_pt = context.subfirstx_pt = self.x_pt
        context.y_pt = context.subfirsty_pt = self.y_pt

    def updatenormpath(self, normpath, context):
        # start a new normsubpath; the degenerate line marks the start point
        # only when the subpath carries an epsilon
        if normpath.normsubpaths[-1].epsilon is not None:
            normpath.append(normsubpath([normline_pt(self.x_pt, self.y_pt, self.x_pt, self.y_pt)],
                                        epsilon=normpath.normsubpaths[-1].epsilon))
        else:
            normpath.append(normsubpath(epsilon=normpath.normsubpaths[-1].epsilon))
        context.x_pt = context.subfirstx_pt = self.x_pt
        context.y_pt = context.subfirsty_pt = self.y_pt

    def outputPS(self, file, writer):
        file.write("%g %g moveto\n" % (self.x_pt, self.y_pt) )
class lineto_pt(pathitem):
    """Append a straight line from the current point to (x_pt, y_pt) (coordinates in pts)."""

    __slots__ = "x_pt", "y_pt"

    def __init__(self, x_pt, y_pt):
        self.x_pt = x_pt
        self.y_pt = y_pt

    def __str__(self):
        return "lineto_pt(%g, %g)" % (self.x_pt, self.y_pt)

    def updatebbox(self, bbox, context):
        bbox.includepoint_pt(self.x_pt, self.y_pt)
        context.x_pt, context.y_pt = self.x_pt, self.y_pt

    def updatenormpath(self, normpath, context):
        normpath.normsubpaths[-1].append(
            normline_pt(context.x_pt, context.y_pt, self.x_pt, self.y_pt))
        context.x_pt, context.y_pt = self.x_pt, self.y_pt

    def outputPS(self, file, writer):
        file.write("%g %g lineto\n" % (self.x_pt, self.y_pt) )
class curveto_pt(pathitem):
    """Append curveto (coordinates in pts)

    Cubic Bezier from the current point via the control points
    (x1_pt, y1_pt), (x2_pt, y2_pt) to (x3_pt, y3_pt).
    """

    __slots__ = "x1_pt", "y1_pt", "x2_pt", "y2_pt", "x3_pt", "y3_pt"

    def __init__(self, x1_pt, y1_pt, x2_pt, y2_pt, x3_pt, y3_pt):
        self.x1_pt = x1_pt
        self.y1_pt = y1_pt
        self.x2_pt = x2_pt
        self.y2_pt = y2_pt
        self.x3_pt = x3_pt
        self.y3_pt = y3_pt

    def __str__(self):
        return "curveto_pt(%g, %g, %g, %g, %g, %g)" % (self.x1_pt, self.y1_pt,
                                                       self.x2_pt, self.y2_pt,
                                                       self.x3_pt, self.y3_pt)

    def updatebbox(self, bbox, context):
        # exact bezier extrema per coordinate, not just the control hull
        xmin_pt, xmax_pt = _bezierpolyrange(context.x_pt, self.x1_pt, self.x2_pt, self.x3_pt)
        ymin_pt, ymax_pt = _bezierpolyrange(context.y_pt, self.y1_pt, self.y2_pt, self.y3_pt)
        bbox.includepoint_pt(xmin_pt, ymin_pt)
        bbox.includepoint_pt(xmax_pt, ymax_pt)
        context.x_pt = self.x3_pt
        context.y_pt = self.y3_pt

    def updatenormpath(self, normpath, context):
        normpath.normsubpaths[-1].append(normcurve_pt(context.x_pt, context.y_pt,
                                                      self.x1_pt, self.y1_pt,
                                                      self.x2_pt, self.y2_pt,
                                                      self.x3_pt, self.y3_pt))
        context.x_pt = self.x3_pt
        context.y_pt = self.y3_pt

    def outputPS(self, file, writer):
        file.write("%g %g %g %g %g %g curveto\n" % (self.x1_pt, self.y1_pt,
                                                    self.x2_pt, self.y2_pt,
                                                    self.x3_pt, self.y3_pt))
class rmoveto_pt(pathitem):
    """Start a new subpath at the point displaced by (dx_pt, dy_pt) from the current point (pts)."""

    __slots__ = "dx_pt", "dy_pt"

    def __init__(self, dx_pt, dy_pt):
        self.dx_pt = dx_pt
        self.dy_pt = dy_pt

    def __str__(self):
        return "rmoveto_pt(%g, %g)" % (self.dx_pt, self.dy_pt)

    def updatebbox(self, bbox, context):
        bbox.includepoint_pt(context.x_pt + self.dx_pt, context.y_pt + self.dy_pt)
        context.x_pt += self.dx_pt
        context.y_pt += self.dy_pt
        context.subfirstx_pt = context.x_pt
        context.subfirsty_pt = context.y_pt

    def updatenormpath(self, normpath, context):
        context.x_pt += self.dx_pt
        context.y_pt += self.dy_pt
        context.subfirstx_pt = context.x_pt
        context.subfirsty_pt = context.y_pt
        epsilon = normpath.normsubpaths[-1].epsilon
        if epsilon is None:
            # epsilon-less subpaths carry no degenerate start-marker line
            normpath.append(normsubpath(epsilon=epsilon))
        else:
            normpath.append(normsubpath([normline_pt(context.x_pt, context.y_pt,
                                                     context.x_pt, context.y_pt)],
                                        epsilon=epsilon))

    def outputPS(self, file, writer):
        file.write("%g %g rmoveto\n" % (self.dx_pt, self.dy_pt) )
class rlineto_pt(pathitem):
    """Append a straight line by the relative displacement (dx_pt, dy_pt) (pts)."""

    __slots__ = "dx_pt", "dy_pt"

    def __init__(self, dx_pt, dy_pt):
        self.dx_pt = dx_pt
        self.dy_pt = dy_pt

    def __str__(self):
        return "rlineto_pt(%g %g)" % (self.dx_pt, self.dy_pt)

    def updatebbox(self, bbox, context):
        endx_pt = context.x_pt + self.dx_pt
        endy_pt = context.y_pt + self.dy_pt
        bbox.includepoint_pt(endx_pt, endy_pt)
        context.x_pt = endx_pt
        context.y_pt = endy_pt

    def updatenormpath(self, normpath, context):
        endx_pt = context.x_pt + self.dx_pt
        endy_pt = context.y_pt + self.dy_pt
        normpath.normsubpaths[-1].append(
            normline_pt(context.x_pt, context.y_pt, endx_pt, endy_pt))
        context.x_pt = endx_pt
        context.y_pt = endy_pt

    def outputPS(self, file, writer):
        file.write("%g %g rlineto\n" % (self.dx_pt, self.dy_pt) )
class rcurveto_pt(pathitem):
    """Append rcurveto (coordinates in pts)

    Cubic Bezier whose control points are given relative to the current point.
    """

    __slots__ = "dx1_pt", "dy1_pt", "dx2_pt", "dy2_pt", "dx3_pt", "dy3_pt"

    def __init__(self, dx1_pt, dy1_pt, dx2_pt, dy2_pt, dx3_pt, dy3_pt):
        self.dx1_pt = dx1_pt
        self.dy1_pt = dy1_pt
        self.dx2_pt = dx2_pt
        self.dy2_pt = dy2_pt
        self.dx3_pt = dx3_pt
        self.dy3_pt = dy3_pt

    def __str__(self):
        return "rcurveto_pt(%g, %g, %g, %g, %g, %g)" % (self.dx1_pt, self.dy1_pt,
                                                        self.dx2_pt, self.dy2_pt,
                                                        self.dx3_pt, self.dy3_pt)

    def updatebbox(self, bbox, context):
        # exact bezier extrema of the absolute control values per coordinate
        xmin_pt, xmax_pt = _bezierpolyrange(context.x_pt,
                                            context.x_pt+self.dx1_pt,
                                            context.x_pt+self.dx2_pt,
                                            context.x_pt+self.dx3_pt)
        ymin_pt, ymax_pt = _bezierpolyrange(context.y_pt,
                                            context.y_pt+self.dy1_pt,
                                            context.y_pt+self.dy2_pt,
                                            context.y_pt+self.dy3_pt)
        bbox.includepoint_pt(xmin_pt, ymin_pt)
        bbox.includepoint_pt(xmax_pt, ymax_pt)
        context.x_pt += self.dx3_pt
        context.y_pt += self.dy3_pt

    def updatenormpath(self, normpath, context):
        normpath.normsubpaths[-1].append(normcurve_pt(context.x_pt, context.y_pt,
                                                      context.x_pt + self.dx1_pt, context.y_pt + self.dy1_pt,
                                                      context.x_pt + self.dx2_pt, context.y_pt + self.dy2_pt,
                                                      context.x_pt + self.dx3_pt, context.y_pt + self.dy3_pt))
        context.x_pt += self.dx3_pt
        context.y_pt += self.dy3_pt

    def outputPS(self, file, writer):
        file.write("%g %g %g %g %g %g rcurveto\n" % (self.dx1_pt, self.dy1_pt,
                                                     self.dx2_pt, self.dy2_pt,
                                                     self.dx3_pt, self.dy3_pt))
class arc_pt(pathitem):
    """Append counterclockwise arc (coordinates in pts)

    Like the PostScript ``arc`` operator: a line from the current point to
    the arc start is added implicitly (unless the arc starts the path).
    Angles are in degrees.
    """

    __slots__ = "x_pt", "y_pt", "r_pt", "angle1", "angle2"

    def __init__(self, x_pt, y_pt, r_pt, angle1, angle2):
        self.x_pt = x_pt
        self.y_pt = y_pt
        self.r_pt = r_pt
        self.angle1 = angle1
        self.angle2 = angle2

    def __str__(self):
        return "arc_pt(%g, %g, %g, %g, %g)" % (self.x_pt, self.y_pt, self.r_pt,
                                               self.angle1, self.angle2)

    def createcontext(self):
        # a path starting with an arc begins at the arc's end point
        x_pt, y_pt = _arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle2)
        return context(x_pt, y_pt, x_pt, y_pt)

    def createbbox(self):
        return bboxmodule.bbox_pt(*_arcbboxdata(self.x_pt, self.y_pt, self.r_pt,
                                                self.angle1, self.angle2))

    def createnormpath(self, epsilon=_marker):
        if epsilon is _marker:
            return normpath([normsubpath(_arctobezierpath(self.x_pt, self.y_pt, self.r_pt, self.angle1, self.angle2))])
        else:
            return normpath([normsubpath(_arctobezierpath(self.x_pt, self.y_pt, self.r_pt, self.angle1, self.angle2),
                                         epsilon=epsilon)])

    def updatebbox(self, bbox, context):
        minarcx_pt, minarcy_pt, maxarcx_pt, maxarcy_pt = _arcbboxdata(self.x_pt, self.y_pt, self.r_pt,
                                                                      self.angle1, self.angle2)
        bbox.includepoint_pt(minarcx_pt, minarcy_pt)
        bbox.includepoint_pt(maxarcx_pt, maxarcy_pt)
        context.x_pt, context.y_pt = _arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle2)

    def updatenormpath(self, normpath, context):
        # connect the current point to the arc start; a closed subpath
        # cannot be extended, so a new one is started in that case
        if normpath.normsubpaths[-1].closed:
            normpath.append(normsubpath([normline_pt(context.x_pt, context.y_pt,
                                                     *_arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle1))],
                                        epsilon=normpath.normsubpaths[-1].epsilon))
        else:
            normpath.normsubpaths[-1].append(normline_pt(context.x_pt, context.y_pt,
                                                         *_arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle1)))
        normpath.normsubpaths[-1].extend(_arctobezierpath(self.x_pt, self.y_pt, self.r_pt, self.angle1, self.angle2))
        context.x_pt, context.y_pt = _arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle2)

    def outputPS(self, file, writer):
        file.write("%g %g %g %g %g arc\n" % (self.x_pt, self.y_pt,
                                             self.r_pt,
                                             self.angle1,
                                             self.angle2))
class arcn_pt(pathitem):
    """Append clockwise arc (coordinates in pts)

    Like the PostScript ``arcn`` operator; the arc is built as the reverse
    of the counterclockwise arc from angle2 to angle1. Angles are in degrees.
    """

    __slots__ = "x_pt", "y_pt", "r_pt", "angle1", "angle2"

    def __init__(self, x_pt, y_pt, r_pt, angle1, angle2):
        self.x_pt = x_pt
        self.y_pt = y_pt
        self.r_pt = r_pt
        self.angle1 = angle1
        self.angle2 = angle2

    def __str__(self):
        return "arcn_pt(%g, %g, %g, %g, %g)" % (self.x_pt, self.y_pt, self.r_pt,
                                                self.angle1, self.angle2)

    def createcontext(self):
        # a path starting with an arcn begins at the arc's end point
        x_pt, y_pt = _arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle2)
        return context(x_pt, y_pt, x_pt, y_pt)

    def createbbox(self):
        # bbox of the clockwise arc equals that of the reversed ccw arc
        return bboxmodule.bbox_pt(*_arcbboxdata(self.x_pt, self.y_pt, self.r_pt,
                                                self.angle2, self.angle1))

    def createnormpath(self, epsilon=_marker):
        if epsilon is _marker:
            return normpath([normsubpath(_arctobezierpath(self.x_pt, self.y_pt, self.r_pt, self.angle2, self.angle1))]).reversed()
        else:
            return normpath([normsubpath(_arctobezierpath(self.x_pt, self.y_pt, self.r_pt, self.angle2, self.angle1),
                                         epsilon=epsilon)]).reversed()

    def updatebbox(self, bbox, context):
        minarcx_pt, minarcy_pt, maxarcx_pt, maxarcy_pt = _arcbboxdata(self.x_pt, self.y_pt, self.r_pt,
                                                                      self.angle2, self.angle1)
        bbox.includepoint_pt(minarcx_pt, minarcy_pt)
        bbox.includepoint_pt(maxarcx_pt, maxarcy_pt)
        context.x_pt, context.y_pt = _arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle2)

    def updatenormpath(self, normpath, context):
        # connect the current point to the arc start; a closed subpath
        # cannot be extended, so a new one is started in that case
        if normpath.normsubpaths[-1].closed:
            normpath.append(normsubpath([normline_pt(context.x_pt, context.y_pt,
                                                     *_arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle1))],
                                        epsilon=normpath.normsubpaths[-1].epsilon))
        else:
            normpath.normsubpaths[-1].append(normline_pt(context.x_pt, context.y_pt,
                                                         *_arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle1)))
        # build the ccw arc from angle2 to angle1, then reverse each piece
        bpathitems = _arctobezierpath(self.x_pt, self.y_pt, self.r_pt, self.angle2, self.angle1)
        bpathitems.reverse()
        for bpathitem in bpathitems:
            normpath.normsubpaths[-1].append(bpathitem.reversed())
        context.x_pt, context.y_pt = _arcpoint(self.x_pt, self.y_pt, self.r_pt, self.angle2)

    def outputPS(self, file, writer):
        file.write("%g %g %g %g %g arcn\n" % (self.x_pt, self.y_pt,
                                              self.r_pt,
                                              self.angle1,
                                              self.angle2))
class arct_pt(pathitem):

    """Append tangent arc (coordinates in pts)

    Models the PostScript ``arct`` operator: an arc of radius r_pt tangent
    both to the line from the current point to (x1_pt, y1_pt) and to the
    line from (x1_pt, y1_pt) to (x2_pt, y2_pt).
    """

    __slots__ = "x1_pt", "y1_pt", "x2_pt", "y2_pt", "r_pt"

    def __init__(self, x1_pt, y1_pt, x2_pt, y2_pt, r_pt):
        """Store the two tangent line endpoints and the radius (all in pts)."""
        self.x1_pt = x1_pt
        self.y1_pt = y1_pt
        self.x2_pt = x2_pt
        self.y2_pt = y2_pt
        self.r_pt = r_pt

    def __str__(self):
        """Return a readable representation of this arct pathitem."""
        return "arct_pt(%g, %g, %g, %g, %g)" % (self.x1_pt, self.y1_pt,
                                                self.x2_pt, self.y2_pt,
                                                self.r_pt)

    def _pathitems(self, x_pt, y_pt):
        """return pathitems corresponding to arct for given currentpoint x_pt, y_pt.

        The return is a list containing line_pt, arc_pt, a arcn_pt instances.

        This is a helper routine for updatebbox and updatenormpath,
        which will delegate the work to the constructed pathitem.
        """
        # direction of tangent 1
        # NOTE(review): l1_pt or l2_pt may be zero here (coincident points),
        # which would raise ZeroDivisionError outside the handler below --
        # confirm whether a degenerate-line fallback is intended in that case.
        dx1_pt, dy1_pt = self.x1_pt-x_pt, self.y1_pt-y_pt
        l1_pt = math.hypot(dx1_pt, dy1_pt)
        dx1, dy1 = dx1_pt/l1_pt, dy1_pt/l1_pt

        # direction of tangent 2
        dx2_pt, dy2_pt = self.x2_pt-self.x1_pt, self.y2_pt-self.y1_pt
        l2_pt = math.hypot(dx2_pt, dy2_pt)
        dx2, dy2 = dx2_pt/l2_pt, dy2_pt/l2_pt

        # intersection angle between two tangents in the range (-pi, pi).
        # We take the orientation from the sign of the vector product.
        # Negative (positive) angles alpha corresponds to a turn to the right (left)
        # as seen from currentpoint.
        # Rounding errors can push the dot product of two unit vectors
        # slightly outside the domain [-1, 1] of acos for (anti)parallel
        # tangents, so clamp it before use.
        cosalpha = max(-1.0, min(1.0, dx1*dx2+dy1*dy2))
        if dx1*dy2-dy1*dx2 > 0:
            alpha = acos(cosalpha)
        else:
            alpha = -acos(cosalpha)

        try:
            # two tangent points
            xt1_pt = self.x1_pt - dx1*self.r_pt*tan(abs(alpha)/2)
            yt1_pt = self.y1_pt - dy1*self.r_pt*tan(abs(alpha)/2)
            xt2_pt = self.x1_pt + dx2*self.r_pt*tan(abs(alpha)/2)
            yt2_pt = self.y1_pt + dy2*self.r_pt*tan(abs(alpha)/2)

            # direction point 1 -> center of arc
            dmx_pt = 0.5*(xt1_pt+xt2_pt) - self.x1_pt
            dmy_pt = 0.5*(yt1_pt+yt2_pt) - self.y1_pt
            lm_pt = math.hypot(dmx_pt, dmy_pt)
            dmx, dmy = dmx_pt/lm_pt, dmy_pt/lm_pt

            # center of arc
            mx_pt = self.x1_pt + dmx*self.r_pt/cos(alpha/2)
            my_pt = self.y1_pt + dmy*self.r_pt/cos(alpha/2)

            # angle around which arc is centered
            phi = degrees(math.atan2(-dmy, -dmx))

            # half angular width of arc
            deltaphi = degrees(alpha)/2

            # straight line from the current point to the first tangent point
            line = lineto_pt(*_arcpoint(mx_pt, my_pt, self.r_pt, phi-deltaphi))
            if alpha > 0:
                # left turn -> counterclockwise arc
                return [line, arc_pt(mx_pt, my_pt, self.r_pt, phi-deltaphi, phi+deltaphi)]
            else:
                # right turn -> clockwise arc
                return [line, arcn_pt(mx_pt, my_pt, self.r_pt, phi-deltaphi, phi+deltaphi)]

        except ZeroDivisionError:
            # in the degenerate case, we just return a line as specified by the PS
            # language reference
            return [lineto_pt(self.x1_pt, self.y1_pt)]

    def updatebbox(self, bbox, context):
        """Update bbox by delegating to the pathitems built by _pathitems."""
        for item in self._pathitems(context.x_pt, context.y_pt):
            item.updatebbox(bbox, context)

    def updatenormpath(self, normpath, context):
        """Update normpath by delegating to the pathitems built by _pathitems."""
        for item in self._pathitems(context.x_pt, context.y_pt):
            item.updatenormpath(normpath, context)

    def outputPS(self, file, writer):
        """Write the PostScript ``arct`` command for this pathitem to file."""
        file.write("%g %g %g %g %g arct\n" % (self.x1_pt, self.y1_pt,
                                              self.x2_pt, self.y2_pt,
                                              self.r_pt))
#
# now the pathitems that convert from user coordinates to pts
#
class moveto(moveto_pt):

    """Set current point to (x, y)"""

    __slots__ = "x_pt", "y_pt"

    def __init__(self, x, y):
        """convert x and y to pts and delegate to moveto_pt"""
        x_pt = unit.topt(x)
        y_pt = unit.topt(y)
        moveto_pt.__init__(self, x_pt, y_pt)
class lineto(lineto_pt):

    """Append straight line to (x, y)"""

    __slots__ = "x_pt", "y_pt"

    def __init__(self, x, y):
        """convert x and y to pts and delegate to lineto_pt"""
        x_pt = unit.topt(x)
        y_pt = unit.topt(y)
        lineto_pt.__init__(self, x_pt, y_pt)
class curveto(curveto_pt):

    """Append curveto"""

    __slots__ = "x1_pt", "y1_pt", "x2_pt", "y2_pt", "x3_pt", "y3_pt"

    def __init__(self, x1, y1, x2, y2, x3, y3):
        """convert all control point coordinates to pts and delegate"""
        coords_pt = [unit.topt(coord) for coord in (x1, y1, x2, y2, x3, y3)]
        curveto_pt.__init__(self, *coords_pt)
class rmoveto(rmoveto_pt):

    """Perform relative moveto"""

    __slots__ = "dx_pt", "dy_pt"

    def __init__(self, dx, dy):
        """convert the relative offsets to pts and delegate to rmoveto_pt"""
        dx_pt = unit.topt(dx)
        dy_pt = unit.topt(dy)
        rmoveto_pt.__init__(self, dx_pt, dy_pt)
class rlineto(rlineto_pt):

    """Perform relative lineto"""

    __slots__ = "dx_pt", "dy_pt"

    def __init__(self, dx, dy):
        """convert the relative offsets to pts and delegate to rlineto_pt"""
        dx_pt = unit.topt(dx)
        dy_pt = unit.topt(dy)
        rlineto_pt.__init__(self, dx_pt, dy_pt)
class rcurveto(rcurveto_pt):

    """Append rcurveto"""

    __slots__ = "dx1_pt", "dy1_pt", "dx2_pt", "dy2_pt", "dx3_pt", "dy3_pt"

    def __init__(self, dx1, dy1, dx2, dy2, dx3, dy3):
        """convert all relative control point offsets to pts and delegate"""
        offsets_pt = [unit.topt(offset)
                      for offset in (dx1, dy1, dx2, dy2, dx3, dy3)]
        rcurveto_pt.__init__(self, *offsets_pt)
class arcn(arcn_pt):

    """Append clockwise arc"""

    __slots__ = "x_pt", "y_pt", "r_pt", "angle1", "angle2"

    def __init__(self, x, y, r, angle1, angle2):
        """convert center and radius to pts; the angles stay in degrees"""
        x_pt, y_pt, r_pt = unit.topt(x), unit.topt(y), unit.topt(r)
        arcn_pt.__init__(self, x_pt, y_pt, r_pt, angle1, angle2)
class arc(arc_pt):

    """Append counterclockwise arc"""

    __slots__ = "x_pt", "y_pt", "r_pt", "angle1", "angle2"

    def __init__(self, x, y, r, angle1, angle2):
        """convert center and radius to pts; the angles stay in degrees"""
        x_pt, y_pt, r_pt = unit.topt(x), unit.topt(y), unit.topt(r)
        arc_pt.__init__(self, x_pt, y_pt, r_pt, angle1, angle2)
class arct(arct_pt):

    """Append tangent arc"""

    __slots__ = "x1_pt", "y1_pt", "x2_pt", "y2_pt", "r_pt"

    def __init__(self, x1, y1, x2, y2, r):
        """convert both tangent points and the radius to pts and delegate"""
        coords_pt = [unit.topt(coord) for coord in (x1, y1, x2, y2, r)]
        arct_pt.__init__(self, *coords_pt)
#
# "combined" pathitems provided for performance reasons
#
class multilineto_pt(pathitem):

    """Perform multiple linetos (coordinates in pts)"""

    __slots__ = "points_pt"

    def __init__(self, points_pt):
        """Store the sequence of (x_pt, y_pt) tuples."""
        self.points_pt = points_pt

    def __str__(self):
        """Return a readable representation listing all points."""
        coords = ", ".join("(%g, %g)" % point_pt for point_pt in self.points_pt)
        return "multilineto_pt([%s])" % coords

    def updatebbox(self, bbox, context):
        """Include every point in bbox and move context to the last point."""
        for x_pt, y_pt in self.points_pt:
            bbox.includepoint_pt(x_pt, y_pt)
        if self.points_pt:
            context.x_pt, context.y_pt = self.points_pt[-1]

    def updatenormpath(self, normpath, context):
        """Append one normline_pt per point to the current subpath."""
        last_x_pt, last_y_pt = context.x_pt, context.y_pt
        for x_pt, y_pt in self.points_pt:
            normpath.normsubpaths[-1].append(
                normline_pt(last_x_pt, last_y_pt, x_pt, y_pt))
            last_x_pt, last_y_pt = x_pt, y_pt
        context.x_pt, context.y_pt = last_x_pt, last_y_pt

    def outputPS(self, file, writer):
        """Write one PostScript ``lineto`` command per point to file."""
        for coords_pt in self.points_pt:
            file.write("%g %g lineto\n" % coords_pt)
class multicurveto_pt(pathitem):

    """Perform multiple curvetos (coordinates in pts)"""

    __slots__ = "points_pt"

    def __init__(self, points_pt):
        """Store the sequence of 6-tuples (x1, y1, x2, y2, x3, y3) in pts."""
        self.points_pt = points_pt

    def __str__(self):
        """Return a readable representation listing all control points."""
        coords = ", ".join("(%g, %g, %g, %g, %g, %g)" % point_pt
                           for point_pt in self.points_pt)
        return "multicurveto_pt([%s])" % coords

    def updatebbox(self, bbox, context):
        """Include the exact Bezier extrema of every segment in bbox."""
        for x1_pt, y1_pt, x2_pt, y2_pt, x3_pt, y3_pt in self.points_pt:
            xmin_pt, xmax_pt = _bezierpolyrange(context.x_pt, x1_pt, x2_pt, x3_pt)
            ymin_pt, ymax_pt = _bezierpolyrange(context.y_pt, y1_pt, y2_pt, y3_pt)
            bbox.includepoint_pt(xmin_pt, ymin_pt)
            bbox.includepoint_pt(xmax_pt, ymax_pt)
            # the segment's end point becomes the next current point
            context.x_pt, context.y_pt = x3_pt, y3_pt

    def updatenormpath(self, normpath, context):
        """Append one normcurve_pt per control point tuple."""
        cur_x_pt, cur_y_pt = context.x_pt, context.y_pt
        for point_pt in self.points_pt:
            normpath.normsubpaths[-1].append(
                normcurve_pt(cur_x_pt, cur_y_pt, *point_pt))
            cur_x_pt, cur_y_pt = point_pt[4], point_pt[5]
        context.x_pt, context.y_pt = cur_x_pt, cur_y_pt

    def outputPS(self, file, writer):
        """Write one PostScript ``curveto`` command per segment to file."""
        for coords_pt in self.points_pt:
            file.write("%g %g %g %g %g %g curveto\n" % coords_pt)
################################################################################
# path: PS style path
################################################################################
class path:

    """PS style path"""

    __slots__ = "pathitems", "_normpath"

    def __init__(self, *pathitems):
        """construct a path from pathitems *args"""
        for apathitem in pathitems:
            assert isinstance(apathitem, pathitem), "only pathitem instances allowed"

        self.pathitems = list(pathitems)
        # normpath cache (only valid when no epsilon is set)
        self._normpath = None

    def __add__(self, other):
        """create new path out of self and other"""
        return path(*(self.pathitems + other.path().pathitems))

    def __iadd__(self, other):
        """add other inplace

        If other is a normpath instance, it is converted to a path before
        being added.

        """
        self.pathitems += other.path().pathitems
        self._normpath = None
        return self

    def __getitem__(self, i):
        """return path item i"""
        return self.pathitems[i]

    def __len__(self):
        """return the number of path items"""
        return len(self.pathitems)

    def __str__(self):
        l = ", ".join(map(str, self.pathitems))
        return "path(%s)" % l

    def append(self, apathitem):
        """append a path item"""
        assert isinstance(apathitem, pathitem), "only pathitem instance allowed"
        self.pathitems.append(apathitem)
        # invalidate the normpath cache
        self._normpath = None

    def arclen_pt(self):
        """return arc length in pts"""
        return self.normpath().arclen_pt()

    def arclen(self):
        """return arc length"""
        return self.normpath().arclen()

    def arclentoparam_pt(self, lengths_pt):
        """return the param(s) matching the given length(s)_pt in pts"""
        return self.normpath().arclentoparam_pt(lengths_pt)

    def arclentoparam(self, lengths):
        """return the param(s) matching the given length(s)"""
        return self.normpath().arclentoparam(lengths)

    def at_pt(self, params):
        """return coordinates of path in pts at param(s) or arc length(s) in pts"""
        return self.normpath().at_pt(params)

    def at(self, params):
        """return coordinates of path at param(s) or arc length(s)"""
        return self.normpath().at(params)

    def atbegin_pt(self):
        """return coordinates of the beginning of first subpath in path in pts"""
        return self.normpath().atbegin_pt()

    def atbegin(self):
        """return coordinates of the beginning of first subpath in path"""
        return self.normpath().atbegin()

    def atend_pt(self):
        """return coordinates of the end of last subpath in path in pts"""
        return self.normpath().atend_pt()

    def atend(self):
        """return coordinates of the end of last subpath in path"""
        return self.normpath().atend()

    def bbox(self):
        """return bbox of path"""
        if self.pathitems:
            bbox = self.pathitems[0].createbbox()
            context = self.pathitems[0].createcontext()
            for item in self.pathitems[1:]:
                item.updatebbox(bbox, context)
            return bbox
        else:
            return bboxmodule.empty()

    def begin(self):
        """return param corresponding to the beginning of the path"""
        return self.normpath().begin()

    def curveradius_pt(self, params):
        """return the curvature radius in pts at param(s) or arc length(s) in pts

        The curvature radius is the inverse of the curvature. When the
        curvature is 0, None is returned. Note that this radius can be negative
        or positive, depending on the sign of the curvature."""
        return self.normpath().curveradius_pt(params)

    def curveradius(self, params):
        """return the curvature radius at param(s) or arc length(s)

        The curvature radius is the inverse of the curvature. When the
        curvature is 0, None is returned. Note that this radius can be negative
        or positive, depending on the sign of the curvature."""
        return self.normpath().curveradius(params)

    def end(self):
        """return param corresponding to the end of the path"""
        return self.normpath().end()

    def extend(self, pathitems):
        """extend path by pathitems"""
        for apathitem in pathitems:
            assert isinstance(apathitem, pathitem), "only pathitem instance allowed"

        self.pathitems.extend(pathitems)
        self._normpath = None

    def intersect(self, other):
        """intersect self with other path

        Returns a tuple of lists consisting of the parameter values
        of the intersection points of the corresponding normpath.

        """
        return self.normpath().intersect(other)

    def join(self, other):
        """join other path/normpath inplace

        If other is a normpath instance, it is converted to a path before
        being joined.

        """
        self.pathitems = self.joined(other).path().pathitems
        self._normpath = None
        return self

    def joined(self, other):
        """return path consisting of self and other joined together"""
        return self.normpath().joined(other).path()

    # << operator also designates joining
    __lshift__ = joined

    def normpath(self, epsilon=_marker):
        """convert the path into a normpath"""
        # use cached value if existent and epsilon is _marker
        if self._normpath is not None and epsilon is _marker:
            return self._normpath
        # Note: the local result must not be named "normpath"; that would
        # shadow the module-level normpath class within this function and
        # raise an UnboundLocalError in the empty-path branch below.
        if self.pathitems:
            if epsilon is _marker:
                result = self.pathitems[0].createnormpath()
            else:
                result = self.pathitems[0].createnormpath(epsilon)
            context = self.pathitems[0].createcontext()
            for item in self.pathitems[1:]:
                item.updatenormpath(result, context)
        else:
            if epsilon is _marker:
                result = normpath([])
            else:
                result = normpath(epsilon=epsilon)
        if epsilon is _marker:
            self._normpath = result
        return result

    def paramtoarclen_pt(self, params):
        """return arc length(s) in pts matching the given param(s)"""
        return self.normpath().paramtoarclen_pt(params)

    def paramtoarclen(self, params):
        """return arc length(s) matching the given param(s)"""
        return self.normpath().paramtoarclen(params)

    def path(self):
        """return corresponding path, i.e., self"""
        return self

    def reversed(self):
        """return reversed normpath"""
        # TODO: couldn't we try to return a path instead of converting it
        # to a normpath (but this might not be worth the trouble)
        return self.normpath().reversed()

    def rotation_pt(self, params):
        """return rotation at param(s) or arc length(s) in pts"""
        # delegate to the _pt variant, consistent with the other *_pt
        # methods: arc lengths passed here are given in pts
        return self.normpath().rotation_pt(params)

    def rotation(self, params):
        """return rotation at param(s) or arc length(s)"""
        return self.normpath().rotation(params)

    def split_pt(self, params):
        """split normpath at param(s) or arc length(s) in pts and return list of normpaths"""
        return self.normpath().split_pt(params)

    def split(self, params):
        """split normpath at param(s) or arc length(s) and return list of normpaths"""
        return self.normpath().split(params)

    def tangent_pt(self, params, length):
        """return tangent vector of path at param(s) or arc length(s) in pts

        If length in pts is not None, the tangent vector will be scaled to
        the desired length.

        """
        return self.normpath().tangent_pt(params, length)

    def tangent(self, params, length=1):
        """return tangent vector of path at param(s) or arc length(s)

        If length is not None, the tangent vector will be scaled to
        the desired length.

        """
        return self.normpath().tangent(params, length)

    def trafo_pt(self, params):
        """return transformation at param(s) or arc length(s) in pts"""
        return self.normpath().trafo_pt(params)

    def trafo(self, params):
        """return transformation at param(s) or arc length(s)"""
        return self.normpath().trafo(params)

    def transformed(self, trafo):
        """return transformed path"""
        return self.normpath().transformed(trafo)

    def outputPS(self, file, writer):
        """write PS code to file"""
        for pitem in self.pathitems:
            pitem.outputPS(file, writer)

    def outputPDF(self, file, writer):
        """write PDF code to file"""
        # PDF only supports normsubpathitems; we need to use a normpath
        # with epsilon equals None to prevent failure for paths shorter
        # than epsilon
        self.normpath(epsilon=None).outputPDF(file, writer)
#
# some special kinds of path, again in two variants
#
class line_pt(path):

    """straight line from (x1_pt, y1_pt) to (x2_pt, y2_pt) in pts"""

    def __init__(self, x1_pt, y1_pt, x2_pt, y2_pt):
        """build the path as a moveto followed by a single lineto"""
        start = moveto_pt(x1_pt, y1_pt)
        segment = lineto_pt(x2_pt, y2_pt)
        path.__init__(self, start, segment)
class curve_pt(path):

    """bezier curve with control points (x0_pt, y1_pt),..., (x3_pt, y3_pt) in pts"""

    def __init__(self, x0_pt, y0_pt, x1_pt, y1_pt, x2_pt, y2_pt, x3_pt, y3_pt):
        """start at the first control point and append a single curveto"""
        start = moveto_pt(x0_pt, y0_pt)
        bezier = curveto_pt(x1_pt, y1_pt, x2_pt, y2_pt, x3_pt, y3_pt)
        path.__init__(self, start, bezier)
class rect_pt(path):

    """rectangle at position (x_pt, y_pt) with width_pt and height_pt in pts"""

    def __init__(self, x_pt, y_pt, width_pt, height_pt):
        """trace the four corners counterclockwise and close the path"""
        corners = [(x_pt, y_pt),
                   (x_pt + width_pt, y_pt),
                   (x_pt + width_pt, y_pt + height_pt),
                   (x_pt, y_pt + height_pt)]
        pathitems = [moveto_pt(*corners[0])]
        pathitems.extend(lineto_pt(*corner) for corner in corners[1:])
        pathitems.append(closepath())
        path.__init__(self, *pathitems)
class circle_pt(path):

    """circle with center (x_pt, y_pt) and radius_pt in pts"""

    def __init__(self, x_pt, y_pt, radius_pt, arcepsilon=0.1):
        """approximate the full circle by an arc leaving a tiny angular gap
        (arcepsilon degrees on each side) which closepath then closes"""
        start = moveto_pt(x_pt + radius_pt, y_pt)
        sweep = arc_pt(x_pt, y_pt, radius_pt, arcepsilon, 360 - arcepsilon)
        path.__init__(self, start, sweep, closepath())
class ellipse_pt(path):

    """ellipse with center (x_pt, y_pt) in pts,
    the two axes (a_pt, b_pt) in pts,
    and the angle angle of the first axis"""

    def __init__(self, x_pt, y_pt, a_pt, b_pt, angle, **kwargs):
        """map a unit circle onto the requested ellipse"""
        # scale to the axes, rotate the first axis, then shift to the center
        t = trafo.scale(a_pt, b_pt, epsilon=None).rotated(angle).translated_pt(x_pt, y_pt)
        unitcircle = circle_pt(0, 0, 1, **kwargs)
        transformed = unitcircle.normpath(epsilon=None).transformed(t)
        path.__init__(self, *transformed.path().pathitems)
class line(line_pt):

    """straight line from (x1, y1) to (x2, y2)"""

    def __init__(self, x1, y1, x2, y2):
        """convert both end points to pts and delegate to line_pt"""
        coords_pt = [unit.topt(coord) for coord in (x1, y1, x2, y2)]
        line_pt.__init__(self, *coords_pt)
class curve(curve_pt):

    """bezier curve with control points (x0, y1),..., (x3, y3)"""

    def __init__(self, x0, y0, x1, y1, x2, y2, x3, y3):
        """convert all control point coordinates to pts and delegate"""
        coords_pt = [unit.topt(coord)
                     for coord in (x0, y0, x1, y1, x2, y2, x3, y3)]
        curve_pt.__init__(self, *coords_pt)
class rect(rect_pt):

    """rectangle at position (x,y) with width and height"""

    def __init__(self, x, y, width, height):
        """convert position and dimensions to pts and delegate to rect_pt"""
        dims_pt = [unit.topt(value) for value in (x, y, width, height)]
        rect_pt.__init__(self, *dims_pt)
class circle(circle_pt):

    """circle with center (x,y) and radius"""

    def __init__(self, x, y, radius, **kwargs):
        """convert center and radius to pts and delegate to circle_pt"""
        x_pt, y_pt = unit.topt(x), unit.topt(y)
        circle_pt.__init__(self, x_pt, y_pt, unit.topt(radius), **kwargs)
class ellipse(ellipse_pt):

    """ellipse with center (x, y), the two axes (a, b),
    and the angle angle of the first axis"""

    def __init__(self, x, y, a, b, angle, **kwargs):
        """convert center and axes to pts; the angle stays in degrees"""
        x_pt, y_pt = unit.topt(x), unit.topt(y)
        a_pt, b_pt = unit.topt(a), unit.topt(b)
        ellipse_pt.__init__(self, x_pt, y_pt, a_pt, b_pt, angle, **kwargs)
| 35.555991 | 130 | 0.585795 |
acfb2007850a309f1ca1d3e4b3270938f2462d5c | 517 | py | Python | pyvitemadose/__init__.py | thib1984/pyvitemadose | 5f3da18207b9da946737a468f05af0ad3c6526e5 | [
"MIT"
] | 2 | 2021-06-05T20:33:40.000Z | 2021-12-24T09:23:46.000Z | pyvitemadose/__init__.py | thib1984/pyvitemadose | 5f3da18207b9da946737a468f05af0ad3c6526e5 | [
"MIT"
] | 3 | 2021-11-23T09:26:03.000Z | 2021-11-23T09:26:32.000Z | pyvitemadose/__init__.py | thib1984/pyvitemadose | 5f3da18207b9da946737a468f05af0ad3c6526e5 | [
"MIT"
] | null | null | null | """
pyvitemadose init
"""
from os import sys
from pyvitemadose.args import compute_args, is_pyinstaller
from pyvitemadose.pyvitemadose import find
from pyvitemadose.update import update
def pyvitemadose():
"""
pyvitemadose entry point
"""
args = compute_args()
if args.update:
if not is_pyinstaller():
update()
else:
print("update is disabled. Do you use a bundle?")
if args.departement:
find(args.departement)
sys.exit(0)
pyvitemadose() | 20.68 | 61 | 0.659574 |
acfb20936490520d95f1d296734cc25c38fedca5 | 1,160 | py | Python | testing/xvfb_test_script.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | testing/xvfb_test_script.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 86 | 2015-10-21T13:02:42.000Z | 2022-03-14T07:50:50.000Z | testing/xvfb_test_script.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# Copyright (c) 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple script for xvfb_unittest to launch.
This script outputs formatted data to stdout for the xvfb unit tests
to read and compare with expected output.
"""
from __future__ import print_function
import os
import signal
import sys
import time
def print_signal(sig, *_):
# print_function does not guarantee its output won't be interleaved
# with other logging elsewhere, but it does guarantee its output
# will appear intact. Because the tests parse via starts_with, prefix
# with a newline. These tests were previously flaky due to output like
# > Signal: 1 <other messages>.
print('\nSignal :{}'.format(sig))
if __name__ == '__main__':
signal.signal(signal.SIGTERM, print_signal)
signal.signal(signal.SIGINT, print_signal)
# test the subprocess display number.
print('\nDisplay :{}'.format(os.environ.get('DISPLAY', 'None')))
if len(sys.argv) > 1 and sys.argv[1] == '--sleep':
time.sleep(2) # gives process time to receive signal.
| 30.526316 | 72 | 0.736207 |
acfb20c69fb8701660c1513d76fe0a67555f4fc8 | 3,575 | py | Python | test/test_project_api.py | ContrastingSounds/looker_sdk_31 | f973434049fff1b605b10086ab8b84f2f62e3489 | [
"MIT"
] | null | null | null | test/test_project_api.py | ContrastingSounds/looker_sdk_31 | f973434049fff1b605b10086ab8b84f2f62e3489 | [
"MIT"
] | null | null | null | test/test_project_api.py | ContrastingSounds/looker_sdk_31 | f973434049fff1b605b10086ab8b84f2f62e3489 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import looker_client_31
from looker_client_31.api.project_api import ProjectApi # noqa: E501
from looker_client_31.rest import ApiException
class TestProjectApi(unittest.TestCase):
"""ProjectApi unit test stubs"""
def setUp(self):
self.api = looker_client_31.api.project_api.ProjectApi() # noqa: E501
def tearDown(self):
pass
def test_all_git_branches(self):
"""Test case for all_git_branches
Get All Git Branchs # noqa: E501
"""
pass
def test_all_git_connection_tests(self):
"""Test case for all_git_connection_tests
Get All Git Connection Tests # noqa: E501
"""
pass
def test_all_project_files(self):
"""Test case for all_project_files
Get All Project Files # noqa: E501
"""
pass
def test_all_projects(self):
"""Test case for all_projects
Get All Projects # noqa: E501
"""
pass
def test_create_git_deploy_key(self):
"""Test case for create_git_deploy_key
Create Deploy Key # noqa: E501
"""
pass
def test_create_project(self):
"""Test case for create_project
Create Project # noqa: E501
"""
pass
def test_git_deploy_key(self):
"""Test case for git_deploy_key
Git Deploy Key # noqa: E501
"""
pass
def test_project(self):
"""Test case for project
Get Project # noqa: E501
"""
pass
def test_project_file(self):
"""Test case for project_file
Get Project File # noqa: E501
"""
pass
def test_project_validation_results(self):
"""Test case for project_validation_results
Cached Project Validation Results # noqa: E501
"""
pass
def test_project_workspace(self):
"""Test case for project_workspace
Get Project Workspace # noqa: E501
"""
pass
def test_reset_project_to_production(self):
"""Test case for reset_project_to_production
Reset To Production # noqa: E501
"""
pass
def test_reset_project_to_remote(self):
"""Test case for reset_project_to_remote
Reset To Remote # noqa: E501
"""
pass
def test_run_git_connection_test(self):
"""Test case for run_git_connection_test
Run Git Connection Test # noqa: E501
"""
pass
def test_update_project(self):
"""Test case for update_project
Update Project # noqa: E501
"""
pass
def test_validate_project(self):
"""Test case for validate_project
Validate Project # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 24.319728 | 518 | 0.632168 |
acfb223b45dd023bf482d2a27d661e27dc23c40e | 221 | py | Python | locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_dragon-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_dragon-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_dragon-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | from pyvista import examples
dataset = examples.download_dragon() # doctest:+SKIP
#
# This dataset is used in the following examples:
#
# * :ref:`floors_example`
# * :ref:`orbiting_example`
# * :ref:`silhouette_example`
| 24.555556 | 53 | 0.733032 |
acfb24b5895076c2b36e0d6d46dd823b023a4d25 | 4,894 | py | Python | paddlers/models/ppcls/utils/gallery2fc.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 40 | 2022-02-28T02:07:28.000Z | 2022-03-31T09:54:29.000Z | paddlers/models/ppcls/utils/gallery2fc.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 5 | 2022-03-15T12:13:33.000Z | 2022-03-31T15:54:08.000Z | paddlers/models/ppcls/utils/gallery2fc.py | huilin16/PaddleRS | ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a | [
"Apache-2.0"
] | 20 | 2022-02-28T02:07:31.000Z | 2022-03-31T11:40:40.000Z | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import cv2
from ppcls.arch import build_model
from ppcls.utils.config import parse_config, parse_args
from ppcls.utils.save_load import load_dygraph_pretrain
from ppcls.utils.logger import init_logger
from ppcls.data import create_operators
from ppcls.arch.slim import quantize_model
class GalleryLayer(paddle.nn.Layer):
def __init__(self, configs):
super().__init__()
self.configs = configs
embedding_size = self.configs["Arch"]["Head"]["embedding_size"]
self.batch_size = self.configs["IndexProcess"]["batch_size"]
self.image_shape = self.configs["Global"]["image_shape"].copy()
self.image_shape.insert(0, self.batch_size)
image_root = self.configs["IndexProcess"]["image_root"]
data_file = self.configs["IndexProcess"]["data_file"]
delimiter = self.configs["IndexProcess"]["delimiter"]
self.gallery_images = []
gallery_docs = []
gallery_labels = []
with open(data_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
for ori_line in lines:
line = ori_line.strip().split(delimiter)
text_num = len(line)
assert text_num >= 2, f"line({ori_line}) must be splitted into at least 2 parts, but got {text_num}"
image_file = os.path.join(image_root, line[0])
self.gallery_images.append(image_file)
gallery_docs.append(ori_line.strip())
gallery_labels.append(line[1].strip())
self.gallery_layer = paddle.nn.Linear(
embedding_size, len(self.gallery_images), bias_attr=False)
self.gallery_layer.skip_quant = True
output_label_str = ""
for i, label_i in enumerate(gallery_labels):
output_label_str += "{} {}\n".format(i, label_i)
output_path = configs["Global"]["save_inference_dir"] + "_label.txt"
with open(output_path, "w") as f:
f.write(output_label_str)
def forward(self, x, label=None):
x = paddle.nn.functional.normalize(x)
x = self.gallery_layer(x)
return x
def build_gallery_layer(self, feature_extractor):
transform_configs = self.configs["IndexProcess"]["transform_ops"]
preprocess_ops = create_operators(transform_configs)
embedding_size = self.configs["Arch"]["Head"]["embedding_size"]
batch_index = 0
input_tensor = paddle.zeros(self.image_shape)
gallery_feature = paddle.zeros(
(len(self.gallery_images), embedding_size))
for i, image_path in enumerate(self.gallery_images):
image = cv2.imread(image_path)[:, :, ::-1]
for op in preprocess_ops:
image = op(image)
input_tensor[batch_index] = image
batch_index += 1
if batch_index == self.batch_size or i == len(
self.gallery_images) - 1:
batch_feature = feature_extractor(input_tensor)["features"]
for j in range(batch_index):
feature = batch_feature[j]
norm_feature = paddle.nn.functional.normalize(
feature, axis=0)
gallery_feature[i - batch_index + j + 1] = norm_feature
self.gallery_layer.set_state_dict({"_layer.weight": gallery_feature.T})
def export_fuse_model(configs):
slim_config = configs["Slim"].copy()
configs["Slim"] = None
fuse_model = build_model(configs)
fuse_model.head = GalleryLayer(configs)
configs["Slim"] = slim_config
quantize_model(configs, fuse_model)
load_dygraph_pretrain(fuse_model, configs["Global"]["pretrained_model"])
fuse_model.eval()
fuse_model.head.build_gallery_layer(fuse_model)
save_path = configs["Global"]["save_inference_dir"]
fuse_model.quanter.save_quantized_model(
fuse_model,
save_path,
input_spec=[
paddle.static.InputSpec(
shape=[None] + configs["Global"]["image_shape"],
dtype='float32')
])
def main():
args = parse_args()
configs = parse_config(args.config)
init_logger(name='gallery2fc')
export_fuse_model(configs)
if __name__ == '__main__':
main()
| 39.467742 | 116 | 0.648754 |
acfb2509622a39670f93ac21937d153bf57892c8 | 410 | py | Python | PycharmProjects/pythonexercicios/aula 10/ex028.py | zmixtv1/cev-Python | edce04f86d943d9af070bf3c5e89575ff796ec9e | [
"MIT"
] | null | null | null | PycharmProjects/pythonexercicios/aula 10/ex028.py | zmixtv1/cev-Python | edce04f86d943d9af070bf3c5e89575ff796ec9e | [
"MIT"
] | null | null | null | PycharmProjects/pythonexercicios/aula 10/ex028.py | zmixtv1/cev-Python | edce04f86d943d9af070bf3c5e89575ff796ec9e | [
"MIT"
] | null | null | null | from random import randint
from time import sleep
computador = randint(0, 10)
print('-❤-' * 14)
print('Vou pensar em um numero entre 0 e 10. tente adivinhar!! ')
print('-❤-' * 14)
jogador = int(input('Em que número eu pensei? '))
print('Processando...')
sleep(2.5)
if jogador == computador:
print('Parabêns!! você Ganhou!!')
else:
print(f'Ganhei, eu pensei no numero {computador} e não no {jogador} ')
| 29.285714 | 74 | 0.67561 |
acfb257d364e973c583da423ff8cda082bde1e47 | 1,357 | py | Python | test/test_resource_collection_community_action_template_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | test/test_resource_collection_community_action_template_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | test/test_resource_collection_community_action_template_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_swagger_client.models.resource_collection_community_action_template_resource import ResourceCollectionCommunityActionTemplateResource # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestResourceCollectionCommunityActionTemplateResource(unittest.TestCase):
    """Unit-test stubs for ``ResourceCollectionCommunityActionTemplateResource``."""

    def setUp(self):
        """No fixtures are required for this stub."""

    def tearDown(self):
        """Nothing to clean up."""

    def testResourceCollectionCommunityActionTemplateResource(self):
        """Placeholder test for ResourceCollectionCommunityActionTemplateResource.

        FIXME: construct the model with example values for its mandatory
        attributes, e.g.
        ``octopus_deploy_swagger_client.models
        .resource_collection_community_action_template_resource
        .ResourceCollectionCommunityActionTemplateResource()``.
        """
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 33.097561 | 175 | 0.793662 |
acfb25c51f660470638db43fcdf30ff6b173aeb8 | 8,170 | py | Python | mmdet/models/detectors/two_stage.py | CK-er/mmdet | 9bea4068efbcf7bf739dbe41917a68d525c29868 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/two_stage.py | CK-er/mmdet | 9bea4068efbcf7bf739dbe41917a68d525c29868 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/two_stage.py | CK-er/mmdet | 9bea4068efbcf7bf739dbe41917a68d525c29868 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
    """Base class for two-stage detectors.

    Two-stage detectors typically consist of a region proposal network (RPN)
    producing candidate boxes and a task-specific RoI head refining them.

    NOTE: unlike upstream mmdetection, ``extract_feat`` of this variant
    returns a pair ``(x, y)``: ``x`` are the neck/FPN feature maps consumed
    by the heads, ``y`` is an auxiliary neck output (currently unused by the
    heads in this class).
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 rpn_head=None,
                 roi_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super(TwoStageDetector, self).__init__()
        self.backbone = build_backbone(backbone)
        if neck is not None:
            self.neck = build_neck(neck)
        if rpn_head is not None:
            # The RPN gets its own train cfg section but shares the test cfg.
            rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
            rpn_head_ = rpn_head.copy()
            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
            self.rpn_head = build_head(rpn_head_)
        if roi_head is not None:
            # update train and test cfg here for now
            # TODO: refactor assigner & sampler
            rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
            roi_head.update(train_cfg=rcnn_train_cfg)
            roi_head.update(test_cfg=test_cfg.rcnn)
            self.roi_head = build_head(roi_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.init_weights(pretrained=pretrained)

    @property
    def with_rpn(self):
        """bool: True if the detector has an RPN head."""
        return hasattr(self, 'rpn_head') and self.rpn_head is not None

    @property
    def with_roi_head(self):
        """bool: True if the detector has a RoI head."""
        return hasattr(self, 'roi_head') and self.roi_head is not None

    def init_weights(self, pretrained=None):
        """Initialize the weights of backbone, neck, RPN head and RoI head."""
        super(TwoStageDetector, self).init_weights(pretrained)
        self.backbone.init_weights(pretrained=pretrained)
        if self.with_neck:
            if isinstance(self.neck, nn.Sequential):
                for m in self.neck:
                    m.init_weights()
            else:
                self.neck.init_weights()
        if self.with_rpn:
            self.rpn_head.init_weights()
        if self.with_roi_head:
            self.roi_head.init_weights(pretrained)

    def extract_feat(self, img):
        """Extract features with the backbone and (optionally) the neck.

        Returns:
            tuple: ``(x, y)``; ``y`` is ``None`` when no neck is configured.
        """
        # fix: ``y`` was previously undefined (NameError) when ``with_neck``
        # is False; default it to None so neck-less configs still work.
        y = None
        x = self.backbone(img)
        if self.with_neck:
            x, y = self.neck(x)
        return x, y

    def forward_dummy(self, img):
        """Used for computing network flops.

        See `mmdetection/tools/get_flops.py`
        """
        outs = ()
        # backbone (auxiliary neck output is not needed for flops counting)
        x, _ = self.extract_feat(img)
        # rpn
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = outs + (rpn_outs, )
        proposals = torch.randn(1000, 4).to(img.device)
        # roi_head
        roi_outs = self.roi_head.forward_dummy(x, proposals)
        outs = outs + (roi_outs, )
        return outs

    def forward_train(self,
                      img,
                      img_metas,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      proposals=None,
                      **kwargs):
        """
        Args:
            img (Tensor): of shape (N, C, H, W) encoding input images.
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.
            gt_bboxes (list[Tensor]): each item are the truth boxes for each
                image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
            gt_masks (None | Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.
            proposals : override rpn proposals with custom proposals. Use when
                `with_rpn` is False.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        # Feature extraction: backbone forward plus FPN/neck. The auxiliary
        # neck output is not consumed by the heads during training.
        x, _ = self.extract_feat(img)

        losses = dict()

        # RPN forward and loss: the head both contributes RPN losses and
        # yields the proposals consumed by the RoI head below.
        if self.with_rpn:
            proposal_cfg = self.train_cfg.get('rpn_proposal',
                                              self.test_cfg.rpn)
            rpn_losses, proposal_list = self.rpn_head.forward_train(
                x,
                img_metas,
                gt_bboxes,
                gt_labels=None,
                gt_bboxes_ignore=gt_bboxes_ignore,
                proposal_cfg=proposal_cfg)
            losses.update(rpn_losses)  # merge RPN losses into the result dict
        else:
            # Proposals supplied by the caller (e.g. precomputed).
            proposal_list = proposals

        roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
                                                 gt_bboxes, gt_labels,
                                                 gt_bboxes_ignore, gt_masks,
                                                 **kwargs)
        losses.update(roi_losses)

        return losses

    async def async_simple_test(self,
                                img,
                                img_meta,
                                proposals=None,
                                rescale=False):
        """Async test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        # fix: extract_feat returns (x, y); previously the whole tuple was
        # forwarded as the feature maps, breaking the heads.
        x, _ = self.extract_feat(img)

        if proposals is None:
            proposal_list = await self.rpn_head.async_simple_test_rpn(
                x, img_meta)
        else:
            proposal_list = proposals

        return await self.roi_head.async_simple_test(
            x, proposal_list, img_meta, rescale=rescale)

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Test without augmentation."""
        assert self.with_bbox, 'Bbox head must be implemented.'
        # fix: unpack the (x, y) pair; see async_simple_test.
        x, _ = self.extract_feat(img)

        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
        else:
            proposal_list = proposals

        return self.roi_head.simple_test(
            x, proposal_list, img_metas, rescale=rescale)

    def aug_test(self, imgs, img_metas, rescale=False):
        """Test with augmentations.

        If rescale is False, then returned bboxes and masks will fit the scale
        of imgs[0].
        """
        # recompute feats to save memory
        # NOTE(review): the base-class ``extract_feats`` presumably calls
        # ``extract_feat`` per image; with the (x, y) return of this variant
        # each element here would be a tuple — confirm before relying on TTA.
        x = self.extract_feats(imgs)
        proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
        return self.roi_head.aug_test(
            x, proposal_list, img_metas, rescale=rescale)
| 38.537736 | 141 | 0.55716 |
acfb26bcf5d7caea28f9a73a3763f9aaf276f3fd | 2,810 | py | Python | asv_bench/benchmarks/frame_ctor.py | mtrbean/pandas | c0ff67a22df9c18da1172766e313732ed2ab6c30 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5 | 2019-07-26T15:22:41.000Z | 2021-09-28T09:22:17.000Z | asv_bench/benchmarks/frame_ctor.py | mtrbean/pandas | c0ff67a22df9c18da1172766e313732ed2ab6c30 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2019-08-18T16:00:45.000Z | 2019-08-18T16:00:45.000Z | asv_bench/benchmarks/frame_ctor.py | mtrbean/pandas | c0ff67a22df9c18da1172766e313732ed2ab6c30 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2019-07-26T10:47:23.000Z | 2020-08-10T12:40:32.000Z | import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range
try:
from pandas.tseries.offsets import Nano, Hour
except ImportError:
# For compatibility with older versions
from pandas.core.datetools import * # noqa
class FromDicts:
    """ASV benchmarks: constructing DataFrames from dict-shaped inputs."""

    def setup(self):
        n_rows, n_cols = 5000, 50
        self.index = tm.makeStringIndex(n_rows)
        self.columns = tm.makeStringIndex(n_cols)
        source = DataFrame(
            np.random.randn(n_rows, n_cols), index=self.index, columns=self.columns
        )
        self.data = source.to_dict()
        self.dict_list = source.to_dict(orient="records")
        # 2000 integer-keyed entries of 100 float values each.
        self.data2 = {outer: {inner: float(inner) for inner in range(100)} for outer in range(2000)}

    def time_list_of_dict(self):
        """Construct from a list of per-row dicts."""
        DataFrame(self.dict_list)

    def time_nested_dict(self):
        """Construct from a nested column->row dict."""
        DataFrame(self.data)

    def time_nested_dict_index(self):
        """Nested dict with an explicit index."""
        DataFrame(self.data, index=self.index)

    def time_nested_dict_columns(self):
        """Nested dict with explicit columns."""
        DataFrame(self.data, columns=self.columns)

    def time_nested_dict_index_columns(self):
        """Nested dict with both index and columns supplied."""
        DataFrame(self.data, index=self.index, columns=self.columns)

    def time_nested_dict_int64(self):
        # nested dict, integer indexes, regression described in #621
        DataFrame(self.data2)
class FromSeries:
    """ASV benchmark: DataFrame construction from a MultiIndex Series."""

    def setup(self):
        levels = MultiIndex.from_product([range(100), range(100)])
        self.s = Series(np.random.randn(10000), index=levels)

    def time_mi_series(self):
        DataFrame(self.s)
class FromDictwithTimestamp:
    """ASV benchmark: DataFrame from a dict keyed by Timestamp index values."""

    params = [Nano(1), Hour(1)]
    param_names = ["offset"]

    def setup(self, offset):
        size = 10 ** 3
        np.random.seed(1234)  # deterministic data across benchmark runs
        stamps = date_range(Timestamp("1/1/1900"), freq=offset, periods=size)
        self.d = DataFrame(np.random.randn(size, 10), index=stamps).to_dict()

    def time_dict_with_timestamp_offsets(self, offset):
        DataFrame(self.d)
class FromRecords:
    """ASV benchmark: DataFrame.from_records fed from a generator (GH 6700)."""

    params = [None, 1000]
    param_names = ["nrows"]

    # A generator is exhausted after a single use, so force setup before
    # every timing call and keep the repeat budget explicit.
    number = 1
    repeat = (3, 250, 10)

    def setup(self, nrows):
        total = 100000
        self.gen = ((i, i * 20, i * 100) for i in range(total))

    def time_frame_from_records_generator(self, nrows):
        # issue-6700
        self.df = DataFrame.from_records(self.gen, nrows=nrows)
class FromNDArray:
    """ASV benchmark: DataFrame from a 1-D ndarray."""

    def setup(self):
        sample_size = 100000
        self.data = np.random.randn(sample_size)

    def time_frame_from_ndarray(self):
        self.df = DataFrame(self.data)
class FromLists:
    """ASV benchmark: DataFrame from a list of row lists."""

    goal_time = 0.2

    def setup(self):
        n_rows, n_cols = 1000, 100
        self.data = [list(range(n_cols)) for _ in range(n_rows)]

    def time_frame_from_lists(self):
        self.df = DataFrame(self.data)
from .pandas_vb_common import setup # noqa: F401
| 26.018519 | 88 | 0.643416 |
acfb29aff5583055d34ddbb5937133bea5d94281 | 2,219 | py | Python | vb2py/PythonCard/components/togglebutton.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/PythonCard/components/togglebutton.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/PythonCard/components/togglebutton.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null |
"""
__version__ = "$Revision: 1.2 $"
__date__ = "$Date: 2004/05/13 02:40:24 $"
"""
import wx
from PythonCard import event, widget
# KEA 2004-05-06
# expose the same interface as CheckBox
class ToggleButtonMouseClickEvent(event.MouseClickEvent):
    """Mouse-click event emitted when a wx toggle button is toggled."""

    # wx event binder and command-event id used to hook toggle-button clicks.
    binding = wx.EVT_TOGGLEBUTTON
    id = wx.wxEVT_COMMAND_TOGGLEBUTTON_CLICKED

# The extra events a ToggleButton emits beyond the common widget events.
ToggleButtonEvents = (ToggleButtonMouseClickEvent,)
class ToggleButtonSpec(widget.WidgetSpec):
    """Resource spec for ToggleButton: its events and editable attributes."""

    def __init__(self):
        extra_events = list(ToggleButtonEvents)
        extra_attributes = {
            'label': {'presence': 'optional', 'default': 'ToggleButton'},
            'checked': {'presence': 'optional', 'default': 0},
        }
        widget.WidgetSpec.__init__(
            self, 'ToggleButton', 'Widget', extra_events, extra_attributes)
class ToggleButton(widget.Widget, wx.ToggleButton):
    """
    A toggle button.

    Wraps wx.ToggleButton and exposes ``checked`` and ``label`` as plain
    attributes via properties, matching the CheckBox interface.
    """

    # Spec instance shared by all ToggleButtons; used by the resource editor.
    _spec = ToggleButtonSpec()

    def __init__( self, aParent, aResource ) :
        # Initialize the wx widget first so the window exists before the
        # PythonCard widget layer configures it.
        wx.ToggleButton.__init__(
            self,
            aParent,
            widget.makeNewId(aResource.id),
            aResource.label,
            aResource.position,
            aResource.size,
            style = wx.CLIP_SIBLINGS | wx.NO_FULL_REPAINT_ON_RESIZE,
            name = aResource.name
        )

        widget.Widget.__init__( self, aParent, aResource)

        # Apply the initial checked state from the resource description.
        if aResource.checked:
            self.SetValue(True)

        self._bindEvents(event.WIDGET_EVENTS + ToggleButtonEvents)

    # Attribute-style access to the wx getter/setter pairs.
    checked = property(wx.ToggleButton.GetValue, wx.ToggleButton.SetValue)
    label = property(wx.ToggleButton.GetLabel, wx.ToggleButton.SetLabel)
# KEA 2004-05-06
# you can't actually set the foregroundColor and backgroundColor of
# a ToggleButton so I wonder whether we should have those as valid
# attributes? The same goes for other components where some of our
# base attributes don't make any sense. OTOH, having the attribute
# which fails silently when it tries to set it gives some symmetry
# to the components and gets rid of the need for try/except blocks
# when processing a group of component attributes.
import sys
from PythonCard import registry
# Register ToggleButton with the PythonCard component registry so the
# resource loader can instantiate it by name.
registry.Registry.getInstance().register(sys.modules[__name__].ToggleButton)
| 31.253521 | 87 | 0.676431 |
acfb29bbcad1481fcad44e65075713562087d8da | 763 | py | Python | jumper/pattern.py | ccmikechen/Jumper-Game | b68a03cdfee27cea2bfb321f77b57ce80904bef6 | [
"MIT"
] | null | null | null | jumper/pattern.py | ccmikechen/Jumper-Game | b68a03cdfee27cea2bfb321f77b57ce80904bef6 | [
"MIT"
] | null | null | null | jumper/pattern.py | ccmikechen/Jumper-Game | b68a03cdfee27cea2bfb321f77b57ce80904bef6 | [
"MIT"
] | 1 | 2017-12-19T17:42:52.000Z | 2017-12-19T17:42:52.000Z | from random import randint
from jumper.config import config
class Pattern:
    """A level-generation pattern holding platforms, items, objects, monsters.

    NOTE(review): the ``level`` constructor argument is currently unused and
    ``levels`` always starts at 0 — confirm whether that is intentional.
    """

    def __init__(self, env, level):
        self.env = env
        self.levels = 0
        self.platforms, self.items = [], []
        self.objects, self.monsters = [], []

    def get_levels(self):
        """Number of levels this pattern spans."""
        return self.levels

    def get_platforms(self):
        """Platforms contained in this pattern."""
        return self.platforms

    def get_items(self):
        """Collectible items contained in this pattern."""
        return self.items

    def get_objects(self):
        """Miscellaneous objects contained in this pattern."""
        return self.objects

    def get_monsters(self):
        """Monsters contained in this pattern."""
        return self.monsters

    def get_random_position(self, level):
        """Pick a random (x, y) spawn point on the given level.

        x is snapped to a 50px grid within the scene width; y is derived
        from the configured per-level height.
        """
        scene_width, _scene_height = self.env.get_scene().get_bound()
        x = randint(0, int((scene_width - 100) / 50) * 50)
        return (x, config.LEVEL_HEIGHT * level)
| 21.8 | 58 | 0.589777 |
acfb2ad2c6bd22a4c204984b6a94b6ad22b542f6 | 27,360 | py | Python | airflow/providers/google/cloud/operators/compute.py | mmaton/airflow | 16f43605f3370f20611ba9e08b568ff8a7cd433d | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-03-10T03:37:28.000Z | 2021-03-10T03:37:28.000Z | airflow/providers/google/cloud/operators/compute.py | mmaton/airflow | 16f43605f3370f20611ba9e08b568ff8a7cd433d | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-02-21T15:12:02.000Z | 2021-02-21T15:12:02.000Z | airflow/providers/google/cloud/operators/compute.py | yohei1126/airflow | b718495e4caecb753742c3eb22919411a715f24a | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Compute Engine operators."""
from copy import deepcopy
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient.errors import HttpError
from json_merge_patch import merge
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.compute import ComputeEngineHook
from airflow.providers.google.cloud.utils.field_sanitizer import GcpBodyFieldSanitizer
from airflow.providers.google.cloud.utils.field_validator import GcpBodyFieldValidator
from airflow.utils.decorators import apply_defaults
class ComputeEngineBaseOperator(BaseOperator):
    """Abstract base operator for Google Compute Engine operators to inherit from.

    Stores the common connection/location parameters and validates that the
    mandatory ones were supplied before handing off to ``BaseOperator``.
    """

    @apply_defaults
    def __init__(
        self,
        *,
        zone: str,
        resource_id: str,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        api_version: str = 'v1',
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.zone = zone
        self.resource_id = resource_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.api_version = api_version
        self.impersonation_chain = impersonation_chain
        # Fail fast on bad arguments before BaseOperator does any work.
        self._validate_inputs()
        super().__init__(**kwargs)

    def _validate_inputs(self) -> None:
        # An explicit empty string is invalid; None means "use the default
        # project from the connection" and is therefore allowed.
        if self.project_id == '':
            raise AirflowException("The required parameter 'project_id' is missing")
        for required in ('zone', 'resource_id'):
            if not getattr(self, required):
                raise AirflowException(f"The required parameter '{required}' is missing")

    def execute(self, context):
        # Subclasses implement the actual Compute Engine call.
        pass
class ComputeEngineStartInstanceOperator(ComputeEngineBaseOperator):
    """
    Starts an instance in Google Compute Engine.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:ComputeEngineStartInstanceOperator`

    :param zone: Google Cloud zone where the instance exists.
    :type zone: str
    :param resource_id: Name of the Compute Engine instance resource.
    :type resource_id: str
    :param project_id: Optional, Google Cloud Project ID where the Compute
        Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
        connection is used.
    :type project_id: str
    :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
        Defaults to 'google_cloud_default'.
    :type gcp_conn_id: str
    :param api_version: Optional, API version used (for example v1 - or beta). Defaults
        to v1.
    :type api_version: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # [START gce_instance_start_template_fields]
    template_fields = (
        'project_id',
        'zone',
        'resource_id',
        'gcp_conn_id',
        'api_version',
        'impersonation_chain',
    )
    # [END gce_instance_start_template_fields]

    @apply_defaults
    def __init__(
        self,
        *,
        zone: str,
        resource_id: str,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        api_version: str = 'v1',
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            project_id=project_id,
            zone=zone,
            resource_id=resource_id,
            gcp_conn_id=gcp_conn_id,
            api_version=api_version,
            impersonation_chain=impersonation_chain,
            **kwargs,
        )

    def execute(self, context) -> None:
        # Delegate the actual instances.start API call to the hook.
        hook = ComputeEngineHook(
            gcp_conn_id=self.gcp_conn_id,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )
        return hook.start_instance(zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
class ComputeEngineStopInstanceOperator(ComputeEngineBaseOperator):
    """
    Stops an instance in Google Compute Engine.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:ComputeEngineStopInstanceOperator`

    :param zone: Google Cloud zone where the instance exists.
    :type zone: str
    :param resource_id: Name of the Compute Engine instance resource.
    :type resource_id: str
    :param project_id: Optional, Google Cloud Project ID where the Compute
        Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
        connection is used.
    :type project_id: str
    :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
        Defaults to 'google_cloud_default'.
    :type gcp_conn_id: str
    :param api_version: Optional, API version used (for example v1 - or beta). Defaults
        to v1.
    :type api_version: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # [START gce_instance_stop_template_fields]
    template_fields = (
        'project_id',
        'zone',
        'resource_id',
        'gcp_conn_id',
        'api_version',
        'impersonation_chain',
    )
    # [END gce_instance_stop_template_fields]

    @apply_defaults
    def __init__(
        self,
        *,
        zone: str,
        resource_id: str,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        api_version: str = 'v1',
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            project_id=project_id,
            zone=zone,
            resource_id=resource_id,
            gcp_conn_id=gcp_conn_id,
            api_version=api_version,
            impersonation_chain=impersonation_chain,
            **kwargs,
        )

    def execute(self, context) -> None:
        # Delegate the actual instances.stop API call to the hook.
        hook = ComputeEngineHook(
            gcp_conn_id=self.gcp_conn_id,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )
        hook.stop_instance(zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
# Minimal validation spec for the setMachineType request body: the
# 'machineType' field is mandatory and must be non-empty.
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION = [
    dict(name="machineType", regexp="^.+$"),
]
class ComputeEngineSetMachineTypeOperator(ComputeEngineBaseOperator):
    """
    Changes the machine type for a stopped instance to the machine type specified in
    the request.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:ComputeEngineSetMachineTypeOperator`

    :param zone: Google Cloud zone where the instance exists.
    :type zone: str
    :param resource_id: Name of the Compute Engine instance resource.
    :type resource_id: str
    :param body: Body required by the Compute Engine setMachineType API, as described in
        https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType#request-body
    :type body: dict
    :param project_id: Optional, Google Cloud Project ID where the Compute
        Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
        connection is used.
    :type project_id: str
    :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
        Defaults to 'google_cloud_default'.
    :type gcp_conn_id: str
    :param api_version: Optional, API version used (for example v1 - or beta). Defaults
        to v1.
    :type api_version: str
    :param validate_body: Optional, If set to False, body validation is not performed.
        Defaults to True.
    :type validate_body: bool
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # [START gce_instance_set_machine_type_template_fields]
    template_fields = (
        'project_id',
        'zone',
        'resource_id',
        'body',
        'gcp_conn_id',
        'api_version',
        'impersonation_chain',
    )
    # [END gce_instance_set_machine_type_template_fields]

    @apply_defaults
    def __init__(
        self,
        *,
        zone: str,
        resource_id: str,
        body: dict,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        api_version: str = 'v1',
        validate_body: bool = True,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.body = body
        self._field_validator = None  # type: Optional[GcpBodyFieldValidator]
        # Validation is opt-out; when enabled the body is checked against the
        # module-level SET_MACHINE_TYPE_VALIDATION_SPECIFICATION.
        if validate_body:
            self._field_validator = GcpBodyFieldValidator(
                SET_MACHINE_TYPE_VALIDATION_SPECIFICATION, api_version=api_version
            )
        super().__init__(
            project_id=project_id,
            zone=zone,
            resource_id=resource_id,
            gcp_conn_id=gcp_conn_id,
            api_version=api_version,
            impersonation_chain=impersonation_chain,
            **kwargs,
        )

    def _validate_all_body_fields(self) -> None:
        """Validate the request body if a validator was configured."""
        if self._field_validator:
            self._field_validator.validate(self.body)

    def execute(self, context) -> None:
        """Validate the body and issue the setMachineType API call."""
        hook = ComputeEngineHook(
            gcp_conn_id=self.gcp_conn_id,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )
        self._validate_all_body_fields()
        return hook.set_machine_type(
            zone=self.zone, resource_id=self.resource_id, body=self.body, project_id=self.project_id
        )
# Validation specification for the rfc7386 merge-patch applied to an
# instance template: 'name' is required (the copy must be renamed);
# everything else is optional. Nested arrays (disks, networkInterfaces,
# serviceAccounts, ...) are intentionally not validated in depth.
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION = [
    dict(name="name", regexp="^.+$"),
    dict(name="description", optional=True),
    dict(
        name="properties",
        type='dict',
        optional=True,
        fields=[
            dict(name="description", optional=True),
            dict(name="tags", optional=True, fields=[dict(name="items", optional=True)]),
            dict(name="machineType", optional=True),
            dict(name="canIpForward", optional=True),
            dict(name="networkInterfaces", optional=True),  # not validating deeper
            dict(name="disks", optional=True),  # not validating the array deeper
            dict(
                name="metadata",
                optional=True,
                fields=[
                    dict(name="fingerprint", optional=True),
                    dict(name="items", optional=True),
                    dict(name="kind", optional=True),
                ],
            ),
            dict(name="serviceAccounts", optional=True),  # not validating deeper
            dict(
                name="scheduling",
                optional=True,
                fields=[
                    dict(name="onHostMaintenance", optional=True),
                    dict(name="automaticRestart", optional=True),
                    dict(name="preemptible", optional=True),
                    dict(name="nodeAffinities", optional=True),  # not validating deeper
                ],
            ),
            dict(name="labels", optional=True),
            dict(name="guestAccelerators", optional=True),  # not validating deeper
            dict(name="minCpuPlatform", optional=True),
        ],
    ),
]  # type: List[Dict[str, Any]]
# Server-generated / immutable fields that must be stripped from a fetched
# instance-template body before it can be re-inserted as a new template.
GCE_INSTANCE_TEMPLATE_FIELDS_TO_SANITIZE = [
    "kind",
    "id",
    "name",
    "creationTimestamp",
    "properties.disks.sha256",
    "properties.disks.kind",
    "properties.disks.sourceImageEncryptionKey.sha256",
    "properties.disks.index",
    "properties.disks.licenses",
    "properties.networkInterfaces.kind",
    "properties.networkInterfaces.accessConfigs.kind",
    "properties.networkInterfaces.name",
    "properties.metadata.kind",
    "selfLink",
]
class ComputeEngineCopyInstanceTemplateOperator(ComputeEngineBaseOperator):
    """
    Copies the instance template, applying specified changes.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:ComputeEngineCopyInstanceTemplateOperator`

    :param resource_id: Name of the Instance Template
    :type resource_id: str
    :param body_patch: Patch to the body of instanceTemplates object following rfc7386
        PATCH semantics. The body_patch content follows
        https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
        Name field is required as we need to rename the template,
        all the other fields are optional. It is important to follow PATCH semantics
        - arrays are replaced fully, so if you need to update an array you should
        provide the whole target array as patch element.
    :type body_patch: dict
    :param project_id: Optional, Google Cloud Project ID where the Compute
        Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
        connection is used.
    :type project_id: str
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example when client call times out repeating the request
        with the same request id will not create a new instance template again).
        It should be in UUID format as defined in RFC 4122.
    :type request_id: str
    :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
        Defaults to 'google_cloud_default'.
    :type gcp_conn_id: str
    :param api_version: Optional, API version used (for example v1 - or beta). Defaults
        to v1.
    :type api_version: str
    :param validate_body: Optional, If set to False, body validation is not performed.
        Defaults to True.
    :type validate_body: bool
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # [START gce_instance_template_copy_operator_template_fields]
    template_fields = (
        'project_id',
        'resource_id',
        'request_id',
        'gcp_conn_id',
        'api_version',
        'impersonation_chain',
    )
    # [END gce_instance_template_copy_operator_template_fields]

    @apply_defaults
    def __init__(
        self,
        *,
        resource_id: str,
        body_patch: dict,
        project_id: Optional[str] = None,
        request_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        api_version: str = 'v1',
        validate_body: bool = True,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.body_patch = body_patch
        self.request_id = request_id
        self._field_validator = None  # Optional[GcpBodyFieldValidator]
        # 'name' is mandatory: the copied template must get a new name.
        if 'name' not in self.body_patch:
            raise AirflowException(
                "The body '{}' should contain at least "
                "name for the new operator in the 'name' field".format(body_patch)
            )
        if validate_body:
            self._field_validator = GcpBodyFieldValidator(
                GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
            )
        # Sanitizer strips server-generated fields before re-insertion.
        self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_TEMPLATE_FIELDS_TO_SANITIZE)
        super().__init__(
            project_id=project_id,
            zone='global',
            resource_id=resource_id,
            gcp_conn_id=gcp_conn_id,
            api_version=api_version,
            impersonation_chain=impersonation_chain,
            **kwargs,
        )

    def _validate_all_body_fields(self) -> None:
        """Validate the patch body if a validator was configured."""
        if self._field_validator:
            self._field_validator.validate(self.body_patch)

    def execute(self, context) -> dict:
        """Copy the template idempotently and return the resulting template."""
        hook = ComputeEngineHook(
            gcp_conn_id=self.gcp_conn_id,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )
        self._validate_all_body_fields()
        try:
            # Idempotence check (sort of) - we want to check if the new template
            # is already created and if is, then we assume it was created by previous run
            # of CopyTemplate operator - we do not check if content of the template
            # is as expected. Templates are immutable so we cannot update it anyway
            # and deleting/recreating is not worth the hassle especially
            # that we cannot delete template if it is already used in some Instance
            # Group Manager. We assume success if the template is simply present
            existing_template = hook.get_instance_template(
                resource_id=self.body_patch['name'], project_id=self.project_id
            )
            self.log.info(
                "The %s template already existed. It was likely created by previous run of the operator. "
                "Assuming success.",
                existing_template,
            )
            return existing_template
        except HttpError as e:
            # We actually expect to get 404 / Not Found here as the template should
            # not yet exist
            if not e.resp.status == 404:
                raise e
        # Fetch the source template, strip immutable fields, apply the
        # rfc7386 merge patch, and insert the result as the new template.
        old_body = hook.get_instance_template(resource_id=self.resource_id, project_id=self.project_id)
        new_body = deepcopy(old_body)
        self._field_sanitizer.sanitize(new_body)
        new_body = merge(new_body, self.body_patch)
        self.log.info("Calling insert instance template with updated body: %s", new_body)
        hook.insert_instance_template(body=new_body, request_id=self.request_id, project_id=self.project_id)
        return hook.get_instance_template(resource_id=self.body_patch['name'], project_id=self.project_id)
class ComputeEngineInstanceGroupUpdateManagerTemplateOperator(ComputeEngineBaseOperator):
    """
    Patches the Instance Group Manager, replacing source template URL with the
    destination one. API V1 does not have update/patch operations for Instance
    Group Manager, so you must use beta or newer API version. Beta is the default.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:ComputeEngineInstanceGroupUpdateManagerTemplateOperator`
    :param resource_id: Name of the Instance Group Manager
    :type resource_id: str
    :param zone: Google Cloud zone where the Instance Group Manager exists.
    :type zone: str
    :param source_template: URL of the template to replace.
    :type source_template: str
    :param destination_template: URL of the target template.
    :type destination_template: str
    :param project_id: Optional, Google Cloud Project ID where the Compute
        Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
        connection is used.
    :type project_id: str
    :param request_id: Optional, unique request_id that you might add to achieve
        full idempotence (for example when client call times out repeating the request
        with the same request id will not create a new instance template again).
        It should be in UUID format as defined in RFC 4122.
    :type request_id: str
    :param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
        Defaults to 'google_cloud_default'.
    :type gcp_conn_id: str
    :param api_version: Optional, API version used (for example v1 - or beta). Defaults
        to v1.
    :type api_version: str
    :param validate_body: Optional, If set to False, body validation is not performed.
        Defaults to False.
    :type validate_body: bool
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    # [START gce_igm_update_template_operator_template_fields]
    template_fields = (
        'project_id',
        'resource_id',
        'zone',
        'request_id',
        'source_template',
        'destination_template',
        'gcp_conn_id',
        'api_version',
        'impersonation_chain',
    )
    # [END gce_igm_update_template_operator_template_fields]

    @apply_defaults
    def __init__(
        self,
        *,
        resource_id: str,
        zone: str,
        source_template: str,
        destination_template: str,
        project_id: Optional[str] = None,
        update_policy: Optional[Dict[str, Any]] = None,
        request_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        api_version='beta',
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.zone = zone
        self.source_template = source_template
        self.destination_template = destination_template
        self.request_id = request_id
        self.update_policy = update_policy
        # Tracks whether execute() actually swapped any template URL.
        self._change_performed = False
        # Patching Instance Group Managers is only available in beta or newer
        # API versions, so fail fast on v1 (see the class docstring).
        if api_version == 'v1':
            raise AirflowException(
                "Api version v1 does not have update/patch "
                "operations for Instance Group Managers. Use beta"
                " api version or above"
            )
        super().__init__(
            project_id=project_id,
            zone=self.zone,
            resource_id=resource_id,
            gcp_conn_id=gcp_conn_id,
            api_version=api_version,
            impersonation_chain=impersonation_chain,
            **kwargs,
        )

    def _possibly_replace_template(self, dictionary: dict) -> None:
        """If ``dictionary['instanceTemplate']`` equals the source URL, swap it
        for the destination URL and remember that a change was made."""
        if dictionary.get('instanceTemplate') == self.source_template:
            dictionary['instanceTemplate'] = self.destination_template
            self._change_performed = True

    def execute(self, context) -> Optional[bool]:
        """Patch the IGM, replacing the source template URL with the destination.

        Returns True when no change was needed (idempotent no-op); otherwise the
        result of the patch call.
        NOTE(review): the annotated return type is Optional[bool], but the patch
        branch returns whatever patch_instance_group_manager returns — confirm.
        """
        hook = ComputeEngineHook(
            gcp_conn_id=self.gcp_conn_id,
            api_version=self.api_version,
            impersonation_chain=self.impersonation_chain,
        )
        old_instance_group_manager = hook.get_instance_group_manager(
            zone=self.zone, resource_id=self.resource_id, project_id=self.project_id
        )
        # Build a minimal patch body containing only the fields we might change.
        patch_body = {}
        if 'versions' in old_instance_group_manager:
            patch_body['versions'] = old_instance_group_manager['versions']
        if 'instanceTemplate' in old_instance_group_manager:
            patch_body['instanceTemplate'] = old_instance_group_manager['instanceTemplate']
        if self.update_policy:
            patch_body['updatePolicy'] = self.update_policy
        # Replace the template URL at the top level and inside each version entry.
        self._possibly_replace_template(patch_body)
        if 'versions' in patch_body:
            for version in patch_body['versions']:
                self._possibly_replace_template(version)
        if self._change_performed or self.update_policy:
            self.log.info("Calling patch instance template with updated body: %s", patch_body)
            return hook.patch_instance_group_manager(
                zone=self.zone,
                resource_id=self.resource_id,
                body=patch_body,
                request_id=self.request_id,
                project_id=self.project_id,
            )
        else:
            # Idempotence achieved
            return True
| 41.517451 | 108 | 0.666374 |
acfb2cbcd7c26a2a06ac4aebcf0a63b432d6c06d | 10,284 | py | Python | numba/decorators.py | tolysz/numba | d7953a18dbf5ea231dc16e967ce8e9b754578ea6 | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | numba/decorators.py | tolysz/numba | d7953a18dbf5ea231dc16e967ce8e9b754578ea6 | [
"Apache-2.0",
"BSD-2-Clause"
] | 1 | 2019-02-11T13:46:30.000Z | 2019-02-11T13:46:30.000Z | numba/decorators.py | asodeur/numba | d7953a18dbf5ea231dc16e967ce8e9b754578ea6 | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | """
Define @jit and related decorators.
"""
import sys
import warnings
import inspect
import logging
from . import config, sigutils
from .errors import DeprecationError, NumbaDeprecationWarning
from .targets import registry
from .stencil import stencil
_logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Decorators
_msg_deprecated_signature_arg = ("Deprecated keyword argument `{0}`. "
"Signatures should be passed as the first "
"positional argument.")
def jit(signature_or_function=None, locals={}, target='cpu', cache=False,
        pipeline_class=None, boundscheck=False, **options):
    """
    This decorator is used to compile a Python function into native code.

    Args
    -----
    signature_or_function:
        The (optional) signature or list of signatures to be compiled.
        If not passed, required signatures will be compiled when the
        decorated function is called, depending on the argument values.
        As a convenience, you can directly pass the function to be compiled
        instead.

    locals: dict
        Mapping of local variable names to Numba types. Used to override the
        types deduced by Numba's type inference engine.

    target: str
        Specifies the target platform to compile for. Valid targets are cpu,
        gpu, npyufunc, and cuda. Defaults to cpu.

    pipeline_class: type numba.compiler.CompilerBase
        The compiler pipeline type for customizing the compilation stages.

    options:
        For a cpu target, valid options are:
            nopython: bool
                Set to True to disable the use of PyObjects and Python API
                calls. The default behavior is to allow the use of PyObjects
                and Python API. Default value is False.
            forceobj: bool
                Set to True to force the use of PyObjects for every value.
                Default value is False.
            looplift: bool
                Set to True to enable jitting loops in nopython mode while
                leaving surrounding code in object mode. This allows functions
                to allocate NumPy arrays and use Python objects, while the
                tight loops in the function can still be compiled in nopython
                mode. Any arrays that the tight loop uses should be created
                before the loop is entered. Default value is True.
            error_model: str
                The error-model affects divide-by-zero behavior.
                Valid values are 'python' and 'numpy'. The 'python' model
                raises exception. The 'numpy' model sets the result to
                *+/-inf* or *nan*. Default value is 'python'.
            inline: str or callable
                The inline option will determine whether a function is inlined
                at into its caller if called. String options are 'never'
                (default) which will never inline, and 'always', which will
                always inline. If a callable is provided it will be called with
                the call expression node that is requesting inlining, the
                caller's IR and callee's IR as arguments, it is expected to
                return Truthy as to whether to inline.
                NOTE: This inlining is performed at the Numba IR level and is in
                no way related to LLVM inlining.
            boundscheck: bool
                Set to True to enable bounds checking for array indices. Out
                of bounds accesses will raise IndexError. The default is to
                not do bounds checking. If bounds checking is disabled, out of
                bounds accesses can produce garbage results or segfaults.
                However, enabling bounds checking will slow down typical
                functions, so it is recommended to only use this flag for
                debugging. You can also set the NUMBA_BOUNDSCHECK environment
                variable to 0 or 1 to globally override this flag.

    Returns
    --------
    A callable usable as a compiled function.  Actual compiling will be
    done lazily if no explicit signatures are passed.

    Examples
    --------
    The function can be used in the following ways:

    1) jit(signatures, target='cpu', **targetoptions) -> jit(function)
        Equivalent to:
            d = dispatcher(function, targetoptions)
            for signature in signatures:
                d.compile(signature)
        Create a dispatcher object for a python function.  Then, compile
        the function with the given signature(s).
        Example:
            @jit("int32(int32, int32)")
            def foo(x, y):
                return x + y
            @jit(["int32(int32, int32)", "float32(float32, float32)"])
            def bar(x, y):
                return x + y

    2) jit(function, target='cpu', **targetoptions) -> dispatcher
        Create a dispatcher function object that specializes at call site.
        Examples:
            @jit
            def foo(x, y):
                return x + y
            @jit(target='cpu', nopython=True)
            def bar(x, y):
                return x + y
    """
    # Reject long-removed keyword arguments with an explicit error; signatures
    # must be passed positionally now.
    if 'argtypes' in options:
        raise DeprecationError(_msg_deprecated_signature_arg.format('argtypes'))
    if 'restype' in options:
        raise DeprecationError(_msg_deprecated_signature_arg.format('restype'))
    # Fold the explicit boundscheck flag into the target options dict.
    options['boundscheck'] = boundscheck
    # Handle signature: the first positional argument may be absent, a single
    # signature, a list of signatures, or the function itself (bare @jit usage).
    if signature_or_function is None:
        # No signature, no function
        pyfunc = None
        sigs = None
    elif isinstance(signature_or_function, list):
        # A list of signatures is passed
        pyfunc = None
        sigs = signature_or_function
    elif sigutils.is_signature(signature_or_function):
        # A single signature is passed
        pyfunc = None
        sigs = [signature_or_function]
    else:
        # A function is passed
        pyfunc = signature_or_function
        sigs = None
    # Only forward pipeline_class when explicitly provided, so the dispatcher's
    # own default stays in effect otherwise.
    dispatcher_args = {}
    if pipeline_class is not None:
        dispatcher_args['pipeline_class'] = pipeline_class
    wrapper = _jit(sigs, locals=locals, target=target, cache=cache,
                   targetoptions=options, **dispatcher_args)
    # Bare usage (a function was passed) is decorated immediately; otherwise
    # return the decorator for later application.
    if pyfunc is not None:
        return wrapper(pyfunc)
    else:
        return wrapper
def _jit(sigs, locals, target, cache, targetoptions, **dispatcher_args):
    """Return a decorator that wraps a function in a target-specific Dispatcher.

    When *sigs* is given, the listed signatures are compiled eagerly and the
    dispatcher is then frozen against further compilation.
    """
    dispatcher_cls = registry.dispatcher_registry[target]

    def wrapper(func):
        # Short-circuits: CUDA simulator takes over for the cuda target, and a
        # global JIT-disable returns the plain Python function unchanged.
        if target == 'cuda' and config.ENABLE_CUDASIM:
            from . import cuda
            return cuda.jit(func)
        if config.DISABLE_JIT and target != 'npyufunc':
            return func
        disp = dispatcher_cls(py_func=func, locals=locals,
                              targetoptions=targetoptions,
                              **dispatcher_args)
        if cache:
            disp.enable_caching()
        if sigs is not None:
            # Register the Dispatcher to the type inference mechanism,
            # even though the decorator hasn't returned yet.
            from . import typeinfer
            with typeinfer.register_dispatcher(disp):
                for signature in sigs:
                    disp.compile(signature)
                disp.disable_compile()
        return disp

    return wrapper
def generated_jit(function=None, target='cpu', cache=False,
                  pipeline_class=None, **options):
    """
    This decorator allows flexible type-based compilation
    of a jitted function.  It works as `@jit`, except that the decorated
    function is called at compile-time with the *types* of the arguments
    and should return an implementation function for those types.
    """
    # Forward pipeline_class only when explicitly supplied.
    extra_args = {} if pipeline_class is None else {'pipeline_class': pipeline_class}
    decorate = _jit(sigs=None, locals={}, target=target, cache=cache,
                    targetoptions=options, impl_kind='generated',
                    **extra_args)
    # Support both bare usage (@generated_jit) and parametrized usage.
    return decorate if function is None else decorate(function)
def njit(*args, **kws):
    """
    Equivalent to jit(nopython=True)

    See documentation for jit function/decorator for full description.
    """
    # These flags are forced below, so warn when a caller supplies them.
    for redundant in ('nopython', 'forceobj'):
        if redundant in kws:
            warnings.warn('%s is set for njit and is ignored' % redundant,
                          RuntimeWarning)
    kws['nopython'] = True
    return jit(*args, **kws)
def cfunc(sig, locals={}, cache=False, **options):
    """
    This decorator is used to compile a Python function into a C callback
    usable with foreign C libraries.

    Usage::
        @cfunc("float64(float64, float64)", nopython=True, cache=True)
        def add(a, b):
            return a + b
    """
    # Normalize once, outside the decorator, so every wrapped function
    # shares the same parsed signature.
    normalized_sig = sigutils.normalize_signature(sig)

    def wrapper(func):
        from .ccallback import CFunc
        callback = CFunc(func, normalized_sig, locals=locals, options=options)
        if cache:
            callback.enable_caching()
        callback.compile()
        return callback

    return wrapper
def jit_module(**kwargs):
    """ Automatically ``jit``-wraps functions defined in a Python module

    Note that ``jit_module`` should only be called at the end of the module to
    be jitted. In addition, only functions which are defined in the module
    ``jit_module`` is called from are considered for automatic jit-wrapping.
    See the Numba documentation for more information about what can/cannot be
    jitted.

    :param kwargs: Keyword arguments to pass to ``jit`` such as ``nopython``
                   or ``error_model``.
    """
    # Get the module jit_module is being called from.
    # NOTE: stack()[1] is the direct caller's frame, so this function must not
    # be called through a wrapper or the wrong module would be detected.
    frame = inspect.stack()[1]
    module = inspect.getmodule(frame[0])
    # Replace functions in module with jit-wrapped versions.  Functions merely
    # imported into the module are skipped (their getmodule() differs).
    for name, obj in module.__dict__.items():
        if inspect.isfunction(obj) and inspect.getmodule(obj) == module:
            _logger.debug("Auto decorating function {} from module {} with jit "
                          "and options: {}".format(obj, module.__name__, kwargs))
            module.__dict__[name] = jit(obj, **kwargs)
| 35.958042 | 81 | 0.620284 |
acfb2cfcd1ff9555e95d2279e1c4a1d235d8382f | 50,344 | py | Python | Tests/Marketplace/upload_packs.py | rod-castillo/content | 689692d98ce721c0823190c112f089953a4a31d8 | [
"MIT"
] | null | null | null | Tests/Marketplace/upload_packs.py | rod-castillo/content | 689692d98ce721c0823190c112f089953a4a31d8 | [
"MIT"
] | null | null | null | Tests/Marketplace/upload_packs.py | rod-castillo/content | 689692d98ce721c0823190c112f089953a4a31d8 | [
"MIT"
] | null | null | null | import json
import os
import sys
import argparse
import shutil
import uuid
import prettytable
import glob
import requests
import logging
from datetime import datetime
from zipfile import ZipFile
from typing import Any, Tuple, Union
from Tests.Marketplace.marketplace_services import init_storage_client, init_bigquery_client, Pack, PackStatus, \
GCPConfig, PACKS_FULL_PATH, IGNORED_FILES, PACKS_FOLDER, IGNORED_PATHS, Metadata, CONTENT_ROOT_PATH, \
LANDING_PAGE_SECTIONS_PATH, get_packs_statistics_dataframe, BucketUploadFlow, load_json, get_content_git_client, \
get_recent_commits_data, store_successful_and_failed_packs_in_ci_artifacts
from demisto_sdk.commands.common.tools import run_command, str2bool
from Tests.scripts.utils.log_util import install_logging
def get_packs_names(target_packs: str, previous_commit_hash: str = "HEAD^") -> set:
    """Detects and returns packs names to upload.

    ``All`` selects every pack in the content repo, ``Modified`` selects packs
    touched between the current commit and *previous_commit_hash*, and any other
    non-empty string is treated as a csv list of pack names.

    Args:
        target_packs (str): csv packs names or `All` for all available packs in content
                            or `Modified` for only modified packs (currently not in use).
        previous_commit_hash (str): the previous commit to diff with.

    Returns:
        set: unique collection of packs names to upload.

    """
    selection = target_packs.lower()
    if selection == "all":
        if not os.path.exists(PACKS_FULL_PATH):
            logging.error(f"Folder {PACKS_FOLDER} was not found at the following path: {PACKS_FULL_PATH}")
            sys.exit(1)
        # return all available packs names
        all_packs = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
        logging.info(f"Number of selected packs to upload is: {len(all_packs)}")
        return all_packs
    if selection == "modified":
        # return only modified packs between two commits
        diff_cmd = f"git diff --name-only HEAD..{previous_commit_hash} | grep 'Packs/'"
        changed_paths = run_command(diff_cmd).splitlines()
        modified_packs = {p.split('/')[1] for p in changed_paths if p not in IGNORED_PATHS}
        logging.info(f"Number of modified packs is: {len(modified_packs)}")
        return modified_packs
    if target_packs and isinstance(target_packs, str):
        # return only packs from csv list
        modified_packs = {p.strip() for p in target_packs.split(',') if p not in IGNORED_FILES}
        logging.info(f"Number of selected packs to upload is: {len(modified_packs)}")
        return modified_packs
    logging.critical("Not correct usage of flag -p. Please check help section of upload packs script.")
    sys.exit(1)
def extract_packs_artifacts(packs_artifacts_path: str, extract_destination_path: str):
    """Extracts all packs from content pack artifact zip.

    Args:
        packs_artifacts_path (str): full path to content artifacts zip file.
        extract_destination_path (str): full path to directory where to extract the packs.

    """
    with ZipFile(packs_artifacts_path) as artifacts_archive:
        artifacts_archive.extractall(extract_destination_path)
    logging.info("Finished extracting packs artifacts")
def download_and_extract_index(storage_bucket: Any, extract_destination_path: str) -> Tuple[str, Any, int]:
    """Downloads and extracts index zip from cloud storage.

    Args:
        storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.
        extract_destination_path (str): the full path of extract folder.

    Returns:
        str: extracted index folder full path.
        Blob: google cloud storage object that represents index.zip blob.
        str: downloaded index generation.

    """
    # The private production bucket keeps its index under a different base path.
    if storage_bucket.name == GCPConfig.PRODUCTION_PRIVATE_BUCKET:
        index_storage_path = os.path.join(GCPConfig.PRIVATE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
    else:
        index_storage_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, f"{GCPConfig.INDEX_NAME}.zip")
    download_index_path = os.path.join(extract_destination_path, f"{GCPConfig.INDEX_NAME}.zip")
    index_blob = storage_bucket.blob(index_storage_path)
    index_folder_path = os.path.join(extract_destination_path, GCPConfig.INDEX_NAME)
    index_generation = 0  # Setting to 0 makes the operation succeed only if there are no live versions of the blob
    if not os.path.exists(extract_destination_path):
        os.mkdir(extract_destination_path)
    # Missing blob: return an empty local index folder with generation 0.
    if not index_blob.exists():
        os.mkdir(index_folder_path)
        logging.error(f"{storage_bucket.name} index blob does not exists")
        return index_folder_path, index_blob, index_generation
    index_blob.reload()
    index_generation = index_blob.generation
    # if_generation_match guards against the blob changing between reload and download.
    index_blob.download_to_filename(download_index_path, if_generation_match=index_generation)
    if os.path.exists(download_index_path):
        with ZipFile(download_index_path, 'r') as index_zip:
            index_zip.extractall(extract_destination_path)
        if not os.path.exists(index_folder_path):
            logging.critical(f"Failed creating {GCPConfig.INDEX_NAME} folder with extracted data.")
            sys.exit(1)
        # The zip itself is no longer needed once extracted.
        os.remove(download_index_path)
        logging.success(f"Finished downloading and extracting {GCPConfig.INDEX_NAME} file to "
                        f"{extract_destination_path}")
        return index_folder_path, index_blob, index_generation
    else:
        logging.critical(f"Failed to download {GCPConfig.INDEX_NAME}.zip file from cloud storage.")
        sys.exit(1)
def update_index_folder(index_folder_path: str, pack_name: str, pack_path: str, pack_version: str = '',
                        hidden_pack: bool = False) -> bool:
    """
    Copies pack folder into index folder.

    Historical ``metadata-<version>.json`` files already in the index are kept
    so older pack versions stay described; everything else for the pack is
    replaced by the new content.

    Args:
        index_folder_path (str): full path to index folder.
        pack_name (str): pack folder name to copy.
        pack_path (str): pack folder full path.
        pack_version (str): pack latest version.
        hidden_pack (bool): whether pack is hidden/internal or regular pack.

    Returns:
        bool: whether the operation succeeded.
    """
    task_status = False
    try:
        index_folder_subdirectories = [d for d in os.listdir(index_folder_path) if
                                       os.path.isdir(os.path.join(index_folder_path, d))]
        index_pack_path = os.path.join(index_folder_path, pack_name)
        metadata_files_in_index = glob.glob(f"{index_pack_path}/metadata-*.json")
        new_metadata_path = os.path.join(index_pack_path, f"metadata-{pack_version}.json")
        if pack_version:
            # Update the latest metadata: drop it from the "keep" list so the
            # stale copy for this version is deleted below and rewritten.
            if new_metadata_path in metadata_files_in_index:
                metadata_files_in_index.remove(new_metadata_path)
        # Remove old files but keep metadata files
        if pack_name in index_folder_subdirectories:
            for d in os.scandir(index_pack_path):
                if d.path not in metadata_files_in_index:
                    os.remove(d.path)
        # skipping index update in case hidden is set to True
        if hidden_pack:
            if os.path.exists(index_pack_path):
                shutil.rmtree(index_pack_path)  # remove pack folder inside index in case that it exists
            logging.warning(f"Skipping updating {pack_name} pack files to index")
            task_status = True
            return True
        # Copy new files and add metadata for latest version
        for d in os.scandir(pack_path):
            if not os.path.exists(index_pack_path):
                os.mkdir(index_pack_path)
                logging.info(f"Created {pack_name} pack folder in {GCPConfig.INDEX_NAME}")
            shutil.copy(d.path, index_pack_path)
            if pack_version and Pack.METADATA == d.name:
                # Also keep a versioned copy of metadata.json for this release.
                shutil.copy(d.path, new_metadata_path)
        task_status = True
    except Exception:
        logging.exception(f"Failed in updating index folder for {pack_name} pack.")
    finally:
        # NOTE(review): returning from finally also swallows any in-flight
        # BaseException (e.g. KeyboardInterrupt) — confirm this is intended.
        return task_status
def clean_non_existing_packs(index_folder_path: str, private_packs: list, storage_bucket: Any) -> bool:
    """ Detects packs that are not part of content repo or from private packs bucket.

    In case such packs were detected, problematic pack is deleted from index and from content/packs/{target_pack} path.

    Args:
        index_folder_path (str): full path to downloaded index folder.
        private_packs (list): priced packs from private bucket.
        storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where index.zip is stored.

    Returns:
        bool: whether cleanup was skipped or not.
    """
    # Cleanup runs only in CI, and only on allowed branch/bucket combinations:
    # master may clean the production or CI build bucket, non-master branches
    # must never touch the production bucket.
    if ('CI' not in os.environ) or (
            os.environ.get('CIRCLE_BRANCH') != 'master' and storage_bucket.name == GCPConfig.PRODUCTION_BUCKET) or (
            os.environ.get('CIRCLE_BRANCH') == 'master' and storage_bucket.name not in
            (GCPConfig.PRODUCTION_BUCKET, GCPConfig.CI_BUILD_BUCKET)):
        logging.info("Skipping cleanup of packs in gcs.")  # skipping execution of cleanup in gcs bucket
        return True
    # A pack is "valid" if it exists in the content repo or is a known private pack.
    public_packs_names = {p for p in os.listdir(PACKS_FULL_PATH) if p not in IGNORED_FILES}
    private_packs_names = {p.get('id', '') for p in private_packs}
    valid_packs_names = public_packs_names.union(private_packs_names)
    # search for invalid packs folder inside index
    invalid_packs_names = {(entry.name, entry.path) for entry in os.scandir(index_folder_path) if
                           entry.name not in valid_packs_names and entry.is_dir()}
    if invalid_packs_names:
        try:
            logging.warning(f"Detected {len(invalid_packs_names)} non existing pack inside index, starting cleanup.")
            for invalid_pack in invalid_packs_names:
                invalid_pack_name = invalid_pack[0]
                invalid_pack_path = invalid_pack[1]
                # remove pack from index
                shutil.rmtree(invalid_pack_path)
                logging.warning(f"Deleted {invalid_pack_name} pack from {GCPConfig.INDEX_NAME} folder")
                # important to add trailing slash at the end of path in order to avoid packs with same prefix
                invalid_pack_gcs_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, invalid_pack_name, "")  # by design
                for invalid_blob in [b for b in storage_bucket.list_blobs(prefix=invalid_pack_gcs_path)]:
                    logging.warning(f"Deleted invalid {invalid_pack_name} pack under url {invalid_blob.public_url}")
                    invalid_blob.delete()  # delete invalid pack in gcs
        except Exception:
            logging.exception("Failed to cleanup non existing packs.")
    else:
        logging.info(f"No invalid packs detected inside {GCPConfig.INDEX_NAME} folder")
    return False
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
                            build_number: str, private_packs: list, current_commit_hash: str,
                            index_generation: int, is_private: bool = False, force_upload: bool = False,
                            previous_commit_hash: str = None, landing_page_sections: dict = None):
    """
    Upload updated index zip to cloud storage.

    :param index_folder_path: index folder full path.
    :param extract_destination_path: extract folder full path.
    :param index_blob: google cloud storage object that represents index.zip blob.
    :param build_number: circleCI build number, used as an index revision.
    :param private_packs: List of private packs and their price.
    :param current_commit_hash: last commit hash of head.
    :param index_generation: downloaded index generation.
    :param is_private: Indicates if upload is private.
    :param force_upload: Indicates if force upload or not.
    :param previous_commit_hash: The previous commit hash to diff with.
    :param landing_page_sections: landingPage sections.
    :returns None.

    """
    if force_upload:
        # If we force upload we don't want to update the commit in the index.json file,
        # this is to be able to identify all changed packs in the next upload
        commit = previous_commit_hash
        logging.info('Force upload flow - Index commit hash shuould not be changed')
    else:
        # Otherwise, update the index with the current commit hash (the commit of the upload)
        commit = current_commit_hash
        logging.info('Updating production index commit hash to master last commit hash')
    if not landing_page_sections:
        landing_page_sections = load_json(LANDING_PAGE_SECTIONS_PATH)
    logging.debug(f'commit hash is: {commit}')
    # Write the top-level index.json that describes this revision of the index.
    with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
        index = {
            'revision': build_number,
            'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
            'packs': private_packs,
            'commit': commit,
            'landingPage': {'sections': landing_page_sections.get('sections', [])}
        }
        json.dump(index, index_file, indent=4)
    index_zip_name = os.path.basename(index_folder_path)
    index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
                                         root_dir=extract_destination_path, base_dir=index_zip_name)
    try:
        index_blob.reload()
        current_index_generation = index_blob.generation
        index_blob.cache_control = "no-cache,max-age=0"  # disabling caching for index blob
        # Generation check detects a concurrent index upload since we downloaded
        # the index; private uploads skip the check.
        if is_private or current_index_generation == index_generation:
            index_blob.upload_from_filename(index_zip_path)
            logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
        else:
            logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
            logging.critical(f"Downloaded index generation: {index_generation}")
            logging.critical(f"Current index generation: {current_index_generation}")
            sys.exit(0)
    except Exception:
        logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
        sys.exit(1)
    finally:
        # Always drop the local extracted index folder, success or not.
        shutil.rmtree(index_folder_path)
def upload_core_packs_config(storage_bucket: Any, build_number: str, index_folder_path: str):
    """Uploads corepacks.json file configuration to bucket. Corepacks file includes core packs for server installation.

    Fails the build (sys.exit) if any core pack is missing from the index or
    from the bucket.

    Args:
        storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
        build_number (str): circleCI build number.
        index_folder_path (str): The index folder path.

    """
    core_packs_public_urls = []
    found_core_packs = set()
    for pack in os.scandir(index_folder_path):
        if pack.is_dir() and pack.name in GCPConfig.CORE_PACKS_LIST:
            pack_metadata_path = os.path.join(index_folder_path, pack.name, Pack.METADATA)
            if not os.path.exists(pack_metadata_path):
                logging.critical(f"{pack.name} pack {Pack.METADATA} is missing in {GCPConfig.INDEX_NAME}")
                sys.exit(1)
            with open(pack_metadata_path, 'r') as metadata_file:
                metadata = json.load(metadata_file)
            # The metadata's currentVersion decides which zip in the bucket is the core pack.
            pack_current_version = metadata.get('currentVersion', Pack.PACK_INITIAL_VERSION)
            core_pack_relative_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, pack.name,
                                                   pack_current_version, f"{pack.name}.zip")
            core_pack_public_url = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name, core_pack_relative_path)
            if not storage_bucket.blob(core_pack_relative_path).exists():
                logging.critical(f"{pack.name} pack does not exist under {core_pack_relative_path} path")
                sys.exit(1)
            core_packs_public_urls.append(core_pack_public_url)
            found_core_packs.add(pack.name)
    # Every configured core pack must have been found in the index.
    if len(found_core_packs) != len(GCPConfig.CORE_PACKS_LIST):
        missing_core_packs = set(GCPConfig.CORE_PACKS_LIST) ^ found_core_packs
        logging.critical(f"Number of defined core packs are: {len(GCPConfig.CORE_PACKS_LIST)}")
        logging.critical(f"Actual number of found core packs are: {len(found_core_packs)}")
        logging.critical(f"Missing core packs are: {missing_core_packs}")
        sys.exit(1)
    # construct core pack data with public gcs urls
    core_packs_data = {
        'corePacks': core_packs_public_urls,
        'buildNumber': build_number
    }
    # upload core pack json file to gcs
    core_packs_config_path = os.path.join(GCPConfig.STORAGE_BASE_PATH, GCPConfig.CORE_PACK_FILE_NAME)
    blob = storage_bucket.blob(core_packs_config_path)
    blob.upload_from_string(json.dumps(core_packs_data, indent=4))
    logging.success(f"Finished uploading {GCPConfig.CORE_PACK_FILE_NAME} to storage.")
def upload_id_set(storage_bucket: Any, id_set_local_path: str = None):
    """
    Uploads the id_set.json artifact to the bucket.

    Args:
        storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where core packs config is uploaded.
        id_set_local_path: path to the id_set.json file; when falsy the upload is skipped.
    """
    if not id_set_local_path:
        logging.info("Skipping upload of id set to gcs.")
        return

    destination_path = os.path.join(os.path.dirname(GCPConfig.STORAGE_BASE_PATH), 'id_set.json')
    with open(id_set_local_path, mode='r') as id_set_file:
        storage_bucket.blob(destination_path).upload_from_file(id_set_file)
    logging.success("Finished uploading id_set.json to storage.")
def _build_summary_table(packs_input_list: list, include_pack_status: bool = False) -> Any:
    """Build summary table from pack list

    Args:
        packs_input_list (list): list of Packs
        include_pack_status (bool): whether pack includes status

    Returns:
        PrettyTable: table with upload result of packs.

    """
    headers = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Aggregated Pack Versions"]
    if include_pack_status:
        headers.append("Status")

    table = prettytable.PrettyTable()
    table.field_names = headers

    for row_number, pack in enumerate(packs_input_list, start=1):
        status_message = PackStatus[pack.status].value
        aggregation_info = pack.aggregation_str if pack.aggregated and pack.aggregation_str else "False"
        row = [row_number, pack.name, pack.display_name, pack.latest_version, aggregation_info]
        if include_pack_status:
            row.append(status_message)
        table.add_row(row)

    return table
def build_summary_table_md(packs_input_list: list, include_pack_status: bool = False) -> str:
    """Build markdown summary table from pack list

    Args:
        packs_input_list (list): list of Packs
        include_pack_status (bool): whether pack includes status

    Returns:
        Markdown table: table with upload result of packs.

    """
    table_fields = ["Index", "Pack ID", "Pack Display Name", "Latest Version", "Status"] if include_pack_status \
        else ["Index", "Pack ID", "Pack Display Name", "Latest Version"]
    # Markdown header row followed by the alignment row.
    table = ['|', '|']
    for key in table_fields:
        table[0] = f'{table[0]} {key} |'
        table[1] = f'{table[1]} :- |'
    # Number rows from 1 to stay consistent with _build_summary_table.
    for index, pack in enumerate(packs_input_list, start=1):
        pack_status_message = PackStatus[pack.status].value if include_pack_status else ''
        row = [index, pack.name, pack.display_name, pack.latest_version, pack_status_message] if include_pack_status \
            else [index, pack.name, pack.display_name, pack.latest_version]
        row_hr = '|'
        for _value in row:
            row_hr = f'{row_hr} {_value}|'
        table.append(row_hr)
    return '\n'.join(table)
def add_private_content_to_index(private_index_path: str, extract_destination_path: str, index_folder_path: str,
                                 pack_names: set) -> Tuple[list, list]:
    """ Adds a list of priced packs data-structures to the public index.json file.
    This step should not be skipped even if there are no new or updated private packs.

    Args:
        private_index_path: path to where the private index is located.
        extract_destination_path (str): full path to extract directory.
        index_folder_path (str): downloaded index folder directory path.
        pack_names (set): collection of pack names.

    Returns:
        Tuple[list, list]:
            private_packs: priced pack metadata dicts read from the private bucket (empty on failure).
            updated_private_packs: ids of private packs whose contentCommitHash changed
                since the public index was built (empty on failure).
    """
    private_packs = []
    updated_private_packs = []
    try:
        logging.info("get_private_packs")
        private_packs = get_private_packs(private_index_path, pack_names,
                                          extract_destination_path)
        logging.info("get_updated_private_packs")
        updated_private_packs = get_updated_private_packs(private_packs, index_folder_path)
        logging.info("add_private_packs_to_index")
        add_private_packs_to_index(index_folder_path, private_index_path)
    except Exception as e:
        # Best effort: a failure here is logged but must not abort the whole upload
        # flow; whatever was collected before the failure is still returned.
        logging.exception(f"Could not add private packs to the index. Additional Info: {str(e)}")
    finally:
        logging.info("Finished updating index with priced packs")
        # The extracted private index directory is no longer needed once merged.
        shutil.rmtree(os.path.dirname(private_index_path), ignore_errors=True)
        return private_packs, updated_private_packs
def get_updated_private_packs(private_packs, index_folder_path):
    """ Detects private packs that changed since the public index was built.

    A pack counts as updated when the `contentCommitHash` recorded in its private
    metadata differs from the hash stored for the same pack id in the public index.json.

    Args:
        private_packs (list): List of dicts containing pack metadata information.
        index_folder_path (str): The public index folder path.

    Returns:
        list: ids of all private packs that were updated.
    """
    public_index_file_path = os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")
    public_entries = load_json(public_index_file_path).get("packs", {})
    # Map each public pack id to its recorded commit hash ("" when absent).
    # With duplicate ids the later entry wins, matching the behavior of the
    # previous linear scan that overwrote the hash on every match.
    public_hashes = {entry.get('id'): entry.get('contentCommitHash', "") for entry in public_entries}

    updated_private_packs = []
    for private_pack in private_packs:
        pack_id = private_pack.get('id')
        metadata_hash = private_pack.get('contentCommitHash', "")
        if metadata_hash != public_hashes.get(pack_id, ""):
            updated_private_packs.append(pack_id)

    logging.debug(f"Updated private packs are: {updated_private_packs}")
    return updated_private_packs
def get_private_packs(private_index_path: str, pack_names: set = frozenset(),
                      extract_destination_path: str = '') -> list:
    """
    Gets a list of private packs.

    :param private_index_path: Path to where the private index is located.
    :param pack_names: Collection of pack names changed in this build; packs in this collection
        have their metadata read from the extracted build artifacts instead of the index.
    :param extract_destination_path: Path to where the files should be extracted to.
    :return: List of dicts containing pack metadata information.
    """
    # frozenset() replaces the previous mutable default argument (set()).
    try:
        metadata_files = glob.glob(f"{private_index_path}/**/metadata.json")
    except Exception:
        logging.exception(f'Could not find metadata files in {private_index_path}.')
        return []

    if not metadata_files:
        logging.warning(f'No metadata files found in [{private_index_path}]')

    private_packs = []
    for metadata_file_path in metadata_files:
        try:
            with open(metadata_file_path, "r") as metadata_file:
                metadata = json.load(metadata_file)
            pack_id = metadata.get('id')
            is_changed_private_pack = pack_id in pack_names
            if is_changed_private_pack:  # Should take metadata from artifacts.
                # Renamed handle (was `metadata_file`, shadowing the outer `with` target).
                with open(os.path.join(extract_destination_path, pack_id, "pack_metadata.json"),
                          "r") as artifacts_metadata_file:
                    metadata = json.load(artifacts_metadata_file)
            if metadata:
                private_packs.append({
                    'id': metadata.get('id') if not is_changed_private_pack else metadata.get('name'),
                    'price': metadata.get('price'),
                    'vendorId': metadata.get('vendorId', ""),
                    'partnerId': metadata.get('partnerId', ""),
                    'partnerName': metadata.get('partnerName', ""),
                    'contentCommitHash': metadata.get('contentCommitHash', "")
                })
        except ValueError:
            logging.exception(f'Invalid JSON in the metadata file [{metadata_file_path}].')
    return private_packs
def add_private_packs_to_index(index_folder_path: str, private_index_path: str):
    """ Merge every pack directory of the private index into the public index folder.

    Args:
        index_folder_path: The index folder path.
        private_index_path: The path for the index of the private packs.
    """
    for entry in os.scandir(private_index_path):
        if not os.path.isdir(entry.path):
            continue
        update_index_folder(index_folder_path, entry.name, entry.path)
def is_private_packs_updated(public_index_json, private_index_path):
    """ Checks whether there were changes in private packs from the last upload.

    The check compares the `content commit hash` field in the public index with the value stored
    in the private index. If there is at least one private pack that has been updated/released,
    the upload should be performed and not skipped.

    Args:
        public_index_json (dict) : The public index.json file.
        private_index_path (str): Path to where the private index.zip is located.

    Returns:
        bool: True if there is at least one private pack that was updated/released,
            False otherwise (i.e there are no private packs that have been updated/released).
    """
    logging.debug("Checking if there are updated private packs")

    private_index_file_path = os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")
    private_index_json = load_json(private_index_file_path)
    # Fall back to empty lists so a missing/None "packs" key cannot raise
    # a TypeError in the len() comparison below.
    private_packs_from_private_index = private_index_json.get("packs") or []
    private_packs_from_public_index = public_index_json.get("packs") or []

    if len(private_packs_from_private_index) != len(private_packs_from_public_index):
        # private pack was added or deleted
        logging.debug("There is at least one private pack that was added/deleted, upload should not be skipped.")
        return True

    id_to_commit_hash_from_public_index = {private_pack.get("id"): private_pack.get("contentCommitHash", "") for
                                           private_pack in private_packs_from_public_index}

    for private_pack in private_packs_from_private_index:
        pack_id = private_pack.get("id")
        content_commit_hash = private_pack.get("contentCommitHash", "")
        if id_to_commit_hash_from_public_index.get(pack_id) != content_commit_hash:
            logging.debug("There is at least one private pack that was updated, upload should not be skipped.")
            return True

    logging.debug("No private packs were changed")
    return False
def check_if_index_is_updated(index_folder_path: str, content_repo: Any, current_commit_hash: str,
                              previous_commit_hash: str, storage_bucket: Any,
                              is_private_content_updated: bool = False):
    """ Checks stored at index.json commit hash and compares it to current commit hash. In case no packs folders were
    added/modified/deleted, all other steps are not performed.

    Exits the process with code 0 (skip, not failure) when the index is already
    up to date, and with code 1 on an unexpected error.

    Args:
        index_folder_path (str): index folder full path.
        content_repo (git.repo.base.Repo): content repo object.
        current_commit_hash (str): last commit hash of head.
        previous_commit_hash (str): the previous commit to diff with.
        storage_bucket: public storage bucket.
        is_private_content_updated (bool): True if private content updated, False otherwise.
    """
    skipping_build_task_message = "Skipping Upload Packs To Marketplace Storage Step."

    try:
        if storage_bucket.name not in (GCPConfig.CI_BUILD_BUCKET, GCPConfig.PRODUCTION_BUCKET):
            # The skip optimization is only applied for the production/CI-build buckets.
            logging.info("Skipping index update check in non production/build bucket")
            return

        if is_private_content_updated:
            # Private content changed, so the upload must run regardless of public diffs.
            logging.debug("Skipping index update as Private Content has updated.")
            return

        if not os.path.exists(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")):
            # will happen only in init bucket run
            logging.warning(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
            return

        with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json")) as index_file:
            index_json = json.load(index_file)

        index_commit_hash = index_json.get('commit', previous_commit_hash)

        try:
            index_commit = content_repo.commit(index_commit_hash)
        except Exception:
            # not updated build will receive this exception because it is missing more updated commit
            logging.exception(f"Index is already updated. {skipping_build_task_message}")
            sys.exit()  # exit code 0: deliberate skip, not a failure

        current_commit = content_repo.commit(current_commit_hash)

        if current_commit.committed_datetime <= index_commit.committed_datetime:
            # The index was built from a commit at least as new as this build's commit.
            logging.warning(
                f"Current commit {current_commit.hexsha} committed time: {current_commit.committed_datetime}")
            logging.warning(f"Index commit {index_commit.hexsha} committed time: {index_commit.committed_datetime}")
            logging.warning("Index is already updated.")
            logging.warning(skipping_build_task_message)
            sys.exit()  # exit code 0: deliberate skip

        for changed_file in current_commit.diff(index_commit):
            if changed_file.a_path.startswith(PACKS_FOLDER):
                logging.info(
                    f"Found changed packs between index commit {index_commit.hexsha} and {current_commit.hexsha}")
                break
        else:
            # for/else: reached only when no diff entry touched the packs folder.
            logging.warning(f"No changes found between index commit {index_commit.hexsha} and {current_commit.hexsha}")
            logging.warning(skipping_build_task_message)
            sys.exit()  # exit code 0: deliberate skip
    except Exception:
        logging.exception("Failed in checking status of index")
        sys.exit(1)
def print_packs_summary(successful_packs: list, skipped_packs: list, failed_packs: list,
                        fail_build: bool = True):
    """Prints summary of packs uploaded to gcs.

    Args:
        successful_packs (list): list of packs that were successfully uploaded.
        skipped_packs (list): list of packs that were skipped during upload.
        failed_packs (list): list of packs that were failed during upload.
        fail_build (bool): indicates whether to fail the build upon failing pack to upload or not.
            (main() passes `not is_bucket_upload_flow`, so the bucket upload flow never fails here.)
    """
    logging.info(
        f"""\n
------------------------------------------ Packs Upload Summary ------------------------------------------
Total number of packs: {len(successful_packs + skipped_packs + failed_packs)}
----------------------------------------------------------------------------------------------------------""")
    if successful_packs:
        successful_packs_table = _build_summary_table(successful_packs)
        logging.success(f"Number of successful uploaded packs: {len(successful_packs)}")
        logging.success(f"Uploaded packs:\n{successful_packs_table}")
        # Written for later consumption (presumably a CI artifact) - TODO confirm consumer.
        with open('pack_list.txt', 'w') as f:
            f.write(successful_packs_table.get_string())
    if skipped_packs:
        skipped_packs_table = _build_summary_table(skipped_packs, include_pack_status=True)
        logging.warning(f"Number of skipped packs: {len(skipped_packs)}")
        logging.warning(f"Skipped packs:\n{skipped_packs_table}")
    if failed_packs:
        failed_packs_table = _build_summary_table(failed_packs, include_pack_status=True)
        logging.critical(f"Number of failed packs: {len(failed_packs)}")
        logging.critical(f"Failed packs:\n{failed_packs_table}")
        if fail_build:
            # We don't want the bucket upload flow to fail in Prepare Content step if a pack has failed to upload.
            # (That flow passes fail_build=False, so this exit only fires in other flows.)
            sys.exit(1)

    # for external pull requests - when there is no failed packs, add the build summary to the pull request
    branch_name = os.environ.get('CIRCLE_BRANCH')
    if branch_name and branch_name.startswith('pull/'):
        successful_packs_table = build_summary_table_md(successful_packs)

        build_num = os.environ['CIRCLE_BUILD_NUM']

        bucket_path = f'https://console.cloud.google.com/storage/browser/' \
                      f'marketplace-ci-build/content/builds/{branch_name}/{build_num}'

        pr_comment = f'Number of successful uploaded packs: {len(successful_packs)}\n' \
                     f'Uploaded packs:\n{successful_packs_table}\n\n' \
                     f'Browse to the build bucket with this address:\n{bucket_path}'

        add_pr_comment(pr_comment)
def option_handler():
    """Validates and parses script arguments.

    Returns:
        Namespace: Parsed arguments object.
    """
    parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
    # disable-secrets-detection-start
    parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
    parser.add_argument('-e', '--extract_path', help="Full path of folder to extract wanted packs", required=True)
    parser.add_argument('-b', '--bucket_name', help="Storage bucket name", required=True)
    parser.add_argument('-s', '--service_account',
                        help=("Path to gcloud service account, is for circleCI usage. "
                              "For local development use your personal account and "
                              "authenticate using Google Cloud SDK by running: "
                              "`gcloud auth application-default login` and leave this parameter blank. "
                              "For more information go to: "
                              "https://googleapis.dev/python/google-api-core/latest/auth.html"),
                        required=False)
    parser.add_argument('-i', '--id_set_path', help="The full path of id_set.json", required=False)
    parser.add_argument('-d', '--pack_dependencies', help="Full path to pack dependencies json file.", required=False)
    parser.add_argument('-p', '--pack_names',
                        help=("Target packs to upload to gcs. Optional values are: `All`, "
                              "`Modified` or csv list of packs "
                              "Default is set to `All`"),
                        required=False, default="All")
    parser.add_argument('-n', '--ci_build_number',
                        help="CircleCi build number (will be used as hash revision at index file)", required=False)
    # NOTE(review): required=True makes default=False unreachable here - confirm whether
    # this flag is really meant to be mandatory on every invocation.
    parser.add_argument('-o', '--override_all_packs', help="Override all existing packs in cloud storage",
                        type=str2bool, default=False, required=True)
    parser.add_argument('-k', '--key_string', help="Base64 encoded signature key used for signing packs.",
                        required=False)
    parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
                        required=False)
    parser.add_argument('-rt', '--remove_test_playbooks', type=str2bool,
                        help='Should remove test playbooks from content packs or not.', default=True)
    parser.add_argument('-bu', '--bucket_upload', help='is bucket upload build?', type=str2bool, required=True)
    parser.add_argument('-pb', '--private_bucket_name', help="Private storage bucket name", required=False)
    parser.add_argument('-c', '--circle_branch', help="CircleCi branch of current build", required=True)
    parser.add_argument('-f', '--force_upload', help="is force upload build?", type=str2bool, required=True)
    # disable-secrets-detection-end
    return parser.parse_args()
def add_pr_comment(comment: str):
    """Add a comment to the open pull request matching the current CircleCI build.

    Looks up the PR by the build's commit SHA and branch via the GitHub search API,
    then posts `comment` to it. Failures are logged and swallowed so commenting
    never fails the build.

    Args:
        comment (string): The comment text.
    """
    token = os.environ['CONTENT_GITHUB_TOKEN']
    branch_name = os.environ['CIRCLE_BRANCH']
    sha1 = os.environ['CIRCLE_SHA1']

    query = f'?q={sha1}+repo:demisto/content+is:pr+is:open+head:{branch_name}+is:open'
    url = 'https://api.github.com/search/issues'
    headers = {'Authorization': 'Bearer ' + token}
    try:
        # NOTE(review): verify=False disables TLS certificate verification -
        # presumably deliberate for the CI environment, but worth confirming.
        res = requests.get(url + query, headers=headers, verify=False)
        res = handle_github_response(res)

        if res and res.get('total_count', 0) == 1:
            issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
            if issue_url:
                res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
                handle_github_response(res)
        else:
            # Previously this claimed "more than one" PR was found, but this branch is
            # also reached when the search returned zero matches.
            logging.warning(
                f'Add pull request comment failed: expected exactly one open pull request for branch {branch_name}.')
    except Exception:
        logging.exception('Add pull request comment failed.')
def handle_github_response(response) -> dict:
    """
    Handles the response from the GitHub server after making a request.

    Logs a warning (with GitHub's error "message" field) when the HTTP request
    failed, and returns the decoded JSON body either way.

    :param response: requests.Response returned by the GitHub API call.
    :return: The JSON body of the response as a dict.
    """
    res_dict = response.json()
    # GitHub API bodies carry no 'ok' field, so the previous `res_dict.get('ok')`
    # check logged a spurious warning on every successful call. Use the HTTP
    # status of the response itself instead. (Also fixed: the old annotation
    # `response: json` named the json module, which is not a type.)
    if not response.ok:
        logging.warning(f'Add pull request comment failed: {res_dict.get("message")}')
    return res_dict
def get_packs_summary(packs_list):
    """ Returns the packs list divided into 3 lists by their status

    Args:
        packs_list (list): The full packs list

    Returns: 3 lists of packs - successful_packs, skipped_packs & failed_packs
    """
    successful_packs = []
    skipped_packs = []
    failed_packs = []
    skipped_statuses = (PackStatus.PACK_ALREADY_EXISTS.name,
                        PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name)
    # Single pass: every pack lands in exactly one bucket.
    for pack in packs_list:
        if pack.status == PackStatus.SUCCESS.name:
            successful_packs.append(pack)
        elif pack.status in skipped_statuses:
            skipped_packs.append(pack)
        else:
            failed_packs.append(pack)
    return successful_packs, skipped_packs, failed_packs
def handle_private_content(public_index_folder_path, private_bucket_name, extract_destination_path, storage_client,
                           public_pack_names) -> Tuple[bool, list, list]:
    """
    1. Add private packs to public index.json.
    2. Checks if there are private packs that were added/deleted/updated.

    Args:
        public_index_folder_path: extracted public index folder full path.
        private_bucket_name: Private storage bucket name.
        extract_destination_path: full path to extract directory.
        storage_client: initialized google cloud storage client.
        public_pack_names: unique collection of public packs names to upload.

    Returns:
        is_private_content_updated (bool): True if there is at least one private pack that was updated/released,
            False otherwise (i.e there are no private packs that have been updated/released).
        private_packs (list): priced packs from private bucket.
        updated_private_packs_ids (list): all private packs id's that were updated.
    """
    if not private_bucket_name:
        # No private bucket configured - nothing to merge or compare.
        return False, [], []

    private_storage_bucket = storage_client.bucket(private_bucket_name)
    private_index_path, _, _ = download_and_extract_index(
        private_storage_bucket, os.path.join(extract_destination_path, "private")
    )

    public_index_json_file_path = os.path.join(public_index_folder_path, f"{GCPConfig.INDEX_NAME}.json")
    public_index_json = load_json(public_index_json_file_path)
    if not public_index_json:
        logging.error(f"Public {GCPConfig.INDEX_NAME}.json was found empty.")
        sys.exit(1)

    are_private_packs_updated = is_private_packs_updated(public_index_json, private_index_path)
    private_packs, updated_private_packs_ids = add_private_content_to_index(
        private_index_path, extract_destination_path, public_index_folder_path, public_pack_names
    )
    return are_private_packs_updated, private_packs, updated_private_packs_ids
def main():
    """Entry point: prepares content packs and uploads them to the GCS marketplace bucket.

    Flow: parse CLI options, download the current index, detect packs to upload, merge
    private-bucket content, then per pack: load metadata, collect content items, upload
    images, format metadata, prepare release notes, sign, zip, upload and update the index.
    Finally uploads the index, core-packs config and id_set, stores results for later CI
    jobs and prints a summary (optionally failing the build).
    """
    install_logging('Prepare_Content_Packs_For_Testing.log')
    option = option_handler()
    packs_artifacts_path = option.artifacts_path
    extract_destination_path = option.extract_path
    storage_bucket_name = option.bucket_name
    service_account = option.service_account
    target_packs = option.pack_names if option.pack_names else ""
    build_number = option.ci_build_number if option.ci_build_number else str(uuid.uuid4())
    override_all_packs = option.override_all_packs
    signature_key = option.key_string
    id_set_path = option.id_set_path
    packs_dependencies_mapping = load_json(option.pack_dependencies) if option.pack_dependencies else {}
    storage_base_path = option.storage_base_path
    remove_test_playbooks = option.remove_test_playbooks
    is_bucket_upload_flow = option.bucket_upload
    private_bucket_name = option.private_bucket_name
    circle_branch = option.circle_branch
    force_upload = option.force_upload
    landing_page_sections = load_json(LANDING_PAGE_SECTIONS_PATH)

    # google cloud storage client initialized
    storage_client = init_storage_client(service_account)
    storage_bucket = storage_client.bucket(storage_bucket_name)

    if storage_base_path:
        GCPConfig.STORAGE_BASE_PATH = storage_base_path

    # Relevant when triggering test upload flow
    if storage_bucket_name:
        GCPConfig.PRODUCTION_BUCKET = storage_bucket_name

    # download and extract index from public bucket
    index_folder_path, index_blob, index_generation = download_and_extract_index(storage_bucket,
                                                                                 extract_destination_path)

    # content repo client initialized
    content_repo = get_content_git_client(CONTENT_ROOT_PATH)
    current_commit_hash, previous_commit_hash = get_recent_commits_data(content_repo, index_folder_path,
                                                                        is_bucket_upload_flow, circle_branch)

    # detect packs to upload
    pack_names = get_packs_names(target_packs, previous_commit_hash)
    extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
    packs_list = [Pack(pack_name, os.path.join(extract_destination_path, pack_name)) for pack_name in pack_names
                  if os.path.exists(os.path.join(extract_destination_path, pack_name))]

    # taking care of private packs
    is_private_content_updated, private_packs, updated_private_packs_ids = handle_private_content(
        index_folder_path, private_bucket_name, extract_destination_path, storage_client, pack_names
    )

    if not option.override_all_packs:
        check_if_index_is_updated(index_folder_path, content_repo, current_commit_hash, previous_commit_hash,
                                  storage_bucket, is_private_content_updated)

    # google cloud bigquery client initialized
    bq_client = init_bigquery_client(service_account)
    packs_statistic_df = get_packs_statistics_dataframe(bq_client)

    # clean index and gcs from non existing or invalid packs
    clean_non_existing_packs(index_folder_path, private_packs, storage_bucket)

    # starting iteration over packs
    for pack in packs_list:
        task_status, user_metadata = pack.load_user_metadata()
        if not task_status:
            # Fixed: was `.value` - pack.status must be the PackStatus member *name*,
            # which the summary tables look up via PackStatus[pack.status] and
            # get_packs_summary compares against member names.
            pack.status = PackStatus.FAILED_LOADING_USER_METADATA.name
            pack.cleanup()
            continue

        task_status, pack_content_items = pack.collect_content_items()
        if not task_status:
            pack.status = PackStatus.FAILED_COLLECT_ITEMS.name
            pack.cleanup()
            continue

        task_status, integration_images = pack.upload_integration_images(storage_bucket)
        if not task_status:
            pack.status = PackStatus.FAILED_IMAGES_UPLOAD.name
            pack.cleanup()
            continue

        task_status, author_image = pack.upload_author_image(storage_bucket)
        if not task_status:
            pack.status = PackStatus.FAILED_AUTHOR_IMAGE_UPLOAD.name
            pack.cleanup()
            continue

        task_status, pack_was_modified = pack.detect_modified(content_repo, index_folder_path, current_commit_hash,
                                                              previous_commit_hash)
        if not task_status:
            pack.status = PackStatus.FAILED_DETECTING_MODIFIED_FILES.name
            pack.cleanup()
            continue

        task_status = pack.format_metadata(user_metadata=user_metadata, pack_content_items=pack_content_items,
                                           integration_images=integration_images, author_image=author_image,
                                           index_folder_path=index_folder_path,
                                           packs_dependencies_mapping=packs_dependencies_mapping,
                                           build_number=build_number, commit_hash=current_commit_hash,
                                           packs_statistic_df=packs_statistic_df,
                                           pack_was_modified=pack_was_modified,
                                           landing_page_sections=landing_page_sections)
        if not task_status:
            pack.status = PackStatus.FAILED_METADATA_PARSING.name
            pack.cleanup()
            continue

        task_status, not_updated_build = pack.prepare_release_notes(index_folder_path, build_number, pack_was_modified)
        if not task_status:
            pack.status = PackStatus.FAILED_RELEASE_NOTES.name
            pack.cleanup()
            continue

        if not_updated_build:
            pack.status = PackStatus.PACK_IS_NOT_UPDATED_IN_RUNNING_BUILD.name
            pack.cleanup()
            continue

        task_status = pack.remove_unwanted_files(remove_test_playbooks)
        if not task_status:
            # Fixed: `.name` was missing, so the raw enum member (not its name string)
            # was stored, breaking the same PackStatus[pack.status] lookups.
            pack.status = PackStatus.FAILED_REMOVING_PACK_SKIPPED_FOLDERS.name
            pack.cleanup()
            continue

        task_status = pack.sign_pack(signature_key)
        if not task_status:
            pack.status = PackStatus.FAILED_SIGNING_PACKS.name
            pack.cleanup()
            continue

        task_status, zip_pack_path = pack.zip_pack()
        if not task_status:
            pack.status = PackStatus.FAILED_ZIPPING_PACK_ARTIFACTS.name
            pack.cleanup()
            continue

        (task_status, skipped_pack_uploading, full_pack_path) = \
            pack.upload_to_storage(zip_pack_path, pack.latest_version,
                                   storage_bucket, override_all_packs
                                   or pack_was_modified)

        if not task_status:
            pack.status = PackStatus.FAILED_UPLOADING_PACK.name
            pack.cleanup()
            continue

        task_status, exists_in_index = pack.check_if_exists_in_index(index_folder_path)
        if not task_status:
            pack.status = PackStatus.FAILED_SEARCHING_PACK_IN_INDEX.name
            pack.cleanup()
            continue

        task_status = pack.prepare_for_index_upload()
        if not task_status:
            pack.status = PackStatus.FAILED_PREPARING_INDEX_FOLDER.name
            pack.cleanup()
            continue

        task_status = update_index_folder(index_folder_path=index_folder_path, pack_name=pack.name, pack_path=pack.path,
                                          pack_version=pack.latest_version, hidden_pack=pack.hidden)
        if not task_status:
            pack.status = PackStatus.FAILED_UPDATING_INDEX_FOLDER.name
            pack.cleanup()
            continue

        # in case that pack already exist at cloud storage path and in index, don't show that the pack was changed
        if skipped_pack_uploading and exists_in_index:
            pack.status = PackStatus.PACK_ALREADY_EXISTS.name
            pack.cleanup()
            continue

        pack.status = PackStatus.SUCCESS.name

    # upload core packs json to bucket
    upload_core_packs_config(storage_bucket, build_number, index_folder_path)

    # finished iteration over content packs
    upload_index_to_storage(index_folder_path=index_folder_path, extract_destination_path=extract_destination_path,
                            index_blob=index_blob, build_number=build_number, private_packs=private_packs,
                            current_commit_hash=current_commit_hash, index_generation=index_generation,
                            force_upload=force_upload, previous_commit_hash=previous_commit_hash,
                            landing_page_sections=landing_page_sections)

    # upload id_set.json to bucket
    upload_id_set(storage_bucket, id_set_path)

    # get the lists of packs divided by their status
    successful_packs, skipped_packs, failed_packs = get_packs_summary(packs_list)

    # Store successful and failed packs list in CircleCI artifacts - to be used in Upload Packs To Marketplace job
    packs_results_file_path = os.path.join(os.path.dirname(packs_artifacts_path), BucketUploadFlow.PACKS_RESULTS_FILE)
    store_successful_and_failed_packs_in_ci_artifacts(
        packs_results_file_path, BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING, successful_packs, failed_packs,
        updated_private_packs_ids
    )

    # summary of packs status
    print_packs_summary(successful_packs, skipped_packs, failed_packs, not is_bucket_upload_flow)
# Script entry point when executed directly (e.g. by the CI job).
if __name__ == '__main__':
    main()
| 46.314627 | 120 | 0.681154 |
acfb2dc705ccd34238f2a1018f8ee51cd39db204 | 3,601 | py | Python | addressnet/model.py | Ryancodeshard/address-net | 4763a12e34af137a71de3b284c357f1f953e856e | [
"MIT"
] | null | null | null | addressnet/model.py | Ryancodeshard/address-net | 4763a12e34af137a71de3b284c357f1f953e856e | [
"MIT"
] | null | null | null | addressnet/model.py | Ryancodeshard/address-net | 4763a12e34af137a71de3b284c357f1f953e856e | [
"MIT"
] | null | null | null | from typing import Dict, Optional
import tensorflow as tf
from addressnet.dataset import vocab, n_labels
def model_fn(features: Dict[str, tf.Tensor], labels: tf.Tensor, mode: str, params) -> tf.estimator.EstimatorSpec:
    """
    The AddressNet model function suitable for tf.estimator.Estimator

    :param features: a dictionary containing tensors for the encoded_text and lengths
    :param labels: a label for each character designating its position in the address
    :param mode: indicates whether the model is being trained, evaluated or used in prediction mode
    :param params: model hyperparameters, including rnn_size and rnn_layers
    :return: the appropriate tf.estimator.EstimatorSpec for the model mode
    """
    encoded_text, lengths = features['encoded_text'], features['lengths']

    rnn_size = params.get("rnn_size", 128)
    rnn_layers = params.get("rnn_layers", 3)

    # Fixed: tf.Variable has no `initializer` keyword (it treats its first positional
    # argument as the initial value), so the previous tf.Variable("embeddings", ...)
    # call raised a TypeError. tf.get_variable accepts an initializer Tensor.
    # One 8-dimensional embedding vector is learned per vocabulary entry.
    embeddings = tf.get_variable("embeddings", dtype=tf.float32,
                                 initializer=tf.random_normal(shape=(len(vocab), 8)))
    encoded_strings = tf.nn.embedding_lookup(embeddings, encoded_text)

    logits, loss = nnet(encoded_strings, lengths, rnn_layers, rnn_size, labels, mode == tf.estimator.ModeKeys.TRAIN)

    predicted_classes = tf.argmax(logits, axis=2)

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes,
            'probabilities': tf.nn.softmax(logits)
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    if mode == tf.estimator.ModeKeys.EVAL:
        # No evaluation metrics are defined yet; only the loss is reported.
        metrics = {}
        return tf.estimator.EstimatorSpec(
            mode, loss=loss, eval_metric_ops=metrics)

    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
def nnet(encoded_strings: tf.Tensor, lengths: tf.Tensor, rnn_layers: int, rnn_size: int, labels: tf.Tensor = None,
         training: bool = True) -> (tf.Tensor, Optional[tf.Tensor]):
    """
    Generates the RNN component of the model

    :param encoded_strings: a tensor containing the encoded strings (embedding vectors)
    :param lengths: a tensor of string lengths
    :param rnn_layers: number of layers to use in the RNN
    :param rnn_size: number of units in each layer
    :param labels: labels for each character in the string (optional)
    :param training: if True, dropout will be enabled on the RNN
    :return: logits and loss (loss will be None if labels is not provided)
    """
    # Dropout is only active during training; a keep-probability of 1.0 disables it.
    keep_prob = 0.8 if training else 1.0

    def make_cell():
        gru = tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(rnn_size)
        return tf.contrib.rnn.DropoutWrapper(gru, state_keep_prob=keep_prob, output_keep_prob=keep_prob)

    forward_stack = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(rnn_layers)])
    backward_stack = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(rnn_layers)])

    # Run the stacks over the sequence in both directions, then join the
    # per-timestep outputs along the feature axis.
    (output_fw, output_bw), _states = tf.nn.bidirectional_dynamic_rnn(forward_stack, backward_stack, encoded_strings,
                                                                      lengths, dtype=tf.float32)
    combined_output = tf.concat([output_fw, output_bw], axis=2)
    logits = tf.layers.dense(combined_output, n_labels, activation=tf.nn.elu)

    if labels is None:
        return logits, None

    # Mask out padded timesteps so they do not contribute to the loss.
    padding_mask = tf.sequence_mask(lengths, dtype=tf.float32)
    loss = tf.losses.softmax_cross_entropy(labels, logits, weights=padding_mask)
    return logits, loss
| 47.381579 | 119 | 0.698139 |
acfb2ec451b8912a57ca31959eda753a331103e1 | 1,422 | py | Python | codes/scripts/audio/librivox/preprocess_libritts.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 12 | 2020-12-13T12:45:03.000Z | 2022-03-29T09:58:15.000Z | codes/scripts/audio/librivox/preprocess_libritts.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 1 | 2020-12-31T01:12:45.000Z | 2021-03-31T11:43:52.000Z | codes/scripts/audio/librivox/preprocess_libritts.py | neonbjb/DL-Art-School | a6f0f854b987ac724e258af8b042ea4459a571bc | [
"Apache-2.0"
] | 3 | 2020-12-14T06:04:04.000Z | 2020-12-26T19:11:41.000Z | # Combines all libriTTS WAV->text mappings into a single file
import os
from tqdm import tqdm
if __name__ == '__main__':
    libri_root = 'E:\\audio\\LibriTTS'
    basis = 'train-clean-360'
    readers = os.listdir(os.path.join(libri_root, basis))
    # Context manager guarantees the combined list file is closed even if a
    # transcript is missing or malformed part-way through (previously the
    # handle was only closed on the happy path).
    with open(os.path.join(libri_root, f'{basis}_list.txt'), 'w', encoding='utf-8') as ofile:
        for reader_dir in tqdm(readers):
            reader = os.path.join(libri_root, basis, reader_dir)
            if not os.path.isdir(reader):
                continue
            for chapter_dir in os.listdir(reader):
                chapter = os.path.join(reader, chapter_dir)
                if not os.path.isdir(chapter):
                    continue
                # Renamed from `id`, which shadowed the builtin.
                chapter_id = f'{os.path.basename(reader)}_{os.path.basename(chapter)}'
                trans_file = f'{chapter_id}.trans.tsv'
                with open(os.path.join(chapter, trans_file), encoding='utf-8') as f:
                    trans_lines = [line.strip().split('\t') for line in f]
                for line in trans_lines:
                    wav_file, raw_text, normalized_text = line
                    wav_file = '/'.join([basis, reader_dir, chapter_dir, f'{wav_file}.wav'])
                    if not os.path.exists(os.path.join(libri_root, wav_file)):
                        print(f'!WARNING could not open {wav_file}')
                    else:
                        ofile.write(f'{wav_file}|{normalized_text}\n')
                ofile.flush()
| 43.090909 | 92 | 0.575949 |
acfb30776aec7f43bf37ea573bff0f984002ac41 | 3,236 | py | Python | profiles_project/settings.py | pvgirish/profiles-rest-api | 33428c14f708b1f6390286921e172deb2719dbe2 | [
"MIT"
] | 1 | 2021-06-03T06:00:05.000Z | 2021-06-03T06:00:05.000Z | profiles_project/settings.py | pvgirish/profiles-rest-api | 33428c14f708b1f6390286921e172deb2719dbe2 | [
"MIT"
] | null | null | null | profiles_project/settings.py | pvgirish/profiles-rest-api | 33428c14f708b1f6390286921e172deb2719dbe2 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = '%bzhno$0wn53vk%&he@pm^5bi*17ugni^y$f*nxj=ift@wusor'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list served hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Django REST Framework, its token-auth app, and this project's API app.
    'rest_framework',
    'rest_framework.authtoken',
    'profiles_api',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite file lives next to manage.py; fine for development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Replace Django's default User with the project's custom profile model.
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.68254 | 91 | 0.699938 |
acfb30b718702ee7a05b4861c2f9276110dd331e | 338 | py | Python | 03_EstruturasRepeticao/13_potenciacao.py | eduardovivi/Python_tests | b70d009d6180b136c50ccfec343a13f2c09b8029 | [
"MIT"
] | null | null | null | 03_EstruturasRepeticao/13_potenciacao.py | eduardovivi/Python_tests | b70d009d6180b136c50ccfec343a13f2c09b8029 | [
"MIT"
] | null | null | null | 03_EstruturasRepeticao/13_potenciacao.py | eduardovivi/Python_tests | b70d009d6180b136c50ccfec343a13f2c09b8029 | [
"MIT"
] | null | null | null | base = int(raw_input('Informe o valor da base: '))
# Read the exponent until a positive value is entered
# (Python 2 script: uses raw_input and print statements).
expoente = 0
while (expoente <= 0):
    expoente = int(raw_input('Informe o valor do expoente: '))
    if (expoente <= 0):
        print 'O expoente deve ser positivo!'
# Compute base ** expoente by repeated multiplication.
potencia = 1
for i in range(1, expoente + 1):
    potencia *= base
# Report "<base> elevada a <expoente> = <potencia>" (base raised to exponent).
print base, 'elevada a', expoente, '=', potencia
| 26 | 62 | 0.639053 |
acfb324570bfd82a32b4604029a6b0604fb2d8c3 | 5,202 | py | Python | ipproxytool/spiders/validator/validator.py | k1tCooler/himasoft | 546f11aafa9f17c36fc0f3bd98f3df5e4fe154b1 | [
"MIT"
] | null | null | null | ipproxytool/spiders/validator/validator.py | k1tCooler/himasoft | 546f11aafa9f17c36fc0f3bd98f3df5e4fe154b1 | [
"MIT"
] | 3 | 2021-03-18T20:24:09.000Z | 2021-12-13T19:44:52.000Z | ipproxytool/spiders/validator/validator.py | k1tCooler/himasoft | 546f11aafa9f17c36fc0f3bd98f3df5e4fe154b1 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
import random
import time
import datetime
import utils
import config
from scrapy import Request
from scrapy.spiders import Spider
from sql import SqlManager
class Validator(Spider):
    """Base spider that re-validates proxies stored in the database.

    Subclasses set ``name`` (also used as the proxy table name), ``urls``,
    ``headers`` and ``timeout``.  One request is issued per stored proxy;
    the parse callbacks then keep, promote or delete the proxy depending on
    whether the request succeeded and how fast it was.
    """
    name = 'base'
    concurrent_requests = 16
    retry_enabled = False

    def __init__(self, name=None, **kwargs):
        super(Validator, self).__init__(name, **kwargs)

        self.urls = []  # validation target URLs; populated by subclasses
        self.headers = None  # request headers; populated by subclasses
        self.timeout = 10  # seconds; proxies slower than this are discarded
        self.is_record_web_page = False  # if True, dump each response body to dir_log
        self.sql = SqlManager()

    def init(self):
        """Prepare the log directory and the proxy table for this validator."""
        self.dir_log = 'log/validator/%s' % self.name
        utils.make_dir(self.dir_log)
        self.sql.init_proxy_table(self.name)

    @classmethod
    def update_settings(cls, settings):
        # Apply per-spider concurrency/retry settings unless the subclass
        # already defines its own custom_settings.
        settings.setdict(cls.custom_settings or {
            'CONCURRENT_REQUESTS': cls.concurrent_requests,
            'RETRY_ENABLED': cls.retry_enabled,
        },
            priority='spider')

    def start_requests(self):
        """Yield one validation request per stored proxy.

        Proxies are drawn from two tables: this validator's own table and the
        shared httpbin (free) table; ``meta['table']`` records the origin so
        the callbacks know whether to update, insert or delete.
        """
        count = self.sql.get_proxy_count(self.name)
        count_free = self.sql.get_proxy_count(config.httpbin_table)

        ids = self.sql.get_proxy_ids(self.name)
        ids_httpbin = self.sql.get_proxy_ids(config.httpbin_table)

        for i in range(0, count + count_free):
            table = self.name if (i < count) else config.httpbin_table
            # Index into the matching id list (offset by len(ids) for httpbin).
            proxy_id = ids[i] if i < count else ids_httpbin[i - len(ids)]

            proxy = self.sql.get_proxy_with_id(table, proxy_id)
            if proxy is None:  # fixed: identity comparison instead of '== None'
                continue

            url = random.choice(self.urls)
            cur_time = time.time()
            yield Request(
                    url=url,
                    headers=self.headers,
                    meta={
                        'cur_time': cur_time,
                        'download_timeout': self.timeout,
                        'proxy_info': proxy,
                        'table': table,
                        'proxy': 'http://%s:%s' % (proxy.ip, proxy.port),
                    },
                    dont_filter=True,
                    callback=self.success_parse,
                    errback=self.error_parse,
            )

    def success_parse(self, response):
        """Handle a successful response: measure speed, then keep, promote or
        drop the proxy depending on its origin table and measured speed."""
        proxy = response.meta.get('proxy_info')
        table = response.meta.get('table')

        self.save_page(proxy.ip, response.body)
        self.log('success_parse speed:%s meta:%s' %
                 (time.time() - response.meta.get('cur_time'), response.meta))

        proxy.vali_count += 1
        proxy.speed = time.time() - response.meta.get('cur_time')
        if self.success_content_parse(response):
            if table == self.name:
                # Proxy already belongs to this validator: drop it if it has
                # become too slow, otherwise refresh its stats.
                if proxy.speed > self.timeout:
                    self.sql.del_proxy_with_id(table, proxy.id)
                else:
                    self.sql.update_proxy(table, proxy)
            else:
                # Proxy came from the free pool: promote it into this
                # validator's table if it is fast enough.
                if proxy.speed < self.timeout:
                    self.sql.insert_proxy(table_name=self.name, proxy=proxy)
        else:
            # Response content failed validation; purge it from our table.
            if table == self.name:
                self.sql.del_proxy_with_id(table_name=table, id=proxy.id)

        self.sql.commit()

    def success_content_parse(self, response):
        """Hook for subclasses to validate the response body; default accepts all."""
        return True

    def error_parse(self, failure):
        """Handle a failed request: delete the proxy from this validator's table."""
        request = failure.request
        self.log('error_parse value:%s url:%s meta:%s' %
                 (failure.value, request.url, request.meta))

        proxy = failure.request.meta.get('proxy_info')
        table = failure.request.meta.get('table')

        if table == self.name:
            self.sql.del_proxy_with_id(table_name=table, id=proxy.id)
        else:
            # TODO: when validation of a free-pool proxy fails, handle the
            # specific failure type instead of ignoring it -- e.g.
            # failure.check(HttpError) for bad status codes,
            # failure.check(DNSLookupError) / failure.check(TimeoutError)
            # for connectivity problems (see scrapy errback docs).
            pass

    def save_page(self, ip, data):
        """Dump a response body to ``dir_log`` when page recording is enabled.

        The filename combines a microsecond timestamp with the proxy ip so
        repeated validations of one proxy never collide.
        """
        filename = '{time} {ip}'.format(
                time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S:%f'), ip=ip)

        if self.is_record_web_page:
            # 'with' closes the file; the original's extra f.close() was redundant.
            with open('%s/%s.html' % (self.dir_log, filename), 'wb') as f:
                f.write(data)

    def close(self, spider, reason):
        # Flush pending DB writes and release the connection on spider close.
        spider.sql.commit()
        spider.sql.close()
| 33.779221 | 81 | 0.553825 |
acfb33730892c27922f22a442ef653a991ddabcd | 70 | py | Python | filters/encoding.py | adibalcan/crawlingbot | 9f2a8b13dccafcc07cf7760e1498cf51cf691277 | [
"MIT"
] | 1 | 2016-10-07T14:10:58.000Z | 2016-10-07T14:10:58.000Z | filters/encoding.py | adibalcan/crawlingbot | 9f2a8b13dccafcc07cf7760e1498cf51cf691277 | [
"MIT"
] | null | null | null | filters/encoding.py | adibalcan/crawlingbot | 9f2a8b13dccafcc07cf7760e1498cf51cf691277 | [
"MIT"
] | null | null | null | def filter(source, meta={}):
return meta["response"].textencoding
| 23.333333 | 40 | 0.7 |
acfb33b06b81656275c5dd55b35a465e1d0c2c0a | 3,762 | py | Python | python-examples/primary-example.py | BrainsOnBoard/eye-renderer | 59ae026fcb08e029500d1d0a8f152a37200ba260 | [
"MIT"
] | 2 | 2021-10-07T07:08:47.000Z | 2021-11-02T15:52:14.000Z | python-examples/primary-example.py | BrainsOnBoard/eye-renderer | 59ae026fcb08e029500d1d0a8f152a37200ba260 | [
"MIT"
] | 1 | 2021-10-07T09:21:51.000Z | 2021-11-01T21:52:30.000Z | python-examples/primary-example.py | BrainsOnBoard/compound-ray | 59ae026fcb08e029500d1d0a8f152a37200ba260 | [
"MIT"
] | 1 | 2021-11-07T12:51:58.000Z | 2021-11-07T12:51:58.000Z | import os.path
import time
from ctypes import *
from sys import platform
from numpy.ctypeslib import ndpointer
import numpy as np
from PIL import Image
import eyeRendererHelperFunctions as eyeTools
# Makes sure we have a "test-images" folder
if not os.path.exists("test-images"):
  os.mkdir("test-images")
sleepTime = 5 # How long to sleep between rendering images
try:
  # Load the renderer
  # NOTE(review): the relative .so path assumes the script runs from the
  # python-examples/ folder -- confirm the working directory before running.
  eyeRenderer = CDLL("../build/make/lib/libEyeRenderer3.so")
  print("Successfully loaded ", eyeRenderer)
  # Configure the renderer's function outputs and inputs using the helper functions
  eyeTools.configureFunctions(eyeRenderer)
  # Load a scene
  eyeRenderer.loadGlTFscene(c_char_p(b"../data/ofstad-arena/ofstad-acceptance-angle.gltf"))
  # Resize the renderer display
  # This can be done at any time, but restype of getFramePointer must also be updated to match as such:
  renderWidth = 200
  renderHeight = 200
  eyeRenderer.setRenderSize(renderWidth,renderHeight)
  # Frame buffer is exposed as a (height, width, RGBA) array of unsigned bytes.
  eyeRenderer.getFramePointer.restype = ndpointer(dtype=c_ubyte, shape = (renderHeight, renderWidth, 4))
  # An alternative to the above two lines would be to run:
  #eyeTools.setRenderSize(eyeRenderer, renderWidth, renderHeight)
  # Iterate through a few cameras and do some stuff with them
  for i in range(5):
    # Actually render the frame
    renderTime = eyeRenderer.renderFrame()
    print("View from camera '", eyeRenderer.getCurrentCameraName(), " rendered in ", renderTime)
    eyeRenderer.displayFrame() # Display the frame in the renderer
    # Save the frame as a .ppm file directly from the renderer
    eyeRenderer.saveFrameAs(c_char_p(("test-images/test-image-"+str(i)+".ppm").encode()))
    # Retrieve frame data
    # Note: This data is not owned by Python, and is subject to change
    # with subsequent calls to the renderer so must be deep-copied if
    # you wish for it to persist.
    frameData = eyeRenderer.getFramePointer()
    frameDataRGB = frameData[:,:,:3] # Remove the alpha component
    print("FrameData type:", type(frameData))
    print("FrameData:\n",frameData)
    print("FrameDataRGB:\n",frameDataRGB)
    # Use PIL to display the image (note that it is vertically inverted)
    img = Image.fromarray(frameDataRGB, "RGB")
    img.show()
    # Vertically un-invert the array and then display
    rightWayUp = np.flipud(frameDataRGB)
    #rightWayUp = frameDataRGB[::-1,:,:] also works
    img = Image.fromarray(rightWayUp, "RGB")
    img.show()
    # If the current eye is a compound eye, set the sample rate for it high and take another photo
    if(eyeRenderer.isCompoundEyeActive()):
      print("This one's a compound eye, let's get a higher sample rate image!")
      eyeRenderer.setCurrentEyeSamplesPerOmmatidium(100);
      renderTime = eyeRenderer.renderFrame() # Render the frame
      eyeRenderer.saveFrameAs(c_char_p(("test-images/test-image-"+str(i)+"-100samples.ppm").encode()))# Save it
      Image.fromarray(eyeRenderer.getFramePointer()[::-1,:,:3], "RGB").show() # Show it in PIL (the right way up)
      ## Change this compound eye's ommatidia to only be the first 10 in the list:
      #time.sleep(5)
      #ommList = eyeTools.readEyeFile(eyeRenderer.getCurrentEyeDataPath())
      #eyeTools.setOmmatidiaFromOmmatidiumList(eyeRenderer,ommList[:10])
      #eyeRenderer.renderFrame()
      #eyeRenderer.displayFrame()
      ## Put it back
      #eyeTools.setOmmatidiaFromOmmatidiumList(eyeRenderer,ommList)
      #eyeRenderer.renderFrame()
      #eyeRenderer.displayFrame()
    print("Sleeping for " + str(sleepTime) + " seconds...")
    # Change to the next Camera
    eyeRenderer.nextCamera()
    time.sleep(sleepTime)
  # Finally, stop the eye renderer
  eyeRenderer.stop()
except Exception as e:
  # NOTE(review): broad catch prints and swallows every error -- acceptable
  # for a demo script, but consider re-raising in library code.
  print(e);
| 36.524272 | 113 | 0.720627 |
acfb3498d274f0076d94c8e74ab01abcd048ae6a | 161 | py | Python | src/pytheas/data/annotation_state.py | dcronkite/pytheas | 3cdd6a21bda488e762931cbf5975964d5e574abd | [
"MIT"
] | null | null | null | src/pytheas/data/annotation_state.py | dcronkite/pytheas | 3cdd6a21bda488e762931cbf5975964d5e574abd | [
"MIT"
] | null | null | null | src/pytheas/data/annotation_state.py | dcronkite/pytheas | 3cdd6a21bda488e762931cbf5975964d5e574abd | [
"MIT"
] | null | null | null | import enum
class AnnotationState(enum.Enum):
READY = 0
IN_PROGRESS = 1
DONE = 2
DELETED = 3
ON_HOLD = 4
NOT_READY = 5
SKIPPED = 6
| 13.416667 | 33 | 0.590062 |
acfb361ecf67625a472d33e5a06fd297ca52c30b | 25,778 | py | Python | read_write_DIC_files/bubble_data_helper/BubbleDataHelper.py | cjekel/inverse_bubble_inflation | 9ec50f65cc42d4fa49af0829f90a0bf98b6a3bf4 | [
"MIT"
] | null | null | null | read_write_DIC_files/bubble_data_helper/BubbleDataHelper.py | cjekel/inverse_bubble_inflation | 9ec50f65cc42d4fa49af0829f90a0bf98b6a3bf4 | [
"MIT"
] | 2 | 2018-07-09T20:55:31.000Z | 2018-08-06T23:04:11.000Z | read_write_DIC_files/bubble_data_helper/BubbleDataHelper.py | cjekel/inverse_bubble_inflation | 9ec50f65cc42d4fa49af0829f90a0bf98b6a3bf4 | [
"MIT"
] | 1 | 2018-05-17T18:51:03.000Z | 2018-05-17T18:51:03.000Z | # -*- coding: utf-8 -*-
# =============================================================================
# MIT License
#
# Copyright (c) 2018 Andrés Bernardo
# Copyright (c) 2019 Charles Jekel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# =============================================================================
##=============================================================================
## """ Bubble Test Data Helper (command line version) """ :
## File compression;
## zero-Z-displacement data removal to isolate specimen;
## optional plot grid @ apparatus surface
##=============================================================================
# =============================================================================
# This function has 1 required argument and 3 optional arguments.
# Required argument 1, "dataFolder":
# string of the filepath of folder containing TecData ".dat" files.
# By default, this script will save resulting ".npz" files in a new
# folder under the same directory as this script's current directory.
# Optional Argument 1, "removeZeroZ":
# boolean to turn on/off "dispZ != 0" data removal tool.
# Setting this to 1 will remove all data points that have an
# displaced Z value of 0mm; this removes data with 0 displacement,
# i.e., data points that are not relevent to the specimen.
# Setting this to 0 keeps data unaffected.
# Default is 0 a.k.a. False.
# Optional argument 2, "outputPlot": (work in progress--to be completed)
# boolean to deliver 3D plots of the displacement field
# from given TecData, which will be saved in ".png" format
# in the same location as the compressed ".npz" files.
# Default is 0 a.k.a. False.
# Optional argument 3: "surfaceGrid" (work in progress--to be completed)
# boolean to turn on/off grid representing apparatus surface @ Z=10mm.
# This grid is added to the 3D plots as a visual aid,
# replacing the data points where Z displacement = 0mm.
# Default is 0 a.k.a. False.
# If optional argument 2 "outputPlot" is 0,
# OR if optional argument 1 "removeZeroZ" is 0,
# then this option is also 0 by default, regardless of cmd line input.
# =============================================================================
###############################################################################
##=============================================================================
## How to use BubbleDataHelper:
##
## -> add the Python interpreter to your "Path" Environment Variable
###============================================================================
### If you are not sure if Python is added to your path,
### here are some resources to assist you:
### Windows:
### https://superuser.com/questions/143119/
### how-do-i-add-python-to-the-windows-path
### https://www.pythoncentral.io/add-python-to-path-python-is-not-recognized-
### as-an-internal-or-external-command/
### (you will need to know the location of your python interpreter,
### a.k.a. the folder where "python.exe" file is located; common locations
### are C:\Python27, C:\Python36, C:\Users\[name]\Anaconda3, etc.)
###
### Linux:
### https://stackoverflow.com/questions/18247333/python-pythonpath-in-linux
###
### MacOS:
### https://docs.python.org/2/using/mac.html
### https://stackoverflow.com/questions/3387695/add-to-python-path-mac-os-x
### https://stackoverflow.com/questions/3696124/changing-python-path-on-mac
###============================================================================
##
## -> copy directory (a.k.a. filepath) of folder containing this script
## (use keyboard shortcut "ctrl+c")
##
## -> open command prompt
###============================================================================
### If you are not sure how to open a command prompt/terminal,
### here are some resources to assist you:
### Windows:
### https://www.howtogeek.com/235101/10-ways-to-open-
### the-command-prompt-in-windows-10/
###
### Linux:
### https://askubuntu.com/questions/196212/how-do-you-open-a-command-line
###
### MacOS:
### https://www.howtogeek.com/210147/how-to-open-terminal-
### in-the-current-os-x-finder-location/
### http://blog.teamtreehouse.com/introduction-to-the-mac-os-x-command-line
###============================================================================
##
## -> change command directory by typing the command "cd " (with a space),
## then paste the filepath of this script's containing folder
## (using the keyboard shortcut "ctrl+v"), e.g., cd C:\temp
##
## --> note: if necessary, use the commands "C:" or "cd /d C:"
## to switch disks to the C drive (or any drive of your choosing)
##
## -> press "enter" (you should see the directory change on the command line)
##
## -> type
##
## python BubbleDataHelper.py --dataFolder arg1 --removeZeroZ arg2
## --outputPlot arg3 --surfaceGrid arg4
##
## where "arg1", "arg2", "arg3", and "arg4" are replaced with the
## correct argument inputs as described in the previous section:
##
## --> "arg1" is "dataFolder", a string of the file path of the
## folder that contains the ".dat" files to be compressed;
## e.g.: C:\temp\Example
##
## --> "arg2" is "removeZeroZ", a boolean that decides whether
## or not to remove data points where Z displacement is 0mm;
## use 1 for true, 0 for false; if no input is given, the script
## will use default value of 0
##
## --> "arg3" is "outputPlot", a boolean that decides whether or not to
## output plots of the given data, which will be saved in ".png" format
## in the same location as the compressed ".npz" files;
## use 1 for true, 0 for false; if no input is given, the script
## will use default value of 0
##
## --> "arg4" is "surfaceGrid", a boolean that decides whether or not
## to include a 2D grid representing the apparatus surface @ Z=10mm;
## use 1 for true, 0 for false; as said earlier,
## if "removeZeroZ"=0 or "outputPlot"=0, the script
## will use default value of 0; also, if no input is given, the script
## will use default value of 0
##
## e.g., if you wish to compress files in the folder "C:\temp\Example",
## and you wish to remove data points where Z displacement is 0mm,
## and you wish to output 3D plots of the data,
## and you wish to include a 2D grid at the apparatus surface,
## type the following:
##
## python BubbleDataHelper.py --dataFolder C:\temp\Example
## --removeZeroZ 1 --outputPlot 1 --surfaceGrid 1
##
## -> press "enter"
##
##
## note: the arguments can be given in any order, for example...
## python cmd_BubbleDataHelper.py --surfaceGrid 1 --outputPlot 1
## --removeZeroZ 1 --dataFolder C:\temp\Example
##
## note: if there are spaces within folder names in the necessary paths,
## you may use quotation marks to avoid errors, e.g., instead of
## cd C:\temp\Example with Space\Example, you can use
## cd C:\temp\"Example with Space"\Example, or
## cd "C:\temp\Example with Space\Example"
##
## note: this function will also work using "True", "T", or "t" for 1
## & "False", "F", or "f" for 0)
##
## note: for the work-in-progress sections, the arguments will be unused
## until the coding is completed
##=============================================================================
###############################################################################
import sys
import os
import os.path as path
import argparse
import glob
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# this function evaluates the given folder path argument, dataFolder,
# to determine if it is valid
def is_folderPathStr_valid(folderPath):
    """Validate that *folderPath* names an existing directory.

    Strips surrounding whitespace, echoes the cleaned path, and returns it.
    If the path is not a directory (or a ValueError occurs), prints an
    explanation and terminates the program via sys.exit().
    """
    try:
        folderPath = folderPath.strip()  # drop accidental surrounding spaces
        print(folderPath)
        if path.isdir(folderPath):
            return folderPath
        print('Sorry, that is not a valid folder directory. ')
    except ValueError:
        print('Unknown error encountered.\n')
    # Reached only on an invalid path or a ValueError: abort the run.
    print('Cancelling operation...\n')
    sys.exit()
# this function validates the various forms of the boolean inputs (str or int)
# and returns True or False values, strictly of type "bool";
# default values of False are enforced if nonsense string inputs are given
def parse_boolean(arg):
    """Coerce a command-line flag value to a strict bool.

    Strings are stripped, lower-cased and matched against the accepted
    truthy spellings ('1', 'true', 't'); anything else is False.
    bool/int inputs are passed through bool(). Any other type is returned
    unchanged, exactly as given.
    """
    try:
        if type(arg) is str:
            # Command-line arguments always arrive as strings.
            cleaned = arg.strip().lower()
            arg = cleaned in ('1', 'true', 't')
        elif type(arg) is bool or type(arg) is int:
            # Future callers may hand us booleans or integers directly.
            arg = bool(arg)
    except ValueError:
        print('Error encountered when parsing boolean arguments.')
        print('Cancelling operation...\n')
        sys.exit()
    return arg
# the function below checks the input arguments and cancels the operation
# if the arguments cause unexpected errors, or if no arguments are given;
# otherwise, the script continues
def check_given_arguments():
    """Parse the command line and return the four run options.

    Returns [dataFolder, removeZeroZ, outputPlot, surfaceGrid]: the
    validated folder path string followed by three booleans. argparse
    exits with an error message if the required --dataFolder is missing;
    unexpected ValueErrors abort via sys.exit().
    """
    try:
        # argparse receives every option as a string; --dataFolder is
        # mandatory, the three flags default to False when omitted.
        parser = argparse.ArgumentParser()
        parser.add_argument('--dataFolder', required=True)
        parser.add_argument('--removeZeroZ', default=False)
        parser.add_argument('--outputPlot', default=False)
        parser.add_argument('--surfaceGrid', default=False)
        parsed = parser.parse_args()

        # The folder must exist; is_folderPathStr_valid exits otherwise.
        dataFolder = is_folderPathStr_valid(parsed.dataFolder)

        # Normalize the three optional flags; a missing (None) value maps
        # to False, anything else goes through parse_boolean.
        flags = []
        for raw_value in (parsed.removeZeroZ, parsed.outputPlot, parsed.surfaceGrid):
            if raw_value is None:
                flags.append(False)
            else:
                flags.append(parse_boolean(raw_value))
        removeZeroZ, outputPlot, surfaceGrid = flags
    except ValueError:
        print('Error encountered when parsing arguments.')
        print('Cancelling operation...\n')
        sys.exit()
    return [dataFolder, removeZeroZ, outputPlot, surfaceGrid]
def circleFit(X, Y):
    """Least-squares circle fit to the 2D points (X, Y).

    Solves the linear system arising from (x-a)^2 + (y-b)^2 = r^2, i.e.
    2*a*x + 2*b*y + (r^2 - a^2 - b^2) = x^2 + y^2, for [a, b, c].

    Args:
        X, Y: 1-D array-likes of equal length (lists are accepted too).

    Returns:
        (xc, yc, r): center coordinates and radius, each a length-1
        numpy array (matching the original return shape).
    """
    # Accept plain Python sequences as well as numpy arrays.
    X = np.asarray(X, dtype=float)
    Y = np.asarray(Y, dtype=float)
    # assemble the A matrix: columns [2x, 2y, 1]
    A = np.zeros((len(X), 3))
    A[:, 0] = X * 2
    A[:, 1] = Y * 2
    A[:, 2] = 1
    # assemble the f vector: x^2 + y^2
    f = np.zeros((len(X), 1))
    f[:, 0] = (X * X) + (Y * Y)
    # rcond=None silences the FutureWarning and opts into the modern,
    # machine-precision-based cutoff for small singular values.
    C, residules, rank, singval = np.linalg.lstsq(A, f, rcond=None)
    # solve for r: c = r^2 - a^2 - b^2  =>  r = sqrt(a^2 + b^2 + c)
    r = np.sqrt((C[0] * C[0]) + (C[1] * C[1]) + C[2])
    return C[0], C[1], r
###############################################################################
# run the "check_given_arguments()" function to check the given arguments;
# operation will either continue with the proper arguments as given
# or end without further action
[dataFolder, removeZeroZ, outputPlot, surfaceGrid] = check_given_arguments()
dataFolder = path.normpath(dataFolder)
# if outputPlot is False, or if removeZeroZ is False,
# then surfaceGrid must be false by default, regardless of user input:
if not removeZeroZ : surfaceGrid = False
if not outputPlot : surfaceGrid = False
# use the python "global" module to find all ".dat" files in the given folder
datFileList = glob.glob( path.join(dataFolder, '*.dat') )
# print out the total # of ".dat" files found in the given folder
print('\nFound', len(datFileList), '".dat" files in folder', dataFolder)
count = 0
###############################################################################
# if there are 1 or more ".dat" files, continue; otherwise, stop operation
if len(datFileList) > 0:
    # print out each ".dat" filepath to show the user
    for line in datFileList: print(line)
    # variable "dirpath" is the path of the folder containing this script
    dir_path = path.dirname(path.realpath(__file__))
    # create a path for a new folder in which the compressed data will be saved
    npzFolder = path.join(dataFolder, 'CompressedNumpyData_' + \
                          path.basename(dataFolder) )
#==============================================================================
# if the new folder in which compressed data will be saved already exists,
# cancel the operation & display a message; otherwise, continue
    if not path.exists(npzFolder):
        # print out the location of the new folder where compressed data
        # will be saved (same location as this script's path)
        print('\nCompressed numpy files (".npz") will be saved to the folder',\
              npzFolder)
        # create the new folder where compressed data will be saved
        os.makedirs(npzFolder)
###############################################################################
# if argument "outputPlot" is True, create the folders
# in which plots are to be saved
    if outputPlot:
        plotFolder = path.join(dir_path, 'Plots_' + \
                               path.basename( dataFolder ) )
        if not path.exists(plotFolder):
            os.makedirs(plotFolder)
            print('\nDefault plots will be saved in the folder', \
                  plotFolder)
            plotFolderAlreadyExists = False
        else:
            print('\nThere is already a folder', plotFolder)
            print('No default plots will be saved.')
            plotFolderAlreadyExists = True
        plotHQFolder = path.join(dir_path, 'PlotsHQ_' + \
                                 path.basename( dataFolder ) )
        if not path.exists(plotHQFolder):
            os.makedirs(plotHQFolder)
            print('\nHigh-quality plots will be saved in the folder', \
                  plotHQFolder)
            plotHQFolderAlreadyExists = False
        else:
            print('\nThere is already a folder', plotHQFolder)
            print('No high-quality plots will be saved.')
            plotHQFolderAlreadyExists = True
###############################################################################
#==============================================================================
# loop is iterated over each filepath stored in variable "datFileList"
    for line in datFileList :
        fileNameNoExtension = path.splitext( path.basename(line) )[0]
        ## splits the actual filename from its extension, e.g.:
        ## "B00001.dat" --> ("B00001", ".dat")
        ## using [0] selects the firt element, "B00001"
        datNumpyArray = np.loadtxt(line, skiprows = 3)
        ## load data file into a numpy array
        ## parameter "skiprows" is used to remove headers in ".dat" files
###############################################################################
# if argument "removeZeroZ" is True,
# remove 0mm Z-displacment data points;
# otherwise, continue without affecting data
        # NOTE(review): the specimen center (xc, yc) is fit once, from the
        # FIRST file's nonzero-displacement points, and reused for every
        # later file -- confirm the first frame is representative.
        if removeZeroZ:
            datNumpyArray = datNumpyArray[ (datNumpyArray[:,2] != 0) \
                                          | (datNumpyArray[:,5] != 0)]
            if count == 0:
                # if this is the first file, find the center
                xc, yc, r = circleFit(datNumpyArray[:, 0], datNumpyArray[:, 1])
            count += 1
            # adjust the x and y data based on the center
            datNumpyArray[:, 0] = datNumpyArray[:, 0] - xc
            datNumpyArray[:, 1] = datNumpyArray[:, 1] - yc
            # the 6th column contains Z-displacement data,
            # the 3rd column contains initial Z data;
            # datNumpyArray = datNumpyArray[ (datNumpyArray[:,2] != 0) \
            # | (datNumpyArray[:,5] != 0)]
            # is read as:
            # "keep all rows of 'datNumpyArray' where the number in the
            # 6th column of 'datNumpyArray' is nonzero
            # OR where the number in 3rd column is nonzero"
###############################################################################
# if argument "outputPlot" is True, save plots in .png format
# otherwise, continue without affecting data
        if outputPlot:
            # '&' here is a bitwise AND of two Python bools -- equivalent
            # to 'and' for bool operands, though 'and' is conventional.
            if not (plotFolderAlreadyExists & plotHQFolderAlreadyExists):
                fig = plt.figure()
                ax = fig.add_subplot(111, projection='3d')
                X = datNumpyArray[:,0]
                Y = datNumpyArray[:,1]
                Z = datNumpyArray[:,2]
                dispX = datNumpyArray[:,3]
                dispY = datNumpyArray[:,4]
                dispZ = datNumpyArray[:,5]
                finalX = X+dispX
                finalY = Y+dispY
                finalZ = Z+dispZ
                ax.scatter(finalX, finalY, finalZ, zdir='z',\
                           s=.2, c='b', depthshade=False, edgecolor='')
                # the commented-out code below was used to determine
                # suitable axes limits that encompassed all data for all
                # bubble tests; the hard-coded limits below are a result
                # of inspecting the resulting fitted graphs...
                # if len(datNumpyArray) > 0:
                # xMin = np.round( np.min(finalX) )
                # xMax = np.round( np.max(finalX) )
                # yMin = np.round( np.min(finalY) )
                # yMax = np.round( np.max(finalY) )
                # zMin = np.round( np.min(finalZ) )
                # zMax = np.round( np.max(finalZ) )
                # ax.set_xlim3d( xMin-5, xMax+5 )
                # ax.set_ylim3d( yMin-5, yMax+5 )
                # ax.set_zlim3d( zMin-5, zMax+5 )
                # else:
                # xMin = -100; xMax = 100
                # yMin = -100; yMax = 100
                # zMin = -8; zMax = 50
                # ax.set_xlim3d(xMin, xMax)
                # ax.set_ylim3d(yMin, yMax)
                # ax.set_zlim3d(zMin, zMax)
                # ...these limits were selected as a result of the above:
                ax.set_xlim3d(-120, 120)
                ax.set_ylim3d(-120, 120)
                ax.set_zlim3d(-120, 180)
                ax.set_xlabel('X (mm)')
                ax.set_ylabel('Y (mm)')
                ax.set_zlabel('Z (mm)')
###############################################################################
# if argument "surfaceGrid" is True, add a 2D grid @ Z=10mm
# otherwise, continue without affecting data
                if surfaceGrid:
                    # x_surf=np.arange(xMin-5, xMax+5, 1)
                    # y_surf=np.arange(yMin-5, yMax+5, 1)
                    x_surf=np.arange(-120, 120, 1)
                    y_surf=np.arange(-120, 120, 1)
                    x_surf, y_surf = np.meshgrid(x_surf,y_surf,sparse=True)
                    z_surf = 10
                    ax.plot_wireframe(x_surf, y_surf, z_surf, color='k', \
                                      linewidth=0.5, linestyle='--', \
                                      rcount=10, ccount=10)
###############################################################################
                ## save plot: default size & resolution
                if not (plotFolderAlreadyExists):
                    plt.savefig(path.join(plotFolder, \
                                path.basename(dataFolder)+'_'+ \
                                fileNameNoExtension+'.png'), \
                                bbox_inches='tight', dpi=100)
                ## save plot: high-quality size & resolution
                if not (plotHQFolderAlreadyExists):
                    ## sets the dimensions of the high-quality image;
                    ## the dimensions [13.66,7.02] are in inches
                    ## and were chosen as a suitable high-definition size
                    fig.set_size_inches(np.array([13.66,7.02]))
                    plt.savefig(path.join(plotHQFolder, \
                                path.basename(dataFolder)+'_'+ \
                                fileNameNoExtension+'_HQ.png'), \
                                bbox_inches='tight', dpi=300)
                plt.close(fig)
###############################################################################
        ## save numbers into a compressed numpy array (headers are removed)
        ## note: "zippedArray" is an arbitrary callback to retrieve data
        np.savez_compressed(path.join(npzFolder, fileNameNoExtension), \
                            zippedArray = datNumpyArray)
#==============================================================================
## end of "for line in datFileList" loop
#==============================================================================
###############################################################################
# NOTE(review): this 'else' binds to 'if len(datFileList) > 0:' above, yet its
# message (and the reference to npzFolder, which is unbound on this path)
# suggests it was meant for the 'if not path.exists(npzFolder):' check --
# verify the intended pairing before relying on this branch.
else:
    # cancel the operation & display a message if the new folder
    # in which compressed data would have been saved already exists
    print('\nThere is already a folder', npzFolder, '\nPlease try again.')
    print('Cancelling operation...\n')
##=============================================================================
## here is an example on how to retrieve data from the zipped ".npy" files...
## (zipped numpy file extension = ".npz")
#
#datZippedList = glob.glob( path.join(npzFolder, '*.npz') )
## creates list of strings of zipped numpy filenames that we plan to retrieve
## utilizes the "global" method to find files with ".npz" extension
#
#unzippedDictionary = {}
## initialize empty dictionary; data will be saved into this variable
#
#for line in datZippedList :
## uncomment these 2 lines for further understanding of file manipulation
## print( path.splitext( path.basename(line) )[0] )
## print( path.join(dir_path, line) )
#
# with np.load(path.join(dir_path, line)) as unzipArray:
# test_Zip_retrieved = unzipArray['zippedArray']
# ## loads the values into a dictionary-like variable
# ## called "unzipArray"; this particular syntax using "with" command
# ## makes sure the associated files saved on disk are closed after use
#
# unzippedDictionary[ path.splitext( path.basename(line) )[0] ] = \
# unzipArray['zippedArray']
#
## to retrieve data from B00001.npz, use unzippedDictionary['B00001']
## to retrieve data from B00002.npz, use unzippedDictionary['B00002']
## etc.
## this is so we can use the filenames, B00001 etc., as variable names
##============================================================================= | 47.386029 | 99 | 0.530491 |
acfb362c9632c2c667375806322e5ef53e2d4fe7 | 7,365 | py | Python | smeter.py | kkatayama/basic_site | c71cb2c574d63f55e3a90422f31c17e20d7897b3 | [
"MIT"
] | null | null | null | smeter.py | kkatayama/basic_site | c71cb2c574d63f55e3a90422f31c17e20d7897b3 | [
"MIT"
] | 4 | 2021-03-03T15:12:13.000Z | 2021-03-03T15:13:30.000Z | smeter.py | kkatayama/basic_site | c71cb2c574d63f55e3a90422f31c17e20d7897b3 | [
"MIT"
] | null | null | null | # coding: utf-8
from bottle import Bottle, route, request, redirect, run, template, static_file, debug
import pandas as pd
import requests
import string
import json
import sys
import os
# Single WSGI application object; every @app.route below attaches to it.
app = Bottle()
##########################################
#      GLOBAL VARIABLES / SETTINGS       #
##########################################
# The Adafruit IO key is read once at startup from a local file named "api_key".
with open('api_key') as f:
    api_key = {'key': '{}'.format(f.read().strip())}
# Application-wide state stored in bottle's config:
#   app.root_path  - directory used as the root for static-file serving
#   app.logged_in  - single shared session (one flag/name for ALL clients)
#   app.accounts   - username -> password map
#   app.api_key    - Adafruit IO key loaded above
# NOTE(review): credentials are hard-coded in plain text; move them to an
# environment variable or secrets store before deploying.
app.config.update({
    'app.root_path': sys.path[0] + '/',
    'app.logged_in': {
        'username': '',
        'status': False
    },
    'app.accounts': {
        'SMeter 2': 'password2',
        'SMeter 3': 'password3',
        'SMeter 4': 'password4'
    },
    'app.api_key': api_key
})
##########################################
# MAIN WEBSITE FUNCTIONS #
##########################################
# -- landing page
@app.route('/')
def get_index():
    """Landing page: show the feed groups of the logged-in user, else the login form."""
    print('in get_index()')
    session = request.app.config['app.logged_in']
    if not session['status']:
        # Nobody is logged in: fall back to the login form.
        print('user is not logged in... serving: login.tpl')
        return template('login')
    username = session['username']
    print('{} is logged in... serving: groups.tpl'.format(username))
    aio_key = request.app.config['app.api_key']['key']
    group_url = "https://io.adafruit.com/api/v2/LukeZ1986/groups?x-aio-key={}".format(aio_key)
    # Fetch every group from Adafruit IO and keep only the one named after the user.
    groups = pd.DataFrame(pd.read_json(group_url), columns=['name', 'key', 'feeds'])
    groups = groups[groups['name'] == username]
    group_key = groups.reset_index()['key'][0]
    group_feeds = [
        [feed['key'], feed['id'], feed['name'], feed['created_at']]
        for feed in groups['feeds'].explode()
    ]
    return template('groups', username=username, group_key=group_key, group_feeds=group_feeds)
# -- login page
@app.route('/login')
def get_login():
    """GET /login: show the feed groups if a user is already logged in, else the login form."""
    print('in get_login()')
    status = request.app.config['app.logged_in']['status']
    if (status):
        # BUG FIX: this is a GET route, so request.forms is empty and the old
        # request.forms.get('username') always returned None, which made the
        # DataFrame filter below match nothing. Use the stored session name.
        username = request.app.config['app.logged_in']['username']
        print('user is logged in... serving: index.tpl')
        api_key = request.app.config['app.api_key']['key']
        group_url = "https://io.adafruit.com/api/v2/LukeZ1986/groups?x-aio-key={}".format(api_key)
        # Fetch every group from Adafruit IO and keep only the one named after the user.
        df = pd.DataFrame(pd.read_json(group_url), columns=['name','key','feeds'])
        df = df[df['name'] == username]
        group_key = df.reset_index()['key'][0]
        group_feeds = [[g['key'],g['id'],g['name'],g['created_at']] for g in df['feeds'].explode()]
        return template('groups', username=username, group_key=group_key, group_feeds=group_feeds)
    else:
        print('user is not logged in... serving: login.tpl')
        return template('login')
# -- logoff
@app.route('/logoff')
def get_logoff():
    """Clear the (single, shared) login session and show the login form."""
    print('logoff()')
    # Consistency fix: use request.app.config like every other handler in this
    # module instead of reaching for the module-level `app` object directly.
    request.app.config['app.logged_in']['status'] = False
    request.app.config['app.logged_in']['username'] = ''
    return template('login')
# -- check login credentials
# -- on sucesss, fetch all group feeds
@app.route('/login', method='POST')
def post_login():
    """POST /login: validate credentials; on success store the session and show the user's feed groups."""
    print('in post_login')
    uname = request.forms.get('username')
    pword = request.forms.get('password')
    print('uname = {}, pword = {}'.format(uname, pword))
    try:
        password = request.app.config['app.accounts'][uname]
        print('password = {}, so username: {} exists...'.format(password, uname))
    except KeyError as e:
        # Narrowed from `except Exception`: only an unknown username raises here.
        print(e)
        return template('login', error='Bad Username')
    if pword == password:
        app.config['app.logged_in']['status'] = True
        app.config['app.logged_in']['username'] = uname
        print('{} successfully logged: fetching feeds...'.format(uname))
        api_key = request.app.config['app.api_key']['key']
        group_url = "https://io.adafruit.com/api/v2/LukeZ1986/groups?x-aio-key={}".format(api_key)
        # Fetch every group from Adafruit IO and keep only the one named after the user.
        df = pd.DataFrame(pd.read_json(group_url), columns=['name','key','feeds'])
        df = df[df['name'] == uname]
        group_key = df.reset_index()['key'][0]
        group_feeds = [[g['key'],g['id'],g['name'],g['created_at']] for g in df['feeds'].explode()]
        print(uname)
        print(group_key)
        return template('groups', username=uname, group_key=group_key, group_feeds=group_feeds)
    else:
        return template('login', error='Bad Password')
# -- fetch all feeds associated with group key
# -- return data formated for DataTables
@app.route('/table')
def get_table():
    """Fetch one feed's data points and render them as a DataTables-ready HTML table."""
    print('table selected...')
    group_key = request.query.get('group_key')
    feed_key = request.query.get('feed_key')
    api_key = request.app.config['app.api_key']['key']
    user_name = request.app.config['app.logged_in']['username']
    feed_url = "https://io.adafruit.com/api/v2/LukeZ1986/groups/{}/feeds/{}/data?x-aio-key={}"
    df = pd.read_json(feed_url.format(group_key, feed_key, api_key))
    df = df.sort_values(by=['created_at'], ascending=True).reset_index()
    # pandas emits its own <table> header; rewrite it with the Bootstrap /
    # DataTables attributes the "tables" template expects.
    tabledata = df.to_html(index=False, columns=['created_at', 'value', 'id', 'feed_id', 'feed_key', 'expiration'], escape=False).replace('<table border="1" class="dataframe">', '<table class="table table-bordered" id="dataTable" width="100%" cellspacing="0">').replace('<tr style="text-align: right;">', '<tr>')
    return template('tables', user_name=user_name, feed_key=feed_key, tabledata=tabledata)
# -- fetch all feeds associated with group key
# -- return data formated for chart.js
@app.route('/chart')
def get_chart():
    """Fetch one feed's data points, write a chart.js config file, and render the chart page."""
    chart_type = 'line'
    group_key = request.query.get('group_key')
    feed_key = request.query.get('feed_key')
    api_key = request.app.config['app.api_key']['key']
    user_name = request.app.config['app.logged_in']['username']
    # Filesystem-safe name: strip punctuation and spaces from each part.
    safe_name = '_'.join([c.translate(str.maketrans('','',string.punctuation+' ')) for c in [feed_key, chart_type]])
    print('chart selected...')
    print(feed_key)
    feed_url = "https://io.adafruit.com/api/v2/LukeZ1986/groups/{}/feeds/{}/data?x-aio-key={}"
    df = pd.read_json(feed_url.format(group_key, feed_key, api_key))
    df = df.sort_values(by=['created_at'], ascending=True).reset_index()
    # X-axis labels: ISO timestamps with the "+HH:MM" UTC-offset suffix dropped.
    labels = [dt.isoformat().split('+')[0] for dt in df['created_at']]
    data = list(df['value'])
    # NOTE(review): chart_type and safe_name are recomputed below with values
    # identical to the ones above; the duplicates look like leftovers.
    chart_type = 'line'
    safe_name = '_'.join([c.translate(str.maketrans('','',string.punctuation+' ')) for c in [feed_key, chart_type]])
    label = 'Time Series Plot: {}'.format(feed_key)
    # Render the chart.js template and persist it so the "charts" page can load it.
    # NOTE(review): the first argument to .replace() below appears to be a
    # mangled HTML entity (probably '&#39;') introduced by text extraction --
    # confirm against the original repository before relying on this line.
    with open('charts/line/{}.js'.format(safe_name), 'w') as f:
        chart_js = template('charts/line/template_line.js', group_key=group_key, feed_key=feed_key, safe_name=safe_name, label=label, labels=labels, data=data).replace(''','"')
        f.write(chart_js)
    return template('charts', user_name=user_name, safe_name=safe_name, feed_key=feed_key)
##########################################
##########################################
# ALLOW LOADING OF ALL STATIC FILES #
##########################################
# Static Routes
@app.route('/<filename:path>')
def serve_static_file(filename):
    """Catch-all route: serve *filename* from the configured application root."""
    root = request.app.config['app.root_path']
    print('serving file: {}{}'.format(root, filename))
    return static_file(filename, root=root)
# Listen port may be overridden via the PORT environment variable (default 8800).
port = int(os.environ.get('PORT', 8800))
# NOTE(review): the bind address is a hard-coded LAN IP; confirm it matches the
# deployment host (or bind to '0.0.0.0' to listen on all interfaces).
run(app, host='192.168.1.37', port=port, reloader=True, debug=True)
| 37.01005 | 312 | 0.609097 |
acfb36c01b91dcc2390e79ad7873767433fe39d7 | 2,633 | py | Python | src/unladen/filesystem.py | dfm/ladv | 580867c01fd3e696f1fdeffb6f979f2c48f898e6 | [
"MIT"
] | 1 | 2021-05-05T21:07:31.000Z | 2021-05-05T21:07:31.000Z | src/unladen/filesystem.py | dfm/unladen | 580867c01fd3e696f1fdeffb6f979f2c48f898e6 | [
"MIT"
] | null | null | null | src/unladen/filesystem.py | dfm/unladen | 580867c01fd3e696f1fdeffb6f979f2c48f898e6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__all__ = ["copy_source_to_target"]
import shutil
from glob import glob
from pathlib import Path
from typing import Iterable, Optional
from . import html
from .versions import Database, Rule, Version
def copy_source_to_target(
    *,
    source: Path,
    target: Path,
    version: Version,
    base_url: Optional[str] = None,
    alias_rules: Optional[Iterable[Rule]] = None,
    include_version_menu: bool = True,
    verbose: bool = False,
) -> None:
    """Publish a built documentation tree into the versioned target directory.

    The version is registered in the ``unladen.json`` database stored inside
    *target*, alias directories (e.g. "latest") are rebuilt according to
    *alias_rules*, and, when *include_version_menu* is true, a version menu is
    injected into every HTML file of the freshly copied tree.

    Args:
        source: Directory containing the freshly built files to publish.
        target: Root of the deployed site; created if missing.
        version: Version being published; its ``path`` names the subdirectory.
        base_url: Base URL handed to the version-menu template.
        alias_rules: Rules consumed by ``Database.update_aliases``.
        include_version_menu: Whether to inject the version menu into HTML files.
        verbose: Print progress messages for file operations.
    """
    target.mkdir(parents=True, exist_ok=True)

    # Load the database if it exists, otherwise start a fresh one
    database_path = target / "unladen.json"
    if database_path.is_file():
        database = Database.load(database_path)
    else:
        database = Database()

    # Add this version to the database
    database.add_version(version)
    fullpath = target / version.path

    # Delete any existing directory or file at the target path
    rm_file_or_dir(fullpath, verbose=verbose)

    # Copy the files
    shutil.copytree(source, fullpath)

    # Remove existing aliases
    for name in database.aliases.keys():
        rm_file_or_dir(target / name, verbose=verbose)

    # Update alias links and materialize each alias as a copy of its target tree
    database.update_aliases(rules=alias_rules)
    for name, ref in database.aliases.items():
        try:
            ref_version = database[ref]
        except KeyError:
            print(f"Alias {name} for ref {ref} has no matching version")
            continue
        src = target / ref_version.path
        dst = target / name
        rm_file_or_dir(dst, verbose=verbose)
        if verbose:
            print(f"Copying {src} -> {dst}")
        shutil.copytree(src, dst)
    database.save(database_path)

    # Inject the version info into the HTML
    if include_version_menu:
        version_style = html.load_style("versions")
        version_menu = html.render_template(
            "versions",
            database=database,
            current_version=version,
            base_url=base_url,
        )
        for filename in glob(f"{fullpath}/**/*.html", recursive=True):
            # BUG FIX: this print ran unconditionally (debug leftover); every
            # other progress message in this module is gated on `verbose`.
            if verbose:
                print(filename)
            with open(filename, "r") as f:
                txt = f.read()
            txt = html.inject_into_html(
                txt, version_style=version_style, version_menu=version_menu
            )
            with open(filename, "w") as f:
                f.write(txt)
def rm_file_or_dir(path: Path, verbose: bool = False) -> None:
    """Remove *path* from disk, whether it is a file, a symlink, or a directory tree.

    Does nothing when *path* does not exist. With *verbose* set, print what is
    being removed.
    """
    if not path.exists():
        return
    if verbose:
        print(f"{path} exists, removing")
    if path.is_file() or path.is_symlink():
        path.unlink()
    else:
        shutil.rmtree(path)
| 28.010638 | 75 | 0.617167 |
acfb36df6187406c9c697ef673a0c2b12d774091 | 1,525 | py | Python | mazeGen.py | Kartik-Nagpal/Tank-Trouble-IQ-Tester | 8b125710b2c31cafd5eb4aa614094746806337df | [
"MIT"
] | null | null | null | mazeGen.py | Kartik-Nagpal/Tank-Trouble-IQ-Tester | 8b125710b2c31cafd5eb4aa614094746806337df | [
"MIT"
] | null | null | null | mazeGen.py | Kartik-Nagpal/Tank-Trouble-IQ-Tester | 8b125710b2c31cafd5eb4aa614094746806337df | [
"MIT"
] | null | null | null | import numpy
from numpy.random import random_integers as rand
import matplotlib.pyplot as pyplot
def mazeGen(width=20, height=20, complexity=.1, density=.3):
    """Generate a random maze as a 2-D boolean numpy array (True = wall).

    Uses a randomized wall-carving scheme: `density` scales the number of wall
    seeds planted on the even grid, `complexity` scales how far each seed's
    wall is allowed to snake.

    Args:
        width, height: requested size; rounded down to odd dimensions.
        complexity: fraction scaled to the number of growth steps per seed.
        density: fraction scaled to the number of wall seeds.

    Returns:
        numpy bool array of shape ((height//2)*2 + 1, (width//2)*2 + 1) with
        all four border rows/columns set to True.
    """
    def rand(low, high):
        # BUG FIX: numpy.random.random_integers (inclusive on both ends) was
        # deprecated and later removed from NumPy; emulate it with randint,
        # whose upper bound is exclusive.
        return numpy.random.randint(low, high + 1)

    # Only odd shapes
    shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)
    # Adjust complexity and density relative to maze size
    complexity = int(complexity * (5 * (shape[0] + shape[1])))
    density = int(density * ((shape[0] // 2) * (shape[1] // 2)))
    # Build actual maze
    Z = numpy.zeros(shape, dtype=bool)
    # Fill borders
    Z[0, :] = Z[-1, :] = 1
    Z[:, 0] = Z[:, -1] = 1
    # Make aisles: plant `density` seeds and let each grow `complexity` steps
    for _ in range(density):
        x, y = rand(0, shape[1] // 2) * 2, rand(0, shape[0] // 2) * 2
        Z[y, x] = 1
        for _ in range(complexity):
            # Candidate cells two steps away in each cardinal direction
            neighbours = []
            if x > 1:
                neighbours.append((y, x - 2))
            if x < shape[1] - 2:
                neighbours.append((y, x + 2))
            if y > 1:
                neighbours.append((y - 2, x))
            if y < shape[0] - 2:
                neighbours.append((y + 2, x))
            if neighbours:
                y_, x_ = neighbours[rand(0, len(neighbours) - 1)]
                if Z[y_, x_] == 0:
                    # Claim the chosen cell and the cell between, then move on
                    Z[y_, x_] = 1
                    Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
                    x, y = x_, y_
    print(Z)
    return Z
#pyplot.figure(figsize=(8, 8));
#pyplot.imshow(mazeGen(100, 50, .9, .9), cmap=pyplot.cm.binary, interpolation='nearest');
#pyplot.xticks([]), pyplot.yticks([]);
#pyplot.show();
| 33.888889 | 89 | 0.487213 |
acfb374033d6bc7cfb6774f67c3ab0db80736680 | 5,708 | py | Python | tests/custom_assertions.py | jlanga/exfi | 6cd28423213aba0ab8ac191e002396ddc84c4be3 | [
"MIT"
] | 2 | 2017-11-02T11:31:41.000Z | 2020-11-28T07:42:27.000Z | tests/custom_assertions.py | jlanga/exfi | 6cd28423213aba0ab8ac191e002396ddc84c4be3 | [
"MIT"
] | 36 | 2017-04-26T09:36:54.000Z | 2021-04-16T12:35:52.000Z | tests/custom_assertions.py | jlanga/exon_finder | 6cd28423213aba0ab8ac191e002396ddc84c4be3 | [
"MIT"
] | 2 | 2017-07-23T23:03:36.000Z | 2017-09-29T15:30:55.000Z | #!/usr/bin/env python3
"""
tests.custom_assertions.py: custom assertions for unit tests:
- assertEqualListOfSeqrecords: check if a list of seqrecords have:
- the same length
- the same id
- the same sequence
- assertEqualSpliceGraphs: check if two splice graphs:
- are isomorphic with nx.is_isomorphic
- each node have the same coordinates
- each edge have the same overlap
"""
from typing import List, Dict
import networkx as nx
import pandas as pd
from Bio.SeqRecord import SeqRecord
def check_same_keys(dict1: dict, dict2: dict) -> None:
    """Raise KeyError unless *dict1* and *dict2* contain exactly the same keys."""
    if set(dict1) != set(dict2):
        raise KeyError("Keys differ: {keys1} {keys2}".format(
            keys1=dict1.keys(), keys2=dict2.keys()
        ))
def check_same_values(dict1: dict, dict2: dict) -> None:
    """Raise ValueError at the first key whose values differ between the dicts.

    Assumes every key of *dict1* is also present in *dict2*.
    """
    for key in dict1:
        value1 = dict1[key]
        value2 = dict2[key]
        if value1 != value2:
            raise ValueError("{key1}: {value1} != {key2} : {value2}".format(
                key1=key, value1=value1, key2=key, value2=value2
            ))
def check_same_dict(dict1: dict, dict2: dict) -> None:
    """Raise unless the two dicts are identical: same key set, then same values."""
    check_same_keys(dict1, dict2)
    check_same_values(dict1, dict2)
def check_equal_node2coord(sg1: dict, sg2: dict) -> None:
    """Raise unless both splice graphs carry identical node "coordinates" attributes."""
    coords1 = nx.get_node_attributes(G=sg1, name="coordinates")
    coords2 = nx.get_node_attributes(G=sg2, name="coordinates")
    check_same_dict(coords1, coords2)
def check_equal_edge2overlap(sg1: dict, sg2: dict) -> None:
    """Raise unless both splice graphs carry identical edge "overlaps" attributes."""
    overlaps1 = nx.get_edge_attributes(G=sg1, name="overlaps")
    overlaps2 = nx.get_edge_attributes(G=sg2, name="overlaps")
    check_same_dict(overlaps1, overlaps2)
def check_equal_df_dict_values(dict1: dict, dict2: dict) -> None:
    """Raise ValueError unless each key maps to array-equal data frames.

    Solution: https://stackoverflow.com/a/33223893
    """
    from numpy import array_equal
    for key, frame1 in dict1.items():
        frame2 = dict2[key]
        if not array_equal(frame1, frame2):
            raise ValueError("df1 != df2:\n{df1}\n{df2}".format(df1=frame1, df2=frame2))
def check_equal_splice_graphs(sg1: dict, sg2: dict) -> None:
    """Check if two splice graphs are:
    - isomorphic
    - node2coord are equal
    - edge2overlaps are equal
    """
    if not nx.is_isomorphic(sg1, sg2):
        # BUG FIX: the AssertionError was previously constructed but never
        # raised, so non-isomorphic graphs slipped through this check.
        raise AssertionError("splicegraph are not isomorphic")
    check_equal_node2coord(sg1, sg2)
    check_equal_edge2overlap(sg1, sg2)
def check_equal_dict_of_sg(dict1: dict, dict2: dict) -> None:
    """Raise unless both dicts map the same keys to equal splice graphs."""
    check_same_keys(dict1, dict2)
    for key, graph1 in dict1.items():
        check_equal_splice_graphs(graph1, dict2[key])
def check_equal_length(iter1: List, iter2: List) -> None:
    """Raise AssertionError unless the two sequences have the same length."""
    if len(iter1) != len(iter2):
        raise AssertionError('Lengths differ: {len_1} != {len_2}'.format(
            len_1=len(iter1), len_2=len(iter2)
        ))
def check_equal_seqrecrods(seqrecord1: SeqRecord, seqrecord2: SeqRecord) -> None:
    """Raise AssertionError unless both records share the same id and sequence."""
    same_id = seqrecord1.id == seqrecord2.id
    same_seq = seqrecord1.seq == seqrecord2.seq
    if not (same_id and same_seq):
        raise AssertionError(
            'Records differ: {id1}: {seq1} {id2}: {seq2}'.format(
                id1=seqrecord1.id, seq1=seqrecord1.seq, id2=seqrecord2.id, seq2=seqrecord2.seq
            )
        )
def check_equal_list_seqrecords(iter1: List[SeqRecord], iter2: List[SeqRecord]) -> None:
    """Compare two lists of SeqRecords element by element (by id and seq)."""
    for index, record1 in enumerate(iter1):
        check_equal_seqrecrods(record1, iter2[index])
class CustomAssertions:
    """
    Custom assertions not covered in unittest:
    - assertEqualDict
    - assertEqualListOfSeqrecords
    - assertEqualSpliceGraphs
    - assertEqualDictOfDF
    - assertEqualDictOfSpliceGraphs
    """

    # FIX: these are @classmethod methods, so the first parameter is the class;
    # it was previously named `self` (suppressed with bad-classmethod-argument).
    # Renamed to `cls` and dropped the suppression.

    @classmethod
    def assertEqualDict(cls, dict1: dict, dict2: dict) -> None:
        """Check if two dicts are equal (values are compared with ==)"""
        # pylint: disable=invalid-name
        check_same_dict(dict1, dict2)

    @classmethod
    def assertEqualListOfSeqrecords(
            cls, records1: List[SeqRecord], records2: List[SeqRecord]) -> None:
        """
        Check if each element of list_of_seqrecords1 is exactly equal to each one of
        list_of_seqrecords2.
        """
        # pylint: disable=invalid-name
        check_equal_length(records1, records2)
        check_equal_list_seqrecords(records1, records2)

    @classmethod
    def assertEqualSpliceGraphs(cls, sg1: dict, sg2: dict) -> None:
        """Check if two splice graphs are equal (isomorphism + node/edge attributes)."""
        # pylint: disable=invalid-name
        check_equal_splice_graphs(sg1, sg2)

    @classmethod
    def assertEqualDictOfDF(
            cls, dict1: Dict[str, pd.DataFrame], dict2: Dict[str, pd.DataFrame]) -> None:
        """Check if two dicts of pd.DataFrame are equal"""
        # pylint: disable=invalid-name
        check_same_keys(dict1, dict2)
        check_equal_df_dict_values(dict1, dict2)

    @classmethod
    def assertEqualDictOfSpliceGraphs(cls, dict1: dict, dict2: dict) -> None:
        """Check if two dicts of nx.DiGraph and some data attached to nodes and edges are equal"""
        # pylint: disable=invalid-name
        check_equal_dict_of_sg(dict1, dict2)
| 32.067416 | 98 | 0.667835 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.