hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b9d5d8c2ca79305db8f5ef13af9a9c1b4d74afc8 | 3,011 | py | Python | Py3DViewer/visualization/DrawableSkeleton.py | hovey/py3DViewer | 7ae1697aa4860430d0d94b854f8b1f2a4b2d895f | [
"MIT"
] | 24 | 2019-10-29T14:05:08.000Z | 2022-03-18T09:05:30.000Z | Py3DViewer/visualization/DrawableSkeleton.py | hovey/py3DViewer | 7ae1697aa4860430d0d94b854f8b1f2a4b2d895f | [
"MIT"
] | 6 | 2020-12-13T01:49:29.000Z | 2021-01-05T15:31:46.000Z | Py3DViewer/visualization/DrawableSkeleton.py | hovey/py3DViewer | 7ae1697aa4860430d0d94b854f8b1f2a4b2d895f | [
"MIT"
] | 5 | 2020-02-03T16:12:08.000Z | 2022-03-23T17:58:03.000Z | import pythreejs as three
import numpy as np
from time import time, sleep
from .Colors import colors
from ..utils import Observer, ColorMap
import threading
import copy
class DrawableSkeleton(Observer):
    """Wraps a skeleton geometry in pythreejs objects so it can be rendered.

    The skeleton is drawn as a red line-segment wireframe.  When ``reactive``
    is True this object registers itself as an Observer of the geometry and
    refreshes the wireframe (on a background thread) whenever the geometry
    notifies a change.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump — verify nesting against the upstream file.
    """

    def __init__(self, geometry, skeleton_color = None, reactive = False):
        """Build the wireframe for *geometry*; optionally attach as observer."""
        super(DrawableSkeleton, self).__init__()
        self.geometry = geometry
        if reactive:
            # Observer pattern: geometry will call back into update()
            self.geometry.attach(self)
        self.wireframe = self.__initialize_wireframe()
        self.color = skeleton_color
        self.threejs_items = [self.wireframe]
        # updating/queue implement a simple "coalesce concurrent updates"
        # scheme: at most one worker thread runs; a second request is queued.
        self.updating = False
        self.queue = False

    def update_wireframe_color(self, new_color):
        """Set the wireframe line color (a three.js color string)."""
        self.wireframe.material.color = new_color

    def update_wireframe_opacity(self, new_opacity):
        """Set the wireframe opacity (0.0 transparent .. 1.0 opaque)."""
        self.wireframe.material.opacity = new_opacity

    def __initialize_wireframe(self):
        """Create the LineSegments object holding the skeleton edges."""
        edges_material = three.LineBasicMaterial(color='#ff0000',
                                                 linewidth = 1,
                                                 depthTest=False,
                                                 opacity=.2,
                                                 transparent=True)
        wireframe = self.__get_wireframe_from_boundary()
        return three.LineSegments(wireframe, material = edges_material)

    def __as_buffer_attr(self, array):
        """Wrap a numpy array as a dynamic (updatable) three.js buffer."""
        return three.BufferAttribute(array, normalized = False, dynamic = True)

    def __get_wireframe_from_boundary(self):
        """Build a BufferGeometry whose position buffer holds the edge endpoints.

        The buffer is over-allocated (3x the current edge count) so later
        updates can grow without reallocating; setDrawRange limits rendering
        to the valid prefix.
        """
        edges = self.geometry.vertices[self.geometry.as_edges_flat()].astype(np.float32)
        buffer = np.empty((int(edges.shape[0] * 3), 3), dtype=np.float32).reshape(-1, 3)
        buffer[:edges.shape[0]] = edges
        vertices = self.__as_buffer_attr(buffer)
        wireframe = three.BufferGeometry(attributes={'position': vertices})
        wireframe.exec_three_obj_method("setDrawRange", 0, edges.shape[0])
        return wireframe

    def run(self, geometry):
        """Worker-thread body: push the current edges into the GPU buffer."""
        edges = self.geometry.vertices[self.geometry.as_edges_flat()].astype(np.float32)
        self.wireframe.geometry.attributes['position'].array[:edges.shape[0]] = edges
        self.wireframe.geometry.exec_three_obj_method('setDrawRange', 0, edges.shape[0])
        # Self-assignment looks redundant but re-triggers the traitlets
        # change notification so the browser side re-reads the buffer.
        self.wireframe.geometry.attributes['position'].array = self.wireframe.geometry.attributes['position'].array
        if self.queue:
            # Another update arrived while we were running: start it now.
            self.queue = False
            self.updating = False
            self.update()
        else:
            self.updating = False

    def update(self):
        """Observer callback: refresh the wireframe on a daemon thread."""
        if (not self.updating):
            self.updating=True
            thread = threading.Thread(target=self.run, args=(self.geometry.copy(),))
            thread.daemon = True
            thread.start()
        else:
            # An update is in flight; remember to run once more when it ends.
            self.queue = True

    @property
    def center(self):
        # Delegates to the wrapped geometry.
        return self.geometry.center

    @property
    def scale(self):
        # Delegates to the wrapped geometry.
        return self.geometry.scale
7db621272459a92bfc33824d24d1794217a2c9d6 | 5,672 | py | Python | tests/test_server_auth.py | jracabado/edgedb | 74308d3e751451f78f86448710766693f291b394 | [
"Apache-2.0"
] | null | null | null | tests/test_server_auth.py | jracabado/edgedb | 74308d3e751451f78f86448710766693f291b394 | [
"Apache-2.0"
] | null | null | null | tests/test_server_auth.py | jracabado/edgedb | 74308d3e751451f78f86448710766693f291b394 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import edgedb
from edb.schema import defines as s_def
from edb.testbase import server as tb
class TestServerAuth(tb.ConnectedTestCase):
    """Integration tests for EdgeDB server authentication (roles, SCRAM, Trust).

    NOTE(review): indentation (including inside the EdgeQL string literals,
    which is cosmetic to the server) was reconstructed from a
    whitespace-mangled dump — verify against the upstream file.
    """

    PARALLELISM_GRANULARITY = 'system'
    TRANSACTION_ISOLATION = False

    async def test_server_auth_01(self):
        """Exercise password auth, Trust/SCRAM priority, and password_hash."""
        await self.con.query('''
            CREATE SUPERUSER ROLE foo {
                SET password := 'foo-pass';
            }
        ''')

        # bad password
        with self.assertRaisesRegex(
                edgedb.AuthenticationError,
                'authentication failed'):
            await self.connect(
                user='foo',
                password='wrong',
            )

        # good password
        conn = await self.connect(
            user='foo',
            password='foo-pass',
        )
        await conn.aclose()

        # Install a Trust method at priority 0 (lower priority wins later).
        await self.con.query('''
            CONFIGURE INSTANCE INSERT Auth {
                comment := 'test',
                priority := 0,
                method := (INSERT Trust),
            }
        ''')

        try:
            # bad password, but the trust method doesn't care
            conn = await self.connect(
                user='foo',
                password='wrong',
            )
            await conn.aclose()

            # insert password auth with a higher priority
            await self.con.query('''
                CONFIGURE INSTANCE INSERT Auth {
                    comment := 'test-2',
                    priority := -1,
                    method := (INSERT SCRAM),
                }
            ''')

            with self.assertRaisesRegex(
                edgedb.AuthenticationError,
                'authentication failed',
            ):
                # bad password is bad again
                await self.connect(
                    user='foo',
                    password='wrong',
                )
        finally:
            # Always restore the instance auth config and drop the test role.
            await self.con.query('''
                CONFIGURE INSTANCE RESET Auth FILTER .comment = 'test'
            ''')
            await self.con.query('''
                CONFIGURE INSTANCE RESET Auth FILTER .comment = 'test-2'
            ''')
            await self.con.query('''
                DROP ROLE foo;
            ''')

        # Basically the second test, but we can't run it concurrently
        # because disabling Auth above conflicts with the following test
        await self.con.query('''
            CREATE SUPERUSER ROLE bar {
                SET password_hash := 'SCRAM-SHA-256$4096:SHzNmIppMwXnPSWgY2yMvg==$5zmnXMm9+mn2nseKPF1NTKvuoBPVSWgxHrnptxpQgcU=:/c1vJV+MmS7v9vv6CDVo56OyOJkNd3F+m3JIBB1U7ho=';
            }
        ''')  # noqa

        try:
            # The pre-computed hash corresponds to 'bar-pass'.
            conn = await self.connect(
                user='bar',
                password='bar-pass',
            )
            await conn.aclose()

            await self.con.query('''
                ALTER ROLE bar {
                    SET password_hash := 'SCRAM-SHA-256$4096:mWDBY53yzQ4aDet5erBmbg==$ZboQEMuUhC6+1SChp2bx1qSRBZGAnyV4I8T/iK+qeEs=:B7yF2k10tTH2RHayOg3rw4Q6wqf+Fj5CuXR/9CyZ8n8=';
                }
            ''')  # noqa

            # The new hash corresponds to 'bar-pass-2'.
            conn = await self.connect(
                user='bar',
                password='bar-pass-2',
            )
            await conn.aclose()

            # bad (old) password
            with self.assertRaisesRegex(
                    edgedb.AuthenticationError,
                    'authentication failed'):
                await self.connect(
                    user='bar',
                    password='bar-pass',
                )

            # password and password_hash are mutually exclusive.
            with self.assertRaisesRegex(
                    edgedb.EdgeQLSyntaxError,
                    'cannot specify both `password` and `password_hash`'
                    ' in the same statement'):
                await self.con.query('''
                    CREATE SUPERUSER ROLE bar1 {
                        SET password := 'hello';
                        SET password_hash := 'SCRAM-SHA-256$4096:SHzNmIppMwXnPSWgY2yMvg==$5zmnXMm9+mn2nseKPF1NTKvuoBPVSWgxHrnptxpQgcU=:/c1vJV+MmS7v9vv6CDVo56OyOJkNd3F+m3JIBB1U7ho=';
                    }
                ''')  # noqa

            # Only SCRAM-SHA-256 verifiers are accepted.
            with self.assertRaisesRegex(
                    edgedb.InvalidValueError,
                    'invalid SCRAM verifier'):
                await self.con.query('''
                    CREATE SUPERUSER ROLE bar2 {
                        SET password_hash := 'SCRAM-BLAKE2B$4096:SHzNmIppMwXnPSWgY2yMvg==$5zmnXMm9+mn2nseKPF1NTKvuoBPVSWgxHrnptxpQgcU=:/c1vJV+MmS7v9vv6CDVo56OyOJkNd3F+m3JIBB1U7ho=';
                    }
                ''')  # noqa
        finally:
            await self.con.query("DROP ROLE bar")

    async def test_long_role_name(self):
        """Role names longer than the schema-defined maximum are rejected."""
        with self.assertRaisesRegex(
                edgedb.SchemaDefinitionError,
                r'Role names longer than \d+ '
                r'characters are not supported'):
            await self.con.execute(
                f'CREATE SUPERUSER ROLE myrole_{"x" * s_def.MAX_NAME_LENGTH};')
| 33.761905 | 181 | 0.530148 |
830fca2705b8e9062c11dde88764302ed8bea23c | 1,986 | py | Python | sphinx-sources/Examples/FourierOptics/PatternRecognition.py | opticspy/lightpipes-python | dbd66e46ca8263a6e9bf7690e5f2b2551f93f4cb | [
"BSD-3-Clause"
] | null | null | null | sphinx-sources/Examples/FourierOptics/PatternRecognition.py | opticspy/lightpipes-python | dbd66e46ca8263a6e9bf7690e5f2b2551f93f4cb | [
"BSD-3-Clause"
] | null | null | null | sphinx-sources/Examples/FourierOptics/PatternRecognition.py | opticspy/lightpipes-python | dbd66e46ca8263a6e9bf7690e5f2b2551f93f4cb | [
"BSD-3-Clause"
] | null | null | null | from LightPipes import *
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
f=0.6*m
gridsize=14*mm
wavelength=632.8*nm
def rgb2gray(rgb):
    """Collapse an RGB(A) image array to grayscale luminance.

    Uses the ITU-R 601 weights (0.299, 0.587, 0.114) on the first three
    channels; any alpha channel is ignored.  Returns an array with the
    channel axis removed.
    """
    weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[..., :3], weights)
def Fouriertransform(F):
    """Optically Fourier-transform field *F* with a 2f lens system.

    Propagates a focal length f, passes through an on-axis lens of focal
    length f, then propagates another f — the classic 2f Fourier setup.
    Uses the module-level focal length ``f``.
    """
    F=Forvard(f,Lens(f,0,0,Forvard(f,F)))
    return F
def MakePhaseFilter(img):
    """Build a matched phase-only filter for grayscale image *img*.

    Starts a plane wave on the module-level grid, substitutes the image as
    the intensity profile, Fourier-transforms it, and returns the negated
    phase (so multiplying a transformed scene by this filter cancels the
    target's phase, producing a correlation peak).
    """
    F=Begin(gridsize,wavelength,N)
    F=SubIntensity(img,F)
    F=Fouriertransform(F)
    phase=np.asarray(np.negative(Phase(F)))
    return phase
# --- Demo: optical pattern recognition via matched phase filtering. -------
# Load the individual target letters and the composite 'ABC' scene.
img=[
    rgb2gray(mpimg.imread('A.png')),
    rgb2gray(mpimg.imread('B.png')),
    rgb2gray(mpimg.imread('C.png')),
    rgb2gray(mpimg.imread('X.png'))
]
ABC=rgb2gray(mpimg.imread('ABC.png'))
choice=['A','B','C']
# Grid resolution is taken from the scene image (assumes a square image —
# TODO confirm).
N=ABC.shape[0]
X=range(N)
Z=range(N)
X, Z=np.meshgrid(X,Z)
#plt.contourf(X,Z,ABC,cmap='gray');plt.axis('off');plt.axis('equal')
#plt.show()

# Fourier-transform the scene once; reused for every filter below.
F1=Begin(gridsize,wavelength,N)
F1=MultIntensity(ABC,F1)
F1=Fouriertransform(F1)
# Reference: inverse transform with no filter applied.
I_NOFILTER=Intensity(0,Fouriertransform(F1))
#phase=MakePhaseFilter(img[0])
#plt.contourf(X,Z,phase,cmap='hot');plt.axis('off');plt.axis('equal')
#plt.show()
#phase=MakePhaseFilter(img[0])
#F=MultPhase(phase,F1)
#I=Intensity(1,Fouriertransform(F))
#plt.contourf(X,Z,I,cmap='hot');plt.axis('off');plt.axis('equal')
#plt.show()

fig, axs = plt.subplots(1,4,figsize=(12,5))
fig.suptitle('Pattern recognition\nusing Fourier optics', fontsize=20)
fig.canvas.manager.set_window_title('Pattern recognition example')
axs[0].contourf(X,Z,I_NOFILTER,cmap='hot');
axs[0].axis('off');axs[0].axis('equal');
axs[0].text(50,N,'non-filtered image')

# Apply each letter's matched filter; a bright correlation peak marks where
# that letter occurs in the scene.
for l in range(3):
    phase=MakePhaseFilter(img[l])
    F=MultPhase(phase,F1)
    I=Intensity(1,Fouriertransform(F))
    plt.contourf(X,Z,I,cmap='hot');plt.axis('off');plt.axis('equal')
    axs[l+1].contourf(X,Z,I,cmap='hot');
    axs[l+1].axis('off');
    axs[l+1].axis('equal');
    axs[l+1].text(100,N,choice[l])

plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.01, hspace=None)
plt.show()
| 26.837838 | 91 | 0.683787 |
cb0f47f354429651f2a0c50d93e3989bd8dc9469 | 20,030 | py | Python | pulsemeeter/MainWindow.py | dacid44/pulsemeeter | 5367fe910c8d1eea2a0523549c2c26c446547445 | [
"MIT"
] | null | null | null | pulsemeeter/MainWindow.py | dacid44/pulsemeeter | 5367fe910c8d1eea2a0523549c2c26c446547445 | [
"MIT"
] | null | null | null | pulsemeeter/MainWindow.py | dacid44/pulsemeeter | 5367fe910c8d1eea2a0523549c2c26c446547445 | [
"MIT"
] | null | null | null | import os
import re
import signal
import shutil
import threading
import sys
import json
from .EqPopover import EqPopover
from .RnnoisePopover import RnnoisePopover
from .LatencyPopover import LatencyPopover
from .AppListWidget import AppList
from .PortSelectPopover import PortSelectPopover
from .settings import GLADEFILE, LAYOUT_DIR
from gi.repository import Gtk,Gdk,Gio,GLib
class MainWindow(Gtk.Window):
    """Pulsemeeter's main GTK window.

    Builds the UI from a Glade layout, wires hardware-input (hi), virtual-
    input (vi), hardware-output (a) and virtual-output (b) channel strips to
    the ``pulse`` backend, and runs background threads for per-channel VU
    meters and PulseAudio event subscription.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; a few statement placements (flagged below) are ambiguous — verify
    nesting against the upstream file.
    """

    def __init__(self, pulse):
        """Load the configured Glade layout and wire up every control."""
        Gtk.Window.__init__(self)
        self.builder = Gtk.Builder()
        self.pulse = pulse
        self.pulse.restart_window = False
        self.layout = self.pulse.config['layout']

        # Objects to pull from the Glade file; the volume adjustments for
        # the three channels of each strip type are generated below.
        component_list = [
            'window',
            'menu_popover',
            'rename_popover',
            'popover_entry',
            'latency_popover',
            'latency_adjust',
            'rnnoise_popover',
            'rnnoise_latency_adjust',
            'rnnoise_threshold_adjust',
            'sink_input_list',
            'source_output_list',
            'sink_input_scroll',
            'source_output_scroll',
            'source_output_viewport',
            'sink_input_viewport',
            'vumeter_toggle',
            'vi_1_peak',
            'channel_groups',
        ]
        for i in range(1, 4):
            component_list.append(f'hi_{i}_adjust')
            component_list.append(f'vi_{i}_adjust')
            component_list.append(f'a_{i}_adjust')
            component_list.append(f'b_{i}_adjust')

        try:
            self.builder.add_objects_from_file(
                os.path.join(LAYOUT_DIR, f'{self.layout}.glade'),
                component_list
            )
        except Exception as ex:
            # Without a layout the app cannot run at all.
            print('Error building main window!\n{}'.format(ex))
            sys.exit(1)

        # VU meters require the external 'pulse-vumeter' helper binary.
        if not 'enable_vumeters' in self.pulse.config:
            self.pulse.config['enable_vumeters'] = True
        self.enable_vumeters = True
        if not shutil.which('pulse-vumeter') or self.pulse.config['enable_vumeters'] == False:
            self.enable_vumeters = False

        self.vumeter_toggle = self.builder.get_object('vumeter_toggle')
        self.vumeter_toggle.set_active(self.enable_vumeters)
        self.vumeter_toggle.connect('toggled', self.toggle_vumeters)

        # JACK toggle is shown but disabled (feature not finished).
        self.jack_toggle_check_button = self.builder.get_object('jack_toggle')
        self.jack_toggle_check_button.set_active(self.pulse.config['jack']['enable'])
        self.jack_toggle_check_button.set_sensitive(False)
        # self.jack_toggle_check_button.connect('toggled', self.toggle_jack)

        self.start_hardware_comboboxes()
        self.start_inputs()
        self.start_outputs()
        self.start_app_list()
        self.start_vumeters()
        self.start_layout_combobox()

        self.window = self.builder.get_object('window')
        super().__init__(self.window)

        if self.layout == 'default':
            self.menu_button = self.builder.get_object('menu_button')
            self.menu_popover = self.builder.get_object('menu_popover')
            self.menu_popover.set_relative_to(self.menu_button)
            self.menu_button.connect('pressed', self.open_settings)

        self.window.connect('delete_event', self.delete_event)
        # self.window.set_type_hint(Gdk.WindowTypeHint.DIALOG)
        self.builder.connect_signals(self)
        self.window.show_all()
        # delete_event doubles as the SIGTERM/SIGINT handler (its two
        # positional parameters absorb signum/frame).
        signal.signal(signal.SIGTERM, self.delete_event)
        signal.signal(signal.SIGINT, self.delete_event)
        self.subscribe_thread.start()

    def start_layout_combobox(self):
        """Populate the layout chooser from LAYOUT_DIR and select the current one."""
        self.layout_combobox = self.builder.get_object('layout_combobox')
        layout_list = os.listdir(LAYOUT_DIR)
        i = 0
        for layout in layout_list:
            # Strip the '.glade' extension (6 characters).
            self.layout_combobox.append_text(layout[:len(layout) - 6])
            if layout[:len(layout) - 6] == self.layout:
                self.layout_combobox.set_active(i)
            i += 1
        self.layout_combobox.connect('changed', self.change_layout)

    def change_layout(self, combobox):
        """Persist the newly chosen layout and restart the window."""
        self.pulse.config['layout'] = combobox.get_active_text()
        self.pulse.restart_window = True
        self.window.destroy()
        self.delete_event(None, None)

    def open_settings(self, widget):
        """Show the settings popover (default layout only)."""
        self.menu_popover.popup()

    def toggle_jack(self, widget):
        """Handler for the (currently disabled) JACK enable checkbox."""
        self.pulse.config['jack']['enable'] = widget.get_active()
        if widget.get_active() == True:
            pass

    def toggle_vumeters(self, widget):
        """Enable/disable all VU meters, starting or joining their threads."""
        if not shutil.which('pulse-vumeter'):
            return
        self.enable_vumeters = widget.get_active()
        self.pulse.config['enable_vumeters'] = widget.get_active()
        if widget.get_active() == False:
            self.pulse.end_vumeter()
        for i in ['hi', 'vi', 'a', 'b']:
            for j in ['1','2','3']:
                # Only channels with an assigned device have meter threads.
                if self.pulse.config[i][j]['name'] != '':
                    if widget.get_active() == False:
                        self.vu_list[i][j].set_fraction(0)
                        self.vu_thread[i][j].join()
                    else:
                        self.vu_thread[i][j] = threading.Thread(target=self.listen_peak,
                            args=([i, j],))
                        self.vu_thread[i][j].start()

    def start_vumeters(self):
        """Create a ProgressBar per channel and start peak-listener threads."""
        self.vu_list = {}
        self.vu_thread = {}
        for i in ['hi', 'vi', 'a', 'b']:
            self.vu_list[i] = {}
            self.vu_thread[i] = {}
            for j in ['1','2','3']:
                grid = self.builder.get_object(f'{i}_{j}_vumeter')
                self.vu_list[i][j] = Gtk.ProgressBar()
                # Orientation depends on the active layout.
                if self.layout == 'default':
                    self.vu_list[i][j].set_orientation(Gtk.Orientation.VERTICAL)
                    self.vu_list[i][j].set_margin_bottom(8)
                    self.vu_list[i][j].set_margin_top(8)
                    self.vu_list[i][j].set_halign(Gtk.Align.CENTER)
                    self.vu_list[i][j].set_inverted(True)
                else:
                    self.vu_list[i][j].set_orientation(Gtk.Orientation.HORIZONTAL)
                    self.vu_list[i][j].set_vexpand(True)
                    self.vu_list[i][j].set_hexpand(True)
                grid.add(self.vu_list[i][j])
                if self.pulse.config[i][j]['name'] != '':
                    self.vu_thread[i][j] = threading.Thread(target=self.listen_peak,
                        args=([i, j],))
                    if self.enable_vumeters == True:
                        self.vu_thread[i][j].start()

    def restart_vumeter(self, index, stop_only=None):
        """Stop (and unless *stop_only*, restart) one channel's VU thread.

        NOTE(review): the exact nesting of the stop/restart branches was
        ambiguous in the mangled dump — confirm upstream.
        """
        if self.enable_vumeters == False:
            return
        if stop_only != False:
            if index[1] in self.pulse.vu_list[index[0]] or stop_only == True:
                if index[1] in self.vu_thread[index[0]]:
                    self.pulse.vu_list[index[0]][index[1]].terminate()
                    # self.pulse.vu_list[index[0]].pop(index[1])
                    self.vu_thread[index[0]][index[1]].join()
                    self.vu_thread[index[0]].pop(index[1])
                    self.vu_list[index[0]][index[1]].set_fraction(0)
            if stop_only == True:
                return
        self.vu_thread[index[0]][index[1]] = threading.Thread(target=self.listen_peak,
            args=(index,))
        self.vu_thread[index[0]][index[1]].start()

    def start_app_list(self):
        """Create the per-application sink-input / source-output lists."""
        sink_input_viewport = self.builder.get_object('sink_input_viewport')
        source_output_viewport = self.builder.get_object('source_output_viewport')
        self.sink_input_box = AppList('sink-input', self.pulse)
        self.source_output_box = AppList('source-output', self.pulse)
        sink_input_viewport.add(self.sink_input_box)
        source_output_viewport.add(self.source_output_box)
        # Thread started at the end of __init__.
        self.subscribe_thread = threading.Thread(target=self.listen_subscribe, args=())

    def start_hardware_comboboxes(self):
        """Fill the hardware-device comboboxes for hi (sources) and a (sinks)."""
        self.sink_list = self.pulse.get_hardware_devices('sinks')
        self.source_list = self.pulse.get_hardware_devices('sources')
        for device in ['hi', 'a']:
            # Device descriptions are truncated to fit the layout.
            name_size = 35 if device == 'a' else 20
            if self.layout != 'default':
                name_size = 100
            devices = self.sink_list if device == 'a' else self.source_list
            # for each combobox
            found = False
            for j in range(1, 4):
                combobox = self.builder.get_object(f'{device}_{j}_combobox')
                combobox.append_text('')
                for i in range(0, len(devices)):
                    text = devices[i]['description'][:name_size]
                    if len(text) == name_size:
                        text = text + '...'
                    combobox.append_text(text)
                    # Preselect the device saved in the config, if present.
                    if devices[i]['name'] == self.pulse.config[device][str(j)]['name']:
                        found = True
                        combobox.set_active(i + 1)
                # Configured device no longer exists: clear the config entry.
                if found == False and self.pulse.config[device][str(j)]['jack'] == False:
                    self.pulse.config[device][str(j)]['name'] = ''
                combobox.connect('changed', self.on_combo_changed, [device, str(j)], devices)

    def start_inputs(self):
        """Wire the three input strips: labels, primary/rnnoise toggles, volume,
        mute, and the a1..b3 routing buttons."""
        self.Rename_Popover = self.builder.get_object('rename_popover')
        self.Popover_Entry = self.builder.get_object('popover_entry')
        self.Popover_Entry.connect('activate', self.label_rename_entry)
        self.vi_primary_buttons = []
        hardware_inputs = self.pulse.get_hardware_devices('sources')
        virtual_inputs = self.pulse.get_virtual_devices('sinks')
        # for each input device
        for i in ['1', '2', '3']:
            name = self.pulse.config['vi'][i]['name']
            label = self.builder.get_object(f'vi_{i}_label')
            label.set_text(name if name != '' else f'Virtual Input {i}')
            label_evt_box = self.builder.get_object(f'vi_{i}_label_event_box')
            label_evt_box.connect('button_press_event', self.label_click, label, ['vi', i])
            # Primary (default-device) radio-like toggle for virtual inputs.
            primary = self.builder.get_object(f'vi_{i}_primary')
            primary.set_active(self.pulse.config['vi'][i]['primary'])
            if self.pulse.config['vi'][i]['primary'] == True:
                primary.set_sensitive(False)
            primary.connect('toggled', self.toggle_primary, ['vi', i])
            self.vi_primary_buttons.append(primary)
            # RNNoise noise-suppression toggle; right-click opens settings.
            rnnoise = self.builder.get_object(f'hi_{i}_rnnoise')
            rnnoise.set_active(self.pulse.config['hi'][i]['use_rnnoise'])
            rnnoise.connect('toggled', self.toggle_rnnoise, ['hi', i], f'hi{i}_rnnoise')
            rnnoise.connect('button_press_event', self.open_popover, RnnoisePopover, ['hi', i])
            # Hide the toggle when the RNNoise LADSPA plugin is not installed.
            found = 0
            for path in ['/usr/lib/ladspa', '/usr/local/lib/ladspa']:
                if os.path.isfile(os.path.join(path, 'librnnoise_ladspa.so')):
                    found = 1
                    break
                elif os.path.isfile(os.path.join(path, 'rnnoise_ladspa.so')):
                    found = 1
                    break
            if found == 0:
                rnnoise.set_visible(False)
                rnnoise.set_no_show_all(True)
            for device in ['hi', 'vi']:
                dev_type = virtual_inputs if device == 'vi' else hardware_inputs
                # Sync the config volume with the live device volume.
                for dev in dev_type:
                    if dev['name'] == self.pulse.config[device][i]['name']:
                        self.pulse.config[device][i]['vol'] = dev['volume']
                vol = self.builder.get_object(f'{device}_{i}_adjust')
                vol.set_value(self.pulse.config[device][i]['vol'])
                vol.connect('value-changed', self.volume_change, [device, i])
                mute = self.builder.get_object(f'{device}_{i}_mute')
                mute.set_active(self.pulse.config[device][i]['mute'])
                mute.connect('toggled', self.toggle_mute, [device, i])
                scale = self.builder.get_object(f'{device}_{i}_vol')
                scale.add_mark(100, Gtk.PositionType.TOP, '')
                # connection buttons
                for k in ['a', 'b']:
                    for j in ['1', '2', '3']:
                        button = self.builder.get_object(f'{device}_{i}_{k}{j}')
                        button.set_active(self.pulse.config[device][i][k + j])
                        button.connect('toggled', self.toggle_loopback, [k, j], [device, i])
                        # Right-click: latency settings, or port selection
                        # when JACK is enabled.
                        if self.pulse.config['jack']['enable'] == False:
                            button.connect('button_press_event', self.open_popover, LatencyPopover, [device, i, k + j])
                        else:
                            button.connect('button_press_event', self.open_popover, PortSelectPopover, [device, i, k + j])

    def start_outputs(self):
        """Wire the output strips (a = hardware, b = virtual): primary toggle,
        volume, mute, and EQ."""
        self.b_primary_buttons = []
        hardware_outputs = self.pulse.get_hardware_devices('sinks')
        virtual_outputs = self.pulse.get_virtual_devices('sources')
        for i in ['1', '2', '3']:
            primary = self.builder.get_object(f'b_{i}_primary')
            primary.set_active(self.pulse.config['b'][i]['primary'])
            if self.pulse.config['b'][i]['primary'] == True:
                primary.set_sensitive(False)
            primary.connect('toggled', self.toggle_primary, ['b', i])
            self.b_primary_buttons.append(primary)
            for j in ['a', 'b']:
                dev_list = hardware_outputs if j == 'a' else virtual_outputs
                # Sync the config volume with the live device volume.
                for dev in dev_list:
                    if dev['name'] == self.pulse.config[j][i]['name']:
                        self.pulse.config[j][i]['vol'] = dev['volume']
                master = self.builder.get_object(f'{j}_{i}_adjust')
                master.set_value(self.pulse.config[j][i]['vol'])
                master.connect('value-changed', self.volume_change, [j, i])
                mute = self.builder.get_object(f'{j}_{i}_mute')
                mute.set_active(self.pulse.config[j][i]['mute'])
                mute.connect('toggled', self.toggle_mute, [j, i])
                # EQ toggle; right-click opens the equalizer settings.
                eq = self.builder.get_object(f'{j}_{i}_eq')
                eq.set_active(self.pulse.config[j][i]['use_eq'])
                eq.connect('toggled', self.toggle_eq, [j, i])
                eq.connect('button_press_event', self.open_popover, EqPopover, [j, i])
                scale = self.builder.get_object(f'{j}_{i}_vol')
                scale.add_mark(100, Gtk.PositionType.TOP, '')
                if j == 'b':
                    label = self.builder.get_object(f'b{i}_label')
                    if label != None:
                        label.set_text(f'B{i} - {self.pulse.config["b"][i]["name"]}')
                # Hide EQ when the mbeq LADSPA plugin is not installed.
                found = 0
                for path in ['/usr/lib/ladspa', '/usr/local/lib/ladspa']:
                    if os.path.isfile(os.path.join(path, 'mbeq_1197.so')):
                        found = 1
                if found == 0:
                    eq.set_visible(False)
                    eq.set_no_show_all(True)

    def toggle_eq(self, button, index):
        """Apply or remove the equalizer on the channel at *index*."""
        func = self.pulse.apply_eq if button.get_active() == True else self.pulse.remove_eq
        func(index)

    def toggle_rnnoise(self, widget, source_index, sink_name):
        """Connect or disconnect the RNNoise chain for a hardware input."""
        stat = 'connect' if widget.get_active() == True else 'disconnect'
        self.pulse.rnnoise(source_index, sink_name, stat)

    def toggle_mute(self, button, index):
        """Mute (1) or unmute (0) the channel at *index*."""
        state = 1 if button.get_active() else 0
        self.pulse.mute(index, state)

    def toggle_loopback(self, button, sink_index, source_index):
        """Create or remove the loopback routing source -> sink."""
        state = 'connect' if button.get_active() else 'disconnect'
        self.pulse.connect(state, source_index, sink_index)

    def volume_change(self, slider, index, stream_type=None):
        """Push a slider value to the backend (skipped for unassigned channels)."""
        val = int(slider.get_value())
        if type(index) == int or self.pulse.config[index[0]][index[1]]['name'] != '':
            self.pulse.volume(index, val, stream_type)

    def open_popover(self, button, event, popover, index):
        """Open *popover* on right-click (button 3) for an assigned channel."""
        if event.button == 3:
            if self.pulse.config[index[0]][index[1]]['name'] != '':
                popover(button, self.pulse, index)

    def label_rename_entry(self, widget):
        """Apply a channel rename typed in the popover entry.

        Only alphanumeric names are accepted; on success the label, app
        lists and the channel's VU meter are refreshed.
        """
        name = widget.get_text()
        if re.match('^[a-zA-Z0-9]*$', name):
            if self.pulse.rename(self.Label_Index, name) == True:
                self.PopActive.set_text(name)
                self.sink_input_box.load_application_list()
                self.source_output_box.load_application_list()
                self.restart_vumeter(self.Label_Index)
        else:
            return
        self.Rename_Popover.popdown()

    def label_click(self, widget, event, label, index):
        """Open the rename popover over the clicked channel label."""
        self.Label_Index = index
        self.Rename_Popover.set_relative_to(widget)
        self.Rename_Popover.popup()
        self.PopActive = label

    def on_combo_changed(self, widget, index, device):
        """React to a hardware-device combobox change: tear down the old
        device, start the newly chosen one, and restart its VU meter."""
        model = widget.get_active()
        # If the channel currently has a device assigned, disable it first.
        if self.pulse.config[index[0]][index[1]]['name'] != '':
            if index[0] == 'hi':
                self.pulse.disable_source(index[1])
            else:
                self.pulse.disable_sink(index[1])
        # A real device was chosen (index 0 is the empty entry).
        if model > 0:
            self.pulse.config[index[0]][index[1]]['name'] = device[model - 1]['name']
            if index[0] == 'hi':
                self.pulse.start_source(index[1])
            else:
                self.pulse.start_sink(index[1])
            self.restart_vumeter(index)
        # The empty entry was chosen: stop the meter and clear the config.
        else:
            if self.pulse.config[index[0]][index[1]]['name'] != '':
                self.restart_vumeter(index, True)
            self.pulse.config[index[0]][index[1]]['name'] = ''

    def toggle_primary(self, widget, index):
        """Make the channel at *index* the primary (default) device.

        Behaves like a radio group: the newly primary button is locked and
        the others are released and deactivated.
        """
        if widget.get_active() == False:
            return
        else:
            widget.set_sensitive(False)
        button_list = self.vi_primary_buttons if index[0] == 'vi' else self.b_primary_buttons
        for i in range(3):
            if str(i + 1) != index[1]:
                button_list[i].set_sensitive(True)
                button_list[i].set_active(False)
        self.pulse.set_primary(index)
        if index[0] == 'vi':
            self.sink_input_box.load_application_list()
        else:
            self.source_output_box.load_application_list()

    def listen_subscribe(self):
        """Background thread: follow PulseAudio events and keep the
        application lists in sync (via GLib.idle_add for thread safety)."""
        for i in self.pulse.subscribe():
            if 'remove' in i:
                id = i.split('#')[1].strip('\n')
                if 'sink-input' in i:
                    GLib.idle_add(self.sink_input_box.remove_app_dev, id)
                elif 'source-output' in i:
                    GLib.idle_add(self.source_output_box.remove_app_dev, id)
            elif 'new' in i:
                id = i.split('#')[1].strip('\n')
                if 'sink-input' in i:
                    GLib.idle_add(self.sink_input_box.load_application_list, id)
                elif 'source-output' in i:
                    GLib.idle_add(self.source_output_box.load_application_list, id)

    def listen_peak(self, index):
        """Background thread: stream peak levels into one channel's meter."""
        old = 0
        for i in self.pulse.vumeter(index):
            try:
                val = float(i.strip('\n'))
                GLib.idle_add(self.vu_list[index[0]][index[1]].set_fraction, val)
            except:
                # Stream closed or produced garbage: end the thread.
                return

    def delete_event(self, widget, event):
        """Shutdown handler (window close, SIGTERM, SIGINT): persist config
        and join all background threads before quitting GTK."""
        self.pulse.save_config()
        self.pulse.end_subscribe()
        self.subscribe_thread.join()
        if self.enable_vumeters == True:
            self.pulse.end_vumeter()
            for i in ['hi', 'vi', 'a', 'b']:
                for j in ['1','2','3']:
                    if j in self.vu_thread[i]:
                        self.vu_thread[i][j].join()
        Gtk.main_quit()
        return False
| 41.045082 | 122 | 0.557164 |
b5f3a778ae1156dd2ee9e707348f51b1814fe9a4 | 1,401 | py | Python | src/Lz78.py | cir1711/MOresolver | 0c7a166ca87f9f5bc48329557a212df1c1713c9e | [
"MIT"
] | null | null | null | src/Lz78.py | cir1711/MOresolver | 0c7a166ca87f9f5bc48329557a212df1c1713c9e | [
"MIT"
] | null | null | null | src/Lz78.py | cir1711/MOresolver | 0c7a166ca87f9f5bc48329557a212df1c1713c9e | [
"MIT"
] | null | null | null | from Lz77 import parsed
def lzBit(string):
    """Bits needed to code one symbol of *string*'s alphabet.

    Computed as the digit count of ``bin(alphabet_size - 1)`` without the
    '0b' prefix, matching the original list-based implementation (only the
    number of distinct characters matters, not their order).
    """
    alphabet_size = len(set(string))
    return len(bin(alphabet_size - 1)[2:])
def lzLen(string):
    """Total bit count of the uncompressed string: one fixed-width symbol
    code (lzBit wide) per character."""
    bits_per_symbol = lzBit(string)
    return bits_per_symbol * len(string)
def keymaker(string):
    """Build the LZ78-style phrase dictionary for *string*.

    Single characters are added on first sight; when a character is already
    known, the inner loop extends a phrase from the current position until
    it is no longer in the dictionary.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump —
    in particular the placement of the trailing ``fi``/append pair relative
    to the inner loop is ambiguous and should be verified upstream.
    """
    l = list()
    # Manually tracked read position; advanced by both loops.
    index = 0
    for i in string:
        if i not in l:
            l.append(i)
            index += 1
        else:
            # Known character: grow a phrase from position ``index``.
            aux = ""
            for j in string[index:]:
                if aux not in l and aux != "":
                    l.append(aux)
                    break
                else:
                    aux += j
                    index += 1
            # Append the phrase accumulated when the scan ran off the end.
            fi = aux
            l.append(fi)
    return l
def encodeLZ(string):
    """Encode *string* as LZ78 (index, symbol) pairs using keymaker's phrases.

    Each phrase becomes ``(0, phrase)`` when its first character is new, or
    ``(k+1, last_char)`` where k is the dictionary index of the longest
    known prefix.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump —
    the final ``else`` is read as a for-else (no prefix extension found);
    verify upstream.
    """
    l = keymaker(string)
    l1 = []   # output pairs
    l2 = []   # phrases emitted so far
    for i in l:
        if i[0] not in l2:
            l1.append((0,i))
        else:
            # Grow a prefix until it leaves the emitted set; emit its index
            # plus the breaking character.
            aux = i[0]
            for j in range(1,len(i)):
                prev = aux
                aux += i[j]
                if aux not in l2:
                    l1.append((l.index(prev)+1, aux[len(aux)-1]))
                    break
            else:
                l1.append((0, i[0]))
        l2.append(i)
    return l1
def decodeLZ(string):
    """Decode a serialized LZ78 stream back to the original text.

    ``parsed`` (from Lz77) splits the input into "index,symbol" tokens —
    presumably one per pair; TODO confirm its exact output format.  Index 0
    means a literal symbol; index k references the k-th decoded phrase.
    """
    l = parsed(string)
    l1 = list()
    for i in l:
        aux = i.split(",")
        if aux[0] =="0":
            l1.append(aux[1])
        else:
            # 1-based back-reference into the phrases decoded so far.
            l1.append(l1[int(aux[0])-1]+aux[1])
    # Drop stray quote characters left over from the serialization.
    return "".join(("".join(l1)).split("'"))
def cadCompr(string, n=15):
    """Bit size of the LZ78-compressed form of *string*.

    Each emitted pair costs one symbol code (lzBit wide) plus one dictionary
    index sized for *n* entries.
    """
    index_bits = len(bin(n - 1)[2:])
    pair_count = len(encodeLZ(string))
    return pair_count * (lzBit(string) + index_bits)
def comper(string):
    """Compression ratio: compressed bit size divided by raw bit size."""
    ratio = cadCompr(string) / lzLen(string)
    return ratio
def percent(string):
    """Space saving as a percentage (positive means compression helped)."""
    saving = 1 - comper(string)
    return saving * 100
8476562a7623d2f46837634168c1853538436b31 | 3,011 | py | Python | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/vm_meta_data.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/vm_meta_data.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/vm_meta_data.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class VmMetaData:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'admin_pass': 'str'
}
attribute_map = {
'admin_pass': 'admin_pass'
}
def __init__(self, admin_pass=None):
"""VmMetaData - a model defined in huaweicloud sdk"""
self._admin_pass = None
self.discriminator = None
if admin_pass is not None:
self.admin_pass = admin_pass
@property
def admin_pass(self):
"""Gets the admin_pass of this VmMetaData.
Windows弹性云服务器Administrator用户的密码。
:return: The admin_pass of this VmMetaData.
:rtype: str
"""
return self._admin_pass
@admin_pass.setter
def admin_pass(self, admin_pass):
"""Sets the admin_pass of this VmMetaData.
Windows弹性云服务器Administrator用户的密码。
:param admin_pass: The admin_pass of this VmMetaData.
:type: str
"""
self._admin_pass = admin_pass
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VmMetaData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.182609 | 79 | 0.547991 |
d511f6434d6744e26f1413ca9425bef5429530d3 | 288 | py | Python | flag/urls.py | demlution/django-flag | 76c776a961e7fdcaa1475902c8a4f4486d902b4d | [
"MIT"
] | 25 | 2015-07-09T18:38:15.000Z | 2021-08-24T20:06:36.000Z | flag/urls.py | demlution/django-flag | 76c776a961e7fdcaa1475902c8a4f4486d902b4d | [
"MIT"
] | 8 | 2015-07-28T04:46:21.000Z | 2020-01-05T02:00:48.000Z | flag/urls.py | demlution/django-flag | 76c776a961e7fdcaa1475902c8a4f4486d902b4d | [
"MIT"
] | 13 | 2015-04-13T20:44:02.000Z | 2020-02-29T00:55:44.000Z | # flake8: noqa
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
# URL routes for the flag app. Uses the legacy string-view `patterns()`
# syntax (removed in Django 1.10), kept for compatibility with old Django.
urlpatterns = patterns("",
    url(r"^$", "flag.views.flag", name="flag"),
    url(r'^thank_you', TemplateView.as_view(template_name="flag/thank_you.html"), name='flag-reported'),
)
| 28.8 | 104 | 0.71875 |
b4f54f3627fdedd533bb21e6ee4d5c9936254e5a | 225 | py | Python | practice/77.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | 1 | 2020-01-15T11:04:16.000Z | 2020-01-15T11:04:16.000Z | practice/77.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | 2 | 2021-03-31T19:36:19.000Z | 2021-06-10T22:29:26.000Z | practice/77.py | porala/python | 41213189a9b35b5b8c40c048f4d6cd3f8e5f25f4 | [
"DOC"
] | null | null | null | #Create a script that gets user's age and returns year of birth
from datetime import datetime

# Ask the user for their age, then subtract it from the current year to
# estimate (to within one year) the year they were born.
user_age = int(input("What's your age? "))
birth_year = datetime.now().year - user_age
print("You were born back in %s" % birth_year)
| 32.142857 | 64 | 0.711111 |
770c389188dfe8c5aa7cbca671e2b3de4c30c017 | 7,362 | py | Python | PyQt5/PyQt_logger02.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | null | null | null | PyQt5/PyQt_logger02.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | 11 | 2021-02-08T20:45:23.000Z | 2022-03-12T01:00:11.000Z | PyQt5/PyQt_logger02.py | philip-shen/note_python | db0ad84af25464a22ac52e348960107c81e74a56 | [
"MIT"
] | null | null | null | '''
A Qt GUI for logging
http://plumberjack.blogspot.com/2019/11/a-qt-gui-for-logging.html
'''
import datetime
import logging
import random
import sys
import time
# Deal with minor differences between PySide2 and PyQt5
try:
from PySide2 import QtCore, QtGui, QtWidgets
Signal = QtCore.Signal
Slot = QtCore.Slot
except ImportError:
from PyQt5 import QtCore, QtGui, QtWidgets
Signal = QtCore.pyqtSignal
Slot = QtCore.pyqtSlot
logger = logging.getLogger(__name__)
#
# Signals need to be contained in a QObject or subclass in order to be correctly
# initialized.
#
class Signaller(QtCore.QObject):
    # Signals must be declared on a QObject subclass to be initialized
    # correctly; this carrier emits (formatted message, originating record).
    signal = Signal(str, logging.LogRecord)
#
# Output to a Qt GUI is only supposed to happen on the main thread. So, this
# handler is designed to take a slot function which is set up to run in the main
# thread. In this example, the function takes a string argument which is a
# formatted log message, and the log record which generated it. The formatted
# string is just a convenience - you could format a string for output any way
# you like in the slot function itself.
#
# You specify the slot function to do whatever GUI updates you want. The handler
# doesn't know or care about specific UI elements.
#
class QtHandler(logging.Handler):
    """Logging handler that forwards records to a Qt slot via a signal.

    The connected slot receives the formatted message string plus the raw
    LogRecord; the slot is expected to do the actual GUI update on the
    main thread. The handler itself knows nothing about UI elements.
    """

    def __init__(self, slotfunc, *args, **kwargs):
        super(QtHandler, self).__init__(*args, **kwargs)
        self.signaller = Signaller()
        self.signaller.signal.connect(slotfunc)

    def emit(self, record):
        # Format in the emitting thread, then hand off through the signal.
        self.signaller.signal.emit(self.format(record), record)
#
# This example uses QThreads, which means that the threads at the Python level
# are named something like "Dummy-1". The function below gets the Qt name of the
# current thread.
#
def ctname():
    """Return the Qt object name of the current QThread."""
    return QtCore.QThread.currentThread().objectName()
#
# Used to generate random levels for logging.
#
# Pool of severities that the demo picks from at random.
LEVELS = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
          logging.CRITICAL)
#
# This worker class represents work that is done in a thread separate to the
# main thread. The way the thread is kicked off to do work is via a button press
# that connects to a slot in the worker.
#
# Because the default threadName value in the LogRecord isn't much use, we add
# a qThreadName which contains the QThread name as computed above, and pass that
# value in an "extra" dictionary which is used to update the LogRecord with the
# QThread name.
#
# This example worker just outputs messages sequentially, interspersed with
# random delays of the order of a few seconds.
#
class Worker(QtCore.QObject):
    """Background object that logs messages until its thread is interrupted."""

    @Slot()
    def start(self):
        # Tag every record with the Qt thread name: at the Python level a
        # QThread shows up as "Dummy-N", so threadName would be useless.
        extra = {'qThreadName': ctname() }
        logger.debug('Started work', extra=extra)
        counter = 1
        # Run until interruption is requested, which allows a reasonably
        # clean thread termination.
        while not QtCore.QThread.currentThread().isInterruptionRequested():
            pause = 0.5 + random.random() * 2
            time.sleep(pause)
            severity = random.choice(LEVELS)
            logger.log(severity, 'Message after delay of %3.1f: %d', pause,
                       counter, extra=extra)
            counter += 1
#
# Implement a simple UI for this cookbook example. This contains:
#
# * A read-only text edit window which holds formatted log messages
# * A button to start work and log stuff in a separate thread
# * A button to log something from the main thread
# * A button to clear the log window
#
class Window(QtWidgets.QWidget):
    """Main demo window: a read-only log view plus three control buttons.

    Owns the QtHandler (whose slot, update_status, appends colorized log
    lines on the main thread) and the background worker thread.
    """
    # Maps logging level -> HTML font color used in the log view.
    COLORS = {
        logging.DEBUG: 'black',
        logging.INFO: 'blue',
        logging.WARNING: 'orange',
        logging.ERROR: 'red',
        logging.CRITICAL: 'purple',
    }
    def __init__(self, app):
        super(Window, self).__init__()
        self.app = app
        self.textedit = te = QtWidgets.QPlainTextEdit(self)
        # Set whatever the default monospace font is for the platform
        f = QtGui.QFont('nosuchfont')
        f.setStyleHint(f.Monospace)
        te.setFont(f)
        te.setReadOnly(True)
        PB = QtWidgets.QPushButton
        self.work_button = PB('Start background work', self)
        self.log_button = PB('Log a message at a random level', self)
        self.clear_button = PB('Clear log window', self)
        self.handler = h = QtHandler(self.update_status)
        # Remember to use qThreadName rather than threadName in the format string.
        fs = '%(asctime)s %(qThreadName)-12s %(levelname)-8s %(message)s'
        formatter = logging.Formatter(fs)
        h.setFormatter(formatter)
        logger.addHandler(h)
        # Set up to terminate the QThread when we exit
        app.aboutToQuit.connect(self.force_quit)
        # Lay out all the widgets
        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(te)
        layout.addWidget(self.work_button)
        layout.addWidget(self.log_button)
        layout.addWidget(self.clear_button)
        self.setFixedSize(900, 400)
        # Connect the non-worker slots and signals
        self.log_button.clicked.connect(self.manual_update)
        self.clear_button.clicked.connect(self.clear_display)
        # Start a new worker thread and connect the slots for the worker
        self.start_thread()
        self.work_button.clicked.connect(self.worker.start)
        # Once started, the button should be disabled
        self.work_button.clicked.connect(lambda : self.work_button.setEnabled(False))
    def start_thread(self):
        """Create the worker and move it onto its own named QThread."""
        self.worker = Worker()
        self.worker_thread = QtCore.QThread()
        self.worker.setObjectName('Worker')
        self.worker_thread.setObjectName('WorkerThread') # for qThreadName
        self.worker.moveToThread(self.worker_thread)
        # This will start an event loop in the worker thread
        self.worker_thread.start()
    def kill_thread(self):
        """Stop the worker and shut its thread down cleanly."""
        # Just tell the worker to stop, then tell it to quit and wait for that
        # to happen
        self.worker_thread.requestInterruption()
        if self.worker_thread.isRunning():
            self.worker_thread.quit()
            self.worker_thread.wait()
        else:
            print('worker has already exited.')
    def force_quit(self):
        """Slot for app.aboutToQuit: ensure the worker thread terminates."""
        # For use when the window is closed
        if self.worker_thread.isRunning():
            self.kill_thread()
    # The functions below update the UI and run in the main thread because
    # that's where the slots are set up
    @Slot(str, logging.LogRecord)
    def update_status(self, status, record):
        """Append the formatted message, colorized by the record's level."""
        color = self.COLORS.get(record.levelno, 'black')
        s = '<pre><font color="%s">%s</font></pre>' % (color, status)
        self.textedit.appendHtml(s)
    @Slot()
    def manual_update(self):
        # Log one message at a random level from the main thread; it reaches
        # the text view through the same QtHandler/update_status path as the
        # worker's messages.
        level = random.choice(LEVELS)
        extra = {'qThreadName': ctname() }
        logger.log(level, 'Manually logged!', extra=extra)
    @Slot()
    def clear_display(self):
        """Empty the log window."""
        self.textedit.clear()
def main():
    """Configure logging, show the demo window and run the Qt event loop."""
    QtCore.QThread.currentThread().setObjectName('MainThread')
    logging.getLogger().setLevel(logging.DEBUG)
    qt_app = QtWidgets.QApplication(sys.argv)
    window = Window(qt_app)
    window.show()
    sys.exit(qt_app.exec_())
# Script entry point.
if __name__=='__main__':
    main()
ec6fe0b128055b134446e680e37b042ff8ba1710 | 22,270 | py | Python | behave/parser.py | achengs/behave | ecce2caa54b4a75e99af30ceb789320acf3f2ba2 | [
"BSD-2-Clause"
] | 5 | 2019-01-15T18:49:16.000Z | 2020-02-21T20:24:39.000Z | behave/parser.py | angelluk/behave_parallel | 7d508413dcc2fedbe0cd54d6fd4655d4b95ff712 | [
"BSD-2-Clause"
] | 6 | 2019-04-26T19:34:34.000Z | 2020-06-03T21:49:13.000Z | behave/parser.py | angelluk/behave_parallel | 7d508413dcc2fedbe0cd54d6fd4655d4b95ff712 | [
"BSD-2-Clause"
] | 9 | 2019-04-23T19:43:41.000Z | 2020-05-12T09:17:27.000Z | # -*- coding: UTF-8 -*-
from __future__ import absolute_import, with_statement
import re
import sys
import six
from behave import model, i18n
from behave.textutil import text as _text
DEFAULT_LANGUAGE = "en"
def parse_file(filename, language=None):
    """Read a feature file and parse it into a Feature model object."""
    # File encoding is assumed to be utf8. Oh, yes.
    with open(filename, "rb") as f:
        raw = f.read()
    return parse_feature(raw.decode("utf8"), language, filename)
def parse_feature(data, language=None, filename=None):
    """Parse feature text (unicode only) into a Feature model object."""
    # ALL data operated on by the parser MUST be unicode.
    assert isinstance(data, six.text_type)
    try:
        return Parser(language).parse(data, filename)
    except ParserError as e:
        # Attach the filename so the error message can point at the file.
        e.filename = filename
        raise
def parse_steps(text, language=None, filename=None):
    """Parse a multi-line block of steps from a scenario.

    The scenario line with title and keyword is not provided.

    :param text: Multi-line step text to parse (unicode).
    :param language: Optional i18n language identifier.
    :param filename: Optional filename used in error messages.
    :return: List of parsed steps (if successful).
    """
    assert isinstance(text, six.text_type)
    parser = Parser(language, variant="steps")
    try:
        return parser.parse_steps(text, filename)
    except ParserError as e:
        # Attach the filename so the error message can point at the file.
        e.filename = filename
        raise
def parse_step(text, language=None, filename=None):
    """Parse exactly one step given as multi-line text.

    :param text: Multi-line text with the step to parse (unicode).
    :param language: Optional i18n language identifier.
    :param filename: Optional filename used in error messages.
    :return: The single parsed step (if successful).
    """
    parsed = parse_steps(text, language=language, filename=filename)
    assert len(parsed) == 1
    return parsed[0]
def parse_tags(text):
    """Parse tags from text (one or more lines, as string).

    :param text: Multi-line text with tags to parse (as unicode).
    :return: List of tags (empty for empty/None input).
    """
    if not text:
        return []
    return Parser(variant="tags").parse_tags(text)
class ParserError(Exception):
    """Raised when feature-file text cannot be parsed.

    Carries the offending line number, line text and filename so the
    rendered message can point at the exact location of the problem.
    """
    def __init__(self, message, line, filename=None, line_text=None):
        if line:
            message += u" at line %d" % line
        if line_text:
            message += u': "%s"' % line_text.strip()
        super(ParserError, self).__init__(message)
        self.line = line
        self.line_text = line_text
        self.filename = filename
    def __str__(self):
        arg0 = _text(self.args[0])
        if self.filename:
            filename = _text(self.filename, sys.getfilesystemencoding())
            return u'Failed to parse "%s": %s' % (filename, arg0)
        else:
            return u"Failed to parse <string>: %s" % arg0
    if six.PY2:
        # Python 2: __str__ must return bytes; reuse the unicode version
        # above as __unicode__ and encode its result as UTF-8 for __str__.
        __unicode__ = __str__
        __str__ = lambda self: self.__unicode__().encode("utf-8")
class Parser(object):
    """Feature file parser for behave.

    Implemented as a line-oriented state machine: ``action(line)``
    dispatches each input line to an ``action_<state>`` method, which
    consumes the line and may switch the parser to another state
    (init, feature, taggable_statement, scenario, steps, multiline, table).
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, language=None, variant=None):
        if not variant:
            variant = "feature"
        self.language = language
        self.variant = variant
        self.state = "init"
        self.line = 0
        self.last_step = None
        self.multiline_start = None
        self.multiline_leading = None
        self.multiline_terminator = None
        self.filename = None
        self.feature = None
        self.statement = None
        self.tags = []
        self.lines = []
        self.table = None
        self.examples = None
        self.keywords = None
        if self.language:
            self.keywords = i18n.languages[self.language]
        # NOT-NEEDED: self.reset()
    def reset(self):
        """Reset all mutable parser state so the instance can be reused."""
        # This can probably go away.
        if self.language:
            self.keywords = i18n.languages[self.language]
        else:
            self.keywords = None
        self.state = "init"
        self.line = 0
        self.last_step = None
        self.multiline_start = None
        self.multiline_leading = None
        self.multiline_terminator = None
        self.filename = None
        self.feature = None
        self.statement = None
        self.tags = []
        self.lines = []
        self.table = None
        self.examples = None
    def parse(self, data, filename=None):
        """Parse complete feature text, line by line; return the Feature."""
        self.reset()
        self.filename = filename
        for line in data.split("\n"):
            self.line += 1
            if not line.strip() and self.state != "multiline":
                # -- SKIP EMPTY LINES, except in multiline string args.
                continue
            self.action(line)
        if self.table:
            # -- FLUSH: Finish a table that ends at EOF.
            self.action_table("")
        feature = self.feature
        if feature:
            feature.parser = self
        self.reset()
        return feature
    def _build_feature(self, keyword, line):
        # Text after "<keyword>:" is the feature name.
        name = line[len(keyword) + 1:].strip()
        language = self.language or DEFAULT_LANGUAGE
        self.feature = model.Feature(self.filename, self.line, keyword,
                                     name, tags=self.tags, language=language)
        # -- RESET STATE:
        self.tags = []
    def _build_background_statement(self, keyword, line):
        if self.tags:
            msg = u"Background supports no tags: @%s" % (u" @".join(self.tags))
            raise ParserError(msg, self.line, self.filename, line)
        name = line[len(keyword) + 1:].strip()
        statement = model.Background(self.filename, self.line, keyword, name)
        self.statement = statement
        self.feature.background = self.statement
    def _build_scenario_statement(self, keyword, line):
        name = line[len(keyword) + 1:].strip()
        self.statement = model.Scenario(self.filename, self.line,
                                        keyword, name, tags=self.tags)
        self.feature.add_scenario(self.statement)
        # -- RESET STATE:
        self.tags = []
    def _build_scenario_outline_statement(self, keyword, line):
        # pylint: disable=C0103
        # C0103 Invalid name "build_scenario_outline_statement", too long.
        name = line[len(keyword) + 1:].strip()
        self.statement = model.ScenarioOutline(self.filename, self.line,
                                               keyword, name, tags=self.tags)
        self.feature.add_scenario(self.statement)
        # -- RESET STATE:
        self.tags = []
    def _build_examples(self, keyword, line):
        if not isinstance(self.statement, model.ScenarioOutline):
            message = u"Examples must only appear inside scenario outline"
            raise ParserError(message, self.line, self.filename, line)
        name = line[len(keyword) + 1:].strip()
        self.examples = model.Examples(self.filename, self.line,
                                       keyword, name, tags=self.tags)
        # pylint: disable=E1103
        # E1103 Instance of "Background" has no "examples" member
        # (but some types could not be inferred).
        self.statement.examples.append(self.examples)
        # -- RESET STATE:
        self.tags = []
    def diagnose_feature_usage_error(self):
        if self.feature:
            return "Multiple features in one file are not supported."
        else:
            return "Feature should not be used here."
    def diagnose_background_usage_error(self):
        if self.feature and self.feature.scenarios:
            return "Background may not occur after Scenario/ScenarioOutline."
        elif self.tags:
            return "Background does not support tags."
        else:
            return "Background should not be used here."
    def diagnose_scenario_usage_error(self):
        if not self.feature:
            return "Scenario may not occur before Feature."
        else:
            return "Scenario should not be used here."
    def diagnose_scenario_outline_usage_error(self): # pylint: disable=invalid-name
        if not self.feature:
            return "ScenarioOutline may not occur before Feature."
        else:
            return "ScenarioOutline should not be used here."
    def ask_parse_failure_oracle(self, line):
        """
        Try to find the failure reason when a parse failure occurs:
        Oracle, oracle, ... what went wrong?
        Zzzz
        :param line: Text line where parse failure occured (as string).
        :return: Reason (as string) if an explanation is found.
            Otherwise, empty string or None.
        """
        feature_kwd = self.match_keyword("feature", line)
        if feature_kwd:
            return self.diagnose_feature_usage_error()
        background_kwd = self.match_keyword("background", line)
        if background_kwd:
            return self.diagnose_background_usage_error()
        scenario_kwd = self.match_keyword("scenario", line)
        if scenario_kwd:
            return self.diagnose_scenario_usage_error()
        scenario_outline_kwd = self.match_keyword("scenario_outline", line)
        if scenario_outline_kwd:
            return self.diagnose_scenario_outline_usage_error()
        # -- OTHERWISE:
        if self.variant == "feature" and not self.feature:
            return "No feature found."
        # -- FINALLY: No glue what went wrong.
        return None
    def action(self, line):
        """Dispatch *line* to the handler for the current parser state."""
        if line.strip().startswith("#") and self.state != "multiline":
            if self.state != "init" or self.tags or self.variant != "feature":
                return
            # -- DETECT: language comment (at begin of feature file; state=init)
            line = line.strip()[1:].strip()
            if line.lstrip().lower().startswith("language:"):
                language = line[9:].strip()
                self.language = language
                self.keywords = i18n.languages[language]
            return
        func = getattr(self, "action_" + self.state, None)
        if func is None:
            line = line.strip()
            msg = u"Parser in unknown state %s;" % self.state
            raise ParserError(msg, self.line, self.filename, line)
        if not func(line):
            line = line.strip()
            msg = u'\nParser failure in state %s, at line %d: "%s"\n' % \
                  (self.state, self.line, line)
            reason = self.ask_parse_failure_oracle(line)
            if reason:
                msg += u"REASON: %s" % reason
            raise ParserError(msg, None, self.filename)
    def action_init(self, line):
        """State "init": expect tags or the Feature statement."""
        line = line.strip()
        if line.startswith("@"):
            self.tags.extend(self.parse_tags(line))
            return True
        feature_kwd = self.match_keyword("feature", line)
        if feature_kwd:
            self._build_feature(feature_kwd, line)
            self.state = "feature"
            return True
        return False
    # def subaction_detect_next_scenario(self, line):
    #     if line.startswith("@"):
    #         self.tags.extend(self.parse_tags(line))
    #         self.state = "next_scenario"
    #         return True
    #
    #     scenario_kwd = self.match_keyword("scenario", line)
    #     if scenario_kwd:
    #         self._build_scenario_statement(scenario_kwd, line)
    #         self.state = "scenario"
    #         return True
    #
    #     scenario_outline_kwd = self.match_keyword("scenario_outline", line)
    #     if scenario_outline_kwd:
    #         self._build_scenario_outline_statement(scenario_outline_kwd, line)
    #         self.state = "scenario"
    #         return True
    #
    #     # -- OTHERWISE:
    #     return False
    # pylint: disable=invalid-name
    def subaction_detect_taggable_statement(self, line):
        """Subaction is used after first tag line is detected.
        Additional lines with tags or taggable_statement follow.
        Taggable statements (excluding Feature) are:
          * Scenario
          * ScenarioOutline
          * Examples (within ScenarioOutline)
        """
        if line.startswith("@"):
            self.tags.extend(self.parse_tags(line))
            self.state = "taggable_statement"
            return True
        scenario_kwd = self.match_keyword("scenario", line)
        if scenario_kwd:
            self._build_scenario_statement(scenario_kwd, line)
            self.state = "scenario"
            return True
        scenario_outline_kwd = self.match_keyword("scenario_outline", line)
        if scenario_outline_kwd:
            self._build_scenario_outline_statement(scenario_outline_kwd, line)
            self.state = "scenario"
            return True
        examples_kwd = self.match_keyword("examples", line)
        if examples_kwd:
            self._build_examples(examples_kwd, line)
            self.state = "table"
            return True
        # -- OTHERWISE:
        return False
    # pylint: enable=invalid-name
    def action_feature(self, line):
        """State "feature": collect description or detect next statement."""
        line = line.strip()
        # OLD: if self.subaction_detect_next_scenario(line):
        if self.subaction_detect_taggable_statement(line):
            # -- DETECTED: Next Scenario, ScenarioOutline (or tags)
            return True
        background_kwd = self.match_keyword("background", line)
        if background_kwd:
            self._build_background_statement(background_kwd, line)
            self.state = "steps"
            return True
        self.feature.description.append(line)
        return True
    # def action_next_scenario(self, line):
    #     """
    #     Entered after first tag for Scenario/ScenarioOutline is detected.
    #     """
    #     line = line.strip()
    #     if self.subaction_detect_next_scenario(line):
    #         return True
    #
    #     return False
    def action_taggable_statement(self, line):
        """Entered after first tag for Scenario/ScenarioOutline or
        Examples is detected (= taggable_statement except Feature).
        Taggable statements (excluding Feature) are:
          * Scenario
          * ScenarioOutline
          * Examples (within ScenarioOutline)
        """
        line = line.strip()
        if self.subaction_detect_taggable_statement(line):
            # -- DETECTED: Next Scenario, ScenarioOutline or Examples (or tags)
            return True
        return False
    def action_scenario(self, line):
        """
        Entered when Scenario/ScenarioOutline keyword/line is detected.
        Hunts/collects scenario description lines.
        DETECT:
          * first step of Scenario/ScenarioOutline
          * next Scenario/ScenarioOutline.
        """
        line = line.strip()
        step = self.parse_step(line)
        if step:
            # -- FIRST STEP DETECTED: End collection of scenario descriptions.
            self.state = "steps"
            self.statement.steps.append(step)
            return True
        # -- CASE: Detect next Scenario/ScenarioOutline
        #   * Scenario with scenario description, but without steps.
        #   * Title-only scenario without scenario description and steps.
        # OLD: if self.subaction_detect_next_scenario(line):
        if self.subaction_detect_taggable_statement(line):
            # -- DETECTED: Next Scenario, ScenarioOutline (or tags)
            return True
        # -- OTHERWISE: Add scenario description line.
        # pylint: disable=E1103
        # E1103 Instance of "Background" has no "description" member...
        self.statement.description.append(line)
        return True
    def action_steps(self, line):
        """
        Entered when first step is detected (or nested step parsing).
        Subcases:
          * step
          * multi-line text (doc-string), following a step
          * table, following a step
          * examples for a ScenarioOutline, after ScenarioOutline steps
        DETECT:
          * next Scenario/ScenarioOutline or Examples (in a ScenarioOutline)
        """
        # pylint: disable=R0911
        # R0911 Too many return statements (8/6)
        stripped = line.lstrip()
        if stripped.startswith('"""') or stripped.startswith("'''"):
            self.state = "multiline"
            self.multiline_start = self.line
            self.multiline_terminator = stripped[:3]
            self.multiline_leading = line.index(stripped[0])
            return True
        line = line.strip()
        step = self.parse_step(line)
        if step:
            self.statement.steps.append(step)
            return True
        if self.subaction_detect_taggable_statement(line):
            # -- DETECTED: Next Scenario, ScenarioOutline or Examples (or tags)
            return True
        if line.startswith("|"):
            assert self.statement.steps, "TABLE-START without step detected."
            self.state = "table"
            return self.action_table(line)
        return False
    def action_multiline(self, line):
        """State "multiline": collect doc-string lines until terminator."""
        if line.strip().startswith(self.multiline_terminator):
            step = self.statement.steps[-1]
            step.text = model.Text(u"\n".join(self.lines), u"text/plain",
                                   self.multiline_start)
            if step.name.endswith(":"):
                step.name = step.name[:-1]
            self.lines = []
            self.multiline_terminator = None
            self.state = "steps"
            return True
        self.lines.append(line[self.multiline_leading:])
        # -- BETTER DIAGNOSTICS: May remove non-whitespace in execute_steps()
        removed_line_prefix = line[:self.multiline_leading]
        if removed_line_prefix.strip():
            message = u"BAD-INDENT in multiline text: "
            message += u"Line '%s' would strip leading '%s'" % \
                       (line, removed_line_prefix)
            raise ParserError(message, self.line, self.filename)
        return True
    def action_table(self, line):
        """State "table": collect table rows; flush on first non-row line."""
        line = line.strip()
        if not line.startswith("|"):
            if self.examples:
                self.examples.table = self.table
                self.examples = None
            else:
                step = self.statement.steps[-1]
                step.table = self.table
                if step.name.endswith(":"):
                    step.name = step.name[:-1]
            self.table = None
            self.state = "steps"
            return self.action_steps(line)
        # -- SUPPORT: Escaped-pipe(s) in Gherkin cell values.
        # Search for pipe(s) that are not preceeded with an escape char.
        cells = [cell.replace("\\|", "|").strip()
                 for cell in re.split(r"(?<!\\)\|", line[1:-1])]
        if self.table is None:
            self.table = model.Table(cells, self.line)
        else:
            if len(cells) != len(self.table.headings):
                raise ParserError(u"Malformed table", self.line)
            self.table.add_row(cells, self.line)
        return True
    def match_keyword(self, keyword, line):
        """Return the matched i18n alias if *line* starts with "<alias>:"."""
        if not self.keywords:
            self.language = DEFAULT_LANGUAGE
            self.keywords = i18n.languages[DEFAULT_LANGUAGE]
        for alias in self.keywords[keyword]:
            if line.startswith(alias + ":"):
                return alias
        return False
    def parse_tags(self, line):
        """
        Parse a line with one or more tags:
          * A tag starts with the AT sign.
          * A tag consists of one word without whitespace chars.
          * Multiple tags are separated with whitespace chars
          * End-of-line comment is stripped.
        :param line:  Line with one/more tags to process.
        :raise ParserError: If syntax error is detected.
        """
        assert line.startswith("@")
        tags = []
        for word in line.split():
            if word.startswith("@"):
                tags.append(model.Tag(word[1:], self.line))
            elif word.startswith("#"):
                break   # -- COMMENT: Skip rest of line.
            else:
                # -- BAD-TAG: Abort here.
                raise ParserError(u"tag: %s (line: %s)" % (word, line),
                                  self.line, self.filename)
        return tags
    def parse_step(self, line):
        """Try to parse *line* as a step; return a model.Step or None."""
        for step_type in ("given", "when", "then", "and", "but"):
            for kw in self.keywords[step_type]:
                if kw.endswith("<"):
                    whitespace = ""
                    kw = kw[:-1]
                else:
                    whitespace = " "
                # try to match the keyword; also attempt a purely lowercase
                # match if that'll work
                if not (line.startswith(kw + whitespace)
                        or line.lower().startswith(kw.lower() + whitespace)):
                    continue
                name = line[len(kw):].strip()
                if step_type in ("and", "but"):
                    if not self.last_step:
                        raise ParserError(u"No previous step", self.line)
                    step_type = self.last_step
                else:
                    self.last_step = step_type
                step = model.Step(self.filename, self.line, kw, step_type,
                                  name)
                return step
        return None
    def parse_steps(self, text, filename=None):
        """
        Parse support for execute_steps() functionality that supports step with:
          * multiline text
          * table
        :param text:  Text that contains 0..* steps
        :return: List of parsed steps (as model.Step objects).
        """
        assert isinstance(text, six.text_type)
        if not self.language:
            self.language = DEFAULT_LANGUAGE
        self.reset()
        self.filename = filename
        self.statement = model.Scenario(filename, 0, u"scenario", u"")
        self.state = "steps"
        for line in text.split("\n"):
            self.line += 1
            if not line.strip() and self.state != "multiline":
                # -- SKIP EMPTY LINES, except in multiline string args.
                continue
            self.action(line)
        # -- FINALLY:
        if self.table:
            self.action_table("")
        steps = self.statement.steps
        return steps
| 35.293185 | 83 | 0.580692 |
e897fdf3eba6bb41f7b5748965a70c61d43eac45 | 5,784 | py | Python | examples/speedyspeech/baker/speedyspeech_updater.py | Jackwaterveg/Parakeet | e75a07076ba5766206a6cd1fb2e5f82b0ba3842c | [
"Apache-2.0"
] | null | null | null | examples/speedyspeech/baker/speedyspeech_updater.py | Jackwaterveg/Parakeet | e75a07076ba5766206a6cd1fb2e5f82b0ba3842c | [
"Apache-2.0"
] | null | null | null | examples/speedyspeech/baker/speedyspeech_updater.py | Jackwaterveg/Parakeet | e75a07076ba5766206a6cd1fb2e5f82b0ba3842c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import paddle
from paddle import distributed as dist
from paddle.fluid.layers import huber_loss
from paddle.nn import functional as F
from parakeet.modules.losses import masked_l1_loss, weighted_mean
from parakeet.modules.ssim import ssim
from parakeet.training.extensions.evaluator import StandardEvaluator
from parakeet.training.reporter import report
from parakeet.training.updaters.standard_updater import StandardUpdater
# Module-wide logging setup: timestamped format including file/line origin.
logging.basicConfig(
    format='%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s',
    datefmt='[%Y-%m-%d %H:%M:%S]')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class SpeedySpeechUpdater(StandardUpdater):
    """Training updater for SpeedySpeech.

    Each ``update_core`` call runs one forward pass, combines a masked L1
    spectrogram loss, an SSIM loss and a log-domain Huber duration loss,
    then performs a single optimizer step. Per-rank progress messages are
    accumulated in ``self.msg`` and written to ``worker_<rank>.log``.
    """
    def __init__(self,
                 model,
                 optimizer,
                 dataloader,
                 init_state=None,
                 output_dir=None):
        # NOTE(review): the `init_state` argument is accepted but a literal
        # None is forwarded to the base class, so callers passing a state
        # silently lose it — confirm whether this is intentional.
        super().__init__(model, optimizer, dataloader, init_state=None)
        # Per-rank log file so distributed workers do not interleave output.
        log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
        self.filehandler = logging.FileHandler(str(log_file))
        logger.addHandler(self.filehandler)
        self.logger = logger
        self.msg = ""
    def update_core(self, batch):
        """Run one optimization step on *batch* and report the losses."""
        self.msg = "Rank: {}, ".format(dist.get_rank())
        losses_dict = {}
        decoded, predicted_durations = self.model(
            text=batch["phones"],
            tones=batch["tones"],
            durations=batch["durations"])
        target_mel = batch["feats"]
        # Masks exclude padded frames/phones from the loss terms.
        spec_mask = F.sequence_mask(
            batch["num_frames"], dtype=target_mel.dtype).unsqueeze(-1)
        text_mask = F.sequence_mask(
            batch["num_phones"], dtype=predicted_durations.dtype)
        # spec loss
        l1_loss = masked_l1_loss(decoded, target_mel, spec_mask)
        # duration loss (log-domain; target durations clamped to >= 1)
        target_durations = batch["durations"]
        target_durations = paddle.maximum(
            target_durations.astype(predicted_durations.dtype),
            paddle.to_tensor([1.0]))
        duration_loss = weighted_mean(
            huber_loss(
                predicted_durations, paddle.log(target_durations), delta=1.0),
            text_mask, )
        # ssim loss
        ssim_loss = 1.0 - ssim((decoded * spec_mask).unsqueeze(1),
                               (target_mel * spec_mask).unsqueeze(1))
        loss = l1_loss + ssim_loss + duration_loss
        optimizer = self.optimizer
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
        report("train/loss", float(loss))
        report("train/l1_loss", float(l1_loss))
        report("train/duration_loss", float(duration_loss))
        report("train/ssim_loss", float(ssim_loss))
        losses_dict["l1_loss"] = float(l1_loss)
        losses_dict["duration_loss"] = float(duration_loss)
        losses_dict["ssim_loss"] = float(ssim_loss)
        losses_dict["loss"] = float(loss)
        self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
                              for k, v in losses_dict.items())
class SpeedySpeechEvaluator(StandardEvaluator):
    """Validation evaluator for SpeedySpeech.

    Computes the same loss terms as ``SpeedySpeechUpdater`` (masked L1 on
    the spectrogram, log-domain Huber duration loss, SSIM loss) but
    without back-propagation or optimizer steps, and reports them under
    the "eval/" prefix. Results are also logged to ``worker_<rank>.log``.
    """
    def __init__(self, model, dataloader, output_dir=None):
        super().__init__(model, dataloader)
        # Per-rank log file so distributed workers do not interleave output.
        log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
        self.filehandler = logging.FileHandler(str(log_file))
        logger.addHandler(self.filehandler)
        self.logger = logger
        self.msg = ""
    def evaluate_core(self, batch):
        """Compute and report evaluation losses for one batch."""
        self.msg = "Evaluate: "
        losses_dict = {}
        decoded, predicted_durations = self.model(
            text=batch["phones"],
            tones=batch["tones"],
            durations=batch["durations"])
        target_mel = batch["feats"]
        # Masks exclude padded frames/phones from the loss terms.
        spec_mask = F.sequence_mask(
            batch["num_frames"], dtype=target_mel.dtype).unsqueeze(-1)
        text_mask = F.sequence_mask(
            batch["num_phones"], dtype=predicted_durations.dtype)
        # spec loss
        l1_loss = masked_l1_loss(decoded, target_mel, spec_mask)
        # duration loss (log-domain; target durations clamped to >= 1)
        target_durations = batch["durations"]
        target_durations = paddle.maximum(
            target_durations.astype(predicted_durations.dtype),
            paddle.to_tensor([1.0]))
        duration_loss = weighted_mean(
            huber_loss(
                predicted_durations, paddle.log(target_durations), delta=1.0),
            text_mask, )
        # ssim loss
        ssim_loss = 1.0 - ssim((decoded * spec_mask).unsqueeze(1),
                               (target_mel * spec_mask).unsqueeze(1))
        loss = l1_loss + ssim_loss + duration_loss
        report("eval/loss", float(loss))
        report("eval/l1_loss", float(l1_loss))
        report("eval/duration_loss", float(duration_loss))
        report("eval/ssim_loss", float(ssim_loss))
        losses_dict["l1_loss"] = float(l1_loss)
        losses_dict["duration_loss"] = float(duration_loss)
        losses_dict["ssim_loss"] = float(ssim_loss)
        losses_dict["loss"] = float(loss)
        self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
                              for k, v in losses_dict.items())
        self.logger.info(self.msg)
| 36.840764 | 79 | 0.630705 |
f74f708b2e24158406787d50edf2095594e9def4 | 6,063 | py | Python | tests/dwd/observations/test_api_sites_geo.py | panodata/python_dwd | a9ee1bdf21b8fc12f6b6b33628ca804e656f310d | [
"MIT"
] | null | null | null | tests/dwd/observations/test_api_sites_geo.py | panodata/python_dwd | a9ee1bdf21b8fc12f6b6b33628ca804e656f310d | [
"MIT"
] | null | null | null | tests/dwd/observations/test_api_sites_geo.py | panodata/python_dwd | a9ee1bdf21b8fc12f6b6b33628ca804e656f310d | [
"MIT"
] | 1 | 2022-03-21T14:28:45.000Z | 2022-03-21T14:28:45.000Z | from pathlib import Path
import pytest
import numpy as np
from datetime import datetime
from unittest.mock import patch, MagicMock
import pandas as pd
from wetterdienst.dwd.metadata.column_map import METADATA_DTYPE_MAPPING
from wetterdienst.util.geo import derive_nearest_neighbours
from wetterdienst.util.geo import Coordinates
from wetterdienst.dwd.observations import (
DWDObservationSites,
DWDObservationParameterSet,
DWDObservationPeriod,
DWDObservationResolution,
)
from wetterdienst.exceptions import InvalidParameterCombination
HERE = Path(__file__).parent
METADATA_FILE = HERE / "FIXED_METADATA.JSON"
METADATA_DF = pd.read_json(METADATA_FILE)
METADATA_DF = METADATA_DF.astype(METADATA_DTYPE_MAPPING)
@patch(
    "wetterdienst.dwd.observations.stations.metadata_for_climate_observations",
    MagicMock(return_value=METADATA_DF),
)
def test_dwd_observation_sites_nearby_number_success():
    """With station metadata mocked from the fixture file, both the
    nearest-N and radius lookups return the expected stations/distances."""
    # Test for one nearest station
    sites = DWDObservationSites(
        DWDObservationParameterSet.TEMPERATURE_AIR,
        DWDObservationResolution.HOURLY,
        DWDObservationPeriod.RECENT,
        datetime(2020, 1, 1),
        datetime(2020, 1, 20),
    )
    nearby_station = sites.nearby_number(
        50.0,
        8.9,
        1,
    )
    # TO_DATE is excluded from the comparison (not pinned by this test),
    # and STATION_ID is normalized to int64 before the frame comparison.
    nearby_station = nearby_station.drop("TO_DATE", axis="columns")
    nearby_station.STATION_ID = nearby_station.STATION_ID.astype(np.int64)
    pd.testing.assert_frame_equal(
        nearby_station,
        pd.DataFrame(
            [
                [
                    np.int64(4411),
                    np.datetime64("2002-01-24"),
                    155.0,
                    49.9195,
                    8.9671,
                    "Schaafheim-Schlierbach",
                    "Hessen",
                    11.65302672,
                ]
            ],
            columns=[
                "STATION_ID",
                "FROM_DATE",
                "STATION_HEIGHT",
                "LAT",
                "LON",
                "STATION_NAME",
                "STATE",
                "DISTANCE_TO_LOCATION",
            ],
        ),
    )
    # Second case: all stations within a 20 km radius of the same point.
    nearby_station = DWDObservationSites(
        DWDObservationParameterSet.TEMPERATURE_AIR,
        DWDObservationResolution.HOURLY,
        DWDObservationPeriod.RECENT,
        datetime(2020, 1, 1),
        datetime(2020, 1, 20),
    ).nearby_radius(
        50.0,
        8.9,
        20,
    )
    nearby_station = nearby_station.drop("TO_DATE", axis="columns")
    nearby_station.STATION_ID = nearby_station.STATION_ID.astype(np.int64)
    pd.testing.assert_frame_equal(
        nearby_station,
        pd.DataFrame(
            [
                [
                    np.int64(4411),
                    np.datetime64("2002-01-24 00:00:00"),
                    155.0,
                    49.9195,
                    8.9671,
                    "Schaafheim-Schlierbach",
                    "Hessen",
                    11.653026716750542,
                ],
                [
                    np.int64(2480),
                    np.datetime64("2004-09-01 00:00:00"),
                    108.0,
                    50.0643,
                    8.993,
                    "Kahl/Main",
                    "Bayern",
                    12.572153957087247,
                ],
                [
                    np.int64(7341),
                    np.datetime64("2005-07-16 00:00:00"),
                    119.0,
                    50.09,
                    8.7862,
                    "Offenbach-Wetterpark",
                    "Hessen",
                    16.13301589362613,
                ],
            ],
            columns=[
                "STATION_ID",
                "FROM_DATE",
                "STATION_HEIGHT",
                "LAT",
                "LON",
                "STATION_NAME",
                "STATE",
                "DISTANCE_TO_LOCATION",
            ],
        ),
    )
@patch(
    "wetterdienst.dwd.observations.stations.metadata_for_climate_observations",
    MagicMock(return_value=METADATA_DF),
)
def test_dwd_observation_sites_nearby_number_fail_1():
    """Requesting zero nearest stations must raise a ValueError."""
    with pytest.raises(ValueError):
        sites = DWDObservationSites(
            DWDObservationParameterSet.TEMPERATURE_AIR,
            DWDObservationResolution.HOURLY,
            DWDObservationPeriod.RECENT,
            datetime(2020, 1, 1),
            datetime(2020, 1, 20),
        )
        sites.nearby_number(51.4, 9.3, 0)
@patch(
    "wetterdienst.dwd.observations.stations.metadata_for_climate_observations",
    MagicMock(return_value=METADATA_DF),
)
def test_dwd_observation_sites_nearby_number_fail_2():
    """SOIL at 10-minute resolution is an invalid combination and is rejected."""
    with pytest.raises(InvalidParameterCombination):
        sites = DWDObservationSites(
            DWDObservationParameterSet.SOIL,
            DWDObservationResolution.MINUTE_10,
            DWDObservationPeriod.RECENT,
            datetime(2020, 1, 1),
            datetime(2020, 1, 20),
        )
        sites.nearby_number(51.4, 9.3, 1)
@patch(
    "wetterdienst.dwd.observations.stations.metadata_for_climate_observations",
    MagicMock(return_value=METADATA_DF),
)
def test_dwd_observation_sites_nearby_distance():
    """A 10 km radius around (50.0, 8.9) yields no stations -> empty frame."""
    sites = DWDObservationSites(
        DWDObservationParameterSet.TEMPERATURE_AIR,
        DWDObservationResolution.HOURLY,
        DWDObservationPeriod.RECENT,
        datetime(2020, 1, 1),
        datetime(2020, 1, 20),
    )
    result = sites.nearby_radius(50.0, 8.9, 10)
    assert result.empty is True
def test_derive_nearest_neighbours():
    """Nearest-neighbour search returns the expected distances and indices."""
    query_points = Coordinates(np.array([50.0, 51.4]), np.array([8.9, 9.3]))
    station_metadata = pd.read_json(METADATA_FILE)
    distances, nearest_indices = derive_nearest_neighbours(
        station_metadata.LAT.values, station_metadata.LON.values, query_points
    )
    expected_distances = np.array([0.00182907, 0.00227919])
    expected_indices = np.array([432, 655])
    np.testing.assert_array_almost_equal(distances, expected_distances)
    np.testing.assert_array_almost_equal(nearest_indices, expected_indices)
| 28.331776 | 87 | 0.562263 |
3f230db68c74538f975b2ef54c592da880b5de91 | 10,736 | py | Python | chalice/deploy/executor.py | devangmehta123/chalice | 9cba1bff604871c03c179e0b4be94d59a93ba198 | [
"Apache-2.0"
] | null | null | null | chalice/deploy/executor.py | devangmehta123/chalice | 9cba1bff604871c03c179e0b4be94d59a93ba198 | [
"Apache-2.0"
] | null | null | null | chalice/deploy/executor.py | devangmehta123/chalice | 9cba1bff604871c03c179e0b4be94d59a93ba198 | [
"Apache-2.0"
] | null | null | null | import re
import pprint
import jmespath
from attr import asdict
from typing import Dict, List, Any # noqa
from chalice.deploy import models # noqa
from chalice.awsclient import TypedAWSClient # noqa
from chalice.utils import UI # noqa
class BaseExecutor(object):
    """Shared state for plan executors: the AWS client, the UI used for
    progress output, and the deployed-resource payloads collected so far."""

    def __init__(self, client, ui):
        # type: (TypedAWSClient, UI) -> None
        self._client = client
        self._ui = ui
        self.resource_values = []  # type: List[Dict[str, Any]]

    def execute(self, plan):
        # type: (models.Plan) -> None
        """No-op hook; concrete executors override this to act on the plan."""
class Executor(BaseExecutor):
    """Applies a deployment plan by dispatching each instruction to a
    ``_do_<instruction class name lowercased>`` handler method."""

    def __init__(self, client, ui):
        # type: (TypedAWSClient, UI) -> None
        super(Executor, self).__init__(client, ui)
        # A mapping of variables that's populated as API calls
        # are made. These can be used in subsequent API calls.
        self.variables = {} # type: Dict[str, Any]
        # Index by resource name so repeated records merge into one payload.
        self._resource_value_index = {} # type: Dict[str, Any]
        self._variable_resolver = VariableResolver()
    def execute(self, plan):
        # type: (models.Plan) -> None
        """Run every instruction in ``plan``, writing the instruction's
        associated message (if any) to the UI before executing it."""
        messages = plan.messages
        for instruction in plan.instructions:
            message = messages.get(id(instruction))
            if message is not None:
                self._ui.write(message)
            # Dispatch by class name, e.g. APICall -> _do_apicall().
            getattr(self, '_do_%s' % instruction.__class__.__name__.lower(),
                    self._default_handler)(instruction)
    def _default_handler(self, instruction):
        # type: (models.Instruction) -> None
        """Fallback used when no ``_do_*`` handler exists for an instruction."""
        raise RuntimeError("Deployment executor encountered an "
                           "unknown instruction: %s"
                           % instruction.__class__.__name__)
    def _do_apicall(self, instruction):
        # type: (models.APICall) -> None
        """Invoke a method on the AWS client, storing the result if requested."""
        final_kwargs = self._resolve_variables(instruction)
        method = getattr(self._client, instruction.method_name)
        result = method(**final_kwargs)
        if instruction.output_var is not None:
            self.variables[instruction.output_var] = result
    def _do_copyvariable(self, instruction):
        # type: (models.CopyVariable) -> None
        """Copy an existing variable's value under a new name."""
        to_var = instruction.to_var
        from_var = instruction.from_var
        self.variables[to_var] = self.variables[from_var]
    def _do_storevalue(self, instruction):
        # type: (models.StoreValue) -> None
        """Resolve a (possibly templated) value and store it as a variable."""
        result = self._variable_resolver.resolve_variables(
            instruction.value, self.variables)
        self.variables[instruction.name] = result
    def _do_storemultiplevalue(self, instruction):
        # type: (models.StoreMultipleValue) -> None
        """Extend an existing list variable with the resolved value, or store
        the value directly when the variable is unset or not a list."""
        result = self._variable_resolver.resolve_variables(
            instruction.value, self.variables)
        data = self.variables.get(instruction.name)
        if data and isinstance(data, list):
            self.variables[instruction.name].extend(result)
        else:
            self.variables[instruction.name] = result
    def _do_recordresourcevariable(self, instruction):
        # type: (models.RecordResourceVariable) -> None
        """Record a deployed-resource attribute whose value comes from a
        previously stored variable."""
        payload = {
            'name': instruction.resource_name,
            'resource_type': instruction.resource_type,
            instruction.name: self.variables[instruction.variable_name],
        }
        self._add_to_deployed_values(payload)
    def _do_recordresourcevalue(self, instruction):
        # type: (models.RecordResourceValue) -> None
        """Record a deployed-resource attribute with a literal value."""
        payload = {
            'name': instruction.resource_name,
            'resource_type': instruction.resource_type,
            instruction.name: instruction.value,
        }
        self._add_to_deployed_values(payload)
    def _add_to_deployed_values(self, payload):
        # type: (Dict[str, str]) -> None
        """Append a new resource payload, or merge into an existing one
        keyed by the resource name."""
        key = payload['name']
        if key not in self._resource_value_index:
            self._resource_value_index[key] = payload
            self.resource_values.append(payload)
        else:
            # If the key already exists, we merge the new payload
            # with the existing payload.
            self._resource_value_index[key].update(payload)
    def _do_jpsearch(self, instruction):
        # type: (models.JPSearch) -> None
        """Apply a JMESPath expression to a variable and store the result."""
        v = self.variables[instruction.input_var]
        result = jmespath.search(instruction.expression, v)
        self.variables[instruction.output_var] = result
    def _do_builtinfunction(self, instruction):
        # type: (models.BuiltinFunction) -> None
        # Split this out to a separate class of built in functions
        # once we add more functions.
        if instruction.function_name == 'parse_arn':
            resolved_args = self._variable_resolver.resolve_variables(
                instruction.args, self.variables)
            value = resolved_args[0]
            # Colon-delimited ARN: arn:partition:service:region:account-id:...
            parts = value.split(':')
            result = {
                'service': parts[2],
                'region': parts[3],
                'account_id': parts[4],
            }
            self.variables[instruction.output_var] = result
        else:
            raise ValueError("Unknown builtin function: %s"
                             % instruction.function_name)
    def _resolve_variables(self, api_call):
        # type: (models.APICall) -> Dict[str, Any]
        """Resolve variable references in the API call's params; annotate
        any UnresolvedValueError with the method name before re-raising."""
        try:
            return self._variable_resolver.resolve_variables(
                api_call.params, self.variables)
        except UnresolvedValueError as e:
            e.method_name = api_call.method_name
            raise
class VariableResolver(object):
    """Recursively substitutes deploy-time variables into parameter values.

    Dispatch is by type name: a value of type ``Foo`` is handled by
    ``_resolve_foo`` when such a method exists; anything else is returned
    unchanged.
    """

    def resolve_variables(self, value, variables):
        # type: (Any, Dict[str, str]) -> Any
        type_name = type(value).__name__.lower()
        handler = getattr(self, '_resolve_%s' % type_name, None)
        if handler is None:
            # Plain scalars (str, int, ...) need no substitution.
            return value
        return handler(value, variables)

    def _resolve_variable(self, value, variables):
        # type: (Any, Dict[str, str]) -> Any
        return variables[value.name]

    def _resolve_stringformat(self, value, variables):
        # type: (Any, Dict[str, str]) -> Any
        substitutions = {name: variables[name] for name in value.variables}
        return value.template.format(**substitutions)

    def _resolve_keydatavariable(self, value, variables):
        # type: (Any, Dict[str, str]) -> Any
        return variables[value.name][value.key]

    def _resolve_placeholder(self, value, variables):
        # type: (Any, Dict[str, str]) -> Any
        # The key and method_name values are added
        # as the exception propagates up the stack.
        raise UnresolvedValueError('', value, '')

    def _resolve_dict(self, value, variables):
        # type: (Any, Dict[str, str]) -> Any
        resolved = {}
        for key, inner in value.items():
            try:
                resolved[key] = self.resolve_variables(inner, variables)
            except UnresolvedValueError as err:
                # Record which parameter failed as the error bubbles up.
                err.key = key
                raise
        return resolved

    def _resolve_list(self, value, variables):
        # type: (Any, Dict[str, str]) -> Any
        return [self.resolve_variables(item, variables) for item in value]
return final_list
# This class is used for the ``chalice dev plan`` command.
# The dev commands don't have any backwards compatibility guarantees
# so we can alter this output as needed.
class DisplayOnlyExecutor(BaseExecutor):
    """Pretty-prints a deployment plan to the UI without making API calls."""

    # Max length of bytes object before we truncate with '<bytes>'
    _MAX_BYTE_LENGTH = 30
    _LINE_VERTICAL = u'\u2502'
    # Compiled once at class creation; previously these patterns were
    # rebuilt on every _upper_snake_case() call.
    _FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
    _END_CAP_RE = re.compile('([a-z0-9])([A-Z])')

    def execute(self, plan):
        # type: (models.Plan) -> None
        """Render every instruction, then dump the collected variable pool."""
        spillover_values = {} # type: Dict[str, Any]
        self._ui.write("Plan\n")
        self._ui.write("====\n\n")
        for instruction in plan.instructions:
            getattr(self, '_do_%s' % instruction.__class__.__name__.lower(),
                    self._default_handler)(instruction, spillover_values)
        self._write_spillover(spillover_values)

    def _write_spillover(self, spillover_values):
        # type: (Dict[str, Any]) -> None
        """Print large nested values that were replaced by ${NAME} markers."""
        if not spillover_values:
            return
        self._ui.write("Variable Pool\n")
        self._ui.write("=============\n\n")
        for key, value in spillover_values.items():
            self._ui.write('%s:\n' % key)
            self._ui.write(pprint.pformat(value) + '\n\n')

    def _default_handler(self, instruction, spillover_values):
        # type: (models.Instruction, Dict[str, Any]) -> None
        """Render one instruction as aligned key/value rows."""
        instruction_name = self._upper_snake_case(
            instruction.__class__.__name__)
        for key, value in asdict(instruction).items():
            if isinstance(value, dict):
                value = self._format_dict(value, spillover_values)
            line = ('%-30s %s%20s %-10s' % (
                instruction_name, self._LINE_VERTICAL, '%s:' % key, value)
            )
            self._ui.write(line + '\n')
            # Only the first row of an instruction carries its name.
            instruction_name = ''
        self._ui.write('\n')

    def _format_dict(self, dict_value, spillover_values):
        # type: (Dict[str, Any], Dict[str, Any]) -> str
        """Format a dict one key per row, moving nested/large values into
        the spillover pool under a generated ${NAME} placeholder."""
        lines = ['']
        for key, value in dict_value.items():
            if not value:
                continue
            if isinstance(value, bytes) and len(value) > self._MAX_BYTE_LENGTH:
                value = '<bytes>'
            if isinstance(value, (dict, list)):
                # We need a unique name to use so we just use a simple
                # incrementing counter with the name prefixed.
                spillover_name = '${%s_%s}' % (
                    key.upper(), len(spillover_values))
                spillover_values[spillover_name] = value
                value = spillover_name
            line = '%-31s%s%-15s%s%20s %-10s' % (
                ' ', self._LINE_VERTICAL, ' ', self._LINE_VERTICAL,
                '%s:' % key, value
            )
            lines.append(line)
        return '\n'.join(lines)

    def _upper_snake_case(self, v):
        # type: (str) -> str
        """Convert e.g. 'RecordResourceValue' to 'RECORD_RESOURCE_VALUE'."""
        first = self._FIRST_CAP_RE.sub(r'\1_\2', v)
        return self._END_CAP_RE.sub(r'\1_\2', first).upper()
class UnresolvedValueError(Exception):
    """Raised when an API parameter still contains an unresolved placeholder."""

    MSG = (
        "The API parameter '%s' has an unresolved value "
        "of %s in the method call: %s"
    )

    def __init__(self, key, value, method_name):
        # type: (str, models.Placeholder, str) -> None
        super(UnresolvedValueError, self).__init__()
        self.key = key
        self.value = value
        self.method_name = method_name

    def __str__(self):
        # type: () -> str
        details = (self.key, self.value, self.method_name)
        return self.MSG % details
4fed5b51e8bc96971a81532d6ea9916354c617a7 | 732 | py | Python | Code/Python/WebSiteGrab/old/AmazonePriceFunc.py | dks1018/CoffeeShopCoding | 13ac1700673c86c601eb2758570920620a956e4c | [
"ADSL"
] | null | null | null | Code/Python/WebSiteGrab/old/AmazonePriceFunc.py | dks1018/CoffeeShopCoding | 13ac1700673c86c601eb2758570920620a956e4c | [
"ADSL"
] | null | null | null | Code/Python/WebSiteGrab/old/AmazonePriceFunc.py | dks1018/CoffeeShopCoding | 13ac1700673c86c601eb2758570920620a956e4c | [
"ADSL"
] | null | null | null | import webbrowser, sys, pyperclip, requests, bs4
def getAmazonPrice(productURL):
try:
res = requests.get(productURL)
except:
print(res.raise_for_status())
soup = bs4.BeautifulSoup(res.text,'html.parser')
try:
elems = soup.select('#buyNewSection > h5 > div > div.a-column.a-span8.a-text-right.a-span-last > div > span.a-size-medium.a-color-price.offer-price.a-text-normal')
except:
print("There was an error in the parser!")
return elems[0].text.strip()
# Demo: fetch and print the current price of one known product page.
price = getAmazonPrice('https://www.amazon.com/Automate-Boring-Stuff-Python-Programming/dp/1593275994')
print('The Price of Automate the boring stuff is: {0}'.format(price))
# Try a webscraper for a fun site xkcd.com
346a170f7c266d2fab4cbe7036247c5ea2e0b080 | 981 | py | Python | hasker/qa/migrations/0013_auto_20180227_0917.py | kochnev/hacker | 17d95aa794c78251c9765ac6878d216aec85bbb4 | [
"MIT"
] | null | null | null | hasker/qa/migrations/0013_auto_20180227_0917.py | kochnev/hacker | 17d95aa794c78251c9765ac6878d216aec85bbb4 | [
"MIT"
] | null | null | null | hasker/qa/migrations/0013_auto_20180227_0917.py | kochnev/hacker | 17d95aa794c78251c9765ac6878d216aec85bbb4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-27 09:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: drops the generic-relation fields
    # (content_type/object_id) from the Votes model and adds two explicit,
    # nullable foreign keys to Answer and Question instead.
    dependencies = [
        ('qa', '0012_auto_20180219_1252'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='votes',
            name='content_type',
        ),
        migrations.RemoveField(
            model_name='votes',
            name='object_id',
        ),
        migrations.AddField(
            model_name='votes',
            name='answer',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='qa.Answer'),
        ),
        migrations.AddField(
            model_name='votes',
            name='question',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='qa.Question'),
        ),
    ]
5fc1591620525ca555e22205d071c6832bc44ce5 | 2,469 | py | Python | landlab/components/overland_flow/tests/test_kinwave.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | null | null | null | landlab/components/overland_flow/tests/test_kinwave.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | 1 | 2016-03-16T02:34:08.000Z | 2016-04-20T19:31:30.000Z | landlab/components/overland_flow/tests/test_kinwave.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Unit tests for landlab.components.overland_flow.KinwaveOverlandFlowModel
last updated: 3/14/16
"""
# Default raster-grid parameters used by these tests:
# (rows, cols) shape, node spacing, and lower-left origin.
(_SHAPE, _SPACING, _ORIGIN) = ((10, 10), (25, 25), (0.0, 0.0))
_ARGS = (_SHAPE, _SPACING, _ORIGIN)
def test_KinWaveOF_name(kin_wave_of):
    """Component advertises the expected name."""
    expected_name = "KinwaveOverlandFlowModel"
    assert kin_wave_of.name == expected_name
def test_KinWaveOF_input_var_names(kin_wave_of):
    """Component declares exactly the expected input fields, in order."""
    expected_inputs = (
        "topographic__elevation",
        "topographic__gradient",
    )
    assert kin_wave_of.input_var_names == expected_inputs
def test_KinWaveOF_output_var_names(kin_wave_of):
    """Component declares exactly the expected output fields, in order."""
    expected_outputs = (
        "surface_water__depth",
        "water__velocity",
        "water__specific_discharge",
    )
    assert kin_wave_of.output_var_names == expected_outputs
def test_KinWaveOF_var_units(kin_wave_of):
    """Every declared field has units, and the units match expectations."""
    declared = set(kin_wave_of.input_var_names) | set(kin_wave_of.output_var_names)
    assert declared == set(dict(kin_wave_of.units).keys())
    expected_units = {
        "topographic__elevation": "m",
        "topographic__gradient": "m/m",
        "surface_water__depth": "m",
        "water__velocity": "m/s",
        "water__specific_discharge": "m2/s",
    }
    for field_name, units in expected_units.items():
        assert kin_wave_of.var_units(field_name) == units
def test_grid_shape(kin_wave_of):
    """Fixture grid dimensions match the module-level _SHAPE."""
    n_rows, n_cols = _SHAPE
    assert kin_wave_of.grid.number_of_node_rows == n_rows
    assert kin_wave_of.grid.number_of_node_columns == n_cols
def test_run_one_step():
    """One 60 s step on a gently tilted plane yields the expected peak depth."""
    from landlab import RasterModelGrid
    import numpy as np
    from landlab.components.overland_flow import KinwaveOverlandFlowModel

    grid = RasterModelGrid((10, 10), xy_spacing=0.5)
    grid.add_zeros("node", "topographic__elevation", dtype=float)
    grid.add_zeros("node", "topographic__gradient")
    # Plane rising 0.002 m per column, starting at elevation 5.
    topo = np.ones(100).reshape(10, 10)
    for col in range(10):
        topo[:, col] = 5 + (0.002 * col)
    grid["node"]["topographic__elevation"] = topo.flatten()
    model = KinwaveOverlandFlowModel(
        grid, precip_rate=100.0, precip_duration=1.0, roughness=0.02
    )
    model.run_one_step(60)
    # I'll admit this is very non-robust. Solution roughly based on plot #9
    # from Heng et. al, (2009): "Modeling overland flow and soil eroion on
    # non uniform hillslopes: A finite volume scheme." They do not provide the
    # numerical solution but the plots match...
    max_depth_mm = max(grid["node"]["surface_water__depth"]) * 1000.0
    np.testing.assert_almost_equal(max_depth_mm, 1.66666666667)
| 32.92 | 87 | 0.706359 |
c4d0b6346e4eef77f34da7a688bbe544254c6d61 | 7,188 | py | Python | setup.py | pascalwhoop/kedro | 424756dae498048094ebb00bb15674c88f31f7fd | [
"Apache-2.0"
] | null | null | null | setup.py | pascalwhoop/kedro | 424756dae498048094ebb00bb15674c88f31f7fd | [
"Apache-2.0"
] | null | null | null | setup.py | pascalwhoop/kedro | 424756dae498048094ebb00bb15674c88f31f7fd | [
"Apache-2.0"
] | 1 | 2021-08-22T08:16:22.000Z | 2021-08-22T08:16:22.000Z | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from codecs import open
from glob import glob
from itertools import chain
from os import path
from setuptools import find_packages, setup
name = "kedro"
here = path.abspath(path.dirname(__file__))
# Shared version specifiers reused across several optional dataset groups.
PANDAS = "pandas>=0.24"
SPARK = "pyspark>=2.2, <4.0"
HDFS = "hdfs>=2.5.8, <3.0"
S3FS = "s3fs>=0.3.0, <0.5"
# get package version
with open(path.join(here, name, "__init__.py"), encoding="utf-8") as f:
    result = re.search(r'__version__ = ["\']([^"\']+)', f.read())
    if not result:
        raise ValueError("Can't find the version in kedro/__init__.py")
version = result.group(1)
# get the dependencies and installs
with open("requirements.txt", "r", encoding="utf-8") as f:
    requires = [x.strip() for x in f if x.strip()]
# get test dependencies and installs
with open("test_requirements.txt", "r", encoding="utf-8") as f:
    test_requires = [x.strip() for x in f if x.strip() and not x.startswith("-r")]
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    readme = f.read()
# NOTE: the comprehension variable ``name`` shadows the package name above
# but does not leak out of the comprehension scope in Python 3.
doc_html_files = [
    name.replace("kedro/", "", 1)
    for name in glob("kedro/framework/html/**/*", recursive=True)
]
template_files = []
# The extra dot-patterns pick up hidden files/directories under templates.
for pattern in ["**/*", "**/.*", "**/.*/**", "**/.*/.**"]:
    template_files.extend(
        [
            name.replace("kedro/", "", 1)
            for name in glob("kedro/templates/" + pattern, recursive=True)
        ]
    )
def _collect_requirements(requires):
return sorted(set(chain.from_iterable(requires.values())))
# Per-dataset optional dependency groups. Keys are the dataset class paths
# that the extra enables; values are the pip requirements they need.
api_require = {"api.APIDataSet": ["requests~=2.20"]}
biosequence_require = {"biosequence.BioSequenceDataSet": ["biopython~=1.73"]}
dask_require = {"dask.ParquetDataSet": ["dask[complete]~=2.6"]}
geopandas_require = {
    "geopandas.GeoJSONDataSet": ["geopandas>=0.6.0, <1.0", "pyproj>=2.2.0, <3.0"]
}
matplotlib_require = {"matplotlib.MatplotlibWriter": ["matplotlib>=3.0.3, <4.0"]}
holoviews_require = {"holoviews.HoloviewsWriter": ["holoviews~=1.13.0"]}
networkx_require = {"networkx.NetworkXDataSet": ["networkx~=2.4"]}
pandas_require = {
    "pandas.CSVDataSet": [PANDAS],
    "pandas.ExcelDataSet": [PANDAS, "xlrd~=1.0", "xlsxwriter~=1.0"],
    "pandas.AppendableExcelDataSet": [PANDAS, "openpyxl>=3.0.3, <4.0"],
    "pandas.FeatherDataSet": [PANDAS],
    "pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
    "pandas.HDFDataSet": [PANDAS, "tables~=3.6"],
    "pandas.JSONDataSet": [PANDAS],
    "pandas.ParquetDataSet": [PANDAS, "pyarrow>=0.12.0, <1.0.0"],
    "pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"],
}
pillow_require = {"pillow.ImageDataSet": ["Pillow~=7.1.2"]}
spark_require = {
    "spark.SparkDataSet": [SPARK, HDFS, S3FS],
    "spark.SparkHiveDataSet": [SPARK, HDFS, S3FS],
    "spark.SparkJDBCDataSet": [SPARK, HDFS, S3FS],
}
tensorflow_required = {
    "tensorflow.TensorflowModelDataset": [
        # currently only TensorFlow V2 supported for saving and loading.
        # V1 requires HDF5 and serializes differently
        "tensorflow~=2.0",
    ]
}
yaml_require = {"yaml.YAMLDataSet": [PANDAS, "PyYAML>=4.2, <6.0"]}
# User-facing pip extras (e.g. ``pip install kedro[pandas]``), combining the
# groups above both per-library and per-dataset-class.
extras_require = {
    "api": _collect_requirements(api_require),
    "biosequence": _collect_requirements(biosequence_require),
    "dask": _collect_requirements(dask_require),
    "docs": [
        "sphinx>=1.8.4, <2.0",
        "sphinx_rtd_theme==0.4.3",
        "docutils",
        "nbsphinx==0.4.2",
        "nbstripout==0.3.3",
        "recommonmark==0.5.0",
        "sphinx-autodoc-typehints==1.6.0",
        "sphinx_copybutton==0.2.5",
        "jupyter_client>=5.1, <7.0",
        "tornado~=6.1",
        "ipykernel~=5.3",
    ],
    "geopandas": _collect_requirements(geopandas_require),
    "ipython": ["ipython==7.10"],
    "matplotlib": _collect_requirements(matplotlib_require),
    "holoviews": _collect_requirements(holoviews_require),
    "networkx": _collect_requirements(networkx_require),
    "notebook_templates": ["nbconvert>=5.3.1, <6.0", "nbformat~=4.4"],
    "pandas": _collect_requirements(pandas_require),
    "pillow": _collect_requirements(pillow_require),
    "profilers": ["memory_profiler>=0.50.0, <1.0"],
    "spark": _collect_requirements(spark_require),
    "tensorflow": _collect_requirements(tensorflow_required),
    "yaml": _collect_requirements(yaml_require),
    **api_require,
    **biosequence_require,
    **dask_require,
    **geopandas_require,
    **matplotlib_require,
    **holoviews_require,
    **networkx_require,
    **pandas_require,
    **pillow_require,
    **spark_require,
    **tensorflow_required,
    **yaml_require,
}
# "all" aggregates every optional dependency declared above.
extras_require["all"] = _collect_requirements(extras_require)
setup(
    name=name,
    version=version,
    description="Kedro helps you build production-ready data and analytics pipelines",
    license="Apache Software License (Apache 2.0)",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/quantumblacklabs/kedro",
    python_requires=">=3.6, <3.9",
    packages=find_packages(exclude=["docs*", "tests*", "tools*", "features*"]),
    include_package_data=True,
    tests_require=test_requires,
    install_requires=requires,
    author="QuantumBlack Labs",
    entry_points={"console_scripts": ["kedro = kedro.framework.cli:main"]},
    package_data={
        name: ["py.typed", "test_requirements.txt"] + template_files + doc_html_files
    },
    zip_safe=False,
    keywords="pipelines, machine learning, data pipelines, data science, data engineering",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    extras_require=extras_require,
)
| 36.861538 | 91 | 0.681553 |
c5a8e9579d92e5c26f0ddd105c7c1b6e5c46fefb | 4,142 | py | Python | webui/server/tornadows/webservices.py | puneet739/ChatLearner | b50bd15a00866e7dfdea39402e1158240c410289 | [
"Apache-2.0"
] | 590 | 2017-06-20T02:31:44.000Z | 2022-02-03T03:36:04.000Z | webui/server/tornadows/webservices.py | puneet739/ChatLearner | b50bd15a00866e7dfdea39402e1158240c410289 | [
"Apache-2.0"
] | 84 | 2017-06-28T10:04:02.000Z | 2021-01-14T14:49:35.000Z | webui/server/tornadows/webservices.py | puneet739/ChatLearner | b50bd15a00866e7dfdea39402e1158240c410289 | [
"Apache-2.0"
] | 225 | 2017-07-19T02:05:10.000Z | 2022-01-12T16:29:07.000Z | #!/usr/bin/env python
#
# Copyright 2011 Rodrigo Ancavil del Pino
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Implementation of webservices API 0.9 """
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.wsgi
class WebService(tornado.web.Application):
    """Tornado application serving SOAP web services.

    Accepts either a list of ``(service_name, handler_class, settings)``
    tuples (with ``object`` left as None), or a single service name plus
    its handler class.  Each service is routed both with and without a
    trailing slash.
    """

    def __init__(self, services, object=None, wsdl=None):
        """Build the URL spec list and initialize the tornado Application."""
        if isinstance(services, list) and object == None:
            handlers = []
            for entry in services:
                base = r"/" + str(entry[0])
                handlers.append((base, entry[1], entry[2]))
                handlers.append((base + "/", entry[1], entry[2]))
            tornado.web.Application.__init__(self, handlers)
        else:
            self._service = services
            self._object = object
            self._services = [
                (r"/" + str(self._service), self._object),
                (r"/" + str(self._service) + "/", self._object),
            ]
            tornado.web.Application.__init__(self, self._services)
class WSGIWebService(tornado.wsgi.WSGIApplication):
    """WSGI variant of :class:`WebService` for use with any WSGI server.

    Accepts either a list of ``(service_name, handler_class)`` tuples
    (with ``object`` left as None), or a single service name plus its
    handler class.  Each service is routed both with and without a
    trailing slash.
    """

    def __init__(self, services, object=None, wsdl=None, default_host="", **settings):
        """Build the URL spec list and initialize the WSGI application."""
        if isinstance(services, list) and object == None:
            handlers = []
            for entry in services:
                base = r"/" + str(entry[0])
                handlers.append((base, entry[1]))
                handlers.append((base + "/", entry[1]))
            tornado.wsgi.WSGIApplication.__init__(
                self, handlers, default_host, **settings)
        else:
            self._service = services
            self._object = object
            self._services = [
                (r"/" + str(self._service), self._object),
                (r"/" + str(self._service) + "/", self._object),
            ]
            tornado.wsgi.WSGIApplication.__init__(
                self, self._services, default_host, **settings)
06ba0849de83fce796e7d5a84b58163e0454220c | 18,198 | py | Python | laceworkreports/cli/ReportHandlers/ContainerIntegrationCoverageHandler/ContainerIntegrationCoverageHandler.py | laceworkps/laceworkreports | 43f5189d2da39fc54ca24dc3c6c96d320bc9f8a5 | [
"BSD-3-Clause"
] | null | null | null | laceworkreports/cli/ReportHandlers/ContainerIntegrationCoverageHandler/ContainerIntegrationCoverageHandler.py | laceworkps/laceworkreports | 43f5189d2da39fc54ca24dc3c6c96d320bc9f8a5 | [
"BSD-3-Clause"
] | 44 | 2022-01-30T04:24:46.000Z | 2022-03-31T02:31:17.000Z | laceworkreports/cli/ReportHandlers/ContainerIntegrationCoverageHandler/ContainerIntegrationCoverageHandler.py | laceworkps/laceworkreports | 43f5189d2da39fc54ca24dc3c6c96d320bc9f8a5 | [
"BSD-3-Clause"
] | null | null | null | """
Report Handler
"""
from typing import Optional
import logging
from datetime import datetime, timedelta
from pathlib import Path
import typer
from laceworkreports import common
from laceworkreports.sdk.DataHandlers import DataHandlerTypes, ExportHandler
from laceworkreports.sdk.ReportHelpers import ContainerIntegrationQueries, ReportHelper
# Typer sub-application holding the container-integration-coverage commands.
app = typer.Typer(no_args_is_help=True)
@app.command(no_args_is_help=True, help="Generate HTML report")
def html(
    ctx: typer.Context,
    start_time: datetime = typer.Option(
        (datetime.utcnow() - timedelta(hours=25)).strftime(common.ISO_FORMAT),
        formats=[common.ISO_FORMAT],
        help="Start time for query period",
    ),
    end_time: datetime = typer.Option(
        (datetime.utcnow()).strftime(common.ISO_FORMAT),
        formats=[common.ISO_FORMAT],
        help="End time for query period",
    ),
    subaccounts: bool = typer.Option(
        False,
        help="Enumerate subaccounts",
        envvar=common.LACEWORK_REPORTS_SUBACCOUNTS,
    ),
    file_path: str = typer.Option(
        ...,
        help="Path to exported result",
        envvar=common.LACEWORK_REPORTS_FILE_PATH,
    ),
    template_path: str = typer.Option(
        Path(__file__)
        .resolve()
        .parent.joinpath("container_integration_coverage.html.j2"),
        help="Path to jinja2 template. Results will be passed as 'dataset' variable.",
        envvar=common.LACEWORK_REPORTS_TEMPLATE_PATH,
    ),
) -> None:
    """Generate the container integration coverage report as an HTML page.

    Syncs integrated container repos, cloud accounts, and agent-discovered
    data from the Lacework API into a local sqlite database, aggregates the
    coverage with SQL queries, and renders the result through a jinja2
    template (passed the data as the ``dataset`` variable).

    Fixes in this revision:
    - ``logging.warn`` (deprecated alias) replaced with ``logging.warning``.
    - typo "specificed" corrected to "specified" in the log and exception
      messages.
    - removed the unused ``lacework_account_count`` local.
    """
    # connect the lacework client
    lw = common.config.connect()

    # report details
    report_title = "Container Integration Coverage"
    db_table = "container_repos"

    reportHelper = ReportHelper()
    db_path = Path("database.db")
    # db_path.unlink(missing_ok=True)
    db_connection = f"sqlite:///{db_path.absolute()}?check_same_thread=False"

    # start from a clean slate: drop any tables left over from a prior run
    reportHelper.sqlite_drop_table(db_table, db_connection)
    reportHelper.sqlite_drop_table("machines", db_connection)
    reportHelper.sqlite_drop_table("cloud_accounts", db_connection)
    reportHelper.sqlite_drop_table("discovered_cloud_accounts", db_connection)
    reportHelper.sqlite_drop_table("discovered_container_repos", db_connection)

    has_subaccounts = False
    if subaccounts:
        lwAccounts = reportHelper.get_subaccounts(client=lw)
        if len(lwAccounts) == 0:
            logging.error("Subaccounts specified but none found")
            raise Exception("Subaccounts specified but none found")
        else:
            has_subaccounts = True
    else:
        # no enumeration requested: operate on the current account only
        lwAccounts = [{"accountName": lw._account}]

    for lwAccount in lwAccounts:
        if has_subaccounts:
            logging.info(f"Switching to subaccount context: {lwAccount['accountName']}")
            lw.set_subaccount(lwAccount["accountName"])

        # sync all integrated repos (note: no cloud account associated at repo level)
        logging.info("Syncing integrated container repos")
        reportHelper.get_container_repos(
            client=lw,
            lwAccount=lwAccount["accountName"],
            start_time=start_time,
            end_time=end_time,
            use_sqlite=True,
            db_table=db_table,
            db_connection=db_connection,
        )

        # sync cloud accounts with deployed agents
        logging.info("Syncing cloud accounts with deployed agents")
        reportHelper.get_discovered_cloud_accounts(
            client=lw,
            lwAccount=lwAccount["accountName"],
            start_time=start_time,
            end_time=end_time,
            use_sqlite=True,
            db_table="discovered_cloud_accounts",
            db_connection=db_connection,
        )

        # best effort: an empty/missing table simply yields no discovered accounts
        try:
            result = reportHelper.sqlite_queries(
                queries={
                    "cloud_account_query": """
                        SELECT
                            DISTINCT ACCOUNTID
                        FROM
                            :db_table
                        WHERE
                            ACCOUNTID IS NOT NULL
                        """
                },
                db_connection=db_connection,
                db_table="discovered_cloud_accounts",
            )
            discovered_cloud_accounts = [
                x["ACCOUNTID"] for x in result["cloud_account_query"]
            ]
        except Exception:
            discovered_cloud_accounts = []

        # get cloud accounts and sync to sqlite
        cloud_accounts = reportHelper.get_cloud_accounts(
            client=lw, lwAccount=lwAccount["accountName"]
        )
        ExportHandler(
            format=DataHandlerTypes.SQLITE,
            results=[{"data": cloud_accounts}],
            file_path=file_path,
            db_table="cloud_accounts",
            db_connection=db_connection,
        ).export()

        logging.info(
            f"Discovered {len(discovered_cloud_accounts)} cloud accounts with agents deployed: {discovered_cloud_accounts}"
        )

        for cloud_account in cloud_accounts:
            if (
                cloud_account["enabled"] == 1
                and cloud_account["accountId"] in discovered_cloud_accounts
            ):
                # sync machines for this cloud account
                logging.info(
                    f"Syncing container repos for {lwAccount['accountName']}:{cloud_account['accountId']}"
                )

                # sync all repos with active containers
                reportHelper.get_discovered_container_repos(
                    client=lw,
                    lwAccount=lwAccount["accountName"],
                    cloud_account=cloud_account["accountId"],
                    start_time=start_time,
                    end_time=end_time,
                    use_sqlite=True,
                    db_table="discovered_container_repos",
                    db_connection=db_connection,
                )

                # ensure we have a container_repos table (queries below join
                # against it even when no repos were synced)
                if not reportHelper.sqlite_table_exists(
                    db_table="container_repos", db_connection=db_connection
                ):
                    container_repos_table = """
                        CREATE TABLE container_repos (
                            "createdOrUpdatedBy" TEXT,
                            "createdOrUpdatedTime" TEXT,
                            enabled BIGINT,
                            "intgGuid" TEXT,
                            "isOrg" BIGINT,
                            name TEXT,
                            props JSON,
                            type TEXT,
                            data JSON,
                            "serverToken" JSON,
                            state JSON, accountId TEXT, lwAccount TEXT)
                        """
                    reportHelper.sqlite_execute(
                        query=container_repos_table, db_connection=db_connection
                    )

                # ensure we have a discovered_container_repos table
                if not reportHelper.sqlite_table_exists(
                    db_table="discovered_container_repos", db_connection=db_connection
                ):
                    discovered_container_repos_table = """
                        CREATE TABLE discovered_container_repos (
                            "LWACCOUNT" TEXT,
                            "ACCOUNTID" TEXT,
                            "IMAGE_ID" TEXT,
                            "REPO" TEXT,
                            "TAG" TEXT
                        )
                        """
                    reportHelper.sqlite_execute(
                        query=discovered_container_repos_table,
                        db_connection=db_connection,
                    )
            else:
                logging.info(
                    f"Skipping disabled or inactive account {lwAccount['accountName']}:{cloud_account['accountId']}"
                )

    # use sqlite query to generate final result
    results = reportHelper.sqlite_queries(
        queries=ContainerIntegrationQueries,
        db_table=db_table,
        db_connection=db_connection,
    )

    if len(results["report"]) > 0:
        report = results["report"]

        # return additional stats under summary
        stats = {}
        for key in [x for x in results.keys() if x != "report"]:
            stats[key] = results[key]

        # write jinja template
        ExportHandler(
            format=DataHandlerTypes.JINJA2,
            results=[
                {
                    "data": [
                        {
                            "name": db_table,
                            "report": report,
                            "summary": {
                                "rows": len(report),
                                "reportTitle": report_title,
                                "stats": stats,
                            },
                        }
                    ]
                }
            ],
            template_path=template_path,
            file_path=file_path,
        ).export()
    else:
        logging.warning("No results found")
@app.command(name="csv", no_args_is_help=True, help="Generate CSV Report")
def csv_handler(
    ctx: typer.Context,
    start_time: datetime = typer.Option(
        (datetime.utcnow() - timedelta(hours=25)).strftime(common.ISO_FORMAT),
        formats=[common.ISO_FORMAT],
        help="Start time for query period",
    ),
    end_time: datetime = typer.Option(
        (datetime.utcnow()).strftime(common.ISO_FORMAT),
        formats=[common.ISO_FORMAT],
        help="End time for query period",
    ),
    subaccounts: bool = typer.Option(
        False,
        help="Enumerate subaccounts",
        envvar=common.LACEWORK_REPORTS_SUBACCOUNTS,
    ),
    summary_only: bool = typer.Option(
        False,
        help="Return only summary details",
        # NOTE(review): this reuses LACEWORK_REPORTS_SUBACCOUNTS, which looks
        # like a copy/paste slip -- confirm whether a dedicated
        # LACEWORK_REPORTS_SUMMARY_ONLY constant exists before changing it.
        envvar=common.LACEWORK_REPORTS_SUBACCOUNTS,
    ),
    file_path: str = typer.Option(
        ...,
        help="Path to exported result",
        envvar=common.LACEWORK_REPORTS_FILE_PATH,
    ),
) -> None:
    """Generate the container integration coverage report as a CSV file.

    Mirrors the HTML command's sync/aggregation pipeline, then exports
    either the full report or (with ``--summary-only``) just the
    ``account_coverage`` summary rows.

    Fixes in this revision:
    - ``logging.warn`` (deprecated alias) replaced with ``logging.warning``.
    - typo "specificed" corrected to "specified" in the log and exception
      messages.
    - removed the unused ``lacework_account_count`` local.
    """
    # connect the lacework client
    lw = common.config.connect()

    # report details
    db_table = "container_repos"

    reportHelper = ReportHelper()
    db_path = Path("database.db")
    # db_path.unlink(missing_ok=True)
    db_connection = f"sqlite:///{db_path.absolute()}?check_same_thread=False"

    # start from a clean slate: drop any tables left over from a prior run
    reportHelper.sqlite_drop_table(db_table, db_connection)
    reportHelper.sqlite_drop_table("machines", db_connection)
    reportHelper.sqlite_drop_table("cloud_accounts", db_connection)
    reportHelper.sqlite_drop_table("discovered_cloud_accounts", db_connection)
    reportHelper.sqlite_drop_table("discovered_container_repos", db_connection)

    has_subaccounts = False
    if subaccounts:
        lwAccounts = reportHelper.get_subaccounts(client=lw)
        if len(lwAccounts) == 0:
            logging.error("Subaccounts specified but none found")
            raise Exception("Subaccounts specified but none found")
        else:
            has_subaccounts = True
    else:
        # no enumeration requested: operate on the current account only
        lwAccounts = [{"accountName": lw._account}]

    for lwAccount in lwAccounts:
        if has_subaccounts:
            logging.info(f"Switching to subaccount context: {lwAccount['accountName']}")
            lw.set_subaccount(lwAccount["accountName"])

        # sync all integrated repos (note: no cloud account associated at repo level)
        logging.info("Syncing integrated container repos")
        reportHelper.get_container_repos(
            client=lw,
            lwAccount=lwAccount["accountName"],
            start_time=start_time,
            end_time=end_time,
            use_sqlite=True,
            db_table=db_table,
            db_connection=db_connection,
        )

        # sync cloud accounts with deployed agents
        logging.info("Syncing cloud accounts with deployed agents")
        reportHelper.get_discovered_cloud_accounts(
            client=lw,
            lwAccount=lwAccount["accountName"],
            start_time=start_time,
            end_time=end_time,
            use_sqlite=True,
            db_table="discovered_cloud_accounts",
            db_connection=db_connection,
        )

        # best effort: an empty/missing table simply yields no discovered accounts
        try:
            result = reportHelper.sqlite_queries(
                queries={
                    "cloud_account_query": """
                        SELECT
                            DISTINCT ACCOUNTID
                        FROM
                            :db_table
                        WHERE
                            ACCOUNTID IS NOT NULL
                        """
                },
                db_connection=db_connection,
                db_table="discovered_cloud_accounts",
            )
            discovered_cloud_accounts = [
                x["ACCOUNTID"] for x in result["cloud_account_query"]
            ]
        except Exception:
            discovered_cloud_accounts = []

        # get cloud accounts and sync to sqlite
        cloud_accounts = reportHelper.get_cloud_accounts(
            client=lw, lwAccount=lwAccount["accountName"]
        )
        ExportHandler(
            format=DataHandlerTypes.SQLITE,
            results=[{"data": cloud_accounts}],
            file_path=file_path,
            db_table="cloud_accounts",
            db_connection=db_connection,
        ).export()

        logging.info(
            f"Discovered {len(discovered_cloud_accounts)} cloud accounts with agents deployed: {discovered_cloud_accounts}"
        )

        for cloud_account in cloud_accounts:
            if (
                cloud_account["enabled"] == 1
                and cloud_account["accountId"] in discovered_cloud_accounts
            ):
                # sync machines for this cloud account
                logging.info(
                    f"Syncing container repos for {lwAccount['accountName']}:{cloud_account['accountId']}"
                )

                # sync all repos with active containers
                reportHelper.get_discovered_container_repos(
                    client=lw,
                    lwAccount=lwAccount["accountName"],
                    cloud_account=cloud_account["accountId"],
                    start_time=start_time,
                    end_time=end_time,
                    use_sqlite=True,
                    db_table="discovered_container_repos",
                    db_connection=db_connection,
                )

                # ensure we have a container_repos table (queries below join
                # against it even when no repos were synced)
                if not reportHelper.sqlite_table_exists(
                    db_table="container_repos", db_connection=db_connection
                ):
                    container_repos_table = """
                        CREATE TABLE container_repos (
                            "createdOrUpdatedBy" TEXT,
                            "createdOrUpdatedTime" TEXT,
                            enabled BIGINT,
                            "intgGuid" TEXT,
                            "isOrg" BIGINT,
                            name TEXT,
                            props JSON,
                            type TEXT,
                            data JSON,
                            "serverToken" JSON,
                            state JSON, accountId TEXT, lwAccount TEXT)
                        """
                    reportHelper.sqlite_execute(
                        query=container_repos_table, db_connection=db_connection
                    )

                # ensure we have a discovered_container_repos table
                if not reportHelper.sqlite_table_exists(
                    db_table="discovered_container_repos", db_connection=db_connection
                ):
                    discovered_container_repos_table = """
                        CREATE TABLE discovered_container_repos (
                            "LWACCOUNT" TEXT,
                            "ACCOUNTID" TEXT,
                            "IMAGE_ID" TEXT,
                            "REPO" TEXT,
                            "TAG" TEXT
                        )
                        """
                    reportHelper.sqlite_execute(
                        query=discovered_container_repos_table,
                        db_connection=db_connection,
                    )
            else:
                logging.info(
                    f"Skipping disabled or inactive account {lwAccount['accountName']}:{cloud_account['accountId']}"
                )

    # use sqlite query to generate final result
    results = reportHelper.sqlite_queries(
        queries=ContainerIntegrationQueries,
        db_table=db_table,
        db_connection=db_connection,
    )

    if len(results["report"]) > 0:
        report = results["report"]

        if summary_only:
            # only the per-account coverage summary is exported
            report = results["account_coverage"]

        logging.info("Building CSV from resultant data...")
        ExportHandler(
            format=DataHandlerTypes.CSV,
            results=[{"data": report}],
            file_path=file_path,
        ).export()
    else:
        logging.warning("No results found")
if __name__ == "__main__":
    # Allow running this module directly as a standalone Typer app.
    app()
| 37.991649 | 123 | 0.520167 |
6c7d43490077041d4790bd07b1d5f67d43e3a906 | 3,481 | py | Python | examples/impala_openai_gym_with_lstm.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 290 | 2018-07-29T15:30:57.000Z | 2022-03-19T02:46:53.000Z | examples/impala_openai_gym_with_lstm.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 76 | 2018-10-19T08:42:01.000Z | 2020-05-03T08:34:21.000Z | examples/impala_openai_gym_with_lstm.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 41 | 2018-10-30T07:05:05.000Z | 2022-03-01T08:28:24.000Z | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Example script for training a single-node IMPALA [1] agent on an OpenAI gym environment.
A single-node agent uses multi-threading (via tf's queue runners) to collect experiences (using
the "mu"-policy) and a learner (main) thread to update the model (the "pi"-policy).
Usage:
python impala_openai_gym_with_lstm.py [--config configs/impala_openai_gym_with_lstm.json] [--env LunarLander-v2]?
[1] IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures - Espeholt, Soyer,
Munos et al. - 2018 (https://arxiv.org/abs/1802.01561)
"""
import json
import os
import sys
from absl import flags
import numpy as np
import time
from rlgraph.agents import Agent
from rlgraph.environments import OpenAIGymEnv
# Command-line flags (absl): agent config path, optional gym env override,
# and how many workers (if any) should render their environment.
FLAGS = flags.FLAGS
flags.DEFINE_string('config', './configs/impala_openai_gym_with_lstm.json', 'Agent config file.')
flags.DEFINE_string('env', None, 'openAI gym environment ID.')
flags.DEFINE_integer('visualize', -1, 'Show training for n worker(s).')
def main(argv):
    """Run single-node IMPALA training against an OpenAI gym environment.

    Parameters
    ----------
    argv : list[str]
        Raw command line arguments (``sys.argv``), parsed via absl flags.
    """
    try:
        FLAGS(argv)
    except flags.Error as e:
        # Fixed: the format string previously contained doubled backslashes
        # ('\\n'), which printed a literal "\n" instead of a newline.
        print('%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS))

    agent_config_path = os.path.join(os.getcwd(), FLAGS.config)
    with open(agent_config_path, 'rt') as fp:
        agent_config = json.load(fp)

    # Override openAI gym env per command line.
    if FLAGS.env is None:
        env_spec = agent_config["environment_spec"]
    else:
        env_spec = dict(type="openai-gym", gym_env=FLAGS.env)

    # Override number of visualized envs per command line.
    if FLAGS.visualize != -1:
        env_spec["visualize"] = FLAGS.visualize

    # Build a throwaway env only to extract the state/action spaces,
    # then terminate it; the agent spawns its own envs internally.
    dummy_env = OpenAIGymEnv.from_spec(env_spec)
    agent = Agent.from_spec(
        agent_config,
        state_space=dummy_env.state_space,
        action_space=dummy_env.action_space
    )
    dummy_env.terminate()

    learn_updates = 6000
    mean_returns = []
    for i in range(learn_updates):
        ret = agent.update()
        mean_return = _calc_mean_return(ret)
        mean_returns.append(mean_return)
        print("Iteration={} Loss={:.4f} Avg-reward={:.2f}".format(i, float(ret[1]), mean_return))

    print("Mean return: {:.2f} / over the last 10 episodes: {:.2f}".format(
        np.nanmean(mean_returns), np.nanmean(mean_returns[-10:])
    ))

    # brief pauses give the agent's worker threads time to drain cleanly
    time.sleep(1)
    agent.terminate()
    time.sleep(3)
def _calc_mean_return(records):
size = records[3]["rewards"].size
rewards = records[3]["rewards"].reshape((size,))
terminals = records[3]["terminals"].reshape((size,))
returns = list()
return_ = 0.0
for r, t in zip(rewards, terminals):
return_ += r
if t:
returns.append(return_)
return_ = 0.0
return np.nanmean(returns)
if __name__ == '__main__':
    # Entry point: absl flags are parsed from sys.argv inside main().
    main(sys.argv)
| 31.93578 | 113 | 0.674519 |
5fdf40d16c67f78a863e7fd3a19f2f293d1422f0 | 29,660 | py | Python | zipline/finance/ledger.py | TheTradingAngel/zipline-trader | fdce0641005371f2b523c6faeb551c3d40273902 | [
"Apache-2.0"
] | null | null | null | zipline/finance/ledger.py | TheTradingAngel/zipline-trader | fdce0641005371f2b523c6faeb551c3d40273902 | [
"Apache-2.0"
] | null | null | null | zipline/finance/ledger.py | TheTradingAngel/zipline-trader | fdce0641005371f2b523c6faeb551c3d40273902 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from collections import namedtuple, OrderedDict
from functools import partial
from math import isnan
import logbook
import numpy as np
import pandas as pd
from six import iteritems, itervalues, PY2
from zipline.assets import Future
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.utils.sentinel import sentinel
from .position import Position
from ._finance_ext import (
PositionStats,
calculate_position_tracker_stats,
update_position_last_sale_prices,
)
# Module-level logbook logger shared by the ledger/position-tracking classes.
log = logbook.Logger('Performance')
class PositionTracker(object):
    """The current state of the positions held.

    Parameters
    ----------
    data_frequency : {'daily', 'minute'}
        The data frequency of the simulation.
    """
    def __init__(self, data_frequency):
        # asset -> Position, in insertion order.
        self.positions = OrderedDict()
        # pay_date -> list of earned-but-unpaid cash dividend payments.
        self._unpaid_dividends = {}
        # pay_date -> list of earned-but-unpaid stock dividend payments.
        self._unpaid_stock_dividends = {}
        # lazily-maintained user-facing (protocol) view of the positions.
        self._positions_store = zp.Positions()
        self.data_frequency = data_frequency
        # cache the stats until something alters our positions
        self._dirty_stats = True
        self._stats = PositionStats.new()

    def update_position(self,
                        asset,
                        amount=None,
                        last_sale_price=None,
                        last_sale_date=None,
                        cost_basis=None,
                        take_profit_price=None,
                        stop_loss_price=None):
        """Create or update the position for ``asset`` in place.

        Only fields passed as non-None are overwritten. If the resulting
        amount is exactly 0 the position (and its protocol view) is removed.
        """
        self._dirty_stats = True
        if asset not in self.positions:
            position = Position(asset)
            self.positions[asset] = position
        else:
            position = self.positions[asset]
        if amount is not None:
            position.amount = amount
        if last_sale_price is not None:
            position.last_sale_price = last_sale_price
        if last_sale_date is not None:
            position.last_sale_date = last_sale_date
        if cost_basis is not None:
            position.cost_basis = cost_basis
        if take_profit_price is not None:
            position.take_profit_price = take_profit_price
        if stop_loss_price is not None:
            position.stop_loss_price = stop_loss_price
        if position.amount == 0:
            del self.positions[asset]
            try:
                # if this position exists in our user-facing dictionary,
                # remove it as well.
                del self._positions_store[asset]
            except KeyError:
                pass

    def execute_transaction(self, txn):
        """Apply a fill (transaction) to the tracked positions.

        Positions driven (near) zero are dropped entirely.
        """
        self._dirty_stats = True
        asset = txn.asset
        if asset not in self.positions:
            position = Position(asset)
            self.positions[asset] = position
        else:
            position = self.positions[asset]
        position.update(txn)
        if abs(position.amount) < 1e-3:  # To catch float rounding errors
            del self.positions[asset]
            try:
                # if this position exists in our user-facing dictionary,
                # remove it as well.
                del self._positions_store[asset]
            except KeyError:
                pass

    def update_exit_prices(self, asset, take_profit_price=None, stop_loss_price=None):
        """Update take-profit/stop-loss levels on an existing position.

        A no-op when ``asset`` has no open position.
        """
        if asset in self.positions:
            self.positions[asset].update_exit_prices(
                take_profit_price=take_profit_price, stop_loss_price=stop_loss_price)

    def handle_commission(self, asset, cost):
        """Fold a commission charge into the position's cost basis."""
        # Adjust the cost basis of the stock if we own it
        if asset in self.positions:
            self._dirty_stats = True
            self.positions[asset].adjust_commission_cost_basis(asset, cost)

    def handle_splits(self, splits):
        """Processes a list of splits by modifying any positions as needed.

        Parameters
        ----------
        splits: list
            A list of splits. Each split is a tuple of (asset, ratio).

        Returns
        -------
        int: The leftover cash from fractional shares after modifying each
            position.
        """
        total_leftover_cash = 0

        for asset, ratio in splits:
            if asset in self.positions:
                self._dirty_stats = True

                # Make the position object handle the split. It returns the
                # leftover cash from a fractional share, if there is any.
                position = self.positions[asset]
                leftover_cash = position.handle_split(asset, ratio)
                total_leftover_cash += leftover_cash

        return total_leftover_cash

    def earn_dividends(self, cash_dividends, stock_dividends):
        """Given a list of dividends whose ex_dates are all the next trading
        day, calculate and store the cash and/or stock payments to be paid on
        each dividend's pay date.

        Parameters
        ----------
        cash_dividends : iterable of (asset, amount, pay_date) namedtuples

        stock_dividends: iterable of (asset, payment_asset, ratio, pay_date)
            namedtuples.
        """
        for cash_dividend in cash_dividends:
            self._dirty_stats = True  # only mark dirty if we pay a dividend

            # Store the earned dividends so that they can be paid on the
            # dividends' pay_dates.
            div_owed = self.positions[cash_dividend.asset].earn_dividend(
                cash_dividend,
            )
            try:
                self._unpaid_dividends[cash_dividend.pay_date].append(div_owed)
            except KeyError:
                self._unpaid_dividends[cash_dividend.pay_date] = [div_owed]

        for stock_dividend in stock_dividends:
            self._dirty_stats = True  # only mark dirty if we pay a dividend

            div_owed = self.positions[
                stock_dividend.asset
            ].earn_stock_dividend(stock_dividend)
            try:
                self._unpaid_stock_dividends[stock_dividend.pay_date].append(
                    div_owed,
                )
            except KeyError:
                self._unpaid_stock_dividends[stock_dividend.pay_date] = [
                    div_owed,
                ]

    def pay_dividends(self, next_trading_day):
        """
        Returns a cash payment based on the dividends that should be paid out
        according to the accumulated bookkeeping of earned, unpaid, and stock
        dividends.
        """
        net_cash_payment = 0.0

        try:
            payments = self._unpaid_dividends[next_trading_day]
            # Mark these dividends as paid by dropping them from our unpaid
            del self._unpaid_dividends[next_trading_day]
        except KeyError:
            payments = []

        # representing the fact that we're required to reimburse the owner of
        # the stock for any dividends paid while borrowing.
        for payment in payments:
            net_cash_payment += payment['amount']

        # Add stock for any stock dividends paid. Again, the values here may
        # be negative in the case of short positions.
        try:
            stock_payments = self._unpaid_stock_dividends[next_trading_day]
        except KeyError:
            stock_payments = []

        for stock_payment in stock_payments:
            payment_asset = stock_payment['payment_asset']
            share_count = stock_payment['share_count']
            # note we create a Position for stock dividend if we don't
            # already own the asset
            if payment_asset in self.positions:
                position = self.positions[payment_asset]
            else:
                position = self.positions[payment_asset] = Position(
                    payment_asset,
                )

            position.amount += share_count

        return net_cash_payment

    def maybe_create_close_position_transaction(self, asset, dt, data_portal):
        """Build a transaction that would flatten our position in ``asset``.

        Returns None when there is no open position. The price used is the
        spot price at ``dt``, falling back to the position's last sale price
        when no spot price is available (NaN).
        """
        if not self.positions.get(asset):
            return None

        amount = self.positions.get(asset).amount
        price = data_portal.get_spot_value(
            asset, 'price', dt, self.data_frequency)

        # Get the last traded price if price is no longer available
        if isnan(price):
            price = self.positions.get(asset).last_sale_price

        return Transaction(
            asset=asset,
            amount=-amount,
            dt=dt,
            price=price,
            order_id=None,
        )

    def get_positions(self):
        """Return the protocol (user-facing) view of all open positions."""
        positions = self._positions_store

        for asset, pos in iteritems(self.positions):
            # Adds the new position if we didn't have one before, or overwrite
            # one we have currently
            positions[asset] = pos.protocol_position

        return positions

    def get_position_list(self):
        """Return open (non-zero) positions as a list of plain dicts."""
        return [
            pos.to_dict()
            for asset, pos in iteritems(self.positions)
            if pos.amount != 0
        ]

    def sync_last_sale_prices(self,
                              dt,
                              data_portal,
                              handle_non_market_minutes=False):
        """Refresh each position's last sale price/date as of ``dt``.

        When ``handle_non_market_minutes`` is True, prices come from the
        previous market minute (adjusted to ``dt``'s perspective).
        """
        self._dirty_stats = True

        if handle_non_market_minutes:
            previous_minute = data_portal.trading_calendar.previous_minute(dt)
            get_price = partial(
                data_portal.get_adjusted_value,
                field='price',
                dt=previous_minute,
                perspective_dt=dt,
                data_frequency=self.data_frequency,
            )

        else:
            get_price = partial(
                data_portal.get_scalar_asset_spot_value,
                field='price',
                dt=dt,
                data_frequency=self.data_frequency,
            )

        update_position_last_sale_prices(self.positions, get_price, dt)

    @property
    def stats(self):
        """The current status of the positions.

        Returns
        -------
        stats : PositionStats
            The current stats position stats.

        Notes
        -----
        This is cached, repeated access will not recompute the stats until
        the stats may have changed.
        """
        if self._dirty_stats:
            calculate_position_tracker_stats(self.positions, self._stats)
            self._dirty_stats = False

        return self._stats
if PY2:
    def move_to_end(ordered_dict, key, last=False):
        """Python 2 backport of ``OrderedDict.move_to_end``."""
        if last:
            ordered_dict[key] = ordered_dict.pop(key)
        else:
            # please don't do this in python 2 ;_;
            new_first_element = ordered_dict.pop(key)

            # the items (without the given key) in the order they were inserted
            items = ordered_dict.items()

            # reset the ordered_dict to re-insert in the new order
            ordered_dict.clear()

            ordered_dict[key] = new_first_element

            # add the items back in their original order
            ordered_dict.update(items)
else:
    # Python 3: use the stdlib method directly (as an unbound function).
    move_to_end = OrderedDict.move_to_end
# Lightweight record of per-period liquidation/leverage statistics.
PeriodStats = namedtuple(
    'PeriodStats',
    'net_liquidation gross_leverage net_leverage',
)

# Sentinel marking account fields that the broker blotter has not overridden.
not_overridden = sentinel(
    'not_overridden',
    'Mark that an account field has not been overridden',
)
class Ledger(object):
    """The ledger tracks all orders and transactions as well as the current
    state of the portfolio and positions.

    Attributes
    ----------
    portfolio : zipline.protocol.Portfolio
        The updated portfolio being managed.
    account : zipline.protocol.Account
        The updated account being managed.
    position_tracker : PositionTracker
        The current set of positions.
    todays_returns : float
        The current day's returns. In minute emission mode, this is the partial
        day's returns. In daily emission mode, this is
        ``daily_returns[session]``.
    daily_returns_series : pd.Series
        The daily returns series. Days that have not yet finished will hold
        a value of ``np.nan``.
    daily_returns_array : np.ndarray
        The daily returns as an ndarray. Days that have not yet finished will
        hold a value of ``np.nan``.
    """
    def __init__(self, trading_sessions, capital_base, data_frequency):
        if len(trading_sessions):
            start = trading_sessions[0]
        else:
            start = None

        # Have some fields of the portfolio changed? This should be accessed
        # through ``self._dirty_portfolio``
        self.__dirty_portfolio = False
        self._immutable_portfolio = zp.Portfolio(start, capital_base)
        # mutable write-through view over the immutable portfolio object
        self._portfolio = zp.MutableView(self._immutable_portfolio)

        self.daily_returns_series = pd.Series(
            np.nan,
            index=trading_sessions,
        )
        # Get a view into the storage of the returns series. Metrics
        # can access this directly in minute mode for performance reasons.
        self.daily_returns_array = self.daily_returns_series.values

        self._previous_total_returns = 0

        # this is a component of the cache key for the account
        self._position_stats = None

        # Have some fields of the account changed?
        self._dirty_account = True
        self._immutable_account = zp.Account()
        self._account = zp.MutableView(self._immutable_account)

        # The broker blotter can override some fields on the account. This is
        # way to tangled up at the moment but we aren't fixing it today.
        self._account_overrides = {}

        self.position_tracker = PositionTracker(data_frequency)

        # dt -> list of processed transaction dicts for that bar
        self._processed_transactions = {}

        # dt -> OrderedDict of orders modified in that bar; plus a flat
        # id -> order mapping across the session
        self._orders_by_modified = {}
        self._orders_by_id = OrderedDict()

        # Keyed by asset, the previous last sale price of positions with
        # payouts on price differences, e.g. Futures.
        #
        # This dt is not the previous minute to the minute for which the
        # calculation is done, but the last sale price either before the period
        # start, or when the price at execution.
        self._payout_last_sale_prices = {}
@property
def todays_returns(self):
# compute today's returns in returns space instead of portfolio-value
# space to work even when we have capital changes
return (
(self.portfolio.returns + 1) /
(self._previous_total_returns + 1) -
1
)
    @property
    def _dirty_portfolio(self):
        # Read through the name-mangled backing attribute set in __init__.
        return self.__dirty_portfolio

    @_dirty_portfolio.setter
    def _dirty_portfolio(self, value):
        # Setting True also dirties the account, because the account is
        # derived from portfolio state; clearing only clears the portfolio.
        if value:
            # marking the portfolio as dirty also marks the account as dirty
            self.__dirty_portfolio = self._dirty_account = value
        else:
            self.__dirty_portfolio = value
    def start_of_session(self, session_label):
        """Reset per-session bookkeeping at the start of a trading session.

        Parameters
        ----------
        session_label : pd.Timestamp
            The label of the session that is about to begin.
        """
        self._processed_transactions.clear()
        self._orders_by_modified.clear()
        self._orders_by_id.clear()

        # Save the previous day's total returns so that ``todays_returns``
        # produces returns since yesterday. This does not happen in
        # ``end_of_session`` because we want ``todays_returns`` to produce the
        # correct value in metric ``end_of_session`` handlers.
        self._previous_total_returns = self.portfolio.returns
    def end_of_bar(self, session_ix):
        """Record partial (intra-day) returns at the end of a bar.

        Parameters
        ----------
        session_ix : int
            Positional index of the current session in the returns array.
        """
        # make daily_returns hold the partial returns, this saves many
        # metrics from doing a concat and copying all of the previous
        # returns
        self.daily_returns_array[session_ix] = self.todays_returns
    def end_of_session(self, session_ix):
        """Finalize the day's return in the daily returns time-series."""
        # save the daily returns time-series
        self.daily_returns_series[session_ix] = self.todays_returns
    def sync_last_sale_prices(self,
                              dt,
                              data_portal,
                              handle_non_market_minutes=False):
        """Refresh position prices as of ``dt`` and mark the portfolio dirty."""
        self.position_tracker.sync_last_sale_prices(
            dt,
            data_portal,
            handle_non_market_minutes=handle_non_market_minutes,
        )
        self._dirty_portfolio = True
@staticmethod
def _calculate_payout(multiplier, amount, old_price, price):
return (price - old_price) * multiplier * amount
def _cash_flow(self, amount):
self._dirty_portfolio = True
p = self._portfolio
p.cash_flow += amount
p.cash += amount
    def process_transaction(self, transaction):
        """Add a transaction to ledger, updating the current state as needed.

        Parameters
        ----------
        transaction : zp.Transaction
            The transaction to execute.
        """
        asset = transaction.asset
        if isinstance(asset, Future):
            # Futures settle on price differences: cash moves with the price
            # change since the last recorded sale price, not with notional.
            try:
                old_price = self._payout_last_sale_prices[asset]
            except KeyError:
                # First trade in this future: record the execution price as
                # the baseline for future payout calculations.
                self._payout_last_sale_prices[asset] = transaction.price
            else:
                position = self.position_tracker.positions[asset]
                amount = position.amount
                price = transaction.price

                self._cash_flow(
                    self._calculate_payout(
                        asset.price_multiplier,
                        amount,
                        old_price,
                        price,
                    ),
                )

                if amount + transaction.amount == 0:
                    # position fully closed; stop tracking its payout price
                    del self._payout_last_sale_prices[asset]
                else:
                    self._payout_last_sale_prices[asset] = price
        else:
            # Equities: cash moves by the full notional value of the trade.
            self._cash_flow(-(transaction.price * transaction.amount))

        self.position_tracker.execute_transaction(transaction)

        # we only ever want the dict form from now on
        transaction_dict = transaction.to_dict()
        try:
            self._processed_transactions[transaction.dt].append(
                transaction_dict,
            )
        except KeyError:
            self._processed_transactions[transaction.dt] = [transaction_dict]
    def process_splits(self, splits):
        """Processes a list of splits by modifying any positions as needed.

        Parameters
        ----------
        splits: list[(Asset, float)]
            A list of splits. Each split is a tuple of (asset, ratio).
        """
        leftover_cash = self.position_tracker.handle_splits(splits)
        if leftover_cash > 0:
            # fractional-share residue is returned to the portfolio as cash
            self._cash_flow(leftover_cash)
    def process_order(self, order):
        """Keep track of an order that was placed.

        Parameters
        ----------
        order : zp.Order
            The order to record.
        """
        try:
            dt_orders = self._orders_by_modified[order.dt]
        except KeyError:
            # first order modified in this bar: start a fresh ordered mapping
            self._orders_by_modified[order.dt] = OrderedDict([
                (order.id, order),
            ])
            self._orders_by_id[order.id] = order
        else:
            self._orders_by_id[order.id] = dt_orders[order.id] = order
            # to preserve the order of the orders by modified date
            move_to_end(dt_orders, order.id, last=True)

        move_to_end(self._orders_by_id, order.id, last=True)
    def process_commission(self, commission):
        """Process the commission.

        Parameters
        ----------
        commission : zp.Event
            The commission being paid.
        """
        asset = commission['asset']
        cost = commission['cost']

        # commissions both adjust the position's cost basis and reduce cash
        self.position_tracker.handle_commission(asset, cost)
        self._cash_flow(-cost)
    def close_position(self, asset, dt, data_portal):
        """Force-close any open position in ``asset`` at ``dt``.

        A no-op when there is no open position (the tracker returns None).
        """
        txn = self.position_tracker.maybe_create_close_position_transaction(
            asset,
            dt,
            data_portal,
        )
        if txn is not None:
            self.process_transaction(txn)
    def process_dividends(self, next_session, asset_finder, adjustment_reader):
        """Process dividends for the next session.

        This will earn us any dividends whose ex-date is the next session as
        well as paying out any dividends whose pay-date is the next session
        """
        position_tracker = self.position_tracker

        # Earn dividends whose ex_date is the next trading day. We need to
        # check if we own any of these stocks so we know to pay them out when
        # the pay date comes.
        held_sids = set(position_tracker.positions)
        if held_sids:
            cash_dividends = adjustment_reader.get_dividends_with_ex_date(
                held_sids,
                next_session,
                asset_finder
            )
            stock_dividends = (
                adjustment_reader.get_stock_dividends_with_ex_date(
                    held_sids,
                    next_session,
                    asset_finder
                )
            )

            # Earning a dividend just marks that we need to get paid out on
            # the dividend's pay-date. This does not affect our cash yet.
            position_tracker.earn_dividends(
                cash_dividends,
                stock_dividends,
            )

        # Pay out the dividends whose pay-date is the next session. This does
        # affect our cash.
        self._cash_flow(
            position_tracker.pay_dividends(
                next_session,
            ),
        )
    def capital_change(self, change_amount):
        """Apply an external capital change (deposit or withdrawal).

        The portfolio is synced first so the adjustment is applied on top of
        up-to-date values; cash and total value then move by the same amount,
        which keeps the dirty flags clear.
        """
        self.update_portfolio()
        portfolio = self._portfolio

        # we update the cash and total value so this is not dirty
        portfolio.portfolio_value += change_amount
        portfolio.cash += change_amount
def transactions(self, dt=None):
"""Retrieve the dict-form of all of the transactions in a given bar or
for the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up transactions for. If not passed,
or None is explicitly passed, all of the transactions will be
returned.
Returns
-------
transactions : list[dict]
The transaction information.
"""
if dt is None:
# flatten the by-day transactions
return [
txn
for by_day in itervalues(self._processed_transactions)
for txn in by_day
]
return self._processed_transactions.get(dt, [])
def orders(self, dt=None):
"""Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up order for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information.
"""
if dt is None:
# orders by id is already flattened
return [o.to_dict() for o in itervalues(self._orders_by_id)]
return [
o.to_dict()
for o in itervalues(self._orders_by_modified.get(dt, {}))
]
    @property
    def positions(self):
        # Expose the tracker's positions as a plain list for external callers.
        return self.position_tracker.get_position_list()
def _get_payout_total(self, positions):
calculate_payout = self._calculate_payout
payout_last_sale_prices = self._payout_last_sale_prices
total = 0
for asset, old_price in iteritems(payout_last_sale_prices):
position = positions[asset]
payout_last_sale_prices[asset] = price = position.last_sale_price
amount = position.amount
total += calculate_payout(
asset.price_multiplier,
amount,
old_price,
price,
)
return total
    def update_portfolio(self):
        """Force a computation of the current portfolio state.

        No-op when the portfolio is not flagged dirty.  Otherwise this pulls
        positions and exposure stats from the position tracker, books the
        payout computed by ``_get_payout_total`` as a cash flow, recomputes
        portfolio value, and compounds pnl/returns before clearing the
        dirty flag.
        """
        if not self._dirty_portfolio:
            # Nothing changed since the last sync; keep the cached state.
            return
        portfolio = self._portfolio
        pt = self.position_tracker
        portfolio.positions = pt.get_positions()
        position_stats = pt.stats
        portfolio.positions_value = position_value = (
            position_stats.net_value
        )
        portfolio.positions_exposure = position_stats.net_exposure
        # Settle price-move payouts into cash; this also advances the
        # recorded last-sale prices inside ``_get_payout_total``.
        self._cash_flow(self._get_payout_total(pt.positions))
        start_value = portfolio.portfolio_value
        # update the new starting value
        portfolio.portfolio_value = end_value = portfolio.cash + position_value
        pnl = end_value - start_value
        if start_value != 0:
            returns = pnl / start_value
        else:
            # Avoid dividing by zero on an empty portfolio.
            returns = 0.0
        portfolio.pnl += pnl
        # Compound this period's return into the cumulative return.
        portfolio.returns = (
            (1 + portfolio.returns) *
            (1 + returns) -
            1
        )
        # the portfolio has been fully synced
        self._dirty_portfolio = False
    @property
    def portfolio(self):
        """Compute the current portfolio.

        Notes
        -----
        This is cached, repeated access will not recompute the portfolio
        until the portfolio may have changed (i.e. the dirty flag is set).
        """
        self.update_portfolio()
        # Hand out the immutable view so callers cannot mutate ledger state.
        return self._immutable_portfolio
def calculate_period_stats(self):
position_stats = self.position_tracker.stats
portfolio_value = self.portfolio.portfolio_value
if portfolio_value == 0:
gross_leverage = net_leverage = np.inf
else:
gross_leverage = position_stats.gross_exposure / portfolio_value
net_leverage = position_stats.net_exposure / portfolio_value
return portfolio_value, gross_leverage, net_leverage
    def override_account_fields(self,
                                settled_cash=not_overridden,
                                accrued_interest=not_overridden,
                                buying_power=not_overridden,
                                equity_with_loan=not_overridden,
                                total_positions_value=not_overridden,
                                total_positions_exposure=not_overridden,
                                regt_equity=not_overridden,
                                regt_margin=not_overridden,
                                initial_margin_requirement=not_overridden,
                                maintenance_margin_requirement=not_overridden,
                                available_funds=not_overridden,
                                excess_liquidity=not_overridden,
                                cushion=not_overridden,
                                day_trades_remaining=not_overridden,
                                leverage=not_overridden,
                                net_leverage=not_overridden,
                                net_liquidation=not_overridden):
        """Override fields on ``self.account``.

        Any keyword explicitly passed (i.e. not left at the
        ``not_overridden`` sentinel) replaces the computed value of the
        matching account attribute on the next sync of ``self.account``.

        Notes
        -----
        The parameter names double as the attribute names on the account
        object: the body harvests every non-sentinel entry from
        ``locals()``, so renaming a parameter silently changes which field
        it overrides.
        """
        # mark the account as dirty so the overrides are applied on next sync
        self._dirty_account = True
        self._account_overrides = kwargs = {
            k: v for k, v in locals().items() if v is not not_overridden
        }
        del kwargs['self']
    @property
    def account(self):
        """Sync (if dirty) and return the immutable account object.

        Default field values are derived from the current portfolio; any
        entries registered via ``override_account_fields`` are applied last,
        so e.g. broker-provided values win over the defaults.
        """
        if self._dirty_account:
            portfolio = self.portfolio
            account = self._account
            # If no attribute is found in the ``_account_overrides`` resort to
            # the following default values. If an attribute is found use the
            # existing value. For instance, a broker may provide updates to
            # these attributes. In this case we do not want to over write the
            # broker values with the default values.
            account.settled_cash = portfolio.cash
            account.accrued_interest = 0.0
            account.buying_power = np.inf
            account.equity_with_loan = portfolio.portfolio_value
            account.total_positions_value = (
                portfolio.portfolio_value - portfolio.cash
            )
            account.total_positions_exposure = (
                portfolio.positions_exposure
            )
            account.regt_equity = portfolio.cash
            account.regt_margin = np.inf
            account.initial_margin_requirement = 0.0
            account.maintenance_margin_requirement = 0.0
            account.available_funds = portfolio.cash
            account.excess_liquidity = portfolio.cash
            # Guard the division: an empty portfolio has no defined cushion.
            account.cushion = (
                (portfolio.cash / portfolio.portfolio_value)
                if portfolio.portfolio_value else
                np.nan
            )
            account.day_trades_remaining = np.inf
            # calculate_period_stats returns (portfolio_value, gross, net);
            # portfolio value doubles as the net liquidation value here.
            (account.net_liquidation,
             account.gross_leverage,
             account.net_leverage) = self.calculate_period_stats()
            account.leverage = account.gross_leverage
            # apply the overrides
            for k, v in iteritems(self._account_overrides):
                setattr(account, k, v)
            # the account has been fully synced
            self._dirty_account = False
        return self._immutable_account
| 34.408353 | 86 | 0.599697 |
076f8bf26d13ccf102b4174e6e1c649a6a7cf3af | 978 | py | Python | frappe/core/doctype/module_profile/test_module_profile.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | frappe/core/doctype/module_profile/test_module_profile.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | frappe/core/doctype/module_profile/test_module_profile.py | erpnext-tm/frappe | 7b470f28e1cf00b0659c01e06a2d0a4693b28d98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
class TestModuleProfile(unittest.TestCase):
    def test_make_new_module_profile(self):
        """Attach a module profile to a user and verify the blocked module."""
        profile_name = "_Test Module Profile"
        user_email = "test-for-module_profile@example.com"
        # Create the module profile once; later runs reuse the existing doc.
        if not frappe.db.get_value("Module Profile", profile_name):
            frappe.get_doc(
                {
                    "doctype": "Module Profile",
                    "module_profile_name": profile_name,
                    "block_modules": [{"module": "Accounts"}],
                }
            ).insert()
        # Fetch (or lazily create) the test user, then attach the profile.
        if frappe.db.get_value("User", user_email):
            test_user = frappe.get_doc("User", user_email)
        else:
            test_user = frappe.get_doc(
                {"doctype": "User", "email": user_email, "first_name": "Test User"}
            ).insert()
        test_user.module_profile = profile_name
        test_user.save()
        self.assertEqual(test_user.block_modules[0].module, "Accounts")
85952299622dd0475ce78a176a902badfecb65f1 | 774 | py | Python | photos/migrations/0006_auto_20201011_1054.py | Abzed/Gallery | b24cd58e6e6a23894c4ea340b8f03df8a4a062ff | [
"MIT"
] | null | null | null | photos/migrations/0006_auto_20201011_1054.py | Abzed/Gallery | b24cd58e6e6a23894c4ea340b8f03df8a4a062ff | [
"MIT"
] | null | null | null | photos/migrations/0006_auto_20201011_1054.py | Abzed/Gallery | b24cd58e6e6a23894c4ea340b8f03df8a4a062ff | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-11 10:54
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('photos', '0005_auto_20201010_1108'),
]
operations = [
migrations.AlterModelOptions(
name='image',
options={'ordering': ['date']},
),
migrations.AddField(
model_name='image',
name='date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='image',
name='location',
field=models.ManyToManyField(to='photos.Location'),
),
]
| 25.8 | 93 | 0.583979 |
aade6d5fbb686747894981e6327a62fc904fb540 | 435 | py | Python | Proyecto/DacodesJobs/DacodesJobs/settings/production.py | angel318/DacodesJobs | 3a8bb0248ab8addf462b175e039ae935a5e34197 | [
"bzip2-1.0.6"
] | null | null | null | Proyecto/DacodesJobs/DacodesJobs/settings/production.py | angel318/DacodesJobs | 3a8bb0248ab8addf462b175e039ae935a5e34197 | [
"bzip2-1.0.6"
] | null | null | null | Proyecto/DacodesJobs/DacodesJobs/settings/production.py | angel318/DacodesJobs | 3a8bb0248ab8addf462b175e039ae935a5e34197 | [
"bzip2-1.0.6"
] | null | null | null | from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dacodesjobs',
'USER': 'django',
'PASSWORD': 'holamundo',
'HOST': 'localhost',
'PORT': '',
}
}
| 21.75 | 65 | 0.6 |
6e752eb7565eb0b2d9b3fe45324ed9848f425718 | 4,770 | py | Python | tests/app/main/views/test_new_password.py | cds-snc/notification-admin | d4056798bf889ad29893667bbb67ead2f8e466e4 | [
"MIT"
] | 16 | 2019-11-05T21:35:49.000Z | 2022-01-12T15:00:32.000Z | tests/app/main/views/test_new_password.py | cds-snc/notification-admin | d4056798bf889ad29893667bbb67ead2f8e466e4 | [
"MIT"
] | 509 | 2019-07-11T22:03:19.000Z | 2022-03-30T15:19:26.000Z | tests/app/main/views/test_new_password.py | cds-snc/notification-admin | d4056798bf889ad29893667bbb67ead2f8e466e4 | [
"MIT"
] | 8 | 2020-02-21T20:19:29.000Z | 2022-03-31T14:17:02.000Z | import json
from datetime import datetime
from flask import url_for
from itsdangerous import SignatureExpired
from notifications_utils.url_safe_token import generate_token
from tests.conftest import url_for_endpoint_with_token
def test_should_render_new_password_template(
    app_,
    client,
    api_user_active,
    mock_login,
    mock_send_verify_code,
    mock_get_user_by_email_request_password_reset,
):
    """A valid reset token should render the new-password page."""
    payload = json.dumps(
        {
            "email": api_user_active["email_address"],
            "created_at": str(datetime.utcnow()),
        }
    )
    reset_token = generate_token(payload, app_.config["SECRET_KEY"], app_.config["DANGEROUS_SALT"])
    resp = client.get(url_for_endpoint_with_token(".new_password", token=reset_token))
    assert resp.status_code == 200
    assert "You can now create a new password for your account." in resp.get_data(as_text=True)
def test_should_return_404_when_email_address_does_not_exist(
    app_,
    client,
    mock_get_user_by_email_not_found,
):
    """An unknown e-mail address inside the token should yield a 404."""
    payload = json.dumps({"email": "no_user@d.canada.ca", "created_at": str(datetime.utcnow())})
    reset_token = generate_token(payload, app_.config["SECRET_KEY"], app_.config["DANGEROUS_SALT"])
    resp = client.get(url_for_endpoint_with_token(".new_password", token=reset_token))
    assert resp.status_code == 404
def test_should_redirect_to_two_factor_when_password_reset_is_successful(
    app_,
    client,
    mock_get_user_by_email_request_password_reset,
    mock_login,
    mock_send_verify_code,
    mock_reset_failed_login_count,
):
    """A successful reset sends the user on to the 2FA step."""
    user = mock_get_user_by_email_request_password_reset.return_value
    payload = json.dumps({"email": user["email_address"], "created_at": str(datetime.utcnow())})
    reset_token = generate_token(payload, app_.config["SECRET_KEY"], app_.config["DANGEROUS_SALT"])
    resp = client.post(
        url_for_endpoint_with_token(".new_password", token=reset_token),
        data={"new_password": "a-new_password"},
    )
    assert resp.status_code == 302
    assert resp.location == url_for(".two_factor_sms_sent", _external=True)
    mock_get_user_by_email_request_password_reset.assert_called_once_with(user["email_address"])
def test_should_redirect_index_if_user_has_already_changed_password(
    app_,
    client,
    mock_get_user_by_email_user_changed_password,
    mock_login,
    mock_send_verify_code,
    mock_reset_failed_login_count,
):
    """A token issued before the last password change redirects home."""
    user = mock_get_user_by_email_user_changed_password.return_value
    payload = json.dumps({"email": user["email_address"], "created_at": str(datetime.utcnow())})
    reset_token = generate_token(payload, app_.config["SECRET_KEY"], app_.config["DANGEROUS_SALT"])
    resp = client.post(
        url_for_endpoint_with_token(".new_password", token=reset_token),
        data={"new_password": "a-new_password"},
    )
    assert resp.status_code == 302
    assert resp.location == url_for(".index", _external=True)
    mock_get_user_by_email_user_changed_password.assert_called_once_with(user["email_address"])
def test_should_redirect_to_forgot_password_with_flash_message_when_token_is_expired(app_, client, mock_login, mocker):
    """An expired token bounces the user back to the forgot-password page."""
    # Force token validation to fail as if the signature had expired.
    mocker.patch(
        "app.main.views.new_password.check_token",
        side_effect=SignatureExpired("expired"),
    )
    reset_token = generate_token("foo@bar.com", app_.config["SECRET_KEY"], app_.config["DANGEROUS_SALT"])
    resp = client.get(url_for_endpoint_with_token(".new_password", token=reset_token))
    assert resp.status_code == 302
    assert resp.location == url_for(".forgot_password", _external=True)
def test_should_sign_in_when_password_reset_is_successful_for_email_auth(
    app_,
    client,
    mock_get_user,
    mock_get_user_by_email_request_password_reset,
    mock_login,
    mock_send_verify_code,
    mock_reset_failed_login_count,
    mock_update_user_password,
    mock_get_login_events,
):
    """Email-auth users are signed straight in after a password reset."""
    user = mock_get_user_by_email_request_password_reset.return_value
    user["auth_type"] = "email_auth"
    payload = json.dumps({"email": user["email_address"], "created_at": str(datetime.utcnow())})
    reset_token = generate_token(payload, app_.config["SECRET_KEY"], app_.config["DANGEROUS_SALT"])
    resp = client.post(
        url_for_endpoint_with_token(".new_password", token=reset_token),
        data={"new_password": "a-new_password"},
    )
    assert resp.status_code == 302
    assert resp.location == url_for(".show_accounts_or_dashboard", _external=True)
    assert mock_get_user_by_email_request_password_reset.called
    assert mock_reset_failed_login_count.called
    # The log-in flow itself performs these calls:
    mock_get_user.assert_called_once_with(user["id"])
    mock_update_user_password.assert_called_once_with(user["id"], "a-new_password")
    # No 2FA code is sent for email-auth users.
    assert not mock_send_verify_code.called
094a79856600db99722acddf62683924456ee8a2 | 4,743 | py | Python | python/alembic/versions/19089cd961e8_changed_numeric_data_types_to_allow_.py | adrs0049/AdhesionRandomWalk | 25d9d805261be504a61a1d9ae329559f346fe95a | [
"MIT"
] | null | null | null | python/alembic/versions/19089cd961e8_changed_numeric_data_types_to_allow_.py | adrs0049/AdhesionRandomWalk | 25d9d805261be504a61a1d9ae329559f346fe95a | [
"MIT"
] | null | null | null | python/alembic/versions/19089cd961e8_changed_numeric_data_types_to_allow_.py | adrs0049/AdhesionRandomWalk | 25d9d805261be504a61a1d9ae329559f346fe95a | [
"MIT"
] | null | null | null | """Changed numeric data types to allow better data representation
Revision ID: 19089cd961e8
Revises: 35c7e94e6883
Create Date: 2015-07-23 12:06:07.423576
"""
# revision identifiers, used by Alembic.
# ``revision`` is this migration's id; ``down_revision`` is its parent.
revision = '19089cd961e8'
down_revision = '35c7e94e6883'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # All ``parameters`` columns below receive the identical widening, so
    # the repeated alter_column calls are driven from one tuple of names.
    parameter_columns = (
        'DomainSize',
        'R',
        'diffusion_coeff',
        'drift_coeff',
        'ic_p',
        'omega_p',
        'u0',
    )
    op.alter_column('density_data', 'density',
                    existing_type=mysql.DECIMAL(precision=10, scale=8),
                    type_=sa.Numeric(precision=20, scale=8),
                    existing_nullable=False)
    for column_name in parameter_columns:
        op.alter_column('parameters', column_name,
                        existing_type=mysql.DECIMAL(precision=19, scale=4),
                        type_=sa.Numeric(precision=19, scale=7),
                        existing_nullable=False)
    op.alter_column('path_meta_data', 'stochastic',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=False)
    op.alter_column('path_meta_data', 'time',
                    existing_type=mysql.DECIMAL(precision=20, scale=9),
                    type_=sa.Numeric(precision=24, scale=14),
                    existing_nullable=False)
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Exact reverse of upgrade(): restore the narrower legacy precisions,
    # walking the parameters columns in the opposite order.
    parameter_columns = (
        'u0',
        'omega_p',
        'ic_p',
        'drift_coeff',
        'diffusion_coeff',
        'R',
        'DomainSize',
    )
    op.alter_column('path_meta_data', 'time',
                    existing_type=sa.Numeric(precision=24, scale=14),
                    type_=mysql.DECIMAL(precision=20, scale=9),
                    existing_nullable=False)
    op.alter_column('path_meta_data', 'stochastic',
                    existing_type=sa.Boolean(),
                    type_=mysql.TINYINT(display_width=1),
                    existing_nullable=False)
    for column_name in parameter_columns:
        op.alter_column('parameters', column_name,
                        existing_type=sa.Numeric(precision=19, scale=7),
                        type_=mysql.DECIMAL(precision=19, scale=4),
                        existing_nullable=False)
    op.alter_column('density_data', 'density',
                    existing_type=sa.Numeric(precision=20, scale=8),
                    type_=mysql.DECIMAL(precision=10, scale=8),
                    existing_nullable=False)
    ### end Alembic commands ###
| 44.327103 | 66 | 0.62218 |
66744bb9e12f98cdfe6e3b0c8ae999ea80be2f8e | 1,843 | py | Python | marinetrafficapi/events/EV01_port_calls/query_params.py | arrrlo/marine-traffic-client-api | 1ac4b65010b1dc3f161940ee83815b341f9455ea | [
"MIT"
] | 15 | 2019-12-24T17:25:33.000Z | 2022-03-04T01:56:30.000Z | marinetrafficapi/events/EV01_port_calls/query_params.py | arrrlo/marine-traffic-client-api | 1ac4b65010b1dc3f161940ee83815b341f9455ea | [
"MIT"
] | 27 | 2019-03-14T09:04:07.000Z | 2022-03-02T09:20:36.000Z | marinetrafficapi/events/EV01_port_calls/query_params.py | arrrlo/marine-traffic-client-api | 1ac4b65010b1dc3f161940ee83815b341f9455ea | [
"MIT"
] | 3 | 2019-04-15T14:02:32.000Z | 2022-03-25T12:55:47.000Z | from marinetrafficapi.query_params import QueryParams
class EV01QueryParams(QueryParams):
    """Query params for EV01 API call."""
    # Each attribute maps to one request parameter as a tuple of
    # (parameter name sent to the API, human-readable description).
    time_span = 'timespan', 'The maximum age, in minutes, of the \n' \
                'returned port calls. Maximum value is 2880.'
    port_id = 'portid', 'The MarineTraffic ID of the port you wish \n' \
              'to monitor (found on the URL of the respective \n' \
              'Port page) or port UN/LOCODE.'
    mmsi = 'mmsi', 'The Maritime Mobile Service Identity \n' \
           '(MMSI) of the vessel you wish to track.'
    imo = 'imo', 'The International Maritime Organization \n' \
          '(IMO) number of the vessel you wish to track.'
    ship_id = 'shipid', 'A uniquely assigned ID by \n' \
              'MarineTraffic for the subject vessel.'
    move_type = 'movetype', 'Use 0 to only receive arrivals or 1 to \n' \
                'only receive departures. If not used, \n' \
                'the response will include both.'
    exclude_intransit = 'exclude_intransit', 'Use 1 to exclude vessels in transit'
    from_date = 'fromdate', 'Portcalls fromdate. \n' \
                'Date format: YYYY-MM-DD HH:MM'
    to_date = 'todate', 'Portcalls todate. \n' \
              'Date format: YYYY-MM-DD HH:MM'
    dwt_min = 'dwt_min', 'Data filter: minimum DWT \n' \
              'applicable to IMO-having vessels'
    dwt_max = 'dwt_max', 'Data filter: maximum DWT \n' \
              'applicable to IMO-having vessels'
    gt_min = 'gt_min', 'Data filter: minimum GT \n' \
             'applicable to IMO-having vessels'
    gt_max = 'gt_max', 'Data filter: maximum GT \n' \
             'applicable to IMO-having vessels'
| 40.065217 | 82 | 0.558329 |
5c12bb75c73243bef6dd375d17a2e597e9ff3976 | 7,391 | py | Python | tests/integration-tests/tests/efa/test_efa.py | rangsimanketkaew/aws-parallelcluster | 650d59f47a96e1f21d3af83402fff9aafe717679 | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/tests/efa/test_efa.py | rangsimanketkaew/aws-parallelcluster | 650d59f47a96e1f21d3af83402fff9aafe717679 | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/tests/efa/test_efa.py | rangsimanketkaew/aws-parallelcluster | 650d59f47a96e1f21d3af83402fff9aafe717679 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import re
import pytest
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from utils import get_compute_nodes_instance_ids
from tests.common.assertions import assert_no_errors_in_logs
from tests.common.mpi_common import _test_mpi
from tests.common.schedulers_common import get_scheduler_commands
from tests.common.utils import fetch_instance_slots
@pytest.mark.regions(["us-east-1", "us-gov-west-1"])
@pytest.mark.instances(["c5n.18xlarge", "p3dn.24xlarge", "i3en.24xlarge"])
# Torque is not supported by OpenMPI distributed with EFA
# Slurm test is to verify EFA works correctly when using the SIT model in the config file
@pytest.mark.schedulers(["sge", "slurm"])
@pytest.mark.usefixtures("os")
def test_sit_efa(region, scheduler, instance, pcluster_config_reader, clusters_factory, test_datadir):
    """
    Test all EFA Features.
    Grouped all tests in a single function so that cluster can be reused for all of them.
    """
    max_queue_size = 2
    slots_per_instance = fetch_instance_slots(region, instance)
    cluster_config = pcluster_config_reader(max_queue_size=max_queue_size)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    # SIT model: a single queue, so EFA must be present on every compute node.
    _test_efa_installation(scheduler_commands, remote_command_executor, efa_installed=True)
    _test_mpi(remote_command_executor, slots_per_instance, scheduler)
    logging.info("Running on Instances: {0}".format(get_compute_nodes_instance_ids(cluster.cfn_name, region)))
    # Latency benchmarks for both MPI stacks shipped with ParallelCluster.
    _test_osu_benchmarks("openmpi", remote_command_executor, scheduler_commands, test_datadir, slots_per_instance)
    _test_osu_benchmarks("intelmpi", remote_command_executor, scheduler_commands, test_datadir, slots_per_instance)
    _test_shm_transfer_is_enabled(scheduler_commands, remote_command_executor)
    assert_no_errors_in_logs(remote_command_executor, scheduler)
@pytest.mark.regions(["us-east-1"])
@pytest.mark.instances(["c5n.18xlarge"])
@pytest.mark.oss(["alinux2"])
@pytest.mark.schedulers(["slurm"])
@pytest.mark.usefixtures("os")
def test_hit_efa(region, scheduler, instance, pcluster_config_reader, clusters_factory, test_datadir):
    """
    Test all EFA Features.
    Grouped all tests in a single function so that cluster can be reused for all of them.
    """
    max_queue_size = 2
    slots_per_instance = fetch_instance_slots(region, instance)
    cluster_config = pcluster_config_reader(max_queue_size=max_queue_size)
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    # HIT model: EFA is enabled per-partition, so check both expectations.
    _test_efa_installation(scheduler_commands, remote_command_executor, efa_installed=True, partition="efa-enabled")
    _test_efa_installation(scheduler_commands, remote_command_executor, efa_installed=False, partition="efa-disabled")
    _test_mpi(remote_command_executor, slots_per_instance, scheduler, partition="efa-enabled")
    logging.info("Running on Instances: {0}".format(get_compute_nodes_instance_ids(cluster.cfn_name, region)))
    _test_osu_benchmarks(
        "openmpi",
        remote_command_executor,
        scheduler_commands,
        test_datadir,
        slots_per_instance,
        partition="efa-enabled",
    )
    _test_osu_benchmarks(
        "intelmpi",
        remote_command_executor,
        scheduler_commands,
        test_datadir,
        slots_per_instance,
        partition="efa-enabled",
    )
    _test_shm_transfer_is_enabled(scheduler_commands, remote_command_executor, partition="efa-enabled")
    assert_no_errors_in_logs(remote_command_executor, scheduler)
def _test_efa_installation(scheduler_commands, remote_command_executor, efa_installed=True, partition=None):
    """Check for the EFA device (PCI id 1d0f:efa0) on a compute node and
    assert that the master node never exposes one.

    ``efa_installed`` flips the expectation for the compute-node output;
    ``partition``, when truthy, selects the queue the check runs in.
    """
    logging.info("Testing EFA installed")
    # Dump the PCI device list of a compute node into the shared filesystem.
    submit_kwargs = {"partition": partition} if partition else {}
    result = scheduler_commands.submit_command("lspci -n > /shared/lspci.out", **submit_kwargs)
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
    # Compute node: the EFA PCI id must (not) appear, depending on the flag.
    compute_lspci = remote_command_executor.run_remote_command("cat /shared/lspci.out")
    if efa_installed:
        assert_that(compute_lspci.stdout).contains("1d0f:efa0")
    else:
        assert_that(compute_lspci.stdout).does_not_contain("1d0f:efa0")
    # Check EFA interface not present on master.
    master_lspci = remote_command_executor.run_remote_command("lspci -n")
    assert_that(master_lspci.stdout).does_not_contain("1d0f:efa0")
def _test_osu_benchmarks(
    mpi_version, remote_command_executor, scheduler_commands, test_datadir, slots_per_instance, partition=None
):
    """Build and run the OSU latency benchmark for ``mpi_version`` and
    assert the measured 0-byte latency is at most 24 microseconds.
    """
    logging.info("Running OSU benchmarks for {0}".format(mpi_version))
    # Unpack and compile the benchmark suite on the cluster.
    remote_command_executor.run_remote_script(
        str(test_datadir / "init_osu_benchmarks.sh"),
        args=[mpi_version],
        hide=True,
        additional_files=[str(test_datadir / "osu-micro-benchmarks-5.6.3.tar.gz")],
    )
    submit_kwargs = {"partition": partition} if partition else {}
    result = scheduler_commands.submit_script(
        str(test_datadir / "osu_submit_{0}.sh".format(mpi_version)),
        slots=2 * slots_per_instance,
        **submit_kwargs,
    )
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
    benchmark_output = remote_command_executor.run_remote_command("cat /shared/osu.out").stdout
    # NOTE(review): the pattern assumes a two-digit latency on the 0-byte
    # row; a single-digit result would not match -- confirm expected range.
    latency = re.search(r"0\s+(\d\d)\.", benchmark_output).group(1)
    assert_that(int(latency)).is_less_than_or_equal_to(24)
def _test_shm_transfer_is_enabled(scheduler_commands, remote_command_executor, partition=None):
    """Assert libfabric did not disable SHM transfer because of ptrace
    protection, by inspecting ``fi_info -p efa`` output from a compute job.
    """
    logging.info("Testing SHM Transfer is enabled")
    submit_kwargs = {"partition": partition} if partition else {}
    result = scheduler_commands.submit_command("fi_info -p efa 2>&1 > /shared/fi_info.out", **submit_kwargs)
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
    fi_info = remote_command_executor.run_remote_command("cat /shared/fi_info.out")
    assert_that(fi_info.stdout).does_not_contain("SHM transfer will be disabled because of ptrace protection")
| 45.343558 | 118 | 0.764579 |
7139837b44ead6a40e5e2d0ad666173a40a5f077 | 43,905 | py | Python | pycqed/analysis_v2/VQE_EVC.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 60 | 2016-08-03T10:00:18.000Z | 2021-11-10T11:46:16.000Z | pycqed/analysis_v2/VQE_EVC.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 512 | 2016-08-03T17:10:02.000Z | 2022-03-31T14:03:43.000Z | pycqed/analysis_v2/VQE_EVC.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 34 | 2016-10-19T12:00:52.000Z | 2022-03-19T04:43:26.000Z | import time
import numpy as np
from pycqed.analysis import analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
# import dataprep for tomography module
# import tomography module
# using the data prep module of analysis V2
# from pycqed.analysis_v2 import tomography_dataprep as dataprep
from pycqed.analysis import measurement_analysis as ma
# qutip is an optional dependency: without it the tomography helpers that
# build density matrices cannot run, but the expectation-value code in
# this module still works.
try:
    import qutip as qt
except ImportError:
    # The exception object was previously bound (``as e``) but never used.
    pass
    # logging.warning('Could not import qutip, tomo code will not work')
def reshape_block(shots_data, segments_per_block=16, block_size=4092, mode='truncate'):
    """Arrange a flat array of shots into blocks of ``block_size`` samples.

    Parameters
    ----------
    shots_data : np.ndarray
        1D array of length N; N must be an integer multiple of
        ``block_size`` (enforced by an assert).
    segments_per_block : int
        Number of segments that make up one logical group.
    block_size : int
        Number of samples per acquisition block.
    mode : str
        'truncate' drops the orphan samples at the end of each block that
        do not fill a whole group of ``segments_per_block``; 'padd' keeps
        them and fills the remainder of the final group with NaN.
        Case-insensitive.

    Returns
    -------
    np.ndarray
        2D array with one row per block.

    Raises
    ------
    ValueError
        If ``mode`` is neither 'truncate' nor 'padd'.
    """
    n_samples = len(shots_data)
    # Data dimension needs to be an integer multiple of block_size.
    assert n_samples % block_size == 0
    num_blocks = n_samples // block_size
    full_segments = block_size // segments_per_block
    reshaped_data = shots_data.reshape((num_blocks, block_size))
    mode = mode.lower()
    if mode == 'truncate':
        truncate_idx = full_segments * segments_per_block
        return reshaped_data[:, :truncate_idx]
    elif mode == 'padd':
        # NOTE(review): one extra group is always padded, even when
        # block_size divides evenly by segments_per_block -- in that case
        # the last segments_per_block columns are all NaN.
        padd_dim = (full_segments + 1) * segments_per_block
        return_block = np.nan * np.ones((num_blocks, padd_dim))
        return_block[:, :block_size] = reshaped_data
        return return_block
    else:
        raise ValueError('Mode not understood. Needs to be truncate or padd')
def all_repetitions(shots_data, segments_per_block=16):
    """Reshape a 2D array of shots into rows of ``segments_per_block``
    samples, one row per repetition of the segment sequence.
    """
    total_samples = shots_data.shape[0] * shots_data.shape[1]
    # The flattened length must split evenly into whole segment groups.
    assert total_samples % segments_per_block == 0
    repetitions = total_samples // segments_per_block
    return shots_data.reshape((repetitions, segments_per_block))
def get_segments_average(shots_data, segments_per_block=16, block_size=4092, mode='truncate', average=True):
    """Group raw shots into repetitions of ``segments_per_block`` segments
    and, unless ``average`` is False, average over all repetitions.
    """
    blocked = reshape_block(shots_data=shots_data,
                            segments_per_block=segments_per_block,
                            block_size=block_size,
                            mode=mode)
    repetitions = all_repetitions(shots_data=blocked,
                                  segments_per_block=segments_per_block)
    # Average across repetitions (axis 0) unless the caller wants raw shots.
    return np.mean(repetitions, axis=0) if average else repetitions
class ExpectationValueCalculation:
    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        """Load a two-qubit measurement and bin it into tomography points
        and calibration points.

        NOTE(review): ``auto`` and ``close_fig`` are accepted but never
        used in this body -- presumably kept for interface compatibility.
        """
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        # 2 qubits -> 4 computational basis states.
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # self.exp_name = os.path.split(self.folder)[-1][7:]
        # Three measurement channels: q0, q1 and the correlator.
        avg_h1 = self.ma_obj.measured_values[0]
        avg_h2 = self.ma_obj.measured_values[1]
        avg_h12 = self.ma_obj.measured_values[2]
        # Binning all the points required for the tomo.
        # Segments 8..15 hold the calibration states (00, 01, 10, 11),
        # two segments each; average the pairs.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # Segments 0..7 are the actual tomography measurements.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])
        # print(len(self.measurements_cal))
        # print(self.measurements_cal)
def _calibrate_betas(self):
"""
calculates betas from calibration points for the initial measurement
operator
Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
<0|Z|0> = 1, <1|Z|1> = -1
Keyword arguments:
measurements_cal --- array(2 ** n_qubits) should be ordered
correctly (00, 01, 10, 11) for 2 qubits
"""
cal_matrix = np.zeros((self.n_states, self.n_states))
# get the coefficient matrix for the betas
for i in range(self.n_states):
for j in range(self.n_states):
# perform bitwise AND and count the resulting 1s
cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
# invert solve the simple system of equations
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
betas = np.zeros(12)
# print(self.measurements_cal[0:4])
betas[0:4] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[0:4])
# print(cal_matrix)
# print(np.linalg.inv(cal_matrix))
# print(self.measurements_cal[0:4])
# print(betas[0:4])
betas[4:8] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[4:8])
# print(betas[4:8])
betas[8:] = np.dot(np.linalg.inv(cal_matrix), self.measurements_cal[8:12])
# print(betas[8:])
return betas
def expectation_value_calculation_IdenZ(self):
betas = self._calibrate_betas()
#inverting the unprimed beta matrix
#up is unprimed
self.betas = betas
# print(self.betas[0:4], self.betas[4:8], self.betas[8:])
beta_0_up =self.betas[0]
beta_1_up =self.betas[1]
beta_2_up =self.betas[2]
beta_3_up =self.betas[3]
beta_matrix_up = np.array([[beta_0_up,beta_1_up,beta_2_up,beta_3_up],
[beta_0_up,-1*beta_1_up,beta_2_up,-1*beta_3_up],
[beta_0_up,beta_1_up,-1*beta_2_up,-1*beta_3_up],
[beta_0_up,-1*beta_1_up,-1*beta_2_up,beta_3_up]])
#assuming 0:4 are
# expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[1:4])
expect_value_IdenZ_up = np.dot(np.linalg.inv(beta_matrix_up), self.measurements_tomo[0:4])
#inverting the primed beta matrix
#p is primed
beta_0_p =self.betas[4]
beta_1_p =self.betas[5]
beta_2_p =self.betas[6]
beta_3_p =self.betas[7]
beta_matrix_p = np.array([[beta_0_p,beta_1_p,beta_2_p,beta_3_p],
[beta_0_p,-1*beta_1_p,beta_2_p,-1*beta_3_p],
[beta_0_p,beta_1_p,-1*beta_2_p,-1*beta_3_p],
[beta_0_p,-1*beta_1_p,-1*beta_2_p,beta_3_p]])
# beta_matrix_p = np.array([[-1*beta_1_p,beta_2_p,-1*beta_3_p],
# [beta_1_p,-1*beta_2_p,-1*beta_3_p],
# [-1*beta_1_p,-1*beta_2_p,beta_3_p]])
#assuming 0:4 are
expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[8:12])
# expect_value_IdenZ_p = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
#inverting the unprimed beta matrix
#up is unprimed
beta_0_pp =self.betas[8]
beta_1_pp =self.betas[9]
beta_2_pp =self.betas[10]
beta_3_pp =self.betas[11]
beta_matrix_pp = np.array([[beta_0_pp,beta_1_pp,beta_2_pp,beta_3_pp],
[beta_0_pp,-1*beta_1_pp,beta_2_pp,-1*beta_3_pp],
[beta_0_pp,beta_1_pp,-1*beta_2_pp,-1*beta_3_pp],
[beta_0_pp,-1*beta_1_pp,-1*beta_2_pp,beta_3_pp]])
# beta_matrix_pp = np.array([[-1*beta_1_pp,beta_2_pp,-1*beta_3_pp],
# [beta_1_pp,-1*beta_2_pp,-1*beta_3_pp],
# [-1*beta_1_pp,-1*beta_2_pp,beta_3_pp]])
#assuming 0:4 are
expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_pp), self.measurements_tomo[16:20])
# expect_value_IdenZ_pp = np.dot(np.linalg.inv(beta_matrix_p), self.measurements_tomo[1:4])
#take the mean of calculated expectation values of II, IZ, ZI, ZZ
#for three different beta vectors
expect_value_IdenZ = np.mean( np.array([expect_value_IdenZ_up,
expect_value_IdenZ_p,
expect_value_IdenZ_pp]),
axis=0 )
print(expect_value_IdenZ_up)
print(expect_value_IdenZ_p)
print(expect_value_IdenZ_pp)
return expect_value_IdenZ
def expectation_value_calculation_XX(self):
expect_value_XX_up = ((self.measurements_tomo[4] + self.measurements_tomo[5]) -2*self.betas[0])/2*self.betas[3]
expect_value_XX_p = ((self.measurements_tomo[12] + self.measurements_tomo[13])-2*self.betas[4])/2*self.betas[7]
expect_value_XX_pp = ((self.measurements_tomo[20] + self.measurements_tomo[21]) - 2*self.betas[8])/2*self.betas[11]
expectation_value_XX = (expect_value_XX_up + expect_value_XX_p + expect_value_XX_pp)/3
# print(expect_value_XX_up, expect_value_XX_p, expect_value_XX_pp)
return expectation_value_XX
def expectation_value_calculation_YY(self):
expect_value_YY_up = ((self.measurements_tomo[6] + self.measurements_tomo[7]) -2*self.betas[0])/2*self.betas[3]
expect_value_YY_p = ((self.measurements_tomo[14] + self.measurements_tomo[15])-2*self.betas[4])/2*self.betas[7]
expect_value_YY_pp = ((self.measurements_tomo[22] + self.measurements_tomo[23]) - 2*self.betas[8])/2*self.betas[11]
# print(expect_value_YY_up, expect_value_YY_p, expect_value_YY_pp)
expectation_value_YY = (expect_value_YY_up + expect_value_YY_p + expect_value_YY_pp)/3
return expectation_value_YY
def execute_expectation_value_calculation(self):
expect_values = np.zeros(6)
expect_values[0:4] = self.expectation_value_calculation_IdenZ()
# print(self.expectation_value_calculation_IdenZ())
expect_values[4] = self.expectation_value_calculation_XX()
# print(self.expectation_value_calculation_XX())
expect_values[5] = self.expectation_value_calculation_YY()
# print(self.expectation_value_calculation_YY())
return expect_values, self.betas
class ExpectationValueCalculation2:
    """Two-qubit Pauli expectation values from a 16-segment tomography run.

    Unlike ExpectationValueCalculation, all ten Pauli terms are recovered
    at once by assembling a 24x10 linear readout model (three 8-row blocks,
    one per readout channel) and pseudo-inverting it.

    Data layout per channel (q0, q1 and the q0*q1 correlator): 8 tomography
    segments followed by 8 calibration segments, which the slicing below
    treats as two repetitions each of the prepared states 00, 01, 10, 11.
    """
    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # self.exp_name = os.path.split(self.folder)[-1][7:]
        avg_h1 = self.ma_obj.measured_values[0]
        avg_h2 = self.ma_obj.measured_values[1]
        avg_h12 = self.ma_obj.measured_values[2]
        # Calibration points: average the two repetitions of each state.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # 24 tomography points: the first 8 segments of each channel.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])
    def _calibrate_betas(self):
        """
        calculates betas from calibration points for the initial measurement
        operator
        Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
        <0|Z|0> = 1, <1|Z|1> = -1
        Keyword arguments:
        measurements_cal --- array(2 ** n_qubits) should be ordered
            correctly (00, 01, 10, 11) for 2 qubits
        """
        cal_matrix = np.zeros((self.n_states, self.n_states))
        # get the coefficient matrix for the betas
        for i in range(self.n_states):
            for j in range(self.n_states):
                # perform bitwise AND and count the resulting 1s
                cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
        # invert solve the simple system of equations
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        betas = np.zeros(12)
        # print(self.measurements_cal[0:4])
        # One beta block per readout channel; also stored individually for
        # assemble_M_matrix.
        betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[0:4])
        self.betas_up = betas[0:4]
        betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[4:8])
        self.betas_p = betas[4:8]
        betas[8:] = np.dot(np.linalg.inv(cal_matrix),
                           self.measurements_cal[8:12])
        self.betas_pp = betas[8:]
        return betas
    def assemble_M_matrix_single_block(self, beta_array):
        """Build the 8x10 readout-model block for one channel.

        Column order of the Pauli terms appears to be
        II IZ ZI ZZ IX XI XX IY YI YY (documented explicitly in the
        sibling class ExpectationValueCalculation3_shots).
        """
        M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
                                                beta_array[2], beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_2 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_3 = np.array([beta_array[0],
                                                beta_array[1],
                                                -1*beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_4 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_5 = np.array([beta_array[0],
                                                0, 0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3], 0, 0, 0])
        M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
                                                beta_array[1],
                                                beta_array[2],
                                                beta_array[3],
                                                0, 0, 0])
        M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
                                                0, 0, 0, 0, beta_array[1],
                                                beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
                                                0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
                                           M_matrix_single_block_row_2,
                                           M_matrix_single_block_row_3,
                                           M_matrix_single_block_row_4,
                                           M_matrix_single_block_row_5,
                                           M_matrix_single_block_row_6,
                                           M_matrix_single_block_row_7,
                                           M_matrix_single_block_row_8))
        M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
        return M_matrix_single_block
    def assemble_M_matrix(self):
        """Stack the three per-channel 8x10 blocks into the 24x10 model."""
        Block1 = self.assemble_M_matrix_single_block(self.betas_up)
        Block2 = self.assemble_M_matrix_single_block(self.betas_p)
        Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
        self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
        return self.M_matrix
    def invert_M_matrix(self):
        """Least-squares pseudo-inverse of the non-square model matrix."""
        self.inverse_matrix = np.linalg.pinv(self.M_matrix)
        return self.inverse_matrix
    def execute_error_signalling(self, ev):
        """Rescale expectation values for the error-signalling scheme.

        NOTE(review): IZ and ZI use identical formulas here (as do XX and
        YY), while ExpectationValueCalculation3_shots uses (ev[2] - ev[1])
        for ZI; one of the two is presumably a copy-paste slip -- confirm
        against the intended scheme.
        """
        II = (ev[0] - ev[3])/(1 - ev[3])
        IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[1] - ev[2])/(1 - ev[3])
        ZZ = (ev[3] - ev[0])/(1 - ev[3])
        XX = (ev[4] + ev[5])/(1 - ev[3])
        YY = (ev[4] + ev[5])/(1 - ev[3])
        ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
        return ev_error_signalling
    def execute_expectation_value_calculation(self):
        """Return [II, IZ, ZI, ZZ, XX, YY] via the full 10-term inversion."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        # Indices 0..3 select II, IZ, ZI, ZZ; 6 selects XX; 9 selects YY.
        expect_values_VQE = np.array([self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        return expect_values_VQE
    def execute_expectation_value_calculation_traceone(self):
        """Same inversion with the II column dropped; II is fixed to 1."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        # With the II column removed the indices shift down by one:
        # 0..2 are IZ, ZI, ZZ; 5 is XX; 8 is YY.
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        return expect_values_VQE
    def execute_expectation_value_calculation_T1signaling(self):
        """Trace-one inversion followed by the error-signalling rescaling."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
        return expect_values_VQE
class ExpectationValueCalculation3_shots:
    """Expectation-value extraction from single-shot data.

    Same 24x10 linear readout model as ExpectationValueCalculation2, but
    the three channels are rebuilt from raw shots via get_segments_average
    and the averaged traces are offset- and scale-normalised with the
    calibration segments before the betas are computed.
    """
    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # Per-repetition shot arrays (average=False keeps all repetitions).
        shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        # Correlator channel: product of the two range-normalised shot arrays.
        shots_I_q0q1 = np.multiply(shots_I_q0/(np.max(shots_I_q0)-np.min(shots_I_q0)),shots_I_q1/(np.max(shots_I_q1)-np.min(shots_I_q1)))
        # Average over repetitions to obtain one 16-segment trace per channel.
        avg_h1 = np.mean(shots_I_q0,axis=0)
        avg_h2 = np.mean(shots_I_q1,axis=0)
        avg_h12 = np.mean(shots_I_q0q1,axis=0)
        # Calibration points: segments 8..15 hold two repetitions each of
        # the prepared states 00, 01, 10, 11.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # Per-channel offset: mean over the four calibration states.
        mean_h1 = (h1_00+h1_10+h1_01+h1_11)/4
        mean_h2 = (h2_00+h2_01+h2_10+h2_11)/4
        mean_h12 = (h12_00+h12_11+h12_01+h12_10)/4
        #subtract beta 0 from all measurements
        #rescale them
        avg_h1 -= mean_h1
        avg_h2 -= mean_h2
        avg_h12 -= mean_h12
        # Per-channel contrast used to normalise the traces.
        scale_h1 = (h1_00+h1_10-h1_01-h1_11)/4
        scale_h2 = (h2_00+h2_01-h2_10-h2_11)/4
        scale_h12 = (h12_00+h12_11-h12_01-h12_10)/4
        avg_h1 = (avg_h1)/scale_h1
        avg_h2 = (avg_h2)/scale_h2
        avg_h12 = (avg_h12)/scale_h12
        #The averages have been redefined so redefine the cal terms
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # 24 tomography points: the first 8 segments of each channel.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])
    def _calibrate_betas(self):
        """
        calculates betas from calibration points for the initial measurement
        operator
        Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
        <0|Z|0> = 1, <1|Z|1> = -1
        Keyword arguments:
        measurements_cal --- array(2 ** n_qubits) should be ordered
            correctly (00, 01, 10, 11) for 2 qubits
        """
        cal_matrix = np.zeros((self.n_states, self.n_states))
        # get the coefficient matrix for the betas
        for i in range(self.n_states):
            for j in range(self.n_states):
                # perform bitwise AND and count the resulting 1s
                cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
        # invert solve the simple system of equations
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        betas = np.zeros(12)
        # One beta block per readout channel; also stored individually for
        # assemble_M_matrix.
        betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[0:4])
        self.betas_up = betas[0:4]
        betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[4:8])
        self.betas_p = betas[4:8]
        betas[8:] = np.dot(np.linalg.inv(cal_matrix),
                           self.measurements_cal[8:12])
        self.betas_pp = betas[8:]
        return betas
    def assemble_M_matrix_single_block(self, beta_array):
        """Build the 8x10 readout-model block for one channel."""
        # II IZ ZI ZZ IX XI XX IY YI YY
        M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
                                                beta_array[2], beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_2 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_3 = np.array([beta_array[0],
                                                beta_array[1],
                                                -1*beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_4 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_5 = np.array([beta_array[0],  # 36
                                                0, 0, 0, -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3], 0, 0, 0])
        M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,  # 29
                                                beta_array[1],
                                                beta_array[2],
                                                beta_array[3],
                                                0, 0, 0])
        M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
                                                0, 0, 0, 0, beta_array[1],
                                                beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
                                                0, 0, -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
                                           M_matrix_single_block_row_2,
                                           M_matrix_single_block_row_3,
                                           M_matrix_single_block_row_4,
                                           M_matrix_single_block_row_5,
                                           M_matrix_single_block_row_6,
                                           M_matrix_single_block_row_7,
                                           M_matrix_single_block_row_8))
        M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
        return M_matrix_single_block
    def assemble_M_matrix(self):
        """Stack the three per-channel 8x10 blocks into the 24x10 model."""
        Block1 = self.assemble_M_matrix_single_block(self.betas_up)
        Block2 = self.assemble_M_matrix_single_block(self.betas_p)
        Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
        self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
        return self.M_matrix
    def invert_M_matrix(self):
        """Least-squares pseudo-inverse of the non-square model matrix."""
        self.inverse_matrix = np.linalg.pinv(self.M_matrix)
        return self.inverse_matrix
    def execute_error_signalling(self, ev):
        """Rescale expectation values for the error-signalling scheme.

        NOTE(review): here ZI = (ev[2]-ev[1]) while the non-shots sibling
        classes use (ev[1]-ev[2]); confirm which sign convention is the
        intended one.
        """
        II = (ev[0] - ev[3])/(1 - ev[3])
        IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[2] - ev[1])/(1 - ev[3])
        ZZ = (ev[3] - ev[0])/(1 - ev[3])
        XX = (ev[4] + ev[5])/(1 - ev[3])
        YY = (ev[5] + ev[4])/(1 - ev[3])
        ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
        return ev_error_signalling
    def execute_expectation_value_calculation(self):
        """Return [1, IZ, ZI, ZZ, XX, YY]; II is pinned to 1."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        print(self.expect_values)
        # Indices 1..3 select IZ, ZI, ZZ; 6 selects XX; 9 selects YY.
        expect_values_VQE = np.array([1,
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        self.expect_values = expect_values_VQE
        return expect_values_VQE
    def execute_expectation_value_calculation_traceone(self):
        """Trace-one variant: drop the II column and subtract beta_0 first."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        # Remove each channel's identity offset (beta_0) from its 8 segments
        # before inverting the II-free model.
        beta_0_vec = np.repeat([self.betas_up[0],
                                self.betas_p[0],
                                self.betas_pp[0]], 8)
        rescaled_measurements_tomo = self.measurements_tomo - beta_0_vec
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        # With the II column removed the indices shift down by one:
        # 0..2 are IZ, ZI, ZZ; 5 is XX; 8 is YY.
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        self.expect_values = expect_values_VQE
        print(self.expect_values)
        return expect_values_VQE
    def execute_expectation_value_calculation_T1signaling(self):
        """Full 10-term inversion followed by error-signalling rescaling."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
        self.expect_values = expect_values_VQE
        return expect_values_VQE
class ExpectationValueCalculation2_shots:
    """Shots-based variant of ExpectationValueCalculation2.

    The three channels are rebuilt from raw shots via get_segments_average
    (as in ExpectationValueCalculation3_shots) but, unlike that class, the
    averaged traces are NOT offset/scale-normalised before calibration.
    """
    def __init__(self, auto=True, label='', timestamp=None,
                 fig_format='png',
                 q0_label='q0',
                 q1_label='q1', close_fig=True, **kw):
        self.label = label
        self.timestamp = timestamp
        self.fig_format = fig_format
        # q0 == D2
        self.q0_label = q0_label
        # q1 == A
        self.q1_label = q1_label
        self.n_states = 2 ** 2
        self.ma_obj = ma.MeasurementAnalysis(auto=False, label=label,
                                             timestamp=timestamp)
        self.ma_obj.get_naming_and_values()
        # self.get_naming_and_values()
        # hard coded number of segments for a 2 qubit state tomography
        # constraint imposed by UHFLI
        self.nr_segments = 16
        # self.exp_name = os.path.split(self.folder)[-1][7:]
        # Per-repetition shot arrays (average=False keeps all repetitions).
        shots_I_q0 = get_segments_average(self.ma_obj.measured_values[0],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        shots_I_q1 = get_segments_average(self.ma_obj.measured_values[1],
                                          segments_per_block=16,
                                          block_size=4094,
                                          average=False)
        # Correlator channel: product of the two range-normalised shot arrays.
        shots_I_q0q1 = np.multiply(shots_I_q0/(np.max(shots_I_q0)-np.min(shots_I_q0)),shots_I_q1/(np.max(shots_I_q1)-np.min(shots_I_q1)))
        # Average over repetitions to obtain one 16-segment trace per channel.
        avg_h1 = np.mean(shots_I_q0,axis=0)
        avg_h2 = np.mean(shots_I_q1,axis=0)
        avg_h12 = np.mean(shots_I_q0q1,axis=0)
        # Calibration points: segments 8..15 hold two repetitions each of
        # the prepared states 00, 01, 10, 11.
        h1_00 = np.mean(avg_h1[8:10])
        h1_01 = np.mean(avg_h1[10:12])
        h1_10 = np.mean(avg_h1[12:14])
        h1_11 = np.mean(avg_h1[14:])
        h2_00 = np.mean(avg_h2[8:10])
        h2_01 = np.mean(avg_h2[10:12])
        h2_10 = np.mean(avg_h2[12:14])
        h2_11 = np.mean(avg_h2[14:])
        h12_00 = np.mean(avg_h12[8:10])
        h12_01 = np.mean(avg_h12[10:12])
        h12_10 = np.mean(avg_h12[12:14])
        h12_11 = np.mean(avg_h12[14:])
        # 24 tomography points: the first 8 segments of each channel.
        self.measurements_tomo = (
            np.array([avg_h1[0:8], avg_h2[0:8],
                      avg_h12[0:8]])).flatten()
        # print(self.measurements_tomo)
        # print(len(self.measurements_tomo))
        # 108 x 1
        # get the calibration points by averaging over the five measurements
        # taken knowing the initial state we put in
        self.measurements_cal = np.array(
            [h1_00, h1_01, h1_10, h1_11,
             h2_00, h2_01, h2_10, h2_11,
             h12_00, h12_01, h12_10, h12_11])
    def _calibrate_betas(self):
        """
        calculates betas from calibration points for the initial measurement
        operator
        Betas are ordered by B0 -> II B1 -> IZ etc(binary counting)
        <0|Z|0> = 1, <1|Z|1> = -1
        Keyword arguments:
        measurements_cal --- array(2 ** n_qubits) should be ordered
            correctly (00, 01, 10, 11) for 2 qubits
        """
        cal_matrix = np.zeros((self.n_states, self.n_states))
        # get the coefficient matrix for the betas
        for i in range(self.n_states):
            for j in range(self.n_states):
                # perform bitwise AND and count the resulting 1s
                cal_matrix[i, j] = (-1)**(bin((i & j)).count("1"))
        # invert solve the simple system of equations
        # print(cal_matrix)
        # print(np.linalg.inv(cal_matrix))
        betas = np.zeros(12)
        # print(self.measurements_cal[0:4])
        # One beta block per readout channel; also stored individually for
        # assemble_M_matrix.
        betas[0:4] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[0:4])
        self.betas_up = betas[0:4]
        betas[4:8] = np.dot(np.linalg.inv(cal_matrix),
                            self.measurements_cal[4:8])
        self.betas_p = betas[4:8]
        betas[8:] = np.dot(np.linalg.inv(cal_matrix),
                           self.measurements_cal[8:12])
        self.betas_pp = betas[8:]
        return betas
    def assemble_M_matrix_single_block(self, beta_array):
        """Build the 8x10 readout-model block for one channel.

        Column order of the Pauli terms appears to be
        II IZ ZI ZZ IX XI XX IY YI YY (documented explicitly in
        ExpectationValueCalculation3_shots).
        """
        M_matrix_single_block_row_1 = np.array([beta_array[0], beta_array[1],
                                                beta_array[2], beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_2 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_3 = np.array([beta_array[0],
                                                beta_array[1],
                                                -1*beta_array[2],
                                                -1*beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_4 = np.array([beta_array[0],
                                                -1*beta_array[1],
                                                -1*beta_array[2],
                                                beta_array[3],
                                                0, 0, 0, 0, 0, 0])
        M_matrix_single_block_row_5 = np.array([beta_array[0],
                                                0, 0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3], 0, 0, 0])
        M_matrix_single_block_row_6 = np.array([beta_array[0], 0, 0, 0,
                                                beta_array[1],
                                                beta_array[2],
                                                beta_array[3],
                                                0, 0, 0])
        M_matrix_single_block_row_7 = np.array([beta_array[0], 0, 0,
                                                0, 0, 0, 0, beta_array[1],
                                                beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block_row_8 = np.array([beta_array[0], 0, 0, 0, 0,
                                                0, 0, -beta_array[1],
                                                -beta_array[2],
                                                beta_array[3]])
        M_matrix_single_block = np.vstack((M_matrix_single_block_row_1,
                                           M_matrix_single_block_row_2,
                                           M_matrix_single_block_row_3,
                                           M_matrix_single_block_row_4,
                                           M_matrix_single_block_row_5,
                                           M_matrix_single_block_row_6,
                                           M_matrix_single_block_row_7,
                                           M_matrix_single_block_row_8))
        M_matrix_single_block = M_matrix_single_block.reshape(8, 10)
        return M_matrix_single_block
    def assemble_M_matrix(self):
        """Stack the three per-channel 8x10 blocks into the 24x10 model."""
        Block1 = self.assemble_M_matrix_single_block(self.betas_up)
        Block2 = self.assemble_M_matrix_single_block(self.betas_p)
        Block3 = self.assemble_M_matrix_single_block(self.betas_pp)
        self.M_matrix = np.vstack((Block1, Block2, Block3)).reshape(24, 10)
        return self.M_matrix
    def invert_M_matrix(self):
        """Least-squares pseudo-inverse of the non-square model matrix."""
        self.inverse_matrix = np.linalg.pinv(self.M_matrix)
        return self.inverse_matrix
    def execute_error_signalling(self, ev):
        """Rescale expectation values for the error-signalling scheme.

        NOTE(review): IZ and ZI use identical formulas here (as do XX and
        YY), while ExpectationValueCalculation3_shots uses (ev[2] - ev[1])
        for ZI; confirm which convention is intended.
        """
        II = (ev[0] - ev[3])/(1 - ev[3])
        IZ = (ev[1] - ev[2])/(1 - ev[3])
        ZI = (ev[1] - ev[2])/(1 - ev[3])
        ZZ = (ev[3] - ev[0])/(1 - ev[3])
        XX = (ev[4] + ev[5])/(1 - ev[3])
        YY = (ev[4] + ev[5])/(1 - ev[3])
        ev_error_signalling = np.array([II, IZ, ZI, ZZ, XX, YY])
        return ev_error_signalling
    def execute_expectation_value_calculation(self):
        """Return [II, IZ, ZI, ZZ, XX, YY] via the full 10-term inversion."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.invert_M_matrix()
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        # Indices 0..3 select II, IZ, ZI, ZZ; 6 selects XX; 9 selects YY.
        expect_values_VQE = np.array([self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[3],
                                      self.expect_values[6],
                                      self.expect_values[9]])
        return expect_values_VQE
    def execute_expectation_value_calculation_traceone(self):
        """Same inversion with the II column dropped; II is fixed to 1."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        # With the II column removed the indices shift down by one:
        # 0..2 are IZ, ZI, ZZ; 5 is XX; 8 is YY.
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        return expect_values_VQE
    def execute_expectation_value_calculation_T1signaling(self):
        """Trace-one inversion followed by the error-signalling rescaling."""
        # assemble matrix that connects RO with terms
        self._calibrate_betas()
        self.assemble_M_matrix()
        self.inverse_matrix = np.linalg.pinv(self.M_matrix[:, 1:])
        # use it to get terms back from RO
        rescaled_measurements_tomo = self.measurements_tomo
        self.expect_values = np.dot(self.inverse_matrix,
                                    rescaled_measurements_tomo)
        expect_values_VQE = np.array([1,
                                      self.expect_values[0],
                                      self.expect_values[1],
                                      self.expect_values[2],
                                      self.expect_values[5],
                                      self.expect_values[8]])
        expect_values_VQE = self.execute_error_signalling(expect_values_VQE)
        return expect_values_VQE
| 45.829854 | 137 | 0.524451 |
e9749da4c43542d06d0e597d633c6e9f728aedb0 | 3,197 | py | Python | wrappers/BackwardSelection.py | Arseny-N/ITMO_FS | 12ec02c2aa0a9370d75732034acf3afb26a0bce5 | [
"MIT"
] | null | null | null | wrappers/BackwardSelection.py | Arseny-N/ITMO_FS | 12ec02c2aa0a9370d75732034acf3afb26a0bce5 | [
"MIT"
] | null | null | null | wrappers/BackwardSelection.py | Arseny-N/ITMO_FS | 12ec02c2aa0a9370d75732034acf3afb26a0bce5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from collections import OrderedDict
import numpy as np
from utils import generate_features
from wrappers.wrapper_utils import get_current_cv_accuracy
class BackwardSelection:
    """
    Backward Selection removes one feature at a time until the requested
    number of features has been removed. Features are tried for removal in
    ascending order of their measure-assigned rank (lowest-ranked first).

    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a fit method that provides
        information about feature importance either through a coef_
        attribute or through a feature_importances_ attribute.
    n_features : int
        Number of features to be removed.
    measure : callable
        Feature-ranking function: measure(X, y) -> per-feature scores,
        aligned with the feature indices of X.

    See Also
    --------

    Examples
    --------
    """

    def __init__(self, estimator, n_features, measure):
        if not hasattr(estimator, 'fit'):
            raise TypeError("estimator should be an estimator implementing "
                            "'fit' method, %r was passed" % estimator)
        self.__estimator__ = estimator
        self.__n_features__ = n_features
        # Indices of the currently selected features; filled in by fit().
        self.features__ = []
        self.__measure = measure
        # Best cross-validated accuracy seen so far.
        self.best_score = 0

    def fit(self, X, y, cv=3):
        """
        Fits wrapper.

        Greedily tries to drop features (lowest measure rank first); a
        feature stays removed only if dropping it improves the
        cross-validated accuracy, until n_features have been removed or
        every candidate has been tried.

        Parameters
        ----------
        X : array-like, shape (n_features,n_samples)
            The training input samples.
        y : array-like, shape (n_features,n_samples)
            the target values.
        cv : int
            Number of folds in cross-validation

        Returns
        ------
        None

        See Also
        --------

        Examples
        --------
        """
        features_ranks = dict(zip(generate_features(X), self.__measure(X, y)))
        # Candidates sorted ascending by rank: worst features are tried first.
        sorted_features_ranks = OrderedDict(sorted(features_ranks.items(), key=lambda x: x[1]))
        selected_features = np.array([feature for feature in sorted_features_ranks])
        number_of_features_left_to_remove = self.__n_features__
        # Baseline accuracy with every feature included.
        self.__estimator__.fit(X[:, selected_features], y)
        accuracy = get_current_cv_accuracy(self.__estimator__, X, y, selected_features, cv)
        i = 0
        self.best_score = accuracy
        while len(sorted_features_ranks) != i and i < len(selected_features):
            # Tentatively drop the i-th remaining candidate and re-evaluate.
            iteration_features = np.delete(selected_features, i)
            self.__estimator__.fit(X[:, iteration_features], y)
            iteration_accuracy = get_current_cv_accuracy(self.__estimator__, X, y, iteration_features, cv)
            if iteration_accuracy > self.best_score:
                # Keep the removal; do not advance i, since index i now
                # refers to the next candidate after the deletion.
                selected_features = iteration_features
                number_of_features_left_to_remove -= 1
                self.best_score = iteration_accuracy
                if not number_of_features_left_to_remove:
                    break
            else:
                i += 1
        self.features__ = selected_features

    def predict(self, X):
        """Predict targets for X using only the selected features.

        Bug fix: the original discarded the estimator's predictions
        (missing return); they are now returned to the caller.
        """
        return self.__estimator__.predict(X[:, self.features__])
| 34.75 | 120 | 0.604942 |
d59bf5bca842b13981c3aeeb1b9afbbf8c092923 | 384 | py | Python | src/data_preparation/test_data/nested_comprehension.py | petroolg/typilus | 8304849f0912af95a378e357e8d4f3e10949de49 | [
"MIT"
] | 4 | 2021-05-10T10:56:13.000Z | 2022-02-18T00:21:33.000Z | src/data_preparation/test_data/nested_comprehension.py | petroolg/typilus | 8304849f0912af95a378e357e8d4f3e10949de49 | [
"MIT"
] | 6 | 2020-11-26T18:21:03.000Z | 2021-05-25T09:04:14.000Z | src/data_preparation/test_data/nested_comprehension.py | petroolg/typilus | 8304849f0912af95a378e357e8d4f3e10949de49 | [
"MIT"
] | 3 | 2020-12-07T17:06:09.000Z | 2022-02-18T00:21:37.000Z | empty_list = []
# Flat comprehension over a simple range.
generated_list = [i for i in range(10)]
# One level of nesting: inner list j ranges over range(i).
single_nested_list = [[i * j for j in range(i)] for i in range(10)]
# Two levels of nesting: innermost k ranges over range(j).
double_nested_list = [[[i * j * k for k in range(j)] for j in range(i)] for i in range(10)]
# Flat comprehension with two chained loop clauses (flattened nesting).
single_comprehended_list = [i * j for i in range(10) for j in range(i)]
# Flat comprehension with three chained loop clauses.
double_comprehended_list = [i * j * k for i in range(10) for j in range(i) for k in range(j)]
| 42.666667 | 93 | 0.666667 |
64f92075cf607a281c92c359b39d9c24842e8571 | 699 | py | Python | blog/models.py | paullab/my-first-blog | 1ee9f3e52eff29ff278d2ecfaf7921db0c093756 | [
"Apache-2.0"
] | null | null | null | blog/models.py | paullab/my-first-blog | 1ee9f3e52eff29ff278d2ecfaf7921db0c093756 | [
"Apache-2.0"
] | null | null | null | blog/models.py | paullab/my-first-blog | 1ee9f3e52eff29ff278d2ecfaf7921db0c093756 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
class Post(models.Model):
    """Blog post with optional image and file attachments."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    photo = models.ImageField(blank=True, null=True)
    file_Save = models.FileField(blank=True, null=True)
    # created_at set once on insert; updated_at refreshed on every save.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    #python 2 : __unicode__
    #python 3 : __str__
    def __str__(self):
        return self.title
class Comment(models.Model):
    """Comment attached to a Post."""
    # NOTE(review): ForeignKey without on_delete is Django < 2.0 syntax;
    # on_delete becomes mandatory from Django 2.0 onward.
    post = models.ForeignKey(Post)
    author = models.CharField(max_length=10)
    message = models.TextField()
    # created_at set once on insert.
    created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True) | 29.125 | 53 | 0.785408 |
bd04244888d08fcd4083d01adb3d4d2eb5be344c | 3,424 | py | Python | azext_iot/sdk/iothub/service/models/configuration_py3.py | lucadruda/azure-iot-cli-extension | 9d2f677d19580f8fbac860e079550167e743a237 | [
"MIT"
] | 79 | 2017-09-25T19:29:17.000Z | 2022-03-30T20:55:57.000Z | azext_iot/sdk/iothub/service/models/configuration_py3.py | lucadruda/azure-iot-cli-extension | 9d2f677d19580f8fbac860e079550167e743a237 | [
"MIT"
] | 305 | 2018-01-17T01:12:10.000Z | 2022-03-23T22:38:11.000Z | azext_iot/sdk/iothub/service/models/configuration_py3.py | lucadruda/azure-iot-cli-extension | 9d2f677d19580f8fbac860e079550167e743a237 | [
"MIT"
] | 69 | 2017-11-14T00:30:46.000Z | 2022-03-01T17:11:45.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Configuration(Model):
    """The configuration for IoT Hub device and module twins.

    Plain data holder generated against the IoT Hub service API: every
    constructor argument is optional and stored verbatim on the instance,
    while ``_attribute_map`` drives msrest (de)serialization.

    :param id: Unique identifier of the configuration.
    :param schema_version: Schema version of the configuration.
    :param labels: Key-value pairs describing the configuration.
    :param content: Configuration payload (ConfigurationContent).
    :param target_condition: Twin query selecting the targeted devices or
     modules (based on twin tags and/or reported properties).
    :param created_time_utc: Creation date and time of the configuration.
    :param last_updated_time_utc: Last-update date and time of the
     configuration.
    :param priority: Priority number assigned to the configuration.
    :param system_metrics: Metrics computed by IoT Hub (not customizable).
    :param metrics: Developer-defined metric queries over twin reported
     properties.
    :param etag: ETag of the configuration.
    """

    # Wire-format mapping consumed by msrest serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'schema_version': {'key': 'schemaVersion', 'type': 'str'},
        'labels': {'key': 'labels', 'type': '{str}'},
        'content': {'key': 'content', 'type': 'ConfigurationContent'},
        'target_condition': {'key': 'targetCondition', 'type': 'str'},
        'created_time_utc': {'key': 'createdTimeUtc', 'type': 'iso-8601'},
        'last_updated_time_utc': {'key': 'lastUpdatedTimeUtc', 'type': 'iso-8601'},
        'priority': {'key': 'priority', 'type': 'int'},
        'system_metrics': {'key': 'systemMetrics', 'type': 'ConfigurationMetrics'},
        'metrics': {'key': 'metrics', 'type': 'ConfigurationMetrics'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, id: str=None, schema_version: str=None, labels=None, content=None, target_condition: str=None, created_time_utc=None, last_updated_time_utc=None, priority: int=None, system_metrics=None, metrics=None, etag: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Identity and payload.
        self.id = id
        self.schema_version = schema_version
        self.etag = etag
        self.labels = labels
        self.content = content
        # Targeting and scheduling.
        self.target_condition = target_condition
        self.priority = priority
        # Server-maintained timestamps.
        self.created_time_utc = created_time_utc
        self.last_updated_time_utc = last_updated_time_utc
        # Metric queries.
        self.system_metrics = system_metrics
        self.metrics = metrics
| 46.90411 | 261 | 0.657418 |
867173c60e5d149994c9a09b16003860c1fc66f8 | 2,367 | py | Python | setup.py | troiganto/headercount | 9c70fb16c4c82d98dd18403c86c13b7d03030e4e | [
"Apache-2.0"
] | null | null | null | setup.py | troiganto/headercount | 9c70fb16c4c82d98dd18403c86c13b7d03030e4e | [
"Apache-2.0"
] | null | null | null | setup.py | troiganto/headercount | 9c70fb16c4c82d98dd18403c86c13b7d03030e4e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017 Nico Madysa
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from headercount import get_version
long_description = """Headercount goes through a C or C++ project and
searches for #include directives. It recursively follows project header
includes and prints statistics about which header was included how many
times. This indicates which headers are depended on the most, which is
useful when debugging long compile times of medium-sized projects.
"""

# Package metadata for PyPI / pip installation.
setup(
    name='headercount',
    version=get_version(),
    python_requires='>=3.4',
    packages=['headercount'],
    entry_points={
        'console_scripts': [
            'headercount = headercount.__main__:main',
        ]
    },
    zip_safe=True,
    author='Nico Madysa',
    author_email='uebertreiber@gmx.de',
    description='Count directly and indirectly included headers in a C/C++ project',
    long_description=long_description,
    license='Apache License, Version 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Debuggers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        # Each supported minor version listed once ("3.4" used to be
        # duplicated three times).
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3 :: Only',
    ],
    keywords='header include c c++',  # deduplicated ("include" was listed twice)
    url='https://github.com/troiganto/headercount',
)
| 36.984375 | 84 | 0.680186 |
44ed3ffb5a453f8206a282e2be618c03c47812e9 | 1,923 | py | Python | devices/devices/xboxcontroller.py | AutodeskRoboticsLab/RLRoboticAssembly | 7f72266f626de53762482310f92f6b5c817085b3 | [
"MIT"
] | 21 | 2019-09-27T09:57:52.000Z | 2022-02-16T22:18:41.000Z | devices/devices/xboxcontroller.py | AutodeskRoboticsLab/RLRoboticAssembly | 7f72266f626de53762482310f92f6b5c817085b3 | [
"MIT"
] | 2 | 2020-11-23T08:51:38.000Z | 2021-11-18T06:57:22.000Z | devices/devices/xboxcontroller.py | AutodeskRoboticsLab/RLRoboticAssembly | 7f72266f626de53762482310f92f6b5c817085b3 | [
"MIT"
] | 3 | 2019-10-14T03:58:45.000Z | 2021-02-04T20:09:38.000Z | """
"""
import pygame
from pygame.locals import *
from devices.device import InputDevice
class XBoxController(InputDevice):
    """InputDevice backed by an Xbox gamepad through pygame's joystick API."""
    def __init__(self, *args, **kwargs):
        InputDevice.__init__(self, *args, **kwargs)
    @property
    def codename(self):
        """Short identifier for this device type."""
        return 'xbc'
    def _connect(self):
        # Initialize pygame and claim the first joystick; fail loudly if none.
        pygame.init()
        pygame.joystick.init()
        assert pygame.joystick.get_count() > 0, \
            'No joystick found!'
        self._device = pygame.joystick.Joystick(0)
        self._device.init()
        # Signs applied to the z translation / z rotation axes; flipped by
        # buttons 8 and 9 in _update().
        self._z_directions = [1, 1]
        self._is_connected = True
    def _disconnect(self):
        # Intentionally a no-op.
        pass
    def _update(self):
        # Drain the pygame event queue and derive a 6-DOF pose
        # [x, y, z, rx, ry, rz] from the most recent axis snapshot.
        action = []
        for event in pygame.event.get():
            if event.type == JOYBUTTONUP or event.type == JOYBUTTONDOWN:
                button_state = [self._device.get_button(i) for i in range(15)]
                # Buttons 8/9 flip the direction of the z axes while held.
                self._z_directions[0] *= -1 if button_state[8] else 1
                self._z_directions[1] *= -1 if button_state[9] else 1
            if event.type == JOYAXISMOTION:
                action = [self._device.get_axis(i) for i in range(6)]
                # Axis mapping (0/1 -> x/y, 4 -> z, 2/3 -> rx/ry, 5 -> rz)
                # is assumed from the indices - TODO confirm per platform
                # and driver. Y axes are inverted; axes 4/5 are rescaled
                # from [-1, 1] to [0, 1] before applying the sign toggle.
                x = action[0]
                y = action[1] * -1
                z = (action[4] + 1) / 2 * self._z_directions[0]
                rx = action[2]
                ry = action[3] * -1
                rz = (action[5] + 1) / 2 * self._z_directions[1]
                action = [x, y, z, rx, ry, rz]
        if len(action) == 0:
            return  # no axis motion this cycle; keep the previous pose
        for i in range(6):
            action[i] = 0.0 if abs(action[i]) < 0.001 else action[i]  # deadzone filter (original comment: "hpf")
            action[i] *= self.pos_scaling if i < 3 else self.orn_scaling
            action[i] = round(action[i], 5)
        self.pose = action
if __name__ == '__main__':
def printout(s):
print(s.pose)
s = XBoxController()
s.start()
s.on_update = printout
import time
while True:
time.sleep(1)
| 27.471429 | 78 | 0.534581 |
ddf8b83b4ee6acf5820b92d5d32f78c78ce2ad3a | 761 | py | Python | tests/test_ssl.py | jmcarp/requests-middleware | b899f79af2053f39268ddb76d9a1951e405a34cf | [
"MIT"
] | 20 | 2015-01-16T07:51:59.000Z | 2021-07-21T21:25:21.000Z | tests/test_ssl.py | jmcarp/requests-middleware | b899f79af2053f39268ddb76d9a1951e405a34cf | [
"MIT"
] | 1 | 2015-09-14T16:48:19.000Z | 2015-11-08T05:55:03.000Z | tests/test_ssl.py | jmcarp/requests-middleware | b899f79af2053f39268ddb76d9a1951e405a34cf | [
"MIT"
] | 5 | 2015-01-16T08:04:20.000Z | 2019-03-13T04:50:38.000Z | # -*- coding: utf-8 -*-
import pytest
import ssl
import requests
from requests_middleware.middleware import MiddlewareHTTPAdapter
from requests_middleware.contrib import sslware
@pytest.fixture
def session():
    """Requests session whose adapter pins SSL to TLSv1 via middleware."""
    adapter = MiddlewareHTTPAdapter(
        middlewares=[sslware.SSLMiddleware(ssl.PROTOCOL_TLSv1)],
    )
    http_session = requests.Session()
    # Route both plain and TLS traffic through the middleware adapter.
    for prefix in ('http://', 'https://'):
        http_session.mount(prefix, adapter)
    return http_session
# Integration tests
@pytest.mark.httpretty
def test_ssl(session, page_fixture):
resp = session.get('http://test.com/page')
pool_kwargs = resp.connection.poolmanager.connection_pool_kw
assert pool_kwargs.get('ssl_version') == ssl.PROTOCOL_TLSv1
| 25.366667 | 64 | 0.755585 |
0b8df99dfe445cfb8bf1b61d3605d4eadb807ffe | 116 | py | Python | DjangoAPI/admin.py | qbdq/MLAPI_DJREST | eb8e716014fba0ef70f6c9d71f05557f268aefee | [
"MIT"
] | null | null | null | DjangoAPI/admin.py | qbdq/MLAPI_DJREST | eb8e716014fba0ef70f6c9d71f05557f268aefee | [
"MIT"
] | null | null | null | DjangoAPI/admin.py | qbdq/MLAPI_DJREST | eb8e716014fba0ef70f6c9d71f05557f268aefee | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import house
# Register your models here.
admin.site.register(house) | 19.333333 | 32 | 0.801724 |
3a63bb023ff21858ba03438ca5d5d0493406723f | 553 | py | Python | docs/ext/substitute.py | HarshCasper/qhub | 4c864db7164f7d31506021f55a5d8968b701ca9d | [
"BSD-3-Clause"
] | null | null | null | docs/ext/substitute.py | HarshCasper/qhub | 4c864db7164f7d31506021f55a5d8968b701ca9d | [
"BSD-3-Clause"
] | null | null | null | docs/ext/substitute.py | HarshCasper/qhub | 4c864db7164f7d31506021f55a5d8968b701ca9d | [
"BSD-3-Clause"
] | null | null | null | """
This is a hard replace as soon as the source file is read, so no respect for any markup at all
Simply forces replace of ||QHUB_VERSION|| with the qhub_version_string in conf.py
"""
def dosubs(app, docname, source):
"""
Replace QHUB_VERSION with the qhub version
"""
if app.config.qhub_version_string != "":
src = source[0]
source[0] = src.replace("||QHUB_VERSION||", app.config.qhub_version_string)
def setup(app):
app.connect("source-read", dosubs)
app.add_config_value("qhub_version_string", "", "env")
| 29.105263 | 94 | 0.681736 |
9f81fd1558f53e6b5f411fc16dad14862065bc93 | 3,862 | py | Python | examples/welcome_message.py | moreno19/instabot2 | 55128d4563612e82544b66d8f6e2f81551cdc3af | [
"Apache-2.0"
] | null | null | null | examples/welcome_message.py | moreno19/instabot2 | 55128d4563612e82544b66d8f6e2f81551cdc3af | [
"Apache-2.0"
] | null | null | null | examples/welcome_message.py | moreno19/instabot2 | 55128d4563612e82544b66d8f6e2f81551cdc3af | [
"Apache-2.0"
] | null | null | null | """
instabot example
Workflow:
Welcome message for new followers.
"""
import argparse
import os
import sys
from datetime import datetime, timedelta
from tqdm import tqdm
import time
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot, utils
# Welcome-message workflow: remembers which followers were already greeted
# and sends a DM to every newly detected follower, once per hourly cycle.
bot = Bot()
NOTIFIED_USERS_PATH = 'notified_users.txt'
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-users', type=str, nargs='?', help='a path to already notified users')
parser.add_argument('-message', type=str, nargs='?', help='message text')
args = parser.parse_args()
bot.login(username=args.u, password=args.p,
          proxy=args.proxy)
print("starting to run\n")
while True:  # was "while 1"; the loop only ends via exit() on first run
    print ('Starting a fresh hourly run\n')
    # Load the set of followers that were already messaged.
    notified_users = utils.file(NOTIFIED_USERS_PATH)
    if not notified_users.list:
        # Bootstrap run: record every current follower without messaging
        # them, so only *future* followers receive the welcome DM.
        notified_users.save_list(bot.followers)
        print(
            'All followers saved in file {users_path}.\n'
            'In a next time, for all new followers script will send messages.'.format(
                users_path=NOTIFIED_USERS_PATH
            )
        )
        exit(0)
    print('Read saved list of notified users. Count: {count}'.format(
        count=len(notified_users)
    ))
    all_followers = bot.followers
    print('Amount of all followers is {count}'.format(
        count=len(all_followers)
    ))
    new_followers = set(all_followers) - notified_users.set
    if not new_followers:
        print("no new followers right now. This sucks! I'm going to sleep again\n")
        # Sleep until 10 minutes past the next hour, printing a countdown
        # roughly every 10 seconds.
        dt = datetime.now() + timedelta(hours=1)
        dt = dt.replace(minute=10)
        pc = 0
        while datetime.now() < dt:
            pc += 1
            # Current minute-of-hour parsed from the timestamp string
            # (characters 3:5 of the time component). The unused "goal"
            # computation from the original was removed.
            cur = int(str(datetime.now()).split(' ')[1][3:5])
            if cur < 10:
                t = 10 - cur
            else:
                t = 60 - cur
            # BUGFIX: was "pc is 10" - identity comparison with an int
            # literal that only happened to work via CPython int caching.
            if pc == 10:
                print("sleeping: will run again in " +str(t)+" minutes\n")
                pc = 0
            time.sleep(1)
    print('Found new followers. Count: {count}'.format(
        count=len(new_followers)
    ))
    for follower in tqdm(new_followers):
        try:
            name = str(bot.get_user_info(follower)["full_name"])
            # Keep only the first name when the full name is two words.
            if len(name.split(' ')) == 2:  # BUGFIX: was "is 2"
                name = name.split(' ')[0]
            MESSAGE = "Hey "+name+"! Thanks for checking out our eco-friendly, reusable stainless-steel straws! Our mission is to eliminate as much plastic as possible in order to preserve our oceans. Getting your own Boba Buddy is a simple way to eliminate up to 600 plastic straws that hurt marine wildlife everyday. If you're ready to join the movement, click on one of our pictures, or check out our website at thebobabuddy.com - You'll make a difference! Have an awesome day, and keep making smart choices for our planet <3"
        except Exception:  # BUGFIX: bare "except:" also swallowed KeyboardInterrupt/SystemExit
            MESSAGE = "Hey there, thanks for checking out our eco-friendly, stainless-steel straws! Our mission is to eliminate as much plastic as possible in order to preserve our oceans, one straw at a time. Getting your own Boba Buddy is a simple way to eliminate up to 600 plastic straws that hurt marine wildlife everyday. If you're ready to join the movement, click on one of our pictures, or check out our website at thebobabuddy.com - You'll make a difference! Have a beautiful day, and keep making smart choices for our planet <3"
        print(MESSAGE)
        time.sleep(2)
        if bot.send_message(MESSAGE, follower):
            notified_users.append(follower)
    # Marker for the next hourly run (recomputed at the top of the loop).
    dt = datetime.now() + timedelta(hours=1)
    dt = dt.replace(minute=10)
| 36.093458 | 539 | 0.656914 |
0e47668667748baa094547992313d63ede627fb1 | 1,619 | py | Python | carebt/examples/action_with_params.py | CareBT/carebt | 44c6da1e36e1f45baa5de5d5d9a5b733423c325d | [
"Apache-2.0"
] | 2 | 2021-11-08T12:19:39.000Z | 2021-12-02T16:10:05.000Z | carebt/examples/action_with_params.py | CareBT/carebt | 44c6da1e36e1f45baa5de5d5d9a5b733423c325d | [
"Apache-2.0"
] | null | null | null | carebt/examples/action_with_params.py | CareBT/carebt | 44c6da1e36e1f45baa5de5d5d9a5b733423c325d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Andreas Steck (steck.andi@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from carebt import ActionNode
from carebt import NodeStatus
class AddTwoNumbersAction(ActionNode):
    """Demo careBT `ActionNode` that adds two numbers.

    Reads the input parameters ``?x`` and ``?y`` (each defaulting to 0
    when unbound) and writes their sum to the output parameter ``?z``.
    """

    def __init__(self, bt_runner):
        # Declare the node's parameter contract: two inputs, one output.
        super().__init__(bt_runner, '?x ?y => ?z')

    def on_init(self) -> None:
        # Unbound inputs arrive as None; treat them as zero.
        self._x = 0 if self._x is None else self._x
        self._y = 0 if self._y is None else self._y

    def on_tick(self) -> None:
        # Compute the sum, report it, and succeed immediately.
        self._z = self._x + self._y
        print(f'AddTwoNumbersAction: calculating: {self._x} + {self._y} = {self._z}')
        self.set_status(NodeStatus.SUCCESS)
| 30.54717 | 85 | 0.657196 |
6df6fadf4438e1e2a55c27f7bd94bf23783f4550 | 878 | py | Python | cmd/miscellaneous/ping.py | Xinverse/BOTC-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2020-06-21T17:20:17.000Z | 2020-06-21T17:20:17.000Z | cmd/miscellaneous/ping.py | BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2020-07-07T03:47:44.000Z | 2020-07-07T03:47:44.000Z | cmd/miscellaneous/ping.py | BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot | 1932c649c81a5a1eab735d7abdee0761c2853940 | [
"MIT"
] | 1 | 2022-02-18T00:42:19.000Z | 2022-02-18T00:42:19.000Z | """Contains the ping command cog"""
import botutils
import json
from discord.ext import commands
from ._miscellaneous import Miscellaneous
with open('botutils/bot_text.json') as json_file:
language = json.load(json_file)
ping_str = language["cmd"]["ping"]
class Ping(Miscellaneous, name = language["system"]["miscellaneous_cog"]):
"""Ping command"""
@commands.command(
pass_context = True,
name = "ping",
aliases = ["pong"],
brief = language["doc"]["ping"]["brief"],
help = language["doc"]["ping"]["help"],
description = language["doc"]["ping"]["description"]
)
@commands.check(botutils.check_if_lobby_or_dm_or_admin)
async def ping(self, ctx):
"""Check the latency."""
msg = ping_str.format(botutils.BotEmoji.beating_heart, round(self.client.latency, 4))
await ctx.send(msg)
| 29.266667 | 93 | 0.651481 |
dfcea5c1923d50670398ecfa6f6e37ccfe15c338 | 56 | py | Python | CodeWars/Python/002-Grasshopper_-_Terminal_game_move_function.py | IsFilimonov/Interviews | 261b59cd80e1451804c37b03b4cce7c1b63f609d | [
"MIT"
] | 2 | 2021-05-09T22:39:49.000Z | 2021-09-16T12:44:09.000Z | CodeWars/Python/002-Grasshopper_-_Terminal_game_move_function.py | IsFilimonov/Interviews | 261b59cd80e1451804c37b03b4cce7c1b63f609d | [
"MIT"
] | null | null | null | CodeWars/Python/002-Grasshopper_-_Terminal_game_move_function.py | IsFilimonov/Interviews | 261b59cd80e1451804c37b03b4cce7c1b63f609d | [
"MIT"
] | null | null | null | def move(position, roll):
return roll * 2 + position | 28 | 30 | 0.678571 |
c5348111c01b7e880526c172381b21d1f7ec096c | 411 | py | Python | experiments/fdtd-2d/tmp_files/5823.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/fdtd-2d/tmp_files/5823.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/fdtd-2d/tmp_files/5823.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | from chill import *
# Auto-generated CHiLL transformation recipe for the Polybench fdtd-2d kernel.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/5823.c')
procedure('kernel_fdtd_2d')  # routine within the source file to transform
loop(0)  # operate on the first loop nest of the procedure
# Facts handed to the dependence analysis.
known(' nx > 1 ')
known(' ny > 1 ')
# Two-level tiling of statements 1-3; the arguments presumably are
# (statement, loop level, tile size, control-loop level) - confirm
# against the CHiLL manual.
tile(1,2,32,2)
tile(1,4,16,4)
tile(2,2,32,2)
tile(2,4,16,4)
tile(3,2,32,2)
tile(3,4,16,4)
| 22.833333 | 116 | 0.720195 |
110ef858996bd741555a4f7847a8107a4df4ad3d | 1,094 | py | Python | maddux/examples/tutorial.py | emielke12/maddux | fed7770c54124c14935523e6bcdba81ad9538cbc | [
"MIT"
] | 23 | 2016-04-23T18:13:24.000Z | 2021-12-14T23:37:09.000Z | maddux/examples/tutorial.py | emielke12/maddux | fed7770c54124c14935523e6bcdba81ad9538cbc | [
"MIT"
] | 12 | 2016-04-14T23:59:51.000Z | 2019-05-25T10:00:14.000Z | maddux/examples/tutorial.py | emielke12/maddux | fed7770c54124c14935523e6bcdba81ad9538cbc | [
"MIT"
] | 8 | 2018-10-19T13:52:27.000Z | 2020-10-31T23:30:24.000Z | import numpy as np
from maddux.robots.predefined_robots import simple_human_arm
from maddux.objects import Ball, Target, Obstacle
from maddux.environment import Environment
def tutorial():
    """Walk through the documentation tutorial: build an arm and a scene, then animate."""
    # Seven-joint start configuration and the arm's base location in the world.
    initial_config = np.array([0.5, 0.2, 0, 0.5, 0, 0, 0])
    base_position = np.array([2.0, 2.0, 0.0])
    robot_arm = simple_human_arm(2.0, 2.0, initial_config, base_position)  # link lengths 2.0
    # Scene objects: a ball to reach for, a throwing target, and a box obstacle.
    ball = Ball(position=[2.0, 0.0, 2.0], radius=0.15)
    target = Target(position=[5.0, 8.0, 2.0], radius=0.5)
    obstacle = Obstacle([4, 4, 0], [5, 5, 2])
    # Assemble a 10x10x10 environment around the arm and the objects.
    scene = Environment(
        dimensions=[10, 10, 10],
        dynamic_objects=[ball],
        static_objects=[target, obstacle],
        robot=robot_arm,
    )
    # Solve inverse kinematics toward the ball, then animate and record the path.
    robot_arm.ikine(ball.position)
    scene.animate(3.0)
    robot_arm.save_path("tutorial_path")
if __name__ == '__main__':
tutorial()
| 32.176471 | 69 | 0.624314 |
56965fd95491cf65dcb18cb0be5524bc5af92292 | 7,680 | py | Python | vnpy/trader/option_object.py | AITrading2020/vnpy-2.1.3 | 526cd42227daba2fec5e664bfab203cec1d27267 | [
"MIT"
] | null | null | null | vnpy/trader/option_object.py | AITrading2020/vnpy-2.1.3 | 526cd42227daba2fec5e664bfab203cec1d27267 | [
"MIT"
] | null | null | null | vnpy/trader/option_object.py | AITrading2020/vnpy-2.1.3 | 526cd42227daba2fec5e664bfab203cec1d27267 | [
"MIT"
] | null | null | null | """
Basic data structure used for general trading function in VN Trader.
"""
from dataclasses import dataclass
from datetime import datetime
from logging import INFO
from .constant import Direction, Exchange, Interval, Offset, Status, Product, OptionType, OrderType
# Order statuses that mean the order is still working in the market
# (set literal instead of the redundant set([...]) wrapper).
ACTIVE_STATUSES = {Status.SUBMITTING, Status.NOTTRADED, Status.PARTTRADED}
@dataclass
class BaseData:
    """
    Any data object needs a gateway_name as source
    and should inherit base data.
    """
    # Name of the gateway this object originated from.
    gateway_name: str
@dataclass
class TickData(BaseData):
    """
    Tick data contains information about:
        * last trade in market
        * orderbook snapshot
        * intraday market statistics.
    """
    symbol: str
    exchange: Exchange
    datetime: datetime  # NOTE: field name shadows the imported datetime class in this body
    name: str = ""
    # Intraday statistics.
    volume: float = 0
    open_interest: float = 0
    # Last trade.
    last_price: float = 0
    last_volume: float = 0
    # Daily price limits and reference prices.
    limit_up: float = 0
    limit_down: float = 0
    open_price: float = 0
    high_price: float = 0
    low_price: float = 0
    pre_close: float = 0
    # Order book snapshot: best five bid/ask price levels ...
    bid_price_1: float = 0
    bid_price_2: float = 0
    bid_price_3: float = 0
    bid_price_4: float = 0
    bid_price_5: float = 0
    ask_price_1: float = 0
    ask_price_2: float = 0
    ask_price_3: float = 0
    ask_price_4: float = 0
    ask_price_5: float = 0
    # ... and the volumes resting at those levels.
    bid_volume_1: float = 0
    bid_volume_2: float = 0
    bid_volume_3: float = 0
    bid_volume_4: float = 0
    bid_volume_5: float = 0
    ask_volume_1: float = 0
    ask_volume_2: float = 0
    ask_volume_3: float = 0
    ask_volume_4: float = 0
    ask_volume_5: float = 0
    def __post_init__(self):
        """Derive the globally unique "symbol.EXCHANGE" identifier."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class BarData(BaseData):
    """
    Candlestick bar data of a certain trading period.
    """
    symbol: str
    exchange: Exchange
    datetime: datetime  # NOTE: field name shadows the imported datetime class in this body
    interval: Interval = None  # trading period of the bar
    volume: float = 0
    open_interest: float = 0
    open_price: float = 0
    high_price: float = 0
    low_price: float = 0
    close_price: float = 0
    def __post_init__(self):
        """Derive the globally unique "symbol.EXCHANGE" identifier."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderData(BaseData):
    """
    Order data contains information for tracking latest status
    of a specific order.
    """
    symbol: str
    exchange: Exchange
    orderid: str
    type: OrderType = OrderType.LIMIT  # NOTE: field name shadows the ``type`` builtin
    direction: Direction = None
    offset: Offset = Offset.NONE
    price: float = 0
    volume: float = 0
    traded: float = 0  # volume filled so far
    status: Status = Status.SUBMITTING
    datetime: datetime = None
    def __post_init__(self):
        """Derive globally unique symbol and order identifiers."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
        self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
    def is_active(self) -> bool:
        """
        Check if the order is active.
        """
        # The membership test already yields the boolean; the previous
        # if/else returning True/False was redundant.
        return self.status in ACTIVE_STATUSES
    def create_cancel_request(self) -> "CancelRequest":
        """
        Create cancel request object from order.
        """
        req = CancelRequest(
            orderid=self.orderid, symbol=self.symbol, exchange=self.exchange
        )
        return req
@dataclass
class TradeData(BaseData):
    """
    Trade data contains information of a fill of an order. One order
    can have several trade fills.
    """
    symbol: str
    exchange: Exchange
    orderid: str  # order this fill belongs to
    tradeid: str  # fill identifier
    direction: Direction = None
    offset: Offset = Offset.NONE
    price: float = 0
    volume: float = 0
    datetime: datetime = None
    def __post_init__(self):
        """Derive globally unique symbol, order and trade identifiers."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
        self.vt_orderid = f"{self.gateway_name}.{self.orderid}"
        self.vt_tradeid = f"{self.gateway_name}.{self.tradeid}"
@dataclass
class PositionData(BaseData):
    """
    Position data is used for tracking each individual position holding.
    """
    symbol: str
    exchange: Exchange
    direction: Direction  # side of the holding
    volume: float = 0
    frozen: float = 0  # presumably the volume locked by open orders - confirm
    price: float = 0
    pnl: float = 0
    yd_volume: float = 0  # "yd" presumably means yesterday's (overnight) volume - confirm
    def __post_init__(self):
        """Derive the unique symbol and per-direction position identifiers."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
        self.vt_positionid = f"{self.vt_symbol}.{self.direction.value}"
@dataclass
class AccountData(BaseData):
    """
    Account data contains information about balance, frozen and
    available.
    """
    accountid: str
    balance: float = 0
    frozen: float = 0
    def __post_init__(self):
        """Compute available funds and the unique account identifier."""
        # NOTE: ``available`` is computed once here; mutating ``balance``
        # or ``frozen`` afterwards does not refresh it.
        self.available = self.balance - self.frozen
        self.vt_accountid = f"{self.gateway_name}.{self.accountid}"
@dataclass
class LogData(BaseData):
    """
    Log data is used for recording log messages on GUI or in log files.
    """
    msg: str
    level: int = INFO  # standard ``logging`` severity level
    def __post_init__(self):
        """Stamp the record with its creation time."""
        self.time = datetime.now()
@dataclass
class ContractData(BaseData):
    """
    Contract data contains basic information about each contract traded.
    """
    symbol: str
    exchange: Exchange
    name: str
    product: Product
    size: int  # presumably the contract multiplier - confirm
    pricetick: float  # minimum price increment
    min_volume: float = 1           # minimum trading volume of the contract
    stop_supported: bool = False    # whether server supports stop order
    net_position: bool = False      # whether gateway uses net position volume
    history_data: bool = False      # whether gateway provides bar history data
    # Option-specific fields (left at their defaults for non-option products).
    option_strike: float = 0
    option_underlying: str = ""     # vt_symbol of underlying contract
    option_type: OptionType = None
    option_expiry: datetime = None
    option_portfolio: str = ""
    option_index: str = ""          # for identifying options with same strike price
    def __post_init__(self):
        """Derive the globally unique "symbol.EXCHANGE" identifier."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class SubscribeRequest:
    """
    Request sending to specific gateway for subscribing tick data update.
    """
    symbol: str
    exchange: Exchange
    def __post_init__(self):
        """Derive the globally unique "symbol.EXCHANGE" identifier."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class OrderRequest:
    """
    Request sending to specific gateway for creating a new order.
    """
    symbol: str
    exchange: Exchange
    direction: Direction
    type: OrderType  # NOTE: field name shadows the ``type`` builtin
    volume: float
    price: float = 0  # presumably ignored for market orders - confirm in gateway
    offset: Offset = Offset.NONE
    reference: str = ""
    def __post_init__(self):
        """Derive the globally unique "symbol.EXCHANGE" identifier."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
    def create_order_data(self, orderid: str, gateway_name: str) -> OrderData:
        """
        Create order data from request.
        """
        # Copies the request fields verbatim; traded/status/datetime keep
        # their OrderData defaults.
        order = OrderData(
            symbol=self.symbol,
            exchange=self.exchange,
            orderid=orderid,
            type=self.type,
            direction=self.direction,
            offset=self.offset,
            price=self.price,
            volume=self.volume,
            gateway_name=gateway_name,
        )
        return order
@dataclass
class CancelRequest:
    """
    Request sending to specific gateway for canceling an existing order.
    """
    orderid: str
    symbol: str
    exchange: Exchange
    def __post_init__(self):
        """Derive the globally unique "symbol.EXCHANGE" identifier."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
@dataclass
class HistoryRequest:
    """
    Request sending to specific gateway for querying history data.
    """
    symbol: str
    exchange: Exchange
    start: datetime
    end: datetime = None  # presumably open-ended when None - confirm in gateway
    interval: Interval = None
    def __post_init__(self):
        """Derive the globally unique "symbol.EXCHANGE" identifier."""
        self.vt_symbol = f"{self.symbol}.{self.exchange.value}"
| 22.857143 | 99 | 0.626563 |
dadc661e7ea07e52133ba1d73c92b2e7cf5c558a | 27,847 | py | Python | layers/gat_layer.py | D-ick-Li/SBRS-CL-HG | 5d7b09d5ce5f2f3a628f34faa9ba59343ce490d8 | [
"MIT"
] | null | null | null | layers/gat_layer.py | D-ick-Li/SBRS-CL-HG | 5d7b09d5ce5f2f3a628f34faa9ba59343ce490d8 | [
"MIT"
] | null | null | null | layers/gat_layer.py | D-ick-Li/SBRS-CL-HG | 5d7b09d5ce5f2f3a628f34faa9ba59343ce490d8 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import enum
####################################
# This section is inspired from https://github.com/gordicaleksa/pytorch-GAT
####################################
class LayerType(enum.Enum):
    """Selects which of the 3 GAT layer implementations to instantiate."""
    # BUGFIX: the original assignments ended with stray commas
    # ("IMP1 = 0,"), which silently made the first two member values the
    # 1-tuples (0,) and (1,) instead of ints. Only identity comparisons
    # (e.g. layer_type == LayerType.IMP1) are visible in this file, so
    # plain int values are compatible and match the clear intent of
    # "IMP3 = 2".
    IMP1 = 0
    IMP2 = 1
    IMP3 = 2
class GAT(torch.nn.Module):
    """Stack of GAT layers wrapped in an ``nn.Sequential``.

    Three interchangeable layer implementations exist (see ``LayerType``):
    #1 and #2 differ only in subtle details and are conceptually simpler,
    while #3 is the most efficient and the hardest one to understand.
    Suggested reading order: understand implementation 2 first, compare it
    with 1, then tackle 3.
    """

    def __init__(self, num_of_layers, num_heads_per_layer, num_features_per_layer, add_skip_connection=True, bias=True,
                 dropout=0.6, layer_type=LayerType.IMP3, log_attention_weights=False):
        super().__init__()
        assert num_of_layers == len(num_heads_per_layer) == len(num_features_per_layer) - 1, f'Enter valid arch params.'

        GATLayer = get_layer_type(layer_type)  # pick one of the 3 implementations
        # Prepend a virtual single-head "layer 0" so that layer i consumes
        # head_counts[i] * num_features_per_layer[i] concatenated features.
        head_counts = [1] + num_heads_per_layer

        self.gat_net = nn.Sequential(*(
            GATLayer(
                num_in_features=num_features_per_layer[i] * head_counts[i],  # consequence of concatenation
                num_out_features=num_features_per_layer[i + 1],
                num_of_heads=head_counts[i + 1],
                # Hidden layers concatenate their heads and apply ELU; the
                # last layer averages heads and emits raw scores instead.
                concat=i < num_of_layers - 1,
                activation=nn.ELU() if i < num_of_layers - 1 else None,
                dropout_prob=dropout,
                add_skip_connection=add_skip_connection,
                bias=bias,
                log_attention_weights=log_attention_weights,
            )
            for i in range(num_of_layers)
        ))

    def forward(self, data):
        # ``data`` is an (in_nodes_features, topology) tuple; nn.Sequential
        # forwards only a single positional argument, hence the packing.
        return self.gat_net(data)
class GATLayer(torch.nn.Module):
    """
    Base class for all implementations as there is much code that would otherwise be copy/pasted.

    Holds the trainable parameters (projection "W", attention vector "a" split into
    target/source halves, optional bias and skip projection) plus the shared
    skip-connection / head-concat / bias epilogue used by every child's forward().
    """

    head_dim = 1  # axis that indexes the attention heads in (N, NH, FOUT) tensors

    def __init__(self, num_in_features, num_out_features, num_of_heads, layer_type, concat=True, activation=nn.ELU(),
                 dropout_prob=0.6, add_skip_connection=True, bias=True, log_attention_weights=False):
        super().__init__()

        # Saving these as we'll need them in forward propagation in children layers (imp1/2/3)
        self.num_of_heads = num_of_heads
        self.num_out_features = num_out_features
        self.concat = concat  # whether we should concatenate or average the attention heads
        self.add_skip_connection = add_skip_connection

        #
        # Trainable weights: linear projection matrix (denoted as "W" in the paper), attention target/source
        # (denoted as "a" in the paper) and bias (not mentioned in the paper but present in the official GAT repo)
        #

        if layer_type == LayerType.IMP1:
            # Experimenting with different options to see what is faster (tip: focus on 1 implementation at a time)
            self.proj_param = nn.Parameter(torch.Tensor(num_of_heads, num_in_features, num_out_features))
        else:
            # You can treat this one matrix as num_of_heads independent W matrices
            self.linear_proj = nn.Linear(num_in_features, num_of_heads * num_out_features, bias=False)

        # After we concatenate target node (node i) and source node (node j) we apply the additive scoring function
        # which gives us un-normalized score "e". Here we split the "a" vector - but the semantics remain the same.
        # Basically instead of doing [x, y] (concatenation, x/y are node feature vectors) and dot product with "a"
        # we instead do a dot product between x and "a_left" and y and "a_right" and we sum them up
        self.scoring_fn_target = nn.Parameter(torch.Tensor(1, num_of_heads, num_out_features))  # upper half of "a"
        self.scoring_fn_source = nn.Parameter(torch.Tensor(1, num_of_heads, num_out_features))  # lower half of "a"

        if layer_type == LayerType.IMP1:  # simple reshape in the case of implementation 1
            self.scoring_fn_target = nn.Parameter(self.scoring_fn_target.reshape(num_of_heads, num_out_features, 1))
            self.scoring_fn_source = nn.Parameter(self.scoring_fn_source.reshape(num_of_heads, num_out_features, 1))

        # Bias is definitely not crucial to GAT - feel free to experiment (I pinged the main author, Petar, on this one)
        # Bias width depends on whether heads get concatenated (NH*FOUT) or averaged (FOUT).
        if bias and concat:
            self.bias = nn.Parameter(torch.Tensor(num_of_heads * num_out_features))
        elif bias and not concat:
            self.bias = nn.Parameter(torch.Tensor(num_out_features))
        else:
            self.register_parameter('bias', None)

        if add_skip_connection:
            self.skip_proj = nn.Linear(num_in_features, num_of_heads * num_out_features, bias=False)
        else:
            self.register_parameter('skip_proj', None)

        #
        # End of trainable weights
        #

        self.leakyReLU = nn.LeakyReLU(0.2)  # using 0.2 as in the paper, no need to expose every setting
        self.softmax = nn.Softmax(dim=-1)  # -1 stands for: apply the softmax along the last dimension
        self.activation = activation
        # Probably not the nicest design but I use the same module in 3 locations, before/after features projection
        # and for attention coefficients. Functionality-wise it's the same as using independent modules.
        self.dropout = nn.Dropout(p=dropout_prob)

        self.log_attention_weights = log_attention_weights  # whether we should log the attention weights
        self.attention_weights = None  # for later visualization purposes, I cache the weights here

        self.init_params(layer_type)

    def init_params(self, layer_type):
        """
        The reason we're using Glorot (aka Xavier uniform) initialization is because it's a default TF initialization:
            https://stackoverflow.com/questions/37350131/what-is-the-default-variable-initializer-in-tensorflow

        The original repo was developed in TensorFlow (TF) and they used the default initialization.
        Feel free to experiment - there may be better initializations depending on your problem.
        """
        nn.init.xavier_uniform_(self.proj_param if layer_type == LayerType.IMP1 else self.linear_proj.weight)
        nn.init.xavier_uniform_(self.scoring_fn_target)
        nn.init.xavier_uniform_(self.scoring_fn_source)

        if self.bias is not None:
            torch.nn.init.zeros_(self.bias)

    def skip_concat_bias(self, attention_coefficients, in_nodes_features, out_nodes_features):
        # Shared epilogue for all 3 implementations: optionally cache attention,
        # add skip/residual connection, concat or average heads, add bias, activate.
        if self.log_attention_weights:  # potentially log for later visualization in playground.py
            self.attention_weights = attention_coefficients

        # if the tensor is not contiguously stored in memory we'll get an error after we try to do certain ops like view
        # only imp1 will enter this one
        if not out_nodes_features.is_contiguous():
            out_nodes_features = out_nodes_features.contiguous()

        if self.add_skip_connection:  # add skip or residual connection
            if out_nodes_features.shape[-1] == in_nodes_features.shape[-1]:  # if FIN == FOUT
                # unsqueeze does this: (N, FIN) -> (N, 1, FIN), out features are (N, NH, FOUT) so 1 gets broadcast to NH
                # thus we're basically copying input vectors NH times and adding to processed vectors
                out_nodes_features += in_nodes_features.unsqueeze(1)
            else:
                # FIN != FOUT so we need to project input feature vectors into dimension that can be added to output
                # feature vectors. skip_proj adds lots of additional capacity which may cause overfitting.
                out_nodes_features += self.skip_proj(in_nodes_features).view(-1, self.num_of_heads, self.num_out_features)

        if self.concat:
            # shape = (N, NH, FOUT) -> (N, NH*FOUT)
            out_nodes_features = out_nodes_features.view(-1, self.num_of_heads * self.num_out_features)
        else:
            # shape = (N, NH, FOUT) -> (N, FOUT)
            out_nodes_features = out_nodes_features.mean(dim=self.head_dim)

        if self.bias is not None:
            out_nodes_features += self.bias

        return out_nodes_features if self.activation is None else self.activation(out_nodes_features)
class GATLayerImp3(GATLayer):
    """
    Implementation #3 was inspired by PyTorch Geometric: https://github.com/rusty1s/pytorch_geometric

    But, it's hopefully much more readable! (and of similar performance)

    It's suitable for both transductive and inductive settings. In the inductive setting we just merge the graphs
    into a single graph with multiple components and this layer is agnostic to that fact! <3
    """

    src_nodes_dim = 0  # position of source nodes in edge index
    trg_nodes_dim = 1  # position of target nodes in edge index

    nodes_dim = 0  # node dimension/axis
    head_dim = 1  # attention head dimension/axis

    def __init__(self, num_in_features, num_out_features, num_of_heads, concat=True, activation=nn.ELU(),
                 dropout_prob=0.1, add_skip_connection=True, bias=True, log_attention_weights=False):
        # Delegate initialization to the base class
        # NOTE(review): default dropout_prob here is 0.1 while imp1/imp2 default
        # to 0.6 - confirm this asymmetry is intentional.
        super().__init__(num_in_features, num_out_features, num_of_heads, LayerType.IMP3, concat, activation, dropout_prob,
                         add_skip_connection, bias, log_attention_weights)

    def forward(self, data):
        #
        # Step 1: Linear Projection + regularization
        #

        in_nodes_features, edge_index = data  # unpack data
        num_of_nodes = in_nodes_features.shape[self.nodes_dim]
        assert edge_index.shape[0] == 2, f'Expected edge index with shape=(2,E) got {edge_index.shape}'

        # shape = (N, FIN) where N - number of nodes in the graph, FIN - number of input features per node
        # We apply the dropout to all of the input node features (as mentioned in the paper)
        # Note: for Cora features are already super sparse so it's questionable how much this actually helps
        in_nodes_features = self.dropout(in_nodes_features)

        # shape = (N, FIN) * (FIN, NH*FOUT) -> (N, NH, FOUT) where NH - number of heads, FOUT - num of output features
        # We project the input node features into NH independent output features (one for each attention head)
        nodes_features_proj = self.linear_proj(in_nodes_features).view(-1, self.num_of_heads, self.num_out_features)

        nodes_features_proj = self.dropout(nodes_features_proj)  # in the official GAT imp they did dropout here as well

        #
        # Step 2: Edge attention calculation
        #

        # Apply the scoring function (* represents element-wise (a.k.a. Hadamard) product)
        # shape = (N, NH, FOUT) * (1, NH, FOUT) -> (N, NH, 1) -> (N, NH) because sum squeezes the last dimension
        # Optimization note: torch.sum() is as performant as .sum() in my experiments
        scores_source = (nodes_features_proj * self.scoring_fn_source).sum(dim=-1)
        scores_target = (nodes_features_proj * self.scoring_fn_target).sum(dim=-1)

        # We simply copy (lift) the scores for source/target nodes based on the edge index. Instead of preparing all
        # the possible combinations of scores we just prepare those that will actually be used and those are defined
        # by the edge index.
        # scores shape = (E, NH), nodes_features_proj_lifted shape = (E, NH, FOUT), E - number of edges in the graph
        scores_source_lifted, scores_target_lifted, nodes_features_proj_lifted = self.lift(scores_source, scores_target, nodes_features_proj, edge_index)
        scores_per_edge = self.leakyReLU(scores_source_lifted + scores_target_lifted)
        # Adding directly works because the original a * (Whi||Whj) is a dot product, i.e. an
        # element-wise multiply-and-sum, and in the end the terms for the matching (i, j) pair are added together.

        # shape = (E, NH, 1)
        attentions_per_edge = self.neighborhood_aware_softmax(scores_per_edge, edge_index[self.trg_nodes_dim], num_of_nodes)
        # Add stochasticity to neighborhood aggregation
        attentions_per_edge = self.dropout(attentions_per_edge)

        #
        # Step 3: Neighborhood aggregation
        #

        # Element-wise (aka Hadamard) product. Operator * does the same thing as torch.mul
        # shape = (E, NH, FOUT) * (E, NH, 1) -> (E, NH, FOUT), 1 gets broadcast into FOUT
        nodes_features_proj_lifted_weighted = nodes_features_proj_lifted * attentions_per_edge
        # This corresponds to alpha * (H W) in the paper's notation.

        # This part sums up weighted and projected neighborhood feature vectors for every target node
        # shape = (N, NH, FOUT)
        out_nodes_features = self.aggregate_neighbors(nodes_features_proj_lifted_weighted, edge_index, in_nodes_features, num_of_nodes)

        #
        # Step 4: Residual/skip connections, concat and bias
        #

        out_nodes_features = self.skip_concat_bias(attentions_per_edge, in_nodes_features, out_nodes_features)
        return (out_nodes_features, edge_index)

    #
    # Helper functions (without comments there is very little code so don't be scared!)
    #

    def neighborhood_aware_softmax(self, scores_per_edge, trg_index, num_of_nodes):
        """
        As the fn name suggest it does softmax over the neighborhoods. Example: say we have 5 nodes in a graph.
        Two of them 1, 2 are connected to node 3. If we want to calculate the representation for node 3 we should take
        into account feature vectors of 1, 2 and 3 itself. Since we have scores for edges 1-3, 2-3 and 3-3
        in scores_per_edge variable, this function will calculate attention scores like this: 1-3/(1-3+2-3+3-3)
        (where 1-3 is overloaded notation it represents the edge 1-3 and it's (exp) score) and similarly for 2-3 and 3-3
        i.e. for this neighborhood we don't care about other edge scores that include nodes 4 and 5.

        Note:
        Subtracting the max value from logits doesn't change the end result but it improves the numerical stability
        and it's a fairly common "trick" used in pretty much every deep learning framework.
        Check out this link for more details:
            https://stats.stackexchange.com/questions/338285/how-does-the-subtraction-of-the-logit-maximum-improve-learning
        """
        # Calculate the numerator. Make logits <= 0 so that e^logit <= 1 (this will improve the numerical stability)
        scores_per_edge = scores_per_edge - scores_per_edge.max()
        exp_scores_per_edge = scores_per_edge.exp()  # softmax numerator

        # Calculate the denominator. shape = (E, NH)
        neigborhood_aware_denominator = self.sum_edge_scores_neighborhood_aware(exp_scores_per_edge, trg_index, num_of_nodes)

        # 1e-16 is theoretically not needed but is only there for numerical stability (avoid div by 0) - due to the
        # possibility of the computer rounding a very small number all the way to 0.
        attentions_per_edge = exp_scores_per_edge / (neigborhood_aware_denominator + 1e-16)

        # shape = (E, NH) -> (E, NH, 1) so that we can do element-wise multiplication with projected node features
        return attentions_per_edge.unsqueeze(-1)

    def sum_edge_scores_neighborhood_aware(self, exp_scores_per_edge, trg_index, num_of_nodes):
        # The shape must be the same as in exp_scores_per_edge (required by scatter_add_) i.e. from E -> (E, NH)
        trg_index_broadcasted = self.explicit_broadcast(trg_index, exp_scores_per_edge)
        # broadcast trg_index from its original shape (E,) up to (E, NH)

        # shape = (N, NH), where N is the number of nodes and NH the number of attention heads
        size = list(exp_scores_per_edge.shape)  # convert to list otherwise assignment is not possible
        size[self.nodes_dim] = num_of_nodes
        # size was originally (E, NH); it is now changed to (N, NH)
        neighborhood_sums = torch.zeros(size, dtype=exp_scores_per_edge.dtype, device=exp_scores_per_edge.device)

        # position i will contain a sum of exp scores of all the nodes that point to the node i (as dictated by the
        # target index)
        neighborhood_sums.scatter_add_(self.nodes_dim, trg_index_broadcasted, exp_scores_per_edge)
        # Scatter-adds exp_scores_per_edge into neighborhood_sums according to trg_index_broadcasted;
        # self.nodes_dim selects which dimension of the target the add happens on (here dim 0, the node index).
        # Semantics: self[index[i][j][k]][j][k] += other[i][j][k]   # when dim == 0

        # Expand again so that we can use it as a softmax denominator. e.g. node i's sum will be copied to
        # all the locations where the source nodes pointed to i (as dictated by the target index)
        # shape = (N, NH) -> (E, NH)
        return neighborhood_sums.index_select(self.nodes_dim, trg_index)

    def aggregate_neighbors(self, nodes_features_proj_lifted_weighted, edge_index, in_nodes_features, num_of_nodes):
        size = list(nodes_features_proj_lifted_weighted.shape)  # convert to list otherwise assignment is not possible
        size[self.nodes_dim] = num_of_nodes  # shape = (N, NH, FOUT)
        out_nodes_features = torch.zeros(size, dtype=in_nodes_features.dtype, device=in_nodes_features.device)

        # shape = (E) -> (E, NH, FOUT)
        trg_index_broadcasted = self.explicit_broadcast(edge_index[self.trg_nodes_dim], nodes_features_proj_lifted_weighted)
        # aggregation step - we accumulate projected, weighted node features for all the attention heads
        # shape = (E, NH, FOUT) -> (N, NH, FOUT)
        out_nodes_features.scatter_add_(self.nodes_dim, trg_index_broadcasted, nodes_features_proj_lifted_weighted)

        return out_nodes_features

    def lift(self, scores_source, scores_target, nodes_features_matrix_proj, edge_index):
        """
        Lifts i.e. duplicates certain vectors depending on the edge index.
        One of the tensor dims goes from N -> E (that's where the "lift" comes from).
        """
        src_nodes_index = edge_index[self.src_nodes_dim]
        trg_nodes_index = edge_index[self.trg_nodes_dim]

        # Using index_select is faster than "normal" indexing (scores_source[src_nodes_index]) in PyTorch!
        scores_source = scores_source.index_select(self.nodes_dim, src_nodes_index)  # index_select picks slices along the given dim
        scores_target = scores_target.index_select(self.nodes_dim, trg_nodes_index)  # pick the scores of the corresponding nodes
        nodes_features_matrix_proj_lifted = nodes_features_matrix_proj.index_select(self.nodes_dim, src_nodes_index)
        # pick the source nodes' feature vectors

        return scores_source, scores_target, nodes_features_matrix_proj_lifted

    def explicit_broadcast(self, this, other):
        # Append singleton dimensions until this.dim() == other.dim()
        for _ in range(this.dim(), other.dim()):
            this = this.unsqueeze(-1)

        # Explicitly expand so that shapes are the same
        return this.expand_as(other)  # replicate `this` until it matches `other`'s shape
class GATLayerImp2(GATLayer):
    """
    Implementation #2 was inspired by the official GAT implementation: https://github.com/PetarV-/GAT

    It's conceptually simpler than implementation #3 but computationally much less efficient.

    Note: this is the naive implementation not the sparse one and it's only suitable for a transductive setting.
    It would be fairly easy to make it work in the inductive setting as well but the purpose of this layer
    is more educational since it's way less efficient than implementation 3.
    """

    def __init__(self, num_in_features, num_out_features, num_of_heads, concat=True, activation=nn.ELU(),
                 dropout_prob=0.6, add_skip_connection=True, bias=True, log_attention_weights=False):
        super().__init__(num_in_features, num_out_features, num_of_heads, LayerType.IMP2, concat, activation, dropout_prob,
                         add_skip_connection, bias, log_attention_weights)

    def forward(self, data):
        #
        # Step 1: Linear Projection + regularization (using linear layer instead of matmul as in imp1)
        #

        # connectivity_mask is a dense (N, N) matrix with -inf at non-edge positions (see Step 2).
        in_nodes_features, connectivity_mask = data  # unpack data
        num_of_nodes = in_nodes_features.shape[0]
        assert connectivity_mask.shape == (num_of_nodes, num_of_nodes), \
            f'Expected connectivity matrix with shape=({num_of_nodes},{num_of_nodes}), got shape={connectivity_mask.shape}.'

        # shape = (N, FIN) where N - number of nodes in the graph, FIN - number of input features per node
        # We apply the dropout to all of the input node features (as mentioned in the paper)
        in_nodes_features = self.dropout(in_nodes_features)

        # shape = (N, FIN) * (FIN, NH*FOUT) -> (N, NH, FOUT) where NH - number of heads, FOUT - num of output features
        # We project the input node features into NH independent output features (one for each attention head)
        nodes_features_proj = self.linear_proj(in_nodes_features).view(-1, self.num_of_heads, self.num_out_features)

        nodes_features_proj = self.dropout(nodes_features_proj)  # in the official GAT imp they did dropout here as well

        #
        # Step 2: Edge attention calculation (using sum instead of bmm + additional permute calls - compared to imp1)
        #

        # Apply the scoring function (* represents element-wise (a.k.a. Hadamard) product)
        # shape = (N, NH, FOUT) * (1, NH, FOUT) -> (N, NH, 1)
        # Optimization note: torch.sum() is as performant as .sum() in my experiments
        scores_source = torch.sum((nodes_features_proj * self.scoring_fn_source), dim=-1, keepdim=True)
        scores_target = torch.sum((nodes_features_proj * self.scoring_fn_target), dim=-1, keepdim=True)

        # src shape = (NH, N, 1) and trg shape = (NH, 1, N)
        scores_source = scores_source.transpose(0, 1)
        scores_target = scores_target.permute(1, 2, 0)

        # shape = (NH, N, 1) + (NH, 1, N) -> (NH, N, N) with the magic of automatic broadcast <3
        # In Implementation 3 we are much smarter and don't have to calculate all NxN scores! (only E!)
        # Tip: it's conceptually easier to understand what happens here if you delete the NH dimension
        all_scores = self.leakyReLU(scores_source + scores_target)
        # connectivity mask will put -inf on all locations where there are no edges, after applying the softmax
        # this will result in attention scores being computed only for existing edges
        all_attention_coefficients = self.softmax(all_scores + connectivity_mask)

        #
        # Step 3: Neighborhood aggregation (same as in imp1)
        #

        # batch matrix multiply, shape = (NH, N, N) * (NH, N, FOUT) -> (NH, N, FOUT)
        out_nodes_features = torch.bmm(all_attention_coefficients, nodes_features_proj.transpose(0, 1))

        # Note: watch out here I made a silly mistake of using reshape instead of permute thinking it will
        # end up doing the same thing, but it didn't! The acc on Cora didn't go above 52%! (compared to reported ~82%)
        # shape = (N, NH, FOUT)
        out_nodes_features = out_nodes_features.permute(1, 0, 2)

        #
        # Step 4: Residual/skip connections, concat and bias (same as in imp1)
        #

        out_nodes_features = self.skip_concat_bias(all_attention_coefficients, in_nodes_features, out_nodes_features)
        return (out_nodes_features, connectivity_mask)
class GATLayerImp1(GATLayer):
    """Naive, dense GAT layer (implementation #1).

    Transductive-only: it materializes the full NxN score matrix. It would be
    fairly easy to adapt to the inductive setting, but this layer exists mainly
    for educational purposes since it's far less efficient than implementation 3.
    """

    def __init__(self, num_in_features, num_out_features, num_of_heads, concat=True, activation=nn.ELU(),
                 dropout_prob=0.6, add_skip_connection=True, bias=True, log_attention_weights=False):
        super().__init__(num_in_features, num_out_features, num_of_heads, LayerType.IMP1, concat, activation, dropout_prob,
                         add_skip_connection, bias, log_attention_weights)

    def forward(self, data):
        # Unpack: (N, FIN) node features and an (N, N) mask holding -inf where no edge exists.
        node_feats, connectivity_mask = data
        num_of_nodes = node_feats.shape[0]
        assert connectivity_mask.shape == (num_of_nodes, num_of_nodes), \
            f'Expected connectivity matrix with shape=({num_of_nodes},{num_of_nodes}), got shape={connectivity_mask.shape}.'

        # Step 1: regularize inputs, then project into NH independent head spaces.
        # (1, N, FIN) @ (NH, FIN, FOUT) -> (NH, N, FOUT) via broadcasted batch matmul,
        # followed by the same dropout the official GAT implementation applies.
        node_feats = self.dropout(node_feats)
        projected = self.dropout(torch.matmul(node_feats.unsqueeze(0), self.proj_param))

        # Step 2: additive attention scores. Each bmm yields (NH, N, 1); transposing the
        # target scores to (NH, 1, N) lets broadcasting produce every NxN pair at once.
        # (Implementation 3 avoids this by computing only the E existing edges.)
        src_scores = torch.bmm(projected, self.scoring_fn_source)
        trg_scores = torch.bmm(projected, self.scoring_fn_target)
        raw_scores = self.leakyReLU(src_scores + trg_scores.transpose(1, 2))

        # Adding the -inf mask before softmax zeroes attention on non-edges.
        attention = self.softmax(raw_scores + connectivity_mask)

        # Step 3: aggregate neighborhoods:
        # (NH, N, N) @ (NH, N, FOUT) -> (NH, N, FOUT), then swap to (N, NH, FOUT).
        aggregated = torch.bmm(attention, projected).transpose(0, 1)

        # Step 4: skip connection, head concat/average and bias (shared base-class helper).
        aggregated = self.skip_concat_bias(attention, node_feats, aggregated)
        return (aggregated, connectivity_mask)
#
# Helper functions
#
def get_layer_type(layer_type):
    """Map a LayerType enum member to the corresponding GAT layer class."""
    assert isinstance(layer_type, LayerType), f'Expected {LayerType} got {type(layer_type)}.'

    dispatch = {
        LayerType.IMP1: GATLayerImp1,
        LayerType.IMP2: GATLayerImp2,
        LayerType.IMP3: GATLayerImp3,
    }
    if layer_type not in dispatch:
        raise Exception(f'Layer type {layer_type} not yet supported.')
    return dispatch[layer_type]
34c45bf1c08a0a89b25799a822c7560909afac45 | 3,743 | py | Python | export-as-mod.py | tmeedend/ac-mod-generator | bb560f9f27c9b5366334d5ac1f2cbfe52f5a66e0 | [
"MIT"
] | 2 | 2021-01-14T09:20:20.000Z | 2021-03-11T04:32:53.000Z | export-as-mod.py | tmeedend/ac-mod-generator | bb560f9f27c9b5366334d5ac1f2cbfe52f5a66e0 | [
"MIT"
] | null | null | null | export-as-mod.py | tmeedend/ac-mod-generator | bb560f9f27c9b5366334d5ac1f2cbfe52f5a66e0 | [
"MIT"
] | null | null | null | import os
import subprocess
import sys
import tempfile
import json
import argparse
import ntpath
from actools import common
from actools import tracks
from actools import cars
from actools import params
from actools import archives
from actools import clean
def processMod(paramsToUse, modTool, modsToProcess):
    """Pack the requested mods using modTool.

    modsToProcess may be None or blank (no-op), the literal "#all" (pack
    everything), a "tags:a,b" tag query, or a comma separated list of names.
    """
    if modsToProcess is None or modsToProcess.strip() == "":
        return
    if modsToProcess == "#all":
        modTool.packAllMods(paramsToUse, paramsToUse.acpath)
        return

    if modsToProcess.startswith("tags:"):
        tag_list = modsToProcess.split("tags:")[1].split(',')
        selected = modTool.findModsWithTag(paramsToUse.acpath, tag_list)
    else:
        selected = modsToProcess.split(",")

    for entry in selected:
        # Kunos (stock) content is skipped only when both conditions hold.
        if modTool.isKunosMod(entry) and paramsToUse.skipKunosMods:
            continue
        modTool.packMod(entry.strip(), paramsToUse, paramsToUse.acpath)
def main():
    """Command line entry point: pack cars/tracks/archives into shareable mods.

    Behaviour is driven by the parsed Params object:
      - guessToProcess: a single path whose mod type is auto-detected, OR
      - explicit track/car/archive lists, plus the optional clean/find actions.
    """
    paramsToUse = params.Params(os.path.dirname(os.path.realpath(__file__)))
    paramsToUse.checkEnv()

    # Created up-front so the clean/find blocks at the bottom can always use them.
    # (Previously they were only created in the non-guess branch, which made
    # findTracksByTags/findCarsByTags crash with a NameError after a failed guess.)
    trackTools = tracks.TrackTools(paramsToUse.sevenzipexec, paramsToUse.quickbmsexec)
    carTools = cars.CarTools(paramsToUse.sevenzipexec, paramsToUse.quickbmsexec)

    # the guess param has been given
    if paramsToUse.guessToProcess is not None:
        # if it's a file, then, we can process it only if it's an archive
        if os.path.isfile(paramsToUse.guessToProcess):
            extension = os.path.splitext(paramsToUse.guessToProcess.lower())[1]
            if extension == ".rar" or extension == ".zip" or extension == ".7z":
                print("processing archive " + paramsToUse.guessToProcess)
                archives.transformToValidMod(paramsToUse, paramsToUse.guessToProcess)
                return
        # if it's a dir we can process a car or track only if it's in an assetto corsa dir
        elif os.path.isdir(paramsToUse.guessToProcess):
            if paramsToUse.acpath not in paramsToUse.guessToProcess:
                sys.exit("Cannot process directory " + paramsToUse.guessToProcess + " because it's not in Assetto Corsa installation directory")
            print("processing directory " + paramsToUse.guessToProcess)
            if archives.isTrack(paramsToUse.guessToProcess):
                processMod(paramsToUse, trackTools, os.path.basename(paramsToUse.guessToProcess))
                return
            elif archives.isCar(paramsToUse.guessToProcess):
                processMod(paramsToUse, carTools, os.path.basename(paramsToUse.guessToProcess))
                return
            elif archives.isMod(paramsToUse.guessToProcess):
                archives.archiveValidMod(paramsToUse, paramsToUse.guessToProcess, ntpath.basename(paramsToUse.guessToProcess))
                return
            else:
                print("Cannot guess what kind of mod this file is: " + paramsToUse.guessToProcess)
    else:
        # No guess given: process the explicitly requested tracks/cars/archive.
        processMod(paramsToUse, trackTools, paramsToUse.tracksToProcess)
        processMod(paramsToUse, carTools, paramsToUse.carsToProcess)
        archives.transformToValidMod(paramsToUse, paramsToUse.archiveToProcess)

    if paramsToUse.clean:
        # Destructive: deletes content without a 'ui' directory, so confirm first.
        if input("This will delete any car or track inside " + paramsToUse.acpath + " without a 'ui' directory. Are you sure? (y/n)") != "y":
            exit()
        clean.cleanCars(paramsToUse.acpath)
        clean.cleanTracks(paramsToUse.acpath)

    if paramsToUse.findTracksByTags is not None:
        foundTracks = trackTools.findModsWithTag(paramsToUse.acpath, paramsToUse.findTracksByTags.split(','))
        for track in foundTracks:
            print(track)

    if paramsToUse.findCarsByTags is not None:
        foundCars = carTools.findModsWithTag(paramsToUse.acpath, paramsToUse.findCarsByTags.split(','))
        for car in foundCars:
            print(car)
if __name__ == "__main__":
    # Guard the entry point so the module can be imported without side effects.
    main()
# TODO modFiles: check in this method that the files exist
# TODO paths passed as parameters: check that they exist
# TODO special shaders, cf. Shutoko
| 44.035294 | 145 | 0.748865 |
4cf0df47c987f644c539fef54f8fdd8a0bad8dd6 | 4,858 | py | Python | dashmat/actions.py | realestate-com-au/dashmat | 433886e52698f0ddb9956f087b76041966c3bcd1 | [
"MIT"
] | 1 | 2016-02-02T14:37:17.000Z | 2016-02-02T14:37:17.000Z | dashmat/actions.py | realestate-com-au/dashmat | 433886e52698f0ddb9956f087b76041966c3bcd1 | [
"MIT"
] | null | null | null | dashmat/actions.py | realestate-com-au/dashmat | 433886e52698f0ddb9956f087b76041966c3bcd1 | [
"MIT"
] | null | null | null | from dashmat.server.server import Server, generate_dashboard_js
from dashmat.datastore import JsonDataStore, RedisDataStore
from dashmat.server.react import ReactServer
from dashmat.scheduler import Scheduler
from input_algorithms.spec_base import NotSpecified
from textwrap import dedent
import logging
import redis
import json
import six
import sys
import os
log = logging.getLogger("dashmat.actions")  # module-level logger for this file

available_actions = {}  # task-name -> function registry, populated by the @an_action decorator
def an_action(func):
    """Decorator: register *func* as a runnable CLI task.

    The task is keyed by the function's name, gets a default grouping label,
    and the function itself is returned unchanged.
    """
    func.label = "Default"
    available_actions[func.__name__] = func
    return func
@an_action
def list_tasks(collector):
    """List the available_tasks"""
    print("Usage: dashmat <task>")
    print("")
    print("Available tasks to choose from are:")
    print("-----------------------------------")
    print("")

    # Sort by label first, then (stably) by name length so ties keep label order.
    by_label = sorted(available_actions.items(), key=lambda item: item[1].label)
    by_length = sorted(by_label, key=lambda item: len(item[0]))

    widest = max(len(name) for name, _ in by_length)
    for name, task in by_length:
        # First docstring line doubles as the task's one-line description.
        summary = dedent(task.__doc__ or "").strip().split('\n')[0]
        print("\t{0}{1} :-: {2}".format(" " * (widest - len(name)), name, summary))
    print("")
@an_action
def serve(collector):
    # Start the HTTP server with all configured dashboards and modules.
    dashmat = collector.configuration["dashmat"]
    dashboards = collector.configuration["dashboards"]
    active_modules = collector.configuration["__active_modules__"]
    module_options = collector.configuration["modules"]
    config_root = collector.configuration["config_root"]

    # Json file store by default; a configured redis host takes precedence.
    # (The json store is constructed either way, mirroring the original order.)
    datastore = JsonDataStore(os.path.join(config_root, "data.json"))
    if dashmat.redis_host:
        datastore = RedisDataStore(redis.Redis(dashmat.redis_host))

    server = Server(
        dashmat.host,
        dashmat.port,
        dashmat.debug,
        dashboards,
        active_modules,
        module_options,
        datastore,
        dashmat.dynamic_dashboard_js,
        dashmat.compiled_static_prep,
        dashmat.compiled_static_folder,
        dashmat.without_checks,
    )
    server.serve()
@an_action
def requirements(collector):
    """Just print out the requirements"""
    artifact = collector.configuration['dashmat'].artifact

    # Default to stdout; the artifact option may be a filename or a writable object.
    out = sys.stdout
    if artifact not in (None, "", NotSpecified):
        out = open(artifact, 'w') if isinstance(artifact, six.string_types) else artifact

    # One requirement line per entry, across every imported module.
    for active in collector.configuration['__imported__'].values():
        for requirement in active.requirements():
            out.write("{0}\n".format(requirement))
@an_action
def run_checks(collector):
    """Just run the checks for our modules"""
    # NOTE(review): the generic "artifact" option doubles as a module-name filter
    # here - when set, only the module with that name is checked. Confirm this is
    # the intended CLI contract.
    artifact = collector.configuration["dashmat"].artifact
    chosen = artifact
    if chosen in (None, "", NotSpecified):
        chosen = None

    dashmat = collector.configuration["dashmat"]
    modules = collector.configuration["__active_modules__"]
    config_root = collector.configuration["config_root"]
    module_options = collector.configuration["modules"]

    # Json file store by default; a configured redis host takes precedence.
    datastore = JsonDataStore(os.path.join(config_root, "data.json"))
    if dashmat.redis_host:
        datastore = RedisDataStore(redis.Redis(dashmat.redis_host))

    # Register every selected module's server with the scheduler, then force
    # an immediate run of all registered checks.
    scheduler = Scheduler(datastore)
    for name, module in modules.items():
        if chosen is None or name == chosen:
            server = module.make_server(module_options[name].server_options)
            scheduler.register(module, server, name)
    scheduler.twitch(force=True)
@an_action
def list_npm_modules(collector, no_print=False):
    """List the npm modules that get installed in a docker image for the react server"""
    deps = ReactServer().default_npm_deps()
    # Merge in each active module's npm dependencies, in deterministic (sorted) order.
    for _, module in sorted(collector.configuration["__active_modules__"].items()):
        deps.update(module.npm_deps())

    if not no_print:
        print(json.dumps(deps, indent=4, sort_keys=True))
    return deps
@an_action
def collect_dashboard_js(collector):
    """Generate dashboard javascript for each dashboard"""
    dashmat = collector.configuration["dashmat"]
    modules = collector.configuration["__active_modules__"]
    compiled_static_prep = dashmat.compiled_static_prep
    compiled_static_folder = dashmat.compiled_static_folder

    # Make sure the react server has all npm dependencies prepared first.
    npm_deps = list_npm_modules(collector, no_print=True)
    react_server = ReactServer()
    react_server.prepare(npm_deps, compiled_static_folder)

    for dashboard in collector.configuration["dashboards"].values():
        log.info("Generating compiled javascript for dashboard:{0}".format(dashboard.path))
        # Escape the dashboard path into a flat, unambiguous filename
        # ("_" -> "__" first so "/" -> "_" cannot collide).
        filename = dashboard.path.replace("_", "__").replace("/", "_")
        location = os.path.join(compiled_static_folder, "dashboards", "{0}.js".format(filename))
        # Regenerate from scratch: drop any stale compiled file before writing.
        if os.path.exists(location):
            os.remove(location)
        generate_dashboard_js(dashboard, react_server, compiled_static_folder, compiled_static_prep, modules)
| 34.7 | 109 | 0.695554 |
ea776fec512e07fa2ffe74482a0f7e072344de72 | 82 | py | Python | scripts/field/autogen_merStandAlone.py | pardovot/MS-private-server | ef7fde137a58ff71f83ba2229ac1f9c01d31bd6a | [
"MIT"
] | null | null | null | scripts/field/autogen_merStandAlone.py | pardovot/MS-private-server | ef7fde137a58ff71f83ba2229ac1f9c01d31bd6a | [
"MIT"
] | null | null | null | scripts/field/autogen_merStandAlone.py | pardovot/MS-private-server | ef7fde137a58ff71f83ba2229ac1f9c01d31bd6a | [
"MIT"
] | null | null | null | # ObjectID: 0
# Character field ID when accessed: 910150002
# ParentID: 910150002
| 20.5 | 45 | 0.768293 |
9033eb13a83acc147fc807320897d6eca88a715e | 65,122 | py | Python | verpy/pybin3/db0.py | vhnatyk/vlsistuff | 0981097bd19a0c482728dcc5048a3615ac9a9a90 | [
"MIT"
] | 1 | 2021-04-23T04:08:58.000Z | 2021-04-23T04:08:58.000Z | verpy/pybin3/db0.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | null | null | null | verpy/pybin3/db0.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | null | null | null |
import os,sys,string,pickle,types
import logs
import traceback
import module_class as mcl
# from module_class import pr_stmt
# from module_class import module_class
import matches
import pprint
# Optional side-car module written by add_package() on a previous run;
# when absent, packages_save is a falsy sentinel and typedef lookups are skipped.
if os.path.exists('packages_save.py'):
    sys.path.append('.')
    import packages_save
else:
    packages_save = False
def main():
    """Script entry point: load ./db0.pickle, emit all.v plus per-module
    debug dumps, and close any package output files."""
    load_parsed('.')
    dump_all_verilog('all.v')
    dump_all_all()
    if PackFile:
        PackFile.write('endmodule\n')
        PackFile.close()
    try:
        Pack2File.close()
    except NameError:
        # Pack2File is only created by add_package(); without a package
        # declaration in the input, the original code crashed here.
        pass
Modules = {}
def dumpDataBase():
    """Write a sorted, human-readable dump of the parse DataBase to
    'database.dump' (one 'db <key> <value>' line per entry).

    Fix: the output file was previously opened and never closed; the
    with-statement guarantees the handle is released even on error.
    """
    with open('database.dump', 'w') as Fout:
        for Key in sorted(DataBase.keys()):
            Fout.write('db %s %s\n' % (Key, DataBase[Key]))
def dump_all_verilog(Fname):
    """Dump Verilog text for every parsed module into the single file Fname.

    Fix: the output file handle leaked if a module's dump_verilog raised;
    the with-statement closes it on every path.
    """
    with open(Fname, 'w') as Fout:
        for Mod in Modules:
            logs.log_info('dumping %s' % Mod)
            Modules[Mod].dump_verilog(Fout)
def dump_all_all():
    """Invoke the per-module debug dump for every parsed module."""
    for name, module in Modules.items():
        logs.log_info('dumping %s' % name)
        module.dump()
def load_parsed(Rundir):
    """Load <Rundir>/db0.pickle and build module_class objects for every
    module in it.

    Returns the module dict on success, or {} after logging a fatal error.
    """
    global Global,Modules
    Modules={}
    Global = mcl.module_class('global_module')
    logs.setCurrentModule('load_parsed db0')
    try:
        load_db0('%s/db0.pickle'%Rundir)
        Key = 'Main',1
        dumpDataBase()
        scan1(Key)
        return Modules
    except:
        # NOTE(review): the bare except is a deliberate fallback to the CWD
        # pickle, but it also masks unrelated errors raised inside scan1().
        load_db0('db0.pickle')
        Key = 'Main',1
        scan1(Key)
        logs.log_fatal('reading file probably failed on syntax')
        return {}
def load_db0(Fname):
    """Load the pickled parse database from Fname into the module-level
    DataBase dict and return it.

    Fix: uses a with-statement so the file is closed even if pickle.load
    raises; returning DataBase is new but backward-compatible (previous
    callers ignored the None return).
    """
    global DataBase
    with open(Fname, 'rb') as File:
        DataBase = pickle.load(File)
    return DataBase
def scan1(Key):
    """Walk the parse tree rooted at DataBase[Key], registering every
    Define/Module/Package node; any other 2-tuple is recursed into as a
    sub-key.  Resets the walker globals on every call."""
    global Stack,State,Current,Modules,ModuleStuffs
    Stack=[]
    State='idle'
    Current=0
    ModuleStuffs=[]
    if Key not in DataBase: return
    List = DataBase[Key]
    if (List==[]):
        return
    for Item in List:
        if len(Item)==2:
            if Item[0]=='Define':
                add_define(Item)
            elif Item[0]=='Module':
                add_module(Item)
            elif Item[0]=='Package':
                add_package(Item)
            else:
                # any other 2-tuple is itself a DataBase key; recursing
                # also resets Current/State above
                scan1(Item)
        else:
            logs.log_err('exxx %s %s'%(Key,Item))
PackFile = False  # NOTE(review): never reassigned in this file, so the
                  # 'if PackFile:' branch in main() can never run
def add_package(Key):
    """Translate a 'package ... endpackage' node into a generated
    <pkg>_save.py side-car holding PARAMETERS and TYPEDEFS dicts."""
    global PackFile,Pack2File
    List = DataBase[Key]
    Vars = matches.matches(List,'package ? ; !Parameters endpackage')
    if Vars:
        Pack = Vars[0][0]
        Pack2File = open('%s_save.py'%Pack,'w')
        Pack2File.write('# PARAMETERS,TYPEDEFS = {},{} \n')
        List = get_list(Vars[1])
        for Item in List:
            if Item[0]=='parameter':
                Pack2File.write('PARAMETERS["%s"] = '%(Item[1]))
                pprint.pprint(Item[2],Pack2File)
            elif Item[0]=='typedef':
                Pack2File.write('TYPEDEFS["%s"] = '%(Item[1]))
                pprint.pprint(Item[2:],Pack2File)
                # enum typedefs additionally flatten their members into
                # PARAMETERS entries
                if (Item[2][0]=='enum'):
                    LL = Item[3]
                    if LL[0][0]=='parameter':
                        for Prm in LL:
                            Pack2File.write('PARAMETERS["%s"] = '%(Prm[1]))
                            pprint.pprint(Prm[2],Pack2File)
            else:
                logs.log_error('package got item=%s'%str(Item))
        return
    logs.log_error('package got list=%s'%str(List))
def pr_typedef(List):
    """Render a parsed typedef node back into SystemVerilog source text.

    Expects List = ['typedef', Name, header, members]; returns the text,
    or an error-comment string when the header tag is wrong.
    """
    if List[0]!='typedef':
        logs.log_error('typedef print got "%s" as header, not typedef'%(str(List[0])))
        return '// err typedef'
    Name = List[1]
    Str = 'typedef '
    Vars = matches.matches(List[2],['?','?',['?','?']])
    if Vars:
        Str += '%s %s [%s:%s] {\n '%(Vars[0],Vars[1],Vars[2],Vars[3])
    else:
        logs.log_error('typedef of %s got for list2 "%s"'%(Name,List[2]))
    LL = []
    for Item in List[3]:
        Vars = matches.matches(Item,'parameter ? ?')
        if Vars:
            X = '%s = %s\n'%(Vars[0],mcl.pr_expr(Vars[1]))
            LL.append(X)
    Lstr = ' ,'.join(LL)
    Str += Lstr
    Str += '\n} %s;'%(Name)
    return Str
def add_module(Key):
    """Create a module_class for the Module node at DataBase[Key] and feed
    it the node's params/header/body sub-trees.

    Node lengths: 5 = header+body, 6 = params+header+body,
    3 = a module-level `define.
    """
    global Current,ModuleStuffs
    List = DataBase[Key]
    Module = List[1][0]
    Current = mcl.module_class(Module)
    Modules[Module]=Current
    logs.log_info('addmodule %s (%s)'%(Module,Modules.keys()))
    if len(List)==5:
        Lhead = DataBase[List[2]]
        Lbody = DataBase[List[3]]
        Lparams=[]
    elif len(List)==6:
        Lparams = DataBase[List[2]]
        Lhead = DataBase[List[3]]
        Lbody = DataBase[List[4]]
    elif len(List)==3:
        if (List[0][1]=='define'):
            Var = List[1][0]
            Expr = get_expr(List[2])
            add_module_define(Var,Expr)
        else:
            logs.log_err('dont know to deal with %d %s'%(len(List),List))
        return
    # NOTE(review): a node length other than 3/5/6 falls through here with
    # Lparams/Lhead/Lbody unbound and raises NameError.
    if (len(Lparams)>0):
        add_module_params(Lparams)
    if (len(Lhead)>1):
        add_module_header(Lhead)
    if (len(Lbody)>0):
        # worklist consumed by add_module_stuff(), which may append to it
        ModuleStuffs = [Lbody]
        while (ModuleStuffs!=[]):
            add_module_stuff()
def add_module_define(Var,Expr):
    """Attach a `define (name Var, value Expr) to the module being built."""
    Current.add_define(Var,Expr)
def add_generate_item(List):
    """Translate one generate-region node (generate-if, generate-if/else,
    or a plain generate statement) and register it on the current module."""
    Vars = matches.matches(List,'if ( !Expr ) !GenStatement else !GenStatement')
    if Vars:
        Cond = get_expr(Vars[0])
        Yes = get_statements(Vars[1])
        No = get_statements(Vars[2])
        Current.add_generate([['ifelse',Cond,['list']+Yes,['list']+No]])
        return
    Vars = matches.matches(List,'if ( !Expr ) !GenStatement')
    if Vars:
        Cond = get_expr(Vars[0])
        Yes = get_statements(Vars[1])
        Current.add_generate([['if',Cond,['list']+Yes]])
        return
    if len(List)==3:
        # 'generate <statement> endgenerate' -- the payload is List[1]
        Statement = get_statements(List[1])
        if (len(Statement)==1)and(type(Statement) is list):
            Statement = Statement[0]
        Current.add_generate(Statement)
    else:
        logs.log_err('dont know to deal with generate len=%d %s'%(len(List),str(List)))
def add_define_item(List):
    """Handle a preprocessor-style Define node inside a module body:
    `include is recorded, `undef is ignored, anything else is an error."""
    Tok = List[0][1]
    if Tok=='backtick_include':
        Expr = get_expr(List[1])
        if Current:
            Current.add_include(Expr)
        else:
            logs.log_err('dont know to deal with %d %s'%(len(List),List))
        return
    elif Tok=='backtick_undef':
        return
    elif Tok=='define':
        if List[0][0]=='`include':
            Expr = get_expr(List[1])
            Current.add_include(Expr)
            return
    # reached for a 'define' token that is not `include, and any other kind
    logs.log_err('dont know to deal with %d add_define_item %s'%(len(List),List))
def add_define(Key):
    """Record a `define from the node at DataBase[Key]; it goes on the
    current module when one is open, otherwise on the global pseudo-module.
    A define without an explicit value defaults to 1."""
    entry = DataBase[Key]
    name = entry[1][0]
    value = get_expr(entry[2]) if len(entry) > 2 else 1
    target = Current if Current else Global
    target.add_define(name, value)
def add_module_params(List1):
    """Walk the head_params tree and register every 'parameter NAME = EXPR'
    (in its several parse shapes) on the current module."""
    for Item in List1:
        if len(Item)==2:
            if (Item[0]=='head_params'):
                add_module_params(DataBase[Item])
            elif (Item[0]=='head_param'):
                LL = DataBase[Item]
                Vars = matches.matches(LL,'parameter ? ? = ?')
                if Vars:
                    Who = Vars[1][0]
                    Expr = get_expr(Vars[2])
                    Current.add_parameter(Who,Expr)
                elif (len(LL)==4)and(LL[0][0]=='parameter'):
                    Who = LL[1][0]
                    Expr = get_expr(LL[3])
                    Current.add_parameter(Who,Expr)
                elif (len(LL)==4)and(LL[0][0]=='localparam'):
                    Who = LL[1][0]
                    Expr = get_expr(LL[3])
                    Current.add_parameter(Who,Expr)
                elif (len(LL)==3)and(LL[1][0]=='='):
                    # bare 'NAME = EXPR' continuation form
                    Who = LL[0][0]
                    Expr = get_expr(LL[2])
                    Current.add_parameter(Who,Expr)
                else:
                    logs.log_err('add_module_param got %d %s'%(len(LL),LL))
def add_module_header(List0):
    """Register the module's port list '( ... );' as signals on the current
    module, resolving typedef'd directions via notUsualDir/getTypeDefWid."""
    Dir = False
    Vars = matches.matches(List0,'( !Header_list ) ;')
    if Vars:
        List = get_list(DataBase[Vars[0]])
        for Item in List:
            if type(Item) is tuple:
                Vars2 = matches.matches(Item,'extdir ? ? ?')
                if Vars2:
                    Dir = Vars2[0]
                    Net = Vars2[1]
                    if notUsualDir(Dir):
                        # direction carries a typedef; expand to its width
                        Usual,Type = notUsualDir(Dir)
                        Wid = getTypeDefWid(Type)
                        Current.add_sig(Net,Usual,Wid)
                        record_original_typedef(Net,Type)
                    else:
                        Wid = Vars2[2]
                        Current.add_sig(Net,Dir,Wid)
                else:
                    logs.log_error('add_module_header got %s'%str(Item))
            elif type(Item) is str:
                # bare name: inherits Dir/Wid from the previous port entry
                # (assumes a tuple entry was seen first -- TODO confirm)
                if Dir: Current.add_sig(Item,Dir,Wid)
            else:
                logs.log_error('add_module_header got(1) %s'%str(Item))
        return
    logs.log_error('add_module_header got(1) %s'%str(List0))
    return
OriginalTypeDefs={}  # (net name, module name) -> original typedef name
def record_original_typedef(Net,Type):
    """Remember that port Net of the current module was declared with
    typedef Type (before it was expanded to a plain width)."""
    OriginalTypeDefs[(Net,Current.Module)] = Type
def getStructFields(Kind):
    """For a struct typedef Kind, return (total width, {field: (msb, lsb)}).

    Bit offsets are assigned from the last declared field upward (fields
    are reversed first).  Returns (0, {}) when packages_save is absent or
    Kind is not a struct.
    """
    if not packages_save: return 0,{}
    if Kind in packages_save.TYPEDEFS:
        Struct = packages_save.TYPEDEFS[Kind]
        if Struct[0]=='struct':
            res = {}
            Tot = 0
            Str = Struct[1][:]
            Str.reverse()
            for Item in Str:
                Wid = getTypeDefWid(Item[0])
                Name = Item[1]
                res[Name] = (Wid+Tot-1,Tot)
                Tot += Wid
            return Tot,res
    logs.log_error('getStructFields name=%s '%(str(Kind)))
    return 0,{}
def getTypeDefWid(Kind):
    """Return the bit width of a type name: struct typedefs are summed
    recursively, enum typedefs use their declared range, and the builtin
    logic/integer/real widths are hardwired."""
    if packages_save and (Kind in packages_save.TYPEDEFS):
        Struct = packages_save.TYPEDEFS[Kind]
        if Struct[0]=='struct':
            Tot = 0
            for Item in Struct[1]:
                Wid = getTypeDefWid(Item[0])
                Tot += Wid
            return Tot
        Vars = matches.matches(Struct[0],'enum logic ?')
        if Vars:
            H,L = Vars[0]
            return H-L+1
    if Kind=='logic': return 1
    if Kind=='integer': return 32
    if Kind=='real': return 64
    logs.log_error('getTypeDefWid name=%s '%(str(Kind)))
    traceback.print_stack(None,None,logs.Flogs[0])
    # NOTE(review): 5 is an arbitrary error sentinel, not a real width
    return 5
def get_when(Item):
    """Extract the sensitivity list of an '@(...)' node as a list of
    expressions / ['edge', kind, expr] entries; [] on parse failure."""
    List = DataBase[Item]
    if len(List)==4:
        # '@ ( When_items )' form
        return get_when_items(List[2])
    elif (len(List)==2)and(List[0][0]=='@'):
        # '@ token' / '@*' shorthand forms
        Ok = List[1]
        if len(Ok)==1:
            return [Ok]
        if (len(Ok)==4)and(Ok[1]=='token'):
            return [Ok[0]]
        if (len(Ok)==4)and(Ok[0]=='*'):
            return [Ok[0]]
        if (len(Ok)==4)and(Ok[0]=='(*)'):
            return ['*']
        logs.log_err('get_when got0 %s'%str(List))
        return []
    else:
        logs.log_err('get_when got1 %s'%str(List))
        return []
    # ensure(len(List)==4,(List,Item))
def get_when_items(Item1):
    """Flatten a When_items subtree into a list of sensitivity entries:
    plain expressions or ['edge', posedge/negedge, expr]."""
    if is_terminal(Item1):
        if Item1[0]=='*':
            return ['*']
    List = DataBase[Item1]
    res = []
    for Item in List:
        if len(Item)==2:
            if (Item[0]=='When_item'):
                LL = DataBase[Item]
                if len(LL)==2:
                    # '<edge> <expr>' pair
                    Edge = LL[0][0]
                    Expr = get_expr(LL[1])
                    res.append(['edge',Edge,Expr])
                elif len(LL)==1:
                    Expr = get_expr(LL[0])
                    res.append(Expr)
                else:
                    logs.log_err('get_when_items %s'%(LL))
            elif (Item[0]=='When_items'):
                more = get_when_items(Item)
                res.extend(more)
    return res
def get_exprs(Item1):
    """Flatten an Exprs subtree into a flat list of translated expressions."""
    collected = []
    for entry in DataBase[Item1]:
        if len(entry) != 2:
            continue
        if entry[0] == 'Exprs':
            collected.extend(get_exprs(entry))
        elif entry[0] == 'Expr':
            collected.append(get_expr(entry))
    return collected
def get_soft_assigns(Item1):
    """Translate a Soft_assigns subtree (for-loop init/step clauses and
    inline assigns) into ['list', ['=', dst, src], ...] form."""
    List = DataBase[Item1]
    res=['list']
    Vars = matches.matches(List,'assign !Soft_assigns ;')
    if Vars:
        Res = get_soft_assigns(Vars[0])
        return Res
    Vars = matches.matches(List,'assign ? = !Expr ;')
    if Vars:
        Dst = get_expr(Vars[0])
        Src = get_expr(Vars[1])
        res.append(['=',Dst,Src])
        # NOTE(review): returns a bare tuple here, not the res list built
        # above -- inconsistent with every other return of this function
        return ('=',Dst,Src)
    for Item in List:
        if Item[0]=='Soft_assigns':
            More = get_soft_assigns(Item)
            res.extend(More)
        elif Item[0]=='Soft_assign':
            List2 = DataBase[Item]
            done=False
            # 'integer i = 0' also declares the loop variable on the module
            Vars = matches.matches(List2,'integer ? = ?')
            if Vars:
                Dst = get_expr(Vars[0])
                Src = get_expr(Vars[1])
                res.append(['=',Dst,Src])
                Current.add_sig(Dst,'integer',0)
                done=True
            Vars = matches.matches(List2,'genvar ? = ?')
            if Vars:
                Dst = get_expr(Vars[0])
                Src = get_expr(Vars[1])
                res.append(['=',Dst,Src])
                Current.add_sig(Dst,'genvar',0)
                done=True
            Vars = matches.matches(List2,'? = ?')
            if not done and Vars:
                Dst = get_expr(Vars[0])
                Src = get_expr(Vars[1])
                res.append(['=',Dst,Src])
                done=True
            # 'i++' is rewritten as i = i + 1
            Vars = matches.matches(List2,'? plusplus')
            if not done and Vars:
                Dst = get_expr(Vars[0])
                res.append(['=',Dst,['+',Dst,1]])
                done=True
            if not done:
                logs.log_err('get_soft_assigns not done got %s'%str(List2))
        elif Item[0]=='assign':
            # NOTE(review): this branch reads the node but ignores it
            List2 = DataBase[Item]
        else:
            logs.log_err('get_soft_assigns got %s'%str(Item))
    return res
def get_statement(Item):
    """Translate one parsed Statement node (a DataBase key tuple or an
    already-expanded token list) into the internal statement form.

    Returns a list whose head tags the statement kind ('if', 'ifelse',
    'case', '=', '<=', 'for', 'while', 'list', ...); logs and returns []
    for unrecognized shapes.

    Fix: the '!CaseKind ( !Expr ) !Cases !Default endcase' branch used the
    undefined name Vars2 (NameError when that pattern matched); it now
    correctly indexes Vars.
    """
    if (type(Item) is tuple):
        List = DataBase[Item]
    else:
        List = Item
    # --- pattern-based shapes first ---
    Vars = matches.matches(List,'!AlwaysKind !Statement')
    if Vars:
        Always = get_expr(Vars[0])
        Stats = get_statement(Vars[1])
        return [Always,[],Stats]
    Vars = matches.matches(List,'!AlwaysKind !When !Statement')
    if Vars:
        Always = get_expr(Vars[0])
        When = get_when(Vars[1])
        Stats = get_statement(Vars[2])
        return [Always,When,Stats]
    Vars = matches.matches(List,'always !When !Statement')
    if Vars:
        When = get_when(Vars[0])
        Stats = get_statement(Vars[1])
        return ['always',When,Stats]
    Vars = matches.matches(List,'!IntDir !Tokens_list ;')
    if Vars:
        Dir = get_dir(Vars[0])
        if Dir=='logic': Dir='wire';
        List0 = get_list(Vars[1])
        res=[]
        for Net in List0:
            res.append(('declare',Dir,Net,0))
        return res
    Vars = matches.matches(List,'!IntDir !Width !Width ? ;')
    if Vars:
        Dir = get_dir(Vars[0])
        if Dir=='logic': Dir='wire';
        Wid0 = get_wid(Vars[1])
        Wid1 = get_wid(Vars[2])
        List0 = get_list(Vars[3])
        res=[]
        for Net in List0:
            res.append(('declare',Dir,Net,('packed',Wid0,Wid1)))
        return res
    Vars = matches.matches(List,'!IntDir !Width ? = !Expr ;')
    if Vars:
        Dir = get_dir(Vars[0])
        if Dir=='logic': Dir='wire';
        Wid = get_wid(Vars[1])
        Net = Vars[2]
        res=['list']
        res.append(('declare',Dir,Net,Wid))
        Src = get_expr(Vars[3])
        Dst = get_expr(Net)
        res.append(('assigns',['list',['=',Dst,Src]]))
        return res
    Vars = matches.matches(List,'!IntDir !Width !Tokens_list ;')
    if Vars:
        Dir = get_dir(Vars[0])
        if Dir=='logic': Dir='wire';
        Wid0 = get_wid(Vars[1])
        List0 = get_list(Vars[2])
        res=[]
        for Net in List0:
            res.append(('declare',Dir,Net,Wid0))
        return res
    Vars = matches.matches(List,"for ( ? ; ? ; ? ) ?")
    if Vars:
        Assigns1 = get_soft_assigns(Vars[0])
        Cond = get_expr(Vars[1])
        Assigns2 = get_soft_assigns(Vars[2])
        Stmt = get_statement(Vars[3])
        return ['for',Assigns1,Cond,Assigns2,Stmt]
    #ILIA
    Vars = matches.matches(List,"parameter ? ;")
    if Vars:
        More = get_list(Vars[0])
        return More
    Vars = matches.matches(List,"localparam ? ;")
    if Vars:
        More = get_list(Vars[0])
        return More
    # --- length-dispatched shapes ---
    if len(List)==1:
        if List[0][0]=='Always':
            LL = DataBase[List[0]]
            return get_statement(LL)
        if List[0][0]=='While_statement':
            List2 = DataBase[List[0]]
            Cond = get_expr(List2[2])
            Stmt = get_statement(List2[4])
            return ['while',Cond,Stmt]
        if List[0][0] in ['Instance']:
            List = DataBase[List[0]]
            return instance_statement(List)
        if List[0][0] in ['Assign']:
            List2 = DataBase[List[0]]
            Assigns = get_soft_assigns(List[0])
            return ['assigns',Assigns]
    if len(List)==2:
        if List[0][0]=='Dotted':
            Dotted = get_dotted_items(DataBase[List[0]])
            return ['dotted',Dotted]
        if List[0][0]=='When':
            When = get_when(List[0])
            if len(When)==1:
                When = When[0]
            return ['when',When]
        if List[0][0]=='$finish':
            return ['$finish']
        if List[0][1]=='token':
            return ['taskcall',List[0][0]]
        if (List[0][0]=='begin')and(List[1][0]=='end'):
            return ['empty_begin_end']
    if len(List)==3:
        if List[0][0]=='integer':
            return ['integer',List[1][0]]
        if List[0][0]=='genvar':
            return ['genvar',List[1][0]]
        if List[0][0]=='begin':
            LL = get_statements(List[1])
            return ['list']+LL
        if List[0][0]=='fork':
            LL = get_statements(List[1])
            return ['fork']+LL
        if List[0][0]=='#':
            Dly = get_expr(List[1])
            return ['#',Dly]
        if List[0][0]=='wait':
            Dly = get_expr(List[1])
            return ['wait',Dly]
        if List[0][1]=='emit':
            Eve = get_expr(List[1])
            return ['emit',Eve]
        if List[0][1]=='disable':
            Eve = get_expr(List[1])
            return ['disable',Eve]
        if List[0][1]=='release':
            Eve = get_expr(List[1])
            return ['release',Eve]
    if len(List)==4:
        if List[1][0]=='<=':
            Dst = get_expr(List[0])
            Src = get_expr(List[2])
            return ['<=',Dst,Src]
        if List[1][0]=='=':
            Dst = get_expr(List[0])
            Src = get_expr(List[2])
            return ['=',Dst,Src]
        if List[0][0]=='reg':
            Wid = get_expr(List[1])
            Src = get_expr(List[2])
            return ['reg',Wid,Src]
    if len(List)==3:
        if List[0][0]=='reg':
            Src = get_expr(List[1])
            return ['reg',0,Src]
    if len(List)==5:
        if (List[0][0]=='force'):
            Dst = get_expr(List[1])
            Src = get_expr(List[3])
            return ['force',Dst,Src]
        if (List[0][0]=='assign'):
            Dst = get_expr(List[1])
            Src = get_expr(List[3])
            return ['assign',Dst,Src]
        if List[1][0]=='<=':
            # non-blocking assign with intra-assignment delay
            Dst = get_expr(List[0])
            Dly = get_expr(List[2])
            Src = get_expr(List[3])
            return ['<=',Dst,Src,Dly]
        if List[0][0]=='begin':
            return ['named_begin',List[2][0],['list']+get_statements(List[3])]
        if (List[1][0]=='(')and(List[0][1]=='token'):
            exprs = get_exprs(List[2])
            return ['functioncall',List[0][0],exprs]
        if (List[0][0]=='if'):
            Cond = get_expr(List[2])
            Yes = get_statement(List[4])
            return ['if',Cond,Yes]
    if len(List)==6:
        if (List[0][0] in ['case','casez','casex']):
            Switch = get_expr(List[2])
            LLL = DataBase[List[4]]
            Cases = get_cases(List[4])
            return [List[0][0],Switch,Cases]
    if len(List)==7:
        if (List[0][0]=='if')and(List[5][0]=='else'):
            Cond = get_expr(List[2])
            Yes = get_statement(List[4])
            No = get_statement(List[6])
            return ['ifelse',Cond,Yes,No]
        if (List[0][0] in ['CaseKind'] ):
            Switch = get_expr(List[2])
            Cases = get_cases(List[4])
            Default = get_default(List[5])
            Cases.append(['default',Default])
            XX = DataBase[List[0]]
            if matches.matches(XX,'unique case'):
                return ['unique_case',Switch,Cases]
            elif matches.matches(XX,'case'):
                return ['case',Switch,Cases]
            else:
                logs.log_err('CaseKind got %s'%str(List[0]))
                return []
        if (List[0][0] in ['case','casez','casex'] ):
            Switch = get_expr(List[2])
            Cases = get_cases(List[4])
            Default = get_default(List[5])
            Cases.append(['default',Default])
            return [List[0][0],Switch,Cases]
    Vars = matches.matches(List,'!CaseKind ( !Expr ) !Cases !Default endcase')
    if Vars:
        Switch = get_expr(Vars[1])
        Cases = get_cases(Vars[2])  # BUG FIX: was Vars2[2] (undefined name)
        XX = DataBase[Vars[0]]
        Default = get_default(List[5])
        Cases.append(['default',Default])
        if matches.matches(XX,'unique case'):
            return ['unique_case',Switch,Cases]
        elif matches.matches(XX,'case'):
            return ['case',Switch,Cases]
        else:
            logs.log_err('CaseKind got %s'%str(List[0]))
    Vars = matches.matches(List,'!CaseKind ( !Expr ) !Cases endcase')
    if Vars:
        Switch = get_expr(Vars[1])
        Cases = get_cases(Vars[2])
        XX = DataBase[Vars[0]]
        if matches.matches(XX,'unique case'):
            return ['unique_case',Switch,Cases]
        elif matches.matches(XX,'case'):
            return ['case',Switch,Cases]
        else:
            logs.log_err('CaseKind got %s'%str(List[0]))
    Vars = matches.matches(List,'return ? ;')
    if Vars:
        return ['return',Vars[0]]
    if (type(List) is list)and(len(List)==1):
        return get_statement(List[0])
    Vars = matches.matches(List,'!IntDir !Tokens_list = !Expr ;')
    if Vars:
        logs.log_err('needs care. wire in genvar with assign')
        return
    logs.log_err(' db0: untreated statement len=%d list="%s"'%(len(List),List),True)
    return []
def get_default(Item):
    """Return the statement of a case 'default' arm; a bare ';' body
    becomes the ['empty_begin_end'] marker."""
    body = DataBase[Item][2]
    if body[0] == ';':
        return ['empty_begin_end']
    return get_statement(body)
def get_cases(Item1):
    """Translate a Cases subtree into [[conds, statement], ...] pairs.

    Leading nested 'Cases' wrappers are unrolled iteratively first; a
    'default' entry short-circuits to ['default'].
    """
    if Item1 not in DataBase: return []
    List = DataBase[Item1][:]
    # unroll left-recursive 'Cases' nesting into a flat list
    while List[0][0]=='Cases':
        Itm = List[0]
        Part = DataBase[Itm][:]
        List = Part + List[1:]
    res = []
    for Item in List:
        if len(Item)==2:
            if (Item[0]=='Case'):
                List2 = DataBase[Item]
                if len(List2)==3:
                    Cond = get_exprs(List2[0])
                    # an empty ';' arm is silently dropped
                    if List2[2][0]!=';':
                        Statement = get_statement(List2[2])
                        res.append([Cond,Statement])
                else:
                    logs.log_err('case %s %s'%(Item,List2))
            elif (Item[0]=='Cases'):
                more = get_cases(Item)
                res.extend(more)
            elif (Item[0]=='default'):
                return ['default']
            else:
                logs.log_err('bad case item "%s"'%(str(Item)))
                return []
        else:
            logs.log_err('bad case item "%s"'%(str(Item)))
            return []
    return res
def get_statements(Item1):
    """Flatten a (Gen)Statements subtree into a flat list of translated
    statements, unwrapping 'begin ... end' (optionally named) wrappers."""
    List2 = flattenList(Item1)
    Vars = matches.matches(List2,'begin !GenStatements end')
    if Vars:
        more = get_statements(Vars[0])
        return more
    Vars = matches.matches(List2,'begin : ? !GenStatements end')
    if Vars:
        more = get_statements(Vars[1])
        return more
    res = []
    for Item in List2:
        if len(Item)==2:
            if (Item[0]=='Statement'):
                x = get_statement(Item)
                res.append(x)
            elif (Item[0]=='Statements'):
                more = get_statements(Item)
                res.extend(more)
            elif (Item[0]=='GenStatements'):
                more = get_statements(Item)
                res.extend(more)
            elif (Item[0]=='GenStatement'):
                x = get_statement(Item)
                res.append(x)
            elif (Item[0]=='Assign'):
                LL = DataBase[Item]
                Vars = matches.matches(LL,'assign ? = ? ;')
                if Vars:
                    Dst = get_expr(Vars[0])
                    Src = get_expr(Vars[1])
                    res.append(['assigns',('=',Dst,Src)])
                else:
                    logs.log_error('Assign %s'%str(DataBase[Item]))
            else:
                logs.log_err('fallOff #0411# %s'%str(Item))
        elif (len(Item)==3)and(Item[0][0]=='begin')and(Item[2][0]=='end'):
            x = get_statement(Item[1])
            res.append(x)
        else:
            logs.log_err('fallOff #0413# %s "%s"'%(Item,List2))
    return res
def get_wid(Item):
    """Translate a Width node into a (high, low) bound pair.

    Handles '[h:l]', '[base +: n]', '[base -: n]', '[n]' (as n-1:0),
    'integer' (31:0) and parameter-override '#(...)' forms; returns (0,0)
    after logging on anything else.
    """
    if len(Item)==2:
        List = DataBase[Item]
        if (len(List)==5):
            if List[2][0]==':':
                High = get_expr(List[1])
                Low = get_expr(List[3])
                return High,Low
            if List[2][1]=='plus_range':
                # [base +: n]  ->  (base+n-1, base)
                High = get_expr(List[1])
                Low = get_expr(List[3])
                return ['-',['+',High,Low],1],High
            if List[2][1]=='minus_range':
                # [base -: n]  ->  (base, base-n+1)
                High = get_expr(List[1])
                Low = get_expr(List[3])
                return High,['+',['-',High,Low],1]
        elif (len(List)==1):
            if List[0][0]=='integer':
                return 31,0
            if List[0][0]=='Width':
                return get_wid(List[0])
        elif len(List)==4:
            if (List[0][0]=='#')and(List[2][0]=='Prms_list'):
                # parameter override list, not a range
                LL = get_conns(List[2])
                return LL
        elif len(List)==3:
            if (List[0][0]=='[')and(List[2][0]==']'):
                Num =get_expr(List[1])
                return ['-',Num,1],0
        logs.log_err('get_wid got %s %s'%(len(List),List))
    elif len(Item)==1:
        if Item[0][0]=='integer':
            return 31,0
    logs.log_err('get_wid %s'%str(Item))
    return 0,0
def get_busbit(Item):
    """Return the index expression of a single-bit select node '[i]';
    logs and returns None on any other shape.

    Fix: the error path formerly referenced the local 'List' even when
    len(Item) != 2, i.e. before it was assigned, raising NameError inside
    the log call.
    """
    if len(Item)==2:
        List = DataBase[Item]
        if len(List)==3:
            Ind = get_expr(List[1])
            return Ind
        logs.log_err('get_busbit %s %s'%(Item,List))
        return None
    logs.log_err('get_busbit %s'%(str(Item),))
    return None
def get_dir(Item):
    """Resolve a direction/type node into a direction string ('input',
    'output', 'wire', 'reg signed', ...).  Falls back to 'wire' after
    logging when nothing matches."""
    if (type(Item) is list)and(len(Item)==1):
        return get_dir(Item[0])
    # indirect node: chase the DataBase key
    if tuple(Item) in DataBase:
        return get_dir(DataBase[tuple(Item)])
    if (type(Item) is tuple)and(len(Item)==4):
        return Item[0]
    Vars = matches.matches(Item,'!PureExt !IntKind',False)
    if Vars:
        AA = Vars[0][0]
        BB = Vars[1][0]
        return '%s %s'%(AA,BB)
    Vars = matches.matches(Item,'!PureExt !IntKind signed',False)
    if Vars:
        AA = Vars[0][0]
        BB = Vars[1][0]
        return '%s %s signed'%(AA,BB)
    Vars = matches.matches(Item,'!PureExt signed',False)
    if Vars:
        AA = Vars[0][0]
        return '%s signed'%(AA)
    Vars = matches.matches(Item,'wire signed',False)
    if Vars:
        return 'wire signed'
    Vars = matches.matches(Item,'reg signed',False)
    if Vars:
        return 'reg signed'
    Vars = matches.matches(Item,'input logic',False)
    if Vars:
        return 'input'
    Vars = matches.matches(Item,'output logic',False)
    if Vars:
        return 'output'
    if type(Item) is tuple:
        if Item[0] in ['wire','logic']:
            return Item[0]
    logs.log_err('get_dir %s'%str(Item))
    return 'wire'
def get_list(Item):
    """Flatten a comma-separated list node (token lists, header items,
    parameter pairs, typedefs, ...) into a flat Python list.

    The many match patterns mirror the grammar's different list
    productions; failure logs an error with a stack trace and returns [].
    """
    if (type(Item) is list)and(len(Item)==1):
        Item = Item[0]
    if tuple(Item) in DataBase:
        Item = DataBase[tuple(Item)]
    if (type(Item) is list)and(len(Item)==1):
        Item = Item[0]
    if (type(Item) is tuple)and(len(Item)==4):
        # a single raw token
        return [Item[0]]
    if tuple(Item) in DataBase:
        Item = DataBase[tuple(Item)]
    Vars = matches.matches(Item,'? , !Tokens_list')
    if Vars:
        More = get_list(Vars[1])
        return [Vars[0][0]]+More
    Vars = matches.matches(Item,'!Header_list , !Header_item')
    if Vars:
        More = get_list(Vars[0])
        This = get_list(Vars[1])
        return More+This
    Vars = matches.matches(Item,'!Pairs2 , !Pair2')
    if Vars:
        # a 3-entry Pairs2 node is itself a list; otherwise it is one pair
        More0 = DataBase[Vars[0]]
        if len(More0)==3:
            More = get_list(Vars[0])
        else:
            More = [get_pair(Vars[0])]
        This = get_pair(Vars[1])
        return More+[This]
    Vars = matches.matches(Item,'!Parameters !PackageItem')
    if Vars:
        More = get_list(Vars[0])
        This = get_list(Vars[1])
        return This+More
    Vars = matches.matches(Item,'parameter !Pairs ;')
    if Vars:
        More = get_list(Vars[0])
        return More
    Vars = matches.matches(Item,'localparam !Pairs ;')
    if Vars:
        More = get_list(Vars[0])
        return More
    Vars = matches.matches(Item,'? = !Expr')
    if Vars:
        Expr = get_expr(Vars[1])
        return [('parameter',Vars[0][0],Expr)]
    Vars = matches.matches(Item,'typedef enum logic !Width { !Pairs } ? ;')
    if Vars:
        Wid = get_wid(Vars[0])
        Pairs = get_list(Vars[1])
        Name = get_expr(Vars[2])
        return [('typedef',Name,['enum','logic',Wid],Pairs)]
    Vars = matches.matches(Item,'!Pairs , !Pair')
    if Vars:
        Pairs = get_list(Vars[0])
        Pair = get_list(Vars[1])
        return Pairs+Pair
    Vars = matches.matches(Item,'!SimpleDefs !SimpleDef')
    if Vars:
        Pairs = get_list(Vars[0])
        Pair = get_list(Vars[1])
        return Pairs+Pair
    Vars = matches.matches(Item,'!IntDir ? ;')
    if Vars:
        Dir = get_dir(Vars[0])
        Name = get_expr(Vars[1])
        return [(Dir,Name)]
    if Item[0][0]=='ExtDir':
        return [getExtDir(Item)]
    if (type(Item) is list)and(len(Item))==1:
        return get_list(Item[0])
    logs.log_err('get_list %s'%str(Item))
    logs.pStack()
    return []
def getExtDir(Item):
    """Translate an external-direction port entry into an
    ('extdir', dir, name, width) tuple; width may be 0, a (h,l) pair,
    ('packed', w0, w1) or ('double', w0, w1)."""
    Vars = matches.matches(Item,'!ExtDir ?')
    if Vars:
        Dir = get_dir(Vars[0])
        Name = get_expr(Vars[1])
        return ('extdir',Dir,Name,0)
    Vars = matches.matches(Item,'!ExtDir !Width ?')
    if Vars:
        Dir = get_dir(Vars[0])
        Wid = get_wid(Vars[1])
        Name = get_expr(Vars[2])
        return ('extdir',Dir,Name,Wid)
    Vars = matches.matches(Item,'!ExtDir !Width !Width ?')
    if Vars:
        # two widths before the name: packed 2-D declaration
        Dir = get_dir(Vars[0])
        Wid0 = get_wid(Vars[1])
        Wid1 = get_wid(Vars[2])
        Name = get_expr(Vars[3])
        return ('extdir',Dir,Name,('packed',Wid0,Wid1))
    Vars = matches.matches(Item,'!ExtDir !Width ? !Width')
    if Vars:
        # width after the name: unpacked (memory-style) dimension
        Dir = get_dir(Vars[0])
        Wid0 = get_wid(Vars[1])
        Wid1 = get_wid(Vars[3])
        Name = get_expr(Vars[2])
        return ('extdir',Dir,Name,('double',Wid0,Wid1))
    logs.log_err('getExtDir got "%s"'%str(Item))
    return []
def get_pair(Item):
    """Resolve a 'name : expr' node into a (name, expr) tuple;
    returns False after logging on mismatch."""
    if len(Item)==1: return get_pair(Item[0])
    if in_db(Item):
        return get_pair(DataBase[Item])
    Vars = matches.matches(Item,'? : ?')
    if Vars:
        Var = get_expr(Vars[0])
        Expr = get_expr(Vars[1])
        return (Var,Expr)
    logs.log_error('get_pair %s failed'%str(Item))
    return False
def add_module_stuff():
    """Consume one entry of the ModuleStuffs worklist: Mstuff nodes are
    dispatched (generate if/ifelse vs plain module item) and nested
    Module_stuffs are pushed back onto the worklist."""
    List1 = ModuleStuffs.pop(0)
    if len(List1)==0:
        return
    for Item in List1:
        if len(Item)==2:
            if (Item[0]=='Mstuff'):
                Mstuff = DataBase[Item]
                if Mstuff[0][0] in ['if','ifelse']:
                    add_generate_item(Mstuff)
                else:
                    add_module_item(DataBase[Item][0])
            elif (Item[0]=='Module_stuffs'):
                ModuleStuffs.append(DataBase[Item])
def add_module_item(Item):
    """Dispatch one module-body item to its add_* handler by node tag
    (Definition, Assign, Instance, Always, Function, ...); raw 4-tuples
    tagged 'pragma'/'newver' are recorded directly."""
    if len(Item)==2:
        List = DataBase[Item]
        if Item[0]=='Definition':
            add_definition(List)
        elif Item[0]=='Parameter':
            add_parameter(List[-2])
        elif Item[0]=='Localparam':
            add_localparam(List)
        elif Item[0]=='Assign':
            add_hard_assign(List)
        elif Item[0]=='Instance':
            add_instance(List)
        elif Item[0]=='Always':
            add_always(List)
        elif Item[0]=='Initial':
            add_initial(List)
        elif Item[0]=='Function':
            add_function(List)
        elif Item[0]=='Task':
            add_task(List)
        elif Item[0]=='Define':
            add_define_item(List)
        elif Item[0]=='Generate':
            add_generate_item(List)
        elif Item[0]=='Typedef':
            add_typedef_item(List)
        elif Item[0]=='GenFor_statement':
            X = get_statement(Item)
            Current.add_generate(X)
        elif Item[0]=='Pragma':
            Str = List[1]
            add_pragma(Str)
        else:
            logs.log_err('untreated(0) "%s" "%s"'%(Item,List))
    elif (len(Item)==4)and(Item[1]=='pragma'):
        add_pragma(Item)
    elif (len(Item)==4)and(Item[1]=='newver'):
        add_newver(Item)
    else:
        logs.log_err('untreated(1) len=%d "%s"'%(len(Item),str(Item)))
def add_typedef_item(List):
    """Record a 'typedef struct { ... } name;' into packages_save.TYPEDEFS.

    NOTE(review): when packages_save.py was not found, packages_save is the
    module-level False sentinel and this raises AttributeError.
    """
    Vars = matches.matches(List,'typedef struct { ? } ? ;')
    if Vars:
        Name = get_expr(Vars[1])
        LL = get_list(Vars[0])
        packages_save.TYPEDEFS[Name]=('struct',LL)
        return
    logs.log_error('add_typedef_item got "%s"'%str(List))
def add_pragma(Item):
    """Record a pragma node on the current module for later emission."""
    Current.pragmas.append(Item)
def add_newver(Item):
    """Record a 'newver' node on the current module for later emission."""
    Current.newvers.append(Item)
def add_function(List):
    """Translate a function declaration: name (with optional return width),
    optional '(...)' port header, optional local Mem_defs, and the body
    statement, then register it on the current module."""
    if istoken(List[1]):
        # 'function NAME ...' -- no return width
        Func = List[1][0]
        Wid=0
        Next = 2
    elif iswidth(List[1]):
        # 'function [h:l] NAME ...'
        Func = List[2][0]
        Wid = get_wid(List[1])
        Next = 3
    else:
        logs.log_err('untreated add_func %s'%(str(List)))
        return
    if List[Next][0]=='(':
        Header = get_header_list(List[Next+1])
        Next += 4
    else:
        Header=[]
    if List[Next][0]=='Mem_defs':
        Memdefs = get_mem_defs(List[Next])
    else:
        Memdefs = []
    # the body statement sits just before the closing 'endfunction'
    Statement = get_statement(List[-2])
    Header.extend(Memdefs)
    Current.add_function(Func,Wid,Header,Statement)
def get_mem_defs(Ptr):
    """Flatten a Mem_defs subtree into [[name, width, dir], ...] entries."""
    out = []
    for entry in DataBase[Ptr]:
        if len(entry) != 2:
            continue
        if entry[0] == 'Mem_defs':
            out.extend(get_mem_defs(entry))
        if entry[0] == 'Mem_def':
            Dir, Wid, Names = get_definition(DataBase[entry])
            for Name in Names:
                out.append([Name, Wid, Dir])
    return out
def add_task(List):
    """Translate a task declaration and register it on the current module.

    Node lengths: 5 = body only, 6 = local defs + body,
    9 = port header + local defs + body.
    """
    Task = List[1][0]
    if len(List)==5:
        Statement = get_statement(List[3])
        Current.add_task(Task,Statement)
        return
    if len(List)==6:
        Statement = get_statement(List[4])
        Defs = get_mem_defs(List[3])
        Current.add_task(Task,Statement,Defs)
        return
    if len(List)==9:
        Statement = get_statement(List[7])
        Defs = get_mem_defs(List[6])
        Header = get_header_list(List[3])
        Header.extend(Defs)
        Current.add_task(Task,Statement,Header)
        return
    logs.log_err('task not treated %s %s %s'%(Task,len(List),List))
def get_header_list(Item1):
    """Flatten a Header_list subtree into [[name, width, dir], ...]
    entries for a function/task port header."""
    List = DataBase[Item1]
    res=[]
    for Item in List:
        if Item[0]=='Header_list':
            X = get_header_list(Item)
            res.extend(X)
        elif Item[0]=='Header_item':
            List2 = DataBase[Item]
            if (len(List2)==3)and(List2[0][0]=='ExtDir'):
                Dir = get_dir(List2[0])
                Wid = get_wid(List2[1])
                Name = List2[2][0]
                res.append([Name,Wid,Dir])
        elif Item[0][0]==',':
            # comma separator token -- skip
            pass
        else:
            logs.log_err('get_header_list %s'%List)
    return res
def add_always(List):
    """Register an always block on the current module; with (len 3) or
    without (len 2) a sensitivity list."""
    Kind = get_expr(List[0])
    if len(List)==3:
        When = get_when(List[1])
        Statement = get_statement(List[2])
        # a multi-entry sensitivity list is wrapped in a 'list' node
        if len(When)>1:
            When = ['list']+When
        else:
            When = When[0]
        Current.add_always(Statement,When,Kind)
    elif len(List)==2:
        Statement = get_statement(List[1])
        Current.add_always(Statement)
    else:
        logs.log_err('bad always %s'%List)
def add_initial(List):
    """Register an 'initial' block; List[1] holds its single statement tree."""
    Statement = get_statement(List[1])
    Current.add_initial(Statement)
def add_instance(List):
    """Register a module/primitive instantiation on the current module:
    resolves (or invents) the instance name, records #(...) parameters,
    and adds named (Conns_list) or positional (Exprs -> pinN) connections."""
    Vars = matches.matches(List,'? ? ;')
    if Vars:
        # bare 'type name;' instance with no connections
        Current.add_inst(Vars[0],Vars[1])
        return
    Type = List[0][0]
    if (List[1][0]=='('):
        # anonymous instance: invent a unique name
        Inst = invent_inst(Type,Current)
        Current.add_inst(Type,Inst)
        if (len(List)>=4)and(List[2][0]=='Exprs'):
            LL = get_exprs(List[2])
            for i in range(len(LL)):
                Sig = LL[i]
                Pin = 'pin%d'%i
                Current.add_conn(Inst,Pin,Sig)
            return
        else:
            logs.log_error('add_instance %d %s'%(len(List),str(List)))
    if istoken(List[1]):
        Inst = List[1][0]
        Params=[]
    elif (List[1][0]=='('):
        Inst = invent_inst('buf',Current)
        Params=[]
    else:
        # 'type #(params) name (...)' form
        Inst = List[2][0]
        Params = get_inst_params(List[1])
    Current.add_inst(Type,Inst)
    for (Prm,Val) in Params:
        Current.add_inst_param(Inst,Prm,Val)
    if List[-5][0]=='Width':
        # instance array width, e.g. 'buf name[3:0] (...)'
        Wid = get_wid(List[-5])
        Current.add_inst_param(Inst,'inst_width',('width',Wid))
    if List[-3][0]=='Conns_list':
        Conns = get_conns(List[-3])
        for Pin,Sig in Conns:
            Current.add_conn(Inst,Pin,Sig)
        return
    if List[-3][0]=='Exprs':
        LL = get_exprs(List[-3])
        for i in range(len(LL)):
            Sig = LL[i]
            Pin = 'pin%d'%i
            Current.add_conn(Inst,Pin,Sig)
        return
    if (List[-2][0]==')')and(List[-3][0]=='('): return
    logs.log_err('add_instance len=%d list=%s'%(len(List),List))
def instance_statement(List):
    """Like add_instance(), but used inside statement context: returns an
    ['instance', type, inst, params, conns] node instead of registering it
    on the current module."""
    Type = List[0][0]
    Inst = '?'
    Conns = {}
    Params = {}
    if (List[1][0]=='('):
        Inst = invent_inst(Type,Current)
    elif istoken(List[1]):
        Inst = List[1][0]
        Params={}
    else:
        Inst = List[2][0]
        Params = get_inst_params(List[1])
    if List[-3][0]=='Conns_list':
        Conns = get_conns(List[-3])
    elif List[-3][0]=='Exprs':
        # positional connections become pin0, pin1, ...
        Conns={}
        LL = get_exprs(List[-3])
        for i in range(len(LL)):
            Sig = LL[i]
            Pin = 'pin%d'%i
            Conns[Pin]=Sig
    return ['instance',Type,Inst,Params,Conns]
Invents = {}  # base name -> next index to try when inventing instance names

def invent_inst(Base, Current):
    """Return a fresh instance name '<Base>_<N>' that is not already used
    in Current.insts, advancing the per-base counter each attempt."""
    while True:
        count = Invents.setdefault(Base, 0)
        Invents[Base] = count + 1
        candidate = '%s_%d' % (Base, count)
        if candidate not in Current.insts:
            return candidate
def add_hard_assign(List):
    """Translate a continuous 'assign' in its many shapes (optionally with
    drive strength and/or delay params) and register each dst=src pair on
    the current module."""
    Vars = matches.matches(List,'!Soft_assign',False)
    if Vars:
        Item = DataBase[Vars[0]]
        add_hard_assign(Item)
        return
    Vars = matches.matches(List,'!LSH = !Expr',False)
    if Vars:
        Dst = get_expr(Vars[0])
        Src = get_expr(Vars[1])
        Current.add_hard_assign(Dst,Src)
        return
    Vars = matches.matches(List,'!Soft_assigns , !Soft_assign',False)
    if Vars:
        # comma-chained assigns: recurse on both halves
        add_hard_assign(DataBase[Vars[0]])
        add_hard_assign(DataBase[Vars[1]])
        return
    Vars = matches.matches(List,'assign !Soft_assigns ;',False)
    if Vars:
        Item = Vars[0]
        add_hard_assign(DataBase[Item])
        return
    if (len(List)==5):
        # 'assign dst = src ;'
        Dst = get_expr(List[1])
        Src = get_expr(List[3])
        Current.add_hard_assign(Dst,Src)
    elif (len(List)==6):
        # one modifier between 'assign' and dst: params or strength
        Dst = get_expr(List[2])
        Src = get_expr(List[4])
        if List[1][0]=='AssignParams':
            Params = get_inst_params(List[1])
            Current.add_hard_assign(Dst,Src,'',Params)
        elif List[1][0]=='StrengthDef':
            Strength = get_strength(List[1])
            Current.add_hard_assign(Dst,Src,Strength,'')
        else:
            Current.add_hard_assign(Dst,Src)
            logs.log_err('add_hard_assign %d %s'%(len(List),List))
    elif (len(List)==7):
        # both strength and params, in either order
        Dst = get_expr(List[3])
        Src = get_expr(List[5])
        if List[1][0]=='AssignParams':
            Params = get_inst_params(List[1])
        elif List[2][0]=='AssignParams':
            Params = get_inst_params(List[2])
        else:
            Params=''
        if List[1][0]=='StrengthDef':
            Strength = get_strength(List[1])
        elif List[2][0]=='StrengthDef':
            Strength = get_strength(List[2])
        else:
            Strength=''
        Current.add_hard_assign(Dst,Src,Strength,Params)
    else:
        logs.log_err('add_hard_assign %d %s'%(len(List),List))
def get_strength(Item):
    """Resolve a drive-strength node: a StrengthDef '(s0, s1)' pair becomes
    a 2-tuple, a single Strength becomes its token string; [] on error."""
    List = DataBase[Item]
    if (len(List)==5)and(Item[0]=='StrengthDef'):
        One = get_strength(List[1])
        Two = get_strength(List[3])
        return One,Two
    if (len(List)==1)and(Item[0]=='Strength'):
        return List[0][0]
    logs.log_err('get_strength %s %s'%(Item,List))
    return []
def get_inst_params(Item):
    """Translate an instance '#(...)' parameter node into [(key, value)]
    pairs: '#delay' -> [['delay', d]], positional exprs -> index keys,
    named Prms_list -> connection pairs; [] on error."""
    List = DataBase[Item]
    if (len(List)==2)and(List[0][0]=='#'):
        Dly = get_expr(List[1])
        return [['delay',Dly]]
    if (len(List)==4)and(List[2][0]=='Exprs'):
        LL = get_exprs(List[2])
        res=[]
        for i in range(len(LL)):
            res.append([i,LL[i]])
        return res
    if (len(List)==4)and(List[2][0]=='Prms_list'):
        LL = get_conns(List[2])
        return LL
    logs.log_err('get_inst_params %d %s %s'%(len(List),Item,List))
    return []
def get_definition(List):
    """Decompose a signal definition into (dir, width, names[, width2]).

    An '= expr' initializer tail is stripped and the reduced node is
    re-parsed; returns 0 after logging on unrecognized shapes.
    """
    if List[1][1]=='domino':
        # 'domino' token kind -- treated as a plain wire; semantics of this
        # special case are defined by the grammar, not visible here
        return 'wire',0,List[3][0]
    Dir = get_dir(List[0])
    if len(List)>4:
        if List[-3][0]=='=':
            # drop the '= expr' tail and substitute a ';' terminator
            List1 = List[:-3]+[(';',';',0,0)]
            What = get_definition(List1)
            return What
    if len(List)==5:
        # dir width names width2 ; -- memory-style declaration
        Wid = get_wid(List[1])
        Name = get_names(List[2])
        Wid2 = get_wid(List[3])
        return Dir,Wid,Name,Wid2
    if len(List)==4:
        Wid = get_wid(List[1])
        Names = get_names(List[2])
        return Dir,Wid,Names
    if len(List)==3:
        Wid=0
        Names = get_names(List[1])
        return Dir,Wid,Names
    logs.log_err('bad definition %d %s'%(len(List),List))
    return 0
def add_definition(List):
    """Dispatch one declaration statement against known token patterns.

    Tries a sequence of matches.matches() templates (enum, external
    direction, internal direction with optional widths / initializers /
    packed dimensions) and registers the resulting signals, assigns and
    localparams on Current. Logs an error when no pattern applies.
    """
    # enum <wire|logic> [wid] { A, B, ... } name, ... ;
    Vars = matches.matches(List,'enum !WireLogic !Width { !Tokens_list } !Tokens_list ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid = get_wid(Vars[1])
        List0 = get_list(Vars[2])
        List1 = get_list(Vars[3])
        for Net in List1:
            Current.add_sig(Net,Dir,Wid)
        for ind,Enum in enumerate(List0):
            # enum members become consecutive localparams 0,1,2,...
            Current.add_localparam(Enum,ind)
        return
    # input/output/inout name, ... ;
    Vars = matches.matches(List,'!ExtDir !Tokens_list ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        List0 = get_list(Vars[1])
        for Net in List0:
            Current.add_sig(Net,Dir,0)
        return
    # input/output/inout [wid] name, ... ;
    Vars = matches.matches(List,'!ExtDir !Width !Tokens_list ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid = get_wid(Vars[1])
        List0 = get_list(Vars[2])
        for Net in List0:
            Current.add_sig(Net,Dir,Wid)
        return
    # wire/reg [wid] name, ... = expr ;
    Vars = matches.matches(List,'!IntDir !Width !Tokens_list = !Expr ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid = get_wid(Vars[1])
        List0 = get_list(Vars[2])
        Expr = get_expr(Vars[3])
        for Net in List0:
            Current.add_sig(Net,Dir,Wid)
            # every declared net gets the same initializer
            Current.add_hard_assign(Net,Expr)
        return
    # wire/reg name = expr ;
    Vars = matches.matches(List,'!IntDir ? = !Expr ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        List0 = get_list(Vars[1])
        Expr = get_expr(Vars[2])
        for Net in List0:
            Current.add_sig(Net,Dir,0)
            Current.add_hard_assign(Net,Expr)
        return
    # wire/reg [wid] name = expr ;
    Vars = matches.matches(List,'!IntDir !Width ? = !Expr ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid = get_wid(Vars[1])
        List0 = get_list(Vars[2])
        Expr = get_expr(Vars[3])
        for Net in List0:
            Current.add_sig(Net,Dir,Wid)
            Current.add_hard_assign(Net,Expr)
        return
    # wire/reg #(params) name = expr ;  (the params in Vars[1] are dropped)
    Vars = matches.matches(List,'!IntDir !InstParams ? = !Expr ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        List0 = get_list(Vars[2])
        Expr = get_expr(Vars[3])
        for Net in List0:
            Current.add_sig(Net,Dir,0)
            Current.add_hard_assign(Net,Expr)
        return
    # wire/reg [wid] name, ... ;
    Vars = matches.matches(List,'!IntDir !Width !Tokens_list ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid = get_wid(Vars[1])
        List0 = get_list(Vars[2])
        for Net in List0:
            Current.add_sig(Net,Dir,Wid)
        return
    # wire/reg name, ... ;  — may also carry a user typedef in the direction
    Vars = matches.matches(List,'!IntDir !Tokens_list ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        List0 = get_list(Vars[1])
        if notUsualDir(Dir):
            # direction string contains a user typedef: record the typedef
            # and declare the net with the typedef's resolved width
            Usual,Type = notUsualDir(Dir)
            Wid = getTypeDefWid(Type)
            for Net in List0:
                record_original_typedef(Net,Type)
                Current.add_sig(Net,Usual,Wid)
        else:
            for Net in List0:
                Current.add_sig(Net,Dir,0)
        return
    # wire/reg [wid] name [wid] ;  -> unpacked ("double") memory
    Vars = matches.matches(List,'!IntDir !Width ? !Width ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid0 = get_wid(Vars[1])
        Wid1 = get_wid(Vars[3])
        Name = get_expr(Vars[2])
        Current.add_sig(Name,Dir,('double',Wid0,Wid1))
        return
    # wire/reg [wid][wid] name(s) ;  -> packed two-dimensional signal
    Vars = matches.matches(List,'!IntDir !Width !Width ? ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid0 = get_wid(Vars[1])
        Wid1 = get_wid(Vars[2])
        Name = get_list(Vars[3])
        if type(Name) is list:
            for Net in Name:
                Current.add_sig(Net,Dir,('packed',Wid0,Wid1))
        else:
            Current.add_sig(Name,Dir,('packed',Wid0,Wid1))
        return
    # wire/reg [wid] name [bit] ;
    Vars = matches.matches(List,'!IntDir !Width ? !BusBit ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid0 = get_wid(Vars[1])
        Name = get_expr(Vars[2])
        Wid1 = get_wid(Vars[3])
        Current.add_sig(Name,Dir,('double',Wid0,Wid1))
        return
    # NOTE(review): this pattern is byte-identical to the
    # '!IntDir !Width !Width ? ;' case above, so this branch can never
    # match — confirm and remove.
    Vars = matches.matches(List,'!IntDir !Width !Width ? ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid0 = get_wid(Vars[1])
        Wid1 = get_wid(Vars[2])
        Name = get_expr(Vars[3])
        Current.add_sig(Name,Dir,('packed',Wid0,Wid1))
        return
    # wire/reg [wid][wid] name = expr ;
    Vars = matches.matches(List,'!IntDir !Width !Width ? = ? ;',False)
    if Vars:
        Dir = get_dir(Vars[0])
        Wid0 = get_wid(Vars[1])
        Wid1 = get_wid(Vars[2])
        Name = get_expr(Vars[3])
        Expr = get_expr(Vars[4])
        Current.add_sig(Name,Dir,('packed',Wid0,Wid1))
        Current.add_hard_assign(Name,Expr)
        return
    # const logic [wid][wid] name = { e0, e1, ... } ;  -> constant table
    Vars = matches.matches(List,'const logic !Width !Width ? = { !Exprs } ;',False)
    if Vars:
        Wid0 = get_wid(Vars[0])
        Wid1 = get_wid(Vars[1])
        Exprs = get_expr_list(Vars[3])
        Name = Vars[2];
        Current.add_sig(Name,'wire',('packed',Wid1,Wid0))
        for ind,Val in enumerate(Exprs):
            # assign each table entry to its word slot
            Current.add_hard_assign(['subbit',Name,ind],Val)
        return
    logs.log_err('bad new definition "%s"'%str(List))
def get_expr_list(Item):
    """Flatten an 'Exprs' comma tree into a flat list of resolved expressions."""
    if (type(Item) is list)and(len(Item)==1):
        return get_expr_list(Item[0])
    if in_db(Item):
        # indirection node: resolve through the DataBase first
        return get_expr_list(DataBase[Item])
    if is_terminal(Item):
        return [get_expr(Item)]
    Vars = matches.matches(Item,'!Exprs , !Expr')
    if Vars:
        if in_db(Vars[0]):
            More = get_expr_list(DataBase[Vars[0]])
        else:
            # NOTE(review): get_expr returns a scalar here, not a list;
            # "More + [Expr]" below then assumes a list — confirm this
            # branch is only taken when the result supports '+'.
            More = get_expr(Vars[0])
        Expr = get_expr(Vars[1])
        return More + [Expr]
    logs.log_err('get_expr_list failed on "%s"'%str(Item))
    return []
def in_db(Item):
    """True when Item is a tuple key that is present in the parse DataBase."""
    is_tuple_key = type(Item) is tuple
    return is_tuple_key and (Item in DataBase)
def is_terminal(Item):
    """True when Item is a raw lexer token: a 4-tuple (text, kind, line, pos).

    Bugfix: the original carried two unreachable lines after the return
    (a copy-paste of in_db's body) — removed.
    """
    if type(Item) is not tuple: return False
    return len(Item)==4
def add_parameter(Ptr):
    """Walk a parameter subtree; register every 'Pair' (name = expr) on Current."""
    List2 = DataBase[Ptr]
    for Item in List2:
        if len(Item)==2:
            if (Item[0]=='Pair'):
                # Pair layout: [name_token, '=', expr]
                List = DataBase[Item]
                Name=List[0][0]
                Expr = get_expr(List[2])
                Current.add_parameter(Name,Expr)
            else:
                # non-Pair node: recurse into the nested list
                add_parameter(Item)
def add_localparam(List0):
    """Register localparam declarations (with optional widths) on Current.

    Handles 'localparam A = e, ...', 'localparam [w] A = e, ...' and
    'localparam [w][w] A = e, ...'; logs an error otherwise.
    """
    Vars = matches.matches(List0,'localparam !Pairs ;',False)
    if Vars:
        List1 = DataBase[Vars[0]]  # NOTE(review): unused — get_list() below does the walk
        Pairs = get_list(Vars[0])
        for Item in Pairs:
            if len(Item)==2:
                List2 = DataBase[Item]
                Vars2 = matches.matches(List2,'?token = !Expr',False)
                if Vars2:
                    Name = Vars2[0][0]
                    Expr = get_expr(Vars2[1])
                    Current.add_localparam(Name,Expr)
            elif (len(Item)==3)and(Item[0]=='parameter'):
                # already-resolved ('parameter', name, expr) triple
                Name = Item[1]
                Expr = get_expr(Item[2])
                Current.add_localparam(Name,Expr)
        return
    # localparam [wid] A = e, ... ;
    Vars = matches.matches(List0,'localparam !Width !Pairs ;',False)
    if Vars:
        List1 = DataBase[Vars[1]]
        for Item in List1:
            List2 = DataBase[Item]
            Vars2 = matches.matches(List2,'?token = !Expr',False)
            if Vars2:
                Name = Vars2[0][0]
                Expr = get_expr(Vars2[1])
                Current.add_localparam(Name,Expr)
        return
    # localparam [wid][wid] A = e, ... ;
    Vars = matches.matches(List0,'localparam !Width !Width !Pairs ;',False)
    if Vars:
        List1 = DataBase[Vars[2]]
        for Item in List1:
            List2 = DataBase[Item]
            Vars2 = matches.matches(List2,'?token = !Expr',False)
            if Vars2:
                Name = Vars2[0][0]
                Expr = get_expr(Vars2[1])
                Current.add_localparam(Name,Expr)
        return
    # NOTE(review): log_error here vs log_err everywhere else in this
    # file — confirm the logs module actually exposes log_error.
    logs.log_error('localparam failed in %s'%(str(List0)))
    return
def flattenList(Ptr):
    """Flatten a left-recursive DataBase list node into one flat token list.

    The grammar produces nodes of the shape (Key -> Key ',' item) or
    (Key -> Key item); this unwinds the recursion iteratively.
    """
    Key = Ptr[0]
    if tuple(Ptr) not in DataBase: return Ptr
    List = DataBase[Ptr]
    if (List[0][0]==Key):
        if len(List)>=3:
            # comma-separated recursion: [Key, ',', item, ...]
            while (len(List)>=3)and(List[1][0]==',')and(List[0][0]==Key):
                LL = DataBase[List[0]]
                List = LL + List[2:]
        else:
            # juxtaposed recursion: [Key, item, ...]
            while (len(List)>=2)and(List[0][0]==Key):
                LL = DataBase[List[0]]
                List = LL + List[1:]
    return List
def get_conns(Ptr):
    """Resolve a connections/parameters list into pin/value pairs.

    '.pin(expr)' connections yield [pin, expr] (expr is '*' for '.*' and
    False for an open '.pin()'); '.PARAM(expr)' assignments yield a
    (param, expr) tuple. Bare comma tokens are skipped.
    """
    List=DataBase[Ptr]  # NOTE(review): unused — flattenList below re-reads it
    List2 = flattenList(Ptr)
    res = []
    for Item in List2:
        if len(Item)==2:
            if (Item[0]=='Conns_list'):
                more = get_conns(Item)
                res.extend(more)
            elif (Item[0]=='Prms_list'):
                more = get_conns(Item)
                res.extend(more)
            elif (Item[0]=='Connection'):
                # rebinds List2 locally; safe because the for-loop already
                # holds a reference to the list being iterated
                List2 = DataBase[Item]
                Pin = List2[1][0]
                if Pin=='*':
                    # wildcard ".*" connection
                    Sig='*'
                elif List2[3][0]==')':
                    # empty "()" — explicitly unconnected pin
                    Sig=False
                else:
                    Sig = get_expr(List2[3])
                res.append([Pin,Sig])
            elif (Item[0]=='PrmAssign'):
                List2 = DataBase[Item]
                if (len(List2)==5)and(List2[0][0]=='.'):
                    Param = List2[1][0]
                    Sig = get_expr(List2[3])
                    res.append((Param,Sig))
                else:
                    logs.log_err( 'strange get_conns %s'%str(List2))
            else:
                logs.log_err('strange get_conns %s'%str(Item))
        elif (type(Item) is tuple)and(Item[0]==','):
            pass
        else:
            logs.log_err('strange2 get_conns %s'%str(Item))
    return res
def get_names(Ptr):
    """Collect identifier names from a token or a 'Tokens_list' tree.

    A bare token returns its text directly; a list node returns a list of
    names, expanded breadth-first via the More worklist.
    """
    if (len(Ptr)==4)and(Ptr[1]=='token'):
        return Ptr[0]
    List = DataBase[Ptr]
    res = []
    More=[]
    while List!=[]:
        for Item in List:
            if len(Item)==2:
                if (Item[0]=='Tokens_list'):
                    # queue nested list contents for the next sweep
                    More.extend(DataBase[Item])
            elif len(Item)==4:
                if Item[0]!=',':
                    res.append(Item[0])
        List=More[:]
        More=[]
    return res
def checkInPackages(Item):
    """Copy a parameter named Item from the saved packages module into
    Current.localparams, unless it is already defined there."""
    if not packages_save:
        return
    if 'PARAMETERS' not in dir(packages_save):
        return
    known = packages_save.PARAMETERS
    if (Item in known) and (Item not in Current.localparams):
        Current.localparams[Item] = known[Item]
def findField(Item):
    """Resolve a dotted "net.field" reference into a ['subbus', net, slice] node.

    Only the first two dot-separated parts are used; deeper nesting
    ("a.b.c") is silently ignored. Returns the sentinel string 'findField'
    when the field is unknown.
    """
    wrds = Item.split('.')
    Net = wrds[0]
    # the net must have been declared with a recorded typedef in this module
    Base = OriginalTypeDefs[(Net,Current.Module)]
    Field = wrds[1]
    Tot,Fields = getStructFields(Base)
    if Field in Fields:
        return ['subbus',Net,Fields[Field]]
    logs.log_info('findField failed on net="%s"'%str(Base))
    return 'findField'
def get_expr(Item):
    """Recursively resolve a parse-tree expression node to a plain value.

    Literals become ints/strings, identifiers become their text, and
    composite expressions become nested lists such as ['dig',wid,val],
    ['subbit',name,bit], [op,a,b], ['curly',...], ['question',c,t,f].
    Unrecognized shapes are logged and resolve to 0.
    """
    if type(Item) is int: return Item
    if type(Item) is str: return Item
    if len(Item)==4:
        # raw lexer token: (text, kind, line, pos)
        if Item[1]=='token':
            if '.' in Item[0]:
                # BUGFIX: was findField(Item[1]) — Item[1] is the literal
                # kind string 'token'; the dotted name lives in Item[0]
                # (the parallel len==2 branch below already does this).
                return findField(Item[0])
            # BUGFIX: was checkInPackages(Item) with the whole 4-tuple;
            # the package table is keyed by name (see the branch below).
            checkInPackages(Item[0])
            return Item[0]
        if Item[1]=='number':
            return int(Item[0])
        if Item[1]=='string':
            return Item[0]
        if Item[1]=='hex':
            return ['hex',Item[0]]
    if (type(Item) is tuple):
        if Item[0]=='*':
            return '*'
        if len(Item)==2:
            List = DataBase[Item]
            if Item[0]=='Crazy3':
                # cast-like wrapper: type'( expr ) — the type is dropped
                Vars = matches.matches(List,'? !crazy2 ? )',False)
                if Vars:
                    Type = Vars[0][0]
                    Expr = get_expr(Vars[2])
                    return Expr
            if len(List)==1:
                if List[0][0]in ['always','always_comb','always_ff','always_latch']: return 'always'
                # (removed an unreachable duplicate check that tested a
                # subset of the list above and could never fire)
                if List[0][0]=='Expr':
                    return get_expr(List[0])
                if List[0][1]=='floating':
                    return float(List[0][0])
                if List[0][1]=='exttype':
                    return List[0][0]
                if List[0][1]=='number':
                    return int(List[0][0])
                if List[0][1]=='define':
                    return ['define',List[0][0]]
                if List[0][1]=='token':
                    if '.' in List[0][0]:
                        return findField(List[0][0])
                    checkInPackages(List[0][0])
                    return List[0][0]
                if List[0][1]=='string':
                    return List[0][0]
                if List[0][1]=='ubin':
                    # sized binary literal N'bxxx -> ['bin', N, xxx]
                    X = List[0][0].split("'b")
                    return ['bin',X[0],X[1]]
                if List[0][1]=='uhex':
                    X = List[0][0].split("'h")
                    return ['hex',X[0],X[1]]
                if List[0][1]=='udig':
                    X = List[0][0].split("'d")
                    if len(X)==2:
                        if X[0] == '':
                            # bare 'd123 defaults to a 32-bit width
                            return ['dig',32,X[1]]
                        return ['dig',X[0],X[1]]
                    else:
                        return ['dig',32,X[0]]
                if List[0][1]=='bin':
                    if List[0][0]=="'x":
                        return ['bin',32,'x']
                    X = List[0][0].replace("'b",' ')
                    X = X.replace("'sb",' ')
                    X1 = X.split()
                    return ['bin',X1[0],X1[1]]
                if List[0][1]=='dig':
                    if "'sd" in List[0][0]:
                        X = List[0][0].replace("'sd",' ')
                    else:
                        X = List[0][0].replace("'d",' ')
                    X1 = X.split()
                    if len(X1)==1:
                        return ['dig',0,X1]
                    else:
                        return ['dig',X1[0],X1[1]]
                if List[0][1]=='hex':
                    X = List[0][0].replace("'h",' ')
                    X1 = X.split()
                    return ['hex',X1[0],X1[1]]
                if List[0][0]=='CurlyList':
                    return curly_list(List[0])
                if List[0][0]=='Dotted':
                    List2 = DataBase[List[0]]
                    Dotted = get_dotted_items(List2)
                    return ('dotted',Dotted)
                if List[0][0]=='Crazy':
                    # pattern-assignment '{...} forms
                    XX = DataBase[List[0]]
                    Vars = matches.matches(XX,"crazy1 default : ? }")
                    if Vars:
                        Expr = get_expr(Vars[0])
                        return Expr
                    Vars = matches.matches(XX,"crazy1 !Pairs2 }")
                    if Vars:
                        Pairs = get_list(Vars[0])
                        res = ['curly']
                        for A,B in Pairs:
                            res.append(B)
                        return res
                    logs.log_error('crazy got "%s"'%(str(XX)))
                    return 0
            elif len(List)==2:
                if List[0][0]=='#':
                    Dly = get_expr(List[1])
                    return ['#',Dly]
                if istoken(List[0])and(iswidth(List[1])):
                    return ['subbus',List[0][0],get_wid(List[1])]
                if istoken(List[0])and(isbusbit(List[1])):
                    return ['subbit',List[0][0],get_busbit(List[1])]
                if is_math_op(List[0])and isexpr(List[1]):
                    # unary operator applied to an expression
                    return [List[0][0],get_expr(List[1])]
            elif len(List)==3:
                if List[1][0]=='CurlyItems':
                    X = get_curly_items(List[1])
                    return ['curly']+X
                if isexpr(List[0])and isexpr(List[2]) and is_math_op(List[1]):
                    Expr0 = get_expr(List[0])
                    Expr1 = get_expr(List[2])
                    Op = List[1][0]
                    # associative ops: flatten nested same-op chains
                    # (deduplicated the '||' entry the original listed twice)
                    if Op in ['+','*','|','||','^','&&','<<','>>']:
                        RR = [Op]
                        if (type(Expr0) is tuple)and(Expr0[0]==Op):
                            RR.extend(list(Expr0[1:]))
                        else:
                            RR.append(Expr0)
                        if (type(Expr1) is tuple)and(Expr1[0]==Op):
                            RR.extend(list(Expr1[1:]))
                        else:
                            RR.append(Expr1)
                        return RR
                    return [Op,Expr0,Expr1]
                if (List[0][0]=='(')and(List[2][0]==')'):
                    return get_expr(List[1])
                if (List[0][1]=='token')and(List[1][0]=='BusBit')and(List[2][0]=='Width'):
                    BB = get_busbit(List[1])
                    BW = get_wid(List[2])
                    return ['sub_slice',List[0][0],BB,BW]
                if (List[0][1]=='token')and(List[1][0]=='BusBit')and(List[2][0]=='BusBit'):
                    BB = get_busbit(List[1])
                    BW = get_busbit(List[2])
                    return ['sub_slicebit',List[0][0],BB,BW]
            elif len(List)==4:
                if (List[1][0]=='(') and istoken(List[0]):
                    # function call: name ( exprs )
                    exprs = get_exprs(List[2])
                    return ['functioncall',List[0][0],exprs]
                elif (List[0][1]=='define'):
                    Def = List[0][0]
                    expr = get_expr(List[2])
                    return ['define',Def,expr]
                elif (List[0][1]=='#')and(List[1][1]=='(')and(List[3][1]==')'):
                    return get_expr(List[2])
            elif len(List)==5:
                if List[1][0]=='?':
                    # ternary: cond ? a : b — keep the raw false-arm token
                    # when it cannot be resolved
                    try:
                        return ['question',get_expr(List[0]),get_expr(List[2]),get_expr(List[4])]
                    except:
                        return ['question',get_expr(List[0]),get_expr(List[2]),List[4]]
                if (List[0][0]=='[')and(List[2][0]==':'):
                    Hi = get_expr(List[1])
                    Lo = get_expr(List[3])
                    return ['width',Hi,Lo]
            logs.log_err('bad get_expr %s %s %s'%(Item,len(List),List))
            traceback.print_stack(None,None,logs.Flogs[0])
            return 0
    # already-resolved nodes may be passed back in; accept them verbatim
    if len(Item)==3:
        if Item[0]=='dig':
            return ['dig',Item[1],Item[2]]
        if Item[0]=='hex':
            return ['hex',Item[1],Item[2]]
        if Item[0]=='bin':
            return ['bin',Item[1],Item[2]]
    if len(Item)==4:
        if (Item[1]=='floating'):
            return float(Item[0])
        if Item[1]=='always':
            return Item[0]
    if (len(Item)==3)and(Item[0] in ['**','<<','>>','+','-','/','*','functioncall']):
        return Item
    if (len(Item)==4)and(Item[0] in ['question']):
        return Item
    if Item[0] == 'curly':
        return Item
    logs.log_err('DB0: very bad expr %s %s'%(len(Item),str(Item)))
    traceback.print_stack()
    return 0
def get_dotted_items(List):
    """Resolve an interface-style dotted reference (a.b, a.b[i], a.b(args))."""
    if len(List)==3:
        if istoken(List[0]) and istoken(List[2]):
            return [List[0][0],List[2][0]]
        # NOTE(review): the recursive branch keeps the raw 4-tuple List[0]
        # rather than its text List[0][0] — confirm this is intended.
        More = get_dotted_items(DataBase[List[2]])
        return [List[0]]+More
    elif len(List)==4:
        LL = [List[0][0]]
        if isbusbit(List[3]):
            # a.b[i] — indexed member access
            LL.append(('subbit',List[2][0],get_busbit(List[3])))
        return LL
    elif len(List)==6:
        if istoken(List[0]) and istoken(List[2])and(isexpr(List[4])):
            # a.b(args) — method-style call on an interface member
            return [List[0][0],('functioncall',List[2][0],get_expr(List[4]))]
    logs.log_err('dotted ilia! len=%d %s'%(len(List),List))
    return []
def curly_list(Item):
    """Resolve a CurlyList node into a ['curly', item, ...] concatenation."""
    inner = DataBase[Item][1]
    return ['curly'] + get_curly_items(inner)
def get_curly_items(Item1):
    """Resolve the members of a {...} concatenation node into a flat list."""
    List = DataBase[Item1]
    res = []
    for Item in List:
        if len(Item)==2:
            if (Item[0]=='CurlyItem'):
                LL = DataBase[Item]
                if len(LL)==4:
                    # replication: {rep{what}}
                    Rep = get_expr(LL[0])
                    What = get_expr(LL[2])
                    X=['repeat',Rep,What]
                elif(len(LL)==1):
                    X = get_expr(LL[0])
                elif(len(LL)==2):
                    # NOTE(review): this returns immediately, discarding any
                    # items already collected in res — confirm intended.
                    X = get_expr(LL[0])
                    Y = curly_list(LL[1])
                    return ['repeat',X,Y]
                else:
                    logs.log_err('curly got %s out of %s'%(Item,LL))
                    X='error'
                # splice single-element nested lists instead of nesting them
                if (type(X) is list)and(len(X)==1)and (type(X[0])is list):
                    res.extend(X)
                else:
                    res.append(X)
            elif (Item[0]=='CurlyItems'):
                # left-recursive comma list: unwind it back-to-front
                LL = DataBase[Item]
                while (len(LL)==3)and(LL[1][0]==','):
                    A = get_curly_items(LL[2])
                    LL = DataBase[LL[0]]
                    res = A + res
                more = get_curly_items(LL[0])
                res = more + res
            elif (Item[0]=='Expr'):
                X = get_expr(Item)
                res.append(X)
            else:
                # NOTE(review): bails with [] on the first unknown item,
                # discarding everything gathered so far — confirm intended.
                logs.log_err('curly got %s'%(str(Item)))
                return []
    return res
# Recognized operator spellings for is_math_op (kept verbatim).
MathOps = ('** ~^ !^ ~& !& ~| !& + - * / ^ % & | && || ! ~ < > << >> >>> == <= >= != === !==').split()

def istoken(X):
    """True for a raw lexer 4-tuple whose kind is 'token'."""
    return (len(X) == 4) and (X[1] == 'token')

def isbusbit(X):
    """True for a 'BusBit' tree-node handle."""
    return (len(X) == 2) and (X[0] == 'BusBit')

def iswidth(X):
    """True for a 'Width' or 'WidthInt' tree-node handle."""
    if len(X) != 2:
        return False
    return X[0] in ('Width', 'WidthInt')

def isexpr(X):
    """True for an 'Expr' tree-node handle."""
    return (len(X) == 2) and (X[0] == 'Expr')

def is_math_op(X):
    """True for a lexer token whose text is one of the supported operators."""
    return (len(X) == 4) and (X[0] in MathOps)
def infoOf(Key):
    """Best-effort source location string ("line=.. pos=..") for a DataBase node."""
    for tok in DataBase[Key]:
        # the first raw lexer 4-tuple carries (text, kind, line, pos)
        if len(tok) == 4:
            return 'line=%s pos=%s' % (tok[2], tok[3])
    return 'location unknown'
# Standard Verilog direction/type keywords; anything else is a user typedef.
usualDirs = ('input output inout wire reg logic signed unsigned genvar integer').split()

def notUsualDir(Dir):
    """Detect a user typedef inside a direction string.

    Returns False when Dir consists only of standard keywords; otherwise
    returns (usual_prefix, typedef_word) for the first non-standard word,
    with the prefix defaulting to 'wire' when no keyword preceded it.
    """
    if Dir in usualDirs:
        return False
    goods = []
    for word in Dir.split():
        if word in usualDirs:
            goods.append(word)
            continue
        prefix = goods if goods else ['wire']
        return ' '.join(prefix), word
    return False
def picklize():
    """Dump the parsed Modules dictionary to 'modules.pickle'.

    Bugfix: the file is opened in binary mode ('wb'), which pickle.dump
    requires on Python 3 (text mode raises TypeError); the context manager
    also guarantees the handle is closed if dump raises.
    """
    with open('modules.pickle', 'wb') as Fout:
        pickle.dump(Modules, Fout)
def ensure(Cond, What):
    """Log an 'ensure failed' error describing What unless Cond holds."""
    if not Cond:
        logs.log_err('ensure failed on %s' % str(What))
# Script entry point: run the top-level driver defined earlier in this file.
if __name__=='__main__':
    main()
| 30.848887 | 103 | 0.496222 |
b541195657e23ab27bc9734e8959f3324198e146 | 3,449 | py | Python | python/gnuplot/coaps_plot.py | seekindark/helloworld | 00fe439fdbd98add53f3bec7eac2b1ba1dc817a7 | [
"BSD-2-Clause"
] | null | null | null | python/gnuplot/coaps_plot.py | seekindark/helloworld | 00fe439fdbd98add53f3bec7eac2b1ba1dc817a7 | [
"BSD-2-Clause"
] | 2 | 2019-06-24T00:56:52.000Z | 2019-06-24T01:28:29.000Z | python/gnuplot/coaps_plot.py | seekindark/helloworld | 00fe439fdbd98add53f3bec7eac2b1ba1dc817a7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
from console_loggin import *
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import time
import os
#0 view h248 statis current coaps
#1 Current Coaps Statistics Value:
#2 (Elapsed time: 0:11:52)
#3 ===================================================
#4 Max Min Avg
#5 Context Attempts per Second : 59 0 45
#6 Accept Context Attempts per Second : 59 0 45
#7 Successful Context Completion per Second : 0 0 0
#8 Unsuccessful Context Completion per Second : 0 0 0
#9 Reject Context Attempts per Second : 0 0 0
#10 ADD Commands per Second : 117 0 90
#11 H.248 Messages per Second : 87 0 67
#12 H.248 Commands per Second : 146 0 112
#13 CmSock layer messages per Second : 87 0 67
#14 SCoC: H.248 Messages per Context : 0 0 0
#15 SCoC: H.248 Commands per Context : 0 0 0
#16 H248.11 Events per Second : 0 0 0
#17 Dropped Messages per Second : 0 0 0
#18 --------------------------------------------------------------------------
#19 sbc01media:ACT-SCM:1.11(r0)>=9:diag:main:vMG1#
def parse(output):
    """Extract the 'Avg' column counters from 'view h248 statis current coaps'.

    Expects the fixed 20-line console dump shown in the header comment and
    returns (aca, rca, add): the average Accept Context Attempts, Reject
    Context Attempts and ADD Commands per second.

    Bugfix: the error message had a typo ("ERRROR") and claimed "< 20"
    although any line count other than 20 is rejected; the actual count is
    now reported. SystemExit is raised explicitly (same exception the old
    bare exit() produced) so behavior does not depend on the site module.
    """
    lines = output.splitlines()
    if len(lines) != 20:
        print("ERROR: unexpected output (%d lines, expected 20)" % len(lines))
        raise SystemExit(1)
    # rows 6/9/10 hold the counters; the last whitespace token is the Avg
    aca = int(lines[6].split()[-1])
    rca = int(lines[9].split()[-1])
    add = int(lines[10].split()[-1])
    return (aca, rca, add)
def main():
    """Poll h248 coaps statistics once per second and plot the averages.

    Configures sysmon tracing on the device via the diag console, samples
    'view h248 statis current coaps' for num_of_seconds seconds, and saves
    the aca/rca/add average counters as plot.png. A command timeout aborts
    the run with a message.
    """
    num_of_seconds = 500
    acas = []
    rcas = []
    adds = []
    diag = Diag()
    try:
        # one-time device setup: select the card, reset counters, arm tracing
        output = diag.run_command("define vmg sc 1")
        print(output)
        output = diag.run_command("clear h248 statis current")
        print(output)
        output = diag.run_command("diag clear sysmon trace_values")
        print(output)
        output = diag.run_command("diag define sysmon trace_value h248_statis add 0 100")
        print(output)
        output = diag.run_command("diag define sysmon trace_value h248_statis aca 0 100")
        print(output)
        output = diag.run_command("diag define sysmon trace_value h248_statis rca 0 100")
        print(output)
        output = diag.run_command("diag define sysmon start h248_t h248_t 1")
        print(output)
        # sample the three counters once per second
        for i in range(num_of_seconds):
            print("Second: %d" % i)
            output = diag.run_command("view h248 statis current coaps")
            parsed = parse(output)
            acas.append(parsed[0])
            rcas.append(parsed[1])
            adds.append(parsed[2])
            print(parsed)
            time.sleep(1)
        output = diag.run_command("diag define sysmon end")
        print(output)
        # plot all three series against elapsed seconds and save to disk
        x = range(num_of_seconds)
        fig = plt.figure()
        plt.plot(x, acas, label = "aca")
        plt.plot(x, rcas, label = "rca")
        plt.plot(x, adds, label = "add")
        plt.legend(loc='upper left')
        fig.savefig('plot.png')
    except Diag.CommandTimeoutException:
        print("timeout")
# Script entry point.
if __name__ == '__main__': main()
| 35.556701 | 89 | 0.504204 |
c5ea89fa5b62ae91125a0218ac2c5cf782ef8648 | 54,529 | py | Python | drf_spectacular/openapi.py | blueyed/drf-spectacular | 06737f96063943eab73639ab254a1ed52ad5ae63 | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/openapi.py | blueyed/drf-spectacular | 06737f96063943eab73639ab254a1ed52ad5ae63 | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/openapi.py | blueyed/drf-spectacular | 06737f96063943eab73639ab254a1ed52ad5ae63 | [
"BSD-3-Clause"
] | null | null | null | import copy
import re
import typing
import uritemplate
from django.core import exceptions as django_exceptions
from django.core import validators
from django.db import models
from django.utils.translation import gettext_lazy as _
from rest_framework import permissions, renderers, serializers
from rest_framework.fields import _UnvalidatedField, empty
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import ListModelMixin
from rest_framework.schemas.inspectors import ViewInspector
from rest_framework.schemas.utils import get_pk_description # type: ignore
from rest_framework.settings import api_settings
from rest_framework.utils.model_meta import get_field_info
from rest_framework.views import APIView
from drf_spectacular.authentication import OpenApiAuthenticationExtension
from drf_spectacular.contrib import * # noqa: F403, F401
from drf_spectacular.drainage import add_trace_message, get_override, has_override
from drf_spectacular.extensions import (
OpenApiFilterExtension, OpenApiSerializerExtension, OpenApiSerializerFieldExtension,
)
from drf_spectacular.plumbing import (
ComponentRegistry, ResolvedComponent, UnableToProceedError, append_meta, build_array_type,
build_basic_type, build_choice_field, build_examples_list, build_media_type_object,
build_object_type, build_parameter_type, error, follow_field_source, force_instance, get_doc,
get_view_model, is_basic_type, is_create_operation, is_field, is_list_serializer,
is_patched_serializer, is_serializer, is_trivial_string_variation, resolve_regex_path_parameter,
resolve_type_hint, safe_ref, warn,
)
from drf_spectacular.settings import spectacular_settings
from drf_spectacular.types import OpenApiTypes, build_generic_type
from drf_spectacular.utils import OpenApiParameter, OpenApiResponse
class AutoSchema(ViewInspector):
    # Maps HTTP verbs to the DRF-style action names used when building
    # operationIds (GET additionally maps to 'list' for list views;
    # see get_operation_id below).
    method_mapping = {
        'get': 'retrieve',
        'post': 'create',
        'put': 'update',
        'patch': 'partial_update',
        'delete': 'destroy',
    }
    def get_operation(self, path, path_regex, path_prefix, method, registry: ComponentRegistry):
        """Assemble the OpenAPI operation object for one (path, method) pair.

        Stores the call context on self (used by all helper methods), then
        collects operationId, description/summary, parameters, tags, request
        body, security, the deprecation flag and responses, omitting any key
        that resolves empty.
        """
        self.registry = registry
        self.path = path
        self.path_regex = path_regex
        self.path_prefix = path_prefix
        self.method = method
        operation = {}
        operation['operationId'] = self.get_operation_id()
        operation['description'] = self.get_description()
        summary = self.get_summary()
        if summary:
            operation['summary'] = summary
        parameters = self._get_parameters()
        if parameters:
            operation['parameters'] = parameters
        tags = self.get_tags()
        if tags:
            operation['tags'] = tags
        request_body = self._get_request_body()
        if request_body:
            operation['requestBody'] = request_body
        auth = self.get_auth()
        if auth:
            operation['security'] = auth
        deprecated = self.is_deprecated()
        if deprecated:
            operation['deprecated'] = deprecated
        operation['responses'] = self._get_response_bodies()
        return operation
    def _is_list_view(self, serializer=None):
        """
        Partially heuristic approach to determine if a view yields an object
        or a list of objects. Used for operationId naming, array building and
        pagination. Defaults to False if all introspection fails.
        """
        if serializer is None:
            serializer = self.get_response_serializers()
        if isinstance(serializer, dict) and serializer:
            # extract likely main serializer from @extend_schema override
            serializer = {str(code): s for code, s in serializer.items()}
            serializer = serializer[min(serializer)]
        if is_list_serializer(serializer):
            return True
        if is_basic_type(serializer):
            return False
        if hasattr(self.view, 'action'):
            # ViewSets carry an explicit action name
            return self.view.action == 'list'
        # list responses are "usually" only returned by GET
        if self.method.lower() != 'get':
            return False
        if isinstance(self.view, ListModelMixin):
            return True
        # primary key/lookup variable in path is a strong indicator for retrieve
        if isinstance(self.view, GenericAPIView):
            lookup_url_kwarg = self.view.lookup_url_kwarg or self.view.lookup_field
            if lookup_url_kwarg in uritemplate.variables(self.path):
                return False
        return False
    def get_override_parameters(self):
        """Hook for subclasses: return extra OpenApiParameter or serializer
        entries to merge into the operation's parameter list."""
        return []
    def _process_override_parameters(self):
        """Normalize parameter overrides into a dict keyed by (name, location).

        OpenApiParameter entries become parameter objects (or None when
        marked exclude, signalling removal); serializers are exploded into
        one QUERY parameter per declared property. Unresolvable entries are
        warned about and skipped.
        """
        result = {}
        for parameter in self.get_override_parameters():
            if isinstance(parameter, OpenApiParameter):
                if parameter.response:
                    # response headers are not request parameters
                    continue
                if is_basic_type(parameter.type):
                    schema = build_basic_type(parameter.type)
                elif is_serializer(parameter.type):
                    schema = self.resolve_serializer(parameter.type, 'request').ref
                else:
                    # assume a raw schema dict was provided
                    schema = parameter.type
                if parameter.exclude:
                    result[parameter.name, parameter.location] = None
                else:
                    result[parameter.name, parameter.location] = build_parameter_type(
                        name=parameter.name,
                        schema=schema,
                        location=parameter.location,
                        required=parameter.required,
                        description=parameter.description,
                        enum=parameter.enum,
                        deprecated=parameter.deprecated,
                        style=parameter.style,
                        explode=parameter.explode,
                        default=parameter.default,
                        examples=build_examples_list(parameter.examples),
                    )
            elif is_serializer(parameter):
                # explode serializer into separate parameters. defaults to QUERY location
                mapped = self._map_serializer(parameter, 'request')
                for property_name, property_schema in mapped['properties'].items():
                    result[property_name, OpenApiParameter.QUERY] = build_parameter_type(
                        name=property_name,
                        schema=property_schema,
                        description=property_schema.pop('description', None),
                        location=OpenApiParameter.QUERY,
                        required=property_name in mapped.get('required', []),
                    )
            else:
                warn(f'could not resolve parameter annotation {parameter}. Skipping.')
        return result
    def _get_format_parameters(self):
        """Emit the ?format=... query parameter when URL format overriding is
        enabled and more than one renderer format is available."""
        parameters = []
        formats = self.map_renderers('format')
        if api_settings.URL_FORMAT_OVERRIDE and len(formats) > 1:
            parameters.append(build_parameter_type(
                name=api_settings.URL_FORMAT_OVERRIDE,
                schema=build_basic_type(OpenApiTypes.STR),
                location=OpenApiParameter.QUERY,
                enum=formats
            ))
        return parameters
    def _get_parameters(self):
        """Merge path, filter, pagination and format parameters, apply the
        @extend_schema overrides, and return the final (optionally sorted)
        parameter list."""
        def dict_helper(parameters):
            # index a parameter list by its unique (name, location) pair
            return {(p['name'], p['in']): p for p in parameters}
        override_parameters = self._process_override_parameters()
        # remove overridden path parameters beforehand so that there are no irrelevant warnings.
        path_variables = [
            v for v in uritemplate.variables(self.path) if (v, 'path') not in override_parameters
        ]
        # later sources win on (name, location) collisions
        parameters = {
            **dict_helper(self._resolve_path_parameters(path_variables)),
            **dict_helper(self._get_filter_parameters()),
            **dict_helper(self._get_pagination_parameters()),
            **dict_helper(self._get_format_parameters()),
        }
        # override/add/remove @extend_schema parameters
        for key, parameter in override_parameters.items():
            if parameter is None:
                # either omit or explicitly remove parameter
                if key in parameters:
                    del parameters[key]
            else:
                parameters[key] = parameter
        if callable(spectacular_settings.SORT_OPERATION_PARAMETERS):
            return sorted(parameters.values(), key=spectacular_settings.SORT_OPERATION_PARAMETERS)
        elif spectacular_settings.SORT_OPERATION_PARAMETERS:
            return sorted(parameters.values(), key=lambda p: p['name'])
        else:
            return list(parameters.values())
    def get_description(self):
        """Derive the operation description from the action/method docstring,
        falling back to the view class docstring. Override for custom
        behaviour."""
        action_or_method = getattr(self.view, getattr(self.view, 'action', self.method.lower()), None)
        view_doc = get_doc(self.view.__class__)
        action_doc = get_doc(action_or_method)
        return action_doc or view_doc
    def get_summary(self):
        """Hook for subclasses: optional short summary string (default: none)."""
        return None
    def get_auth(self):
        """
        Obtains authentication classes and permissions from view. If authentication
        is known, resolve security requirement for endpoint and security definition for
        the component section.
        For custom authentication subclass ``OpenApiAuthenticationExtension``.
        """
        auths = []
        for authenticator in self.view.get_authenticators():
            scheme = OpenApiAuthenticationExtension.get_match(authenticator)
            if not scheme:
                warn(
                    f'could not resolve authenticator {authenticator.__class__}. There '
                    f'was no OpenApiAuthenticationExtension registered for that class. '
                    f'Try creating one by subclassing it. Ignoring for now.'
                )
                continue
            security_requirements = scheme.get_security_requirement(self)
            if security_requirements is not None:
                auths.append(security_requirements)
            # register the scheme definition lazily so only schemes that are
            # actually used end up in the components section
            component = ResolvedComponent(
                name=scheme.name,
                type=ResolvedComponent.SECURITY_SCHEMA,
                object=authenticator.__class__,
                schema=scheme.get_security_definition(self)
            )
            self.registry.register_on_missing(component)
        if spectacular_settings.SECURITY:
            auths.extend(spectacular_settings.SECURITY)
        perms = [p.__class__ for p in self.view.get_permissions()]
        # an empty security requirement ({}) marks anonymous access as allowed
        if permissions.AllowAny in perms:
            auths.append({})
        elif permissions.IsAuthenticatedOrReadOnly in perms and self.method in permissions.SAFE_METHODS:
            auths.append({})
        return auths
    def get_request_serializer(self):
        """Serializer used for the request body; defaults to the view's
        serializer. Override for custom behaviour."""
        return self._get_serializer()
    def get_response_serializers(self):
        """Serializer(s) used for the response bodies; defaults to the view's
        serializer. Override for custom behaviour."""
        return self._get_serializer()
def get_tags(self) -> typing.List[str]:
""" override this for custom behaviour """
tokenized_path = self._tokenize_path()
# use first non-parameter path part as tag
return tokenized_path[:1]
    def get_operation_id(self):
        """Build a unique operationId from the path segments plus the action
        verb (list/retrieve/create/...). Override for custom behaviour."""
        tokenized_path = self._tokenize_path()
        # replace dashes as they can be problematic later in code generation
        tokenized_path = [t.replace('-', '_') for t in tokenized_path]
        if self.method.lower() == 'get' and self._is_list_view():
            action = 'list'
        else:
            action = self.method_mapping[self.method.lower()]
        if not tokenized_path:
            # path had only the prefix/variables — anchor the id at 'root'
            tokenized_path.append('root')
        if re.search(r'<drf_format_suffix\w*:\w+>', self.path_regex):
            # format-suffixed routes get a distinguishing suffix
            tokenized_path.append('formatted')
        return '_'.join(tokenized_path + [action])
    def is_deprecated(self):
        """Hook for subclasses: mark the operation deprecated (default: False)."""
        return False
def _tokenize_path(self):
# remove path prefix
path = re.sub(
pattern=self.path_prefix,
repl='',
string=self.path,
flags=re.IGNORECASE
)
# remove path variables
path = re.sub(pattern=r'\{[\w\-]+\}', repl='', string=path)
# cleanup and tokenize remaining parts.
path = path.rstrip('/').lstrip('/').split('/')
return [t for t in path if t]
def _resolve_path_parameters(self, variables):
parameters = []
for variable in variables:
schema = build_basic_type(OpenApiTypes.STR)
description = ''
resolved_parameter = resolve_regex_path_parameter(
self.path_regex, variable, self.map_renderers('format'),
)
if resolved_parameter:
schema = resolved_parameter['schema']
elif get_view_model(self.view) is None:
warn(
f'could not derive type of path parameter "{variable}" because because it '
f'is untyped and obtaining queryset from the viewset failed. '
f'Consider adding a type to the path (e.g. <int:{variable}>) or annotating '
f'the parameter type with @extend_schema. Defaulting to "string".'
)
else:
try:
model = get_view_model(self.view)
model_field = model._meta.get_field(variable)
schema = self._map_model_field(model_field, direction=None)
if 'description' not in schema and model_field.primary_key:
description = get_pk_description(model, model_field)
except django_exceptions.FieldDoesNotExist:
warn(
f'could not derive type of path parameter "{variable}" because '
f'model "{model}" did contain no such field. Consider annotating '
f'parameter with @extend_schema. Defaulting to "string".'
)
parameters.append(build_parameter_type(
name=variable,
location=OpenApiParameter.PATH,
description=description,
schema=schema
))
return parameters
    def _get_filter_parameters(self):
        """Collect query parameters contributed by the view's filter backends
        (list views only)."""
        if not self._is_list_view():
            return []
        if getattr(self.view, 'filter_backends', None) is None:
            return []
        parameters = []
        for filter_backend in self.view.filter_backends:
            filter_extension = OpenApiFilterExtension.get_match(filter_backend())
            if filter_extension:
                # a registered extension takes precedence over the backend's
                # own schema hook
                parameters += filter_extension.get_schema_operation_parameters(self)
            else:
                # NOTE(review): instantiates the backend a second time here
                parameters += filter_backend().get_schema_operation_parameters(self.view)
        return parameters
    def _get_pagination_parameters(self):
        """Collect pagination query parameters for list views, preferring a
        registered OpenApiFilterExtension over the paginator's own schema hook."""
        if not self._is_list_view():
            return []
        paginator = self._get_paginator()
        if not paginator:
            return []
        filter_extension = OpenApiFilterExtension.get_match(paginator)
        if filter_extension:
            return filter_extension.get_schema_operation_parameters(self)
        else:
            return paginator.get_schema_operation_parameters(self.view)
    def _map_model_field(self, model_field, direction):
        """Derive an OpenAPI schema for a Django model field.

        Tries DRF's own ModelSerializer field construction first, then walks a
        chain of fallbacks (relation patching, FK target hop, internal-type
        lookup) and finally degrades to a plain string schema with an error.
        The branch order below is load-bearing; do not reorder.
        """
        assert isinstance(model_field, models.Field)
        # to get a fully initialized serializer field we use DRF's own init logic
        try:
            field_cls, field_kwargs = serializers.ModelSerializer().build_field(
                field_name=model_field.name,
                info=get_field_info(model_field.model),
                model_class=model_field.model,
                nested_depth=0,
            )
            field = field_cls(**field_kwargs)
            field.field_name = model_field.name
        except:  # noqa
            # deliberately broad: any failure in DRF's init logic means we fall
            # through to the model-level fallbacks below
            field = None
        # For some cases, the DRF init logic either breaks (custom field with internal type) or
        # the resulting field is underspecified with regards to the schema (ReadOnlyField).
        if field and isinstance(field, serializers.PrimaryKeyRelatedField):
            # special case handling only for _resolve_path_parameters() where neither queryset nor
            # parent is set by build_field. patch in queryset as _map_serializer_field requires it
            if not field.queryset:
                field.queryset = model_field.related_model.objects.none()
            return self._map_serializer_field(field, direction)
        elif isinstance(field, serializers.ManyRelatedField):
            # special case handling similar to the case above. "parent.parent" on child_relation
            # is None and there is no queryset. patch in as _map_serializer_field requires one.
            if not field.child_relation.queryset:
                field.child_relation.queryset = model_field.related_model.objects.none()
            return self._map_serializer_field(field, direction)
        elif field and not isinstance(field, (serializers.ReadOnlyField, serializers.ModelField)):
            # a regular, fully specified serializer field: map it directly
            return self._map_serializer_field(field, direction)
        elif isinstance(model_field, models.ForeignKey):
            # hop to the referenced model's target field (usually its PK)
            return self._map_model_field(model_field.target_field, direction)
        elif hasattr(models, 'JSONField') and isinstance(model_field, models.JSONField):
            # fix for DRF==3.11 with django>=3.1 as it is not yet represented in the field_mapping
            return build_basic_type(OpenApiTypes.OBJECT)
        elif hasattr(models, model_field.get_internal_type()):
            # be graceful when the model field is not explicitly mapped to a serializer
            internal_type = getattr(models, model_field.get_internal_type())
            field_cls = serializers.ModelSerializer.serializer_field_mapping.get(internal_type)
            if not field_cls:
                warn(
                    f'model field "{model_field.get_internal_type()}" has no mapping in '
                    f'ModelSerializer. It may be a deprecated field. Defaulting to "string"'
                )
                return build_basic_type(OpenApiTypes.STR)
            return self._map_serializer_field(field_cls(), direction)
        else:
            error(
                f'could not resolve model field "{model_field}". Failed to resolve through '
                f'serializer_field_mapping, get_internal_type(), or any override mechanism. '
                f'Defaulting to "string"'
            )
            return build_basic_type(OpenApiTypes.STR)
def _map_serializer_field(self, field, direction):
meta = self._get_serializer_field_meta(field)
if has_override(field, 'field'):
override = get_override(field, 'field')
if is_basic_type(override):
schema = build_basic_type(override)
if schema is None:
return None
elif isinstance(override, dict):
schema = override
else:
schema = self._map_serializer_field(force_instance(override), direction)
field_component_name = get_override(field, 'field_component_name')
if field_component_name:
component = ResolvedComponent(
name=field_component_name,
type=ResolvedComponent.SCHEMA,
schema=schema,
object=field,
)
self.registry.register_on_missing(component)
return append_meta(component.ref, meta)
else:
return append_meta(schema, meta)
serializer_field_extension = OpenApiSerializerFieldExtension.get_match(field)
if serializer_field_extension:
schema = serializer_field_extension.map_serializer_field(self, direction)
if serializer_field_extension.get_name():
component = ResolvedComponent(
name=serializer_field_extension.get_name(),
type=ResolvedComponent.SCHEMA,
schema=schema,
object=field,
)
self.registry.register_on_missing(component)
return append_meta(component.ref, meta)
else:
return append_meta(schema, meta)
# nested serializer with many=True gets automatically replaced with ListSerializer
if is_list_serializer(field):
if is_serializer(field.child):
component = self.resolve_serializer(field.child, direction)
return append_meta(build_array_type(component.ref), meta) if component else None
else:
schema = self._map_serializer_field(field.child, direction)
return append_meta(build_array_type(schema), meta)
# nested serializer
if is_serializer(field):
component = self.resolve_serializer(field, direction)
return append_meta(component.ref, meta) if component else None
# Related fields.
if isinstance(field, serializers.ManyRelatedField):
schema = self._map_serializer_field(field.child_relation, direction)
# remove hand-over initkwargs applying only to outer scope
schema.pop('description', None)
schema.pop('readOnly', None)
return append_meta(build_array_type(schema), meta)
if isinstance(field, serializers.PrimaryKeyRelatedField):
# read_only fields do not have a Manager by design. go around and get field
# from parent. also avoid calling Manager. __bool__ as it might be customized
# to hit the database.
if getattr(field, 'queryset', None) is not None:
model_field = field.queryset.model._meta.pk
else:
if isinstance(field.parent, serializers.ManyRelatedField):
model = field.parent.parent.Meta.model
source = field.parent.source.split('.')
else:
model = field.parent.Meta.model
source = field.source.split('.')
# estimates the relating model field and jumps to it's target model PK field.
# also differentiate as source can be direct (pk) or relation field (model).
model_field = follow_field_source(model, source)
if callable(model_field):
# follow_field_source bailed with a warning. be graceful and default to str.
model_field = models.TextField()
# primary keys are usually non-editable (readOnly=True) and map_model_field correctly
# signals that attribute. however this does not apply in the context of relations.
schema = self._map_model_field(model_field, direction)
schema.pop('readOnly', None)
return append_meta(schema, meta)
if isinstance(field, serializers.StringRelatedField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.SlugRelatedField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.HyperlinkedIdentityField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.HyperlinkedRelatedField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.MultipleChoiceField):
return append_meta(build_array_type(build_choice_field(field)), meta)
if isinstance(field, serializers.ChoiceField):
return append_meta(build_choice_field(field), meta)
if isinstance(field, serializers.ListField):
if isinstance(field.child, _UnvalidatedField):
return append_meta(build_array_type(build_basic_type(OpenApiTypes.ANY)), meta)
elif is_serializer(field.child):
component = self.resolve_serializer(field.child, direction)
return append_meta(build_array_type(component.ref), meta) if component else None
else:
schema = self._map_serializer_field(field.child, direction)
# remove automatically attached but redundant title
if is_trivial_string_variation(field.field_name, schema.get('title')):
schema.pop('title', None)
return append_meta(build_array_type(schema), meta)
# DateField and DateTimeField type is string
if isinstance(field, serializers.DateField):
return append_meta(build_basic_type(OpenApiTypes.DATE), meta)
if isinstance(field, serializers.DateTimeField):
return append_meta(build_basic_type(OpenApiTypes.DATETIME), meta)
if isinstance(field, serializers.TimeField):
return append_meta(build_basic_type(OpenApiTypes.TIME), meta)
if isinstance(field, serializers.EmailField):
return append_meta(build_basic_type(OpenApiTypes.EMAIL), meta)
if isinstance(field, serializers.URLField):
return append_meta(build_basic_type(OpenApiTypes.URI), meta)
if isinstance(field, serializers.UUIDField):
return append_meta(build_basic_type(OpenApiTypes.UUID), meta)
if isinstance(field, serializers.DurationField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.IPAddressField):
# TODO this might be a DRF bug. protocol is not propagated to serializer although it
# should have been. results in always 'both' (thus no format)
if 'ipv4' == field.protocol.lower():
schema = build_basic_type(OpenApiTypes.IP4)
elif 'ipv6' == field.protocol.lower():
schema = build_basic_type(OpenApiTypes.IP6)
else:
schema = build_basic_type(OpenApiTypes.STR)
return append_meta(schema, meta)
# DecimalField has multipleOf based on decimal_places
if isinstance(field, serializers.DecimalField):
if getattr(field, 'coerce_to_string', api_settings.COERCE_DECIMAL_TO_STRING):
content = {**build_basic_type(OpenApiTypes.STR), 'format': 'decimal'}
if field.max_whole_digits:
content['pattern'] = (
f'^\\d{{0,{field.max_whole_digits}}}'
f'(\\.\\d{{0,{field.decimal_places}}})?$'
)
else:
content = build_basic_type(OpenApiTypes.DECIMAL)
if field.max_whole_digits:
content['maximum'] = int(field.max_whole_digits * '9') + 1
content['minimum'] = -content['maximum']
self._map_min_max(field, content)
return append_meta(content, meta)
if isinstance(field, serializers.FloatField):
content = build_basic_type(OpenApiTypes.FLOAT)
self._map_min_max(field, content)
return append_meta(content, meta)
if isinstance(field, serializers.IntegerField):
content = build_basic_type(OpenApiTypes.INT)
self._map_min_max(field, content)
# 2147483647 is max for int32_size, so we use int64 for format
if int(content.get('maximum', 0)) > 2147483647 or int(content.get('minimum', 0)) > 2147483647:
content['format'] = 'int64'
return append_meta(content, meta)
if isinstance(field, serializers.FileField):
if spectacular_settings.COMPONENT_SPLIT_REQUEST and direction == 'request':
content = build_basic_type(OpenApiTypes.BINARY)
else:
use_url = getattr(field, 'use_url', api_settings.UPLOADED_FILES_USE_URL)
content = build_basic_type(OpenApiTypes.URI if use_url else OpenApiTypes.STR)
return append_meta(content, meta)
if isinstance(field, serializers.SerializerMethodField):
method = getattr(field.parent, field.method_name)
return append_meta(self._map_response_type_hint(method), meta)
if isinstance(field, (serializers.BooleanField, serializers.NullBooleanField)):
return append_meta(build_basic_type(OpenApiTypes.BOOL), meta)
if isinstance(field, serializers.JSONField):
return append_meta(build_basic_type(OpenApiTypes.OBJECT), meta)
if isinstance(field, (serializers.DictField, serializers.HStoreField)):
content = build_basic_type(OpenApiTypes.OBJECT)
if not isinstance(field.child, _UnvalidatedField):
content['additionalProperties'] = self._map_serializer_field(field.child, direction)
return append_meta(content, meta)
if isinstance(field, serializers.CharField):
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
if isinstance(field, serializers.ReadOnlyField):
# direct source from the serializer
assert field.source_attrs, f'ReadOnlyField "{field}" needs a proper source'
# when field is nested inside a ListSerializer, the Meta class is 2 steps removed
if is_list_serializer(field.parent):
model = field.parent.parent.Meta.model
source = field.parent.source_attrs
else:
model = field.parent.Meta.model
source = field.source_attrs
target = follow_field_source(model, source)
if callable(target):
schema = self._map_response_type_hint(target)
elif isinstance(target, models.Field):
schema = self._map_model_field(target, direction)
else:
assert False, f'ReadOnlyField target "{field}" must be property or model field'
return append_meta(schema, meta)
# DRF was not able to match the model field to an explicit SerializerField and therefore
# used its generic fallback serializer field that simply wraps the model field.
if isinstance(field, serializers.ModelField):
schema = self._map_model_field(field.model_field, direction)
return append_meta(schema, meta)
warn(f'could not resolve serializer field "{field}". Defaulting to "string"')
return append_meta(build_basic_type(OpenApiTypes.STR), meta)
def _map_min_max(self, field, content):
if field.max_value:
content['maximum'] = field.max_value
if field.min_value:
content['minimum'] = field.min_value
def _map_serializer(self, serializer, direction):
serializer = force_instance(serializer)
serializer_extension = OpenApiSerializerExtension.get_match(serializer)
if serializer_extension:
schema = serializer_extension.map_serializer(self, direction)
else:
schema = self._map_basic_serializer(serializer, direction)
return self._postprocess_serializer_schema(schema, serializer, direction)
def _postprocess_serializer_schema(self, schema, serializer, direction):
"""
postprocess generated schema for component splitting, if enabled.
does only apply to direct component schemas and not intermediate schemas
like components composed of sub-component via e.g. oneOf.
"""
if not spectacular_settings.COMPONENT_SPLIT_REQUEST:
return schema
properties = schema.get('properties', [])
required = schema.get('required', [])
for prop_name in list(properties):
if direction == 'request' and properties[prop_name].get('readOnly'):
del schema['properties'][prop_name]
if prop_name in required:
required.remove(prop_name)
if direction == 'response' and properties[prop_name].get('writeOnly'):
del schema['properties'][prop_name]
if prop_name in required:
required.remove(prop_name)
# remove empty listing as it violates schema specification
if 'required' in schema and not required:
del schema['required']
return schema
def _get_serializer_field_meta(self, field):
if not isinstance(field, serializers.Field):
return {}
meta = {}
if field.read_only:
meta['readOnly'] = True
if field.write_only:
meta['writeOnly'] = True
if field.allow_null:
meta['nullable'] = True
if field.default is not None and field.default != empty and not callable(field.default):
default = field.to_representation(field.default)
if isinstance(default, set):
default = list(default)
meta['default'] = default
if field.label and not is_trivial_string_variation(field.label, field.field_name):
meta['title'] = str(field.label)
if field.help_text:
meta['description'] = str(field.help_text)
return meta
def _map_basic_serializer(self, serializer, direction):
serializer = force_instance(serializer)
required = set()
properties = {}
for field in serializer.fields.values():
if isinstance(field, serializers.HiddenField):
continue
if field.field_name in get_override(serializer, 'exclude_fields', []):
continue
schema = self._map_serializer_field(field, direction)
# skip field if there is no schema for the direction
if schema is None:
continue
add_to_required = (
field.required
or (schema.get('readOnly') and not spectacular_settings.COMPONENT_NO_READ_ONLY_REQUIRED)
)
if add_to_required:
required.add(field.field_name)
self._map_field_validators(field, schema)
if field.field_name in get_override(serializer, 'deprecate_fields', []):
schema['deprecated'] = True
properties[field.field_name] = safe_ref(schema)
if is_patched_serializer(serializer, direction):
required = []
return build_object_type(
properties=properties,
required=required,
description=get_doc(serializer.__class__),
)
def _map_field_validators(self, field, schema):
for v in field.validators:
if isinstance(v, validators.EmailValidator):
schema['format'] = 'email'
elif isinstance(v, validators.URLValidator):
schema['format'] = 'uri'
elif isinstance(v, validators.RegexValidator):
pattern = v.regex.pattern.encode('ascii', 'backslashreplace').decode()
pattern = pattern.replace(r'\x', r'\u00') # unify escaping
pattern = pattern.replace(r'\Z', '$').replace(r'\A', '^') # ECMA anchors
schema['pattern'] = pattern
elif isinstance(v, validators.MaxLengthValidator):
attr_name = 'maxLength'
if isinstance(field, serializers.ListField):
attr_name = 'maxItems'
schema[attr_name] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MinLengthValidator):
attr_name = 'minLength'
if isinstance(field, serializers.ListField):
attr_name = 'minItems'
schema[attr_name] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MaxValueValidator):
schema['maximum'] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.MinValueValidator):
schema['minimum'] = v.limit_value() if callable(v.limit_value) else v.limit_value
elif isinstance(v, validators.DecimalValidator):
if v.max_digits:
digits = v.max_digits
if v.decimal_places is not None and v.decimal_places > 0:
digits -= v.decimal_places
schema['maximum'] = int(digits * '9') + 1
schema['minimum'] = -schema['maximum']
def _map_response_type_hint(self, method):
hint = get_override(method, 'field') or typing.get_type_hints(method).get('return')
if is_serializer(hint) or is_field(hint):
return self._map_serializer_field(force_instance(hint), 'response')
try:
return resolve_type_hint(hint)
except UnableToProceedError:
warn(
f'unable to resolve type hint for function "{method.__name__}". Consider '
f'using a type hint or @extend_schema_field. Defaulting to string.'
)
return build_basic_type(OpenApiTypes.STR)
def _get_paginator(self):
pagination_class = getattr(self.view, 'pagination_class', None)
if pagination_class:
return pagination_class()
return None
def map_parsers(self):
return list(dict.fromkeys([p.media_type for p in self.view.get_parsers()]))
def map_renderers(self, attribute):
assert attribute in ['media_type', 'format']
return list(dict.fromkeys([
getattr(r, attribute).split(';')[0]
for r in self.view.get_renderers()
if not isinstance(r, renderers.BrowsableAPIRenderer) and getattr(r, attribute, None)
]))
    def _get_serializer(self):
        """Best-effort retrieval of the view's serializer (class or instance).

        GenericAPIView gets the fast path; for plain APIView we probe
        progressively weaker conventions. Returns None (after logging) when
        nothing can be determined or retrieval raises.
        """
        view = self.view
        try:
            if isinstance(view, GenericAPIView):
                # try to circumvent queryset issues with calling get_serializer. if view has NOT
                # overridden get_serializer, its safe to use get_serializer_class.
                # (identity comparison of the unbound method detects overrides)
                if view.__class__.get_serializer == GenericAPIView.get_serializer:
                    return view.get_serializer_class()()
                return view.get_serializer()
            elif isinstance(view, APIView):
                # APIView does not implement the required interface, but be lenient and make
                # good guesses before giving up and emitting a warning.
                if callable(getattr(view, 'get_serializer', None)):
                    return view.get_serializer()
                elif callable(getattr(view, 'get_serializer_class', None)):
                    return view.get_serializer_class()()
                elif hasattr(view, 'serializer_class'):
                    return view.serializer_class
                else:
                    error(
                        'unable to guess serializer. This is graceful '
                        'fallback handling for APIViews. Consider using GenericAPIView as view base '
                        'class, if view is under your control. Ignoring view for now. '
                    )
            else:
                error('Encountered unknown view base class. Please report this issue. Ignoring for now')
        except Exception as exc:
            # broad by design: any user code failure must not break schema generation
            error(
                f'exception raised while getting serializer. Hint: '
                f'Is get_serializer_class() returning None or is get_queryset() not working without '
                f'a request? Ignoring the view for now. (Exception: {exc})'
            )
def get_examples(self):
return []
def _get_examples(self, serializer, direction, media_type, status_code=None, extras=None):
examples = self.get_examples()
if not examples:
if is_list_serializer(serializer):
examples = get_override(serializer.child, 'examples', [])
elif is_serializer(serializer):
examples = get_override(serializer, 'examples', [])
# additional examples provided via OpenApiResponse are merged with the other methods
extras = extras or []
filtered_examples = []
for example in examples + extras:
if direction == 'request' and example.response_only:
continue
if direction == 'response' and example.request_only:
continue
if media_type and media_type != example.media_type:
continue
if status_code and status_code not in example.status_codes:
continue
filtered_examples.append(example)
return build_examples_list(filtered_examples)
def _get_request_body(self):
# only unsafe methods can have a body
if self.method not in ('PUT', 'PATCH', 'POST'):
return None
request_serializer = self.get_request_serializer()
if isinstance(request_serializer, dict):
content = []
request_body_required = True
for media_type, serializer in request_serializer.items():
schema, partial_request_body_required = self._get_request_for_media_type(serializer)
examples = self._get_examples(serializer, 'request', media_type)
if schema is None:
continue
content.append((media_type, schema, examples))
request_body_required &= partial_request_body_required
else:
schema, request_body_required = self._get_request_for_media_type(request_serializer)
if schema is None:
return None
content = [
(media_type, schema, self._get_examples(request_serializer, 'request', media_type))
for media_type in self.map_parsers()
]
request_body = {
'content': {
media_type: build_media_type_object(schema, examples)
for media_type, schema, examples in content
}
}
if request_body_required:
request_body['required'] = request_body_required
return request_body
def _get_request_for_media_type(self, serializer):
serializer = force_instance(serializer)
if is_list_serializer(serializer):
if is_serializer(serializer.child):
component = self.resolve_serializer(serializer.child, 'request')
schema = build_array_type(component.ref)
else:
schema = build_array_type(self._map_serializer_field(serializer.child, 'request'))
request_body_required = True
elif is_serializer(serializer):
if self.method == 'PATCH':
# we simulate what DRF is doing: the entry serializer is set to partial
# for PATCH requests. serializer instances received via extend_schema
# may be reused; prevent race conditions by modifying a copy.
serializer = copy.copy(serializer)
serializer.partial = True
component = self.resolve_serializer(serializer, 'request')
if not component.schema:
# serializer is empty so skip content enumeration
return None, False
schema = component.ref
# request body is only required if any required property is not read-only
readonly_props = [
p for p, s in component.schema.get('properties', {}).items() if s.get('readOnly')
]
required_props = component.schema.get('required', [])
request_body_required = any(req not in readonly_props for req in required_props)
elif is_basic_type(serializer):
schema = build_basic_type(serializer)
request_body_required = False
else:
warn(
f'could not resolve request body for {self.method} {self.path}. Defaulting to generic '
'free-form object. (Maybe annotate a Serializer class?)'
)
schema = build_generic_type()
schema['description'] = 'Unspecified request body'
request_body_required = False
return schema, request_body_required
def _get_response_bodies(self):
response_serializers = self.get_response_serializers()
if (
is_serializer(response_serializers)
or is_basic_type(response_serializers)
or isinstance(response_serializers, OpenApiResponse)
):
if self.method == 'DELETE':
return {'204': {'description': _('No response body')}}
if is_create_operation(self.method, self.view):
return {'201': self._get_response_for_code(response_serializers, '201')}
return {'200': self._get_response_for_code(response_serializers, '200')}
elif isinstance(response_serializers, dict):
# custom handling for overriding default return codes with @extend_schema
responses = {}
for code, serializer in response_serializers.items():
if isinstance(code, tuple):
code, media_types = str(code[0]), code[1:]
else:
code, media_types = str(code), None
content_response = self._get_response_for_code(serializer, code, media_types)
if code in responses:
responses[code]['content'].update(content_response['content'])
else:
responses[code] = content_response
return responses
else:
warn(
f'could not resolve "{response_serializers}" for {self.method} {self.path}. '
f'Expected either a serializer or some supported override mechanism. '
f'Defaulting to generic free-form object.'
)
schema = build_basic_type(OpenApiTypes.OBJECT)
schema['description'] = _('Unspecified response body')
return {'200': self._get_response_for_code(schema, '200')}
    def _get_response_for_code(self, serializer, status_code, media_types=None):
        """Build one response object for a given status code.

        Unwraps OpenApiResponse containers, resolves the serializer to a
        schema, wraps list views in an array (and optionally a paginated
        component), and enumerates the renderer media types. Returns a dict
        with optional 'headers', 'content' and a 'description'.
        """
        if isinstance(serializer, OpenApiResponse):
            # unwrap container: carries response, description and extra examples
            serializer, description, examples = (
                serializer.response, serializer.description, serializer.examples
            )
        else:
            description, examples = '', []
        serializer = force_instance(serializer)
        headers = self._get_response_headers_for_code(status_code)
        headers = {'headers': headers} if headers else {}
        if not serializer:
            return {**headers, 'description': description or _('No response body')}
        elif is_list_serializer(serializer):
            if is_serializer(serializer.child):
                schema = self.resolve_serializer(serializer.child, 'response').ref
            else:
                schema = self._map_serializer_field(serializer.child, 'response')
        elif is_serializer(serializer):
            component = self.resolve_serializer(serializer, 'response')
            if not component.schema:
                # empty component was discarded: respond without a body
                return {**headers, 'description': description or _('No response body')}
            schema = component.ref
        elif is_basic_type(serializer):
            schema = build_basic_type(serializer)
        elif isinstance(serializer, dict):
            # bypass processing and use given schema directly
            schema = serializer
            # prevent invalid dict case in _is_list_view() as this not a status_code dict.
            serializer = None
        else:
            warn(
                f'could not resolve "{serializer}" for {self.method} {self.path}. Expected either '
                f'a serializer or some supported override mechanism. Defaulting to '
                f'generic free-form object.'
            )
            schema = build_basic_type(OpenApiTypes.OBJECT)
            schema['description'] = _('Unspecified response body')
        # list views with successful status codes get their schema wrapped in
        # an array unless the user explicitly disabled it via many=False
        if (
            self._is_list_view(serializer)
            and get_override(serializer, 'many') is not False
            and '200' <= status_code < '300'
        ):
            schema = build_array_type(schema)
            paginator = self._get_paginator()
            if (
                paginator
                and is_serializer(serializer)
                and (not is_list_serializer(serializer) or is_serializer(serializer.child))
            ):
                # named paginated wrapper component, e.g. "PaginatedUserList"
                paginated_name = f'Paginated{self._get_serializer_name(serializer, "response")}List'
                component = ResolvedComponent(
                    name=paginated_name,
                    type=ResolvedComponent.SCHEMA,
                    schema=paginator.get_paginated_response_schema(schema),
                    object=serializer.child if is_list_serializer(serializer) else serializer,
                )
                self.registry.register_on_missing(component)
                schema = component.ref
            elif paginator:
                # non-serializer schema: inline the paginated envelope
                schema = paginator.get_paginated_response_schema(schema)
        if not media_types:
            media_types = self.map_renderers('media_type')
        return {
            **headers,
            'content': {
                media_type: build_media_type_object(
                    schema,
                    self._get_examples(serializer, 'response', media_type, status_code, examples)
                )
                for media_type in media_types
            },
            'description': description
        }
def _get_response_headers_for_code(self, status_code) -> dict:
result = {}
for parameter in self.get_override_parameters():
if not isinstance(parameter, OpenApiParameter):
continue
if not parameter.response:
continue
if (
isinstance(parameter.response, list)
and status_code not in [str(code) for code in parameter.response]
):
continue
if is_basic_type(parameter.type):
schema = build_basic_type(parameter.type)
elif is_serializer(parameter.type):
schema = self.resolve_serializer(parameter.type, 'response').ref
else:
schema = parameter.type
if parameter.location not in [OpenApiParameter.HEADER, OpenApiParameter.COOKIE]:
warn(f'incompatible location type ignored for response parameter {parameter.name}')
parameter_type = build_parameter_type(
name=parameter.name,
schema=schema,
location=parameter.location,
required=parameter.required,
description=parameter.description,
enum=parameter.enum,
deprecated=parameter.deprecated,
style=parameter.style,
explode=parameter.explode,
default=parameter.default,
examples=build_examples_list(parameter.examples),
)
del parameter_type['name']
del parameter_type['in']
result[parameter.name] = parameter_type
return result
def _get_serializer_name(self, serializer, direction):
serializer_extension = OpenApiSerializerExtension.get_match(serializer)
if serializer_extension and serializer_extension.get_name():
# library override mechanisms
name = serializer_extension.get_name()
elif getattr(getattr(serializer, 'Meta', None), 'ref_name', None) is not None:
# local override mechanisms. for compatibility with drf-yasg we support meta ref_name,
# though we do not support the serializer inlining feature.
# https://drf-yasg.readthedocs.io/en/stable/custom_spec.html#serializer-meta-nested-class
name = serializer.Meta.ref_name
elif is_list_serializer(serializer):
return self._get_serializer_name(serializer.child, direction)
else:
name = serializer.__class__.__name__
if name.endswith('Serializer'):
name = name[:-10]
if is_patched_serializer(serializer, direction):
name = 'Patched' + name
if direction == 'request' and spectacular_settings.COMPONENT_SPLIT_REQUEST:
name = name + 'Request'
return name
def resolve_serializer(self, serializer, direction) -> ResolvedComponent:
assert is_serializer(serializer), (
f'internal assumption violated because we expected a serializer here and instead '
f'got a "{serializer}". This may be the result of another app doing some unexpected '
f'magic or an invalid internal call. Feel free to report this as a bug at '
f'https://github.com/tfranzel/drf-spectacular/issues '
)
serializer = force_instance(serializer)
with add_trace_message(serializer.__class__.__name__):
component = ResolvedComponent(
name=self._get_serializer_name(serializer, direction),
type=ResolvedComponent.SCHEMA,
object=serializer,
)
if component in self.registry:
return self.registry[component] # return component with schema
self.registry.register(component)
component.schema = self._map_serializer(serializer, direction)
discard_component = (
# components with empty schemas serve no purpose
not component.schema
# concrete component without properties are likely only transactional so discard
or (
component.schema.get('type') == 'object'
and not component.schema.get('properties')
and 'additionalProperties' not in component.schema
)
)
if discard_component:
del self.registry[component]
return ResolvedComponent(None, None) # sentinel
return component
| 44.990924 | 106 | 0.618277 |
0888acbe8e9f0efbd617dfa27e88021454b48eaa | 1,424 | py | Python | setup.py | joshtrivedi/face-detection-tflite | 6ae3bc770dd029af0c1c716d46ace6c8ced05fef | [
"MIT"
] | 53 | 2021-03-23T01:32:47.000Z | 2022-03-23T10:52:28.000Z | setup.py | joshtrivedi/face-detection-tflite | 6ae3bc770dd029af0c1c716d46ace6c8ced05fef | [
"MIT"
] | 6 | 2021-04-12T21:41:14.000Z | 2021-08-25T04:20:30.000Z | setup.py | joshtrivedi/face-detection-tflite | 6ae3bc770dd029af0c1c716d46ace6c8ced05fef | [
"MIT"
] | 14 | 2021-04-05T13:10:44.000Z | 2022-02-17T10:33:01.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright © 2021 Patrick Levin
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from setuptools import setup, __version__
from pkg_resources import parse_version

# All project configuration lives in setup.cfg; the declarative features used
# there require a sufficiently recent setuptools, so verify that up front.
minimum_version = parse_version('42.0.0')
installed_version = parse_version(__version__)
if installed_version < minimum_version:
    raise RuntimeError(
        f'Package setuptools must be at least version {minimum_version}')

# Metadata is declarative (setup.cfg); this call only triggers the build.
setup()
| 41.882353 | 80 | 0.769663 |
06acf35cafeeadeb5172c8e8abffe1fad72cd576 | 17,798 | py | Python | pyzoo/zoo/util/tf.py | limn2o4/analytics-zoo | 78d6ce10976a7e1320ff5ebdf431db93a439ec56 | [
"Apache-2.0"
] | 2,970 | 2017-06-08T00:24:43.000Z | 2022-03-30T12:14:55.000Z | pyzoo/zoo/util/tf.py | limn2o4/analytics-zoo | 78d6ce10976a7e1320ff5ebdf431db93a439ec56 | [
"Apache-2.0"
] | 3,530 | 2017-05-09T08:29:10.000Z | 2022-03-21T02:11:45.000Z | pyzoo/zoo/util/tf.py | limn2o4/analytics-zoo | 78d6ce10976a7e1320ff5ebdf431db93a439ec56 | [
"Apache-2.0"
] | 972 | 2017-05-09T07:03:50.000Z | 2022-03-23T07:48:48.000Z | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import copy
import tempfile
import re
import shutil
from zoo.common.utils import put_local_file_to_remote, get_remote_file_to_local, get_file_list,\
is_local_path
def process_grad(grad):
    """Normalize a gradient tensor for consumption by the Java API.

    ``None`` gradients pass through untouched.  ``IndexedSlices`` gradients
    are converted to dense tensors because the Java API cannot represent
    them; the conversion may be expensive but there is no workaround.
    """
    from tensorflow.python.framework import ops
    import tensorflow as tf
    if grad is None:
        return None
    converted = ops.convert_to_tensor_or_indexed_slices(grad)
    if not isinstance(converted, ops.IndexedSlices):
        return converted
    # Densify: scatter-add the slice values into a tensor covering the
    # full dense shape along the first dimension.
    return tf.unsorted_segment_sum(converted.values, converted.indices,
                                   converted.dense_shape[0])
def _to_operation_name(name):
return name.split(":")[0]
def _to_floats(vs):
return [float(v) for v in vs]
def export_tf(sess, folder, inputs, outputs,
              generate_backward=False, allow_non_differentiable_input=True):
    """
    Export the frozen tensorflow graph as well as the inputs/outputs information
    to the folder for inference.
    This function will
    1. freeze the graph (replace all variables with constants)
    2. strip all unused node as specified by inputs and outputs
    3. add placeholder nodes as needed
    4. write the frozen graph and inputs/outputs names to the folder
    Note: There should not be any queuing operation between inputs and outputs
    :param sess: tensorflow session holding the variables to be saved
    :param folder: the folder where graph file and inputs/outputs information are saved
    :param inputs: a list of tensorflow tensors that will be fed during inference
    :param outputs: a list of tensorflow tensors that will be fetched during inference
    :param generate_backward: if True, also attach gradient ops to the frozen
        graph and record the tensor names needed for training in the metadata
    :param allow_non_differentiable_input: if True, a non-differentiable input
        gets a zero gradient; if False a ValueError is raised for it
    :return: None; writes "frozen_inference_graph.pb" and "graph_meta.json"
        into ``folder``
    """
    from tensorflow.python.platform import gfile
    import tensorflow as tf
    output_node_names = list({t.op.name for t in outputs})
    graph_def = sess.graph_def
    graph = sess.graph
    # clear device specifications
    for node in graph_def.node:
        node.device = ""
    # Inputs that are not already placeholders must be replaced with
    # placeholders during stripping; collect their names and dtype enums.
    non_placeholder_input_names = []
    type_enums = []
    for input_tensor in inputs:
        if input_tensor.op.type not in ["Placeholder", "PlaceholderWithDefault"]:
            non_placeholder_input_names.append(input_tensor.name)
            type_enums.append(input_tensor.dtype.as_datatype_enum)
    output_names = [o.name for o in outputs]
    all_variables = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    import zoo.util.tf_graph_util as graph_util
    # freeze graph
    frozen_graph_def = graph_util.convert_variables_to_constants(
        sess,
        graph_def,
        output_node_names
    )
    # Drop nodes unreachable from the outputs; old_names2new maps original
    # input tensor names to the names of newly created placeholder tensors.
    optimized_graph_def, old_names2new = strip_unused(frozen_graph_def,
                                                      non_placeholder_input_names,
                                                      output_names,
                                                      type_enums)
    nodes_of_graph = []
    for node in optimized_graph_def.node:
        nodes_of_graph.append(node.name + ":0")
    nodes_of_graph_set = set(nodes_of_graph)
    # Resolve final input tensor names, gathering every input that no longer
    # exists in the stripped graph so they can be reported together.
    new_input_names = []
    error_input_nodes = []
    for t in inputs:
        if t.name in old_names2new:
            if old_names2new[t.name] not in nodes_of_graph_set:
                error_input_nodes.append("\"" + (t.name)[0:-2] + "\"")
            new_input_names.append(old_names2new[t.name])
        else:
            if t.name not in nodes_of_graph_set:
                error_input_nodes.append("\"" + (t.name)[0:-2] + "\"")
            new_input_names.append(t.name)
    if error_input_nodes:
        error_nodes_name = " and ".join(error_input_nodes)
        raise ValueError("Node %s doesn't exist in the graph" % str(error_nodes_name))
    # check all placeholder in the graph are listed in the new_input_names:
    new_input_nodes = {name.split(":")[0] for name in new_input_names}
    for node in optimized_graph_def.node:
        if node.op == "Placeholder" and node.name not in new_input_nodes:
            raise ValueError(
                "Node %s is a Placeholder but not listed in inputs, inputs are %s"
                % (node.name, inputs))
    temp_tensors = None
    used_variables = []
    grad_variables = []
    grad_inputs = []
    if generate_backward:
        # Only variables that survived freezing/stripping are relevant.
        nodes = set([n.name for n in optimized_graph_def.node])
        for v in all_variables:
            if v.op.name in nodes:
                used_variables.append(v.name)
        # Re-import the optimized graph into a fresh graph and attach the
        # gradient ops there, so the exported GraphDef contains them.
        with tf.Graph().as_default() as g:
            tf.import_graph_def(optimized_graph_def, name='')
            output_tensors = [g.get_tensor_by_name(x) for x in output_names]
            # One "<output>_grad" placeholder per output to feed upstream grads.
            grad_output_placeholders = [tf.placeholder(dtype=x.dtype,
                                                       name=x.name.split(":")[0] + "_grad",
                                                       shape=x.shape) for x in output_tensors]
            variables = [g.get_tensor_by_name(x) for x in used_variables]
            inputs = [g.get_tensor_by_name(x) for x in new_input_names]
            grads = tf.gradients(output_tensors, variables + inputs,
                                 grad_ys=grad_output_placeholders)
            # Densify any IndexedSlices gradients (see process_grad).
            grads = [process_grad(grad) for grad in grads]
            temp_tensors = _find_temp_tensors(grads, nodes)
            # tf.gradients returns variable grads first, then input grads.
            grad_variables = [x.name for x in grads[0:len(variables)]]
            grad_inputs = []
            for i in range(len(variables), len(grads)):
                grad = grads[i]
                if grad is not None:
                    grad_inputs.append(grad.name)
                else:
                    # if input is not differentiable, we just return zero
                    input_tensor = inputs[i - len(variables)]
                    if allow_non_differentiable_input:
                        zero_grad = tf.zeros(shape=tf.shape(input_tensor))
                        grad_inputs.append(zero_grad.name)
                    else:
                        raise ValueError(
                            "input tensor: %s is not differentiable" % input_tensor.name)
        optimized_graph_def = g.as_graph_def()
    # Serialize the frozen graph plus a JSON manifest of tensor names.
    if not os.path.isdir(folder):
        os.makedirs(folder)
    with gfile.GFile(os.path.join(folder, "frozen_inference_graph.pb"), "wb") as f:
        f.write(optimized_graph_def.SerializeToString())
    meta = {
        "input_names": new_input_names,
        "output_names": output_names
    }
    if generate_backward:
        meta["temp_tensors"] = list(temp_tensors)
        meta["variables"] = used_variables
        meta["grad_variables"] = grad_variables
        meta["grad_inputs"] = grad_inputs
    with open(os.path.join(folder, "graph_meta.json"), "w") as f:
        f.write(json.dumps(meta))
def _find_temp_tensors(grads, forward_ops):
    """Collect names of tensors that the gradient computation needs and that
    the forward pass (ops listed in *forward_ops*) already produced.

    Walks breadth-first from each gradient tensor back through its op
    inputs, stopping at placeholders and at tensors produced by forward ops.
    """
    import sys
    # Python 2 ships the queue module under a capitalized name.
    if sys.version[0] == '2':
        import Queue as queue_module
    else:
        import queue as queue_module
    pending = queue_module.Queue()
    for grad in grads:
        pending.put(grad)
    temp_tensors = set()
    visited = set()
    while not pending.empty():
        tensor = pending.get()
        # A None gradient means the corresponding input was not differentiable.
        if tensor is None:
            continue
        visited.add(tensor.name)
        if tensor.op.type == "Placeholder":
            continue
        if tensor.op.name in forward_ops:
            temp_tensors.add(tensor.name)
            continue
        for upstream in tensor.op.inputs:
            # Graphs can contain cycles (e.g. tf.while_loop), so only enqueue
            # tensors we have not seen yet.
            if upstream.name not in visited:
                pending.put(upstream)
    return temp_tensors
def strip_unused(input_graph_def, input_tensor_names, output_tensor_names,
                 placeholder_type_enum):
    """Removes unused nodes from a GraphDef.
    Note: nodes of ``input_graph_def`` may have their input edges rewired in
    place to point at the newly created placeholders.
    Args:
      input_graph_def: A graph with nodes we want to prune.
      input_tensor_names: A list of the nodes we use as inputs.
      output_tensor_names: A list of the output nodes.
      placeholder_type_enum: The AttrValue enum for the placeholder data type, or
          a list that specifies one value per input node name.
    Returns:
      A `GraphDef` with all unnecessary ops removed. and a map containing the old input
      names to the new input names
    Raises:
      ValueError: If any element in `input_node_names` refers to a tensor instead
        of an operation.
      KeyError: If any element in `input_node_names` is not found in the graph.
    """
    from tensorflow.core.framework import attr_value_pb2
    from tensorflow.core.framework import graph_pb2
    from tensorflow.core.framework import node_def_pb2
    # Tensor names must carry a port suffix (e.g. "op:0"); a bare name
    # denotes an Operation, which is not accepted here.
    for name in input_tensor_names:
        if ":" not in name:
            raise ValueError("Input '%s' appears to refer to a Operation, "
                             "not a Tensor." % name)
    old2new = {}
    # Here we replace the nodes we're going to override as inputs with
    # placeholders so that any unused nodes that are inputs to them are
    # automatically stripped out by extract_sub_graph().
    not_found = {name for name in input_tensor_names}
    input_node_names = {name.split(":")[0] for name in input_tensor_names}
    output_node_names = list({name.split(":")[0] for name in output_tensor_names})
    inputs_replaced_graph_def = graph_pb2.GraphDef()
    for node in input_graph_def.node:
        if node.name not in input_node_names:
            for i in range(len(node.input)):
                if _append_port(node.input[i]) in input_tensor_names:
                    old_name = _append_port(node.input[i])
                    not_found.remove(old_name)
                    # Derive a distinct node name from the tensor name by
                    # replacing the port separator.
                    new_input_name = node.input[i].replace(":", "_")
                    placeholder_node = node_def_pb2.NodeDef()
                    placeholder_node.op = "Placeholder"
                    placeholder_node.name = new_input_name
                    # dtype comes either per-input (list) or shared (scalar enum).
                    if isinstance(placeholder_type_enum, list):
                        input_node_index = input_tensor_names.index(old_name)
                        placeholder_node.attr["dtype"].CopyFrom(
                            attr_value_pb2.AttrValue(type=placeholder_type_enum[
                                input_node_index]))
                    else:
                        placeholder_node.attr["dtype"].CopyFrom(
                            attr_value_pb2.AttrValue(type=placeholder_type_enum))
                    # Carry over recorded output-shape metadata when present.
                    if "_output_shapes" in node.attr:
                        placeholder_node.attr["_output_shapes"].CopyFrom(
                            node.attr["_output_shapes"])
                    node.input[i] = new_input_name
                    old2new[old_name] = new_input_name + ":0"
                    inputs_replaced_graph_def.node.extend([placeholder_node])
        inputs_replaced_graph_def.node.extend([copy.deepcopy(node)])
    if not_found:
        raise KeyError("The following input nodes were not found: %s\n" % not_found)
    import zoo.util.tf_graph_util as graph_util
    # Drop every node not reachable from the requested outputs.
    output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def,
                                                    output_node_names)
    return output_graph_def, old2new
def _append_port(input_name):
if input_name.find(":") == -1:
return input_name + ":0"
else:
return input_name
def change_path_in_tf_checkpoint(checkpoint_path, ckpt_name):
    """Rewrite a TensorFlow ``checkpoint`` state file in place so that its
    ``model_checkpoint_path`` and ``all_model_checkpoint_paths`` entries
    reference the bare checkpoint name instead of an absolute path.

    :param checkpoint_path: path of the ``checkpoint`` state file to rewrite
    :param ckpt_name: checkpoint name to substitute into the file
    """
    # Compile the patterns once instead of on every line; uses the
    # module-level ``re`` import (the function-local import was redundant).
    model_path_re = re.compile("^model_checkpoint_path: \"(.*)\"$")
    all_paths_re = re.compile("^all_model_checkpoint_paths: \"(.*)\"$")
    with open(checkpoint_path) as f:
        new_lines = []
        # replace model_checkpoint_path and all_model_checkpoint_paths to checkpoint name
        # instead of the absolute checkpoint path
        for line in f:
            if model_path_re.match(line):
                new_lines.append("model_checkpoint_path: \"{}\"\n".format(ckpt_name))
            elif all_paths_re.match(line):
                new_lines.append("all_model_checkpoint_paths: \"{}\"\n".format(ckpt_name))
            else:
                new_lines.append(line)
    with open(checkpoint_path, 'w') as f:
        f.writelines(new_lines)
def save_tf_checkpoint(sess, checkpoint_path, saver=None):
    """
    Save tf checkpoint without using native tensorflow remote access method.
    :param sess: tf session to be saved.
    :param checkpoint_path: checkpoint path. Could be local, hdfs, s3 filesystems.
    :param saver: tf saver to save checkpoint
    """
    import tensorflow as tf
    if is_local_path(checkpoint_path):
        if saver is None:
            saver = tf.train.Saver()
        saver.save(sess, checkpoint_path)
    else:
        ckpt_name = os.path.basename(checkpoint_path)
        remote_dir = os.path.dirname(checkpoint_path)
        # Write the checkpoint to a local staging directory first.
        temp = tempfile.mkdtemp()
        try:
            if saver is None:
                saver = tf.train.Saver()
            saver.save(sess, os.path.join(temp, ckpt_name))
            # Make the 'checkpoint' state file reference the bare checkpoint
            # name rather than the absolute local staging path.
            change_path_in_tf_checkpoint(os.path.join(temp, "checkpoint"), ckpt_name)
            # Upload every staged file to the remote directory.
            for file_name in os.listdir(temp):
                put_local_file_to_remote(os.path.join(temp, file_name),
                                         os.path.join(remote_dir, file_name),
                                         over_write=True)
        finally:
            # Always remove the staging directory, even if saving or the
            # upload fails (the original code leaked it on error).
            shutil.rmtree(temp)
def get_checkpoint_state(checkpoint_dir):
    """
    Get tf checkpoint state from checkpoint directory without using native tensorflow accessing
    remote method.
    :param checkpoint_dir: tensorflow checkpoint directory. Could be local, hdfs, s3 filesystems.
    :return: tf checkpoint protobuf, or None when no usable checkpoint exists
    """
    import tensorflow as tf
    if is_local_path(checkpoint_dir):
        return tf.train.get_checkpoint_state(checkpoint_dir)
    else:
        # check if checkpoint file exists
        file_list = get_file_list(checkpoint_dir)
        has_checkpoint = False
        for file in file_list:
            if os.path.basename(file) == 'checkpoint':
                has_checkpoint = True
                break
        if not has_checkpoint:
            return None
        # get checkpoint file
        # NOTE(review): if any remote access below raises, this temporary
        # directory is leaked; a try/finally would be safer.
        temp = tempfile.mkdtemp()
        get_remote_file_to_local(os.path.join(checkpoint_dir, "checkpoint"),
                                 os.path.join(temp, "checkpoint"))
        ckpt_name = None
        with open(os.path.join(temp, "checkpoint")) as f:
            lines = f.readlines()
            # get checkpoint name from 'checkpoint' file
            for line in lines:
                m = re.compile("^model_checkpoint_path: \"(.*)\"$").match(line)
                if m:
                    ckpt_name = m.group(1)
                    break
        if ckpt_name is None:
            shutil.rmtree(temp)
            return None
        # filter checkpoint files
        checkpoint_files = [file for file in file_list
                            if os.path.basename(file).startswith(ckpt_name)]
        if not checkpoint_files:
            shutil.rmtree(temp)
            return None
        # get checkpoint files to local
        [get_remote_file_to_local(file, os.path.join(temp, os.path.basename(file)))
         for file in checkpoint_files]
        # get checkpoint state
        ckpt = tf.train.get_checkpoint_state(temp)
        if not ckpt:
            shutil.rmtree(temp)
            return None
        # Rewrite the local staging paths back to the remote locations before
        # handing the protobuf to the caller.
        ckpt.model_checkpoint_path = os.path.join(checkpoint_dir, ckpt_name)
        ckpt.all_model_checkpoint_paths[:] = [os.path.join(checkpoint_dir, ckpt_name)]
        shutil.rmtree(temp)
        return ckpt
def load_tf_checkpoint(sess, checkpoint_path, saver=None):
    """
    Load tensorflow checkpoint from checkpoint path without using native tensorflow accessing
    remote method.
    :param sess: tensorflow session to be loaded to.
    :param checkpoint_path: tensorflow checkpoint path. Could be local, hdfs, s3 filesystems.
    :param saver: tensorflow saver to load checkpoint
    """
    import tensorflow as tf
    if is_local_path(checkpoint_path):
        if saver is None:
            saver = tf.train.Saver()
        saver.restore(sess, checkpoint_path)
    else:
        ckpt_name = os.path.basename(checkpoint_path)
        checkpoint_dir = os.path.dirname(checkpoint_path)
        # List the remote directory and keep only this checkpoint's files.
        file_list = get_file_list(checkpoint_dir)
        checkpoint_files = [file for file in file_list
                            if os.path.basename(file).startswith(ckpt_name)]
        # Stage the checkpoint files in a local temporary directory.
        temp = tempfile.mkdtemp()
        for remote_file in checkpoint_files:
            get_remote_file_to_local(
                remote_file, os.path.join(temp, os.path.basename(remote_file)))
        if saver is None:
            saver = tf.train.Saver()
        try:
            saver.restore(sess, os.path.join(temp, ckpt_name))
        finally:
            # Always remove the staging directory; any restore exception
            # propagates unchanged (the original
            # ``except Exception as e: raise e`` clause was redundant).
            shutil.rmtree(temp)
| 39.030702 | 99 | 0.625576 |
b5c136e59ba16930b10fd7e5395b82a5b6af93d5 | 1,754 | py | Python | sahara/plugins/mapr/services/drill/drill.py | esikachev/sahara-backup | a470fa6aec5f1009d41d82fabc1e5d64874aa213 | [
"Apache-2.0"
] | null | null | null | sahara/plugins/mapr/services/drill/drill.py | esikachev/sahara-backup | a470fa6aec5f1009d41d82fabc1e5d64874aa213 | [
"Apache-2.0"
] | null | null | null | sahara/plugins/mapr/services/drill/drill.py | esikachev/sahara-backup | a470fa6aec5f1009d41d82fabc1e5d64874aa213 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import sahara.plugins.mapr.domain.node_process as np
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.util.commands as cmd
import sahara.plugins.mapr.util.validation_utils as vu
# Descriptor for the Drill daemon ("drill-bits") that this service manages.
DRILL = np.NodeProcess(
    name='drill-bits',
    ui_name='Drill',
    package='mapr-drill',
    open_ports=[]
)
@six.add_metaclass(s.Single)
class Drill(s.Service):
    """Drill service definition for MapR clusters.

    The ``s.Single`` metaclass makes the service a singleton, so every
    lookup shares one ``Drill`` instance.
    """
    def __init__(self):
        super(Drill, self).__init__()
        self._name = 'drill'
        self._ui_name = 'Drill'
        self._version = '0.7'
        self._node_processes = [DRILL]
        # Web UI endpoint exposed by each drill-bits instance (port 8047).
        self._ui_info = [('Drill', DRILL, 'http://%s:8047')]
        self._validation_rules = [vu.at_least(1, DRILL)]
    def install(self, cluster_context, instances):
        """Intentionally a no-op: installation is deferred to post_start."""
        # Drill requires running cluster
        pass
    def post_start(self, cluster_context, instances):
        """Install and configure Drill once the cluster is running."""
        instances = instances or cluster_context.get_instances(DRILL)
        super(Drill, self).install(cluster_context, instances)
        for instance in instances:
            cmd.chown(instance, 'mapr:mapr', self.service_dir(cluster_context))
            cmd.re_configure_sh(instance, cluster_context)
| 33.09434 | 79 | 0.704105 |
57520c0cea3875d54802b6bcb8d156ff910b7717 | 3,921 | py | Python | examples/cursorcontrol_buttons_text.py | FoamyGuy/Adafruit_CircuitPython_CursorControl | 386e235c06054346f795425e02cce05a494d17be | [
"MIT"
] | 3 | 2021-07-21T20:11:30.000Z | 2021-12-31T21:26:48.000Z | examples/cursorcontrol_buttons_text.py | FoamyGuy/Adafruit_CircuitPython_CursorControl | 386e235c06054346f795425e02cce05a494d17be | [
"MIT"
] | 19 | 2019-06-25T22:20:26.000Z | 2022-03-01T08:21:31.000Z | examples/cursorcontrol_buttons_text.py | FoamyGuy/Adafruit_CircuitPython_CursorControl | 386e235c06054346f795425e02cce05a494d17be | [
"MIT"
] | 9 | 2019-07-02T18:17:28.000Z | 2022-02-26T19:58:14.000Z | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import displayio
from adafruit_button import Button
from adafruit_display_text import label
import terminalio
from adafruit_cursorcontrol.cursorcontrol import Cursor
from adafruit_cursorcontrol.cursorcontrol_cursormanager import CursorManager
# Create the display
display = board.DISPLAY
# Create the display context
splash = displayio.Group()
# Use the built-in system font
font = terminalio.FONT
##########################################################################
# Make a background color fill (single-color bitmap; 0x404040 is dark grey)
color_bitmap = displayio.Bitmap(display.width, display.height, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0x404040
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
splash.append(bg_sprite)
##########################################################################
# Set up button/label properties
BUTTON_WIDTH = 80
BUTTON_HEIGHT = 40
BUTTON_MARGIN = 20
# [x, y] positions for the header and value labels (mutated below for
# small displays)
LBL_HEADER = [100, 20]
LBL_TEXT = [120, 40]
# Resize buttons for small display (PyGamer)
if display.width < 240:
    BUTTON_WIDTH = int(BUTTON_WIDTH / 2)
    BUTTON_HEIGHT = int(BUTTON_HEIGHT / 2)
    BUTTON_MARGIN = int(BUTTON_MARGIN / 2)
    LBL_HEADER[0] -= 75
    LBL_HEADER[1] -= 10
    LBL_TEXT[0] -= 70
    LBL_TEXT[1] += 55
# Create the buttons
# NOTE: list order matters -- the event loop below maps indices 0..3 to
# Speed+/Speed-/Scale+/Scale-.
buttons = []
button_speed_inc = Button(
    x=BUTTON_MARGIN,
    y=BUTTON_MARGIN + BUTTON_HEIGHT,
    width=BUTTON_WIDTH,
    height=BUTTON_HEIGHT,
    label="Speed+",
    label_font=font,
)
buttons.append(button_speed_inc)
button_speed_dec = Button(
    x=BUTTON_MARGIN,
    y=BUTTON_MARGIN * 4 + BUTTON_HEIGHT,
    width=BUTTON_WIDTH,
    height=BUTTON_HEIGHT,
    label="Speed-",
    label_font=font,
)
buttons.append(button_speed_dec)
button_scale_pos = Button(
    x=BUTTON_MARGIN * 3 + 2 * BUTTON_WIDTH,
    y=BUTTON_MARGIN + BUTTON_HEIGHT,
    width=BUTTON_WIDTH,
    height=BUTTON_HEIGHT,
    label="Scale+",
    label_font=font,
    style=Button.SHADOWRECT,
)
buttons.append(button_scale_pos)
button_scale_neg = Button(
    x=BUTTON_MARGIN * 3 + 2 * BUTTON_WIDTH,
    y=BUTTON_MARGIN * 4 + BUTTON_HEIGHT,
    width=BUTTON_WIDTH,
    height=BUTTON_HEIGHT,
    label="Scale-",
    label_font=font,
    style=Button.SHADOWRECT,
)
buttons.append(button_scale_neg)
# Show the button
for b in buttons:
    splash.append(b.group)
# Create a text label
text_label = label.Label(
    font, text="CircuitPython Cursor!", color=0x00FF00, x=LBL_HEADER[0], y=LBL_HEADER[1]
)
splash.append(text_label)
# Labels whose .text is refreshed on every pass of the event loop below
text_speed = label.Label(font, color=0x00FF00, x=LBL_TEXT[0], y=LBL_TEXT[1])
splash.append(text_speed)
text_scale = label.Label(font, color=0x00FF00, x=LBL_TEXT[0], y=LBL_TEXT[1] + 20)
splash.append(text_scale)
# initialize the mouse cursor object
mouse_cursor = Cursor(display, display_group=splash)
# initialize the cursormanager
cursor = CursorManager(mouse_cursor)
# show displayio group
display.show(splash)
# last button that was clicked; deselected again once the click releases
prev_btn = None
while True:
    cursor.update()
    if cursor.is_clicked is True:
        for i, b in enumerate(buttons):
            if b.contains((mouse_cursor.x, mouse_cursor.y)):
                b.selected = True
                print("Button %d pressed" % i)
                if i == 0:  # Increase the cursor speed
                    mouse_cursor.speed += 1
                elif i == 1:  # Decrease the cursor speed
                    mouse_cursor.speed -= 1
                if i == 2:  # Increase the cursor scale
                    mouse_cursor.scale += 1
                elif i == 3:  # Decrease the cursor scale
                    mouse_cursor.scale -= 1
                prev_btn = b
    elif prev_btn is not None:
        prev_btn.selected = False
    # refresh the on-screen speed/scale readouts every iteration
    text_speed.text = "Speed: {0}px".format(mouse_cursor.speed)
    text_scale.text = "Scale: {0}px".format(mouse_cursor.scale)
    time.sleep(0.1)
| 27.612676 | 88 | 0.667687 |
e1c867e749be8208294892b3f586c662339fc5d5 | 298 | py | Python | main.py | enesusta/eksi-crawler | 16c3f546c9ce41b46753de36e9886f24d3408e04 | [
"MIT"
] | 5 | 2020-11-23T19:43:32.000Z | 2022-02-06T11:44:21.000Z | main.py | enesusta/eksi-crawler | 16c3f546c9ce41b46753de36e9886f24d3408e04 | [
"MIT"
] | null | null | null | main.py | enesusta/eksi-crawler | 16c3f546c9ce41b46753de36e9886f24d3408e04 | [
"MIT"
] | null | null | null | from crawler import entries
# NOTE(review): the file handle is never closed or flushed explicitly;
# wrapping this in a with-statement would be safer.
f = open('eksi.md', 'w+', encoding="utf-8")
# markdown table header row and separator
f.write('| |\n')
f.write('| -- |')
for i in range(1, 2000):
    # NOTE(review): 'list' shadows the builtin; also, 'entries' appears to
    # signal "no content" with the literal int 404 (see comparison below).
    list = entries(i)
    if list != 404: # if there is content to show.
        for entry in list:
            f.write('\n|' + entry + '|')
else:
break | 21.285714 | 50 | 0.520134 |
500cc2ad403ada5fa714d5a703c1e30798848e15 | 381 | py | Python | sample/create_table/sample_create_table_from_dataframe.py | thombashi/SimpleSQLite | 0cc3a4902a17334b80ff3aa43b01ff2aeca193cf | [
"MIT"
] | 126 | 2016-02-21T17:14:25.000Z | 2022-03-28T23:25:27.000Z | sample/create_table/sample_create_table_from_dataframe.py | thombashi/SimpleSQLite | 0cc3a4902a17334b80ff3aa43b01ff2aeca193cf | [
"MIT"
] | 21 | 2016-03-20T02:24:11.000Z | 2021-09-29T12:46:31.000Z | sample/create_table/sample_create_table_from_dataframe.py | thombashi/SimpleSQLite | 0cc3a4902a17334b80ff3aa43b01ff2aeca193cf | [
"MIT"
] | 18 | 2016-03-14T00:56:34.000Z | 2021-07-21T07:24:49.000Z | #!/usr/bin/env python3
import pandas
from simplesqlite import SimpleSQLite
def main():
    """Create (or open) pandas_df.sqlite and load a small sample DataFrame
    into a table named "pandas_df"."""
    con = SimpleSQLite("pandas_df.sqlite")
    con.create_table_from_dataframe(
        pandas.DataFrame(
            [[0, 0.1, "a"], [1, 1.1, "bb"], [2, 2.2, "ccc"]], columns=["id", "value", "name"]
        ),
        table_name="pandas_df",
    )
if __name__ == "__main__":
    main()
| 18.142857 | 93 | 0.572178 |
5bebd3384a6dcdd7733ba2f081958d1f17795f94 | 142 | py | Python | Ogrenciler/Burcu/01TEMELPYTHONBILGILER/soru3.py | ProEgitim/Python-Dersleri-BEM | b25e9fdb1fa3026925a46b2fcbcba348726b775c | [
"MIT"
] | 1 | 2021-04-18T17:35:22.000Z | 2021-04-18T17:35:22.000Z | Ogrenciler/Burcu/01TEMELPYTHONBILGILER/soru3.py | waroi/Python-Dersleri-BEM | b25e9fdb1fa3026925a46b2fcbcba348726b775c | [
"MIT"
] | null | null | null | Ogrenciler/Burcu/01TEMELPYTHONBILGILER/soru3.py | waroi/Python-Dersleri-BEM | b25e9fdb1fa3026925a46b2fcbcba348726b775c | [
"MIT"
] | 2 | 2021-04-18T18:22:26.000Z | 2021-04-24T17:16:19.000Z | """
iki sayı alın ve yerlerini değişin
"""
# Read two numbers and print them with their positions exchanged.
a: float = float(input("a= "))
b: float = float(input("b= "))
# Idiomatic tuple swap -- replaces the original three-statement swap
# through a temporary variable; the printed output is identical.
a, b = b, a
print(a, b)
839095c9bf123fa85c972440f952244ab39d4982 | 172 | py | Python | Python/quick_zip.py | tjnorred/Infrastructure | d56382272c3d35ac2af93540b24103e3fcc409f5 | [
"MIT"
] | null | null | null | Python/quick_zip.py | tjnorred/Infrastructure | d56382272c3d35ac2af93540b24103e3fcc409f5 | [
"MIT"
] | null | null | null | Python/quick_zip.py | tjnorred/Infrastructure | d56382272c3d35ac2af93540b24103e3fcc409f5 | [
"MIT"
] | null | null | null |
import zipfile as zf
SOURCE = r'<path_to_zip'
DESTINATION = r'<path_to_dest>'
with zf.ZipFile(SOURCE, 'r') as zf:
zf.extractall(DESTINATION)
zf.close() #cleanup
| 17.2 | 35 | 0.697674 |
d64694040f657acd255e20d9fffe2c54bb7765e8 | 2,522 | py | Python | test/python/algorithms/test_entangler_map.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,599 | 2018-07-10T10:59:12.000Z | 2022-03-31T23:56:25.000Z | test/python/algorithms/test_entangler_map.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 5,244 | 2018-07-10T06:20:13.000Z | 2022-03-31T22:18:48.000Z | test/python/algorithms/test_entangler_map.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,409 | 2018-07-10T02:16:12.000Z | 2022-03-31T09:01:32.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Entangler Map """
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from qiskit.utils import get_entangler_map, validate_entangler_map
class TestEntanglerMap(QiskitAlgorithmsTestCase):
    """Test Entangler Map"""
    def test_map_type_linear(self):
        """map type linear test"""
        ref_map = [[0, 1], [1, 2], [2, 3]]
        entangler_map = get_entangler_map("linear", 4)
        for (ref_src, ref_targ), (exp_src, exp_targ) in zip(ref_map, entangler_map):
            self.assertEqual(ref_src, exp_src)
            self.assertEqual(ref_targ, exp_targ)
    def test_map_type_full(self):
        """map type full test"""
        ref_map = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
        entangler_map = get_entangler_map("full", 4)
        for (ref_src, ref_targ), (exp_src, exp_targ) in zip(ref_map, entangler_map):
            self.assertEqual(ref_src, exp_src)
            self.assertEqual(ref_targ, exp_targ)
    def test_validate_entangler_map(self):
        """validate entangler map test"""
        valid_map = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
        self.assertTrue(validate_entangler_map(valid_map, 4))
        # with the extra positional flag the reversed duplicate [3, 2] is legal
        valid_map_2 = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3], [3, 2]]
        self.assertTrue(validate_entangler_map(valid_map_2, 4, True))
        # qubit index 4 is out of range for 4 qubits
        invalid_map = [[0, 4], [4, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
        with self.assertRaises(ValueError):
            validate_entangler_map(invalid_map, 4)
        # the same reversed duplicate is rejected without the flag
        invalid_map_2 = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3], [3, 2]]
        with self.assertRaises(ValueError):
            validate_entangler_map(invalid_map_2, 4)
        wrong_type_map = {0: [1, 2, 3], 1: [2, 3]}
        with self.assertRaises(TypeError):
            validate_entangler_map(wrong_type_map, 4)
        wrong_type_map_2 = [(0, 1), (0, 2), (0, 3)]
        with self.assertRaises(TypeError):
            validate_entangler_map(wrong_type_map_2, 4)
if __name__ == "__main__":
    unittest.main()
| 36.550725 | 84 | 0.627676 |
436904777de017c5ccd88271aa0994b73a493822 | 4,932 | py | Python | main_beat_aligned.py | LanXiangExcavator/Beat-aligned_Transformer | c17b014f01761d1b38addd3a846db3c0b5ba5e80 | [
"MIT"
] | 3 | 2022-03-03T07:37:01.000Z | 2022-03-09T07:47:11.000Z | main_beat_aligned.py | LanXiangExcavator/Beat-aligned_Transformer | c17b014f01761d1b38addd3a846db3c0b5ba5e80 | [
"MIT"
] | null | null | null | main_beat_aligned.py | LanXiangExcavator/Beat-aligned_Transformer | c17b014f01761d1b38addd3a846db3c0b5ba5e80 | [
"MIT"
] | 1 | 2022-02-28T14:55:23.000Z | 2022-02-28T14:55:23.000Z | import argparse
import collections
import torch
import numpy as np
import data_loader.data_loaders as module_data
import model.loss as module_loss
import model.metric as module_metric
import model.swin_transformer_1d as module_arch_swin_transformer
import model.beat_aligned_transformer as module_arch_beat_aligned_transformer
from parse_config import ConfigParser
from trainer import Trainer_beat_aligned_data
from evaluater import Evaluater_beat_aligned_data
from model.metric import ChallengeMetric
from utils.util import load_model
from utils.lr_scheduler import CosineAnnealingWarmUpRestarts, GradualWarmupScheduler
import datetime
# Maps a model-module suffix to the architecture type names it provides;
# main() uses the key to pick the imported module ("module_arch_" + key).
files_models = {
    "swin_transformer_1d": ['swin_transformer'],
    "beat_aligned_transformer": ['beat_aligned_transformer'],
}
def main(config):
    """Build model/criterion/metrics/optimizer/scheduler from *config*,
    train (unless config["only_test"] is set) and then run evaluation.
    """
    logger = config.get_logger('train')
    # build model architecture, then print to console
    global model
    # NOTE(review): eval() on a constructed module name only works because
    # files_models keys are trusted constants; a dict of modules would be safer.
    for file, types in files_models.items():
        for type in types:
            if config["arch"]["type"] == type:
                model = config.init_obj('arch', eval("module_arch_" + file))
                logger.info(model)
    # optionally warm-start from a pre-trained weight file
    if config['arch'].get('weight_path', False):
        model = load_model(model, config["arch"]["weight_path"])
    # get function handles of loss and metrics
    # print(config)
    criterion = getattr(module_loss, config['loss']['type'])
    # get function handles of metrics
    challenge_metrics = ChallengeMetric(config['data_loader']['args']['label_dir'])
    # challenge_metrics = ChallengeMetric2(num_classes=9)
    metrics = [getattr(challenge_metrics, met) for met in config['metrics']]
    # build optimizer, learning rate scheduler. delete every lines containing lr_scheduler for disabling scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.init_obj('optimizer', torch.optim, trainable_params)
    if config["lr_scheduler"]["type"] == "CosineAnnealingWarmRestarts":
        params = config["lr_scheduler"]["args"]
        lr_scheduler = CosineAnnealingWarmUpRestarts(optimizer, T_0=params["T_0"], T_mult=params["T_mult"],
                                                     T_up=params["T_up"], gamma=params["gamma"], eta_max=params["eta_max"])
    elif config["lr_scheduler"]["type"] == "GradualWarmupScheduler":
        params = config["lr_scheduler"]["args"]
        # the warmup scheduler wraps a torch scheduler built from the config
        scheduler_steplr_args = dict(params["after_scheduler"]["args"])
        scheduler_steplr = getattr(torch.optim.lr_scheduler, params["after_scheduler"]["type"])(optimizer, **scheduler_steplr_args)
        lr_scheduler = GradualWarmupScheduler(optimizer, multiplier=params["multiplier"], total_epoch=params["total_epoch"], after_scheduler=scheduler_steplr)
    else:
        lr_scheduler = config.init_obj('lr_scheduler', torch.optim.lr_scheduler, optimizer)
    if config["only_test"] == False:
        # setup data_loader instances
        data_loader = config.init_obj('data_loader', module_data)
        valid_data_loader = data_loader.valid_data_loader
        trainer = Trainer_beat_aligned_data(model, criterion, metrics, optimizer,
                                            config=config,
                                            data_loader=data_loader,
                                            valid_data_loader=valid_data_loader,
                                            lr_scheduler=lr_scheduler)
        trainer.train()
    # evaluation runs after training, or standalone when only_test is set
    evaluater = Evaluater_beat_aligned_data(model, criterion, metrics,
                                            config=config)
    evaluater.evaluate()
if __name__ == '__main__':
    start_time = datetime.datetime.now()
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default='0', type=str,
                      help='indices of GPUs to enable (default: all)')
    args.add_argument('-s', '--seed', type=int, default=0)
    # NOTE(review): argparse type=bool does not parse "False" as False --
    # any non-empty string is truthy; consider action='store_true'.
    args.add_argument('-t', '--only_test', default=False, type=bool,
                      help='only test (default: False)')
    # custom cli options to modify configuration from default values given in json file.
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    options = [
        CustomArgs(['--lr', '--learning_rate'], type=float, target='optimizer;args;lr'),
        CustomArgs(['--bs', '--batch_size'], type=int, target='data_loader;args;batch_size')
    ]
    config = ConfigParser.from_args(args, options)
    import os
    # log the visible GPU configuration before starting
    print("torch.cuda.device_count(): ", torch.cuda.device_count())
    print("CUDA_VISIBLE_DEVICES: ", os.environ["CUDA_VISIBLE_DEVICES"])
    main(config)
    end_time = datetime.datetime.now()
    print("程序运行时间:" + str((end_time - start_time).seconds) + "秒")
| 45.247706 | 158 | 0.681468 |
c35715af083441201b6d6dc05af54a56b8bcb312 | 975 | py | Python | setup.py | beatrizuezu/pycpfcnpj | 065742e68cc0185341a9b1df35606560f78fd21a | [
"MIT"
] | null | null | null | setup.py | beatrizuezu/pycpfcnpj | 065742e68cc0185341a9b1df35606560f78fd21a | [
"MIT"
] | null | null | null | setup.py | beatrizuezu/pycpfcnpj | 065742e68cc0185341a9b1df35606560f78fd21a | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for pycpfcnpj (CPF/CNPJ validation and generation).
setup(name='pycpfcnpj',
      version='1.5.1',
      description='Python module for brazilian register numbers for persons (CPF) and companies (CNPJ).',
      # Trove classifiers advertising supported interpreters and maturity.
      classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      keywords='cpf cnpj validation generation',
      url='https://github.com/matheuscas/pycpfcnpj',
      author='Matheus Cardoso',
      author_email='matheus.mcas@gmail.com',
      license='MIT',
      packages=['pycpfcnpj'],
      # Tests are collected and run via nose (`python setup.py test`).
      test_suite='nose.collector',
      tests_require=['nose'],
      zip_safe=False)
| 39 | 105 | 0.603077 |
998346f2ec29f86896454309f95e50449f696d12 | 2,325 | py | Python | vseq/utils/argparsing.py | JakobHavtorn/vseq | bdd0258738b5f43d6f0f6c3df4b8b270f06d0aea | [
"MIT"
] | 7 | 2021-03-25T12:33:53.000Z | 2022-03-23T13:10:31.000Z | vseq/utils/argparsing.py | JakobHavtorn/vseq | bdd0258738b5f43d6f0f6c3df4b8b270f06d0aea | [
"MIT"
] | null | null | null | vseq/utils/argparsing.py | JakobHavtorn/vseq | bdd0258738b5f43d6f0f6c3df4b8b270f06d0aea | [
"MIT"
] | null | null | null | import argparse
import json
import os
from functools import partial
from json import JSONDecoder
def json_or_str(arg, json_decoder=json.loads):
    """Decode *arg* as JSON when it looks like JSON, otherwise split on spaces.

    The heuristic treats any string containing ``{`` or ``[`` as a JSON
    document; anything else is returned as a list of space-separated tokens.
    """
    looks_like_json = "{" in arg or "[" in arg
    if looks_like_json:
        return json_decoder(arg)
    return arg.split(" ")
def json_file_or_json(arg, json_decoder=json.loads):
    """Parse *arg* as a JSON string, or as a path to a file containing JSON.

    Args:
        arg: Either a JSON document (detected by the presence of ``{`` or
            ``[``) or a filesystem path to a JSON file.
        json_decoder: Callable that decodes a JSON string. Defaults to
            :func:`json.loads`.

    Returns:
        The decoded JSON object.
    """
    is_json = "{" in arg or "[" in arg
    if is_json:
        return json_decoder(arg)
    # Read the file ourselves so a custom ``json_decoder`` (e.g. one with an
    # ``object_pairs_hook``, as used by json_file_or_json_unique_keys) is
    # honoured for files too; previously this fell back to plain json.load.
    with open(arg, "r") as json_file:
        return json_decoder(json_file.read())
def json_file_or_json_unique_keys(arg):
    """Parse a JSON file or string, renaming duplicate keys to stay unique.

    Repeated keys within one object are kept by appending ``-<i>`` to every
    repeat, with ``<i>`` counting up from 1 (``a``, ``a-1``, ``a-2``, ...).
    """
    def _dedup_pairs(pairs):
        # object_pairs_hook: sees every (key, value) pair, including repeats.
        out = dict()
        for key, value in pairs:
            candidate = key
            suffix = 0
            while candidate in out:
                suffix += 1
                candidate = "{}-{}".format(key, suffix)
            out[candidate] = value
        return out

    decoder = JSONDecoder(object_pairs_hook=_dedup_pairs)
    return json_file_or_json(arg, json_decoder=decoder.decode)
def str2bool(arg):
    """Convert a command-line string into a bool.

    Intended as an argparse ``type=`` callable:
        parser.add_argument('--some_var', type=str2bool, default=False, const=True)

    Truthy spellings (case insensitive): true, t, yes, y, 1.
    Falsy spellings (case insensitive): false, f, no, n, 0.
    A bool is passed through unchanged; anything else raises
    ``argparse.ArgumentTypeError``.

    See https://stackoverflow.com/a/43357954/4203328
    """
    if isinstance(arg, bool):
        return arg
    lowered = arg.lower()
    if lowered in {"yes", "true", "t", "y", "1"}:
        return True
    if lowered in {"no", "false", "f", "n", "0"}:
        return False
    raise argparse.ArgumentTypeError(f"Could not parse argument {arg} of type {type(arg)}")
| 28.012048 | 106 | 0.615484 |
06237df9ed0f49fd9df8084d7d0a34d88fbccd9c | 1,645 | py | Python | psana/psana/momentum/EleMomentumRemi.py | ZhenghengLi/lcls2 | 94e75c6536954a58c8937595dcac295163aa1cdf | [
"BSD-3-Clause-LBNL"
] | 16 | 2017-11-09T17:10:56.000Z | 2022-03-09T23:03:10.000Z | psana/psana/momentum/EleMomentumRemi.py | ZhenghengLi/lcls2 | 94e75c6536954a58c8937595dcac295163aa1cdf | [
"BSD-3-Clause-LBNL"
] | 6 | 2017-12-12T19:30:05.000Z | 2020-07-09T00:28:33.000Z | psana/psana/momentum/EleMomentumRemi.py | ZhenghengLi/lcls2 | 94e75c6536954a58c8937595dcac295163aa1cdf | [
"BSD-3-Clause-LBNL"
] | 25 | 2017-09-18T20:02:43.000Z | 2022-03-27T22:27:42.000Z | import numpy as np
class EleMomentumRemi():
    """Electron momentum reconstruction for a REMI-style spectrometer.

    NOTE(review): the physics (circular motion of an electron in a magnetic
    field plus time-of-flight along an acceleration region of length ``l``
    with extraction voltage ``U``) is inferred from the formulas below;
    confirm against the instrument documentation.
    """
    def __init__(self,t0_ns=0,x0_mm=0,y0_mm=0,vjetx_mmPns=0,vjety_mmPns=0,
                l_mm=None,U_V=None):
        # Calibration offsets: time zero [ns] and detector origin [mm].
        self.t0 = t0_ns
        self.x0 = x0_mm
        self.y0 = y0_mm
        # Gas-jet velocity components [mm/ns], subtracted from the momenta.
        self.vjetx = vjetx_mmPns
        self.vjety = vjety_mmPns
        self.U = U_V  # extraction voltage [V]
        self.l = l_mm  # acceleration length [mm]
        # Unit-conversion factors; names encode the units, e.g. mmPns2au is
        # mm/ns -> atomic units. Values are taken as given, not re-derived.
        self.mmPns2au = 0.4571028904957055
        self.VPmm2mmPns = 1.75882014896e-1
        self.amu2au = 1836.15
        self.au2tesla = 2.35e5
        self.au2mm = 5.28e-8
    def CalcPtr(self,B_tesla,omega_mns,x_mm,y_mm,t_ns):
        """Transverse momentum (a.u.) from the detector hit (x, y) at time t.

        Returns (Ptr, phi, Px, Py): magnitude, emission angle, and Cartesian
        components with the jet velocity subtracted.
        """
        # Hit radius relative to the calibrated detector origin.
        R = np.sqrt((x_mm-self.x0)**2 + (y_mm-self.y0)**2)
        # Rotation angle accumulated during the flight, wrapped to [0, 2*pi).
        angle = omega_mns*(t_ns-self.t0)
        angle %= 2*np.pi
        Ptr = (B_tesla/self.au2tesla)*R/self.au2mm/(2*np.abs(np.sin(angle/2)))
        # Detector angle of the hit, then back-rotated by half the turn angle
        # to recover the initial emission direction.
        theta = (np.arctan2(y_mm-self.y0,x_mm-self.x0)+2*np.pi)%(2*np.pi)
        phi = theta - angle/2
        Px = Ptr*np.cos(phi) - self.mmPns2au*self.vjetx
        Py = Ptr*np.sin(phi) - self.mmPns2au*self.vjety
        return Ptr, phi, Px, Py
    def CalcR(self,B_tesla,omega_mns,Ptr,t_ns):
        """Inverse of CalcPtr's radius relation: hit radius [mm] for a given
        transverse momentum Ptr (a.u.) and flight time t_ns."""
        angle = omega_mns*(t_ns-self.t0)
        angle %= 2*np.pi
        R = self.au2mm*(2*np.abs(np.sin(angle/2)))*Ptr/(B_tesla/self.au2tesla)
        return R
    def CalcPzOneAcc(self,m_amu,t_ns):
        """Longitudinal momentum (a.u.) for a single-acceleration-region
        geometry, from the particle mass [amu] and time of flight [ns].
        The 8.04e-2 factor is an empirical unit-conversion constant
        (not re-derived here)."""
        return self.amu2au*m_amu*self.mmPns2au*self.l/(t_ns-self.t0) - 8.04e-2*self.U*(t_ns-self.t0)/(2*self.l)
    def CalcPzOneAccApprox(self,ta_ns,ta0_ns,sfc=None):
        """Linearised Pz from the arrival-time deviation (ta0 - ta).

        If a scale factor ``sfc`` is supplied it is used directly; otherwise
        the factor is computed from the extraction field U/l.
        """
        if sfc is not None:
            return sfc*(ta0_ns-ta_ns)
        else:
            return 8.04e-2*self.U*(ta0_ns-ta_ns)/self.l
| 35 | 111 | 0.569605 |
36fa5ed4e1bc3b4aac2502588733708a2de96cb9 | 1,235 | py | Python | covid19openapi/thaicovid19.py | rsxss/covid19-open-api | fab3d7b3706266f6ea5127505807fc76cd550dd5 | [
"MIT"
] | null | null | null | covid19openapi/thaicovid19.py | rsxss/covid19-open-api | fab3d7b3706266f6ea5127505807fc76cd550dd5 | [
"MIT"
] | null | null | null | covid19openapi/thaicovid19.py | rsxss/covid19-open-api | fab3d7b3706266f6ea5127505807fc76cd550dd5 | [
"MIT"
] | null | null | null | from __future__ import annotations
from .basecovid19 import BaseCovid19
from .covid19scope import Covid19
import requests
class ThaiCovid19(BaseCovid19):
    """Client for Thai COVID-19 data served by covid19.th-stat.com."""

    def __init__(self: BaseCovid19, source: str = 'covid19.th-stat.com') -> None:
        """
        Args:
            source (str): hostname of the data source.
        """
        super().__init__(Covid19.THAI, source)

    def get(self: ThaiCovid19, field: str, **kwargs) -> dict:
        """Fetch a single dataset.

        Args:
            field (str): a known field name, or a full ``http(s)`` URL.
            **kwargs: forwarded to ``requests.get``.

        Returns:
            Decoded JSON payload on HTTP 200, otherwise ``None`` (also
            ``None`` when the field name is unknown).
        """
        if field.startswith('http'):
            url = field
        else:
            url = self.api.get(field, None)
        if url is None:
            return None
        response = requests.get(url, **kwargs)
        if response.status_code == 200:
            return response.json()
        return None

    def get_all(self: ThaiCovid19, lim: int = 10) -> dict:
        """Fetch every known dataset, stopping once the index exceeds ``lim``.

        Args:
            lim (int): index cutoff for the iteration over ``self.api``.
        """
        result = {}
        for counter, (field, url) in enumerate(self.api.items()):
            if counter > lim:
                break
            result[field] = self.get(url)
        return result
| 27.444444 | 83 | 0.519838 |
84d86331215fe08a6494798937d28726eeda530c | 6,191 | py | Python | mtrl/logger.py | NagisaZj/mtrl | 9aa7b614e22cecddfb61da2a85e196e26aeea814 | [
"MIT"
] | null | null | null | mtrl/logger.py | NagisaZj/mtrl | 9aa7b614e22cecddfb61da2a85e196e26aeea814 | [
"MIT"
] | null | null | null | mtrl/logger.py | NagisaZj/mtrl | 9aa7b614e22cecddfb61da2a85e196e26aeea814 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Implementation based on Denis Yarats' implementation of [SAC](https://github.com/denisyarats/pytorch_sac).
import json
import os
import time
from functools import singledispatch
from typing import Dict, List
import numpy as np
import torch
from termcolor import colored
@singledispatch
def serialize_log(val):
"""Used by default."""
return val
@serialize_log.register(np.float32)
def np_float32(val):
return np.float64(val)
@serialize_log.register(np.int64)
def np_int64(val):
return int(val)
class Meter(object):
def __init__(self):
pass
def update(self, value, n=1):
pass
def value(self):
pass
class AverageMeter(Meter):
def __init__(self):
self._sum = 0
self._count = 0
def update(self, value, n=1):
self._sum += value
self._count += n
def value(self):
return self._sum / max(1, self._count)
class CurrentMeter(Meter):
def __init__(self):
pass
def update(self, value, n=1):
self._value = value
def value(self):
return self._value
class MetersGroup(object):
def __init__(self, file_name, formating, mode: str, retain_logs: bool):
self._file_name = file_name
self._mode = mode
if not retain_logs:
if os.path.exists(file_name):
os.remove(file_name)
self._formating = formating
self._meters: Dict[str, Meter] = {}
def log(self, key, value, n=1):
if key not in self._meters:
metric_type = self._formating[key][2]
if metric_type == "average":
self._meters[key] = AverageMeter()
elif metric_type == "constant":
self._meters[key] = CurrentMeter()
else:
raise ValueError(f"{metric_type} is not supported by logger.")
self._meters[key].update(value, n)
def _prime_meters(self):
data = {}
for key, meter in self._meters.items():
data[key] = meter.value()
data["mode"] = self._mode
return data
def _dump_to_file(self, data):
data["logbook_timestamp"] = time.strftime("%I:%M:%S%p %Z %b %d, %Y")
with open(self._file_name, "a") as f:
f.write(json.dumps(data, default=serialize_log) + "\n")
def _format(self, key, value, ty):
template = "%s: "
if ty == "int":
template += "%d"
elif ty == "float":
template += "%.04f"
elif ty == "time":
template += "%.01f s"
elif ty == "str":
template += "%s"
else:
raise "invalid format type: %s" % ty
return template % (key, value)
def _dump_to_console(self, data, prefix):
prefix = colored(prefix, "yellow" if prefix == "train" else "green")
pieces = ["{:5}".format(prefix)]
for key, (disp_key, ty, _) in self._formating.items():
if key in data:
value = data.get(key, 0)
if disp_key is not None:
pieces.append(self._format(disp_key, value, ty))
print("| %s" % (" | ".join(pieces)))
def dump(self, step, prefix):
if len(self._meters) == 0:
return
data = self._prime_meters()
data["step"] = step
self._dump_to_file(data)
self._dump_to_console(data, prefix)
self._meters.clear()
class Logger(object):
def __init__(self, log_dir, config, retain_logs: bool = False):
self._log_dir = log_dir
self.config = config
if "metaworld" in self.config.env.name:
num_envs = int(
"".join(
[
x
for x in self.config.env.benchmark._target_.split(".")[1]
if x.isdigit()
]
)
)
if 'mt1' in self.config.env.name:
num_envs = 50
else:
env_list: List[str] = []
for key in self.config.metrics:
if "_" in key:
mode, submode = key.split("_")
# todo: should we instead throw an error here?
if mode in self.config.env and submode in self.config.env[mode]:
env_list += self.config.env[mode][submode]
else:
if key in self.config.env:
env_list += self.config.env[key]
num_envs = len(set(env_list))
def _get_formatting(
current_formatting: List[List[str]],
) -> Dict[str, List[str]]:
formating: Dict[str, List[str]] = {
_format[0]: _format[1:] for _format in current_formatting
}
if num_envs > 0:
keys = list(formating.keys())
for key in keys:
if key.endswith("_"):
value = formating.pop(key)
for index in range(num_envs):
new_key = key + str(index)
if value[0] is None:
abbr = None
else:
abbr = value[0] + str(index)
formating[new_key] = [abbr, *value[1:]]
return formating
self.mgs = {
key: MetersGroup(
os.path.join(log_dir, f"{key}.log"),
formating=_get_formatting(current_formatting=value),
mode=key,
retain_logs=retain_logs,
)
for key, value in self.config.metrics.items()
}
# print(self.mgs['eval']._formating.keys())
def log(self, key, value, step, n=1):
assert key.startswith("train") or key.startswith("eval")
if type(value) == torch.Tensor:
value = value.item()
mode, key = key.split("/", 1)
self.mgs[mode].log(key, value, n)
def dump(self, step):
for key in self.mgs:
self.mgs[key].dump(step, key)
| 30.2 | 108 | 0.517687 |
f4bc72625c7eddebb6d2f11e036db215e9aa51eb | 1,039 | py | Python | tests/test_configure.py | DevicePilot/devicepilot-py | e4d60abdf2cb54c27720f38d6b75fa4f0106301c | [
"MIT"
] | 2 | 2018-11-16T15:44:52.000Z | 2018-11-16T15:53:16.000Z | tests/test_configure.py | DevicePilot/devicepilot-py | e4d60abdf2cb54c27720f38d6b75fa4f0106301c | [
"MIT"
] | null | null | null | tests/test_configure.py | DevicePilot/devicepilot-py | e4d60abdf2cb54c27720f38d6b75fa4f0106301c | [
"MIT"
] | null | null | null | """configure tests"""
import os
import unittest
from devicepilot.configure import configure
class TestConfigure(unittest.TestCase):
    """Tests for devicepilot.configure.configure key resolution."""

    def test_get_explicit_api_key(self):
        """An explicitly passed key gets the TOKEN prefix."""
        key = configure("the-key")
        self.assertEqual(key, "TOKEN the-key")

    def test_get_key_from_environment(self):
        """Falls back to the DP_API_KEY environment variable."""
        os.environ["DP_API_KEY"] = "here-is-a-key"
        # addCleanup removes the variable even if configure() raises, so a
        # failure here cannot leak DP_API_KEY into the other tests (the
        # original unconditional `del` was skipped on assertion failure).
        self.addCleanup(os.environ.pop, "DP_API_KEY", None)
        key = configure()
        self.assertEqual(key, "TOKEN here-is-a-key")

    def test_normalises_token(self):
        """A mixed-case 'token' prefix is normalised to upper case."""
        key = configure("ToKeN the-key")
        self.assertEqual(key, "TOKEN the-key")

    def test_raise_if_no_key(self):
        """Raises ValueError when no key is provided anywhere."""
        with self.assertRaises(ValueError):
            configure()

    def test_raise_if_blank_key(self):
        """Raises ValueError for a whitespace-only key."""
        with self.assertRaises(ValueError):
            configure(" ")
| 28.081081 | 52 | 0.629451 |
ad8e221ffe7f6452c5ed50dcd3075f50a1b064d8 | 444 | py | Python | tests/testing/packages/conflicting_package/setup.py | Kami/venv-update | afe9886b7481de770f2a684ec7450368006d54f3 | [
"MIT"
] | 165 | 2015-01-05T10:19:30.000Z | 2022-03-22T21:14:58.000Z | tests/testing/packages/conflicting_package/setup.py | Kami/venv-update | afe9886b7481de770f2a684ec7450368006d54f3 | [
"MIT"
] | 148 | 2015-01-06T19:57:15.000Z | 2021-07-23T20:50:06.000Z | tests/testing/packages/conflicting_package/setup.py | Kami/venv-update | afe9886b7481de770f2a684ec7450368006d54f3 | [
"MIT"
] | 30 | 2015-01-05T10:19:35.000Z | 2022-03-01T00:48:53.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from setuptools import setup
# Test-fixture package: presumably exists to force a dependency conflict in
# venv-update's test suite via the `many_versions_package<2` pin — confirm
# against the tests that install it.
setup(
    name=str('conflicting_package'),
    version='1',
    url='example.com',
    author='nobody',
    author_email='nobody@example.com',
    install_requires=[
        'many_versions_package<2',
    ],
    options={
        # Build a universal (py2.py3) wheel.
        'bdist_wheel': {
            'universal': 1,
        }
    },
)
| 19.304348 | 39 | 0.648649 |
c88d18541811cd7fdb355959874768ec4e4307de | 22,482 | py | Python | tests/conftest.py | vhawk19/zulip-terminal | 838e49c8a8baa740f06eace456e1cf31b16151c9 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | vhawk19/zulip-terminal | 838e49c8a8baa740f06eace456e1cf31b16151c9 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | vhawk19/zulip-terminal | 838e49c8a8baa740f06eace456e1cf31b16151c9 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from copy import deepcopy
from typing import Any, Dict
import pytest
from zulipterminal.ui_tools.boxes import MessageBox
from zulipterminal.ui_tools.buttons import StreamButton, UserButton
from zulipterminal.helper import initial_index as helper_initial_index
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
    """
    Forces all the tests to work offline.

    Deleting ``Session.request`` makes any attempted HTTP call raise an
    AttributeError, so a test that accidentally hits the network fails fast.
    """
    monkeypatch.delattr("requests.sessions.Session.request")
@pytest.fixture(autouse=True)
def no_asynch(mocker):
    """
    Make all function calls synchronous.
    """
    # Replacing `asynch` with a mock keeps work from being scheduled in
    # background threads during tests (presumably its normal behaviour —
    # confirm against zulipterminal.helper.asynch).
    mocker.patch('zulipterminal.helper.asynch')
# --------------- Controller Fixtures -----------------------------------------
@pytest.fixture
def stream_button(mocker):
    """
    Mocked stream button.
    """
    # properties: presumably [name, stream_id, color, is_private]; stream 205
    # 'PTEST' matches the stream used in the message fixtures — confirm
    # against StreamButton's constructor.
    button = StreamButton(
        properties=['PTEST', 205, '#bfd56f', False],
        controller=mocker.patch('zulipterminal.core.Controller'),
        width=40,
        view=mocker.patch('zulipterminal.ui.View')
    )
    return button
@pytest.fixture
def user_button(mocker, width=38):
    """
    Mocked User Button.
    """
    # User 5179 ('Boo Boo') is the PM counterpart in the message fixtures.
    # NOTE(review): `width` has a default on a fixture function — verify the
    # pytest version in use tolerates non-fixture default parameters.
    return UserButton(
        user={
            'user_id': 5179,
            'full_name': 'Boo Boo',
            'email': 'boo@zulip.com',
        },
        width=width,
        controller=mocker.patch('zulipterminal.core.Controller'),
        view=mocker.patch('zulipterminal.ui.View')
    )
@pytest.fixture
def msg_box(mocker, messages_successful_response):
    """
    Mocked MessageBox with stream message
    """
    # messages[0] is the stream message in messages_successful_response;
    # the model is mocked and the third positional argument is passed as
    # None (its meaning is defined by MessageBox — confirm there).
    return MessageBox(
        messages_successful_response['messages'][0],
        mocker.patch('zulipterminal.model.Model'),
        None,
    )
# --------------- Model Fixtures ----------------------------------------------
@pytest.fixture
def messages_successful_response() -> Dict[str, Any]:
"""
A successful response from a /messages API query.
"""
response = {
'anchor': 10000000000000000,
'messages': [{
'id': 537286,
'sender_full_name': 'Foo Foo',
'timestamp': 1520918722,
'client': 'website',
'recipient_id': 6076,
'sender_email': 'foo@zulip.com',
'type': 'stream',
'sender_realm_str': '',
'flags': ['read'],
'sender_id': 5140,
'content_type': 'text/x-markdown',
'stream_id': 205,
'subject': 'Test',
'reactions': [],
'subject_links': [],
'avatar_url': '/user_avatars/2/foo.png?x=x&version=2',
'is_me_message': False,
'sender_short_name': 'foo',
'content': 'Stream content here.',
'display_recipient': 'PTEST',
}, {
'id': 537287,
'sender_full_name': 'Foo Foo',
'timestamp': 1520918736,
'client': 'website',
'recipient_id': 5780,
'is_me_message': False,
'sender_email': 'foo@zulip.com',
'flags': ['read'],
'sender_id': 5140,
'content_type': 'text/x-markdown',
'sender_realm_str': '',
'subject': '',
'reactions': [],
'type': 'private',
'avatar_url': '/user_avatars/2/foo.png?x=x&version=2',
'subject_links': [],
'sender_short_name': 'foo',
'content': 'Hey PM content here.',
'display_recipient': [{
'id': 5179,
'is_mirror_dummy': False,
'full_name': 'Boo Boo',
'short_name': 'boo',
'email': 'boo@zulip.com',
}, {
'short_name': 'foo',
'id': 5140,
'is_mirror_dummy': False,
'full_name': 'Foo Foo',
'email': 'foo@zulip.com',
}],
}, {
'id': 537288,
'sender_full_name': 'Foo Foo',
'timestamp': 1520918737,
'client': 'website',
'recipient_id': 5780, # FIXME Unsure
'is_me_message': False,
'sender_email': 'foo@zulip.com',
'flags': ['read'],
'sender_id': 5140,
'content_type': 'text/x-markdown',
'sender_realm_str': '',
'subject': '',
'reactions': [],
'type': 'private',
'avatar_url': '/user_avatars/2/foo.png?x=x&version=2',
'subject_links': [],
'sender_short_name': 'foo',
'content': 'Hey PM content here again.',
'display_recipient': [{
'id': 5179,
'is_mirror_dummy': False,
'full_name': 'Boo Boo',
'short_name': 'boo',
'email': 'boo@zulip.com',
}, {
'short_name': 'foo',
'id': 5140,
'is_mirror_dummy': False,
'full_name': 'Foo Foo',
'email': 'foo@zulip.com',
}, {
'short_name': 'bar',
'id': 5180,
'is_mirror_dummy': False,
'full_name': 'Bar Bar',
'email': 'bar@zulip.com',
}],
}],
'result': 'success',
'msg': '',
}
return response
@pytest.fixture(scope="module")
def initial_data():
"""
Response from /register API request.
"""
return {
'unsubscribed': [{
'audible_notifications': False,
'description': 'announce',
'stream_id': 7,
'is_old_stream': True,
'desktop_notifications': False,
'pin_to_top': False,
'stream_weekly_traffic': 0,
'invite_only': False,
'name': 'announce',
'push_notifications': False,
'email_address': '',
'color': '#bfd56f',
'in_home_view': True
}],
'result': 'success',
'queue_id': '1522420755:786',
'realm_users': [{
'bot_type': None,
'is_bot': False,
'is_admin': False,
'email': 'FOOBOO@gmail.com',
'full_name': 'Tomás Farías',
'user_id': 5827,
'avatar_url': None,
'is_active': True
}, {
'full_name': 'Jari Winberg',
'user_id': 6086,
'avatar_url': None,
'is_active': True,
'bot_type': None,
'is_bot': False,
'is_admin': False,
'email': 'nyan.salmon+sns@gmail.com',
}, {
'bot_type': None,
'is_bot': False,
'is_admin': False,
'email': 'cloudserver2@hotmail.de',
'full_name': 'Test Account',
'user_id': 6085,
'is_active': True
}],
'cross_realm_bots': [{
'full_name': 'Notification Bot',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:29.035543+00:00',
'email': 'notification-bot@zulip.com',
'user_id': 5,
'is_admin': False,
'avatar_url': 'https://secure.gravatar.com/avatar/'
'0fc5476bdf03fe8640cc8fbc27a47549'
'?d=identicon&version=1'
}, {
'full_name': 'Email Gateway',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:29.037658+00:00',
'email': 'emailgateway@zulip.com',
'user_id': 6,
'is_admin': False,
'avatar_url': 'https://secure.gravatar.com/avatar/'
'99ac4226a594fca879bb598c1b36fb42'
'?d=identicon&version=1'
}, {
'full_name': 'Welcome Bot',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:29.033231+00:00',
'email': 'welcome-bot@zulip.com',
'user_id': 4,
'is_admin': False,
'avatar_url': 'https://secure.gravatar.com/avatar/'
'6a4e22d220487fb7ceb295fa706f39d5'
'?d=identicon&version=1'
}, {
'full_name': 'Zulip Feedback Bot',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:28.972281+00:00',
'email': 'feedback@zulip.com',
'user_id': 1,
'is_admin': False,
'avatar_url': 'https://secure.gravatar.com/avatar/'
'78eecc367eedd27e6ac9292dc966beb6'
'?d=identicon&version=1'
}],
'subscriptions': [{
'audible_notifications': False,
'description': '',
'stream_id': 86,
'is_old_stream': True,
'desktop_notifications': False,
'pin_to_top': False,
'stream_weekly_traffic': 0,
'invite_only': False,
'name': 'Django',
'push_notifications': False,
'email_address': '',
'color': '#94c849',
'in_home_view': True
}, {
'audible_notifications': False,
'description': 'The Google Summer of Code',
'stream_id': 14,
'is_old_stream': True,
'desktop_notifications': False,
'pin_to_top': False,
'stream_weekly_traffic': 53,
'invite_only': False,
'name': 'GSoC',
'push_notifications': False,
'email_address': '',
'color': '#c2c2c2',
'in_home_view': True
}, {
# This is a private stream;
# only description/stream_id/invite_only/name/color vary from above
'audible_notifications': False,
'description': 'Some private stream',
'stream_id': 99,
'is_old_stream': True,
'desktop_notifications': False,
'pin_to_top': False,
'stream_weekly_traffic': 53,
'invite_only': True,
'name': 'Secret stream',
'push_notifications': False,
'email_address': '',
'color': '#c3c3c3',
'in_home_view': True
}],
'msg': '',
'max_message_id': 552761,
'never_subscribed': [{
'invite_only': False,
'description': 'Announcements from the Zulip GCI Mentors',
'stream_id': 87,
'name': 'GCI announce',
'is_old_stream': True,
'stream_weekly_traffic': 0
}, {
'invite_only': False,
'description': 'General discussion',
'stream_id': 74,
'name': 'GCI general',
'is_old_stream': True,
'stream_weekly_traffic': 0
}],
'unread_msgs': {
'pms': [],
'count': 0,
'mentions': [],
'streams': [],
'huddles': []
},
'presences': {
'nyan.salmon+sns@gmail.com': {
'ZulipElectron': {
'pushable': False,
'client': 'ZulipElectron',
'status': 'idle',
'timestamp': 1522484059
},
'ZulipMobile': {
'pushable': False,
'client': 'ZulipMobile',
'status': 'idle',
'timestamp': 1522384165
},
'aggregated': {
'timestamp': 1522484059,
'client': 'ZulipElectron',
'status': 'idle'
}
},
'FOOBOO@gmail.com': {
'website': {
'pushable': True,
'client': 'website',
'status': 'active',
'timestamp': 1522458138
},
'ZulipMobile': {
'pushable': True,
'client': 'ZulipMobile',
'status': 'active',
'timestamp': 1522480103
},
'aggregated': {
'timestamp': 1522480103,
'client': 'ZulipMobile',
'status': 'active'
}
}
},
'last_event_id': -1,
'muted_topics': [],
}
@pytest.fixture
def initial_index():
    """Fresh deep copy of the helper's initial index, so tests can mutate
    it without leaking state between tests."""
    return deepcopy(helper_initial_index)
@pytest.fixture
def empty_index():
return {
'pointer': defaultdict(set, {}),
'private': defaultdict(set, {}),
'all_messages': set(),
'all_private': set(),
'all_stream': defaultdict(set, {}),
'stream': defaultdict(dict, {}),
'search': set(),
'all_starred': set(),
'messages': defaultdict(dict, {
537286: {
'type': 'stream',
'sender_realm_str': '',
'is_me_message': False,
'content': 'Stream content here.',
'recipient_id': 6076,
'avatar_url': '/user_avatars/2/foo.png?x=x&version=2',
'client': 'website',
'stream_id': 205,
'subject_links': [],
'content_type': 'text/x-markdown',
'display_recipient': 'PTEST',
'reactions': [],
'sender_short_name': 'foo',
'id': 537286,
'flags': ['read'],
'sender_email': 'foo@zulip.com',
'timestamp': 1520918722,
'subject': 'Test',
'sender_id': 5140,
'sender_full_name': 'Foo Foo'
},
537287: {
'type': 'private',
'sender_realm_str': '',
'is_me_message': False,
'content': 'Hey PM content here.',
'recipient_id': 5780,
'client': 'website',
'subject': '',
'avatar_url': '/user_avatars/2/foo.png?x=x&version=2',
'content_type': 'text/x-markdown',
'display_recipient': [{
'id': 5179,
'full_name': 'Boo Boo',
'email': 'boo@zulip.com',
'short_name': 'boo',
'is_mirror_dummy': False
}, {
'id': 5140,
'full_name': 'Foo Foo',
'email': 'foo@zulip.com',
'short_name': 'foo',
'is_mirror_dummy': False
}],
'sender_short_name': 'foo',
'id': 537287,
'flags': ['read'],
'sender_email': 'foo@zulip.com',
'timestamp': 1520918736,
'reactions': [],
'sender_id': 5140,
'sender_full_name': 'Foo Foo',
'subject_links': []
},
537288: {
'id': 537288,
'sender_full_name': 'Foo Foo',
'timestamp': 1520918737,
'client': 'website',
'recipient_id': 5780, # FIXME Unsure
'is_me_message': False,
'sender_email': 'foo@zulip.com',
'flags': ['read'],
'sender_id': 5140,
'content_type': 'text/x-markdown',
'sender_realm_str': '',
'subject': '',
'reactions': [],
'type': 'private',
'avatar_url': '/user_avatars/2/foo.png?x=x&version=2',
'subject_links': [],
'sender_short_name': 'foo',
'content': 'Hey PM content here again.',
'display_recipient': [{
'id': 5179,
'is_mirror_dummy': False,
'full_name': 'Boo Boo',
'short_name': 'boo',
'email': 'boo@zulip.com',
}, {
'short_name': 'foo',
'id': 5140,
'is_mirror_dummy': False,
'full_name': 'Foo Foo',
'email': 'foo@zulip.com',
}, {
'short_name': 'bar',
'id': 5180,
'is_mirror_dummy': False,
'full_name': 'Bar Bar',
'email': 'bar@zulip.com',
}],
}
}),
}
@pytest.fixture
def index_all_messages(empty_index):
    """
    Expected index of `initial_data` fixture when model.narrow = []
    """
    index = dict(empty_index)
    index['all_messages'] = {537286, 537287, 537288}
    return index
@pytest.fixture
def index_stream(empty_index):
    """
    Expected index of initial_data when model.narrow = [['stream', '7']]
    """
    index = dict(empty_index)
    index['all_stream'] = defaultdict(set, {205: {537286}})
    index['all_private'] = {537287, 537288}
    return index
@pytest.fixture
def index_topic(empty_index):
    """
    Expected index of initial_data when
    model.narrow = [['stream', '7'], ['topic', 'Test']]
    """
    index = dict(empty_index)
    index['stream'] = defaultdict(dict, {205: {'Test': {537286}}})
    return index
@pytest.fixture
def index_user(empty_index):
    """
    Expected index of initial_data when
    model.narrow = [['pm_with', 'boo@zulip.com']]
    """
    index = dict(empty_index)
    index['private'] = defaultdict(set, {frozenset({5179, 5140}): {537287}})
    index['all_private'] = {537287, 537288}
    return index
@pytest.fixture
def index_user_multiple(empty_index):
    """
    Expected index of initial_data when
    model.narrow = [['pm_with', 'boo@zulip.com, bar@zulip.com']]
    """
    index = dict(empty_index)
    index['private'] = defaultdict(set,
                                   {frozenset({5179, 5140, 5180}): {537288}})
    index['all_private'] = {537287, 537288}
    return index
@pytest.fixture(params=[
    {537286, 537287, 537288},
    {537286}, {537287}, {537288},
    {537286, 537287}, {537286, 537288}, {537287, 537288},
])
def index_all_starred(empty_index, request):
    """Index variants with every subset of the three messages starred."""
    msgs_with_stars = request.param
    index = dict(empty_index, all_starred=msgs_with_stars,
                 all_private={537287, 537288})
    # NOTE: dict() is a shallow copy, so appending 'starred' below mutates
    # the same message dicts held by `empty_index` (function-scoped, so the
    # fixture is rebuilt for each test).
    for msg_id, msg in index['messages'].items():
        if msg_id in msgs_with_stars and 'starred' not in msg['flags']:
            msg['flags'].append('starred')
    return index
@pytest.fixture(scope="module")
def user_profile():
    """Profile of the logged-in user (user_id 5140, the sender of the
    messages in `messages_successful_response`)."""
    return {
        'max_message_id': 589270,
        'short_name': 'FOO',
        'full_name': 'FOO BOO',
        'email': 'FOO@ZULIP.COM',
        'is_bot': False,
        'user_id': 5140,
        'result': 'success',
        'client_id': 'abcd',
        'msg': '',
        'is_admin': False,
        'pointer': 589234
    }
@pytest.fixture(scope="module")
def error_response():
    """A generic API error payload (invalid API key)."""
    return {
        "msg": "Invalid API key",
        "result": "error"
    }
@pytest.fixture(scope="module")
def user_dict():
"""
User_dict created according to `initial_data` fixture.
"""
return {
'FOOBOO@gmail.com': {
'full_name': 'Tomás Farías',
'email': 'FOOBOO@gmail.com',
'status': 'active',
'user_id': 5827
},
'nyan.salmon+sns@gmail.com': {
'full_name': 'Jari Winberg',
'email': 'nyan.salmon+sns@gmail.com',
'status': 'offline',
'user_id': 6086
},
'cloudserver2@hotmail.de': {
'full_name': 'Test Account',
'email': 'cloudserver2@hotmail.de',
'status': 'inactive',
'user_id': 6085
},
'emailgateway@zulip.com': {
'email': 'emailgateway@zulip.com',
'full_name': 'Email Gateway',
'status': 'inactive',
'user_id': 6
},
'feedback@zulip.com': {
'email': 'feedback@zulip.com',
'full_name': 'Zulip Feedback Bot',
'status': 'inactive',
'user_id': 1
},
'notification-bot@zulip.com': {
'email': 'notification-bot@zulip.com',
'full_name': 'Notification Bot',
'status': 'inactive',
'user_id': 5
},
'welcome-bot@zulip.com': {
'email': 'welcome-bot@zulip.com',
'full_name': 'Welcome Bot',
'status': 'inactive',
'user_id': 4
},
}
@pytest.fixture(scope="module")
def user_list():
"""
List of users created corresponding to
`initial_data` fixture.
"""
# NOTE These are sorted active > idle, then according to full_name
return [{
'full_name': 'Tomás Farías',
'email': 'FOOBOO@gmail.com',
'status': 'active',
'user_id': 5827
}, {
'full_name': 'Jari Winberg',
'email': 'nyan.salmon+sns@gmail.com',
'status': 'offline',
'user_id': 6086
}, {
'email': 'emailgateway@zulip.com',
'full_name': 'Email Gateway',
'status': 'inactive',
'user_id': 6
}, {
'email': 'notification-bot@zulip.com',
'full_name': 'Notification Bot',
'status': 'inactive',
'user_id': 5
}, {
'full_name': 'Test Account',
'email': 'cloudserver2@hotmail.de',
'status': 'inactive',
'user_id': 6085
}, {
'email': 'welcome-bot@zulip.com',
'full_name': 'Welcome Bot',
'status': 'inactive',
'user_id': 4
}, {
'email': 'feedback@zulip.com',
'full_name': 'Zulip Feedback Bot',
'status': 'inactive',
'user_id': 1
}]
@pytest.fixture(scope="module")
def streams():
    """
    List of streams created corresponding to
    `initial_data` fixture.

    Each entry is [name, stream_id, color, invite_only], matching the
    subscriptions in `initial_data`.
    """
    return [
        ['Django', 86, '#94c849', False],
        ['GSoC', 14, '#c2c2c2', False],
        ['Secret stream', 99, '#c3c3c3', True],
    ]
@pytest.fixture(scope="module")
def user_id():
    """
    Default User id of the current
    user, i.e., Tomás Farías
    according to current Fixtures.
    """
    # Matches 'FOOBOO@gmail.com' in realm_users of `initial_data`.
    return 5827
| 31.531557 | 79 | 0.464505 |
9131ceb5a28864ac4a63bb3ea7d4f6591e720df0 | 662 | py | Python | lab3.py | MorganEstep/IA241 | ef31672d24a1745a18169610c85f59187c911818 | [
"MIT"
] | null | null | null | lab3.py | MorganEstep/IA241 | ef31672d24a1745a18169610c85f59187c911818 | [
"MIT"
] | null | null | null | lab3.py | MorganEstep/IA241 | ef31672d24a1745a18169610c85f59187c911818 | [
"MIT"
] | null | null | null | """
lab 3 list and set
"""
#3.1
str_list = ['a','d','e','b','c']
print(str_list)
str_list.sort()
print(str_list)
#3.2
str_list.append('f')
print(str_list)
#3.3
str_list.remove('d')
print(str_list)
#3.4
print(str_list[2])
#3.5
my_list = ['a', '123', 123, 'b', 'B', 'False', False, 123, None, 'None']
print(len(set(my_list)))
#3.6
print(len("This is my third python lab.".split()))
#3.7
num_list = [12,32,43,35]
#print(max(num_list))
num_list.sort()
print(num_list[0])
print(num_list[-1])
#3.8
game_board = [ [0,0,0],
[0,0,0],
[0,0,0]]
'''
[ [0,0,0],
[0,1,0],
[0,0,0]]
'''
game_board[1][1]=1
print(game_board) | 13.24 | 72 | 0.557402 |
0a49359dea03184b14b8f76f85a74453a42dc7cb | 450 | py | Python | data/scripts/templates/object/draft_schematic/weapon/shared_razor_knuckler.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/weapon/shared_razor_knuckler.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/weapon/shared_razor_knuckler.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Intangible draft-schematic template for the shared
	razor knuckler weapon.

	``kernel`` is supplied by the template loader and is not used here
	(assumption from this file alone -- confirm against the loader).

	:return: the configured Intangible template object.
	"""
	result = Intangible()
	result.template = "object/draft_schematic/weapon/shared_razor_knuckler.iff"
	result.attribute_template_id = -1  # -1: no attribute template attached
	result.stfName("string_id_table","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
d7fe51695a772cb8f93b7de5f69ed2953b06cd75 | 7,861 | py | Python | read.py | moskalev/canvas-student-list | 8822e8a9de8751e61a2bf337f5df7f0cfdde4951 | [
"MIT"
] | null | null | null | read.py | moskalev/canvas-student-list | 8822e8a9de8751e61a2bf337f5df7f0cfdde4951 | [
"MIT"
] | null | null | null | read.py | moskalev/canvas-student-list | 8822e8a9de8751e61a2bf337f5df7f0cfdde4951 | [
"MIT"
] | null | null | null | # __author__ = 'dimitrios'
# later modified on 9/05/18, not the original version
from calls import APICalls
import os
import time
import json
class CanvasReader(object):
    """
    Class that contains functions useful for downloading (reading) entities
    for a course. Essentially a wrapper for API GET calls for groups of data.

    Input always contains a course_id which is a string, e.g. '1112'.
    The token that authorises the calls must have access to the course
    material, i.e. belong to a professor or TA.
    (Failures are not handled here; callers should handle exceptions.)
    """

    def __init__(self, access_token, base_url, api_prefix='/api/v1', verbose=True):
        """
        :param access_token: Canvas API token with access to the course
        :param base_url: base URL of the Canvas instance
        :param api_prefix: path prefix of the REST API
        :param verbose: forwarded to APICalls to control logging
        """
        self.api = APICalls(access_token, base_url + api_prefix, verbose=verbose)

    def get_course_info(self, course_id):
        """
        Returns information about a course.

        :param course_id: string, e.g. '1112'
        :return: dictionary with course fields such as ``name``,
            ``course_code``, ``start_at``, ``enrollments``, ...
        """
        return self.api.get('/courses/%s' % course_id, single=True)

    def get_users(self, course_id):
        """
        Lists the users enrolled in a course.

        :param course_id: string, e.g. '1121'
        :return: list of dictionaries (one per user) with fields
            ``sortable_name``, ``id``, ``short_name``, ``name``
        """
        return self.api.get('/courses/%s/users' % course_id)

    def get_sections(self, course_id):
        """Lists the sections of a course."""
        return self.api.get('/courses/%s/sections' % course_id)

    def get_section_info(self, course_id, section_id):
        """
        Returns a single section, including its students and enrollments.

        :param course_id: string
        :param section_id: string
        """
        parameters = {'include[]': ['students', 'enrollments']}
        print(parameters)  # leftover debug output, kept for compatibility
        return self.api.get('/courses/%s/sections/%s/' % (course_id, section_id),
                            parameters=parameters, combine=False)

    def get_student_assignment_submissions(self, course_id, students):
        """
        Returns the submissions of the given students, grouped by student.

        :param course_id: string
        :param students: list of student ids
        """
        parameters = {'student_ids': students, 'grouped': True}
        user_submissions = self.api.get('/courses/%s/students/submissions' % course_id,
                                        parameters=parameters)
        return user_submissions

    def get_assignments(self, course_id):
        """
        All the assignments in the course.

        :param course_id: string
        :return: list of assignment dictionaries (``id``, ``name``,
            ``due_at``, ``points_possible``, ``submission_types``, ...)
        """
        return self.api.get('/courses/%s/assignments' % course_id)

    def get_assignment_submissions(self, course_id, assignment_id, grouped=False):
        """
        Returns the submissions for a particular assignment.

        If a local directory named ``<course_id>/<assignment_id>`` exists,
        the raw response is also archived there as ``<timestamp>.dat``.

        :param course_id: string
        :param assignment_id: string
        :param grouped: forwarded to the API to group submissions
        :return: list of submission dictionaries
        """
        parameters = {'grouped': grouped}
        submissions = self.api.get('/courses/%s/assignments/%s/submissions' % (course_id, assignment_id),
                                   parameters=parameters)
        out_dir = os.path.join(str(course_id), str(assignment_id))
        if os.path.isdir(out_dir):
            print(submissions)  # leftover debug output, kept for compatibility
            # BUG FIX: the original did ``f.close`` (attribute access, no
            # call), so the file handle was never closed; use a context
            # manager so it always is.
            out_path = os.path.join(out_dir, "%d.dat" % int(time.time()))
            with open(out_path, "w") as f:
                json.dump(submissions, f)
        return submissions

    def get_assignment_groups(self, course_id):
        """
        Assignments in Canvas are classified into groups; returns info for
        all such groups.

        :param course_id: string
        :return: list of dictionaries with keys ``group_weight``,
            ``position``, ``rules``, ``id``, ``name``
        """
        return self.api.get('/courses/%s/assignment_groups' % course_id)

    def get_discussion_topics(self, course_id):
        """
        Returns a list of all the topics in the course discussion forum.

        :param course_id: string
        :return: list of topic dictionaries (``id``, ``title``,
            ``message``, ``posted_at``, ...)
        """
        return self.api.get('/courses/%s/discussion_topics' % course_id)

    def get_discussion_topic(self, course_id, topic_id):
        """
        Returns information about a specific topic; the most important
        part of the response is ``view`` (the full entry tree).

        :param course_id: str
        :param topic_id: str
        :return: dictionary with keys such as ``new_entries``,
            ``unread_entries``, ``participants``, ``view``
        """
        p = {'include_new_entries': 1}
        return self.api.get('/courses/%s/discussion_topics/%s/view' % (course_id, topic_id),
                            single=True, parameters=p)

    def get_student_summary_analytics(self, course_id):
        """
        Returns aggregated analytics for each student in the course
        (``participations``, ``page_views``, ``tardiness_breakdown``, ...).
        """
        return self.api.get('/courses/%s/analytics/student_summaries' % course_id)

    def get_student_activity_analytics(self, course_id, user_id):
        """
        Returns a dictionary with two keys, ``page_views`` and
        ``participations``, each a list of data points for one student.
        """
        return self.api.get('/courses/%s/analytics/users/%s/activity' % (course_id, user_id),
                            single=True)

    def get_participation_analytics(self, course_id):
        """Course-wide participation/activity analytics."""
        return self.api.get('/courses/%s/analytics/activity' % course_id)

    def get_assignment_analytics(self, course_id):
        """Course-wide per-assignment analytics."""
        return self.api.get('/courses/%s/analytics/assignments' % course_id)
| 48.524691 | 132 | 0.668108 |
8308c27f07b4c3cbbacacf7ef9e1f398a6e77f8b | 12,770 | py | Python | pymem/process.py | StarrFox/Pymem | 1d1b1095c097333941c01ef96125dd6f3b7f22a7 | [
"MIT"
] | 183 | 2015-06-29T08:44:38.000Z | 2022-03-21T08:55:19.000Z | pymem/process.py | StarrFox/Pymem | 1d1b1095c097333941c01ef96125dd6f3b7f22a7 | [
"MIT"
] | 56 | 2015-10-12T21:37:14.000Z | 2022-02-22T08:05:38.000Z | pymem/process.py | StarrFox/Pymem | 1d1b1095c097333941c01ef96125dd6f3b7f22a7 | [
"MIT"
] | 51 | 2015-11-11T03:12:18.000Z | 2022-03-21T08:55:18.000Z | import ctypes
import locale
import logging
import os
import pymem.ressources.advapi32
import pymem.ressources.kernel32
import pymem.ressources.psapi
import pymem.ressources.structure
logger = logging.getLogger(__name__)
def get_python_dll(version):
    """Locate the given python dll (e.g. ``python38.dll``) by scanning the
    modules loaded into the *current* process.

    Parameters
    ----------
    version: str
        Dll file name to look for.

    Returns
    -------
    str
        Full path of the first matching dll, or ``None`` when not loaded.
    """
    pid = ctypes.c_void_p(os.getpid())
    handle = pymem.process.open(pid)
    matches = (
        module.filename
        for module in pymem.process.enum_process_module(handle)
        if module.name == version
    )
    return next(matches, None)
def inject_dll(handle, filepath):
    """Inject a dll into opened process.

    Uses the classic CreateRemoteThread/LoadLibraryA technique: the dll
    path is written into the target's memory and a remote thread is
    started at ``LoadLibraryA`` with that path as its argument.

    Parameters
    ----------
    handle: HANDLE
        Handle to an open object
    filepath: bytes
        Dll to be injected filepath

    Returns
    -------
    DWORD
        The address of injected dll
    """
    # Reserve + commit RWX memory in the target, sized to hold the dll path.
    filepath_address = pymem.ressources.kernel32.VirtualAllocEx(
        handle,
        0,
        len(filepath),
        pymem.ressources.structure.MEMORY_STATE.MEM_COMMIT.value | pymem.ressources.structure.MEMORY_STATE.MEM_RESERVE.value,
        pymem.ressources.structure.MEMORY_PROTECTION.PAGE_EXECUTE_READWRITE.value
    )
    # Copy the dll path bytes into the freshly allocated remote buffer.
    pymem.ressources.kernel32.WriteProcessMemory(handle, filepath_address, filepath, len(filepath), None)
    # Resolve LoadLibraryA locally; this technique relies on kernel32 being
    # mapped at the same base address in the target process as well.
    kernel32_handle = pymem.ressources.kernel32.GetModuleHandleW("kernel32.dll")
    load_library_a_address = pymem.ressources.kernel32.GetProcAddress(kernel32_handle, b"LoadLibraryA")
    # Run LoadLibraryA(filepath) inside the target process.
    thread_h = pymem.ressources.kernel32.CreateRemoteThread(
        handle, None, 0, load_library_a_address, filepath_address, 0, None
    )
    # -1 == INFINITE: block until the remote LoadLibraryA call completes.
    pymem.ressources.kernel32.WaitForSingleObject(thread_h, -1)
    # The path buffer is no longer needed once the dll has been loaded.
    pymem.ressources.kernel32.VirtualFreeEx(
        handle, filepath_address, len(filepath), pymem.ressources.structure.MEMORY_STATE.MEM_RELEASE.value
    )
    dll_name = os.path.basename(filepath)
    dll_name = dll_name.decode('ascii')
    # NOTE(review): GetModuleHandleW looks up modules of the *current*
    # process, not the injected target -- confirm this returns the
    # intended module address.
    module_address = pymem.ressources.kernel32.GetModuleHandleW(dll_name)
    return module_address
def get_luid(name):
    """Resolve a privilege *name* (e.g. ``SeDebugPrivilege``) into its
    locally unique identifier (LUID).

    :raises RuntimeError: when the privilege name cannot be looked up.
    """
    luid = pymem.ressources.structure.LUID()
    success = pymem.ressources.advapi32.LookupPrivilegeValue(None, name, luid)
    if success <= 0:
        raise RuntimeError("Couldn't lookup privilege value")
    return luid
def get_process_token():
    """Open the access token of the current process with TOKEN_ALL_ACCESS.

    :raises RuntimeError: when the token cannot be opened.
    """
    token = ctypes.c_void_p()
    current_process = ctypes.windll.kernel32.GetCurrentProcess()
    opened = pymem.ressources.advapi32.OpenProcessToken(
        current_process, pymem.ressources.structure.TOKEN.TOKEN_ALL_ACCESS, token)
    if opened <= 0:
        raise RuntimeError("Couldn't get process token")
    return token
def set_debug_privilege(lpszPrivilege, bEnablePrivilege):
    """Leverage current process privileges.

    :param lpszPrivilege: privilege name
    :param bEnablePrivilege: Enable privilege
    :type lpszPrivilege: str
    :type bEnablePrivilege: bool
    :return: True if privileges have been leveraged.
    :rtype: bool
    """
    # create a space in memory for a TOKEN_PRIVILEGES structure
    # with one element
    size = ctypes.sizeof(pymem.ressources.structure.TOKEN_PRIVILEGES)
    size += ctypes.sizeof(pymem.ressources.structure.LUID_AND_ATTRIBUTES)
    buffer = ctypes.create_string_buffer(size)
    # Reinterpret the raw buffer as a TOKEN_PRIVILEGES struct.
    tp = ctypes.cast(buffer, ctypes.POINTER(pymem.ressources.structure.TOKEN_PRIVILEGES)).contents
    tp.count = 1
    # Fill the single LUID_AND_ATTRIBUTES entry: which privilege, and
    # whether to enable (SE_PRIVILEGE_ENABLED) or disable (0) it.
    tp.get_array()[0].LUID = get_luid(lpszPrivilege)
    tp.get_array()[0].Attributes = (
        pymem.ressources.structure.SE_TOKEN_PRIVILEGE.SE_PRIVILEGE_ENABLED if bEnablePrivilege else 0
    )
    token = get_process_token()
    # Apply the adjustment to our own process token.
    res = pymem.ressources.advapi32.AdjustTokenPrivileges(token, False, tp, 0, None, None)
    if res == 0:
        raise RuntimeError("AdjustTokenPrivileges error: 0x%08x\n" % ctypes.GetLastError())
    # AdjustTokenPrivileges can "succeed" without assigning everything;
    # that partial failure is reported via GetLastError.
    ERROR_NOT_ALL_ASSIGNED = 1300
    return ctypes.windll.kernel32.GetLastError() != ERROR_NOT_ALL_ASSIGNED
def base_module(handle):
    """Returns process base address, looking at its modules.

    :param handle: A valid handle to an open object.
    :type handle: ctypes.c_void_p
    :return: MODULEINFO of the process' first module (its base), or
        ``None`` when module enumeration fails.
    :rtype: pymem.ressources.structure.MODULEINFO
    """
    # Buffer for up to 1024 module handles.
    hModules = (ctypes.c_void_p * 1024)()
    process_module_success = pymem.ressources.psapi.EnumProcessModulesEx(
        handle,
        ctypes.byref(hModules),
        ctypes.sizeof(hModules),
        ctypes.byref(ctypes.c_ulong()),
        pymem.ressources.structure.EnumProcessModuleEX.LIST_MODULES_ALL
    )
    if not process_module_success:
        return  # xxx -- silently yields None on enumeration failure
    # Per the EnumProcessModules contract the first handle is the
    # executable itself; its MODULEINFO carries the process base address.
    module_info = pymem.ressources.structure.MODULEINFO(handle)
    pymem.ressources.psapi.GetModuleInformation(
        handle,
        ctypes.c_void_p(hModules[0]),
        ctypes.byref(module_info),
        ctypes.sizeof(module_info)
    )
    return module_info
def open(process_id, debug=None, process_access=None):
    """Open a process given its process_id.

    By default the process is opened with full access and in debug mode
    (``SeDebugPrivilege`` is acquired first).

    https://msdn.microsoft.com/en-us/library/windows/desktop/ms684320%28v=vs.85%29.aspx
    https://msdn.microsoft.com/en-us/library/windows/desktop/aa379588%28v=vs.85%29.aspx

    :param process_id: The identifier of the process to be opened
    :param debug: open process in debug mode (defaults to True)
    :param process_access: desired access level (defaults to PROCESS_ALL_ACCESS)
    :type process_id: ctypes.c_void_p
    :type debug: bool
    :type process_access: pymem.ressources.structure
    :return: A handle of the given process_id
    :rtype: ctypes.c_void_p
    """
    debug = True if debug is None else debug
    if debug:
        set_debug_privilege('SeDebugPrivilege', True)
    access = process_access or pymem.ressources.structure.PROCESS.PROCESS_ALL_ACCESS.value
    return pymem.ressources.kernel32.OpenProcess(access, False, process_id)
def open_main_thread(process_id):
    """Open the earliest-created ("main") thread of the given process.

    :param process_id: The identifier of the process
    :type process_id: ctypes.c_void_p
    :return: A handle to the first-created thread, or ``None`` when the
        process has no threads
    :rtype: ctypes.c_void_p
    """
    threads = sorted(
        enum_process_thread(process_id),
        key=lambda entry: entry.creation_time,
    )
    if not threads:
        # todo: raise exception instead of silently returning None
        return None
    return open_thread(threads[0].th32ThreadID)
def open_thread(thread_id, thread_access=None):
    """Opens an existing thread object.

    https://msdn.microsoft.com/en-us/library/windows/desktop/ms684335%28v=vs.85%29.aspx

    :param thread_id: The identifier of the thread to be opened.
    :type thread_id: ctypes.c_void_p
    :param thread_access: desired access mask; defaults to THREAD_ALL_ACCESS.
    :return: A handle to the opened thread.
    :rtype: ctypes.c_void_p
    """
    # FIX: the original used a confusing chained assignment
    # (``thread_access = THREAD_ALL = 0x001F03FF``) that created a dead
    # local alias; use a plainly named constant instead.
    THREAD_ALL_ACCESS = 0x001F03FF
    if not thread_access:
        thread_access = THREAD_ALL_ACCESS
    thread_handle = pymem.ressources.kernel32.OpenThread(thread_access, 0, thread_id)
    return thread_handle
def close_handle(handle):
    """Closes an open object handle.

    https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx

    :param handle: A valid handle to an open object; falsy values are
        ignored and ``None`` is returned.
    :type handle: ctypes.c_void_p
    :return: nonzero on success, ``None`` when *handle* was falsy.
    :rtype: bool
    """
    if not handle:
        return None
    return pymem.ressources.kernel32.CloseHandle(handle)
def list_processes():
    """Iterate over all running processes.

    https://msdn.microsoft.com/en-us/library/windows/desktop/ms682489%28v=vs.85%29.aspx
    https://msdn.microsoft.com/en-us/library/windows/desktop/ms684834%28v=vs.85%29.aspx

    :return: a generator of process entry 32 structures.
    :rtype: Iterator[pymem.ressources.structure.ProcessEntry32]

    .. note:: the same ``ProcessEntry32`` buffer is reused between
        iterations; copy any field you need to keep.
    """
    SNAPPROCESS = 0x00000002
    hSnap = pymem.ressources.kernel32.CreateToolhelp32Snapshot(SNAPPROCESS, 0)
    process_entry = pymem.ressources.structure.ProcessEntry32()
    # dwSize must be initialised before Process32First or the call fails.
    process_entry.dwSize = ctypes.sizeof(process_entry)
    # BUG FIX: the original yielded the first entry twice (once from an
    # ``if`` and again at the top of the ``while`` before advancing);
    # yield each entry exactly once.
    p32 = pymem.ressources.kernel32.Process32First(hSnap, ctypes.byref(process_entry))
    while p32:
        yield process_entry
        p32 = pymem.ressources.kernel32.Process32Next(hSnap, ctypes.byref(process_entry))
    pymem.ressources.kernel32.CloseHandle(hSnap)
def process_from_name(name):
    """Find a running process whose executable name contains *name*.

    Matching is case-insensitive and substring based.

    :param name: The name (or fragment) of the process executable
    :type name: str
    :return: The matching ProcessEntry32, or ``None`` when not found.
    :rtype: ctypes.c_void_p
    """
    needle = name.lower()
    for entry in list_processes():
        exe_name = entry.szExeFile.decode(locale.getpreferredencoding()).lower()
        if needle in exe_name:
            return entry
    return None
def process_from_id(process_id):
    """Find a running process by its identifier.

    :param process_id: The identifier of the process
    :type process_id: ctypes.c_void_p
    :return: The matching ProcessEntry32, or ``None`` when not found.
    :rtype: ctypes.c_void_p
    """
    for entry in list_processes():
        if entry.th32ProcessID == process_id:
            return entry
    return None
def module_from_name(process_handle, module_name):
    """Retrieve a module loaded by the given process (case-insensitive).

    ex:
        d3d9 = module_from_name(process_handle, 'd3d9')

    :param process_handle: A process handle
    :param module_name: The module name
    :type process_handle: ctypes.c_void_p
    :type module_name: str
    :return: the matching MODULEINFO, or ``None`` when not loaded.
    """
    wanted = module_name.lower()
    return next(
        (m for m in enum_process_module(process_handle) if m.name.lower() == wanted),
        None,
    )
def enum_process_thread(process_id):
    """List all threads of given processes_id

    :param process_id: The identifier of the process
    :type process_id: ctypes.c_void_p
    :return: a generator of thread entry 32 structures owned by *process_id*
    :rtype: Iterator[pymem.ressources.structure.ThreadEntry32]
    """
    TH32CS_SNAPTHREAD = 0x00000004
    # Snapshot every thread on the system; ownership is filtered below.
    hSnap = pymem.ressources.kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0)
    thread_entry = pymem.ressources.structure.ThreadEntry32()
    ret = pymem.ressources.kernel32.Thread32First(hSnap, ctypes.byref(thread_entry))
    if not ret:
        # NOTE(review): ``pymem.exception`` is never imported in this
        # module; this relies on the attribute being present on the pymem
        # package at runtime -- confirm. Also, since this is a generator,
        # the error is only raised once iteration starts.
        raise pymem.exception.PymemError('Could not get Thread32First')
    while ret:
        # Yield only threads belonging to the requested process.
        if thread_entry.th32OwnerProcessID == process_id:
            yield thread_entry
        ret = pymem.ressources.kernel32.Thread32Next(hSnap, ctypes.byref(thread_entry))
    pymem.ressources.kernel32.CloseHandle(hSnap)
def enum_process_module(handle):
    """List and retrieves the base names of the specified loaded module within a process

    https://msdn.microsoft.com/en-us/library/windows/desktop/ms682633(v=vs.85).aspx
    https://msdn.microsoft.com/en-us/library/windows/desktop/ms683196(v=vs.85).aspx

    :param handle: A valid handle to an open object.
    :type handle: ctypes.c_void_p
    :return: a generator of MODULEINFO objects, one per loaded module;
        yields nothing when enumeration fails.
    :rtype: Iterator[pymem.ressources.structure.MODULEINFO]
    """
    # Fixed-size buffer for up to 1024 module handles.
    hModules = (ctypes.c_void_p * 1024)()
    process_module_success = pymem.ressources.psapi.EnumProcessModulesEx(
        handle,
        ctypes.byref(hModules),
        ctypes.sizeof(hModules),
        ctypes.byref(ctypes.c_ulong()),
        pymem.ressources.structure.EnumProcessModuleEX.LIST_MODULES_ALL
    )
    if process_module_success:
        # Skip the unused (NULL) slots of the fixed-size array.
        hModules = iter(m for m in hModules if m)
        for hModule in hModules:
            module_info = pymem.ressources.structure.MODULEINFO(handle)
            pymem.ressources.psapi.GetModuleInformation(
                handle,
                ctypes.c_void_p(hModule),
                ctypes.byref(module_info),
                ctypes.sizeof(module_info)
            )
            yield module_info
def is_64_bit(handle):
    """Determines whether the specified process is running under WOW64 (emulation).

    :param handle: A valid handle to an open object.
    :type handle: ctypes.c_void_p
    :return: the WOW64 flag as filled in by the API. Note this is a
        ``ctypes.c_long`` (truthy when the process runs under WOW64),
        not a plain ``bool``.
    :rtype: ctypes.c_long
    """
    Wow64Process = ctypes.c_long()
    # The API success flag is captured but never checked here.
    response = pymem.ressources.kernel32.IsWow64Process(handle, ctypes.byref(Wow64Process))
    return Wow64Process
| 33.693931 | 154 | 0.707126 |
db4eebfdc266c47bb6fad01e84e86864c94a6452 | 306 | py | Python | open/users/apps.py | awesome-archive/open | 12fb6267c7d1a7e1f1c08f6112073bb0739e7ed9 | [
"MIT"
] | 105 | 2019-06-01T08:34:47.000Z | 2022-03-15T11:48:36.000Z | open/users/apps.py | awesome-archive/open | 12fb6267c7d1a7e1f1c08f6112073bb0739e7ed9 | [
"MIT"
] | 111 | 2019-06-04T15:34:14.000Z | 2022-03-12T21:03:20.000Z | open/users/apps.py | awesome-archive/open | 12fb6267c7d1a7e1f1c08f6112073bb0739e7ed9 | [
"MIT"
] | 26 | 2019-09-04T06:06:12.000Z | 2022-01-03T03:40:11.000Z | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
    """Django application configuration for ``open.users``."""

    name = "open.users"
    verbose_name = _("Users")

    def ready(self):
        """Import signal handlers once the app registry is ready.

        The import is best-effort: a missing signals module is ignored.
        """
        try:
            import open.users.signals  # noqa F401
        except ImportError:
            pass
ecac7d2333007baefb1053e49c15bf26aec11a86 | 9,322 | py | Python | docs/conf.py | creativechain/crea-python-graphenelib | 14b0de84c47c21c8ad2f03a9ace7816135345681 | [
"MIT"
] | null | null | null | docs/conf.py | creativechain/crea-python-graphenelib | 14b0de84c47c21c8ad2f03a9ace7816135345681 | [
"MIT"
] | null | null | null | docs/conf.py | creativechain/crea-python-graphenelib | 14b0de84c47c21c8ad2f03a9ace7816135345681 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# crea-graphenelib documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 5 14:06:38 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -- Path setup --------------------------------------------------------------
# Make the project root and the scripts directory importable so autodoc
# can resolve the modules being documented.
import sys
import os
import shlex

sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../scripts/'))

# -- General configuration ---------------------------------------------------
extensions = ["sphinx.ext.autodoc"]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# -- Project information -----------------------------------------------------
project = 'crea-graphenelib'
copyright = '2018, Crea DAO'
author = 'Crea DAO'
version = '0.1'    # short X.Y version
release = '0.1'    # full version, including alpha/beta/rc tags
language = None

exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False

# -- HTML output -------------------------------------------------------------
html_theme = 'pyramid'
html_static_path = ['_static']
htmlhelp_basename = 'crea-graphenelibdoc'

# -- LaTeX output ------------------------------------------------------------
latex_elements = {}
latex_documents = [
    (master_doc, 'crea-graphenelib.tex', 'crea-graphenelib Documentation',
     'Crea DAO', 'manual'),
]

# -- Manual page output -------------------------------------------------------
man_pages = [
    (master_doc, 'crea-graphenelib', 'crea-graphenelib Documentation',
     [author], 1)
]

# -- Texinfo output -----------------------------------------------------------
texinfo_documents = [
    (master_doc, 'crea-graphenelib', 'crea-graphenelib Documentation',
     author, 'crea-graphenelib', 'One line description of project.',
     'Miscellaneous'),
]
9a8ae310689becc2f3b3f126d93145f73b5259d8 | 538 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/tight-hall-29349 | 9906b46a3172f0c133814a88fad986c6cae2cdb7 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/tight-hall-29349 | 9906b46a3172f0c133814a88fad986c6cae2cdb7 | [
"FTL",
"AML",
"RSA-MD"
] | 69 | 2021-08-02T22:00:40.000Z | 2021-12-12T15:09:25.000Z | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/tight-hall-29349 | 9906b46a3172f0c133814a88fad986c6cae2cdb7 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
    """Data migration: ensure Site id=1 carries this app's name/domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "tight-hall-29349.botics.co"
    defaults = {"name": "Tight Hall"}
    if custom_domain:
        defaults["domain"] = custom_domain
    Site.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Run :func:`create_site` once the sites framework is migrated."""

    dependencies = [("sites", "0002_alter_domain_unique")]

    operations = [migrations.RunPython(create_site)]
| 20.692308 | 61 | 0.654275 |
8c9732663be771ea70f197ec3605b25bee086230 | 3,262 | py | Python | plato/processors/registry.py | cuiboyuan/plato | 260b785cbbf8588c92331d6343211ff72321f90e | [
"Apache-2.0"
] | 135 | 2021-04-14T09:06:08.000Z | 2022-03-31T03:38:42.000Z | plato/processors/registry.py | cuiboyuan/plato | 260b785cbbf8588c92331d6343211ff72321f90e | [
"Apache-2.0"
] | 39 | 2021-05-16T00:34:08.000Z | 2022-03-10T22:03:57.000Z | plato/processors/registry.py | cuiboyuan/plato | 260b785cbbf8588c92331d6343211ff72321f90e | [
"Apache-2.0"
] | 41 | 2021-04-14T10:40:07.000Z | 2022-03-28T10:24:20.000Z | """
This registry for Processors contains framework-specific implementations of
Processors for data payloads.
Having a registry of all available classes is convenient for retrieving an instance
based on a configuration at run-time.
"""
import logging
from collections import OrderedDict
from typing import Tuple
from plato.config import Config
from plato.processors import pipeline
# The processors below operate on PyTorch payloads; when the configuration
# selects TensorFlow or MindSpore they do not apply, so both the imports
# and the registry are skipped entirely in those cases.
if not (hasattr(Config().trainer, 'use_tensorflow')
        or hasattr(Config().trainer, 'use_mindspore')):
    from plato.processors import (
        base,
        feature_randomized_response,
        feature_gaussian,
        feature_laplace,
        feature_quantize,
        feature_dequantize,
        feature_unbatch,
        inbound_feature_tensors,
        outbound_feature_ndarrays,
        model_deepcopy,
        model_quantize,
        model_dequantize,
        model_randomized_response,
        model_pruning,
    )
    # Maps the processor names usable in configuration files to their
    # implementing classes; insertion order is preserved.
    registered_processors = OrderedDict([
        ('base', base.Processor),
        ('feature_randomized_response', feature_randomized_response.Processor),
        ('feature_gaussian', feature_gaussian.Processor),
        ('feature_laplace', feature_laplace.Processor),
        ('feature_quantize', feature_quantize.Processor),
        ('feature_dequantize', feature_dequantize.Processor),
        ('feature_unbatch', feature_unbatch.Processor),
        ('inbound_feature_tensors', inbound_feature_tensors.Processor),
        ('outbound_feature_ndarrays', outbound_feature_ndarrays.Processor),
        ('model_deepcopy', model_deepcopy.Processor),
        ('model_quantize', model_quantize.Processor),
        ('model_dequantize', model_dequantize.Processor),
        ('model_randomized_response', model_randomized_response.Processor),
        ('model_pruning', model_pruning.Processor),
    ])
def get(user: str,
        processor_kwargs=None,
        **kwargs) -> Tuple[pipeline.Processor, pipeline.Processor]:
    """ Get the outbound and inbound processor pipelines for a server/client.

    Args:
        user: Either "Server" or "Client"; selects which configuration
            section supplies the processor name lists.
        processor_kwargs: Optional mapping from processor name to extra
            keyword arguments for that processor. Defaults to no overrides.
            (Previously a mutable default ``{}``, which is shared across
            all calls; ``None`` as sentinel avoids that pitfall.)
        **kwargs: Keyword arguments forwarded to every processor.

    Returns:
        A tuple ``(outbound, inbound)`` of ``pipeline.Processor`` instances.
    """
    if processor_kwargs is None:
        processor_kwargs = {}

    outbound_processors = []
    inbound_processors = []

    assert user in ("Server", "Client")

    if user == "Server":
        config = Config().server
    else:
        config = Config().clients

    # Only honour the configured lists when they are actually lists.
    if hasattr(config, 'outbound_processors') and isinstance(
            config.outbound_processors, list):
        outbound_processors = config.outbound_processors

    if hasattr(config, 'inbound_processors') and isinstance(
            config.inbound_processors, list):
        inbound_processors = config.inbound_processors

    for processor in outbound_processors:
        logging.info("%s: Using Processor for sending payload: %s", user,
                     processor)
    for processor in inbound_processors:
        logging.info("%s: Using Processor for receiving payload: %s", user,
                     processor)

    def map_f(name):
        # Merge per-processor overrides (if any) on top of the shared kwargs;
        # the per-processor entries win on key collisions.
        if name in processor_kwargs:
            this_kwargs = {**kwargs, **(processor_kwargs[name])}
        else:
            this_kwargs = kwargs
        return registered_processors[name](**this_kwargs)

    outbound_processors = list(map(map_f, outbound_processors))
    inbound_processors = list(map(map_f, inbound_processors))

    return pipeline.Processor(outbound_processors), pipeline.Processor(
        inbound_processors)
| 34.702128 | 83 | 0.687308 |
c310f19e0bb3c6722b24301985877ebc2cbc4a41 | 2,788 | py | Python | services/core-api/app/api/mines/status/models/mine_status_xref.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null | services/core-api/app/api/mines/status/models/mine_status_xref.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null | services/core-api/app/api/mines/status/models/mine_status_xref.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import validates
from sqlalchemy.schema import FetchedValue
from app.extensions import db
from app.api.utils.models_mixins import AuditMixin, Base
from .mine_operation_status_code import MineOperationStatusCode
from .mine_operation_status_reason_code import MineOperationStatusReasonCode
from .mine_operation_status_sub_reason_code import MineOperationStatusSubReasonCode
class MineStatusXref(AuditMixin, Base):
    """Cross-reference row linking a mine status to its operation status
    code and the optional reason / sub-reason code hierarchy."""
    __tablename__ = 'mine_status_xref'
    mine_status_xref_guid = db.Column(UUID(as_uuid=True), primary_key=True)
    # Top-level operation status (3-char code) with an eagerly-joined lookup.
    mine_operation_status_code = db.Column(
        db.String(3), db.ForeignKey('mine_operation_status_code.mine_operation_status_code'))
    mine_operation_status = db.relationship('MineOperationStatusCode', lazy='joined')
    # Optional reason refining the status.
    mine_operation_status_reason_code = db.Column(
        db.String(3),
        db.ForeignKey('mine_operation_status_reason_code.mine_operation_status_reason_code'))
    mine_operation_status_reason = db.relationship('MineOperationStatusReasonCode', lazy='joined')
    # Optional sub-reason refining the reason.
    mine_operation_status_sub_reason_code = db.Column(
        db.String(3),
        db.ForeignKey(
            'mine_operation_status_sub_reason_code.mine_operation_status_sub_reason_code'))
    mine_operation_status_sub_reason = db.relationship(
        'MineOperationStatusSubReasonCode', lazy='joined')
    description = db.Column(db.String(1024))
    # active_ind default comes from the database (FetchedValue).
    active_ind = db.Column(db.Boolean, nullable=False, server_default=FetchedValue())
    def __repr__(self):
        return '<MineStatusXref %r>' % self.mine_status_xref_guid
    @classmethod
    def get_active(cls):
        """Return all active cross-reference rows."""
        return cls.query.filter_by(active_ind=True).all()
    @classmethod
    def find_by_mine_status_xref_guid(cls, _id):
        """Return the row with the given GUID, or None."""
        return cls.query.filter_by(mine_status_xref_guid=_id).first()
    @classmethod
    def find_by_codes(cls,
                      _mine_operation_status_code,
                      _mine_operation_status_reason_code=None,
                      _mine_operation_status_sub_reason_code=None):
        """Return the first xref matching the status code, optionally
        narrowed by reason and sub-reason codes; None if nothing matches.
        """
        # NOTE(review): the descending order on reason/sub-reason appears
        # intended to prefer the most specific row when the optional
        # filters are omitted -- confirm against calling code.
        xref_query = cls.query \
            .filter_by(mine_operation_status_code=_mine_operation_status_code) \
            .order_by(
                cls.mine_operation_status_reason_code.desc(),
                cls.mine_operation_status_sub_reason_code.desc())
        if _mine_operation_status_reason_code:
            xref_query = xref_query.filter_by(
                mine_operation_status_reason_code=_mine_operation_status_reason_code)
        if _mine_operation_status_sub_reason_code:
            xref_query = xref_query.filter_by(
                mine_operation_status_sub_reason_code=_mine_operation_status_sub_reason_code)
        return xref_query.first()
| 42.892308 | 98 | 0.748207 |
0f265892465262fbfe150661dbd5dbe78a24c808 | 512 | py | Python | BancoDadosPython/selecionar_com_filtro2.py | VictorMello1993/CursoPythonUdemy | d3e2e542a7c3d3f9635f2b88d0e75ab4fa84236d | [
"MIT"
] | null | null | null | BancoDadosPython/selecionar_com_filtro2.py | VictorMello1993/CursoPythonUdemy | d3e2e542a7c3d3f9635f2b88d0e75ab4fa84236d | [
"MIT"
] | 4 | 2021-04-08T21:54:09.000Z | 2022-02-10T14:35:13.000Z | BancoDadosPython/selecionar_com_filtro2.py | VictorMello1993/CursoPythonUdemy | d3e2e542a7c3d3f9635f2b88d0e75ab4fa84236d | [
"MIT"
] | null | null | null | from mysql.connector.errors import ProgrammingError
from bd import nova_conexao
# Filtered select: contacts whose name starts with "Max".
sql = "SELECT * FROM contatos WHERE nome like 'Max%'"
try:
    with nova_conexao() as conexao:
        try:
            cursor = conexao.cursor()
            cursor.execute(sql)
        except ProgrammingError as e:
            print(f'Erro: {e.msg}')
        else:
            # The statement ran fine: stream and print every matching row.
            for registro in cursor:
                print(registro)
except ProgrammingError as e:
    print(f'Erro CONEXÃO: {e.msg}')
d861425c5f213834160399c16316f28c9ba0fccb | 2,412 | py | Python | omni-mini/prelinear/filelists/miniImagenet/write_cross_filelist.py | indy-lab/ProtoTransfer | 90a526bb209160e376b2b8290e99b0f62b240052 | [
"MIT"
] | 43 | 2020-06-23T08:09:08.000Z | 2022-03-03T03:05:34.000Z | omni-mini/prelinear/filelists/miniImagenet/write_cross_filelist.py | Asphalt93/ProtoTransfer | 2e186ffd5bd795244c6dd7192575b84f935c5749 | [
"MIT"
] | 5 | 2020-07-03T20:58:24.000Z | 2021-08-04T06:34:09.000Z | omni-mini/prelinear/filelists/miniImagenet/write_cross_filelist.py | Asphalt93/ProtoTransfer | 2e186ffd5bd795244c6dd7192575b84f935c5749 | [
"MIT"
] | 9 | 2020-07-21T03:26:29.000Z | 2021-09-26T06:10:37.000Z | import numpy as np
from os import listdir
from os.path import isfile, isdir, join
import os
import json
import random
import re
cwd = os.getcwd()
data_path = join(cwd,'ILSVRC2015/Data/CLS-LOC/train')
savedir = './'
dataset_list = ['base', 'val', 'novel']
#if not os.path.exists(savedir):
# os.makedirs(savedir)
cl = -1
folderlist = []
datasetmap = {'base':'train','val':'val','novel':'test'};
filelists = {'base':{},'val':{},'novel':{} }
filelists_flat = {'base':[],'val':[],'novel':[] }
labellists_flat = {'base':[],'val':[],'novel':[] }
for dataset in dataset_list:
with open(datasetmap[dataset] + ".csv", "r") as lines:
for i, line in enumerate(lines):
if i == 0:
continue
fid, _ , label = re.split(',|\.', line)
label = label.replace('\n','')
if not label in filelists[dataset]:
folderlist.append(label)
filelists[dataset][label] = []
fnames = listdir( join(data_path, label) )
fname_number = [ int(re.split('_|\.', fname)[1]) for fname in fnames]
sorted_fnames = list(zip( *sorted( zip(fnames, fname_number), key = lambda f_tuple: f_tuple[1] )))[0]
fid = int(fid[-5:])-1
fname = join( data_path,label, sorted_fnames[fid] )
filelists[dataset][label].append(fname)
for key, filelist in filelists[dataset].items():
cl += 1
random.shuffle(filelist)
filelists_flat[dataset] += filelist
labellists_flat[dataset] += np.repeat(cl, len(filelist)).tolist()
#cross setting use base/val/novel together
filelists_flat_all = filelists_flat['base'] + filelists_flat['val'] + filelists_flat['novel']
labellists_flat_all = labellists_flat['base'] + labellists_flat['val'] + labellists_flat['novel']
fo = open(savedir + "all.json", "w")
fo.write('{"label_names": [')
fo.writelines(['"%s",' % item for item in folderlist])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_names": [')
fo.writelines(['"%s",' % item for item in filelists_flat_all])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write('],')
fo.write('"image_labels": [')
fo.writelines(['%d,' % item for item in labellists_flat_all])
fo.seek(0, os.SEEK_END)
fo.seek(fo.tell()-1, os.SEEK_SET)
fo.write(']}')
fo.close()
print("all -OK")
| 33.041096 | 118 | 0.606551 |
1ebeb0c6615945e758bae4455c674657424932d5 | 1,719 | py | Python | scripts/analogy_exp.py | HusainZafar/dig-wikifier | 90b1e9764e46d584b537f134fa6b191aaae4cd67 | [
"MIT"
] | 3 | 2018-11-28T23:09:58.000Z | 2021-06-11T05:27:52.000Z | scripts/analogy_exp.py | HusainZafar/dig-wikifier | 90b1e9764e46d584b537f134fa6b191aaae4cd67 | [
"MIT"
] | null | null | null | scripts/analogy_exp.py | HusainZafar/dig-wikifier | 90b1e9764e46d584b537f134fa6b191aaae4cd67 | [
"MIT"
] | 3 | 2019-05-30T19:02:23.000Z | 2020-10-11T13:43:01.000Z | import redis
import argparse
from collections import defaultdict
import json
# Install dependencies hiredis, redis
import math
import json
import numpy as np
from gensim.models import Word2Vec
""" This script takes in a word2vec trained model and can perform some analogy queries, specifically x - y + z (a.k.a the king - man + woman) type queries. Initialize a list of x,y,z as tuples in the script below. The script requires the word2vec model file, along with a redis instance to connect with which contains a label for each Qnode id."""
parser = argparse.ArgumentParser()
parser.add_argument("-b","--binaryfile")
parser.add_argument("-x","--host")
parser.add_argument("-p","--port")
parser.add_argument("-o","--output")
args = parser.parse_args()
redis_client = redis.StrictRedis(
host=str(args.host),
port=int(args.port),
decode_responses=True)
# Load the nodemap for the mapping
# Load entity to vec file here
model = Word2Vec.load(args.binaryfile)
outfile = open(args.output,'w')
# Set the x,y,z triples here
queries = [
('Q47703','Q56094','Q42574')
]
print("Loaded embeddings")
for x in queries:
in1,in2,in3 = x
lb1 = redis_client.smembers('lbl:'+in1)
lb2 = redis_client.smembers('lbl:'+in2)
lb3 = redis_client.smembers('lbl:'+in3)
wv1, wv2, wv3 = model.wv[in1],model.wv[in2],model.wv[in3]
res = (wv1 - wv2) + wv3
x = model.similar_by_vector(res,topn=20,restrict_vocab = None)
r = {}
for item in x:
lb = redis_client.smembers('lbl:'+item[0])
r[item[0]] = list(lb)
outfile.write('Query = {} ({}) - {} ({}) + {} ({})\n'.format(in1,lb1,in2,lb2,in3,lb3))
for k in r:
outfile.write('{} {}\n'.format(k, r[k]))
| 31.833333 | 347 | 0.672484 |
b3e548cd18a8ca5e0ff85516f6d835a475ad57fd | 411 | py | Python | server/asgi.py | arsonite/lockkliye_backend | 8dc49654ff674b8b058cf40fb055e6437a58018c | [
"MIT"
] | null | null | null | server/asgi.py | arsonite/lockkliye_backend | 8dc49654ff674b8b058cf40fb055e6437a58018c | [
"MIT"
] | 10 | 2020-06-05T20:47:00.000Z | 2022-01-13T02:11:48.000Z | server/asgi.py | arsonite/lockkliye_backend | 8dc49654ff674b8b058cf40fb055e6437a58018c | [
"MIT"
] | null | null | null | """
ASGI config for lockkliye_backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lockkliye_backend.settings')
application = get_asgi_application()
| 24.176471 | 78 | 0.79562 |
41f8c5378d337a31ba80c8b6025df203f43daa46 | 3,221 | py | Python | linalg/decompose.py | Markus28/Scientific-Computing | 74c390c55561c9cbab3aaf0cab01eb3ff61225c2 | [
"MIT"
] | null | null | null | linalg/decompose.py | Markus28/Scientific-Computing | 74c390c55561c9cbab3aaf0cab01eb3ff61225c2 | [
"MIT"
] | null | null | null | linalg/decompose.py | Markus28/Scientific-Computing | 74c390c55561c9cbab3aaf0cab01eb3ff61225c2 | [
"MIT"
] | null | null | null | import numpy as np
def symmetric_gauss(A):
if(A.size == 1):
return np.array([1]), A.copy()
U = np.eye(A.shape[0])
U[0, 1:] = -A[0, 1:]/A[0,0]
U_next = np.eye(A.shape[0])
A_new = np.matmul(U.T, np.matmul(A, U))
U_next[1:, 1:], A_new[1:, 1:] = symmetric_gauss(A_new[1:, 1:])
return np.matmul(U, U_next), A_new.diagonal()
def cholesky(A):
U, D = symmetric_gauss(A)
E = np.diag(np.sqrt(D))
return solve_triangular(U.T, E, lower=True).T
def factor_QR(A):
if A.size == 1: #Base case
return A/np.linalg.norm(A), np.linalg.norm(A)
if A.size == 0:
return np.eye(A.shape[0]), A
x_bar = np.zeros((A.shape[0]))
x_bar[0] = np.linalg.norm(A[:,0])
u = A[:,0]-x_bar
u = u/np.linalg.norm(u)
mirror = (np.eye(A.shape[0])-2*np.outer(u, u))
B = np.matmul(mirror, A)
Q_next = np.eye((A.shape[0]))
Q_next[1:, 1:], B[1:, 1:] = factor_QR(B[1:, 1:])
return np.matmul(mirror.T, Q_next), B
def rotation_QR(A):
if A.size == 1: #Base case
return A/np.linalg.norm(A), np.linalg.norm(A)
if A.size == 0:
return np.eye(A.shape[0]), A
B = A.copy()
Q = np.eye(B.shape[0])
for i in range(1, B.shape[0]):
if B[i, 0]!=0:
r = np.sqrt(B[0, 0]**2 + B[i, 0]**2)
cosine_phi = B[0, 0]/r
sine_phi = B[i,0]/r
rot = np.eye(B.shape[0]) #Build the rotation matrix
rot[0, 0] = cosine_phi
rot[i, 0] = -sine_phi
rot[0, i] = sine_phi
rot[i, i] = cosine_phi
B = np.matmul(rot, B)
Q = np.matmul(Q, rot.T)
Q_next = np.eye(B.shape[0])
Q_next[1:, 1:], B[1:, 1:] = rotation_QR(B[1:, 1:]) #Recurse
return np.matmul(Q, Q_next), B
def gram_schmidt(A):
Q = np.zeros((A.shape[0], A.shape[0]), np.complex_)
R = np.zeros_like(A, np.complex_)
Q[:,0] = A[:, 0]
norm = np.linalg.norm(Q[:, 0])
Q[:, 0] = Q[:, 0]/norm
R[0,0] = norm
for i in range(1, A.shape[1]):
coefficients = np.zeros((i), np.complex_)
for k in range(i):
coefficients[k] = np.dot(np.conj(A[:, i]), Q[:, k])
Q[:, i] = A[:, i]-np.matmul(Q[:, :i], coefficients)
norm = np.linalg.norm(Q[:, i])
Q[:, i] = Q[:, i]/norm
R[:i, i] = coefficients
R[i,i] = norm
return Q, R
def modified_gram_schmidt(A):
R = np.zeros_like(A, np.complex_)
Q = np.zeros((A.shape[0], A.shape[0]), np.complex_)
V = A.astype(np.complex_)
for i in range(A.shape[1]):
R[i,i] = np.linalg.norm(V[:, i])
q = V[:, i]/R[i,i]
Q[:, i] = q
for k in range(i, A.shape[1]):
R[i, k] = np.dot(np.conj(q), V[:, k])
V[:, k] = V[:, k]-R[i,k]*q
return Q, R
if __name__=="__main__":
Q, R = rotation_QR(np.array([[1.0,2.0, -4, 1], [3.0, 4.0,234.4, -3], [2,1,2,0.0004], [0,0,2,0]]))
#Q, R = factor_QR(np.array([[1, -2], [1, 0], [1, 1], [1, 3]]).T)
print(np.matmul(Q, Q.T))
print()
print(R)
print()
print(np.matmul(Q, R))
| 25.362205 | 101 | 0.472524 |
0c890240ca54e8237bb05b823ac6aa0a8b5ee80f | 1,102 | py | Python | agent/indy_catalyst_agent/messaging/trustping/handlers/ping_handler.py | blhagadorn/indy-catalyst | c268dba024096d312f541fde40443a1757f21661 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/trustping/handlers/ping_handler.py | blhagadorn/indy-catalyst | c268dba024096d312f541fde40443a1757f21661 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/trustping/handlers/ping_handler.py | blhagadorn/indy-catalyst | c268dba024096d312f541fde40443a1757f21661 | [
"Apache-2.0"
] | null | null | null | """Ping handler."""
from ...base_handler import BaseHandler, BaseResponder, RequestContext
from ..messages.ping import Ping
from ..messages.ping_response import PingResponse
from ....models.thread_decorator import ThreadDecorator
class PingHandler(BaseHandler):
"""Ping handler class."""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""
Handle ping message.
Args:
context: Request context
responder: Responder used to reply
"""
self._logger.debug(f"PingHandler called with context {context}")
assert isinstance(context.message, Ping)
self._logger.info("Received trust ping from: %s", context.sender_did)
if not context.connection_active:
self._logger.info(
"Connection not active, skipping ping response: %s", context.sender_did
)
return
if context.message.response_requested:
reply = PingResponse(_thread=ThreadDecorator(thid=context.message._id))
await responder.send_reply(reply)
| 31.485714 | 87 | 0.664247 |
c060bb97c10c561efaa33a86600b5c8cf481d5fe | 3,914 | py | Python | nipype/workflows/dmri/dipy/denoise.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | nipype/workflows/dmri/dipy/denoise.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | nipype/workflows/dmri/dipy/denoise.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# coding: utf-8
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from builtins import range
from ....pipeline import engine as pe
from ....interfaces import utility as niu
from ....interfaces import dipy
def nlmeans_pipeline(name='Denoise',
params={'patch_radius': 1, 'block_radius': 5}):
"""
Workflow that performs nlmeans denoising
Example
-------
>>> from nipype.workflows.dmri.dipy.denoise import nlmeans_pipeline
>>> denoise = nlmeans_pipeline()
>>> denoise.inputs.inputnode.in_file = 'diffusion.nii'
>>> denoise.inputs.inputnode.in_mask = 'mask.nii'
>>> denoise.run() # doctest: +SKIP
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_mask']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']),
name='outputnode')
nmask = pe.Node(niu.Function(input_names=['in_file', 'in_mask'],
output_names=['out_file'], function=bg_mask),
name='NoiseMsk')
nlmeans = pe.Node(dipy.Denoise(**params), name='NLMeans')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, nmask, [('in_file', 'in_file'),
('in_mask', 'in_mask')]),
(inputnode, nlmeans, [('in_file', 'in_file'),
('in_mask', 'in_mask')]),
(nmask, nlmeans, [('out_file', 'noise_mask')]),
(nlmeans, outputnode, [('out_file', 'out_file')])
])
return wf
def csf_mask(in_file, in_mask, out_file=None):
"""
Artesanal mask of csf in T2w-like images
"""
import nibabel as nb
import numpy as np
from scipy.ndimage import binary_erosion, binary_opening, label
import scipy.ndimage as nd
import os.path as op
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("%s_csfmask%s" % (fname, ext))
im = nb.load(in_file)
hdr = im.header.copy()
hdr.set_data_dtype(np.uint8)
hdr.set_xyzt_units('mm')
imdata = im.get_data()
msk = nb.load(in_mask).get_data()
msk = binary_erosion(msk,
structure=np.ones((15, 15, 10))).astype(np.uint8)
thres = np.percentile(imdata[msk > 0].reshape(-1), 90.0)
imdata[imdata < thres] = 0
imdata = imdata * msk
imdata[imdata > 0] = 1
imdata = binary_opening(imdata,
structure=np.ones((2, 2, 2))).astype(np.uint8)
label_im, nb_labels = label(imdata)
sizes = nd.sum(imdata, label_im, list(range(nb_labels + 1)))
mask_size = sizes != sizes.max()
remove_pixel = mask_size[label_im]
label_im[remove_pixel] = 0
label_im[label_im > 0] = 1
nb.Nifti1Image(label_im.astype(np.uint8), im.affine,
hdr).to_filename(out_file)
return out_file
def bg_mask(in_file, in_mask, out_file=None):
"""
Rough mask of background from brain masks
"""
import nibabel as nb
import numpy as np
from scipy.ndimage import binary_dilation
import scipy.ndimage as nd
import os.path as op
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
if ext == ".gz":
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
out_file = op.abspath("%s_bgmask%s" % (fname, ext))
im = nb.load(in_file)
hdr = im.header.copy()
hdr.set_data_dtype(np.uint8)
hdr.set_xyzt_units('mm')
imdata = im.get_data()
msk = nb.load(in_mask).get_data()
msk = 1 - binary_dilation(msk, structure=np.ones((20, 20, 20)))
nb.Nifti1Image(msk.astype(np.uint8), im.affine, hdr).to_filename(out_file)
return out_file
| 32.890756 | 78 | 0.597087 |
f5f3419772d94c855836bbd4d4a4cc618cbf6504 | 609 | py | Python | urls.py | crschmidt/labs-rectifier | eb384cb6948459ec7391eaf4cb7ce6d39b95aa3b | [
"BSD-3-Clause-Clear"
] | 6 | 2015-07-03T01:08:36.000Z | 2019-11-19T23:53:04.000Z | urls.py | alfcrisci/labs-rectifier | 0bb91d20dc00b133e3ec1936e09c974788550315 | [
"BSD-3-Clause-Clear"
] | 1 | 2020-05-13T18:45:18.000Z | 2020-05-13T18:45:18.000Z | urls.py | alfcrisci/labs-rectifier | 0bb91d20dc00b133e3ec1936e09c974788550315 | [
"BSD-3-Clause-Clear"
] | 4 | 2015-08-26T21:35:39.000Z | 2020-04-21T21:37:04.000Z | from django.conf.urls.defaults import *
from django.conf import settings
urlpatterns = patterns('',
(r'^', include('rectifier.main.urls')),
(r'^rectifier/', include('rectifier.main.urls')),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': 'static/'}),
(r'^rectifier/static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': 'static/'}),
(r'^uploads/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
(r'^rectifier/uploads/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
| 43.5 | 111 | 0.646962 |
25e3b2533ac71a0756a9ccbdf83766c6baf4f2a2 | 1,098 | py | Python | src/module/get_feature_value.py | Ooshita/Mojikara | 0ee99a97d605c8fa033e89367cc2aee76df51fe4 | [
"Apache-2.0"
] | null | null | null | src/module/get_feature_value.py | Ooshita/Mojikara | 0ee99a97d605c8fa033e89367cc2aee76df51fe4 | [
"Apache-2.0"
] | null | null | null | src/module/get_feature_value.py | Ooshita/Mojikara | 0ee99a97d605c8fa033e89367cc2aee76df51fe4 | [
"Apache-2.0"
] | null | null | null | import MeCab
from collections import Counter
keywords = []
m = MeCab.Tagger(' -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd')
class Get_feature:
# 起動時に一度だけ動く処理
def __init__(self):
self.keywords = []
def morph_result(document):
node = m.parseToNode(document).next
while node:
nodeFeature = node.feature.split(",")
# 名詞,一般と名詞.名詞,固有名のみ抽出
if nodeFeature[0] == "名詞" and nodeFeature[1] == "一般" or nodeFeature[0] == "名詞" and nodeFeature[1] == "固有名詞":
keywords.append(node.surface)
node = node.next
return keywords
def most_word(keywords):
# 単語の頻出度を調べる
word_dict = {}
count_word = Counter(keywords)
for word, cnt in count_word.most_common():
# 単語(キー)と頻度を辞書に入れる
word_dict.update({word:cnt})
# 最大値のキーを表示
most_key = ""
for word, value in word_dict.items():
if value == max(word_dict.values()):
#print("最頻出単語: " + word)
most_key = word
return most_key
| 30.5 | 121 | 0.561931 |
c5aafee690eee6ce4a4a1a264299eb669b56fd16 | 17,017 | py | Python | data/microenv_params.py | PhysiCell-Models/three_types_NH | 86eb269df16322415be329d80a236fb032360b58 | [
"BSD-3-Clause"
] | 1 | 2021-06-22T19:00:01.000Z | 2021-06-22T19:00:01.000Z | data/microenv_params.py | PhysiCell-Models/three_types_NH | 86eb269df16322415be329d80a236fb032360b58 | [
"BSD-3-Clause"
] | null | null | null | data/microenv_params.py | PhysiCell-Models/three_types_NH | 86eb269df16322415be329d80a236fb032360b58 | [
"BSD-3-Clause"
] | 1 | 2020-11-10T14:44:23.000Z | 2020-11-10T14:44:23.000Z |
# This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file.
#
# Edit at your own risk.
#
import os
from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box
class MicroenvTab(object):
def __init__(self):
micron_units = Label('micron') # use "option m" (Mac, for micro symbol)
constWidth = '180px'
tab_height = '500px'
stepsize = 10
#style = {'description_width': '250px'}
style = {'description_width': '25%'}
layout = {'width': '400px'}
name_button_layout={'width':'25%'}
widget_layout = {'width': '15%'}
widget2_layout = {'width': '10%'}
units_button_layout ={'width':'15%'}
desc_button_layout={'width':'45%'}
menv_var1 = Button(description='resource (none)', disabled=True, layout=name_button_layout)
menv_var1.style.button_color = 'tan'
param_name1 = Button(description='diffusion_coefficient', disabled=True, layout=name_button_layout)
self.resource_diffusion_coefficient = FloatText(value=100000.0,
step=10000,style=style, layout=widget_layout)
param_name2 = Button(description='decay_rate', disabled=True, layout=name_button_layout)
self.resource_decay_rate = FloatText(value=0,
step=0.01,style=style, layout=widget_layout)
param_name3 = Button(description='initial_condition', disabled=True, layout=name_button_layout)
self.resource_initial_condition = FloatText(value=1,style=style, layout=widget_layout)
param_name4 = Button(description='Dirichlet_boundary_condition', disabled=True, layout=name_button_layout)
self.resource_Dirichlet_boundary_condition = FloatText(value=1,style=style, layout=widget_layout)
self.resource_Dirichlet_boundary_condition_toggle = Checkbox(description='on/off', disabled=False,style=style, layout=widget_layout)
menv_var2 = Button(description='signal_A (none)', disabled=True, layout=name_button_layout)
menv_var2.style.button_color = 'lightgreen'
param_name5 = Button(description='diffusion_coefficient', disabled=True, layout=name_button_layout)
self.signal_A_diffusion_coefficient = FloatText(value=1000.0,
step=100,style=style, layout=widget_layout)
param_name6 = Button(description='decay_rate', disabled=True, layout=name_button_layout)
self.signal_A_decay_rate = FloatText(value=0.1,
step=0.01,style=style, layout=widget_layout)
param_name7 = Button(description='initial_condition', disabled=True, layout=name_button_layout)
self.signal_A_initial_condition = FloatText(value=0,style=style, layout=widget_layout)
param_name8 = Button(description='Dirichlet_boundary_condition', disabled=True, layout=name_button_layout)
self.signal_A_Dirichlet_boundary_condition = FloatText(value=0,style=style, layout=widget_layout)
self.signal_A_Dirichlet_boundary_condition_toggle = Checkbox(description='on/off', disabled=False,style=style, layout=widget_layout)
menv_var3 = Button(description='signal_B (none)', disabled=True, layout=name_button_layout)
menv_var3.style.button_color = 'tan'
param_name9 = Button(description='diffusion_coefficient', disabled=True, layout=name_button_layout)
self.signal_B_diffusion_coefficient = FloatText(value=1000.0,
step=100,style=style, layout=widget_layout)
param_name10 = Button(description='decay_rate', disabled=True, layout=name_button_layout)
self.signal_B_decay_rate = FloatText(value=.1,
step=0.01,style=style, layout=widget_layout)
param_name11 = Button(description='initial_condition', disabled=True, layout=name_button_layout)
self.signal_B_initial_condition = FloatText(value=0,style=style, layout=widget_layout)
param_name12 = Button(description='Dirichlet_boundary_condition', disabled=True, layout=name_button_layout)
self.signal_B_Dirichlet_boundary_condition = FloatText(value=0,style=style, layout=widget_layout)
self.signal_B_Dirichlet_boundary_condition_toggle = Checkbox(description='on/off', disabled=False,style=style, layout=widget_layout)
menv_var4 = Button(description='signal_C (none)', disabled=True, layout=name_button_layout)
menv_var4.style.button_color = 'lightgreen'
param_name13 = Button(description='diffusion_coefficient', disabled=True, layout=name_button_layout)
self.signal_C_diffusion_coefficient = FloatText(value=1000.0,
step=100,style=style, layout=widget_layout)
param_name14 = Button(description='decay_rate', disabled=True, layout=name_button_layout)
self.signal_C_decay_rate = FloatText(value=.1,
step=0.01,style=style, layout=widget_layout)
param_name15 = Button(description='initial_condition', disabled=True, layout=name_button_layout)
self.signal_C_initial_condition = FloatText(value=0,style=style, layout=widget_layout)
param_name16 = Button(description='Dirichlet_boundary_condition', disabled=True, layout=name_button_layout)
self.signal_C_Dirichlet_boundary_condition = FloatText(value=0,style=style, layout=widget_layout)
self.signal_C_Dirichlet_boundary_condition_toggle = Checkbox(description='on/off', disabled=False,style=style, layout=widget_layout)
self.calculate_gradient = Checkbox(description='calculate_gradients', disabled=False, layout=desc_button_layout)
self.track_internal = Checkbox(description='track_in_agents', disabled=False, layout=desc_button_layout)
# ------- micronenv info
menv_units_button1 = Button(description='micron^2/min', disabled=True, layout=units_button_layout)
menv_units_button2 = Button(description='1/min', disabled=True, layout=units_button_layout)
units_button1 = Button(description='', disabled=True, layout=units_button_layout)
units_button2 = Button(description='', disabled=True, layout=units_button_layout)
menv_units_button5 = Button(description='micron^2/min', disabled=True, layout=units_button_layout)
menv_units_button6 = Button(description='1/min', disabled=True, layout=units_button_layout)
units_button3 = Button(description='', disabled=True, layout=units_button_layout)
units_button4 = Button(description='', disabled=True, layout=units_button_layout)
menv_units_button9 = Button(description='micron^2/min', disabled=True, layout=units_button_layout)
menv_units_button10 = Button(description='1/min', disabled=True, layout=units_button_layout)
units_button5 = Button(description='', disabled=True, layout=units_button_layout)
units_button6 = Button(description='', disabled=True, layout=units_button_layout)
menv_units_button13 = Button(description='micron^2/min', disabled=True, layout=units_button_layout)
menv_units_button14 = Button(description='1/min', disabled=True, layout=units_button_layout)
units_button7 = Button(description='', disabled=True, layout=units_button_layout)
units_button8 = Button(description='', disabled=True, layout=units_button_layout)
row_resource = [menv_var1, ]
row1 = [param_name1, self.resource_diffusion_coefficient, menv_units_button1]
row2 = [param_name2, self.resource_decay_rate, menv_units_button2]
row3 = [param_name3, self.resource_initial_condition, units_button1]
row4 = [param_name4, self.resource_Dirichlet_boundary_condition, units_button2, self.resource_Dirichlet_boundary_condition_toggle]
row_signal_A = [menv_var2, ]
row5 = [param_name5, self.signal_A_diffusion_coefficient, menv_units_button5]
row6 = [param_name6, self.signal_A_decay_rate, menv_units_button6]
row7 = [param_name7, self.signal_A_initial_condition, units_button3]
row8 = [param_name8, self.signal_A_Dirichlet_boundary_condition, units_button4, self.signal_A_Dirichlet_boundary_condition_toggle]
row_signal_B = [menv_var3, ]
row9 = [param_name9, self.signal_B_diffusion_coefficient, menv_units_button9]
row10 = [param_name10, self.signal_B_decay_rate, menv_units_button10]
row11 = [param_name11, self.signal_B_initial_condition, units_button5]
row12 = [param_name12, self.signal_B_Dirichlet_boundary_condition, units_button6, self.signal_B_Dirichlet_boundary_condition_toggle]
row_signal_C = [menv_var4, ]
row13 = [param_name13, self.signal_C_diffusion_coefficient, menv_units_button13]
row14 = [param_name14, self.signal_C_decay_rate, menv_units_button14]
row15 = [param_name15, self.signal_C_initial_condition, units_button7]
row16 = [param_name16, self.signal_C_Dirichlet_boundary_condition, units_button8, self.signal_C_Dirichlet_boundary_condition_toggle]
row17 = [self.calculate_gradient,]
row18 = [self.track_internal,]
box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='100%')
box_resource = Box(children=row_resource, layout=box_layout)
box1 = Box(children=row1, layout=box_layout)
box2 = Box(children=row2, layout=box_layout)
box3 = Box(children=row3, layout=box_layout)
box4 = Box(children=row4, layout=box_layout)
box_signal_A = Box(children=row_signal_A, layout=box_layout)
box5 = Box(children=row5, layout=box_layout)
box6 = Box(children=row6, layout=box_layout)
box7 = Box(children=row7, layout=box_layout)
box8 = Box(children=row8, layout=box_layout)
box_signal_B = Box(children=row_signal_B, layout=box_layout)
box9 = Box(children=row9, layout=box_layout)
box10 = Box(children=row10, layout=box_layout)
box11 = Box(children=row11, layout=box_layout)
box12 = Box(children=row12, layout=box_layout)
box_signal_C = Box(children=row_signal_C, layout=box_layout)
box13 = Box(children=row13, layout=box_layout)
box14 = Box(children=row14, layout=box_layout)
box15 = Box(children=row15, layout=box_layout)
box16 = Box(children=row16, layout=box_layout)
box17 = Box(children=row17, layout=box_layout)
box18 = Box(children=row18, layout=box_layout)
self.tab = VBox([
box_resource,
box1,
box2,
box3,
box4,
box_signal_A,
box5,
box6,
box7,
box8,
box_signal_B,
box9,
box10,
box11,
box12,
box_signal_C,
box13,
box14,
box15,
box16,
box17,
box18,
])
# Populate the GUI widgets with values from the XML
def fill_gui(self, xml_root):
    """Populate the microenvironment GUI widgets from the XML tree.

    Reads the four <variable> nodes (resource, signal_A, signal_B,
    signal_C) under <microenvironment_setup>, plus the <options> flags,
    and copies their values into the matching ipywidgets.

    Args:
        xml_root: root Element of the parsed PhysiCell configuration.

    Raises:
        IndexError: if fewer than four <variable> nodes are present.
    """
    uep = xml_root.find('.//microenvironment_setup')  # unique entry point
    vp = []  # pointers to <variable> nodes
    # Explicit None test: ElementTree elements are falsy when they have
    # no children, so "if uep:" would misbehave (and is deprecated).
    if uep is not None:
        vp = uep.findall('variable')
    # The widget attributes follow the naming scheme <prefix>_<param>,
    # so all four substrates can be filled with one loop instead of the
    # original 4x copy-pasted code.
    for idx, prefix in enumerate(("resource", "signal_A", "signal_B", "signal_C")):
        var = vp[idx]
        getattr(self, prefix + "_diffusion_coefficient").value = float(
            var.find('.//diffusion_coefficient').text)
        getattr(self, prefix + "_decay_rate").value = float(
            var.find('.//decay_rate').text)
        getattr(self, prefix + "_initial_condition").value = float(
            var.find('.//initial_condition').text)
        bc = var.find('.//Dirichlet_boundary_condition')
        getattr(self, prefix + "_Dirichlet_boundary_condition").value = float(bc.text)
        getattr(self, prefix + "_Dirichlet_boundary_condition_toggle").value = (
            bc.attrib['enabled'].lower() == 'true')
    self.calculate_gradient.value = (
        uep.find('.//options//calculate_gradients').text.lower() == 'true')
    self.track_internal.value = (
        uep.find('.//options//track_internalized_substrates_in_each_agent').text.lower() == 'true')
# Read values from the GUI widgets to enable editing XML
def fill_xml(self, xml_root):
    """Write the GUI widget values back into the XML tree.

    Mirror image of fill_gui(): copies the four substrates' widget
    values into the <variable> nodes under <microenvironment_setup>
    and the two <options> flags.

    Args:
        xml_root: root Element of the parsed PhysiCell configuration.

    Raises:
        IndexError: if fewer than four <variable> nodes are present.
    """
    uep = xml_root.find('.//microenvironment_setup')  # unique entry point
    vp = []  # pointers to <variable> nodes
    # Explicit None test: ElementTree elements are falsy when they have
    # no children, so "if uep:" would misbehave (and is deprecated).
    if uep is not None:
        vp = uep.findall('variable')
    # One loop over the substrate prefixes replaces the original 4x
    # copy-pasted per-substrate code.
    for idx, prefix in enumerate(("resource", "signal_A", "signal_B", "signal_C")):
        var = vp[idx]
        var.find('.//diffusion_coefficient').text = str(
            getattr(self, prefix + "_diffusion_coefficient").value)
        var.find('.//decay_rate').text = str(
            getattr(self, prefix + "_decay_rate").value)
        var.find('.//initial_condition').text = str(
            getattr(self, prefix + "_initial_condition").value)
        bc = var.find('.//Dirichlet_boundary_condition')
        bc.text = str(getattr(self, prefix + "_Dirichlet_boundary_condition").value)
        # The XML stores the toggle as lowercase "true"/"false".
        bc.attrib['enabled'] = str(
            getattr(self, prefix + "_Dirichlet_boundary_condition_toggle").value).lower()
    uep.find('.//options//calculate_gradients').text = str(self.calculate_gradient.value)
    uep.find('.//options//track_internalized_substrates_in_each_agent').text = str(self.track_internal.value)
| 56.723333 | 142 | 0.711994 |
117fdae52edda3b4ec9b4ecd349f7acba961fa18 | 7,709 | py | Python | fs/tests/test_path.py | jwilk-forks/pyfilesystem | 44573f70e72b2cf378ee20d1c8bc2084ba975e16 | [
"BSD-3-Clause"
] | 314 | 2015-04-11T10:52:26.000Z | 2022-01-26T07:00:30.000Z | fs/tests/test_path.py | jwilk-forks/pyfilesystem | 44573f70e72b2cf378ee20d1c8bc2084ba975e16 | [
"BSD-3-Clause"
] | 94 | 2015-04-11T10:43:16.000Z | 2021-10-06T11:21:26.000Z | fs/tests/test_path.py | jwilk-forks/pyfilesystem | 44573f70e72b2cf378ee20d1c8bc2084ba975e16 | [
"BSD-3-Clause"
] | 95 | 2015-04-21T02:13:20.000Z | 2021-11-26T05:07:59.000Z | """
fs.tests.test_path: testcases for the fs path functions
"""
import unittest
import fs.tests
from fs.path import *
class TestPathFunctions(unittest.TestCase):
    """Testcases for FS path functions.

    Each test drives one function from fs.path over a table of
    (input(s), expected output) tuples.
    """

    def test_normpath(self):
        """normpath collapses '.'/'..' segments; backslashes are literal."""
        tests = [("\\a\\b\\c", "\\a\\b\\c"),
                 (".", ""),
                 ("./", ""),
                 ("", ""),
                 ("/.", "/"),
                 ("/a/b/c", "/a/b/c"),
                 ("a/b/c", "a/b/c"),
                 ("a/b/../c/", "a/c"),
                 ("/", "/"),
                 (u"a/\N{GREEK SMALL LETTER BETA}/c", u"a/\N{GREEK SMALL LETTER BETA}/c"),
                 ]
        for path, result in tests:
            self.assertEqual(normpath(path), result)

    def test_pathjoin(self):
        """pathjoin joins components; an absolute component resets the path."""
        # Last tuple element is the expected result, the rest are inputs.
        tests = [("", "a", "a"),
                 ("a", "a", "a/a"),
                 ("a/b", "../c", "a/c"),
                 ("a/b/../c", "d", "a/c/d"),
                 ("/a/b/c", "d", "/a/b/c/d"),
                 ("/a/b/c", "../../../d", "/d"),
                 ("a", "b", "c", "a/b/c"),
                 ("a/b/c", "../d", "c", "a/b/d/c"),
                 ("a/b/c", "../d", "/a", "/a"),
                 ("aaa", "bbb/ccc", "aaa/bbb/ccc"),
                 ("aaa", "bbb\\ccc", "aaa/bbb\\ccc"),
                 ("aaa", "bbb", "ccc", "/aaa", "eee", "/aaa/eee"),
                 ("a/b", "./d", "e", "a/b/d/e"),
                 ("/", "/", "/"),
                 ("/", "", "/"),
                 (u"a/\N{GREEK SMALL LETTER BETA}", "c", u"a/\N{GREEK SMALL LETTER BETA}/c"),
                 ]
        for testpaths in tests:
            paths = testpaths[:-1]
            result = testpaths[-1]
            self.assertEqual(pathjoin(*paths), result)
        # Escaping above the root is an error.
        self.assertRaises(ValueError, pathjoin, "..")
        self.assertRaises(ValueError, pathjoin, "../")
        self.assertRaises(ValueError, pathjoin, "/..")
        self.assertRaises(ValueError, pathjoin, "./../")
        self.assertRaises(ValueError, pathjoin, "a/b", "../../..")
        self.assertRaises(ValueError, pathjoin, "a/b/../../../d")

    def test_relpath(self):
        """relpath strips the leading slash."""
        tests = [("/a/b", "a/b"),
                 ("a/b", "a/b"),
                 ("/", "")]
        for path, result in tests:
            self.assertEqual(relpath(path), result)

    def test_abspath(self):
        """abspath ensures a leading slash."""
        tests = [("/a/b", "/a/b"),
                 ("a/b", "/a/b"),
                 ("/", "/")]
        for path, result in tests:
            self.assertEqual(abspath(path), result)

    def test_iteratepath(self):
        """iteratepath yields normalised path components; the optional
        second argument limits how many leading components are split off."""
        tests = [("a/b", ["a", "b"]),
                 ("", []),
                 ("aaa/bbb/ccc", ["aaa", "bbb", "ccc"]),
                 ("a/b/c/../d", ["a", "b", "d"])]
        for path, results in tests:
            for path_component, expected in zip(iteratepath(path), results):
                self.assertEqual(path_component, expected)
        self.assertEqual(list(iteratepath("a/b/c/d", 1)), ["a", "b/c/d"])
        self.assertEqual(list(iteratepath("a/b/c/d", 2)), ["a", "b", "c/d"])

    def test_pathsplit(self):
        """pathsplit returns (dirname, basename) pairs."""
        tests = [("a/b", ("a", "b")),
                 ("a/b/c", ("a/b", "c")),
                 ("a", ("", "a")),
                 ("", ("", "")),
                 ("/", ("/", "")),
                 ("/foo", ("/", "foo")),
                 ("foo/bar", ("foo", "bar")),
                 ("foo/bar/baz", ("foo/bar", "baz")),
                 ]
        for path, result in tests:
            self.assertEqual(pathsplit(path), result)

    def test_recursepath(self):
        """recursepath yields every ancestor from root down (or up when
        reverse=True)."""
        self.assertEquals(recursepath("/"), ["/"])
        self.assertEquals(recursepath("hello"), ["/", "/hello"])
        self.assertEquals(recursepath("/hello/world/"), ["/", "/hello", "/hello/world"])
        self.assertEquals(recursepath("/hello/world/", reverse=True), ["/hello/world", "/hello", "/"])
        self.assertEquals(recursepath("hello", reverse=True), ["/hello", "/"])
        self.assertEquals(recursepath("", reverse=True), ["/"])

    def test_isdotfile(self):
        """isdotfile is True only when the final component starts with '.'."""
        for path in ['.foo',
                     '.svn',
                     'foo/.svn',
                     'foo/bar/.svn',
                     '/foo/.bar']:
            self.assert_(isdotfile(path))
        for path in ['asfoo',
                     'df.svn',
                     'foo/er.svn',
                     'foo/bar/test.txt',
                     '/foo/bar']:
            self.assertFalse(isdotfile(path))

    def test_dirname(self):
        """dirname returns the directory part of a path."""
        tests = [('foo', ''),
                 ('foo/bar', 'foo'),
                 ('foo/bar/baz', 'foo/bar'),
                 ('/foo/bar', '/foo'),
                 ('/foo', '/'),
                 ('/', '/')]
        for path, test_dirname in tests:
            self.assertEqual(dirname(path), test_dirname)

    def test_basename(self):
        """basename returns the final path component."""
        tests = [('foo', 'foo'),
                 ('foo/bar', 'bar'),
                 ('foo/bar/baz', 'baz'),
                 ('/', '')]
        for path, test_basename in tests:
            self.assertEqual(basename(path), test_basename)

    def test_iswildcard(self):
        """iswildcard detects glob metacharacters ('*', '{}') in a path."""
        self.assert_(iswildcard('*'))
        self.assert_(iswildcard('*.jpg'))
        self.assert_(iswildcard('foo/*'))
        self.assert_(iswildcard('foo/{}'))
        self.assertFalse(iswildcard('foo'))
        self.assertFalse(iswildcard('img.jpg'))
        self.assertFalse(iswildcard('foo/bar'))

    def test_realtivefrom(self):
        # NOTE: method name has a typo ("realtive"); left unchanged so
        # existing test selection by name keeps working.
        """relativefrom expresses `path` relative to directory `base`."""
        tests = [('/', '/foo.html', 'foo.html'),
                 ('/foo', '/foo/bar.html', 'bar.html'),
                 ('/foo/bar/', '/egg.html', '../../egg.html'),
                 ('/a/b/c/d', 'e', '../../../../e'),
                 ('/a/b/c/d', 'a/d', '../../../d'),
                 ('/docs/', 'tags/index.html', '../tags/index.html'),
                 ('foo/bar', 'baz/index.html', '../../baz/index.html'),
                 ('', 'a', 'a'),
                 ('a', 'b/c', '../b/c')
                 ]
        for base, path, result in tests:
            self.assertEqual(relativefrom(base, path), result)
class Test_PathMap(unittest.TestCase):
    """Testcases for the PathMap mapping type from fs.path.

    NOTE(review): test_iteration compares zip(...) results with
    .items() using equality and uses range(1, 7) as a list -- this
    looks like Python-2-era semantics; confirm against the targeted
    Python version.
    """

    def test_basics(self):
        """Keys are normalised, so slash variants address the same entry."""
        # "map" shadows the builtin; kept as-is to leave code unchanged.
        map = PathMap()
        map["hello"] = "world"
        self.assertEquals(map["/hello"], "world")
        self.assertEquals(map["/hello/"], "world")
        self.assertEquals(map.get("hello"), "world")

    def test_iteration(self):
        """Iteration can be restricted to a subtree and reflects deletes."""
        map = PathMap()
        map["hello/world"] = 1
        map["hello/world/howareya"] = 2
        map["hello/world/iamfine"] = 3
        map["hello/kitty"] = 4
        map["hello/kitty/islame"] = 5
        map["batman/isawesome"] = 6
        self.assertEquals(set(map.iterkeys()), set(("/hello/world", "/hello/world/howareya", "/hello/world/iamfine", "/hello/kitty", "/hello/kitty/islame", "/batman/isawesome")))
        self.assertEquals(sorted(map.values()), range(1, 7))
        self.assertEquals(sorted(map.items("/hello/world/")), [("/hello/world", 1), ("/hello/world/howareya", 2), ("/hello/world/iamfine", 3)])
        self.assertEquals(zip(map.keys(), map.values()), map.items())
        self.assertEquals(zip(map.keys("batman"), map.values("batman")), map.items("batman"))
        # iternames yields only the direct children of the given prefix.
        self.assertEquals(set(map.iternames("hello")), set(("world", "kitty")))
        self.assertEquals(set(map.iternames("/hello/kitty")), set(("islame",)))
        del map["hello/kitty/islame"]
        self.assertEquals(set(map.iternames("/hello/kitty")), set())
        self.assertEquals(set(map.iterkeys()), set(("/hello/world", "/hello/world/howareya", "/hello/world/iamfine", "/hello/kitty", "/batman/isawesome")))
        self.assertEquals(set(map.values()), set(range(1, 7)) - set((5,)))
| 38.545 | 172 | 0.442729 |
2ff2c9a17601cca061749fe1d16d83cbf1125e0b | 1,845 | py | Python | modules/thirdparty/image/Image_gan/style_transfer/animegan_v2_shinkai_33/module.py | chunzhang-hub/PaddleHub | c5cfd021f77fd59340fb26e223e09a592e6a345f | [
"Apache-2.0"
] | 2 | 2020-12-03T12:35:16.000Z | 2021-07-17T09:30:34.000Z | modules/thirdparty/image/Image_gan/style_transfer/animegan_v2_shinkai_33/module.py | chunzhang-hub/PaddleHub | c5cfd021f77fd59340fb26e223e09a592e6a345f | [
"Apache-2.0"
] | null | null | null | modules/thirdparty/image/Image_gan/style_transfer/animegan_v2_shinkai_33/module.py | chunzhang-hub/PaddleHub | c5cfd021f77fd59340fb26e223e09a592e6a345f | [
"Apache-2.0"
] | null | null | null | import os
from paddlehub import Module
from paddlehub.module.module import moduleinfo, serving
from animegan_v2_shinkai_33.model import Model
from animegan_v2_shinkai_33.processor import base64_to_cv2, cv2_to_base64, Processor
@moduleinfo(
    name="animegan_v2_shinkai_33",  # model name
    type="CV/style_transfer",  # model type
    author="jm12138",  # author name
    author_email="jm12138@qq.com",  # author e-mail
    summary="animegan_v2_shinkai_33",  # model summary
    version="1.0.2"  # version number
)
class Animegan_V2_Shinkai_33(Module):
    """PaddleHub module wrapping the AnimeGAN v2 "Shinkai" style-transfer
    model: converts photos into anime-style images."""

    # Initialiser
    def __init__(self, name=None, use_gpu=False):
        """Load the AnimeGAN model from the module directory.

        Args:
            name: unused here; kept for PaddleHub's Module interface.
            use_gpu: whether to run inference on the GPU.
        """
        # set the model path
        self.model_path = os.path.join(self.directory, "animegan_v2_shinkai_33")
        # load the model
        self.model = Model(modelpath=self.model_path, use_gpu=use_gpu, use_mkldnn=False, combined=False)

    # Style-transfer entry point.  (The original comment said "keypoint
    # detection function" -- apparently a copy-paste leftover.)
    def style_transfer(self,
                       images=None,
                       paths=None,
                       batch_size=1,
                       output_dir='output',
                       visualization=False,
                       min_size=32,
                       max_size=1024):
        """Run anime style transfer on in-memory images or image paths.

        Args:
            images: list of decoded images (exclusive with `paths`).
            paths: list of image file paths.
            batch_size: prediction batch size.
            output_dir: directory for saved results.
            visualization: whether to write result images to output_dir.
            min_size / max_size: bounds used by the input processor.

        Returns:
            Post-processed style-transfer results from the processor.
        """
        # build the pre-/post-processor for the inputs
        processor = Processor(images, paths, batch_size, output_dir, min_size, max_size)
        # run model prediction
        outputs = self.model.predict(processor.input_datas)
        # post-process the raw outputs (optionally saving visualizations)
        results = processor.postprocess(outputs, visualization)
        # return the results
        return results

    # Hub Serving
    @serving
    def serving_method(self, images, **kwargs):
        """HTTP serving entry: accepts base64 images, returns base64 results."""
        # decode the base64 input images
        images_decode = [base64_to_cv2(image) for image in images]
        # run style transfer
        results = self.style_transfer(images_decode, **kwargs)
        # base64-encode the output images
        encodes = []
        for result in results:
            encode = cv2_to_base64(result)
            encodes.append(encode)
        # return the encoded results
        return encodes
| 28.384615 | 104 | 0.609756 |
8d5d0ea833aa63434256ea0c6cb20aab009db814 | 739 | py | Python | tests/core/test_save_config.py | NRauschmayr/sagemaker-debugger | 24ed65631143fcc0457fb325a102500ebd69adfc | [
"Apache-2.0"
] | 133 | 2019-12-03T18:56:27.000Z | 2022-03-18T19:54:49.000Z | tests/core/test_save_config.py | NRauschmayr/sagemaker-debugger | 24ed65631143fcc0457fb325a102500ebd69adfc | [
"Apache-2.0"
] | 384 | 2019-12-04T03:04:14.000Z | 2022-03-31T20:42:48.000Z | tests/core/test_save_config.py | NRauschmayr/sagemaker-debugger | 24ed65631143fcc0457fb325a102500ebd69adfc | [
"Apache-2.0"
] | 64 | 2019-12-05T20:39:51.000Z | 2022-03-25T13:30:54.000Z | # Third Party
# First Party
from smdebug import modes
from smdebug.core.save_config import SaveConfig
def test_export_load():
    """A fully-populated SaveConfig survives a JSON round trip."""
    original = SaveConfig(save_interval=11, start_step=10, save_steps=[50])
    roundtripped = SaveConfig.from_json(original.to_json())
    assert original.to_json() == roundtripped.to_json()
    assert original == roundtripped
def test_load_empty():
    """A default-constructed SaveConfig survives a JSON round trip."""
    cfg = SaveConfig()
    assert cfg == SaveConfig.from_json(cfg.to_json())
def test_load_none():
    """A SaveConfig with only start_step set survives a JSON round trip."""
    cfg = SaveConfig(start_step=100)
    assert cfg == SaveConfig.from_json(cfg.to_json())
def test_end_step():
    """With end_step=0 no step (including step 0) should be saved."""
    cfg = SaveConfig(end_step=0)
    for step in (0, 19, 100):
        assert cfg.should_save_step(modes.GLOBAL, step) is False
| 24.633333 | 69 | 0.713126 |
844940db19429d65a138ab68b849747326e71c4f | 21,649 | py | Python | google_scraper/selenium_scraper.py | FabianGruben/Google_Image_Scraper | eb2569d598f6444c3ab1a27bf9632450a8e9981b | [
"MIT"
] | null | null | null | google_scraper/selenium_scraper.py | FabianGruben/Google_Image_Scraper | eb2569d598f6444c3ab1a27bf9632450a8e9981b | [
"MIT"
] | null | null | null | google_scraper/selenium_scraper.py | FabianGruben/Google_Image_Scraper | eb2569d598f6444c3ab1a27bf9632450a8e9981b | [
"MIT"
] | null | null | null | #%%" ai
from time import sleep
import requests
from requests.exceptions import RequestException
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from custom_exceptions.exceptions import TimeoutExceptionShowMoreImagesButton
from custom_exceptions.exceptions import\
NoSuchElementExceptionShowMoreImagesButton
from custom_exceptions.exceptions import NotEnoughResultsError
from custom_exceptions.exceptions import FileTypeNotImage
from custom_exceptions.exceptions import KeyErrorNoContentTypeHTTPError
from custom_exceptions.exceptions import ScrollNotWorkingError
from custom_expected_conditions import custom_expected_conditions
from logger import setup_custom_logger
# Build the module logger.  If a logger with this name already carries
# handlers (presumably from a previous import/run, e.g. in a notebook),
# clear them and rebuild so log lines are not duplicated -- TODO confirm
# that setup_custom_logger attaches a handler on every call.
logger = setup_custom_logger("GOOGLE IMAGE SCRAPER")
if (logger.hasHandlers()):
    logger.handlers.clear()
    logger = setup_custom_logger("GOOGLE IMAGE SCRAPER")
# To Do: 1. cleaner better exception handling
# 2. Make it a python package and make a nicer github page for it
# 4. logger instead of print
class GoogleImageScraper():
    """Coordinate Google Image downloads for a list of search terms.

    For every search term a fresh ImageQuery instance is created, which
    owns its own Chrome WebDriver.  Selenium only works for repeated
    search queries when the WebDriver is re-initialized inside a brand
    new object each time -- hence one ImageQuery per term.

    Instance variables:
        -search_url: Google Image search URL template containing "q={q}"
        -search_term_list: list of search terms
        -driver_path: path to the chromedriver binary
        -number_files_wanted: number of files to download per search term
        -chrome_options: selenium Options handed to every driver
        -results: dict mapping search terms to lists of (link, path)
         tuples for successfully downloaded images

    Methods:
        -.download_images(): run one ImageQuery per search term
    """

    def __init__(
            self,
            search_url,
            search_term_list,
            DRIVER_PATH,
            number_files_wanted,
            chrome_options):
        self.search_url = search_url
        self.search_term_list = search_term_list
        self.driver_path = DRIVER_PATH
        self.number_files_wanted = number_files_wanted
        self.chrome_options = chrome_options
        self.results = {}

    def factory(self, aClass, *pargs, **kargs):
        """Instantiate aClass with the given positional/keyword arguments."""
        return aClass(*pargs, **kargs)

    def download_images(self):
        """Download images for every term in self.search_term_list.

        Each term gets a fresh ImageQuery whose
        download_images_for_search_term() performs the actual scraping.
        """
        for term in self.search_term_list:
            query = self.factory(
                ImageQuery,
                search_term=term,
                driver_path=self.driver_path,
                chrome_options=self.chrome_options,
                number_files_wanted=self.number_files_wanted,
                search_url=self.search_url,
            )
            query.download_images_for_search_term()
class ImageQuery():
    """Manage a single Google Image query and download its result images.

    One ImageQuery owns one (remote) Chrome WebDriver; a fresh driver per
    query is required because Selenium misbehaves when the driver is
    re-initialized inside the same object for repeated searches.

    Instance variables:
        -search_term: the term this query searches for
        -driver: initialized remote Chrome WebDriver
        -number_files_wanted: number of images to download
        -search_url: search URL template with q="..." replaced by q={q}
        -thumbnail_list: all thumbnail WebElements found on the result page
        -thumbnail_list_current: thumbnails not yet successfully downloaded
        -thumbnail_nr: index of the current thumbnail in thumbnail_list,
         used for logging and file naming
        -thumbnail: the thumbnail WebElement currently being processed
        -link: URL of the full-size image behind the current thumbnail
        -number_files_downloaded / relative_file_path / wanted_height:
         kept for backward compatibility with earlier revisions

    Public methods:
        -download_images_for_search_term(): run the whole download flow
    """

    def __init__(
            self,
            search_term,
            driver_path,
            chrome_options,
            number_files_wanted,
            search_url
    ):
        # BUG FIX: webdriver.Remote requires a URL scheme on the command
        # executor address.  NOTE(review): assumes a Selenium
        # standalone-chrome container reachable under this hostname
        # (e.g. via docker-compose) -- confirm for your deployment.
        self.driver = webdriver.Remote(
            "http://selenium_standalone_chrome:4444/wd/hub",
            options=chrome_options,
        )
        self.number_files_wanted = number_files_wanted
        self.search_url = search_url
        self.number_files_downloaded = 0
        self.thumbnail_list = []
        self.thumbnail_list_current = []
        self.thumbnail_nr = 0
        self.thumbnail = None
        self.link = None
        self.relative_file_path = None
        self.search_url_plus_term = None
        self.wanted_height = None
        self.search_term = search_term

    def download_images_for_search_term(self):
        """Search Google Images for self.search_term and download images.

        Steps:
            1. Open the Google Image results page for the search term.
            2. Scroll through the results collecting thumbnail WebElements
               via __get_thumbnail_list().
            3. Download as many images as possible (at most
               self.number_files_wanted) via __download_nr_thumbnails().

        Returns:
            True if the targeted number of images was downloaded,
            False otherwise.  The WebDriver is quit in either case.
        """
        self.search_url_plus_term = self.search_url.format(q=f"{self.search_term}")
        self.driver.get(self.search_url_plus_term)
        logger.info(
            "Searching for Images on Google Image using the following URL:"
            f"{self.search_url_plus_term}"
        )
        logger.info(
            f"Scanning the {self.number_files_wanted} needed WebElements"
            " for thumbnails on Google Image Result Page"
        )
        try:
            self.__get_thumbnail_list()
            logger.info(
                f"Managed to collect {len(self.thumbnail_list)} number of"
                " thumbnail WebElements"
            )
        except NotEnoughResultsError:
            logger.error(
                "Not enough thumbnail WebElements available in Google Image"
                " search results."
                f" Number of files wanted: {self.number_files_wanted}"
                f" vs. number of thumbnails available: {len(self.thumbnail_list)}"
                " - continuing with the number of thumbnails available"
            )
        except ScrollNotWorkingError:
            logger.error(
                "Scroll Down (window.scrollBy(x,y)) for __testing_end_of_page()"
                " not working. Please troubleshoot the code."
            )
        # Work on a copy; __download_nr_thumbnails removes entries from it.
        self.thumbnail_list_current = self.thumbnail_list[:]
        # BUG FIX: the achievable number of downloads is the *smaller* of
        # "thumbnails available" and "files wanted" (the original used
        # max(), requesting more downloads than there were thumbnails).
        max_images_possible = min(
            len(self.thumbnail_list),
            self.number_files_wanted
        )
        logger.info(
            "Starting downloads for the number of images possible after"
            f" using Google Image Search: {max_images_possible}"
        )
        success, number_missing = self.__download_nr_thumbnails(
            max_images_possible
        )
        try:
            if success:
                logger.info(
                    f"Successfully downloaded {max_images_possible} for search"
                    f" term '{self.search_term}'"
                )
                return True
            logger.info(
                f"Only managed to download {max_images_possible - number_missing}"
                f" of {max_images_possible} images for search term"
                f" '{self.search_term}'"
            )
            return False
        finally:
            # Always release the browser, regardless of outcome.
            self.driver.quit()

    def __download_nr_thumbnails(self, number_thumbnails_requested):
        """Recursively download up to number_thumbnails_requested images.

        Successfully downloaded thumbnails are removed from
        self.thumbnail_list_current; failed ones stay in the list and are
        retried by the recursive call.  Recursion stops when enough images
        were downloaded, when no thumbnails are left, or when a full pass
        made no progress (to avoid infinite recursion over thumbnails that
        fail permanently).

        Returns:
            (True, 0) if the requested number of images was downloaded,
            (False, number_missing_images) otherwise.
        """
        number_downloaded_images = 0
        # BUG FIX: iterate over a snapshot -- the original enumerated the
        # very list it mutated, and popped the *last* element instead of
        # the one that had just been downloaded.
        for self.thumbnail in list(self.thumbnail_list_current):
            # Stable index in the full result list, used for logging and
            # for naming the saved file.
            self.thumbnail_nr = self.thumbnail_list.index(self.thumbnail)
            logger.info(
                f"Starting __start_download_chain() for Thumbnail Nr."
                f" {self.thumbnail_nr}"
                f" of {number_thumbnails_requested}"
            )
            if self.__start_download_chain():
                number_downloaded_images += 1
                self.thumbnail_list_current.remove(self.thumbnail)
            if number_downloaded_images == number_thumbnails_requested:
                break
        number_missing_images = number_thumbnails_requested \
            - number_downloaded_images
        if number_missing_images == 0:
            return (True, 0)
        if not self.thumbnail_list_current or number_downloaded_images == 0:
            # Nothing left to try, or a full pass made no progress.
            return (False, number_missing_images)
        # BUG FIX: the original dropped the recursive result.
        return self.__download_nr_thumbnails(number_missing_images)

    def __get_thumbnail_list(self):
        """Collect all thumbnail WebElements from the results page.

        Repeatedly scrolls to the last thumbnail, clicks the "show more
        images" button when present and stops once the end of the page is
        reached.  The elements are stored in self.thumbnail_list.

        Returns:
            True if at least self.number_files_wanted thumbnails exist.

        Raises:
            NotEnoughResultsError: fewer results than files wanted.
            ScrollNotWorkingError: propagated from __testing_end_of_page().
        """
        while True:
            self.thumbnail_list = self.driver.find_elements_by_css_selector(
                "img.Q4LuWd")
            self.thumbnail = self.thumbnail_list[len(self.thumbnail_list) - 1]
            logger.info(
                "Scrolling to last thumbnail in self.thumbnail_list"
            )
            self.__scroll_to_thumbnail()
            logger.info(
                "Checking and clicking more images button"
            )
            self.__check_click_more_images_button()
            logger.info(
                "Checking whether end of Google Image Search page is reached"
            )
            if self.__testing_end_of_page():
                break
        if len(self.thumbnail_list) >= self.number_files_wanted:
            return True
        raise NotEnoughResultsError

    def __start_download_chain(self):
        """Try to download the image behind self.thumbnail.

        Chain: scroll to the thumbnail, click it to open the preview, and
        save the full-size image via __open_save_image().  Any of the
        expected scraping exceptions aborts the chain for this thumbnail
        only.

        Returns:
            True if the image was saved, False otherwise.
        """
        expected_exception_tuple = (
            TimeoutException,
            ElementClickInterceptedException,
            NoSuchElementException,
            NotEnoughResultsError,
            StaleElementReferenceException,
            RequestException,
            KeyErrorNoContentTypeHTTPError,
        )
        try:
            logger.info(
                f"Scrolling to Thumbnail for Thumbnail Nr. {self.thumbnail_nr} "
                "on Google Image Result Page"
            )
            self.__scroll_to_thumbnail()
            logger.info(
                f"Clicking Thumbnail Nr. {self.thumbnail_nr} "
                "on Google Image Result Page"
            )
            self.__click_thumbnail()
            logger.info(
                f"Opening Thumbnail Nr. {self.thumbnail_nr} "
                f"in a new tab and saving to disk"
            )
            self.__open_save_image()
            return True
        except expected_exception_tuple as e:
            # Log and give up on this thumbnail only (the caller retries
            # or moves on).  The original printed repr(e) to stdout.
            logger.warning(
                "Download chain failed for thumbnail nr. %s: %r",
                self.thumbnail_nr, e
            )
            return False

    def __scroll_to_thumbnail(self):
        """Scroll the viewport to the WebElement saved as self.thumbnail."""
        self.driver.execute_script(
            "arguments[0].scrollIntoView(true);",
            self.thumbnail,
        )

    def __check_click_more_images_button(self):
        """Click the "show more images" button when present and clickable.

        The button being absent or not (yet/anymore) clickable is a normal
        state near the start/end of the result page, so both cases are
        only logged.  (The original raised NoSuchElementException for a
        missing button, which aborted the whole search term.)
        """
        # Wait until the page is fully loaded.
        WebDriverWait(self.driver, 5).until(
            lambda d: d.execute_script(
                'return document.readyState') == 'complete'
        )
        try:
            show_more_images_button = self.driver.find_element_by_css_selector(
                "input.mye4qd"
            )
            WebDriverWait(self.driver, 2).until(
                custom_expected_conditions.ElementClickableTest(
                    show_more_images_button)).click()
        except NoSuchElementException:
            logger.info(
                "Show more images WebElement not found - probably not"
                " present at this scroll position"
            )
        except TimeoutException:
            logger.info(
                "Timeout for clickability of show more images WebElement -"
                "show more images button is probably not visible yet/anymore"
            )

    def __click_thumbnail(self):
        """Click self.thumbnail once it becomes clickable."""
        WebDriverWait(self.driver, 5).until(
            custom_expected_conditions.ElementClickableTest(self.thumbnail)
        ).click()

    def __open_save_image(self):
        """Open the full-size image in a new tab and save it to disk.

        Resolves the image URL via __get_image_link(), opens it in a new
        browser tab, screenshots the <img> element and restores focus to
        the original window.

        Returns:
            True on success.
        """
        # store the ID of the original window
        original_window = self.driver.current_window_handle
        # resolve self.link for the current thumbnail
        self.__get_image_link()
        # open a new tab showing the image
        open_window_script = "window.open('" + self.link + "');"
        self.driver.execute_script(open_window_script)
        # wait for the new window or tab to open
        WebDriverWait(self.driver, 5).until(EC.number_of_windows_to_be(2))
        # switch focus to the newly opened tab
        for window_handle in self.driver.window_handles:
            if window_handle != original_window:
                self.driver.switch_to.window(window_handle)
                break
        # wait until the image document has been loaded
        WebDriverWait(self.driver, 10).until(
            lambda d: d.execute_script(
                'return document.readyState') == 'complete'
        )
        img_element = self.driver.find_element_by_css_selector("img")
        # BUG FIX: selenium expects element screenshot file names to end
        # in ".png".
        name_of_file = f"{self.search_term}_{self.thumbnail_nr}.png"
        img_element.screenshot(name_of_file)
        # close the opened tab and switch focus back
        self.driver.close()
        self.driver.switch_to.window(original_window)
        return True

    def __get_image_link(self):
        """Extract the full-size image URL for the clicked thumbnail.

        The css selector 'img.n3VNCb' matches up to three preview images
        on the result page: previous, current and next.  For the very
        first thumbnail there is no "previous" entry, so the current
        image sits at index 0 instead of 1.  WebDriverWait with
        ElementSrcString is needed because the src is initially a data
        URL (e.g. data:image/gif;base64,...) before the real URL loads.

        Returns:
            True; self.link holds the resolved URL.
        """
        image_index = 0 if self.thumbnail_nr == 0 else 1
        element = WebDriverWait(self.driver, 5).until(
            custom_expected_conditions.ElementSrcString(
                (By.CSS_SELECTOR, 'img.n3VNCb'),
                "http",
                "encrypted-tbn0.gstatic.com",
                image_index,
                self.driver,
            )
        )
        self.link = element.get_attribute('src')
        return True

    def __testing_end_of_page(self):
        """Test whether the end of the results page has been reached.

        Scrolls down slightly and waits up to 2 seconds for the page
        offset to change; if it does not change, the bottom was reached.

        Returns:
            True if the end of the page is reached, False otherwise.

        Raises:
            ScrollNotWorkingError: scrolling has no effect even though
                the document is demonstrably longer than the current
                offset.
        """
        # Wait until the page is fully loaded.
        WebDriverWait(self.driver, 10).until(
            lambda d: d.execute_script(
                'return document.readyState') == 'complete'
        )
        # record the current scroll position
        old_height = self.driver.execute_script(
            "return window.scrollY"
        )
        new_height = old_height
        max_scroll_height = self.driver.execute_script(
            "return document.body.scrollHeight;"
        )
        # window.scrollBy(0,10) sometimes has no effect on the first try
        # (for unknown reasons), so retry up to 20 times.
        for i in range(20):
            self.driver.execute_script("window.scrollBy(0,10);")
            new_height = self.driver.execute_script(
                "return pageYOffset"
            )
            if old_height < new_height:
                break
            # +2000 margin of error: document.body.scrollHeight is larger
            # than the maximum pageYOffset reachable through scrolling.
            if i == 19 and max_scroll_height > new_height + 2000:
                raise ScrollNotWorkingError
        # wait until the scroll down has taken effect (old != new height)
        try:
            WebDriverWait(self.driver, 2).until(
                custom_expected_conditions.ScrollSuccessTest(
                    old_height,
                    self.driver,
                )
            )
        except TimeoutException:
            # BUG FIX: the original had "logger.info" on its own line with
            # the message tuple on the next statement, so nothing was
            # actually logged here.
            logger.info(
                "Scrolling down had no effect after waiting 2 seconds"
                " - end of page reached"
            )
            return True
        else:
            return False
# Default Chrome configuration for headless scraping.
CHROME_OPTIONS = Options()
# chrome_options.add_argument("--window-size=1920,1080")
CHROME_OPTIONS.add_argument('--headless')
CHROME_OPTIONS.add_argument('--no-sandbox')
CHROME_OPTIONS.add_argument('--disable-dev-shm-usage')
CHROME_OPTIONS.add_argument('window-size=1920x1480')

DRIVER_PATH = './chromedriver_dir/chromedriver'

# BUG FIX: the original triple-quoted string wrapped across two source
# lines, embedding a literal newline in the middle of the "as_epq"
# query parameter and thereby producing an invalid URL.
SEARCH_URL = (
    "https://www.google.com/search?as_st=y&tbm=isch&hl=en&as_q={q}"
    "&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs=itp:photo"
)

if __name__ == "__main__":
    # Guarded so that importing this module does not start a scrape.
    testrun = GoogleImageScraper(
        search_url=SEARCH_URL,
        search_term_list=["kitten"],
        DRIVER_PATH=DRIVER_PATH,
        number_files_wanted=700,
        chrome_options=CHROME_OPTIONS,
    )
    testrun.download_images()
| 35.490164 | 96 | 0.628435 |
3ea518c9613c15f42971dbacfd6683e59a18e446 | 6,435 | py | Python | download_data/npz_to_csv.py | pdgwelle/salinity_prediction | 37ed034b4ba466c99eae89c1daaa7e89215d7020 | [
"MIT"
] | null | null | null | download_data/npz_to_csv.py | pdgwelle/salinity_prediction | 37ed034b4ba466c99eae89c1daaa7e89215d7020 | [
"MIT"
] | null | null | null | download_data/npz_to_csv.py | pdgwelle/salinity_prediction | 37ed034b4ba466c99eae89c1daaa7e89215d7020 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import math
import tifffile as tf
import dirs
def final_data_processing(npz_directory, csv_directory, FieldID, start_year = 2007, end_year = 2013):
#satellite_data[rows(height), columns(width), {band1,band2,band3,band4,band5,band7,cfmask,year,datecode},images}
def averaging_per_year(satellite_data_nan_dict, start_year, end_year):
satellite_data = satellite_data_nan_dict['satellite_data']
dict = {}
for i in range(start_year, end_year+1):
index = np.where(satellite_data[0,0,7,:] == float(i))
dict.update({'mean_' + str(i): np.nanmean(satellite_data[:, :, :6, index[0]], axis=3)})
return dict
def calculate_max_CRSI_pixel(mean_dict):
l = list()
L = list()
for i in mean_dict:
year_mean = mean_dict[str(i)]
numerator = (year_mean[:, :, 3] * year_mean[:, :, 2]) - (year_mean[:, :, 1] * year_mean[:, :, 0])
denumerator =((year_mean[:, :, 3] * year_mean[:, :, 2]) + (year_mean[:, :, 1] * year_mean[:, :, 0]))
with np.errstate(divide='ignore', invalid='ignore'):
l.append(np.sqrt(numerator / denumerator))
L.append(np.ones((np.shape(year_mean)[0], np.shape(year_mean)[1])) * int(str(i)[5:]))
crsi_all = np.stack(l, axis=2)
years_all = np.stack(L, axis=2)
indices = np.argmax(crsi_all, axis=2)
row, column = np.indices(np.shape(indices))
CRSI = crsi_all[row, column, indices]
max_year = years_all[row, column, indices]
return {'max_CRSI' : CRSI, 'maxCRSI_year' : max_year}
def create_csv_for_Regression(npzfile, mean_dict, CRSI_dict, path, start_year, FieldID):
def get_indices_with_existing_salinity(npzfile):
salinity = npzfile['salinity']
indices = np.where(np.logical_not(np.isnan(salinity)))
return indices, {'salinity': salinity[indices]}
def get_single_bands_year(mean_dict, indices, start_year):
bands = np.shape(mean_dict['mean_' + str(start_year)])[2]
dict = {}
for i in mean_dict:
for j in range(1, bands + 1):
if j < bands:
temp = mean_dict[str(i)][indices]
dict.update({'band_' + str(j) + '_' + str(i): temp[:, j - 1]})
else:
temp = mean_dict[str(i)][indices]
dict.update({'band_' + str(j + 1) + '_' + str(i): temp[:, j - 1]})
return dict
def get_CRSI_year(CRSI_dict, indices):
dict = {}
for i in CRSI_dict:
dict.update({str(i): CRSI_dict[str(i)][indices]})
return dict
def get_elev_crop(npzfile, indices, max_year, FieldID, start_year):
elev = npzfile['elevation'][indices]
crop = npzfile['crop_layer'][indices]
print(np.shape(crop))
crops = np.zeros(len(max_year))
for j in range(0, len(max_year)):
crops[j] = crop[j, int(max_year[j]) - start_year]
Field_ID = np.ones(len(max_year))*FieldID
return {'elevation': elev[:, 0], 'aspect': elev[:, 1],
'slope': elev[:, 2], 'crops_max_year': crops, 'Field_ID': Field_ID}
# def get_elev_crop(npzfile, indices):
# elev = npzfile['elevation'][indices]
# crop = npzfile['crop_layer'][indices]
# return {'elevation': elev[:, 0], 'aspect': elev[:, 1],
# 'slope': elev[:, 2], 'crops_2007': crop[:, 0],
# 'crops_2008': crop[:, 1], 'crops_2009': crop[:, 2],
# 'crops_2010': crop[:, 3], 'crops_2011': crop[:, 4],
# 'crops_2012': crop[:, 5], 'crops_2013': crop[:, 6]}
def get_temp_of_CRSI_year(npzfile, max_year, start_year):
temp = npzfile['temperature']
i = 0
ave_temp = np.zeros((len(temp) / 12))
while i < len(temp):
ave_temp[i / 12] = np.mean(temp[i:(12 + i)])
i = 12 + i
temperature = np.zeros(len(max_year))
for j in range(0, len(max_year)):
temperature[j] = ave_temp[int(max_year[j]) - start_year]
return {'average_temperature': temperature}
def get_total_precip_of_CRSI_year(npzfile, indices, max_year, start_year):
precip_all = npzfile['precipitation']
precip_month = precip_all[indices]
total_precip = np.zeros((np.shape(precip_month)[0], np.shape(precip_month)[1] / 12))
i = 0
while i < np.shape(precip_month)[1]:
total_precip[:, i / 12] = np.sum(precip_month[:,i:(12+i)], axis=1)
i = 12 + i
precip = np.zeros(len(max_year))
for j in range(0, len(max_year)):
precip[j] = total_precip[j, int(max_year[j]) - start_year]
return {'total_precipitation': precip}
def merge_dicts(*dicts):
out_dict = {}
for dictionary in dicts:
out_dict.update(dictionary)
return out_dict
def dict_to_csv(dict, path):
data = pd.DataFrame.from_dict(data=dict, orient='columns')
data.sort_index(axis=1)
data.to_csv(path_or_buf=path)
return data
indices, sal = get_indices_with_existing_salinity(npzfile)
bands_years = get_single_bands_year(mean_dict, indices, start_year)
CRSI = get_CRSI_year(CRSI_dict, indices)
max_year = CRSI['maxCRSI_year']
additional = get_elev_crop(npzfile, indices, max_year, FieldID, start_year)
temp = get_temp_of_CRSI_year(npzfile, max_year, start_year)
precip = get_total_precip_of_CRSI_year(npzfile, indices, max_year, start_year)
dict = merge_dicts(sal, CRSI, bands_years, additional, temp, precip)
data = dict_to_csv(dict, path)
return data
npzfile = np.load(npz_directory)
satellite_data_nan_dict = {'satellite_data': npzfile['satellite_data']}
mean_dict = averaging_per_year(satellite_data_nan_dict, start_year, end_year)
CRSI_dict = calculate_max_CRSI_pixel(mean_dict)
data = create_csv_for_Regression(npzfile,mean_dict=mean_dict,CRSI_dict=CRSI_dict, path=csv_directory, start_year=start_year, FieldID=FieldID)
return data
| 46.294964 | 145 | 0.579021 |
ac86799b6b8c41bc751b16da496ec9bcee91da3a | 1,513 | py | Python | tests/python/relay/test_to_graph_normal_form.py | mostafaelhoushi/tvm | ae21eddf5f13ffa82d514e8311c87f38bcac559a | [
"Apache-2.0"
] | 1 | 2021-03-07T15:30:16.000Z | 2021-03-07T15:30:16.000Z | tests/python/relay/test_to_graph_normal_form.py | mostafaelhoushi/tvm | ae21eddf5f13ffa82d514e8311c87f38bcac559a | [
"Apache-2.0"
] | null | null | null | tests/python/relay/test_to_graph_normal_form.py | mostafaelhoushi/tvm | ae21eddf5f13ffa82d514e8311c87f38bcac559a | [
"Apache-2.0"
] | 1 | 2020-02-09T10:42:31.000Z | 2020-02-09T10:42:31.000Z | import numpy as np
import tvm
from tvm import relay
from tvm.relay.ir_pass import to_graph_normal_form, to_a_normal_form, alpha_equal
from tvm.relay import op, create_executor
from tvm.relay.backend.interpreter import Value, TupleValue
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
    """Evaluate *expr* on the LLVM interpreter and compare with *expected_result*."""
    module = relay.Module() if mod is None else mod
    interpreter = create_executor(mod=module, ctx=tvm.context("llvm", 0), target="llvm")
    actual = interpreter.evaluate(expr)(*args)
    np.testing.assert_allclose(actual.asnumpy(), expected_result, rtol=rtol)
def test_implicit_share():
    """Graph normal form removes let bindings but preserves the program's value."""
    x = relay.Var('x')
    y = relay.Var('y')
    z = relay.Var('z')
    inner = relay.Let(z, op.add(y, y), op.add(z, z))
    chain = relay.Let(y, op.add(x, x), inner)
    f = relay.Function([], relay.Let(x, relay.const(1), chain))
    g = to_graph_normal_form(f)
    # the ANF program textually contains lets; the GNF program must not
    assert "let" in f.astext()
    assert "let" not in g.astext()
    # ((1+1)+(1+1)) + ((1+1)+(1+1)) == 8
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
def test_round_trip():
    """ANF -> GNF -> ANF round trip keeps the program evaluable and correct."""
    x = relay.Var('x')
    y = relay.Var('y')
    z = relay.Var('z')
    inner = relay.Let(z, op.add(y, y), op.add(z, z))
    chain = relay.Let(y, op.add(x, x), inner)
    f = relay.Function([], relay.Let(x, relay.const(1), chain))
    g = to_graph_normal_form(f)
    h = to_a_normal_form(g)
    assert "let" in f.astext()
    assert "let" not in g.astext()
    # all three forms evaluate to the same value
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
    check_eval(h, [], 8.0)
if __name__ == '__main__':
    # Run both normal-form checks when executed as a standalone script.
    test_implicit_share()
    test_round_trip()
| 29.096154 | 81 | 0.633179 |
6d978ba15d7fcec25df22e89ae533b230aa920a4 | 2,737 | py | Python | neutron_lbaas/tests/tempest/v2/clients/listeners_client.py | gotostack/neutron-lbaas | aea6f1f3c512ef94c89210d919f3b807b907edbe | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/tests/tempest/v2/clients/listeners_client.py | gotostack/neutron-lbaas | aea6f1f3c512ef94c89210d919f3b807b907edbe | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/tests/tempest/v2/clients/listeners_client.py | gotostack/neutron-lbaas | aea6f1f3c512ef94c89210d919f3b807b907edbe | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Rackspace US Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_serialization import jsonutils
from tempest.common import service_client
class ListenersClientJSON(service_client.ServiceClient):
    """
    JSON REST client for the Neutron LBaaS v2 listeners API.

    NOTE(review): ``urllib.urlencode`` is the Python 2 location of this
    helper (Python 3 moved it to ``urllib.parse``) -- this module appears to
    target Python 2, so it is left unchanged.
    """

    _BASE_URL = 'v2.0/lbaas/listeners'

    def _item_url(self, listener_id):
        # URL of a single listener resource.
        return '{0}/{1}'.format(self._BASE_URL, listener_id)

    def list_listeners(self, params=None):
        """List all listeners."""
        url = self._BASE_URL
        if params:
            url = '{0}?{1}'.format(url, urllib.urlencode(params))
        resp, raw = self.get(url)
        parsed = jsonutils.loads(raw)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, parsed['listeners'])

    def get_listener(self, listener_id, params=None):
        """Get listener details."""
        url = self._item_url(listener_id)
        if params:
            url = '{0}?{1}'.format(url, urllib.urlencode(params))
        resp, raw = self.get(url)
        parsed = jsonutils.loads(raw)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, parsed['listener'])

    def create_listener(self, **kwargs):
        """Create a listener build."""
        resp, raw = self.post(self._BASE_URL, jsonutils.dumps({'listener': kwargs}))
        parsed = jsonutils.loads(raw)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, parsed['listener'])

    def update_listener(self, listener_id, **kwargs):
        """Update an existing listener build."""
        resp, raw = self.put(self._item_url(listener_id),
                             jsonutils.dumps({'listener': kwargs}))
        parsed = jsonutils.loads(raw)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, parsed['listener'])

    def delete_listener(self, listener_id):
        """Delete an existing listener build."""
        resp, raw = self.delete(self._item_url(listener_id))
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)
ef6dffd6a92e726f5ce293c4c1ca893b8bf44951 | 6,753 | py | Python | src/scs_philips_hue/data/light/light.py | south-coast-science/scs_philips_hue | 80f4e7f0e7b3b7a11af2326b4c7e18871c5a7040 | [
"MIT"
] | null | null | null | src/scs_philips_hue/data/light/light.py | south-coast-science/scs_philips_hue | 80f4e7f0e7b3b7a11af2326b4c7e18871c5a7040 | [
"MIT"
] | null | null | null | src/scs_philips_hue/data/light/light.py | south-coast-science/scs_philips_hue | 80f4e7f0e7b3b7a11af2326b4c7e18871c5a7040 | [
"MIT"
] | null | null | null | """
Created on 28 Oct 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
example:
{
"state": {
"on": true,
"bri": 254,
"hue": 8418,
"sat": 140,
"effect": "none",
"xy": [0.4573, 0.41],
"ct": 366,
"alert": "select",
"colormode": "ct",
"reachable": true
},
"swupdate": {
"state": "noupdates",
"lastinstall": null
},
"light_type": "Extended color light",
"name": "Hue color lamp 1",
"modelid": "LCT015",
"manufacturername": "Philips",
"uniqueid": "00:17:88:01:03:54:25:66-0b",
"swversion": "1.19.0_r19755",
"swconfigid": "A724919D",
"productid": "Philips-LCT015-1-A19ECLv5"
}
"""
# from scs_core.data.json import JSONify
from collections import OrderedDict
from scs_core.data.json import JSONable
from scs_philips_hue.data.light.light_state import ReportedLightState
from scs_philips_hue.data.light.sw_update import SWUpdate
# --------------------------------------------------------------------------------------------------------------------
class Light(JSONable):
    """
    JSON-serialisable view of a Philips Hue light, mirroring the fields the
    bridge reports for a /lights entry (see the example in the module header).
    """

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def construct_from_jdict(cls, jdict):
        # a missing or empty document yields no Light
        if not jdict:
            return None

        return cls(ReportedLightState.construct_from_jdict(jdict.get('state')),
                   SWUpdate.construct_from_jdict(jdict.get('swupdate')),
                   jdict.get('type'),
                   jdict.get('name'),
                   jdict.get('modelid'),
                   jdict.get('manufacturername'),
                   jdict.get('uniqueid'),
                   jdict.get('swversion'),
                   jdict.get('swconfigid'),
                   jdict.get('productid'))


    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, state, swupdate, light_type, name, model_id, manufacturer_name,
                 unique_id, sw_version, sw_config_id, product_id):
        """
        Constructor
        """
        self.__state = state                                # ReportedLightState
        self.__swupdate = swupdate                          # SWUpdate

        self.__light_type = light_type                      # string
        self.__name = name                                  # string
        self.__model_id = model_id                          # string
        self.__manufacturer_name = manufacturer_name        # string
        self.__unique_id = unique_id                        # string

        self.__sw_version = sw_version                      # string
        self.__sw_config_id = sw_config_id                  # string
        self.__product_id = product_id                      # string


    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        # field order matches the bridge's own JSON layout
        return OrderedDict([('state', self.state),
                            ('swupdate', self.swupdate),
                            ('type', self.light_type),
                            ('name', self.name),
                            ('modelid', self.model_id),
                            ('manufacturername', self.manufacturer_name),
                            ('uniqueid', self.unique_id),
                            ('swversion', self.sw_version),
                            ('swconfigid', self.sw_config_id),
                            ('productid', self.product_id)])


    # ----------------------------------------------------------------------------------------------------------------

    @property
    def state(self):
        return self.__state


    @property
    def swupdate(self):
        return self.__swupdate


    @property
    def light_type(self):
        return self.__light_type


    @property
    def name(self):
        return self.__name


    @property
    def model_id(self):
        return self.__model_id


    @property
    def manufacturer_name(self):
        return self.__manufacturer_name


    @property
    def unique_id(self):
        return self.__unique_id


    @property
    def sw_version(self):
        return self.__sw_version


    @property
    def sw_config_id(self):
        return self.__sw_config_id


    @property
    def product_id(self):
        return self.__product_id


    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "Light:{state:%s, swupdate:%s, light_type:%s, name:%s, model_id:%s, manufacturer_name:%s, " \
               "unique_id:%s, sw_version:%s, sw_config_id:%s, product_id:%s}" % \
               (self.state, self.swupdate, self.light_type, self.name, self.model_id, self.manufacturer_name,
                self.unique_id, self.sw_version, self.sw_config_id, self.product_id)
# --------------------------------------------------------------------------------------------------------------------
class LightListEntry(JSONable):
    """
    Pairs a bridge light index with its Light document, as reported by the
    bridge's light-list endpoint.
    """

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def construct_from_jdict(cls, index, jdict):
        if not jdict:
            return None

        return LightListEntry(index, Light.construct_from_jdict(jdict))


    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, index, light):
        """
        Constructor
        """
        self.__index = index                    # index
        self.__light = light                    # Light


    # ----------------------------------------------------------------------------------------------------------------

    def as_json(self):
        # single-entry mapping of index -> light document
        return OrderedDict([(self.index, self.light)])


    # ----------------------------------------------------------------------------------------------------------------

    @property
    def index(self):
        return self.__index


    @property
    def light(self):
        return self.__light


    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "LightListEntry:{index:%s, light:%s}" % (self.index, self.light)
88e4b706aeb313d09576afffa1da171de1557ac2 | 1,109 | py | Python | output/models/nist_data/atomic/name/schema_instance/nistschema_sv_iv_atomic_name_enumeration_3_xsd/nistschema_sv_iv_atomic_name_enumeration_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/name/schema_instance/nistschema_sv_iv_atomic_name_enumeration_3_xsd/nistschema_sv_iv_atomic_name_enumeration_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/name/schema_instance/nistschema_sv_iv_atomic_name_enumeration_3_xsd/nistschema_sv_iv_atomic_name_enumeration_3.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-Name-enumeration-3-NS"
class NistschemaSvIvAtomicNameEnumeration3Type(Enum):
    """
    Closed set of xs:Name values permitted by the enumeration facet of the
    NIST test schema. The odd spellings are deliberate test strings;
    presumably this module is machine-generated from the XSD -- do not edit
    the values by hand.
    """
    UPROCESSORS_FOR_PUBLISHING_METHODS_FOR_AN_WITH_INCLUDED_IMPLEME = "uprocessors:for.publishing:methods.for:an-with:included:impleme"
    AND_DAT = "_and-dat"
    IBUSINESS_PROCESSES_LANGUAGE_CHAIN = "ibusiness_processes-language-chain"
    SPECIFICATIO = ":specificatio"
    MEDIUM_SIZED_TESTING_TO_USERS_AND_REGISTRIES_SU = ":medium-sized-testing-to:users:and-registries_su"
    TO_THE_HAS_TO_LAUNCHI = "_to_the:has_to:launchi"
    GPROVIDE_BACK_THE_ARE_AND_SHIFT_AND_CREATION_IS = "gprovide-back.the-are:and_shift.and-creation:is-"
@dataclass
class NistschemaSvIvAtomicNameEnumeration3:
    """
    Binding for the "NISTSchema-SV-IV-atomic-Name-enumeration-3" XML element,
    whose content is restricted to the enumeration type above.
    """
    class Meta:
        # XML element name and namespace used by the (de)serializer.
        name = "NISTSchema-SV-IV-atomic-Name-enumeration-3"
        namespace = "NISTSchema-SV-IV-atomic-Name-enumeration-3-NS"

    # Element content; required per the metadata, but defaults to None so an
    # instance can be built before the value is populated.
    value: Optional[NistschemaSvIvAtomicNameEnumeration3Type] = field(
        default=None,
        metadata={
            "required": True,
        }
    )
13102b9a3cf44cc087d8bc53886dab673ccb0856 | 1,425 | py | Python | vice/testing/decorators.py | astrobeard/VICEdev | c78804ec63b48a760ce3e50b8d3afc7b699ec75f | [
"MIT"
] | null | null | null | vice/testing/decorators.py | astrobeard/VICEdev | c78804ec63b48a760ce3e50b8d3afc7b699ec75f | [
"MIT"
] | null | null | null | vice/testing/decorators.py | astrobeard/VICEdev | c78804ec63b48a760ce3e50b8d3afc7b699ec75f | [
"MIT"
] | null | null | null | """
This file implements the unittest and moduletest decorators for testing
"""
from __future__ import absolute_import
__all__ = ["moduletest", "unittest"]
from .moduletest import _moduletest
from .unittest import _unittest
import functools
import inspect
def moduletest(function):
    """
    Decorator: builds a module test from a function returning a description
    and a list of unittest objects (or None for a skipped module).
    """
    @functools.wraps(function)
    def wrapper(run = True):
        try:
            description, unittests = function()
        except TypeError:
            # diagnostic aid: identify which decorated function misbehaved
            print(inspect.getfile(function))
            print(function)
            raise
        suite = _moduletest(description)

        # a None unittest list means the module test is skipped
        cases = [skip_dummy(description)] if unittests is None else unittests
        for case in cases:
            suite.new(case)

        if not run:
            return suite
        suite.run(print_results = True)
    return wrapper
def unittest(function):
    """
    Decorator: builds a unit test from a function returning a description and
    a function which runs the test.
    """
    @functools.wraps(function)
    def wrapper(*args):
        # object-level unittests receive ``self`` through *args
        outcome = function(*args)
        return _unittest(outcome[0], outcome[1])
    return wrapper
@unittest
def skip_dummy(description):
    r"""
    Produces a do-nothing unit test carrying a skip message for a module test.
    """
    def do_nothing():
        return None
    return [description, do_nothing]
| 21.923077 | 73 | 0.706667 |
1a4f9c56709069ce5bcf8a7a39563ba80269abb0 | 479 | py | Python | runserver.py | NotMyRealNameHahaha/Flask-YtDl | be1d6f4353fb6c53ff56fae18680a02428cecb22 | [
"BSD-3-Clause"
] | null | null | null | runserver.py | NotMyRealNameHahaha/Flask-YtDl | be1d6f4353fb6c53ff56fae18680a02428cecb22 | [
"BSD-3-Clause"
] | null | null | null | runserver.py | NotMyRealNameHahaha/Flask-YtDl | be1d6f4353fb6c53ff56fae18680a02428cecb22 | [
"BSD-3-Clause"
] | null | null | null | from YTR import app
# Python imports
import subprocess
import json
import os
def open_browser():
    """Open the local app URL (127.0.0.1:5100) in the system browser.

    Reads the host OS from ``config.JSON`` in the working directory; only
    the values 'Linux' and 'OSX' trigger a launch -- anything else is a
    silent no-op.
    """
    # FIX: the original opened the file in "r+" and never closed it; a
    # read-only context manager guarantees the handle is released.
    with open("config.JSON") as config:
        my_config = json.load(config)

    if my_config['OS'] == 'Linux':
        subprocess.run("sensible-browser 127.0.0.1:5100", shell=True)
    elif my_config['OS'] == 'OSX':
        subprocess.run("open 127.0.0.1:5100", shell=True)
# open_browser()
if __name__ == '__main__':
    # Serve the YTR Flask app locally on port 5100 with the debug reloader.
    app.run(debug=True, port=5100)
| 23.95 | 69 | 0.649269 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.