hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfd2f045de63c5a11c77cbd50e261fbd949e363
| 4,014
|
py
|
Python
|
tests/test_01_util.py
|
sklemer1/oidcendpoint
|
bc2cd9222bd05aec7b7ba5c7c7f593c2143357f3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_01_util.py
|
sklemer1/oidcendpoint
|
bc2cd9222bd05aec7b7ba5c7c7f593c2143357f3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_01_util.py
|
sklemer1/oidcendpoint
|
bc2cd9222bd05aec7b7ba5c7c7f593c2143357f3
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from cryptojwt.exception import UnknownAlgorithm
from oidcmsg.oidc import RegistrationResponse
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.provider_config import ProviderConfiguration
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.oidc.userinfo import UserInfo
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcendpoint.util import get_sign_and_encrypt_algorithms
# Shared provider configuration used by every test in this module.
conf = {
    "issuer": "https://example.com/",
    "password": "mycket hemligt",
    # lifetimes in seconds
    "token_expires_in": 600,
    "grant_expires_in": 300,
    "refresh_token_expires_in": 86400,
    "verify_ssl": False,
    "capabilities": {},
    "jwks_uri": 'https://example.com/jwks.json',
    # Endpoint classes wired into the EndpointContext.
    # NOTE(review): the '{}' in each path is presumably filled in with the
    # issuer URL by the framework — confirm against oidcendpoint.
    'endpoint': {
        'provider_config': {
            'path': '{}/.well-known/openid-configuration',
            'class': ProviderConfiguration,
            'kwargs': {}
        },
        'registration_endpoint': {
            'path': '{}/registration',
            'class': Registration,
            'kwargs': {}
        },
        'authorization_endpoint': {
            'path': '{}/authorization',
            'class': Authorization,
            'kwargs': {}
        },
        'token_endpoint': {
            'path': '{}/token',
            'class': AccessToken,
            'kwargs': {}
        },
        'userinfo_endpoint': {
            'path': '{}/userinfo',
            'class': UserInfo,
            'kwargs': {'db_file': 'users.json'}
        }
    },
    # single no-op authentication method so tests never prompt for login
    'authentication': [
        {
            'acr': INTERNETPROTOCOLPASSWORD,
            'name': 'NoAuthn',
            'kwargs': {'user': 'diana'}
        }
    ],
    'template_dir': 'template'
}
def test_get_sign_algorithm():
    """A client that registered no preference falls back to RS256 signing."""
    endpoint_context = EndpointContext(conf)
    client_info = RegistrationResponse()
    algs = get_sign_and_encrypt_algorithms(
        endpoint_context, client_info, 'id_token', sign=True)
    assert algs == {'sign': True, 'encrypt': False, 'sign_alg': 'RS256'}
def test_no_default_encrypt_algorithms():
    """Requesting encryption with no configured enc algorithms must raise."""
    endpoint_context = EndpointContext(conf)
    client_info = RegistrationResponse()
    with pytest.raises(UnknownAlgorithm):
        get_sign_and_encrypt_algorithms(
            endpoint_context, client_info, 'id_token',
            sign=True, encrypt=True)
def test_get_sign_algorithm_2():
    """The client's registered id_token_signed_response_alg wins over RS256."""
    endpoint_context = EndpointContext(conf)
    client_info = RegistrationResponse(id_token_signed_response_alg='RS512')
    algs = get_sign_and_encrypt_algorithms(
        endpoint_context, client_info, 'id_token', sign=True)
    assert algs == {'sign': True, 'encrypt': False, 'sign_alg': 'RS512'}
def test_get_sign_algorithm_3():
    """A server-side jwx_def default is used when the client sets nothing."""
    endpoint_context = EndpointContext(conf)
    endpoint_context.jwx_def["signing_alg"] = {'id_token': 'RS384'}
    client_info = RegistrationResponse()
    algs = get_sign_and_encrypt_algorithms(
        endpoint_context, client_info, 'id_token', sign=True)
    assert algs == {'sign': True, 'encrypt': False, 'sign_alg': 'RS384'}
def test_get_sign_algorithm_4():
    """The client's registered alg beats the server-side jwx_def default."""
    endpoint_context = EndpointContext(conf)
    endpoint_context.jwx_def["signing_alg"] = {'id_token': 'RS384'}
    client_info = RegistrationResponse(id_token_signed_response_alg='RS512')
    algs = get_sign_and_encrypt_algorithms(
        endpoint_context, client_info, 'id_token', sign=True)
    assert algs == {'sign': True, 'encrypt': False, 'sign_alg': 'RS512'}
| 34.904348
| 76
| 0.599651
|
acfd2ffca5a54db81604890618ebe567981e87b5
| 3,107
|
py
|
Python
|
kunquat/tracker/ui/errordialog.py
|
kagu/kunquat
|
83a2e972121e6a114ecc5ef4392b501ce926bb06
|
[
"CC0-1.0"
] | 13
|
2016-09-01T21:52:49.000Z
|
2022-03-24T06:07:20.000Z
|
kunquat/tracker/ui/errordialog.py
|
kagu/kunquat
|
83a2e972121e6a114ecc5ef4392b501ce926bb06
|
[
"CC0-1.0"
] | 290
|
2015-03-14T10:59:25.000Z
|
2022-03-20T08:32:17.000Z
|
kunquat/tracker/ui/errordialog.py
|
kagu/kunquat
|
83a2e972121e6a114ecc5ef4392b501ce926bb06
|
[
"CC0-1.0"
] | 7
|
2015-03-19T13:28:11.000Z
|
2019-09-03T16:21:16.000Z
|
# -*- coding: utf-8 -*-
#
# Author: Tomi Jylhä-Ollila, Finland 2014-2018
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
import os
import sys
import traceback
from kunquat.tracker.ui.qt import *
from kunquat.tracker.ui.views.utils import get_abs_window_size, get_scaled_font
from kunquat.tracker.errorbase import *
# Rich-text body of the crash dialog. The single '{}' placeholder receives
# an optional HTML style attribute for the issue-tracker link (filled in by
# ErrorDialog.update_style with the themed link colour, or '' by default).
MESSAGE_RICH_FMT = \
"""<h3>We are sorry, but Kunquat Tracker
encountered an error and needs to close.</h3>
<p>This is a programming error. If you would like to help us fix it,
please submit an issue to Kunquat issue tracker at
<a{} href="https://github.com/kunquat/kunquat/issues">https://github.com/kunquat/kunquat/issues</a>
with the following information attached.</p>"""
class ErrorDetails(QTextEdit):
    """Read-only text area showing the formatted error details."""

    def __init__(self):
        super().__init__()
        self.setReadOnly(True)
        self.setAcceptRichText(False)
        self._text = ''
        self.setPlainText(
            'This window did not receive error details.'
            ' Please check terminal output if available.')

    def set_details(self, details):
        """Store *details* and display them verbatim."""
        self._text = details
        self.setPlainText(details)

    def update_style(self, style_mgr):
        """Scale the minimum widget height with the current UI style."""
        self.setMinimumHeight(style_mgr.get_scaled_size(25))

    def get_details(self):
        """Return the last details passed to set_details ('' initially)."""
        return self._text
class ErrorDialog(QDialog):
    """Modal dialog shown for uncaught exceptions; aborts the process on close.

    Constructing an instance installs _excepthook as sys.excepthook, so any
    uncaught exception in the process ends up displayed here.
    """
    # Carries the formatted traceback text to _show_dialog.
    # NOTE(review): emitting a signal instead of calling _show_dialog
    # directly presumably defers the dialog to the Qt event loop (or hops
    # to the GUI thread) — confirm before relying on it.
    exceptionReceived = Signal(str, name='exceptionReceived')

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Oh no!')
        # '' -> link rendered with no extra style until update_style runs
        self._message = QLabel(MESSAGE_RICH_FMT.format(''))
        self._message.setWordWrap(True)
        self._details = ErrorDetails()
        self._closebutton = QPushButton('Exit Kunquat')
        self._size_hint = QSize(768, 512)
        v = QVBoxLayout()
        v.addWidget(self._message)
        v.addWidget(self._details)
        h = QHBoxLayout()
        h.addWidget(self._closebutton)
        v.addItem(h)
        self.setLayout(v)
        self.exceptionReceived.connect(self._show_dialog)
        self._closebutton.clicked.connect(self.close)
        # from here on, every uncaught exception is routed to _excepthook
        sys.excepthook = self._excepthook

    def update_style(self, style_mgr):
        """Re-apply sizing and the themed link colour to the dialog."""
        self._size_hint = get_abs_window_size(0.5, 0.5)
        self._details.update_style(style_mgr)
        colour = style_mgr.get_link_colour()
        style = ' style="color: {};"'.format(colour)
        self._message.setText(MESSAGE_RICH_FMT.format(style))

    def _excepthook(self, eclass, einst, trace):
        """sys.excepthook replacement: log the error and show the dialog."""
        if eclass == KeyboardInterrupt:
            os.abort()
            # unreachable: os.abort() terminates the process
            return
        print_error_msg(eclass, einst, trace)
        log_error(eclass, einst, trace)
        details = get_error_details(eclass, einst, trace)
        self.exceptionReceived.emit(details)

    def _show_dialog(self, details):
        """Display *details* modally, then abort the whole process."""
        details = str(details)
        self._details.set_details(details)
        self.exec_()
        os.abort()

    def sizeHint(self):
        # Qt virtual override; value is refreshed by update_style()
        return self._size_hint
| 27.254386
| 99
| 0.667525
|
acfd31c60a9e1fb9ffffb828179a7cd86e87c7ba
| 516
|
py
|
Python
|
Immutability test.py
|
KhalidFilali/Bytecoin
|
d3b6d4b75658d394a3741a9cc509054021012eb8
|
[
"MIT"
] | null | null | null |
Immutability test.py
|
KhalidFilali/Bytecoin
|
d3b6d4b75658d394a3741a9cc509054021012eb8
|
[
"MIT"
] | null | null | null |
Immutability test.py
|
KhalidFilali/Bytecoin
|
d3b6d4b75658d394a3741a9cc509054021012eb8
|
[
"MIT"
] | null | null | null |
# Tamper-detection demo: record transactions, mutate a stored block in
# place, then run validation — the chain should be reported as broken.
from blockchain import Blockchain
new_transactions = [{'amount': '30', 'sender':'alice', 'receiver':'bob'},
                    {'amount': '55', 'sender':'bob', 'receiver':'alice'}]
# create a fresh chain
my_blockchain = Blockchain()
# record the transactions in a new block
my_blockchain.add_block(new_transactions)
# show the chain before tampering
my_blockchain.print_blocks()
# Tamper: overwrite block 1's transactions directly, bypassing the API.
my_blockchain.chain[1].transactions = "fake_transactions"
# Validate — should detect the mismatch (exact behavior depends on the
# Blockchain implementation; confirm it flags the altered block).
my_blockchain.validate_chain()
| 24.571429
| 74
| 0.713178
|
acfd31e075433d3dd3938d3a04ccbdbb0e90be41
| 2,495
|
py
|
Python
|
em/views_transaction.py
|
thejeshpr/EM
|
a580905c831c6073748e75c176dfe1a2c112e7c2
|
[
"MIT"
] | 1
|
2021-11-04T18:05:26.000Z
|
2021-11-04T18:05:26.000Z
|
em/views_transaction.py
|
thejeshpr/em
|
a580905c831c6073748e75c176dfe1a2c112e7c2
|
[
"MIT"
] | null | null | null |
em/views_transaction.py
|
thejeshpr/em
|
a580905c831c6073748e75c176dfe1a2c112e7c2
|
[
"MIT"
] | null | null | null |
# import datetime
from calendar import monthrange
from datetime import date, timedelta, datetime
from dateutil import relativedelta
from django.db.models import Sum, Count
from django.shortcuts import get_object_or_404, render
from django.views import generic
from django.urls import reverse_lazy
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.db.models import Avg, Count, Min, Sum
from .models import Transaction
# class TransactionDayView(generic.ListView):
# model = Transaction
# template_name = ''
# context_object_name = 'transactions'
# def get_context_data()
@login_required(login_url='/login/')
def transactions_day_view(request):
    """Render per-day transaction totals with overall average and sum."""
    summary = Transaction.objects.values('date').annotate(Sum('amount')).order_by('-date')
    avg = Transaction.objects.aggregate(avg=Avg('amount')).get('avg')
    total = Transaction.objects.aggregate(amount=Sum('amount')).get('amount')
    context = {
        'summary': summary,
        'avg': int(avg) if avg else 0,
        'total_amt': total,
    }
    return render(request, 'em/transactions/transaction_day-view.html', context=context)
@login_required(login_url='/login/')
def transactions_month_view(request):
    """Render the per-day expense summary for a single month.

    The month is taken from the ``ref_dt`` query parameter (``MM-YYYY``),
    defaulting to the current month.

    Fixes over the original:
    * ``avg`` was computed via ``Avg('amount')`` and then unconditionally
      overwritten below — the dead computation is removed;
    * the per-day average divided by the number of days in *today's*
      month instead of the month actually being viewed (``dt``).
    """
    dt_fmt = '%m-%Y'
    ref_dt = request.GET.get('ref_dt')
    dt = datetime.strptime(ref_dt, dt_fmt) if ref_dt else date.today()
    filters = dict(
        date__month=dt.month,
        date__year=dt.year
    )
    context = dict()
    context['prev_month'] = dt - relativedelta.relativedelta(months=1)
    context['next_month'] = dt + relativedelta.relativedelta(months=1)
    context['cur_month'] = dt
    context['summary'] = Transaction.objects.values('date')\
        .filter(**filters)\
        .annotate(Sum('amount'))\
        .order_by('-date')
    context['total_amt'] = Transaction.objects.filter(**filters).aggregate(amount=Sum('amount')).get('amount') or 0
    if context['total_amt']:
        # days in the *viewed* month, not necessarily the current one
        context['avg'] = context['total_amt'] // monthrange(dt.year, dt.month)[1]
    else:
        context['avg'] = 0
    return render(request, 'em/transactions/transaction_month-view.html', context=context)
| 37.80303
| 115
| 0.68497
|
acfd3306f8775ff95270dde819730e0918614ab6
| 1,670
|
py
|
Python
|
assets/code_box/pg_level2_kakao.py
|
happyOBO/happyOBO.github.io
|
96e60a67b9b84c26f01312f8ca5ade35803c521f
|
[
"MIT"
] | 2
|
2020-10-24T03:25:30.000Z
|
2021-08-01T05:18:18.000Z
|
assets/code_box/pg_level2_kakao.py
|
happyOBO/happyOBO.github.io
|
96e60a67b9b84c26f01312f8ca5ade35803c521f
|
[
"MIT"
] | 2
|
2020-12-05T14:31:19.000Z
|
2020-12-06T05:09:43.000Z
|
assets/code_box/pg_level2_kakao.py
|
happyOBO/happyOBO.github.io
|
96e60a67b9b84c26f01312f8ca5ade35803c521f
|
[
"MIT"
] | 4
|
2020-08-26T10:02:11.000Z
|
2020-10-22T05:55:18.000Z
|
def check(i, j, board):
    """Return True if the 2x2 square with top-left corner (i, j) holds four
    identical, non-"X" tiles."""
    tile = board[i][j]
    if tile == "X":
        return False
    return all(board[i + dx][j + dy] == tile
               for dx in range(2) for dy in range(2))
def solution(m, n, board):
is_excute = True
board_list = []
answer = 0
for i in range(m):
r_b = []
for j in range(n):
r_b.append(board[i][j])
board_list.append(r_b)
while(is_excute):
is_excute = False
res = []
for i in range(m):
r = []
r_b = []
for j in range(n):
r.append(0)
r_b.append(board[i][j])
res.append(r)
board_list.append(r_b)
for i in range(m-1):
for j in range(n-1):
if(check(i,j, board_list)):
is_excute = True
for x in range(2):
for y in range(2):
res[i+x][j+y] = 1
# for i in range(m):
# print(res[i])
for i in range(m):
for j in range(n):
if( i+1 < m and res[i+1][j] == 1):
for k in range(0,i+1):
board_list[i+1-k][j] = board_list[i-k][j]
board_list[i-k][j] = "X"
for i in range(m):
print(board_list[i])
print()
for i in range(m):
for j in range(n):
if(board_list[i][j] == "X"):
answer += 1
return answer
# Smoke test with a 4x6 example board.
a = solution(4,6,["CCBDEF", "AAADEF", "AAABFF", "CCBBFF" ])
print(a)
# 4x5 reference board (the original Programmers example) for comparison:
# "CCBDE"
# "AAADE"
# "AAABF"
# "CCBBF"
| 23.194444
| 65
| 0.404192
|
acfd341f1ca7db6490e5e306ac186833637c6fb0
| 9,135
|
py
|
Python
|
qtutils/measurements/param_viewer/param_viewer_GUI_main.py
|
albe-jj/qtutils
|
c515baa6a4ef61de70b883d8bc9feda5c4d259b3
|
[
"BSD-2-Clause"
] | null | null | null |
qtutils/measurements/param_viewer/param_viewer_GUI_main.py
|
albe-jj/qtutils
|
c515baa6a4ef61de70b883d8bc9feda5c4d259b3
|
[
"BSD-2-Clause"
] | null | null | null |
qtutils/measurements/param_viewer/param_viewer_GUI_main.py
|
albe-jj/qtutils
|
c515baa6a4ef61de70b883d8bc9feda5c4d259b3
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from dataclasses import dataclass
from functools import partial
from typing import Any, Optional

import numpy as np
import qcodes as qc
from PyQt5 import QtCore, QtGui, QtWidgets
from qcodes import Station

from core_tools.GUI.param_viewer.param_viewer_GUI_window import Ui_MainWindow
@dataclass
class param_data_obj:
    """Record tying a QCoDeS parameter to the GUI widget that displays it.

    Fix: the fields were annotated with the builtin function ``any`` used
    as a type; replaced with ``typing.Any``.

    Attributes:
        param_parameter: the QCoDeS parameter being shown/set.
        gui_input_param: the QDoubleSpinBox the user edits.
        division: scale factor between the parameter value and the value
            displayed in the GUI (e.g. 1e6 when showing Hz as MHz).
    """
    param_parameter: Any
    gui_input_param: Any
    division: Any
class param_viewer(QtWidgets.QMainWindow, Ui_MainWindow):
    """Window listing gate parameters (real/virtual) and RF settings.

    Polls the current hardware values into spin boxes twice a second and
    writes user edits back through the bound QCoDeS parameters.
    """
    def __init__(self, station : Station, gates_object: Optional[object] = None, param_ls=None):
        """Build the viewer.

        Args:
            station: QCoDeS Station (a hard type check rejects anything else).
            gates_object: object exposing the gate parameters; defaults to
                ``station.gates`` when not supplied.
            param_ls: names of parameters to display; defaults to all
                parameters of gates_object minus 'IDN', 'reps', 'seat'.
        """
        if type(station) is not Station:
            raise Exception('Syntax changed, to support RF_settings now supply station')
        self.real_gates = list()
        self.virtual_gates = list()
        self.rf_settings = list()
        self.station = station
        if gates_object:
            self.gates_object = gates_object
        else:
            try:
                self.gates_object = self.station.gates
            except:
                raise ValueError('Default guess for gates object wrong, please supply manually')
        self._step_size = 1 #mV
        instance_ready = True
        # set graphical user interface; reuse an already-running
        # QApplication (e.g. inside an interactive session) if there is one
        self.app = QtCore.QCoreApplication.instance()
        if self.app is None:
            instance_ready = False
            self.app = QtWidgets.QApplication([])
        super(QtWidgets.QMainWindow, self).__init__()
        self.setupUi(self)
        # add RF parameters
        # for src_name in self.gates_object.hardware.RF_source_names:
        #     inst = getattr(station, src_name)
        #     for RFpar in self.gates_object.hardware.RF_params:
        #         param = getattr(inst, RFpar)
        #         self._add_RFset(param)
        # add real gates
        if not param_ls:
            param_ls = list(self.gates_object.parameters)
        param_ls = [p_name for p_name in param_ls if p_name not in ['IDN', 'reps', 'seat']] #remove IDN
        for param_name in param_ls:
            param = getattr(self.gates_object, param_name)
            self._add_gate(param, False)
        # add virtual gates
        # for virt_gate_set in self.gates_object.hardware.virtual_gates:
        #     for gate_name in virt_gate_set.virtual_gate_names:
        #         param = getattr(self.gates_object, gate_name)
        #         self._add_gate(param, True)
        self.step_size.valueChanged.connect(partial(self._update_step, self.step_size.value))
        self._finish_gates_GUI()
        # poll hardware values into the GUI every 500 ms
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(partial(self._update_parameters))
        self.timer.start(500)
        self.show()
        if instance_ready == False:
            # we created the QApplication ourselves, so run its event loop
            self.app.exec()
    def _update_step(self, value):
        # slot for step_size.valueChanged; `value` is the spin box's bound
        # .value getter (a callable), not the numeric value itself
        self.update_step(value())
    def update_step(self, value : float):
        """ Update step size of the parameter GUI elements with the specified value """
        self._step_size = value
        for gate in self.real_gates:
            gate.gui_input_param.setSingleStep(value)
        for gate in self.virtual_gates:
            gate.gui_input_param.setSingleStep(value)
        #for changing step hack it here
        self.step_size.setValue(value)
    def _add_RFset(self, parameter : qc.Parameter):
        ''' Add a new RF.
        Args:
            parameter (QCoDeS parameter object) : parameter to add.
        '''
        i = len(self.rf_settings)
        layout = self.layout_RF
        name = parameter.full_name
        unit = parameter.unit
        step_size = 0.5
        division = 1
        # frequency parameters are displayed scaled down to MHz.
        # NOTE(review): name[0:10] == 'frequency' compares a 10-char slice
        # with a 9-char string, so it only matches the exact name
        # 'frequency' — confirm whether startswith('frequency') was meant.
        if parameter.name[0:10] == 'frequency':
            division = 1e6
            step_size = 0.1
            unit = f'M{unit}'
        _translate = QtCore.QCoreApplication.translate
        # label with the parameter name
        set_name = QtWidgets.QLabel(self.RFsettings)
        set_name.setObjectName(name)
        set_name.setMinimumSize(QtCore.QSize(100, 0))
        set_name.setText(_translate("MainWindow", name))
        layout.addWidget(set_name, i, 0, 1, 1)
        # editable value box; edits are pushed to hardware via _set_set
        set_input = QtWidgets.QDoubleSpinBox(self.RFsettings)
        set_input.setObjectName(name + "_input")
        set_input.setMinimumSize(QtCore.QSize(100, 0))
        # TODO collect boundaries out of the harware
        set_input.setRange(-1e9,1e9)
        set_input.valueChanged.connect(partial(self._set_set, parameter, set_input.value,division))
        set_input.setKeyboardTracking(False)
        set_input.setSingleStep(step_size)
        layout.addWidget(set_input, i, 1, 1, 1)
        # unit label
        set_unit = QtWidgets.QLabel(self.RFsettings)
        set_unit.setObjectName(name + "_unit")
        set_unit.setText(_translate("MainWindow", unit))
        layout.addWidget(set_unit, i, 2, 1, 1)
        self.rf_settings.append(param_data_obj(parameter, set_input, division))
    def _add_gate(self, parameter : qc.Parameter, virtual : bool):
        '''
        add a new gate.
        Args:
            parameter (QCoDeS parameter object) : parameter to add.
            virtual (bool) : True in case this is a virtual gate.
        '''
        i = len(self.real_gates)
        layout = self.layout_real
        if virtual == True:
            i = len(self.virtual_gates)
            layout = self.layout_virtual
        name = parameter.name
        unit = parameter.unit
        _translate = QtCore.QCoreApplication.translate
        gate_name = QtWidgets.QLabel(self.virtualgates)
        gate_name.setObjectName(name)
        gate_name.setMinimumSize(QtCore.QSize(100, 0))
        gate_name.setText(_translate("MainWindow", name))
        layout.addWidget(gate_name, i, 0, 1, 1)
        voltage_input = QtWidgets.QDoubleSpinBox(self.virtualgates)
        voltage_input.setObjectName( name + "_input")
        voltage_input.setMinimumSize(QtCore.QSize(100, 0))
        # TODO collect boundaries out of the harware
        voltage_input.setRange(-20e3,20e3)
        # NOTE(review): the write-back connection below is commented out,
        # so GUI edits of gates are currently NOT sent to hardware —
        # confirm whether this is intentional.
        # voltage_input.valueChanged.connect(partial(self._set_gate, parameter, voltage_input.value))
        voltage_input.setKeyboardTracking(False)
        layout.addWidget(voltage_input, i, 1, 1, 1)
        gate_unit = QtWidgets.QLabel(self.virtualgates)
        gate_unit.setObjectName(name + "_unit")
        gate_unit.setText(_translate("MainWindow", unit))
        layout.addWidget(gate_unit, i, 2, 1, 1)
        if virtual == False:
            self.real_gates.append(param_data_obj(parameter, voltage_input, 1))
        else:
            self.virtual_gates.append(param_data_obj(parameter, voltage_input, 1))
    def _set_gate(self, gate, value):
        """Write a GUI gate value to hardware (value is a callable getter)."""
        # TODO add support if out of range.
        gate.set(value())
    def _set_set(self, setting, value, division):
        """Write an RF setting: scale the displayed value back up and sync."""
        # TODO add support if out of range.
        setting.set(value()*division)
        self.gates_object.hardware.RF_settings[setting.full_name] = value()*division
        self.gates_object.hardware.sync_data()
    def _finish_gates_GUI(self):
        """Add stretch spacers to all three layouts and set the title."""
        for items, layout_widget in [ (self.real_gates, self.layout_real), (self.virtual_gates, self.layout_virtual),
                (self.rf_settings, self.layout_RF)]:
            i = len(items) + 1
            spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
            layout_widget.addItem(spacerItem, i, 0, 1, 1)
            spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
            layout_widget.addItem(spacerItem1, 0, 3, 1, 1)
        self.setWindowTitle(f'Viewer for {self.gates_object}')
    def _update_parameters(self):
        '''
        updates the values of all the gates in the parameterviewer periodically
        '''
        # only refresh the currently visible tab
        idx = self.tab_menu.currentIndex()
        if idx == 0:
            params = self.real_gates
        elif idx == 1:
            params = self.virtual_gates
        elif idx == 2:
            params = self.rf_settings
        else:
            return
        for param in params:
            # do not update when a user clicks on it.
            if not param.gui_input_param.hasFocus():
                param.gui_input_param.setValue(param.param_parameter()/param.division)
if __name__ == "__main__":
    import sys
    import qcodes as qc
    from V2_software.drivers.virtual_gates.examples.hardware_example import hardware_example
    from V2_software.drivers.virtual_gates.instrument_drivers.virtual_dac import virtual_dac
    from V2_software.drivers.virtual_gates.instrument_drivers.gates import gates
    # demo setup: four virtual DACs behind one gates object
    my_dac_1 = virtual_dac("dac_a", "virtual")
    my_dac_2 = virtual_dac("dac_b", "virtual")
    my_dac_3 = virtual_dac("dac_c", "virtual")
    my_dac_4 = virtual_dac("dac_d", "virtual")
    hw = hardware_example("hw")
    hw.RF_source_names = []
    my_gates = gates("my_gates", hw, [my_dac_1, my_dac_2, my_dac_3, my_dac_4])
    station = qc.Station(my_gates)
    # param_viewer shows itself and — when it had to create the
    # QApplication — runs the Qt event loop inside its constructor.
    # Fix: the original ended with MainWindow.show() / sys.exit(app.exec_()),
    # but both names only existed in commented-out lines, so the script
    # raised NameError; the viewer needs no extra show()/exec() here.
    ui = param_viewer(station, my_gates)
    sys.exit(0)
| 35.823529
| 119
| 0.644554
|
acfd348d64f141c6db191691c7779bd4e0ee9e3a
| 2,585
|
py
|
Python
|
test/vpp_gre_interface.py
|
fazega/vpp
|
9dba7816f6c3b5e4c879f839bb4bc61dcdbde757
|
[
"Apache-2.0"
] | 1
|
2019-06-04T16:51:10.000Z
|
2019-06-04T16:51:10.000Z
|
test/vpp_gre_interface.py
|
Semihalf/marvell-vpp
|
fb5148b4dffaea52fffa2ec3acbad6899377c08b
|
[
"Apache-2.0"
] | null | null | null |
test/vpp_gre_interface.py
|
Semihalf/marvell-vpp
|
fb5148b4dffaea52fffa2ec3acbad6899377c08b
|
[
"Apache-2.0"
] | 1
|
2022-03-07T06:44:26.000Z
|
2022-03-07T06:44:26.000Z
|
from vpp_interface import VppInterface
import socket
class VppGreInterface(VppInterface):
    """IPv4 GRE tunnel interface wrapper for the VPP test framework."""

    def __init__(self, test, src_ip, dst_ip, outer_fib_id=0, is_teb=0):
        """ Create VPP GRE interface """
        self._sw_if_index = 0
        super(VppGreInterface, self).__init__(test)
        self._test = test
        self.t_outer_fib = outer_fib_id
        self.t_is_teb = is_teb
        self.t_src = src_ip
        self.t_dst = dst_ip

    def add_vpp_config(self):
        """Create the tunnel in VPP and record its sw_if_index."""
        src = socket.inet_pton(socket.AF_INET, self.t_src)
        dst = socket.inet_pton(socket.AF_INET, self.t_dst)
        reply = self.test.vapi.gre_tunnel_add_del(
            src, dst,
            outer_fib_id=self.t_outer_fib,
            is_teb=self.t_is_teb)
        self._sw_if_index = reply.sw_if_index
        self.generate_remote_hosts()

    def remove_vpp_config(self):
        """Unconfigure the interface and delete the tunnel from VPP."""
        src = socket.inet_pton(socket.AF_INET, self.t_src)
        dst = socket.inet_pton(socket.AF_INET, self.t_dst)
        self.unconfig()
        self.test.vapi.gre_tunnel_add_del(
            src, dst,
            outer_fib_id=self.t_outer_fib,
            is_add=0)
class VppGre6Interface(VppInterface):
    """IPv6 GRE tunnel interface wrapper for the VPP test framework."""

    def __init__(self, test, src_ip, dst_ip, outer_fib_id=0, is_teb=0):
        """ Create VPP GRE interface """
        self._sw_if_index = 0
        super(VppGre6Interface, self).__init__(test)
        self._test = test
        self.t_outer_fib = outer_fib_id
        self.t_is_teb = is_teb
        self.t_src = src_ip
        self.t_dst = dst_ip

    def add_vpp_config(self):
        """Create the IPv6 tunnel in VPP and record its sw_if_index."""
        src = socket.inet_pton(socket.AF_INET6, self.t_src)
        dst = socket.inet_pton(socket.AF_INET6, self.t_dst)
        reply = self.test.vapi.gre_tunnel_add_del(
            src, dst,
            outer_fib_id=self.t_outer_fib,
            is_teb=self.t_is_teb,
            is_ip6=1)
        self._sw_if_index = reply.sw_if_index
        self.generate_remote_hosts()

    def remove_vpp_config(self):
        """Unconfigure the interface and delete the tunnel from VPP."""
        src = socket.inet_pton(socket.AF_INET6, self.t_src)
        dst = socket.inet_pton(socket.AF_INET6, self.t_dst)
        self.unconfig()
        self.test.vapi.gre_tunnel_add_del(
            src, dst,
            outer_fib_id=self.t_outer_fib,
            is_add=0,
            is_ip6=1)
| 35.902778
| 76
| 0.54352
|
acfd34b8744fc7cb2bae5682307344f50448cc5f
| 8,862
|
py
|
Python
|
stock-forecasting-v3/agent.py
|
huseinzol05/Tensorflow-JS-Projects
|
d9df5ee615b953701d22710bd66c8a17a5760fa6
|
[
"Apache-2.0"
] | 20
|
2018-06-19T09:06:12.000Z
|
2021-08-12T10:40:18.000Z
|
stock-forecasting-v3/agent.py
|
huseinzol05/Tensorflow-JS-Projects
|
d9df5ee615b953701d22710bd66c8a17a5760fa6
|
[
"Apache-2.0"
] | null | null | null |
stock-forecasting-v3/agent.py
|
huseinzol05/Tensorflow-JS-Projects
|
d9df5ee615b953701d22710bd66c8a17a5760fa6
|
[
"Apache-2.0"
] | 9
|
2018-06-19T06:33:48.000Z
|
2020-10-15T17:44:25.000Z
|
import numpy as np
import json
from flask import Flask, request
from flask_cors import CORS
# Flask app serving the training endpoint; CORS enabled so a browser
# front-end hosted on another origin can POST to /stock.
app = Flask(__name__)
CORS(app)
def detect_outliers(close, date, threshold, **kwargs):
    """Return prices whose |z-score| exceeds a fraction of the z-score range.

    `threshold` is relative: the cutoff is threshold * (max - min) of the
    absolute z-scores. Returns {'date': [...], 'close': [...]} for the
    flagged points.
    """
    prices = np.array(close)
    zscores = np.abs((prices - np.mean(close)) / np.std(close))
    cutoff = (np.max(zscores) - np.min(zscores)) * threshold
    flagged_dates, flagged_prices = [], []
    for idx in np.where(zscores > cutoff)[0]:
        flagged_dates.append(date[idx])
        flagged_prices.append(close[idx])
    return {'date': flagged_dates, 'close': flagged_prices}
def get_state(data, t, n):
    """Return a (1, n-1) array of consecutive price differences ending at t.

    When fewer than n points precede index t, the window is left-padded by
    repeating data[0].
    """
    start = t - n + 1
    if start >= 0:
        window = data[start:t + 1]
    else:
        window = -start * [data[0]] + data[0:t + 1]
    diffs = [window[k + 1] - window[k] for k in range(n - 1)]
    return np.array([diffs])
class Deep_Evolution_Strategy:
    """OpenAI-style evolution strategy optimiser.

    Each step samples `population_size` Gaussian perturbations of the
    current weights, scores each perturbed candidate with
    `reward_function`, standardises the rewards, and moves the weights
    along the reward-weighted average of the perturbations.
    """
    # NOTE(review): class attribute is never used inside this class —
    # confirm nothing external relies on it before removing.
    inputs = None
    def __init__(self, weights, reward_function, population_size, sigma, learning_rate):
        # weights: list of numpy arrays (mutated in place by train())
        self.weights = weights
        self.reward_function = reward_function
        self.population_size = population_size
        # sigma: stddev of the weight perturbations
        self.sigma = sigma
        self.learning_rate = learning_rate
    def _get_weight_from_population(self, weights, population):
        # candidate = weights + sigma * noise, element-wise per layer
        return [weights[i] + self.sigma * population[i] for i in range(len(population))]
    def get_weights(self):
        """Return the current (possibly trained) weights."""
        return self.weights
    def train(self, epoch = 100):
        """Run `epoch` ES update steps, mutating self.weights in place."""
        for i in range(epoch):
            population = []
            rewards = np.zeros(self.population_size)
            for k in range(self.population_size):
                population.append([np.random.randn(*self.weights[w].shape) for w in range(len(self.weights))])
            for k in range(self.population_size):
                weights_population = self._get_weight_from_population(self.weights, population[k])
                rewards[k] = self.reward_function(weights_population)
            # standardise rewards; NOTE(review): divides by the std, so a
            # population with identical rewards would produce NaNs.
            rewards = (rewards - np.mean(rewards)) / np.std(rewards)
            for w in range(len(self.weights)):
                A = np.array([p[w] for p in population])
                # gradient-ascent-style step along the noise directions
                self.weights[w] += self.learning_rate/(self.population_size * self.sigma) * np.dot(A.T, rewards).T
class Model:
    """Tiny linear policy: one hidden projection feeding two output heads,
    a 3-way action head and a scalar buy-amount head."""

    def __init__(self, input_size, layer_size, **kwargs):
        self.weights = [
            np.random.randn(input_size, layer_size),  # input projection
            np.random.randn(layer_size, 3),           # action head
            np.random.randn(layer_size, 1),           # buy-amount head
            np.random.randn(1, layer_size),           # hidden bias
        ]

    def predict(self, inputs):
        """Return (action_scores, buy_amount) for a batch of inputs."""
        hidden = np.dot(inputs, self.weights[0]) + self.weights[-1]
        action_scores = np.dot(hidden, self.weights[1])
        buy_amount = np.dot(hidden, self.weights[2])
        return action_scores, buy_amount

    def get_weights(self):
        """Current weight list (shared reference, not a copy)."""
        return self.weights

    def set_weights(self, weights):
        """Replace the weight list wholesale."""
        self.weights = weights
class Agent:
    """Evolution-strategy trading agent over a closing-price series.

    Wraps a Model policy and a Deep_Evolution_Strategy optimiser; the
    reward is the percentage gain of simulated trading over `close`.
    """
    def __init__(
        self, population_size, sigma, learning_rate, model,
        money, max_buy, max_sell, skip, window_size, l, close, **kwargs):
        # l: number of time steps to iterate — the caller in this file
        # sets it to len(close) - 1
        self.window_size = window_size
        self.skip = skip  # stride through the time series
        self.POPULATION_SIZE = population_size
        self.SIGMA = sigma
        self.LEARNING_RATE = learning_rate
        self.model = model
        self.initial_money = money  # starting cash for each simulation
        self.max_buy = max_buy      # cap on units bought per action
        self.max_sell = max_sell    # cap on units sold per action
        self.l = l
        self.close = close
        self.es = Deep_Evolution_Strategy(
            self.model.get_weights(),
            self.get_reward,
            self.POPULATION_SIZE,
            self.SIGMA, self.LEARNING_RATE)
    def act(self, sequence):
        """Return (action, units): action is argmax of the decision head
        (1 = buy, 2 = sell in the loops below), units from the buy head."""
        decision, buy = self.model.predict(np.array(sequence))
        return np.argmax(decision[0]), int(buy[0])
    def get_reward(self, weights):
        """Simulate trading with `weights`; return % gain over starting money."""
        initial_money = self.initial_money
        starting_money = initial_money
        self.model.weights = weights
        state = get_state(self.close, 0, self.window_size + 1)
        inventory = []
        quantity = 0
        for t in range(0,self.l,self.skip):
            action, buy = self.act(state)
            next_state = get_state(self.close, t + 1, self.window_size + 1)
            if action == 1 and initial_money >= self.close[t]:
                # negative unit requests become 1; requests above max_buy
                # are capped
                if buy < 0:
                    buy = 1
                if buy > self.max_buy:
                    buy_units = self.max_buy
                else:
                    buy_units = buy
                total_buy = buy_units * self.close[t]
                initial_money -= total_buy
                inventory.append(total_buy)
                quantity += buy_units
            elif action == 2 and len(inventory) > 0:
                # NOTE(review): unlike buy() below, nothing is popped from
                # inventory here, so selling stays enabled once any buy has
                # happened — confirm this asymmetry is intended.
                if quantity > self.max_sell:
                    sell_units = self.max_sell
                else:
                    sell_units = quantity
                quantity -= sell_units
                total_sell = sell_units * self.close[t]
                initial_money += total_sell
            state = next_state
        return ((initial_money - starting_money) / starting_money) * 100
    def fit(self, iterations):
        """Train the policy for `iterations` ES steps."""
        self.es.train(iterations)
    def buy(self, dates):
        """Replay trading with the trained policy.

        Returns a dict with overall gain/investment, buy/sell coordinates
        for plotting, and HTML table rows describing each trade.
        """
        initial_money = self.initial_money
        state = get_state(self.close, 0, self.window_size + 1)
        starting_money = initial_money
        inventory = []
        quantity = 0
        states_sell_X, states_sell_Y = [], []
        states_buy_X, states_buy_Y = [], []
        outputs = []
        for t in range(0,self.l,self.skip):
            action, buy = self.act(state)
            next_state = get_state(self.close, t + 1, self.window_size + 1)
            if action == 1 and initial_money >= self.close[t]:
                # same clamping as in get_reward
                if buy < 0:
                    buy = 1
                if buy > self.max_buy:
                    buy_units = self.max_buy
                else:
                    buy_units = buy
                total_buy = buy_units * self.close[t]
                initial_money -= total_buy
                inventory.append(total_buy)
                quantity += buy_units
                states_buy_X.append(dates[t])
                states_buy_Y.append(self.close[t])
                outputs.append("<tr><td>"+dates[t] \
                    +"</td><td>buy "+str(buy_units)+" units</td><td>" \
                    +str(total_buy)+"</td><td>NULL</td><td>" \
                    +str(initial_money)+"</td></tr>")
            elif action == 2 and len(inventory) > 0:
                # FIFO: compare against the oldest open buy
                bought_price = inventory.pop(0)
                if quantity > self.max_sell:
                    sell_units = self.max_sell
                else:
                    sell_units = quantity
                if sell_units < 1:
                    continue
                quantity -= sell_units
                total_sell = sell_units * self.close[t]
                initial_money += total_sell
                states_sell_X.append(dates[t])
                states_sell_Y.append(self.close[t])
                try:
                    invest = ((total_sell - bought_price) / bought_price) * 100
                except:
                    # guards division by zero when the matched buy cost 0
                    invest = 0
                outputs.append("<tr><td>"+dates[t] \
                    +"</td><td>sell "+str(sell_units)+" units</td><td>" \
                    +str(total_sell)+"</td><td>"+str(invest)+"%</td><td>"+str(initial_money)+"</td></tr>")
            state = next_state
        invest = ((initial_money - starting_money) / starting_money) * 100
        return {'overall gain':(initial_money-starting_money),
            'overall investment':invest,
            'sell_Y':states_sell_Y,'sell_X':states_sell_X,
            'buy_Y':states_buy_Y,'buy_X':states_buy_X,'output':outputs}
@app.route('/stock', methods = ['POST'])
def upload_file():
    """Train an ES trading agent on posted stock data and return the result.

    Expects form field 'data' containing JSON with at least: close (list of
    prices), date (list of labels), window_size, population_size,
    layer_size, epoch, and the Agent constructor kwargs. Returns a JSON
    string; errors are reported as {'error': ...} rather than HTTP codes.
    """
    try:
        stock_data = json.loads(request.form['data'])
        # derived fields consumed by Model/Agent via **stock_data
        stock_data['l'] = len(stock_data['close'])-1
        stock_data['input_size'] = stock_data['window_size']
        # reject oversized requests to bound the training time
        if stock_data['window_size'] > 30:
            return json.dumps({'error':'window_size is too big, make sure it less than 30'})
        if stock_data['population_size'] > 40:
            return json.dumps({'error':'population_size is too big, make sure it less than 40'})
        if stock_data['layer_size'] > 200:
            return json.dumps({'error':'layer_size is too big, make sure it less than 200'})
        if stock_data['epoch'] > 100:
            return json.dumps({'error':'epoch is too big, make sure it less than 100'})
        """
        Model takes 2 parameters, input_size, layer_size
        Agent takes 9 parameters, population_size, sigma, learning_rate,
        model, money, max_buy, max_sell, skip, window_size, close
        Targeted every request not more than 10 sec.
        """
        model = Model(**stock_data)
        stock_data['model'] = model
        agent = Agent(**stock_data)
        agent.fit(stock_data['epoch'])
        return json.dumps(
            {'agent':agent.buy(stock_data['date']),
            'outliers':detect_outliers(**stock_data)
            })
    except Exception as e:
        return json.dumps({'error':str(e)})
# WSGI entry-point alias expected by some deployment tooling
application = app
| 39.039648
| 114
| 0.565109
|
acfd34d9911b6d07369302e54708a2a657a675d3
| 173
|
py
|
Python
|
aat/tests/core/order_book/test_collector.py
|
mthomascarcamo/aat
|
fd86f513ccf79625516d2236be655498b24ec742
|
[
"Apache-2.0"
] | 305
|
2020-02-24T02:25:43.000Z
|
2022-03-26T22:53:43.000Z
|
aat/tests/core/order_book/test_collector.py
|
mthomascarcamo/aat
|
fd86f513ccf79625516d2236be655498b24ec742
|
[
"Apache-2.0"
] | 79
|
2020-02-20T21:00:58.000Z
|
2022-03-27T14:06:26.000Z
|
aat/tests/core/order_book/test_collector.py
|
mthomascarcamo/aat
|
fd86f513ccf79625516d2236be655498b24ec742
|
[
"Apache-2.0"
] | 71
|
2020-05-10T11:52:25.000Z
|
2022-03-29T07:51:48.000Z
|
from aat.core.order_book.collector import _Collector
class TestCollector:
    """Smoke test for the _Collector order-book helper."""

    def test_collector(self):
        # A print-based callback stands in for a real event handler.
        def echo(*events):
            print(events)

        collector = _Collector(echo)
        assert collector
| 21.625
| 52
| 0.705202
|
acfd3600d2d7ed786b46254160853b5a0a408a51
| 959
|
py
|
Python
|
examples/wandb_demo.py
|
Jie-Yuan/MeUtils
|
2bb191b0d35b809af037c0f65b37570b8828bea3
|
[
"Apache-2.0"
] | 3
|
2020-12-03T07:30:02.000Z
|
2021-02-07T13:37:33.000Z
|
examples/wandb_demo.py
|
Jie-Yuan/MeUtils
|
2bb191b0d35b809af037c0f65b37570b8828bea3
|
[
"Apache-2.0"
] | null | null | null |
examples/wandb_demo.py
|
Jie-Yuan/MeUtils
|
2bb191b0d35b809af037c0f65b37570b8828bea3
|
[
"Apache-2.0"
] | 1
|
2021-02-07T13:37:38.000Z
|
2021-02-07T13:37:38.000Z
|
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from numpy.random.mtrand import permutation  # NOTE(review): unused import
from sklearn.datasets import load_iris
import wandb

# initialize wandb run
wandb.init()

# load the iris data set and keep its label/feature names for plotting
iris = load_iris()
X = iris.data
y = iris.target
labels = iris.target_names
features = iris.feature_names
# collapse to a binary task: class 0 vs. everything else
y[y != 0] = 1

# shuffle, then hold out 10% of the rows for evaluation
X, y = shuffle(X, y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

# create and fit the model on the TRAINING split only.
# Fitting on all of X (as before) leaked the held-out test rows into the
# model and inflated the evaluation below.
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_probas = model.predict_proba(X_test)

# visualize model
# wandb.sklearn.plot_classifier(model, X_train, X_test, y_train, y_test, y_pred, y_probas, labels, True, 'RandomForest', features)
# wandb.log({'roc': wandb.plots.ROC(y_test, y_probas, labels)})
# wandb.log({'pr': wandb.plots.precision_recall(y_test, y_probas, labels)})
| 28.205882
| 130
| 0.769552
|
acfd367063d89262f2f288b68d7cd8a02ae3a014
| 990
|
py
|
Python
|
server/client.py
|
nishio/jscc
|
9a1fa813fc71a9eacbe6b3a7b94e7ff69cc98728
|
[
"MIT"
] | 1
|
2021-03-09T04:29:29.000Z
|
2021-03-09T04:29:29.000Z
|
server/client.py
|
nishio/jscc
|
9a1fa813fc71a9eacbe6b3a7b94e7ff69cc98728
|
[
"MIT"
] | null | null | null |
server/client.py
|
nishio/jscc
|
9a1fa813fc71a9eacbe6b3a7b94e7ff69cc98728
|
[
"MIT"
] | null | null | null |
import re
import json
from datetime import datetime
import urllib2
import urllib

# Aggregate Closure lint/compile results into a JSON status report and POST
# it to the local dashboard server.  (Python 2 script: urllib2/urllib.)
data = {"error": None, "warning": None}

# Count lint messages; relevant lines look like:
#   Line 58, E:0002: Missing space after ","
messages = []
for line in open("lint.log"):
    if line.startswith("Line"):
        messages.append(line.split(":", 2)[1])
data["lint"] = len(messages)

success = False
for line in open("compile.log"):
    if "error(s)" in line or "warning(s)" in line:
        # sample: 44 error(s), 0 warning(s)
        # raw string so \d is a regex digit class, not a (deprecated) escape
        err, warn = re.match(r"(\d+) error.* (\d+) warn", line).groups()
        data["error"] = int(err)
        data["warning"] = int(warn)
    if "closurebuilder.py: JavaScript compilation succeeded" in line:
        success = True
# identity comparison with None is the idiomatic (and correct) form
if data["error"] is None: data["error"] = 0
if data["warning"] is None: data["warning"] = 0

data["when"] = datetime.now().isoformat()
data["success"] = success
urllib2.urlopen("http://localhost:8104/api/put?" + urllib.urlencode({"json": json.dumps(data)}))
| 29.117647
| 96
| 0.615152
|
acfd367466e342376216925e5dac42e1891cf064
| 9,403
|
py
|
Python
|
src/som_vis.py
|
FabianTraxler/SOM_Neighbourhood_Graphs
|
11d85c2dbb5220ec0fc63dc76d08cb7c89e19710
|
[
"Apache-2.0"
] | null | null | null |
src/som_vis.py
|
FabianTraxler/SOM_Neighbourhood_Graphs
|
11d85c2dbb5220ec0fc63dc76d08cb7c89e19710
|
[
"Apache-2.0"
] | null | null | null |
src/som_vis.py
|
FabianTraxler/SOM_Neighbourhood_Graphs
|
11d85c2dbb5220ec0fc63dc76d08cb7c89e19710
|
[
"Apache-2.0"
] | null | null | null |
# %%
"""
This module contains copies of the classes SOMToolBox_Parse and SomViz provided by the lecturers.
"""
import pandas as pd
import numpy as np
import gzip
from scipy.spatial import distance_matrix, distance
from ipywidgets import Layout, HBox, Box, widgets, interact
import plotly.graph_objects as go
class SOMToolBox_Parse:
    """Parser for SOMToolBox unit/weight vector files (optionally gzipped)."""

    def __init__(self, filename):
        self.filename = filename

    def read_weight_file(self):
        """Read the weight file and return (DataFrame, vec_dim, xdim, ydim).

        The DataFrame has one row per map unit (ydim * xdim rows) and one
        column per vector dimension, cast to float64.
        """
        df = pd.DataFrame()
        # gzipped files are recognised purely by their '.gz' suffix
        if self.filename.endswith('.gz'):
            with gzip.open(self.filename, 'rb') as file:
                df, vec_dim, xdim, ydim = self._read_vector_file_to_df(df, file)
        else:
            with open(self.filename, 'rb') as file:
                df, vec_dim, xdim, ydim = self._read_vector_file_to_df(df, file)
        return df.astype('float64'), vec_dim, xdim, ydim

    def _read_vector_file_to_df(self, df, file):
        """Parse an open binary vector file into df; return (df, vec_dim, xdim, ydim)."""
        xdim, ydim, vec_dim, position = 0, 0, 0, 0
        for byte in file:
            line = byte.decode('UTF-8')
            if line.startswith('$'):
                # '$XDIM'/'$YDIM'/'$VEC_DIM' metadata precedes the data rows;
                # allocate the frame once all dimensions are known
                xdim, ydim, vec_dim = self._parse_vector_file_metadata(line, xdim, ydim, vec_dim)
                if xdim > 0 and ydim > 0 and len(df.columns) == 0:
                    df = pd.DataFrame(index=range(0, ydim * xdim), columns=range(0, vec_dim))
            else:
                if len(df.columns) == 0 or vec_dim == 0:
                    raise ValueError('Weight file has no correct Dimensional information.')
                position = self._parse_weight_file_data(line, position, vec_dim, df)
        return df, vec_dim, xdim, ydim

    def _parse_weight_file_data(self, line, position, vec_dim, df):
        """Store one data line at row *position* of df; return the next row index.

        Raises ValueError when the line does not contain vec_dim numeric tokens.
        """
        splitted = line.split(' ')
        try:
            df.values[position] = list(np.array(splitted[0:vec_dim]).astype(float))
            position += 1
        except (ValueError, IndexError) as err:
            # narrowed from a bare 'except:' — only token-count/format problems
            # should be reported as a dimension mismatch
            raise ValueError('The input-vector file does not match its unit-dimension.') from err
        return position

    def _parse_vector_file_metadata(self, line, xdim, ydim, vec_dim):
        """Update (xdim, ydim, vec_dim) from a single '$KEY value' metadata line."""
        splitted = line.split(' ')
        if splitted[0] == '$XDIM':
            xdim = int(splitted[1])
        elif splitted[0] == '$YDIM':
            ydim = int(splitted[1])
        elif splitted[0] == '$VEC_DIM':
            vec_dim = int(splitted[1])
        return xdim, ydim, vec_dim
# %%
class SomViz:
    """Plotly-based visualizations for a trained SOM.

    weights is indexed row-major over an m x n unit grid: unit (i, j) lives at
    flat index i * n + j.  Each visualization either returns a new
    go.FigureWidget (when som_map is None) or updates the z-data of an
    existing heatmap figure in place.
    """

    def __init__(self, weights, m, n):
        # weights: per-unit weight vectors, one row per map unit (m*n rows)
        self.weights = weights
        self.m = m
        self.n = n

    def umatrix(self, som_map=None, color="Viridis", interp="best", title=""):
        """U-matrix: mean distance of each unit's weights to its grid neighbours."""
        um = np.zeros((self.m * self.n, 1))
        neuron_locs = list()
        for i in range(self.m):
            for j in range(self.n):
                neuron_locs.append(np.array([i, j]))
        # pairwise grid distances between all unit positions
        neuron_distmat = distance_matrix(neuron_locs, neuron_locs)
        for i in range(self.m * self.n):
            # direct grid neighbours (distance <= 1), including the unit itself
            neighbor_idxs = neuron_distmat[i] <= 1
            neighbor_weights = self.weights[neighbor_idxs]
            um[i] = distance_matrix(np.expand_dims(self.weights[i], 0), neighbor_weights).mean()
        if som_map == None:
            return self.plot(um.reshape(self.m, self.n), color=color, interp=interp, title=title)
        else:
            som_map.data[0].z = um.reshape(self.m, self.n)

    # NOTE(review): idata=[] is a mutable default argument; harmless here only
    # because idata is never mutated.
    def hithist(self, som_map=None, idata=[], color='RdBu', interp="best", title=""):
        """Hit histogram: count of input vectors mapped to each best-matching unit."""
        hist = [0] * self.n * self.m
        for v in idata:
            # best-matching unit = minimal Euclidean distance to v
            position = np.argmin(np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1)))
            hist[position] += 1
        if som_map == None:
            return self.plot(np.array(hist).reshape(self.m, self.n), color=color, interp=interp, title=title)
        else:
            som_map.data[0].z = np.array(hist).reshape(self.m, self.n)

    def component_plane(self, som_map=None, component=0, color="Viridis", interp="best", title=""):
        """Heatmap of a single weight-vector component over the unit grid."""
        if som_map == None:
            return self.plot(self.weights[:, component].reshape(-1, self.n), color=color, interp=interp, title=title)
        else:
            som_map.data[0].z = self.weights[:, component].reshape(-1, self.n)

    def sdh(self, som_map=None, idata=[], sdh_type=1, factor=1, draw=True, color="Cividis", interp="best", title=""):
        """Smoothed data histogram over the *factor* closest units per input.

        sdh_type selects the weighting: 1 = normalized rank, 2 = inverse
        distance, 3 = distance scaled into [0, 1].
        """
        import heapq
        sdh_m = [0] * self.m * self.n
        # cs = factor + (factor-1) + ... + 1, normalizer for sdh_type 1
        cs = 0
        for i in range(0, factor): cs += factor - i
        for vector in idata:
            dist = np.sqrt(np.sum(np.power(self.weights - vector, 2), axis=1))
            # indices of the 'factor' closest units, nearest first
            c = heapq.nsmallest(factor, range(len(dist)), key=dist.__getitem__)
            if (sdh_type == 1):
                for j in range(0, factor): sdh_m[c[j]] += (factor - j) / cs  # normalized
            if (sdh_type == 2):
                for j in range(0, factor): sdh_m[c[j]] += 1.0 / dist[c[j]]  # based on distance
            if (sdh_type == 3):
                dmin = min(dist)
                for j in range(0, factor): sdh_m[c[j]] += 1.0 - (dist[c[j]] - dmin) / (max(dist) - dmin)
        if som_map == None:
            return self.plot(np.array(sdh_m).reshape(-1, self.n), color=color, interp=interp, title=title)
        else:
            som_map.data[0].z = np.array(sdh_m).reshape(-1, self.n)

    def project_data(self, som_m=None, idata=[], title=""):
        """Scatter each input's best-matching unit position onto an existing figure."""
        data_y = []
        data_x = []
        for v in idata:
            position = np.argmin(np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1)))
            # flat index -> (column, row) on the n-wide grid
            x, y = position % self.n, position // self.n
            data_x.extend([x])
            data_y.extend([y])
        if som_m != None: som_m.add_trace(
            go.Scatter(x=data_x, y=data_y, mode="markers", marker_color='rgba(255, 255, 255, 0.8)', ))

    # NOTE(review): som_m=None default would crash at som_m.add_trace below —
    # callers must pass a figure; marked "not tested" by the original author.
    def time_series(self, som_m=None, idata=[], wsize=50, title=""):  # not tested
        """Trajectory view: BMU sequence over time next to the map, jittered for visibility."""
        data_y = []
        data_x = [i for i in range(0, len(idata))]
        data_x2 = []
        data_y2 = []
        qmin = np.Inf
        qmax = 0
        step = 1
        ps = []
        # first pass: best-matching unit and quantization error per input
        for v in idata:
            matrix = np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1))
            position = np.argmin(matrix)
            qerror = matrix[position]
            if qmin > qerror: qmin = qerror
            if qmax < qerror: qmax = qerror
            ps.append((position, qerror))
        markerc = []
        # second pass: marker opacity from relative qerror; jitter positions
        # away from the grid edges so overlapping points stay visible
        for v in ps:
            data_y.extend([v[0]])
            rez = v[1] / qmax
            markerc.append('rgba(0, 0, 0, ' + str(rez) + ')')
            x, y = v[0] % self.n, v[0] // self.n
            if x == 0:
                y = np.random.uniform(low=y, high=y + .1)
            elif x == self.m - 1:
                y = np.random.uniform(low=y - .1, high=y)
            elif y == 0:
                x = np.random.uniform(low=x, high=x + .1)
            elif y == self.n - 1:
                x = np.random.uniform(low=x - .1, high=x)
            else:
                x, y = np.random.uniform(low=x - .1, high=x + .1), np.random.uniform(low=y - .1, high=y + .1)
            data_x2.extend([x])
            data_y2.extend([y])
        ts_plot = go.FigureWidget(go.Scatter(x=[], y=[], mode="markers", marker_color=markerc,
                                             marker=dict(colorscale='Viridis', showscale=True,
                                                         color=np.random.randn(500))))
        ts_plot.update_xaxes(range=[0, wsize])
        ts_plot.data[0].x, ts_plot.data[0].y = data_x, data_y
        som_m.add_trace(go.Scatter(x=data_x2, y=data_y2, mode="markers", ))
        som_m.layout.height = 500
        ts_plot.layout.height = 500
        som_m.layout.width = 500
        ts_plot.layout.width = 1300
        return HBox([go.FigureWidget(som_m), go.FigureWidget(ts_plot)])

    def plot(self, matrix, color="Viridis", interp="best", title=""):
        """Wrap a 2-D matrix in a 700x700 smoothed heatmap FigureWidget."""
        return go.FigureWidget(go.Heatmap(z=matrix, zsmooth=interp, showscale=False, colorscale=color),
                               layout=go.Layout(width=700, height=700, title=title, title_x=0.5, ))
if __name__ == "__main__":
    # Demo: load a pre-trained iris SOM and overlay neighbourhood graphs on
    # several visualizations.
    from sklearn import datasets, preprocessing
    from src.NeighbourhoodGraph import NeighbourhoodGraph
    iris = datasets.load_iris().data
    # min_max_scaler = preprocessing.MinMaxScaler()
    # iris = min_max_scaler.fit_transform(iris)
    smap = SOMToolBox_Parse('../input/iris/iris.wgt.gz')
    s_weights, sdim, smap_x_dim, smap_y_dim = smap.read_weight_file()
    s_weights = s_weights.to_numpy()
    # neighbourhood graph built from the map weights plus the raw input data
    ng_iris = NeighbourhoodGraph(s_weights, smap_x_dim, smap_y_dim, input_data=iris)
    ng_iris_trace_3nn = ng_iris.get_trace(knn=3)
    go.FigureWidget(data=ng_iris_trace_3nn,
                    layout=go.Layout(width=700, height=700, title="Iris: NeighbourhoodGraph (3-NN)")).show()
    vis_iris = SomViz(s_weights, smap_x_dim, smap_y_dim)
    um_iris = vis_iris.umatrix(title="Iris: Umatrix + NeighbourhoodGraph (3-NN)")
    um_iris.add_trace(ng_iris_trace_3nn)
    um_iris.show()
    # We can reuse all traces
    ng_iris_trace_05r = ng_iris.get_trace(radius=0.5)
    # keep only the heatmap trace, then overlay the radius-based graph
    um_iris.data = [um_iris.data[0]]
    um_iris.add_trace(ng_iris_trace_05r)
    um_iris.layout = go.Layout(width=700, height=700, title="Iris: Umatrix + NeighbourhoodGraph (0.5 Radius)",
                               title_x=0.5, )
    um_iris.show()
    hithist_iris = vis_iris.hithist(idata=iris, title="Iris: HisHist + NeighbourhoodGraph (3-NN)")
    hithist_iris.add_trace(ng_iris_trace_3nn)
    hithist_iris.show()
| 38.695473
| 117
| 0.57354
|
acfd372e7daec2e07d7408953d59a6d85004aece
| 1,074
|
py
|
Python
|
test/test_messageproto.py
|
drbeh/NVFlare
|
efea2c04e641051537604a4de2f85e570fb4f3a7
|
[
"Apache-2.0"
] | null | null | null |
test/test_messageproto.py
|
drbeh/NVFlare
|
efea2c04e641051537604a4de2f85e570fb4f3a7
|
[
"Apache-2.0"
] | null | null | null |
test/test_messageproto.py
|
drbeh/NVFlare
|
efea2c04e641051537604a4de2f85e570fb4f3a7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.private.admin_defs import Message
from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message
class TestMessageProto:
    """Round-trip tests for Message <-> protobuf conversion."""

    def test_message_proto_convert(self):
        # Build a representative admin message with one header set.
        original = Message(topic="topic", body="{'id': 100}")
        original.set_header("Content-Type", "application/json")
        # Converting to proto and back must preserve every attribute.
        restored = proto_to_message(message_to_proto(original))
        assert restored.__dict__ == original.__dict__
| 41.307692
| 85
| 0.75419
|
acfd37b34744d15dc77a310cae58c303b6917813
| 104,859
|
py
|
Python
|
petrarch/petrarch.py
|
openeventdata/petrarch
|
7b36d67e8e2005cdb0c7c9577bba016721407c06
|
[
"MIT"
] | 40
|
2015-02-23T13:21:24.000Z
|
2021-04-20T06:38:25.000Z
|
petrarch/petrarch.py
|
openeventdata/petrarch
|
7b36d67e8e2005cdb0c7c9577bba016721407c06
|
[
"MIT"
] | 19
|
2015-01-12T20:56:49.000Z
|
2016-06-12T23:47:40.000Z
|
petrarch/petrarch.py
|
openeventdata/petrarch
|
7b36d67e8e2005cdb0c7c9577bba016721407c06
|
[
"MIT"
] | 29
|
2015-03-07T18:02:40.000Z
|
2021-01-25T03:04:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import glob
import time
import types
import logging
import argparse
import xml.etree.ElementTree as ET
# petrarch.py
##
# Automated event data coder
##
# SYSTEM REQUIREMENTS
# This program has been successfully run under Mac OS 10.10; it is standard Python 2.7
# so it should also run in Unix or Windows.
#
# INITIAL PROVENANCE:
# Programmers:
# Philip A. Schrodt
# Parus Analytics
# Charlottesville, VA, 22901 U.S.A.
# http://eventdata.parusanalytics.com
#
# John Beieler
# Caerus Associates/Penn State University
# Washington, DC / State College, PA, 16801 U.S.A.
# http://caerusassociates.com
# http://bdss.psu.edu
#
# GitHub repository: https://github.com/openeventdata/petrarch
#
# Copyright (c) 2014 Philip A. Schrodt. All rights reserved.
#
# This project is part of the Open Event Data Alliance tool set; earlier developments
# were funded in part by National Science Foundation grant SES-1259190
#
# This code is covered under the MIT license
#
# Report bugs to: schrodt735@gmail.com
#
# REVISION HISTORY:
# 22-Nov-13: Initial version
# Summer-14: Numerous modifications to handle synonyms in actor and verb dictionaries
# 20-Nov-14: write_actor_root/text added to parse_Config
# ------------------------------------------------------------------------
import PETRglobals # global variables
import PETRreader # input routines
import PETRwriter
import utilities
# ================================ DEBUGGING GLOBALS ==================== #
# (comment out the second line in the pair to activate. Like you couldn't
# figure that out.)
# prints ParseList in evaluate_validation_record()/code_record() following
# NE assignment
ShowParseList = True
ShowParseList = False
# displays parse trees in read_TreeBank
ShowRTTrees = True
ShowRTTrees = False
# remaining flags follow the same True-then-False toggle pattern; each gates
# debug printing at its use sites (names suggest coding sequence, pattern
# matching, NE parsing and compound marking — confirm at the call sites)
ShowCodingSeq = True
ShowCodingSeq = False
ShowPattMatch = True
ShowPattMatch = False
ShowNEParsing = True
ShowNEParsing = False
ShowMarkCompd = True
ShowMarkCompd = False
# identifier of the record currently being coded; interpolated into warning
# messages (e.g. raise_ParseList_error); set elsewhere in the pipeline
SentenceID = ""
# ================== EXCEPTIONS ================== #
class DupError(Exception):  # template
    """Unused template exception, kept as a pattern for new exception types."""
    pass
class HasParseError(Exception):  # exit the coding due to parsing error
    """Raised to abort coding of a record because of a parsing error."""
    pass
class SkipRecord(Exception):  # skip a validation record
    """Raised to skip a validation record."""
    pass
class UnbalancedTree(Exception):  # unbalanced () in the parse tree
    """Raised when the ( / ~ markers in a parse tree are unbalanced."""
    pass
# problems were found at some point in read_TreeBank
class IrregularPattern(Exception):
    """Raised when problems were found at some point in read_TreeBank."""
    pass
# problems were found in a specific pattern in check_verbs [make this
# local to that function?]
class CheckVerbsError(Exception):
    """Raised when problems were found in a specific pattern in check_verbs."""
    pass
# ================== ERROR FUNCTIONS ================== #
def raise_ParseList_error(call_location_string):
    """Log a coding/evaluation problem found in ParseList and abort the record.

    Called when the problem is serious enough that the record should not be
    coded: writes a warning (tagged with the module-level SentenceID) to the
    'petr_log' logger, then raises HasParseError.
    """
    message = call_location_string + '; record skipped: {}'.format(SentenceID)
    logging.getLogger('petr_log').warning(message)
    raise HasParseError
# ========================== DEBUGGING FUNCTIONS ========================== #
def get_version():
    """Return the PETRARCH version string."""
    return "0.4.0"
def show_tree_string(sent):
    """
    Indexes the () or (~ in a string tree and prints as an indented list.

    Tracks nesting depth while scanning the string, building an indented
    rendering in sout, and prints whether the open/close counts balance.
    Raises HasParseError on imbalance when PETRglobals.StoponError is set.
    """
    # NOTE(review): sout is built but never printed here — presumably used
    # interactively or left over from debugging; confirm before relying on it.
    newlev = False
    level = -1      # current nesting depth (-1 before any open paren)
    prevlevel = -1
    ka = 0          # scan position in sent
    nopen = 0       # count of '(' seen
    nclose = 0      # count of ')' and '~' seen
    sout = ''
    while ka < len(sent):
        if sent[ka] == '(':
            level += 1
            nopen += 1
            newlev = True
            if level != prevlevel or 'VP' == sent[
                    ka + 1:ka + 3] or 'SB' == sent[ka + 1:ka + 3]:
                # new line only with change in level, also with (VP, (SB
                sout += '\n' + level * ' '
            sout += '(-' + str(level) + ' '
        elif sent[ka] == ')' or sent[ka] == '~':
            nclose += 1
            prevlevel = level
            if not newlev:
                sout += '\n' + level * ' '
            if sent[ka] == ')':
                sout += str(level) + '-)'
            else:
                sout += str(level) + '~'
            level -= 1
            newlev = False
        else:
            # ordinary character: copy through unchanged
            sout += sent[ka]
        ka += 1
    if nopen == nclose:
        print("Balanced:", end=' ')
    else:
        print("Unbalanced:", end=' ')
    print("Open", nopen, "Close", nclose, '\n')
    if nopen != nclose and PETRglobals.StoponError:
        raise HasParseError
def check_balance(ParseList):
    """
    Check the ( / ~ count in a ParseList; raise UnbalancedTree if the
    tokens do not balance.
    """
    depth = 0
    for token in ParseList:
        if token[0] == '(':
            depth += 1
        elif token[0] == '~':
            depth -= 1
    if depth != 0:
        raise UnbalancedTree
# ========================== VALIDATION FUNCTIONS ========================== #
def change_Config_Options(line):
    """Change selected configuration options in PETRglobals.

    *line* is a dict with 'option' and 'value' keys.  Unknown options and
    malformed values are logged as warnings and otherwise ignored.
    """
    # need more robust error checking
    # Bug fix: 'logger' was referenced below but never bound in this function,
    # so every warning path raised NameError.  Acquire it explicitly, matching
    # raise_ParseList_error().
    logger = logging.getLogger('petr_log')
    theoption = line['option']
    value = line['value']
    #print("<Config>: changing", theoption, "to", value)
    if theoption == 'new_actor_length':
        try:
            PETRglobals.NewActorLength = int(value)
        except ValueError:
            logger.warning(
                "<Config>: new_actor_length must be an integer; command ignored")
    elif theoption == 'require_dyad':
        # any value other than (case-insensitive) 'false' enables the option
        PETRglobals.RequireDyad = 'false' not in value.lower()
    elif theoption == 'stop_on_error':
        PETRglobals.StoponError = 'false' not in value.lower()
    elif 'comma_' in theoption:
        try:
            cval = int(value)
        except ValueError:
            logger.warning(
                "<Config>: comma_* value must be an integer; command ignored")
            return
        if '_min' in theoption:
            PETRglobals.CommaMin = cval
        elif '_max' in theoption:
            PETRglobals.CommaMax = cval
        elif '_bmin' in theoption:
            PETRglobals.CommaBMin = cval
        elif '_bmax' in theoption:
            PETRglobals.CommaBMax = cval
        elif '_emin' in theoption:
            PETRglobals.CommaEMin = cval
        elif '_emax' in theoption:
            PETRglobals.CommaEMax = cval
        else:
            logger.warning(
                "<Config>: unrecognized option beginning with comma_; command ignored")
    # insert further options here in elif clauses as this develops; also
    # update the docs in open_validation_file():
    else:
        logger.warning("<Config>: unrecognized option")
def _check_envr(environ):
for elem in environ:
if elem.tag == 'Verbfile':
PETRglobals.VerbFileName = elem.text
if elem.tag == 'Actorfile':
PETRglobals.ActorFileList[0] = elem.text
if elem.tag == 'Agentfile':
PETRglobals.AgentFileName = elem.text
if elem.tag == 'Discardfile':
PETRglobals.DiscardFileName = elem.text
if elem.tag == 'Errorfile':
print('This is deprecated. Using a different errorfile. ¯\_(ツ)_/¯')
if elem.tag == 'Include':
ValidInclude = elem.text.split()
print('<Include> categories', ValidInclude)
if 'valid' in ValidInclude:
ValidOnly = True
ValidInclude.remove('valid')
else:
ValidInclude = ''
if elem.tag == 'Exclude':
ValidExclude = elem.tag.split()
print('<Exclude> categories', ValidExclude)
else:
ValidExclude = ''
if elem.tag == 'Pause':
theval = elem.text
if 'lways' in theval:
ValidPause = 1 # skip first char to allow upper/lower case
elif 'ever' in theval:
ValidPause = 2
elif 'top' in theval:
ValidPause = 3
else:
ValidPause = 0
return ValidInclude, ValidExclude, ValidPause, ValidOnly
# ================== TEXTFILE INPUT ================== #
def read_TreeBank(tree_string):
"""
Reads parsed sentence in the Penn TreeBank II format and puts the linearized version
in the list ParseList. Sets ParseStart. Leaves global input file fin at line
following </parse>. The routine is appears to be agnostic towards the line-feed and tab
formatting of the parse tree
TO DO <14.09.03>: Does this handle an unexpected EOF error?
TO DO <14.09.03>: This really belongs as a separate module and the code seems
sufficiently stable now that this could be done
read_TreeBank() can raise quite a few different named errors which are handled by
check_irregulars(); these can be checked as ValidErrorType. ParseList should come out
of this balanced. In addition to the error trapping there is extensive commented-out
debugging code.
======= ParseList coding =========
Because they are still based in a shallow parsing approach, the KEDS/TABARI/PETR
dictionaries are based on linear string matching rather than a tree representation,
which differs from the VRA-Reader and BBN-Serif approach, but is much faster, or
perhaps more accurately, let the Treebank parser do the work once, rather than
re-evaluating a tree every time events are coded. The information in the tree is used
primarily for clause delineation.
The read_TreeBank() function is currently the "first line of defense" in
modifying the fully parsed input to a form that will work with the
dictionaries developed under the older shallow parser. As of <13.11.25>
this is focused on converting noun phrases ('(NP') to a shallower 'NE'
(named-entity) format. Additional modifications may follow.
Clauses are generally delineated using (XXX for the beginning and ~XXX for
the end, where XXX are the TreeBank tags. The current code will leave some
excess ')' at the end.
Additional markup:
1. Simple noun phrases -- those which are delineated by '(NP ... '))' --
have their tag converted to 'NE' and the intermediate POS TreeBank marking
removed. These are the only phrases that can match actors and agents. A
placeholder code '---' is added to this structure.
Note that because CoreNLP separates the two components of a possessive
marking (that is, noun + apostrophe-S), this cannot be used as part of
an actor string,
so for example
CHINA'S BLUEWATER NAVY
is going to look like
CHINA 'S BLUEWATER NAVY
In the rather unlikely case that the actor with and without the
possessive would map to different code, do a global substitution, for
example 'S -> QX and then match that, i.e.
CHINAQX BLUEWATER NAVY
Realistically, however, a noun and its possessive will be equivalent in
actor coding.
2. The possessive structure (NP (NP ... (POS )) ... ) is converted to an NE
with the (POS 'S) eliminated, so this also cannot be in a dictionary
3. The prepositional phrase structure (NP (NP ... )) (PP ) NP( ... )) is
converted to an NE; the preposition (IN ...) is retained
4. The text of an (SBAR inside an (NP is retained
5. (VP and complex (NP are indexed so that the end of the phrase can be
identified so these have the form (XXn and ~XXn
The routine check_irregulars() handles a variety of conditions where the input
or the parsing is not going well; check the various error messages for details
<13.11.27> Reflections of PETR vs TABARI parsing
As is well known, the shallow parsing of TABARI, while getting things wrong
for the wrong reasons, also frequently got things right for the wrong
reasons, which is to say it was rather robust on variations, grammatical or
otherwise, in the sentences. With the use of CoreNLP, we no longer have
this advantage, and it is likely to take some coder experimentation with an
extensive set of real texts to determine the various contingencies that
needs to be accommodated.
"""
ParseList = ""
ParseStart = 0
treestr = tree_string
def check_irregulars(knownerror=''):
"""
Checks for some known idiosyncratic ParseList patterns that indicate problems in the
the input text or, if knownrecord != '', just raises an already detected error. In
either case, logs the specific issue, sets the global ValidError (for unit tests)
and raises IrregularPattern.
Currently tracking:
-- bad_input_parse
-- empty_nplist
-- bad_final_parse
-- get_forward_bounds
-- get_enclosing_bounds
-- resolve_compounds
-- get_NE_error
-- dateline [pattern]
"""
#global ValidError
if knownerror:
if knownerror == 'bad_input_parse':
warningstr = '<Parse>...</Parse> input was not balanced; record skipped: {}'
elif knownerror == 'empty_nplist':
warningstr = 'Empty np_list in read_Tree; record skipped: {}'
elif knownerror == 'bad_final_parse':
warningstr = 'ParseList unbalanced at end of read_Tree; record skipped: {}'
elif knownerror == 'get_forward_bounds':
warningstr = 'Upper bound error in get_forward_bounds in read_Tree; record skipped: {}'
elif knownerror == 'get_enclosing_bounds':
warningstr = 'Lower bound error in get_enclosing_bounds in read_Tree; record skipped: {}'
elif knownerror == 'resolve_compounds':
warningstr = 'get_NE() error in resolve_compounds() in read_Tree; record skipped: {}'
elif knownerror == 'get_NE_error':
warningstr = 'get_NE() error in main loop of read_Tree; record skipped: {}'
else:
warningstr = """Unknown error type encountered in check_irregulars()
--------- this is a programming bug but nonetheless the record was skipped: {}"""
logger = logging.getLogger('petr_log')
#logger.warning(warningstr.format(SentenceID))
ValidError = knownerror
raise IrregularPattern
ntag = 0
taglist = []
ka = 0
while ka < len(ParseList):
if ParseList[ka][0] == '(':
taglist.append(ParseList[ka])
ntag += 1
if ntag > 2:
break # this is all we need for dateline
ka += 1
if taglist[:3] == ['(ROOT', '(NE', '(NEC']:
logger = logging.getLogger('petr_log')
#logger.warning(
# 'Dateline pattern found in ParseList; record skipped: {}'.format(SentenceID))
ValidError = 'dateline'
raise IrregularPattern
def get_NE(NPphrase):
"""
Convert (NP...) ) to NE: copies any (NEC phrases with markup, remainder of
the phrase without any markup
Can raise IrregularPattern, which is caught and re-raised at the calling point
"""
nplist = ['(NE --- ']
seg = NPphrase.split()
if ShowNEParsing:
print('List:', seg)
print("gNE input tree", end=' ')
show_tree_string(NPphrase)
print('List:', seg)
ka = 1
while ka < len(seg):
if seg[ka] == '(NEC': # copy the phrase
nplist.append(seg[ka])
ka += 1
nparen = 1 # paren count
while nparen > 0:
if ka >= len(seg):
raise IrregularPattern
if seg[ka][0] == '(':
nparen += 1
elif seg[ka] == ')':
nparen -= 1
nplist.append(seg[ka])
ka += 1
# copy the phrase without the markup
elif seg[ka][0] != '(' and seg[ka] != ')':
nplist.append(seg[ka])
ka += 1
else:
ka += 1
nplist.append(')')
return nplist
def get_forward_bounds(ka):
"""
Returns the bounds of a phrase in treestr that begins at ka, including the final space.
""" # <13.12.07> see note above
kb = ka + 1
nparen = 1 # paren count
while nparen > 0:
if kb >= len(treestr):
check_irregulars('get_forward_bounds')
if treestr[kb] == '(':
nparen += 1
elif treestr[kb] == ')':
nparen -= 1
kb += 1
return [ka, kb]
def get_enclosing_bounds(ka):
"""
Returns the bounds of a phrase in treestr that encloses the phrase beginning at ka
"""
kstart = ka - 1
nparen = 0 # paren count
while nparen <= 0: # back out to the phrase tag that encloses this
if kstart < 0:
check_irregulars('get_enclosing_bounds')
if treestr[kstart] == '(':
nparen += 1
elif treestr[kstart] == ')':
nparen -= 1
kstart -= 1
return [kstart + 1, get_forward_bounds(kstart + 1)[1]]
def resolve_compounds(ka, fline):
"""
Assign indices, eliminates the internal commas and (CC, and duplicate
any initial adjectives inside a compound.
This leaves the (NEC with leaving just the (NE.
Returns treestr loc (ka) past the end of the phrase.
Index assignment may involve just a simple (NNP or (NNS.
Parsing bug note: <14.01.13>
In what appear to be rare circumstances, CoreNLP does not correctly
delimit two consecutive nouns in a compound as (NP. Specifically,
in the test sentence
Mordor and the Shire welcomed a resumption of formal
diplomatic ties between Minas Tirith and Osgiliath.
the second compound phrase is marked as
(NP (NNP Minas) (NNP Tirith) (CC and) (NNP Osgiliath))
but if "Osgiliath" is changed to "Hong Kong" it gives the correct
(NP (NP (NNP Minas) (NNP Tirith)) (CC and) (NP (NNP Hong) (NNP Kong))
A systematic check of one of the GigaWord files shows that this
appears to occur only very rarely -- and in any case is a parsing
error -- so this routine does not check for it.
"""
fullline = fline
necbds = get_forward_bounds(ka) # get the bounds of the NEC phrase
if ShowMarkCompd:
print('rc/RTB: NEC:', necbds, treestr[necbds[0]:necbds[1]])
ka += 4
adjlist = [] # get any adjectives prior to first noun
while not treestr.startswith(
'(NP', ka) and not treestr.startswith('(NN', ka):
if treestr.startswith('(JJ', ka):
npbds = get_forward_bounds(ka)
if ShowMarkCompd:
print('rc/RTB-1: JJ:', npbds, treestr[npbds[0]:npbds[1]])
adjlist.extend(treestr[npbds[0]:npbds[1]].split())
ka += 1
while ka < necbds[1]: # convert all of the NP, NNS and NNP to NE
if treestr.startswith('(NP', ka) or treestr.startswith('(NN', ka):
npbds = get_forward_bounds(ka)
if ShowMarkCompd:
print('rc/RTB-1: NE:', npbds, treestr[npbds[0]:npbds[1]])
# just a single element, so get it
if treestr.startswith('(NN', ka):
seg = treestr[npbds[0]:npbds[1]].split()
nplist = ['(NE --- ']
if len(adjlist) > 0:
nplist.extend(adjlist)
nplist.extend([seg[1], ' ) '])
else:
try:
nplist = get_NE(treestr[npbds[0]:npbds[1]])
except IrregularPattern:
check_irregulars('resolve_compounds')
if ShowMarkCompd:
print('rc/RTB-2: NE:', nplist)
for kb in range(len(nplist)):
fullline += nplist[kb] + ' '
ka = npbds[1]
ka += 1
fullline += ' ) ' # closes the nec
if ShowMarkCompd:
print('rc/RTB3: NE:', fullline)
return necbds[1] + 1, fullline
def process_preposition(ka):
    """
    Process (NP containing a (PP and return an nephrase: if this doesn't have a
    simple structure of (NP (NP ...) (PP...) (NP/NEC ...)) without any further
    (PP -- i.e. multiple levels of prep phrases -- it returns a null string.

    ka: index into treestr at (or inside) the (PP being processed.
    Returns the rebuilt noun-phrase string, or '' when the structure is not
    one of the two recognized shapes.

    NOTE: reads ``treestr``, ``get_enclosing_bounds`` and
    ``get_forward_bounds`` from the enclosing scope.
    """
    # this should be a (NP (NP
    bds = get_enclosing_bounds(ka)
    if treestr.startswith('(NP (NP', bds[0]):
        # placeholder: this will get converted
        nepph = '(NP '
        npbds = get_forward_bounds(bds[0] + 4)  # get the initial (NP
        # copy the initial (NP interior, dropping its tag and closer
        nepph += treestr[npbds[0] + 4:npbds[1] - 2]
    elif treestr.startswith('(NP (NEC', bds[0]):
        nepph = '(NP (NEC '  # placeholder:
        npbds = get_forward_bounds(bds[0] + 4)  # get the initial (NEC
        # save the closing ' ) '
        nepph += treestr[npbds[0] + 4:npbds[1] + 1]
    else:
        # not what we are expecting, so bail
        return ''
    # get the preposition and transfer it
    ka = treestr.find('(IN ', npbds[1])
    nepph += treestr[ka:treestr.find(' ) ', ka + 3) + 3]
    # find first (NP or (NEC after prep
    kp = treestr.find('(NP ', ka + 4, bds[1])
    kec = treestr.find('(NEC ', ka + 4, bds[1])
    if kp < 0 and kec < 0:
        # not what we are expecting, so bail
        return ''
    if kp < 0:
        # no (NP gives priority to (NEC and vice versa
        kp = len(treestr)
    if kec < 0:
        kec = len(treestr)
    # kb is whichever of (NP / (NEC occurs first
    if kp < kec:
        kb = kp
    else:
        kb = kec
    npbds = get_forward_bounds(kb)
    if '(PP' in treestr[npbds[0]:npbds[1]]:
        # there's another level of (PP here <14.04.21: can't we just
        return ('')
    # treestr[kb + 2] == 'E' distinguishes '(NEC' from '(NP'
    if treestr[kb + 2] == 'E':
        nepph += treestr[kb:npbds[1] + 1]  # pick up a ') '
    else:
        # skip the (NP and pick up the final ' ' (we're using this to close
        # the original (NP
        nepph += treestr[npbds[0] + 4:npbds[1] - 1]
    if '(SBR' in treestr[npbds[1]:]:  # transfer the phrase
        kc = treestr.find('(SBR', npbds[1])
        nepph += treestr[kc:treestr.find(') ', kc) + 2]
    nepph += ')'  # close the phrase
    return nepph
logger = logging.getLogger('petr_log')
fullline = ''
vpindex = 1
npindex = 1
ncindex = 1
if ShowRTTrees:
print('RT1 treestr:', treestr) # debug
print('RT1 count:', treestr.count('('), treestr.count(')'))
show_tree_string(treestr)
if treestr.count('(') != treestr.count(')'):
check_irregulars('bad_input_parse')
if '~' in treestr:
treestr = treestr.replace('~', '-TILDA-')
##############################
# Mark Compounds#
##############################
ka = -1
while ka < len(treestr):
ka = treestr.find('(CC', ka + 3) #
if ka < 0:
break
kc = treestr.find(')', ka + 3)
bds = get_enclosing_bounds(ka)
kb = bds[0]
if ShowMarkCompd:
print('\nMC1:', treestr[kb:])
if '(VP' in treestr[bds[0]:bds[1]] or '(S' in treestr[bds[0]:bds[1]]:
treestr = treestr[:ka + 3] + 'P' + treestr[ka + 3:]
if ShowMarkCompd:
print('\nMC2:', treestr[kb:])
elif treestr[bds[0]:bds[1]].count('(CC') > 1:
# convert CC to CCP: see note above
treestr = treestr[:ka + 4] + 'P' + treestr[ka + 4:]
if ShowMarkCompd:
print('\nMC3:', treestr[kb:])
elif treestr[kb + 1:kb + 3] == 'NP':
# make sure we actually have multiple nouns in the phrase
if treestr.count('(N', bds[0], bds[1]) >= 3:
treestr = treestr[:kb + 2] + 'EC' + \
treestr[kb + 3:] # convert NP to NEC
if ShowMarkCompd:
print('\nMC4:', treestr[kb:])
if ShowRTTrees:
print('RT1.5 count:', treestr.count('('), treestr.count(')'))
ka = 0
while ka < len(treestr):
if treestr.startswith('(NP ', ka):
npbds = get_forward_bounds(ka)
ksb = treestr.find(
'(SBAR ',
npbds[0],
npbds[1])
while ksb >= 0:
########################
# REDUCE SBAR
#######################
bds = get_enclosing_bounds(ksb + 5)
frag = ''
segm = treestr[bds[0]:bds[1]]
kc = 0
while kc < len(segm):
kc = segm.find(' ', kc)
if kc < 0:
break
if segm[kc + 1] != '(': # skip markup, just get words
kd = segm.find(' )', kc)
frag += segm[kc:kd]
kc = kd + 3
else:
kc += 2
# bound with '(SBR ' and ' )'
treestr = treestr[:bds[0]] + \
'(SBR ' + frag + treestr[bds[1] - 2:]
#########################
# recompute the bounds because treestr has been modified
npbds = get_forward_bounds(ka)
ksb = treestr.find('(SBAR ', npbds[0], npbds[1])
nephrase = ''
if ShowNEParsing:
print('BBD: ', treestr[npbds[0]:npbds[1]])
if '(POS' in treestr[ka + 3:npbds[1]]: # get the (NP possessive
kb = treestr.find('(POS', ka + 4)
nephrase = treestr[ka + 4:kb - 1] # get string prior to (POS
if treestr[kb + 12] == 's':
incr = 14
else:
incr = 13 # allow for (POS ')
# skip over (POS 's) and get the remainder of the NP
nephrase += ' ' + treestr[kb + incr:npbds[1]]
if ShowNEParsing:
print('RTPOS: NE:', nephrase)
elif '(PP' in treestr[ka + 3:npbds[1]]: # prepositional phrase
if False:
print('PPP-1: ', treestr[ka:npbds[1]])
print(
'PPP-1a: ',
treestr.find(
'(PP',
ka + 3,
npbds[1]),
ka,
npbds[1])
print(
'PPP-1b: ',
get_enclosing_bounds(
treestr.find(
'(PP',
ka + 3,
npbds[1])))
nephrase = process_preposition(
treestr.find('(PP', ka + 3, npbds[1]))
if ShowNEParsing:
print('RTPREP: NE:', nephrase)
# no further (NPs, so convert to NE
elif '(NP' not in treestr[ka + 3:npbds[1]] and '(NEC' not in treestr[ka + 3:npbds[1]]:
nephrase = treestr[ka:npbds[1]]
if ShowNEParsing:
print('RTNP: NE:', nephrase)
if len(nephrase) > 0:
try:
nplist = get_NE(nephrase)
except IrregularPattern:
check_irregulars('get_NE_error')
if not nplist:
check_irregulars('empty_nplist')
for kb in range(len(nplist)):
fullline += nplist[kb] + ' '
ka = npbds[1] + 1
else: # it's something else...
fullline += '(NP' + str(npindex) + ' ' # add index
npindex += 1
ka += 4
elif treestr.startswith('(NEC ', ka):
fullline += '(NEC' + str(ncindex) + ' '
ncindex += 1
ka, fullline = resolve_compounds(ka, fullline)
elif treestr.startswith('(VP ', ka): # assign index to VP
fullline += '(VP' + str(vpindex) + ' '
vpindex += 1
ka += 4
else:
fullline += treestr[ka]
ka += 1
# convert the text to ParseList format; convert ')' to ~XX tags
ParseList = fullline.split()
kopen = 0
kclose = 0
for item in ParseList:
if item.startswith('('):
kopen += 1
elif item == ')':
kclose += 1
if ShowRTTrees:
print('RT2 count:', kopen, kclose)
ka = 0
opstack = []
while ka < len(ParseList):
if ParseList[ka][0] == '(':
opstack.append(ParseList[ka][1:])
if ParseList[ka][0] == ')':
if len(opstack) == 0:
break
op = opstack.pop()
ParseList[ka] = '~' + op
ka += 1
if ShowRTTrees:
print('RT2:', ParseList)
show_tree_string(' '.join(ParseList))
ParseStart = 2 if ParseList[0] == '(' else 1
check_irregulars()
try:
check_balance(ParseList)
except UnbalancedTree:
check_irregulars('bad_final_parse')
return ParseList, ParseStart
# ================== CODING ROUTINES ================== #
def get_loccodes(thisloc, CodedEvents, UpperSeq, LowerSeq):
    """
    Returns the list of codes from a compound, or just a single code if not compound

    thisloc: [index into the sequence, True if UpperSeq else LowerSeq]

    Extracting noun phrases which are not in the dictionary: If no actor or agent
    generating a non-null code can be found using the source/target rules, PETRARCH can
    output the noun phrase in double-quotes. This is controlled by the configuration file
    option new_actor_length, which is set to an integer which gives the maximum length
    for new actor phrases extracted. If this is set to zero [default], no extraction is
    done and the behavior is the same as TABARI. Setting this to a large number will
    extract anything found in a (NP noun phrase, though usually true actors contain a
    small number of words. These phrases can then be processed with named-entity-resolution
    software to extend the dictionaries.
    """
    def get_ne_text(neloc, isupperseq):
        """ Returns the text of the phrase from UpperSeq/LowerSeq starting at neloc. """
        if isupperseq:
            acphr = UpperSeq[neloc - 1]
            ka = neloc - 2  # UpperSeq is stored in reverse order
            # we can get an unbalanced sequence when multi-word verbs cut into
            # the noun phrase: see DEMO-30 in unit-tests
            while ka >= 0 and UpperSeq[ka][0] != '~':
                acphr += ' ' + UpperSeq[ka]
                ka -= 1
        else:
            acphr = LowerSeq[neloc + 1]
            ka = neloc + 2
            while LowerSeq[ka][0] != '~':
                acphr += ' ' + LowerSeq[ka]
                ka += 1
        return acphr

    def add_code(neloc, isupperseq, cl):
        """
        Appends the code or phrase from UpperSeq/LowerSeq starting at neloc.
        isupperseq determines the choice of sequence
        If PETRglobals.WriteActorText is True, root phrase is added to the code following the
        string PETRglobals.TextPrimer

        NOTE: mutates the list passed as ``cl`` in place and also returns it,
        so callers may either use the return value or rely on the mutation.
        """
        codelist = cl
        if isupperseq:
            # "add_code neitem"; nothing to do with acne...
            acneitem = UpperSeq[neloc]
        else:
            acneitem = LowerSeq[neloc]
        # actor code is the text after the '>' marker appended by get_*_seq
        accode = acneitem[acneitem.find('>') + 1:]
        if accode != '---':
            codelist.append(accode)
        elif PETRglobals.NewActorLength > 0:  # get the phrase
            acphr = '"' + get_ne_text(neloc, isupperseq) + '"'
            if acphr.count(' ') < PETRglobals.NewActorLength:
                codelist.append(acphr)
            else:
                codelist.append(accode)
            if PETRglobals.WriteActorRoot:
                codelist[-1] += PETRglobals.RootPrimer + '---'
        if PETRglobals.WriteActorText and len(codelist) > 0:
            codelist[-1] += PETRglobals.TextPrimer + \
                get_ne_text(neloc, isupperseq)
        return codelist

    codelist = []
    if thisloc[1]:
        try:
            neitem = UpperSeq[thisloc[0]]
        except IndexError:
            raise_ParseList_error(
                'Initial index error on UpperSeq in get_loccodes()')
        # extract the compound codes from the (NEC ... ~NEC sequence
        if '(NEC' in neitem:
            ka = thisloc[0] - 1  # UpperSeq is stored in reverse order
            while '~NEC' not in UpperSeq[ka]:
                if '(NE' in UpperSeq[ka]:
                    codelist = add_code(ka, True, codelist)
                ka -= 1
                if ka < 0:
                    raise_ParseList_error(
                        'Bounds underflow on UpperSeq in get_loccodes()')
        else:
            codelist = add_code(thisloc[0], True, codelist)  # simple code
    else:
        try:
            neitem = LowerSeq[thisloc[0]]
        except IndexError:
            raise_ParseList_error(
                'Initial index error on LowerSeq in get_loccodes()')
        if '(NEC' in neitem:  # extract the compound codes
            ka = thisloc[0] + 1
            while '~NEC' not in LowerSeq[ka]:
                if '(NE' in LowerSeq[ka]:
                    # return value unused here: add_code mutates codelist
                    add_code(ka, False, codelist)
                ka += 1
                if ka >= len(LowerSeq):
                    raise_ParseList_error(
                        'Bounds overflow on LowerSeq in get_loccodes()')
        else:
            codelist = add_code(thisloc[0], False, codelist)  # simple code
    if len(codelist) == 0:  # this can occur if all codes in an (NEC are null
        codelist = ['---']
    return codelist
def find_source(UpperSeq, LowerSeq, Src, Trg):
    """
    Assign SourceLoc to the first coded or compound (NE in the UpperSeq; if
    neither found then first (NE with --- code. Note that we are going through
    the sentence in normal order, so we go through UpperSeq in reverse order.
    Also note that this matches either (NE and (NEC: these are processed
    differently in make_event_string()

    UpperSeq: sequence items ('(NE<k>CODE', '(NEC...', words, '~NE' tags)
    LowerSeq: sequence containing the already-selected target at index Trg[0]
    Src:      value to return when no source is found (caller passes "")
    Trg:      [index into LowerSeq, bool] for the target location

    Returns [index, True] (True == UpperSeq) or Src when nothing matches.
    """
    if not UpperSeq:          # nothing to search
        return Src
    # code of the already-chosen target: loop-invariant, so compute it once
    # rather than re-splitting LowerSeq[Trg[0]] on every comparison
    target_code = LowerSeq[Trg[0]].split('>')[1]
    # Pass 1: first compound (NEC, or coded (NE, whose code differs from the
    # target's code
    for kseq, item in enumerate(UpperSeq):
        if '(NEC' in item and not item.endswith(target_code):
            return [kseq, True]
        if '(NE' in item and '>---' not in item and not item.endswith(target_code):
            return [kseq, True]
    # Pass 2: fall back to any (NE -- including null-coded -- that does not
    # duplicate the target's code
    for kseq, item in enumerate(UpperSeq):
        if '(NE' in item and not item.endswith(target_code):
            return [kseq, True]
    return Src
def find_target(LowerSeq, TargetLoc):
    """
    Assigns TargetLoc.

    Priorities for assigning target:
    1. first compound (NEC, or first coded (NE, in LowerSeq
    2. first null-coded (NE in LowerSeq

    (The original design also described UpperSeq fallbacks as priorities 3-4;
    they are not implemented here -- this function only searches LowerSeq.)

    Returns [index, False] (False == LowerSeq) or the TargetLoc argument
    unchanged when nothing in LowerSeq qualifies.
    """
    # Priority 1: look in the lower phrase after the verb for a compound or
    # a coded (non '---') noun phrase
    for k, item in enumerate(LowerSeq):
        if item.startswith('(NEC'):
            return [k, False]
        if item.startswith('(NE') and not item.endswith('>---'):
            return [k, False]
    # Priority 2: settle for a null-coded noun phrase
    for k, item in enumerate(LowerSeq):
        if item.startswith('(NE'):
            return [k, False]
    return TargetLoc
def get_upper_seq(kword, ParseList, ParseStart):
    """
    Generate the upper sequence starting from kword; Upper sequence currently
    terminated by ParseStart, ~S or ~,

    kword:      index into ParseList to start from, walking backwards
    ParseList:  tagged token list produced by the tree reader
    ParseStart: lower bound of the walk

    Returns the (reverse-order) UpperSeq list; '(NE' items are rewritten as
    '(NE<index>code' so later stages can recover their ParseList position.
    """
    UpperSeq = []
    while kword >= ParseStart:
        # print(kword,UpperSeq)
        if ('~,' in ParseList[kword]):
            break
        if ('(NE' == ParseList[kword]):
            # walking backwards, the code token was appended just before we
            # reach the matching '(NE', so pop it and fold it into the tag
            code = UpperSeq.pop()  # remove the code
            UpperSeq.append(
                ParseList[kword] +
                '<' +
                str(kword) +
                '>' +
                code)  # <pas 13.07.26> See Note-1
        elif ('NEC' in ParseList[kword]):
            UpperSeq.append(ParseList[kword])
        elif ('~NE' in ParseList[kword]):
            UpperSeq.append(ParseList[kword])
        elif (ParseList[kword][0] != '(') and (ParseList[kword][0] != '~'):
            # plain word: keep it
            UpperSeq.append(ParseList[kword])
        kword -= 1
        if kword < 0:
            # error is handled in check_verbs
            raise_ParseList_error('Bounds underflow in get_upper_seq()')
            # NOTE(review): unreachable if raise_ParseList_error raises
            return  # not needed, right?
    # ShowCodingSeq is a module-level debug flag
    if ShowCodingSeq:
        print("Upper sequence:", UpperSeq)
    return UpperSeq
def get_lower_seq(kword, endtag, ParseList):
    """
    Generate the lower sequence starting from kword; lower sequence includes only
    words in the VP

    kword:   index into ParseList to start from, walking forwards
    endtag:  closing tag (e.g. '~VP1') that terminates the verb phrase
    ParseList: tagged token list produced by the tree reader

    Returns the LowerSeq list; '(NE' items are rewritten as '(NE<index>code'
    (the code is the token immediately following '(NE' in ParseList).
    """
    LowerSeq = []
    # limit this to the verb phrase itself
    while (endtag not in ParseList[kword]):
        if ('(NE' == ParseList[kword]):
            LowerSeq.append(
                ParseList[kword] +
                '<' +
                str(kword) +
                '>' +
                ParseList[
                    kword +
                    1])  # <pas 13.07.26> See Note-1
            kword += 1  # skip code
        elif ('NEC' in ParseList[kword]):
            LowerSeq.append(ParseList[kword])
        elif ('~NE' in ParseList[kword]):
            LowerSeq.append(ParseList[kword])
        elif (ParseList[kword][0] != '(') and (ParseList[kword][0] != '~'):
            # plain word: keep it
            LowerSeq.append(ParseList[kword])
        kword += 1
        # <14.04.23>: need to just set this to len(ParseList)?
        if kword >= len(ParseList):
            # error is handled in check_verbs
            raise_ParseList_error('Bounds overflow in get_lower_seq()')
            # NOTE(review): unreachable if raise_ParseList_error raises
            return LowerSeq  # not needed, right?
    # ShowCodingSeq is a module-level debug flag
    if ShowCodingSeq:
        print("Lower sequence:", LowerSeq)
    return LowerSeq
def make_multi_sequences(multilist, verbloc, endtag, ParseList, ParseStart):
    """
    Check if the multi-word list in multilist is valid for the verb at ParseList[verbloc],
    then create the upper and lower sequences to be checked by the verb patterns. Lower
    sequence includes only words in the VP; upper sequence currently terminated by ParseStart,
    ~S or ~,

    multilist: [True if the extra words follow the verb, word, word, ...]
    verbloc:   index of the verb in ParseList
    endtag:    closing tag of the verb phrase, passed through to get_lower_seq

    Returns (False, "", "") if the multilist is not valid, otherwise
    (True, upper_sequence, lower_sequence).
    """
    def match_words(kword, step):
        # Consume multilist[1:] from ParseList starting at kword, moving in
        # direction `step` and ignoring markup tokens ('(' / '~' prefixed);
        # returns (matched?, index one step past the last token examined).
        ka = 1
        while ka < len(multilist):
            if (ParseList[kword][0] != '(') and (ParseList[kword][0] != '~'):
                if ParseList[kword] == multilist[ka]:
                    ka += 1
                else:
                    return False, kword
            kword += step
        return True, kword

    if multilist[0]:  # words follow the verb
        matched, kword = match_words(verbloc + 1, 1)
        if not matched:
            return False, "", ""
        upper = get_upper_seq(verbloc - 1, ParseList, ParseStart)
        lower = get_lower_seq(kword, endtag, ParseList)
        return True, upper, lower
    else:  # words precede the verb
        matched, kword = match_words(verbloc - 1, -1)
        if not matched:
            return False, "", ""
        upper = get_upper_seq(kword, ParseList, ParseStart)
        lower = get_lower_seq(verbloc + 1, endtag, ParseList)
        return True, upper, lower
def skip_item(item):
    """
    Determines whether a particular item in the parse needs to be skipped.

    Returns:
        1 -- markup token (starts with '~' or '(')
        2 -- determiner / pronoun / auxiliary noise word
        3 -- number word
        4 -- numeric token (parses as an int)
        5 -- currency word
        0 -- anything else, i.e. a word that should be matched
    """
    if item[0] in "~(":
        return 1
    if item in ["THE", "A", "AN", "IT", "HE", "THEY",
                "HER", "HAS", "HAD", "HAVE", "SOME", "FEW", "THAT"]:
        return 2
    if item in ["HUNDRED", "THOUSAND", "MILLION", "BILLION", "TRILLION", "DOZEN",
                "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]:
        return 3
    if item in ["DOLLAR", "DUCAT"]:
        return 5
    try:
        int(item)
        return 4
    except ValueError:
        # was a bare `except:`, which would also have masked unrelated errors
        # such as KeyboardInterrupt; only a failed int() conversion means
        # "ordinary word"
        return 0
def check_verbs(ParseList, ParseStart, CodedEv):
    """
    Primary coding loop which looks for verbs, checks whether any of their
    patterns match, then fills in the source and target if there has been a
    match. Stores events using make_event_strings().
    Note: the "upper" sequence is the part before the verb -- that is, higher
    on the screen -- and the "lower" sequence is the part after the verb.
    Assuming, of course, that I've used these consistently.
    SourceLoc, TargetLoc structure
    [0]: the location in *Seq where the NE begins
    [1]: True - located in UpperSeq, otherwise in LowerSeq

    Returns (CodedEvents, SourceLoc).
    """
    CodedEvents = CodedEv
    SourceLoc = ""

    def raise_CheckVerbs_error(kloc, call_location_string):
        """
        Handle problems found at some point internal to check_verbs: skip the verb that
        caused the problem but do [not?] skip the sentence. Logs the error and information on the
        verb phrase and raises CheckVerbsError.
        This is currently only used for check_passive()
        15.04.29: pas -- is that supposed to be "not"?
        """
        global SentenceID
        warningstr = call_location_string + \
            'in check_verbs; verb sequence {} skipped: {}'.format(
                ' '.join(
                    ParseList[
                        kloc:kloc +
                        5]),
                SentenceID)
        logger = logging.getLogger('petr_log')
        logger.warning(warningstr)
        raise CheckVerbsError

    def check_passive(kitem):
        """
        Check whether the verb phrase beginning at kitem is passive; returns
        location of verb if true, zero otherwise.
        """
        try:
            cpendtag = ParseList.index('~' + ParseList[kitem][1:])
        except ValueError:
            raise_CheckVerbs_error(kitem, "check_passive()")
        # no point in looking before + 3 since we need an auxiliary verb
        if '(VBN' in ParseList[kitem + 3:cpendtag]:
            ppvloc = ParseList.index('~VBN', kitem + 3)
            if 'BY' not in ParseList[ppvloc + 3:cpendtag]:
                return 0
            else:  # check for the auxiliary verb
                ka = ppvloc - 3
                while ka > kitem:
                    if '~VB' in ParseList[ka]:
                        if ParseList[ka - 1] in ['WAS', 'IS', 'BEEN', 'WAS']:
                            return (
                                # <14.04.30> replace this with a synset? Or a
                                # tuple? Or has the compiler done that anyway?
                                ppvloc - 1
                            )
                    ka -= 1
                return 0
        else:
            return 0

    kitem = ParseStart
    while kitem < len(ParseList):
        upper = []
        lower = []
        # NOTE(review): kitem + 1 can run one past the end of ParseList for
        # the final item -- confirm ParseList always ends in a closing tag
        if ('(VP' in ParseList[kitem]) and ('(VB' in ParseList[kitem + 1]):
            vpstart = kitem  # check_passive could change this
            try:
                pv = check_passive(kitem)
            except CheckVerbsError:
                kitem += 1
                continue
            IsPassive = (pv > 0)
            if IsPassive:
                kitem = pv - 2  # kitem + 2 is now at the passive verb
            targ = ParseList[kitem + 2]
            if ShowPattMatch:
                print(
                    "CV-0",
                    "'" +
                    targ +
                    "'",
                    targ in PETRglobals.VerbDict['verbs'])
            if targ in PETRglobals.VerbDict['verbs']:
                SourceLoc = ""
                TargetLoc = ""
                if ShowPattMatch:
                    print("CV-1 Found", targ)
                endtag = '~' + ParseList[vpstart][1:]
                hasmatch = False
                patternlist = PETRglobals.VerbDict['verbs'][targ]
                verbcode = '---'
                # Find verb boundaries, verb code
                verb_start = kitem + 2
                verb_end = kitem + 2
                meaning = ''
                # print(targ)
                verbdata = {}
                hasmatch = False
                # NOTE(review): in Python 3, dict.keys() never compares equal
                # to a list, so this test is always True -- looks like a
                # Python 2 holdover; confirm intent
                if not patternlist.keys() == ['#']:
                    # compound verb, look ahead
                    i = kitem + 3
                    found_flag = True
                    while found_flag:
                        skipcheck = skip_item(ParseList[i])
                        if skipcheck:
                            i += 1
                            continue
                        if ParseList[i] in patternlist:
                            if '#' in patternlist[ParseList[i]]:
                                found_flag = False
                                verb_end = i
                                upper_compound = patternlist[ParseList[i]]['#']
                                hasmatch = True
                                if not '#' in upper_compound:
                                    # this verb is compounded in both directions
                                    #don't know how SNLP will parse this
                                    # Does english even have these?
                                    # NOTE(review): raise_CheckVerbs_error
                                    # requires two arguments -- this call
                                    # would itself raise TypeError
                                    raise_CheckVerbs_error()
                                verbdata = upper_compound['#']
                            else:
                                i += 1
                        else:
                            if '#' in patternlist:
                                verbdata = patternlist['#']['#']
                            else:
                                # No match found on the verb.
                                # NOTE(review): zero-argument call, see above
                                raise_CheckVerbs_error()
                            break
                if not hasmatch:
                    # NOTE(review): same Python-2-style keys() comparison
                    if not patternlist['#'].keys() == ['#']:
                        # Compound verb, look behind
                        i = kitem - 1
                        found_flag = True
                        while found_flag and i >= 0:
                            skipcheck = skip_item(ParseList[i])
                            if skipcheck:
                                i -= 1
                                continue
                            if ParseList[i] in patternlist['#']:
                                if '#' in patternlist['#'][ParseList[i]]:
                                    found_flag = False
                                    verb_start = i
                                    verbdata = patternlist['#'][
                                        ParseList[i]]['#']
                                    hasmatch = True
                                else:
                                    i -= 1
                            else:
                                if '#' in patternlist:
                                    verbdata = patternlist['#']['#']
                                break
                if not hasmatch:
                    # Simple verb
                    if '#' in patternlist['#']:
                        verbdata = patternlist['#']['#']
                        hasmatch = True
                if not verbdata == {}:
                    meaning = verbdata['meaning']
                    verbcode = verbdata['code']
                    line = verbdata['line']
                upper = get_upper_seq(verb_start - 1, ParseList, ParseStart)
                lower = get_lower_seq(verb_end + 1, endtag, ParseList)
                if not meaning == '':
                    patternlist = PETRglobals.VerbDict['phrases'][meaning]
                    if ShowPattMatch:
                        print("CV-2 patlist",patternlist.keys())
                    vpm, lowsrc, lowtar = verb_pattern_match(
                        patternlist, upper, lower)
                    hasmatch = False
                    if not vpm == {}:
                        hasmatch = True
                        EventCode = vpm[0]['code']
                        line = vpm[0]['line']
                        SourceLoc = lowsrc if not lowsrc == "" else vpm[2]
                        TargetLoc = lowtar if not lowtar == "" else vpm[1]
                    if hasmatch and EventCode == '---':
                        hasmatch = False
                if not hasmatch and verbcode != '---':
                    if ShowPattMatch:
                        print(
                            "Matched on the primary verb",
                            targ,
                            meaning,
                            line)
                    EventCode = verbcode
                    hasmatch = True
                if hasmatch:
                    if TargetLoc == "":
                        TargetLoc = find_target(lower, TargetLoc)
                    if ShowPattMatch:
                        print("CV-3 trg", TargetLoc)
                    if not TargetLoc == "":
                        if SourceLoc == "":
                            SourceLoc = find_source(
                                upper,
                                lower,
                                SourceLoc,
                                TargetLoc)
                        if not SourceLoc == "":
                            if ShowPattMatch:
                                print("CV-3 src", SourceLoc)
                            CodedEvents = make_event_strings(
                                CodedEvents,
                                upper,
                                lower,
                                SourceLoc,
                                TargetLoc,
                                IsPassive,
                                EventCode)
                if hasmatch:
                    while (endtag not in ParseList[kitem]):
                        kitem += 1  # resume search past the end of VP
        kitem += 1
    return CodedEvents, SourceLoc
def verb_pattern_match(patlist, upper, lower):
    """
    ##########################################
    ##
    ## Symbols:
    ## $ = Source
    ## + = Target
    ## ^ ="Skip to end of the (NE
    ## % = Compound
    ##
    ## I'm sorry this is so long, but upper and lower matches are just different
    ## enough where this actually makes sense
    ##
    ##########################################

    Backtracking match of the verb pattern trie `patlist` against the lower
    sequence first, then (via upper_match) the upper sequence. Returns
    (pattern-data, source, target) on success, or ({}, "", "") on failure.
    `option` encodes which alternative was tried last at a choice point so
    that retracing resumes with the next alternative; `pathleft` is the
    stack of choice points.
    """
    VPMPrint =False

    def find_actor(phrase, i):
        # first '('-prefixed item at or after i; falls off the end with a
        # diagnostic print (and returns None) when no actor marker is found
        for j in range(i, len(phrase)):
            if phrase[j][0] == "(":
                return j
        print("NO ACTOR FOUND",phrase,j)

    def upper_match(pathdict):
        ########################
        # Match upper phrase
        ########################
        in_NE = False
        in_NEC = False
        phrase_actor = ""
        phrase_actors = {}
        phrase = upper
        matchlist = []
        option = 0
        path = pathdict
        pathleft = [(pathdict, 0, 0)]
        source = ""
        target = ""
        if VPMPrint:
            print("\nChecking upper", upper,path.keys())
        i = 0
        while i < len(phrase):
            if VPMPrint:
                print("Checking", phrase[i],path.keys())
            skipcheck = skip_item(upper[i])
            # check direct word match
            if phrase[i] in path and not option > 0:
                if VPMPrint:
                    print("upper matched a word", phrase[i])
                matchlist.append(phrase[i])
                pathleft.append((path, i, 1))
                path = path[phrase[i]]
            # maybe a synset match
            elif 'synsets' in path and not option > 1:
                if VPMPrint:
                    print("could be a synset")
                matchflag = False
                for set in path['synsets'].keys():
                    if upper[i] in PETRglobals.VerbDict['verbs'][set]:
                        if VPMPrint:
                            print("We found a synset match")
                        pathleft.append((path, i, 2))
                        path = path['synsets'][set]
                        matchlist.append(set)
                        i += 1
                        matchflag = True
                        break
                option = 0 if matchflag else 2
                continue
            # check for target match
            elif in_NE and (not option > 2) and '+' in path:
                pathleft.append((path, i, 3, target))
                i = find_actor(upper, i)
                target = [i, True]
                path = path['+']
                matchlist += ['+']
                if VPMPrint:
                    print("Matching phrase target", target)
                continue
            elif in_NE and (not option > 3) and '$' in path:
                pathleft.append((path, i, 4, source))
                i = find_actor(upper, i)
                source = [i, True]
                path = path['$']
                matchlist.append(source)
                if VPMPrint:
                    # check for source match
                    print("Matching phrase source")
                continue
            elif in_NE and (not option > 4) and '^' in path:
                j = i
                if VPMPrint:
                    print("Matching phrase skip")
                matchlist.append('^')
                while j >= 0:
                    if "~NE" == upper[j]:
                        pathleft.append((path, i, 5))
                        path = path['^']
                        i = j - 1
                        break
                    j -= 1
                if j >= 0:
                    continue
            elif (not in_NE) and in_NEC and (not option > 5) and '%' in path:
                if VPMPrint:
                    print("Matching compound", upper, i)
                ka = i
                while '(NEC' not in upper[ka]:
                    ka += 1
                    if ka >= len(upper):
                        option = 6
                        break
                if option == 6:
                    continue
                source = [ka, True]
                target = source
                pathleft.append((path, i, 6))
                path = path['%']
                matchlist.append('%')
                i = ka
                continue
            if skipcheck > 0:
                if VPMPrint:
                    print("skipping",i,len(lower))
                # toggle phrase-state flags on the (reverse-order) tags
                if "~NEC" in upper[i]:
                    in_NEC = not in_NEC
                elif "~NE" in upper[i]:
                    in_NE = not in_NE
                # NOTE(review): bound uses len(lower) while walking `upper`
                # -- confirm this is intentional
                if i < len(lower) -1:
                    i +=1
                    continue
                if not '#' in path:
                    return False,{}
                if VPMPrint:
                    print("Upper pattern matched at end", matchlist)
                return True, (path['#'], target, source)
            if (not i >= len(upper)) and not option > 6:
                i += 1
                pathleft.append((path, i, 7))
                if VPMPrint:
                    print("Skipping")
                option = 0
                matchlist.append("*")
                continue
            elif "#" in path:
                if VPMPrint:
                    print("Upper pattern matched", matchlist)
                return True, (path['#'], target, source)
            # return to last point of departure
            elif not pathleft[-1][2] == 0:
                if VPMPrint:
                    print("retracing", upper[i], path, upper[i] in path)
                p = pathleft.pop()
                path = p[0]
                i = p[1] + 1
                option = p[2]
                if option == 3:
                    target = p[3]
                elif option == 4:
                    source = p[3]
                matchlist.pop()
                continue
            else:
                if VPMPrint:
                    print("no match in upper", pathleft[-1][0].keys())
                return False, {}
            # NOTE(review): unreachable -- every branch above continues or
            # returns
            i += 1
            option = 0
        print("MATCHED",matchlist,path.keys())
        if "#" in path:
            return True, (path['#'], target, source)
        if VPMPrint:
            print("NO MATCH IN UPPER")
        return False, {}

    #################################################
    # Match lower phrase via Depth-First-ish Search
    #################################################
    # Stack is of 3-tuples (path,index,option)
    path = patlist
    phrase_return = True
    option = 0
    i = 0
    matchlist = []
    pathleft = [(path, 0, 0)]
    target = ""
    source = ""
    in_NEC = False
    phrase_actors = {}
    in_NE = False
    if VPMPrint:
        print("\nChecking phrase", lower)
    phrase_actor = ""
    while i < len(lower):
        if pathleft == []:
            pathleft = [(path, i, 0)]
        if VPMPrint:
            print(
                "checking",
                "'" +
                lower[i] +
                "'",
                option,
                phrase_actor,
                in_NE,path.keys())
        skipcheck = skip_item(lower[i])
        # return to last point of departure
        if skipcheck > 0 and option > -1:
            if VPMPrint:
                print("Skipping")
            if "NEC" in lower[i]:
                in_NEC = not in_NEC
            elif "NE" in lower[i]:
                in_NE = not in_NE
                # '(NE<k>code' (length > 3) marks an actor start position
                if len(lower[i]) > 3:
                    phrase_actor = i
                    phrase_actors[i] = i
            if i < len(lower) -1 :
                i +=1
                continue
            if '#' in path:
                option = 7
            elif i == len(lower) - 1 and not pathleft[-1][2] == 0:
                if VPMPrint:
                    print("retracing ", len(pathleft))
                p = pathleft.pop()
                path = p[0]
                i = p[1] + 1
                option = p[2]
                matchlist.pop()
                phrase_actors[i] = phrase_actors.setdefault(i,phrase_actor)
                continue
        phrase_actors[i] = phrase_actors.setdefault(i,phrase_actor)
        # check direct word match
        if lower[i] in path and not option > 0:
            if VPMPrint:
                print("matched a word", lower[i])
            matchlist.append(lower[i])
            pathleft.append((path, i, 1))
            path = path[lower[i]]
        # maybe a synset match
        elif 'synsets' in path and not option > 1:
            #print("could be a synset")
            matchflag = False
            if VPMPrint:
                print("Checking for synset")
            for set in path['synsets'].keys():
                if lower[i] in PETRglobals.VerbDict['verbs'][set]:
                    if VPMPrint:
                        print("We found a synset match")
                    pathleft.append((path, i, 2))
                    path = path['synsets'][set]
                    matchlist.append(set)
                    i += 1
                    matchflag = True
                    break
            option = 0 if matchflag else 2
            continue
        # check for target match
        elif in_NE and (not option > 2) and '+' in path:
            pathleft.append((path, i, 3, target))
            target = [phrase_actors[i], False]
            path = path['+']
            matchlist += [target]
            if VPMPrint:
                print("Matching phrase target")
            continue
        elif in_NE and (not option > 3) and '$' in path:
            pathleft.append((path, i, 4, source))
            source = [phrase_actors[i], False]
            path = path['$']
            matchlist.append(source)
            if VPMPrint:
                # check for source match
                print("Matching phrase source")
            continue
        elif in_NE and (not option > 4) and '^' in path:
            j = i
            if VPMPrint:
                print("Matching phrase skip")
            matchlist.append('^')
            while j < len(lower):
                if "~NE" == lower[j]:
                    pathleft.append((path, i, 5))
                    path = path['^']
                    i = j + 1
                    in_NE = False
                    break
                j += 1
            if not j < len(lower):
                i += 1
            continue
        elif not in_NE and in_NEC and (not option > 5) and '%' in path:
            if VPMPrint:
                print("Matching compound", upper, i)
            ka = i
            # print(ka)
            # NOTE(review): scans `upper` while matching `lower` -- confirm
            # this is intentional
            while '(NEC' not in upper[ka]:
                # print(upper[ka])
                ka += 1
                if ka >= len(upper):
                    option = 6
                    break
            if option == 6:
                continue
            source = lower[ka][-3:]
            target = source
            pathleft.append((path, i, 6))
            path = path['%']
            matchlist.append('%')
            continue
        elif i + 1 < len(lower) and not option > 6:
            if VPMPrint:
                print("skipping")
            option = 0
            pathleft.append((path, i, 7))
            i += 1
            matchlist.append("*")
            continue
        elif "#" in path:
            if VPMPrint:
                print(
                    "Lower pattern matched",
                    matchlist)  # now check upper
            result, data = upper_match(path['#'])
            if result:
                return data, source, target
            if VPMPrint:
                print("retracing", len(pathleft))
            p = pathleft.pop()
            path = p[0]
            i = p[1] + 1
            option = p[2]
            if option == 3:
                target = p[3]
            elif option == 4:
                source = p[3]
            if not matchlist == []:
                m = matchlist.pop()
                if m == '$':
                    source = ""
            continue
        # return to last point of departure
        elif not pathleft[-1][2] == 0:
            if VPMPrint:
                print("retracing", len(pathleft))
            p = pathleft.pop()
            path = p[0]
            i = p[1] + 1
            option = p[2]
            if option == 3:
                target = p[3]
            elif option == 4:
                source = p[3]
            matchlist.pop()
            continue
        else:
            if VPMPrint:
                # NOTE(review): pathleft is a list; .keys() here would raise
                print("no match in lower", pathleft.keys())
            phrase_return = False
            break
        # NOTE(review): unreachable -- every branch above continues, returns
        # or breaks
        i += 1
        option = 0
    return {}, "", ""
def get_actor_code(index, SentenceOrdDate):
    """
    Get the actor code, resolving date restrictions.

    index:            key into PETRglobals.ActorCodes
    SentenceOrdDate:  ordinal date of the sentence, compared against the
                      restriction intervals

    Each codelist item is either [code] (unrestricted) or a date-restricted
    entry [kind, date(s)..., code] with kind 0 = before, 1 = after,
    2 = inside interval. Returns '---' when no code can be resolved.
    """
    logger = logging.getLogger('petr_log')
    thecode = None
    try:
        codelist = PETRglobals.ActorCodes[index]
    except IndexError:
        logger.warning(
            '\tError processing actor in get_actor_code. Index: {}'.format(index))
        # Bug fix: the original fell through here with `codelist` unbound,
        # so the len(codelist) test below raised NameError; return the null
        # code instead, which is what the log-and-continue intent implies.
        return '---'
    if len(codelist) == 1 and len(codelist[0]) == 1:
        thecode = codelist[0][0]  # no restrictions: the most common case
    for item in codelist:
        if len(item) > 1:  # interval date restriction
            if item[0] == 0 and SentenceOrdDate <= item[1]:
                thecode = item[2]
                break
            if item[0] == 1 and SentenceOrdDate >= item[1]:
                thecode = item[2]
                break
            if item[0] == 2 and SentenceOrdDate >= item[
                    1] and SentenceOrdDate <= item[2]:
                thecode = item[3]
                break
    # if interval search failed, look for an unrestricted code
    if not thecode:
        # assumes even if PETRglobals.WriteActorRoot, the actor name at the end
        # of the list will have length >1 if
        for item in codelist:
            if len(item) == 1:
                thecode = item[0]
    if not thecode:
        thecode = '---'
    elif PETRglobals.WriteActorRoot:
        thecode += PETRglobals.RootPrimer + codelist[-1]
    return thecode
def actor_phrase_match(patphrase, phrasefrag):
    """
    Determines whether the actor pattern patphrase occurs in phrasefrag. Returns
    (True, number of phrase words consumed) on a match, otherwise (False, 0).
    Insha'Allah...

    patphrase:  [code-index, connector, (word, connector), ..., terminator];
                connector '_' requires the next pattern word to be consecutive,
                anything else allows intervening words
    phrasefrag: list of words whose first word is already known to match the
                pattern's root word
    """
    APMprint = False
    connector = patphrase[1]
    kfrag = 1  # already know first word matched
    kpatword = 2  # skip code and connector
    if APMprint:
        # debug
        print(
            "APM-1",
            len(patphrase),
            patphrase,
            "\nAPM-2",
            len(phrasefrag),
            phrasefrag)
    if len(patphrase) == 2:
        if APMprint:
            print("APM-2.1: singleton match")  # debug
        return True, 1  # root word is a sufficient match
    # <14.02.28>: these both do the same thing, except one handles a string of
    # the form XXX and the other XXX_. This is probably unnecessary. though it
    # might be...I suppose those are two distinct cases.
    if len(patphrase) == 3 and patphrase[2][0] == "":
        if APMprint:
            print("APM-2.2: singleton match")  # debug
        return True, 1  # root word is a sufficient match
    if kfrag >= len(phrasefrag):
        return False, 0  # end of phrase with more to match
    while kpatword < len(patphrase):  # iterate over the words in the pattern
        if APMprint:
            # debug
            print(
                "APM-3",
                kfrag,
                kpatword,
                "\n APM Check:",
                kpatword,
                phrasefrag[kfrag],
                patphrase[kpatword][0])
        if phrasefrag[kfrag] == patphrase[kpatword][0]:
            if APMprint:
                print(" APM match")  # debug
            connector = patphrase[kpatword][1]
            kfrag += 1
            kpatword += 1
            # final element is just the terminator
            if kpatword >= len(patphrase) - 1:
                return True, kfrag  # complete pattern matched
        else:
            if APMprint:
                print(" APM fail")  # debug
            if connector == '_':
                return False, 0  # consecutive match required, so fail
            else:
                kfrag += 1  # intervening words are allowed
        if kfrag >= len(phrasefrag):
            return False, 0  # end of phrase with more to match
    return (
        # complete pattern matched (I don't think we can ever hit this)
        True, len(phrasefrag)
    )
def check_NEphrase(nephrase, date):
    """
    This function tries to find actor and agent patterns matching somewhere in
    the phrase. The code for the first actor in the phrase is used as the
    base; there is no further search for actors
    All agents with distinct codes that are in the phrase are used -- including
    phrases which are subsets of other phrases (e.g. 'REBEL OPPOSITION GROUP
    [ROP]' and 'OPPOSITION GROUP' [OPP]) and they are appended in the order
    they are found. If an agent generates the same 3-character code (e.g.
    'PARLIAMENTARY OPPOSITION GROUP [OOP]' and 'OPPOSITION GROUP' [OPP]) the
    code is appended only the first time it is found.
    Note: In order to avoid accidental matches across codes, this checks in
    increments of 3 character blocks. That is, it assumes the CAMEO convention
    where actor and agent codes are usually 3 characters, occasionally 6 or 9,
    but always multiples of 3.
    If PETRglobals.WriteActorRoot is True, root phrase is added to the code following the
    string PETRglobals.RootPrimer

    Returns [False] when neither an actor nor an agent was found, otherwise
    [True, composite-code].
    """
    kword = 0
    actorcode = ""
    actor_index = [-1, -1]  # (start, end) span of the matched actor phrase
    if ShowNEParsing:
        print("CNEPh initial phrase", nephrase)
    # iterate through the phrase looking for actors
    while kword < len(nephrase):
        phrasefrag = nephrase[kword:]
        if ShowNEParsing:
            print("CNEPh Actor Check", phrasefrag[0])
        if phrasefrag[0] in PETRglobals.ActorDict:
            if ShowNEParsing:
                print("  Found", phrasefrag[0])
            # patterns indexed by their root word; try each until one matches
            patlist = PETRglobals.ActorDict[nephrase[kword]]
            if ShowNEParsing:
                print("CNEPh Mk1:", patlist)
            actor_index = (kword, kword)
            for index in range(len(patlist)):
                val, phraselen = actor_phrase_match(patlist[index], phrasefrag)
                if val:
                    actor_index = (kword, kword + phraselen)
                    actorcode = get_actor_code(patlist[index][0], date)
                    if ShowNEParsing:
                        print("CNEPh Mk2:", actorcode)
                    break
            if len(actorcode) > 0:
                break  # first matched actor is used; no further actor search
            else:
                kword += 1
    kword = 0
    agentlist = []
    while kword < len(nephrase):  # now look for agents
        if kword >= actor_index[0] and kword < actor_index[1]:
            kword += 1  # Don't look for agents in the actor phrase
            continue
        phrasefrag = nephrase[kword:]
        if ShowNEParsing:
            print("CNEPh Agent Check", phrasefrag[0])
        if phrasefrag[0] in PETRglobals.AgentDict:
            if ShowNEParsing:
                print("  Found", phrasefrag[0])
            patlist = PETRglobals.AgentDict[nephrase[kword]]
            for index in range(len(patlist)):
                val = actor_phrase_match(patlist[index], phrasefrag)
                if val[0]:
                    agentlist.append(patlist[index][0])
                    kword += val[1] - 1  # jump past the matched agent words
                    break
        kword += 1  # continue looking for more agents
    if len(agentlist) == 0:
        if len(actorcode) == 0:
            return [False]
        else:
            return [True, actorcode]
    if len(actorcode) == 0:
        actorcode = '---'  # unassigned agent
    if PETRglobals.WriteActorRoot:
        # temporarily strip the root annotation; re-appended after agents
        part = actorcode.partition(PETRglobals.RootPrimer)
        actorcode = part[0]
        actorroot = part[2]
    for agentcode in agentlist:  # assemble the composite code
        if agentcode[0] == '~':
            agc = agentcode[1:]  # extract the code
        else:
            agc = agentcode[:-1]
        aglen = len(agc)  # set increment to the length of the agent code
        ka = 0  # check if the agent code is already present
        # scan actorcode in 3-character steps (CAMEO code-block convention)
        while ka <= len(actorcode) - aglen:
            if agc == actorcode[ka:ka + aglen]:
                ka = -1  # signal duplicate
                break
            ka += 3
        if ka < 0:
            # NOTE(review): this break abandons the whole agent loop on the
            # first duplicate; a `continue` would match the docstring's
            # "appended only the first time" wording -- confirm intent
            break
        # '~XXX' appends after the actor code, 'XXX~' prefixes before it
        if agentcode[0] == '~':
            actorcode += agc
        else:
            actorcode = agc + actorcode
    if PETRglobals.WriteActorRoot:
        actorcode += PETRglobals.RootPrimer + actorroot
    return [True, actorcode]
def check_commas(plist):
    """
    Removes comma-delimited clauses from ParseList.
    Note that the order here is to remove initial, remove terminal, then remove
    intermediate. Initial and terminal remove are done only once; the
    intermediate is iterated. In a sentence where the clauses can in fact be
    removed without affecting the structure, the result will still be balanced.
    If this is not the case, the routine raises a Skip_Record rather than
    continuing with whatever mess is left.
    Because this is working with ParseList, any commas inside (NP should
    already have had their tags removed as they were converted to (NE
    This was a whole lot simpler in TABARI, but TABARI also made some really
    weird matches following comma-clause deletion.

    The clause-length bounds (PETRglobals.Comma{B,E,}Min/Max) gate each of the
    three passes; a Max of 0 disables that pass.
    """
    ParseList = plist

    def count_word(loclow, lochigh):
        """
        Returns the number of words in ParseList between loclow and lochigh - 1
        """
        cwkt = 0
        ka = loclow
        while ka < lochigh:
            if ParseList[ka] == '(NE':
                ka += 2  # skip over codes
            else:
                # a word is any non-tag token (not '(' open, not '~' close)
                if ParseList[ka][0] != '(' and ParseList[ka][
                        0] != '~' and ParseList[ka][0].isalpha():
                    cwkt += 1
            ka += 1
        return cwkt

    def find_end():
        """
        Returns location of tag on punctuation at end of phrase, defined as
        last element without ~
        """
        ka = len(ParseList) - 1
        while ka >= 2 and ParseList[ka][0] == '~':
            ka -= 1
        return ka - 1

    logger = logging.getLogger('petr_log')
    # displays trees at various points as ParseList is mangled
    ShowCCtrees = True
    ShowCCtrees = False
    if '(,' not in ParseList:
        return ParseList  # nothing to do: no comma tags present
    if ShowCCtrees:
        print('chkcomma-1-Parselist::', ParseList)
        show_tree_string(' '.join(ParseList))
    if PETRglobals.CommaBMax != 0:  # check for initial phrase
        """
        Initial phrase elimination in check_commas(): delete_phrases() will tend to leave
        a lot of (xx opening tags in place, making the tree a grammatical mess, which is
        why initial clause deletion is turned off by default.
        """
        kount = count_word(2, ParseList.index('(,'))
        if kount >= PETRglobals.CommaBMin and kount <= PETRglobals.CommaBMax:
            # leave the comma in place so an internal can catch it
            loclow = 2
            lochigh = ParseList.index('(,')
            ##################
            # DELETE PHRASES
            ##################
            # walk backwards; every complete (tag ... ~tag span whose close
            # tag was seen first is spliced out of ParseList
            stack = []  # of course we use a stack...this is a tree...
            ka = lochigh - 1
            while ka >= loclow:
                if ParseList[ka][0] == '~':
                    stack.append(ParseList[ka][1:])
                # remove this complete phrase
                elif len(stack) > 0 and ParseList[ka][0] == '(' and ParseList[ka][1:] == stack[-1]:
                    targ = '~' + ParseList[ka][1:]
                    ParseList = ParseList[:ka] + \
                        ParseList[ParseList.index(targ, ka + 1) + 1:]
                    stack.pop()
                ka -= 1
            #################
    if ShowCCtrees:
        print('chkcomma-1a-Parselist::', ParseList)
        show_tree_string(' '.join(ParseList))
    if PETRglobals.CommaEMax != 0:  # check for terminal phrase
        kend = find_end()
        ka = kend - 1  # terminal: reverse search for '('
        while ka >= 2 and ParseList[ka] != '(,':
            ka -= 1
        if ParseList[ka] == '(,':
            kount = count_word(ka, len(ParseList))
            if kount >= PETRglobals.CommaEMin and kount <= PETRglobals.CommaEMax:
                # leave the comma in place so an internal can catch it
                #################
                # DELETE PHRASES
                ###################
                loclow = ka + 3
                lochigh = kend
                stack = []  # of course we use a stack...this is a tree...
                ka = lochigh - 1
                while ka >= loclow:
                    if ParseList[ka][0] == '~':
                        stack.append(ParseList[ka][1:])
                    # remove this complete phrase
                    elif len(stack) > 0 and ParseList[ka][0] == '(' and ParseList[ka][1:] == stack[-1]:
                        targ = '~' + ParseList[ka][1:]
                        ParseList = ParseList[:ka] + \
                            ParseList[ParseList.index(targ, ka + 1) + 1:]
                        stack.pop()
                    ka -= 1
                ####################
        if ShowCCtrees:
            print('chkcomma-2a-Parselist::')
            show_tree_string(' '.join(ParseList))
            print("cc-2t:", kount)
    if PETRglobals.CommaMax != 0:
        # intermediate pass: delete material between consecutive comma tags
        ka = ParseList.index('(,')
        while True:
            try:
                kb = ParseList.index('(,', ka + 1)
            except ValueError:
                break  # no further comma tag: pass finished
            kount = count_word(ka + 2, kb)  # ka+2 skips over , ~,
            if kount >= PETRglobals.CommaMin and kount <= PETRglobals.CommaMax:
                #################
                # DELETE PHRASES
                #################
                loclow = ka
                lochigh = kb
                stack = []  # of course we use a stack...this is a tree...
                ka = lochigh - 1
                while ka >= loclow:
                    if ParseList[ka][0] == '~':
                        stack.append(ParseList[ka][1:])
                    # remove this complete phrase
                    elif len(stack) > 0 and ParseList[ka][0] == '(' and ParseList[ka][1:] == stack[-1]:
                        targ = '~' + ParseList[ka][1:]
                        ParseList = ParseList[:ka] + \
                            ParseList[ParseList.index(targ, ka + 1) + 1:]
                        stack.pop()
                    ka -= 1
                ###############
            ka = kb
        if ShowCCtrees:
            print('chkcomma-3a-Parselist::')
            show_tree_string(' '.join(ParseList))
    # check for dangling initial or terminal (, , ~,
    # NOTE(review): .index() here assumes at least one '(,' survived the
    # passes above; a ValueError would propagate otherwise -- confirm
    ka = ParseList.index('(,')  # initial
    if count_word(2, ka) == 0:
        ParseList = ParseList[:ka] + ParseList[ka + 3:]
    kend = find_end()
    ka = kend - 1  # terminal: reverse search for '(,'
    while ka >= 2 and ParseList[ka] != '(,':
        ka -= 1
    if ParseList[ka] == '(,':
        if count_word(ka + 1, kend) == 0:
            ParseList = ParseList[:ka] + ParseList[ka + 3:]
    if ShowCCtrees:
        print('chkcomma-end-Parselist::')
        show_tree_string(' '.join(ParseList))
    try:
        check_balance(ParseList)
    except UnbalancedTree:
        raise_ParseList_error('check_balance at end of check_comma()')
    return ParseList
def assign_NEcodes(plist, ParseStart, date):
    """
    Assigns non-null codes to NE phrases where appropriate.

    Walks ParseList from ParseStart looking for '(NE' tags; compound phrases
    (containing '(NEC') are first expanded into a flat list of simple NEs,
    then each NE phrase is coded via check_NEphrase().  Returns the updated
    ParseList.
    """
    def expand_compound_element(kstart, plist2):
        """
        An almost but not quite a recursive call on expand_compound_NEPhrase().
        This difference is that the (NEC has already been established so we are just
        adding elements inside the list and there is no further check: we're not allowing
        any further nesting of compounds. That could doubtlessly be done fairly easily
        with some possibly too-clever additional code but such constructions are virtually
        unknown in actual news stories.
        """
        ParseList = plist2
        try:
            kend = ParseList.index('~NE', kstart)
            ncstart = ParseList.index('(NEC', kstart, kend)
            ncend = ParseList.index('~NEC', ncstart, kend)
        except ValueError:
            raise_ParseList_error(
                'expand_compound_element() in assign_NEcodes')
        # first element is always '(NE'
        prelist = ParseList[kstart + 1:ncstart]
        postlist = ParseList[ncend + 1:kend]
        # **',postlist
        newlist = []
        ka = ncstart + 1
        while ka < ncend - 1:  # convert all of the NP, NNS and NNP to NE
            # any TreeBank (N* tag is legitimate here
            if '(N' in ParseList[ka]:
                endtag = '~' + ParseList[ka][1:]
                # duplicate the surrounding text around each compound member
                itemlist = ['(NE', '---']
                itemlist.extend(prelist)
                ka += 1
                while ParseList[ka] != endtag:
                    itemlist.append(ParseList[ka])
                    ka += 1
                itemlist.extend(postlist)
                itemlist.append('~NE')
                newlist.extend(itemlist)
            ka += 1  # okay to increment since next item is (, or (CC
        ParseList = ParseList[:kstart] + newlist + ParseList[kend + 1:]
        return kstart + len(newlist), ParseList

    def expand_compound_NEPhrase(kstart, kend, plist1):
        """
        Expand the compound phrases inside an (NE: this replaces these with a
        list of NEs with the remaining text simply duplicated. Code and agent
        resolution will then be done on these phrases as usual. This will
        handle two separate (NECs, which is as deep as one generally
        encounters.
        """
        ParseList = plist1
        ncstart = ParseList.index('(NEC', kstart, kend)
        ncend = ParseList.index('~NEC', ncstart, kend)
        prelist = ParseList[kstart + 1:ncstart - 1]
        postlist = ParseList[ncend + 1:kend]
        newlist = ['(NEC']
        ka = ncstart + 1
        while ka < ncend - 1:  # convert all of the NP, NNS and NNP to NE
            if '(N' in ParseList[ka]:
                endtag = '~' + ParseList[ka][1:]
                itemlist = ['(NE', '---']
                itemlist.extend(prelist)
                ka += 1
                while ParseList[ka] != endtag:
                    itemlist.append(ParseList[ka])
                    ka += 1
                itemlist.extend(postlist)
                itemlist.append('~NE')
                newlist.extend(itemlist)
            ka += 1  # okay to increment since next item is (, or (CC
        newlist.append('~NEC')
        newlist.append('~TLTL')  # tell-tale marker bounding the expansion
        ParseList = ParseList[:kstart] + newlist + ParseList[kend + 1:]
        if '(NEC' in newlist[1:-1]:  # expand next set of (NEC if it exists
            ka = kstart + 1
            while '(NE' in ParseList[ka:ParseList.index('~TLTL', ka)]:
                ka, ParseList = expand_compound_element(ka, ParseList)
        ParseList.remove('~TLTL')  # tell-tale is no longer needed
        return ParseList

    ParseList = plist
    kitem = ParseStart
    while kitem < len(ParseList):
        if '(NE' == ParseList[kitem]:
            if ShowNEParsing:
                print("NE-0:", kitem, ParseList[kitem - 1:])
            nephrase = []
            kstart = kitem
            kcode = kitem + 1  # slot holding the (currently null) actor code
            kitem += 2  # skip NP, code
            if kitem >= len(ParseList):
                raise_ParseList_error(
                    'Bounds overflow in (NE search in assign_NEcodes')
            # collect the phrase words, skipping part-of-speech NN* tags
            while '~NE' != ParseList[kitem]:
                if ParseList[kitem][1:3] != 'NN':
                    nephrase.append(ParseList[kitem])
                kitem += 1
                if kitem >= len(ParseList):
                    raise_ParseList_error(
                        'Bounds overflow in ~NE search in assign_NEcodes')
            if ShowNEParsing:
                print("aNEc", kcode, ":", nephrase)  # debug
            if '(NEC' in nephrase:
                ParseList = expand_compound_NEPhrase(kstart, kitem, ParseList)
                kitem = kstart - 1  # process the (NEs following the expansion
            else:
                result = check_NEphrase(nephrase, date)
                if result[0]:
                    ParseList[kcode] = result[1]
                    if ShowNEParsing:
                        print("Assigned", result[1])  # debug
        kitem += 1
    return ParseList
def make_event_strings(
        CodedEv, UpperSeq, LowerSeq, SourceLoc, TargetLoc, IsPassive, EventCode):
    """
    Creates the set of event strings, handling compound actors and symmetric
    events.

    Returns the updated list of coded events; each event is a list
    [source-code, target-code, event-code], extended with actor root and/or
    text fields when PETRglobals.WriteActorRoot / WriteActorText are set.

    Fixes relative to the earlier version:
      * make_events() exited its '(NEC' error paths with a bare ``return``,
        so the caller was assigned None and later crashed on len(None); it
        now returns the (cleared) event list.
      * the bare ``except:`` around code extraction is narrowed to
        ``except Exception`` so SystemExit/KeyboardInterrupt propagate.
    """
    CodedEvents = CodedEv
    global SentenceLoc, SentenceID

    def extract_code_fields(fullcode):
        """ Returns list containing actor code and optional root and text strings """
        if PETRglobals.CodePrimer in fullcode:
            maincode = fullcode[:fullcode.index(PETRglobals.CodePrimer)]
            rootstrg = None
            textstrg = None
            if PETRglobals.WriteActorRoot:
                part = fullcode.partition(PETRglobals.RootPrimer)
                if PETRglobals.WriteActorText:
                    rootstrg = part[2].partition(PETRglobals.TextPrimer)[0]
                else:
                    rootstrg = part[2]
            if PETRglobals.WriteActorText:
                textstrg = fullcode.partition(PETRglobals.TextPrimer)[2]
            return [maincode, rootstrg, textstrg]
        else:
            return [fullcode, None, None]

    def make_events(codessrc, codestar, codeevt, CodedEvents_):
        """
        Create events from each combination in the actor lists except
        self-references.  Returns the updated event list; on an unexpanded
        '(NEC' the list is cleared and returned empty.
        """
        CodedEvents = CodedEvents_
        global SentenceLoc
        for thissrc in codessrc:
            if '(NEC' in thissrc:
                logger.warning(
                    '(NEC source code found in make_event_strings(): {}'.format(SentenceID))
                CodedEvents = []
                return CodedEvents  # was a bare return, which handed None to the caller
            srclist = extract_code_fields(thissrc)
            if srclist[0][0:3] == '---' and len(SentenceLoc) > 0:
                # add location if known <14.09.24: this still hasn't been
                # implemented <>
                srclist[0] = SentenceLoc + srclist[0][3:]
            for thistar in codestar:
                if '(NEC' in thistar:
                    logger.warning(
                        '(NEC target code found in make_event_strings(): {}'.format(SentenceID))
                    CodedEvents = []
                    return CodedEvents  # was a bare return, which handed None to the caller
                tarlist = extract_code_fields(thistar)
                # skip self-references based on code
                if srclist[0] != tarlist[0]:
                    if tarlist[0][0:3] == '---' and len(SentenceLoc) > 0:
                        # add location if known -- see note above
                        tarlist[0] = SentenceLoc + tarlist[0][3:]
                    if IsPassive:
                        srclist, tarlist = tarlist, srclist
                    CodedEvents.append([srclist[0], tarlist[0], codeevt])
                    if PETRglobals.WriteActorRoot:
                        CodedEvents[-1].extend([srclist[1], tarlist[1]])
                    if PETRglobals.WriteActorText:
                        CodedEvents[-1].extend([srclist[2], tarlist[2]])
        return CodedEvents

    def expand_compound_codes(codelist):
        """
        Expand coded compounds in place, that is, codes of the format XXX/YYY
        """
        for ka in range(len(codelist)):
            if '/' in codelist[ka]:
                parts = codelist[ka].split('/')
                # this will insert in order, which isn't necessary but might be
                # helpful
                kb = len(parts) - 2
                codelist[ka] = parts[kb + 1]
                while kb >= 0:
                    codelist.insert(ka, parts[kb])
                    kb -= 1

    logger = logging.getLogger('petr_log')
    try:
        srccodes = get_loccodes(SourceLoc, CodedEvents, UpperSeq, LowerSeq)
        expand_compound_codes(srccodes)
        tarcodes = get_loccodes(TargetLoc, CodedEvents, UpperSeq, LowerSeq)
        expand_compound_codes(tarcodes)
    except Exception:  # was a bare except:, which also trapped SystemExit
        logger.warning(
            'tuple error when attempting to extract src and tar codes in make_event_strings(): {}'.format(SentenceID))
        return CodedEvents
    SentenceLoc = ''
    if len(srccodes) == 0 or len(tarcodes) == 0:
        logger.warning(
            'Empty codes in make_event_strings(): {}'.format(SentenceID))
        return CodedEvents
    if ':' in EventCode:  # symmetric event
        # when one side is unresolved ('---'), mirror the resolved side
        if srccodes[0] == '---' or tarcodes[0] == '---':
            if tarcodes[0] == '---':
                tarcodes = srccodes
            else:
                srccodes = tarcodes
        ecodes = EventCode.partition(':')
        CodedEvents = make_events(srccodes, tarcodes, ecodes[0], CodedEvents)
        CodedEvents = make_events(tarcodes, srccodes, ecodes[2], CodedEvents)
    else:
        CodedEvents = make_events(srccodes, tarcodes, EventCode, CodedEvents)
    if PETRglobals.RequireDyad:
        # drop events that are missing either a source or a target
        ka = 0
        # need to evaluate the bound every time through the loop
        while ka < len(CodedEvents):
            if CodedEvents[ka][0] == '---' or CodedEvents[ka][1] == '---':
                del CodedEvents[ka]
            else:
                ka += 1
    if len(CodedEvents) == 0:
        return CodedEvents
    # remove duplicates
    ka = 0
    # need to evaluate the bound every time through the loop
    while ka < len(CodedEvents) - 1:
        kb = ka + 1
        while kb < len(CodedEvents):
            if CodedEvents[ka] == CodedEvents[kb]:
                del CodedEvents[kb]
            else:
                kb += 1
        ka += 1
    return CodedEvents
# ========================== PRIMARY CODING FUNCTIONS ====================== #
def check_discards(SentenceText):
    """
    Checks whether any of the discard phrases are in SentenceText, giving
    priority to the + matches. Returns [indic, match] where indic
    0 : no matches
    1 : simple match
    2 : story match [+ prefix]

    PETRglobals.DiscardList is a nested dict (trie) keyed by upper-case
    words, with '+' marking a story-level terminal and '$' a sentence-level
    terminal.
    """
    sent = SentenceText.upper().split()  # case insensitive matching
    size = len(sent)  # NOTE(review): unused
    level = PETRglobals.DiscardList
    depart_index = [0]
    discardPhrase = ""
    for i in range(len(sent)):
        if '+' in level:
            return [2, '+ ' + discardPhrase]
        elif '$' in level:
            return [1, ' ' + discardPhrase]
        elif sent[i] in level:
            # descend one level in the trie and extend the matched phrase
            depart_index.append(i)
            level = level[sent[i]]
            discardPhrase += " " + sent[i]
        else:
            # NOTE(review): this branch looks like it was meant to backtrack
            # after a partial match, but it doesn't: depart_index starts as
            # [0] and is never cleared, so the len()==0 guard is dead; and
            # reassigning the loop variable `i` has no effect on a `for`
            # loop, so scanning simply continues from the next word.
            # discardPhrase is also never reset here -- confirm intent.
            if len(depart_index) == 0:
                continue
            i = depart_index[0]
            level = PETRglobals.DiscardList
    return [0, '']
def get_issues(SentenceText):
    """
    Finds the issues in SentenceText, returns as a list of [code, count].

    <14.02.28> stops coding and sets the issues to zero if it finds *any*
    ignore phrase (an issue whose code starts with '~').

    Change: removed the dead local ``gotcode`` and replaced the manual
    index/while scan of the accumulator with a for/else loop; behavior is
    unchanged.
    """
    sent = SentenceText.upper()  # case insensitive matching
    issues = []
    for target in PETRglobals.IssueList:
        if target[0] in sent:  # found the issue phrase
            code = PETRglobals.IssueCodes[target[1]]
            if code[0] == '~':  # ignore code, so bail
                return []
            for entry in issues:
                if entry[0] == code:
                    entry[1] += 1  # already seen: bump the count
                    break
            else:  # didn't find the code, so add it
                issues.append([code, 1])
    return issues
def code_record(plist1, pstart, date):
    """
    Code using ParseList read_TreeBank, then return results in StoryEventList
    first element of StoryEventList for each sentence -- this signals the start
    of a list events for a sentence -- followed by lists containing
    source/target/event triples.

    Returns (CodedEvents, plist, NEmpty) where NEmpty is 1 when no events
    were produced for this record, else 0.
    """
    plist = plist1
    global SentenceID
    global NEmpty
    # code triples that were produced; this is set in make_event_strings
    CodedEvents = []
    logger = logging.getLogger('petr_log')
    try:
        plist = check_commas(plist)
    except IndexError:
        raise_ParseList_error('Index error in check_commas()')
    try:
        plist = assign_NEcodes(plist, pstart, date)
    except NameError:
        # NOTE(review): swallows the NameError and keeps coding with the
        # unmodified plist; only the date is printed for diagnosis
        print(date)
    if ShowParseList:
        print('code_rec-Parselist::', plist)
    try:
        # this can throw HasParseError which is caught in do_coding
        CodedEvents, SourceLoc = check_verbs(plist, pstart, CodedEvents)
    except Exception as e:
        # NOTE(review): broad catch-all; HasParseError is re-raised by
        # check_verbs' own machinery, everything else is logged and ignored
        logger.warning('\tIndexError in parsing, but HasParseError should have caught this. Probably a bad sentence.')
    NEmpty = 0
    if len(CodedEvents) == 0:
        NEmpty += 1
    return CodedEvents, plist, NEmpty
def do_coding(event_dict, out_file):
    """
    Main coding loop Note that entering any character other than 'Enter' at the
    prompt will stop the program: this is deliberate.
    <14.02.28>: Bug: PETRglobals.PauseByStory actually pauses after the first
    sentence of the *next* story

    Iterates over stories and their sentences, runs discard checks, parses,
    codes events, and attaches 'events'/'issues' entries to each sentence
    dict.  Returns the (mutated) event_dict.
    """
    treestr = ""
    # NOTE(review): NStory, NSent and NEvents are printed in the summary but
    # never incremented, so those counts always report 0 -- confirm intent
    NStory = 0
    NSent = 0
    NEvents = 0
    NEmpty = 0
    NDiscardSent = 0
    NDiscardStory = 0
    logger = logging.getLogger('petr_log')
    times = 0
    sents = 0
    for key, val in event_dict.items():
        prev_code = []
        SkipStory = False
        logger.info('\n\nProcessing {}'.format(key))
        StoryDate = event_dict[key]['meta']['date']
        StorySource = 'TEMP'
        for sent in val['sents']:
            if 'parsed' in event_dict[key]['sents'][sent]:
                # sentence-level config overrides, applied before coding
                if 'config' in val['sents'][sent]:
                    for id, config in event_dict[key][
                            'sents'][sent]['config'].items():
                        change_Config_Options(config)
                SentenceID = '{}_{}'.format(key, sent)
                #if not SentenceID == "NEST_2.75":
                #    continue
                coded_events = []
                logger.info('\tProcessing {}'.format(SentenceID))
                SentenceText = event_dict[key]['sents'][sent]['content']
                SentenceDate = event_dict[key]['meta']['date']
                Date = PETRreader.dstr_to_ordate(SentenceDate)
                SentenceSource = 'TEMP'
                parsed = event_dict[key]['sents'][sent]['parsed']
                treestr = utilities._format_parsed_str(parsed)
                disc = check_discards(SentenceText)
                if disc[0] > 0:
                    if disc[0] == 1:
                        print("Discard sentence:", disc[1])
                        logger.info('\tSentence discard. {}'.format(disc[1]))
                        NDiscardSent += 1
                        continue
                    else:
                        # story-level discard: skip the rest of this story
                        print("Discard story:", disc[1])
                        logger.info('\tStory discard. {}'.format(disc[1]))
                        SkipStory = True
                        NDiscardStory += 1
                        break
                else:
                    try:
                        ParseList, ParseStart = read_TreeBank(treestr)
                    except IrregularPattern:
                        continue
                    try:
                        coded_events, ParseList, emptyCount = code_record(
                            ParseList, ParseStart, Date)
                        NEmpty += emptyCount
                    except HasParseError:
                        coded_events = None
                    if coded_events:
                        event_dict[key]['sents'][sent]['events'] = coded_events
                    if coded_events and PETRglobals.IssueFileName != "":
                        event_issues = get_issues(SentenceText)
                        if event_issues:
                            event_dict[key]['sents'][sent]['issues'] = event_issues
                    if PETRglobals.PauseBySentence:
                        # any non-empty input aborts the run (deliberate)
                        if len(input("Press Enter to continue...")) > 0:
                            sys.exit()
                    prev_code = coded_events
                    #print("\n\n",SentenceID,"\n",SentenceText,"\n\t",coded_events)
            else:
                # NOTE(review): SentenceID here is whatever the previous
                # iteration set; unbound if the first sentence lacks a parse
                print("NO INFO")
                logger.info(
                    '{} has no parse information. Passing.'.format(SentenceID))
                pass
        if SkipStory:
            event_dict[key]['sents'] = None
    print("Summary:")
    print(
        "Stories read:",
        NStory,
        " Sentences coded:",
        NSent,
        " Events generated:",
        NEvents)
    print(
        "Discards: Sentence",
        NDiscardSent,
        " Story",
        NDiscardStory,
        " Sentences without events:",
        NEmpty)
    return event_dict
def parse_cli_args():
    """Parse the command-line arguments for PETRARCH.

    Defines two subcommands, ``parse`` (deprecated) and ``batch``, and
    returns the parsed ``argparse.Namespace``.

    Change: fixed the 'DEPRACATED' -> 'DEPRECATED' typo and a missing space
    ('before.If') in the user-visible help text; no functional change.
    """
    __description__ = """
PETRARCH
(https://openeventdata.github.io/) (v. 0.01)
    """
    aparse = argparse.ArgumentParser(prog='petrarch',
                                     description=__description__)
    sub_parse = aparse.add_subparsers(dest='command_name')
    parse_command = sub_parse.add_parser('parse', help=""" DEPRECATED Command to run the
                                         PETRARCH parser. Do not use unless you've used it before. If you need to
                                         process unparsed text, see the README""",
                                         description="""DEPRECATED Command to run the
                                         PETRARCH parser. Do not use unless you've used it before. If you need to
                                         process unparsed text, see the README""")
    parse_command.add_argument('-i', '--inputs',
                               help='File, or directory of files, to parse.',
                               required=True)
    parse_command.add_argument('-P', '--parsed', action='store_true',
                               default=False, help="""Whether the input
                               document contains StanfordNLP-parsed text.""")
    parse_command.add_argument('-o', '--output',
                               help='File to write parsed events.',
                               required=True)
    parse_command.add_argument('-c', '--config',
                               help="""Filepath for the PETRARCH configuration
                               file. Defaults to PETR_config.ini""",
                               required=False)
    batch_command = sub_parse.add_parser('batch', help="""Command to run a batch
                                         process from parsed files specified by
                                         an optional config file.""",
                                         description="""Command to run a batch
                                         process from parsed files specified by
                                         an optional config file.""")
    batch_command.add_argument('-c', '--config',
                               help="""Filepath for the PETRARCH configuration
                               file. Defaults to PETR_config.ini""",
                               required=False)
    batch_command.add_argument('-i', '--inputs',
                               help="""Filepath for the input XML file. Defaults to
                               data/text/Gigaword.sample.PETR.xml""",
                               required=False)
    args = aparse.parse_args()
    return args
def main():
    """Command-line entry point: parse args, load config and dictionaries,
    then run the coder over the selected input files."""
    cli_args = parse_cli_args()
    utilities.init_logger('PETRARCH.log')
    logger = logging.getLogger('petr_log')
    PETRglobals.RunTimeString = time.asctime()
    if cli_args.command_name == 'parse' or cli_args.command_name == 'batch':
        if cli_args.config:
            print('Using user-specified config: {}'.format(cli_args.config))
            logger.info(
                'Using user-specified config: {}'.format(cli_args.config))
            PETRreader.parse_Config(cli_args.config)
        else:
            logger.info('Using default config file.')
            PETRreader.parse_Config(utilities._get_data('data/config/',
                                                        'PETR_config.ini'))
        read_dictionaries()
        start_time = time.time()
        print('\n\n')
        # default input list comes from the config; CLI -i overrides it
        paths = PETRglobals.TextFileList
        if cli_args.inputs or cli_args.command_name == 'parse':
            if os.path.isdir(cli_args.inputs):
                # directory: code every .xml file it contains
                if cli_args.inputs[-1] != '/':
                    paths = glob.glob(cli_args.inputs + '/*.xml')
                else:
                    paths = glob.glob(cli_args.inputs + '*.xml')
            elif os.path.isfile(cli_args.inputs):
                paths = [cli_args.inputs]
            elif cli_args.command_name == 'parse':
                print(
                    '\nFatal runtime error:\n"' +
                    cli_args.inputs +
                    '" could not be located\nPlease enter a valid directory or file of source texts.')
                sys.exit()
        if cli_args.command_name == 'parse':
            run(paths, cli_args.output, cli_args.parsed)
        else:
            # batch mode: input is always already parsed
            run(paths, PETRglobals.EventFileName, True)
        print("Coding time:", time.time() - start_time)
    print("Finished")
def start_logger():
    """Initialize the PETRARCH log file (PETRARCH.log) for library users."""
    utilities.init_logger('PETRARCH.log')
def read_dictionaries(validation=False):
    """Load the verb, actor, agent, discard and (optional) issue
    dictionaries named in PETRglobals into memory via PETRreader."""
    if validation:
        # NOTE(review): this branch computes the validation paths but never
        # passes them to the PETRreader loaders before returning -- the
        # validation dictionaries are effectively not loaded; confirm intent
        verb_path = utilities._get_data(
            'data/dictionaries/',
            'PETR.Validate.verbs.txt')
        actor_path = utilities._get_data(
            'data/dictionaries',
            'PETR.Validate.actors.txt')
        agent_path = utilities._get_data(
            'data/dictionaries/',
            'PETR.Validate.agents.txt')
        discard_path = utilities._get_data(
            'data/dictionaries/',
            'PETR.Validate.discards.txt')
        return
    print('Verb dictionary:', PETRglobals.VerbFileName)
    verb_path = utilities._get_data(
        'data/dictionaries',
        PETRglobals.VerbFileName)
    PETRreader.read_verb_dictionary(verb_path)
    print('Actor dictionaries:', PETRglobals.ActorFileList)
    for actdict in PETRglobals.ActorFileList:
        actor_path = utilities._get_data('data/dictionaries', actdict)
        PETRreader.read_actor_dictionary(actor_path)
    print('Agent dictionary:', PETRglobals.AgentFileName)
    agent_path = utilities._get_data('data/dictionaries',
                                     PETRglobals.AgentFileName)
    PETRreader.read_agent_dictionary(agent_path)
    print('Discard dictionary:', PETRglobals.DiscardFileName)
    discard_path = utilities._get_data('data/dictionaries',
                                       PETRglobals.DiscardFileName)
    PETRreader.read_discard_list(discard_path)
    # the issues dictionary is optional; empty name means "not configured"
    if PETRglobals.IssueFileName != "":
        print('Issues dictionary:', PETRglobals.IssueFileName)
        issue_path = utilities._get_data('data/dictionaries',
                                         PETRglobals.IssueFileName)
        PETRreader.read_issue_list(issue_path)
def run(filepaths, out_file, s_parsed):
    """Read the XML input files, Stanford-parse them if not already parsed
    (s_parsed False), code the events, and write them to out_file."""
    events = PETRreader.read_xml_input(filepaths, s_parsed)
    if not s_parsed:
        events = utilities.stanford_parse(events)
    updated_events = do_coding(events, 'TEMP')
    PETRwriter.write_events(updated_events, out_file)
def run_pipeline(data, out_file=None, config=None, write_output=True,
                 parsed=False):
    """
    Programmatic (hypercube-pipeline) entry point.

    Loads the given or default config and the dictionaries, codes the events
    in ``data``, and either returns the formatted output (write_output
    False) or writes it to ``out_file``.  Exits if write_output is requested
    without an output file.

    Change: the shrug log message used the invalid escape sequence '\\_'
    written as a single backslash, which raises a DeprecationWarning on
    modern Pythons; the backslash is now escaped (same runtime bytes).
    """
    utilities.init_logger('PETRARCH.log')
    logger = logging.getLogger('petr_log')
    if config:
        print('Using user-specified config: {}'.format(config))
        logger.info('Using user-specified config: {}'.format(config))
        PETRreader.parse_Config(config)
    else:
        logger.info('Using default config file.')
        logger.info('Config path: {}'.format(utilities._get_data('data/config/',
                                                                 'PETR_config.ini')))
        PETRreader.parse_Config(utilities._get_data('data/config/',
                                                    'PETR_config.ini'))
    read_dictionaries()
    logger.info('Hitting read events...')
    events = PETRreader.read_pipeline_input(data)
    if parsed:
        logger.info('Hitting do_coding')
        updated_events = do_coding(events, 'TEMP')
    else:
        # unparsed input must go through the Stanford parser first
        events = utilities.stanford_parse(events)
        updated_events = do_coding(events, 'TEMP')
    if not write_output:
        output_events = PETRwriter.pipe_output(updated_events)
        return output_events
    elif write_output and not out_file:
        print('Please specify an output file...')
        logger.warning('Need an output file. ¯\\_(ツ)_/¯')
        sys.exit()
    elif write_output and out_file:
        PETRwriter.write_events(updated_events, out_file)
# Script entry point.
if __name__ == '__main__':
    main()
| 36.08362
| 136
| 0.509179
|
acfd38bd8a67e599f912cc3eea35e499cd0fac0f
| 743
|
py
|
Python
|
keilib/__init__.py
|
shimadamacy/keilog
|
7ee6388dd79a30e6d4c6d2f81d113ae2dbf47bdd
|
[
"MIT"
] | 8
|
2019-12-21T07:01:45.000Z
|
2021-11-12T07:10:10.000Z
|
keilib/__init__.py
|
shimadamacy/keilog
|
7ee6388dd79a30e6d4c6d2f81d113ae2dbf47bdd
|
[
"MIT"
] | null | null | null |
keilib/__init__.py
|
shimadamacy/keilog
|
7ee6388dd79a30e6d4c6d2f81d113ae2dbf47bdd
|
[
"MIT"
] | 1
|
2021-05-05T03:05:17.000Z
|
2021-05-05T03:05:17.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Library that collects data from the B-route, serial links, and various sensors, then organizes and stores it.

Features:
    * Reading data from a serial port
      - Filtering out invalid data
      - Filtering out duplicate data
      - Filtering out outliers
    * Fetching power information from a smart meter
      - Support for the RL7023 Stick-D/DSS
    * Saving to files
      - Adding timestamps
      - Computing 10-minute averages and saving them to a separate file
      - Creating one file per day

ToDo:
    * Support for other Wi-SUN dongles
      - Especially the single-sided, B-route-only type
    * Allow flexible configuration of how data is processed:
      - Data saved to files
      - Data uploaded
      - Data sent to a display unit
      - Data tweeted
      - Data streamed over UDP
    * Expand the sensor classes:
      - DS18B20 temperature sensor
      - SHT21 temperature/humidity sensor
      - MCP3208 ADC
      - Pulse sensor
'''
__author__ = "MATSUDA, Koji <kjmatsuda@gmail.com>"
__version__ = "0.1.1"
__date__ = "2019-12-14"
| 18.575
| 51
| 0.572005
|
acfd390cb3d892aa4979e34fd57fb3c897d56d6c
| 1,508
|
py
|
Python
|
scripts/rabbitmq-test.py
|
james-portman/openstack-ansible
|
882d98c94b6e429dec9d66d26722ed457e3b18b9
|
[
"Apache-2.0"
] | null | null | null |
scripts/rabbitmq-test.py
|
james-portman/openstack-ansible
|
882d98c94b6e429dec9d66d26722ed457e3b18b9
|
[
"Apache-2.0"
] | null | null | null |
scripts/rabbitmq-test.py
|
james-portman/openstack-ansible
|
882d98c94b6e429dec9d66d26722ed457e3b18b9
|
[
"Apache-2.0"
] | 1
|
2018-05-21T18:41:51.000Z
|
2018-05-21T18:41:51.000Z
|
#!/usr/bin/env python
#
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2017, Jean-Philippe Evrard <jean-philippe.evrard@rackspace.co.uk>
#
"""Tests rabbitmq with our hardcoded test credentials"""
import argparse
import sys
# pika (the RabbitMQ client) is a hard requirement; bail out early if missing.
try:
    import pika
except Exception:
    sys.exit("Can't import pika")
def rabbitmq_connect(ip=None):
    """Connect to *ip* on the standard port using the hardcoded test
    credentials; exit the process on failure, print confirmation on success."""
    creds = pika.credentials.PlainCredentials('testguest', 'secrete')
    params = pika.ConnectionParameters(
        host=ip, virtual_host='/test', credentials=creds)
    try:
        conn = pika.BlockingConnection(params)
        conn.channel()
    except Exception:
        sys.exit("Can't connect to %s" % ip)
    else:
        print("Connected.")
# Standalone usage: rabbitmq-test.py <ip>
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("ip", help="The IP to connect to")
    args = parser.parse_args()
    rabbitmq_connect(args.ip)
| 31.416667
| 75
| 0.714854
|
acfd39d57d107a717e27a65ce2c29cf069a75e63
| 280
|
py
|
Python
|
fdk_client/platform/models/Sections.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/platform/models/Sections.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/platform/models/Sections.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class Sections(BaseSchema):
    """Marshmallow schema for a theme section (mirrors the theme swagger.json)."""
    # Theme swagger.json
    attributes = fields.Str(required=False)
| 15.555556
| 43
| 0.725
|
acfd3a91544aecfca78cb55124c9cd585c87789b
| 32,713
|
py
|
Python
|
Server/src/virtualenv/Lib/encodings/cp775.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
Server/src/virtualenv/Lib/encodings/cp775.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
Server/src/virtualenv/Lib/encodings/cp775.py
|
ppyordanov/HCI_4_Future_Cities
|
4dc7dc59acccf30357bde66524c2d64c29908de8
|
[
"MIT"
] | null | null | null |
""" Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
""" # "
import codecs
# ## Codec APIs
class Codec(codecs.Codec):
    """Stateless CP775 codec delegating to the module-level charmap tables."""
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_map)
    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP775 encoder (charmap encoding needs no carry-over state)."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp775',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0096: 0x00a2, # CENT SIGN
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a7: 0x00a6, # BROKEN BAR
0x00a8: 0x00a9, # COPYRIGHT SIGN
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
u'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
u'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
u'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\xa2' # 0x0096 -> CENT SIGN
u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\xa4' # 0x009f -> CURRENCY SIGN
u'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
u'\xa6' # 0x00a7 -> BROKEN BAR
u'\xa9' # 0x00a8 -> COPYRIGHT SIGN
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
u'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
u'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
u'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
u'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
u'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
u'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
u'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
u'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
u'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x0096, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x009f, # CURRENCY SIGN
0x00a6: 0x00a7, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a9: 0x00a8, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
0x2219: 0x00f9, # BULLET OPERATOR
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| 46.533428
| 103
| 0.6432
|
acfd3b285f20528bfe49272c743391809cc86626
| 3,035
|
py
|
Python
|
setup.py
|
eugene-yang/priorsgd
|
7213cd1a215b28191c59461f8f103799594ab8c6
|
[
"MIT"
] | 1
|
2020-08-27T08:28:33.000Z
|
2020-08-27T08:28:33.000Z
|
setup.py
|
eugene-yang/priorsgd
|
7213cd1a215b28191c59461f8f103799594ab8c6
|
[
"MIT"
] | null | null | null |
setup.py
|
eugene-yang/priorsgd
|
7213cd1a215b28191c59461f8f103799594ab8c6
|
[
"MIT"
] | null | null | null |
## setup script for compiling the modified version of SGDClassifier
import os
from os.path import join
import setuptools
from setuptools import setup
import numpy
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
# from sklearn._build_utils.get_blas_info
def get_blas_info():
def atlas_not_found(blas_info_):
def_macros = blas_info.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
def configuration():
config = Configuration('priorsgd', parent_package='', top_path='')
cblas_libs, blas_info = get_blas_info()
cblas_includes = [join('priorsgd', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('seq_dataset',
sources=join('priorsgd', 'seq_dataset.pyx'),
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=join('priorsgd', 'weight_vector.pyx'),
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension('sgd_fast',
sources=join('priorsgd', 'sgd_fast.pyx'),
include_dirs=cblas_includes,
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == '__main__':
config = configuration()
setup(name=config.name,
version="0.0.1",
author="Eugene Yang",
author_email="eugene@ir.cs.georgetown.edu",
description="Stochastic Gradient Descent with Priors (priorsgd)",
long_description=open('./README.md').read(),
long_description_content_type="text/markdown",
url="https://github.com/eugene-yang/priorsgd",
setup_requires=['setuptools>=18.0',
'numpy>=1.14.0',
'cython>=0.27.3'],
install_requires=['scipy>=1.1.0', 'scikit-learn>=0.19.1'],
packages=setuptools.find_packages(),
ext_modules=config.ext_modules)
| 34.885057
| 79
| 0.556507
|
acfd3bab425516b1b6132b6c49763253ed014b6e
| 2,008
|
py
|
Python
|
chapter04/disparity.py
|
ankona/Learning-OpenCV-4-Computer-Vision-with-Python-Third-Edition
|
caa9326e310253fba1aab624b46ea899ce16a21f
|
[
"BSD-3-Clause"
] | 286
|
2019-06-29T11:47:40.000Z
|
2022-03-29T08:41:28.000Z
|
chapter04/disparity.py
|
chihhao428/Learning-OpenCV-4-Computer-Vision-with-Python-Third-Edition
|
ee29cfefb4f21ba5acf6222aa69ef1c05c8fc05d
|
[
"BSD-3-Clause"
] | 8
|
2020-10-01T17:48:04.000Z
|
2022-03-26T04:27:06.000Z
|
chapter04/disparity.py
|
chihhao428/Learning-OpenCV-4-Computer-Vision-with-Python-Third-Edition
|
ee29cfefb4f21ba5acf6222aa69ef1c05c8fc05d
|
[
"BSD-3-Clause"
] | 153
|
2019-07-01T02:53:02.000Z
|
2022-03-28T08:43:44.000Z
|
import numpy as np
import cv2
minDisparity = 16
numDisparities = 192 - minDisparity
blockSize = 5
uniquenessRatio = 1
speckleWindowSize = 3
speckleRange = 3
disp12MaxDiff = 200
P1 = 600
P2 = 2400
stereo = cv2.StereoSGBM_create(
minDisparity = minDisparity,
numDisparities = numDisparities,
blockSize = blockSize,
uniquenessRatio = uniquenessRatio,
speckleRange = speckleRange,
speckleWindowSize = speckleWindowSize,
disp12MaxDiff = disp12MaxDiff,
P1 = P1,
P2 = P2
)
imgL = cv2.imread('../images/color1_small.jpg')
imgR = cv2.imread('../images/color2_small.jpg')
def update(sliderValue = 0):
stereo.setBlockSize(
cv2.getTrackbarPos('blockSize', 'Disparity'))
stereo.setUniquenessRatio(
cv2.getTrackbarPos('uniquenessRatio', 'Disparity'))
stereo.setSpeckleWindowSize(
cv2.getTrackbarPos('speckleWindowSize', 'Disparity'))
stereo.setSpeckleRange(
cv2.getTrackbarPos('speckleRange', 'Disparity'))
stereo.setDisp12MaxDiff(
cv2.getTrackbarPos('disp12MaxDiff', 'Disparity'))
disparity = stereo.compute(
imgL, imgR).astype(np.float32) / 16.0
cv2.imshow('Left', imgL)
cv2.imshow('Right', imgR)
cv2.imshow('Disparity',
(disparity - minDisparity) / numDisparities)
cv2.namedWindow('Disparity')
cv2.createTrackbar('blockSize', 'Disparity', blockSize, 21,
update)
cv2.createTrackbar('uniquenessRatio', 'Disparity',
uniquenessRatio, 50, update)
cv2.createTrackbar('speckleWindowSize', 'Disparity',
speckleWindowSize, 200, update)
cv2.createTrackbar('speckleRange', 'Disparity',
speckleRange, 50, update)
cv2.createTrackbar('disp12MaxDiff', 'Disparity',
disp12MaxDiff, 250, update)
# Initialize the disparity map. Show the disparity map and images.
update()
# Wait for the user to press any key.
# Meanwhile, update() will be called anytime the user moves a slider.
cv2.waitKey()
| 28.28169
| 69
| 0.685757
|
acfd3bc3cf3141908ad2a74d803569cbd560d973
| 557
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/scattergeo/_mode.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/scattergeo/_mode.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/scattergeo/_mode.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class ModeValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="mode", parent_name="scattergeo", **kwargs):
super(ModeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
extras=kwargs.pop("extras", ["none"]),
flags=kwargs.pop("flags", ["lines", "markers", "text"]),
role=kwargs.pop("role", "info"),
**kwargs
)
| 37.133333
| 79
| 0.617594
|
acfd3c74369c970b84f3f5f69ef2ed406ae1c419
| 11,709
|
py
|
Python
|
melati/full_node/coin_store.py
|
a96009467/melati-blockchain
|
28b8cd1590ee8fa860554c66d639a1fefc0d3c41
|
[
"Apache-2.0"
] | 12
|
2021-07-13T15:39:57.000Z
|
2022-02-09T04:32:12.000Z
|
melati/full_node/coin_store.py
|
a96009467/melati-blockchain
|
28b8cd1590ee8fa860554c66d639a1fefc0d3c41
|
[
"Apache-2.0"
] | 1
|
2021-07-16T12:41:41.000Z
|
2021-07-16T12:42:48.000Z
|
melati/full_node/coin_store.py
|
a96009467/melati-blockchain
|
28b8cd1590ee8fa860554c66d639a1fefc0d3c41
|
[
"Apache-2.0"
] | 3
|
2021-07-13T05:35:30.000Z
|
2021-08-06T13:11:14.000Z
|
from typing import List, Optional
import aiosqlite
from melati.types.blockchain_format.coin import Coin
from melati.types.blockchain_format.sized_bytes import bytes32
from melati.types.coin_record import CoinRecord
from melati.types.full_block import FullBlock
from melati.util.db_wrapper import DBWrapper
from melati.util.ints import uint32, uint64
from melati.util.lru_cache import LRUCache
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute("pragma journal_mode=wal")
await self.coin_record_db.execute("pragma synchronous=2")
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(self, block: FullBlock, tx_additions: List[Coin], tx_removals: List[bytes32]):
"""
Only called for blocks which are blocks (and thus have rewards and transactions)
"""
if block.is_transaction_block() is False:
return None
assert block.foliage_transaction_block is not None
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
False,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(record, False)
included_reward_coins = block.get_included_reward_coins()
if block.height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
True,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(reward_coin_r, False)
total_amount_spent: int = 0
for coin_name in tx_removals:
total_amount_spent += await self._set_spent(coin_name, block.height)
# Sanity check, already checked in block_body_validation
assert sum([a.amount for a in tx_additions]) <= total_amount_spent
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record WHERE puzzle_hash=? AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes_db) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_parent_ids(
self,
include_spent_coins: bool,
parent_ids: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(parent_ids) == 0:
return []
coins = set()
parent_ids_db = tuple([pid.hex() for pid in parent_ids])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE coin_parent in ({"?," * (len(parent_ids_db) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
parent_ids_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def rollback_to_block(self, block_index: int):
"""
Note that block_index can be negative, in which case everything is rolled back
"""
# Update memory cache
delete_queue: bytes32 = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
# Store CoinRecord in DB and ram cache
async def _add_coin_record(self, record: CoinRecord, allow_replace: bool) -> None:
if self.coin_record_cache.get(record.coin.name()) is not None:
self.coin_record_cache.remove(record.coin.name())
cursor = await self.coin_record_db.execute(
f"INSERT {'OR REPLACE ' if allow_replace else ''}INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
str(record.coin.puzzle_hash.hex()),
str(record.coin.parent_coin_info.hex()),
bytes(record.coin.amount),
record.timestamp,
),
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_name: bytes32, index: uint32) -> uint64:
current: Optional[CoinRecord] = await self.get_coin_record(coin_name)
if current is None:
raise ValueError(f"Cannot spend a coin that does not exist in db: {coin_name}")
assert not current.spent # Redundant sanity check, already checked in block_body_validation
spent: CoinRecord = CoinRecord(
current.coin,
current.confirmed_block_index,
index,
True,
current.coinbase,
current.timestamp,
) # type: ignore # noqa
await self._add_coin_record(spent, True)
return current.coin.amount
| 39.962457
| 118
| 0.606542
|
acfd3cc64f3396b88dd3524e696399aaa7d7d446
| 3,027
|
py
|
Python
|
examples/ParaView/ContourGeometry/DynamicLocalRemoteRendering/app.py
|
gomberg5264/py-web-vue
|
eb7efc9e19ed5f7033478afac7bfa1a93b417cdf
|
[
"BSD-3-Clause"
] | null | null | null |
examples/ParaView/ContourGeometry/DynamicLocalRemoteRendering/app.py
|
gomberg5264/py-web-vue
|
eb7efc9e19ed5f7033478afac7bfa1a93b417cdf
|
[
"BSD-3-Clause"
] | null | null | null |
examples/ParaView/ContourGeometry/DynamicLocalRemoteRendering/app.py
|
gomberg5264/py-web-vue
|
eb7efc9e19ed5f7033478afac7bfa1a93b417cdf
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
# -----------------------------------------------------------------------------
# Virtual Environment handling
# -----------------------------------------------------------------------------
if "--virtual-env" in sys.argv:
virtualEnvPath = sys.argv[sys.argv.index("--virtual-env") + 1]
virtualEnv = virtualEnvPath + "/bin/activate_this.py"
exec(open(virtualEnv).read(), {"__file__": virtualEnv})
# -----------------------------------------------------------------------------
import os
from pywebvue import App
from pywebvue.modules import ParaView
from paraview import simple
# -----------------------------------------------------------------------------
# Web App setup
# -----------------------------------------------------------------------------
app = App("ParaView contour - Synch rendering")
app.state = {
"data_range": [0, 1],
"contour_value": 0,
"override": "auto",
}
app.enableModule(ParaView)
# -----------------------------------------------------------------------------
# ParaView pipeline
# -----------------------------------------------------------------------------
simple.LoadDistributedPlugin("AcceleratedAlgorithms", remote=False, ns=globals())
data_directory = os.path.join(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
),
"data",
)
head_vti = os.path.join(data_directory, "head.vti")
reader = simple.XMLImageDataReader(FileName=[head_vti])
# contour = simple.Contour(Input=reader) # Default filter => no plugin but slow
contour = FlyingEdges3D(Input=reader) # Faster processing => make it interactive
# Extract data range => Update store/state
array = reader.GetPointDataInformation().GetArray(0)
data_name = array.GetName()
data_range = array.GetRange()
app.set("data_range", data_range)
app.set("contour_value", 0.5 * (data_range[0] + data_range[1]))
contour.ContourBy = ["POINTS", data_name]
contour.Isosurfaces = [app.get("contour_value")]
contour.ComputeNormals = 1
contour.ComputeScalars = 0
# Rendering setup
view = simple.GetRenderView()
view.OrientationAxesVisibility = 0
representation = simple.Show(contour, view)
view = simple.Render()
simple.ResetCamera()
view.CenterOfRotation = view.CameraFocalPoint
# Start with "local" rendering rather than "remote"
viewHelper = ParaView.view(view, "demo", mode="local")
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
@app.change("contour_value")
def update_contour():
contour.Isosurfaces = [app.get("contour_value")]
# -----------------------------------------------------------------------------
# MAIN
# /opt/paraview/bin/pvpython ./examples/.../app.py --port 1234 --virtual-env ~/Documents/code/Web/vue-py/py-lib
# -----------------------------------------------------------------------------
if __name__ == "__main__":
app.on_ready = viewHelper.push_geometry
app.run_server()
| 32.548387
| 113
| 0.516022
|
acfd3d0595650bbcdcd04e9b7f78c2cb0ed0e6c1
| 194
|
py
|
Python
|
examples/sweeper.py
|
keakon/delayed
|
1b7a90a7c579e77e8ac0d9e77c839334ae20944a
|
[
"MIT"
] | 4
|
2019-06-03T10:33:01.000Z
|
2021-03-02T09:32:08.000Z
|
examples/sweeper.py
|
keakon/delayed
|
1b7a90a7c579e77e8ac0d9e77c839334ae20944a
|
[
"MIT"
] | null | null | null |
examples/sweeper.py
|
keakon/delayed
|
1b7a90a7c579e77e8ac0d9e77c839334ae20944a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from delayed.logger import setup_logger
from delayed.sweeper import Sweeper
from .client import queue
setup_logger()
sweeper = Sweeper(queues=[queue])
sweeper.run()
| 14.923077
| 39
| 0.742268
|
acfd3d1b1edb0105be104695097d835fce73af10
| 10,094
|
py
|
Python
|
mpsim/server.py
|
LaudateCorpus1/ZFM
|
c92db4637010c304fe7df5057813f591a3577322
|
[
"Apache-2.0"
] | null | null | null |
mpsim/server.py
|
LaudateCorpus1/ZFM
|
c92db4637010c304fe7df5057813f591a3577322
|
[
"Apache-2.0"
] | null | null | null |
mpsim/server.py
|
LaudateCorpus1/ZFM
|
c92db4637010c304fe7df5057813f591a3577322
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache v2.0 license.
#
import re
import os
import sys
import json
import glob
import copy
import importlib
from log import Log
from threading import Thread
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
# ----------------------------------------------------------------------------------------------------------------------
def browser_update(node):
def is_redfish(value): return type(value) is str and value.startswith('/redfish/v1')
def href(value): return '<a href={0}>{0}</a>'.format(value)
if type(node) is list:
for i,value in enumerate(node):
if is_redfish(value): node[i] = href(value)
browser_update(node[i])
elif type(node) is dict:
for key,value in node.items():
if is_redfish(value): node[key] = href(value)
browser_update(value)
# ----------------------------------------------------------------------------------------------------------------------
#
# Method Scope Semantics
# ------- ---------- ----------------------------------------------------
# GET collection Retrieve all resources in a collection
# GET resource Retrieve a single resource
# HEAD collection Retrieve all resources in a collection (header only)
# HEAD resource Retrieve a single resource (header only)
# POST collection Create a new resource in a collection
# PUT resource Update a resource
# PATCH resource Update a resource
# DELETE resource Delete a resource
# OPTIONS any Return available HTTP methods and other options
#
# ----------------------------------------------------------------------------------------------------------------------
class RestHandler(BaseHTTPRequestHandler):
def normalize_path(self, path):
new_path = path
#
# Prepend '/' if needed.
# If the file name doesn't begin with '/redfish/v1', then add it.
# If the file name ends with 'index.json', remove it.
# Strip off the trailing '/'.
#
if new_path == '/':
new_path = self.server.env['redfish_base']
if new_path[0] != '/':
new_path = '/{}'.format(new_path)
if not new_path.startswith(self.server.env['redfish_base']):
new_path = '{}/{}'.format(self.server.env['redfish_base'], new_path)
if new_path[-1] == '/':
new_path = new_path[:-1]
if new_path.endswith('/index.json'):
new_path = new_path.rsplit('/',1)[0]
return new_path
def log_message(self, format, *args):
return
# ----------------------------------------------------------------------------------------------
def reply(self, status, headers=None, data=None):
encoded_data = data.encode() if data else None
if headers and 'Content-Length' not in headers:
headers['Content-Length'] = str(len(encoded_data))
try:
self.send_response(status)
if headers:
for key,value in headers.items():
self.send_header(key, value)
self.end_headers()
if encoded_data:
self.wfile.write(encoded_data)
except:
Log.info('can\'t reply to requester')
# ----------------------------------------------------------------------------------------------
def do_HEAD(self):
Log.info('HEAD {}', self.path)
# ----------------------------------------------------------------------------------------------
def do_GET(self):
Log.info('GET {}', self.path)
path = self.normalize_path(self.path)
#
# If we don't know this resource, send 404.
#
if path not in self.server.attributes:
self.reply(404)
return
#
# Get the resource. Update the links if requested.
#
data = copy.deepcopy(self.server.attributes[path])
if self.server.env['browser']:
browser_update(data)
data = '<pre>' + json.dumps(data, indent=4, separators=(',', ': ')) + '</pre>'
content_type = 'text/html'
else:
data = json.dumps(data, indent=4, separators=(',', ': '))
content_type = 'application/json'
headers = { 'Content-Type' : content_type,
'Cache-Control' : 'no-cache, no-store, must-revalidate',
'Pragma' : 'no-cache',
'Expires' : '0' }
self.reply(200, headers, data)
# ----------------------------------------------------------------------------------------------
def do_POST(self):
Log.info('POST {}', self.path)
path = self.normalize_path(self.path)
data_length = int(self.headers['Content-Length'])
try:
data = json.loads(self.rfile.read(data_length).decode('utf-8'))
except Exception as e:
Log.info('invalid POST request - JSON improperly formatted')
self.reply(400)
return
#
# If the resource doesn't exist, then 404.
# If the resource isn't a collection, then 405.
# Otherwise, 204
#
if path not in self.server.attributes:
self.reply(404)
elif 'Members' not in self.server.attributes[path]:
self.reply(405)
else:
#
# Find a resource id for the new entry.
#
resource = self.server.attributes[path]
members = resource['Members']
members_id = sorted([ int(x.get('@odata.id').rsplit('/',1)[1]) for x in members ])
last = members_id[0]
for x in members_id[1:]:
if x != last+1: break
last = x
#
# Name the new entry.
#
new_id = last + 1
data_id = '{}/{}'.format(path, new_id)
data['@odata.id'] = data_id
#
# Update the resource to include the new entry.
#
resource['Members'].append({'@odata.id' : data_id })
resource['Members@odata.count'] += 1
#
# Put the new entry into the tree.
#
self.server.attributes[data_id] = data
#
# Reply to the user.
#
headers = { 'Location' : data_id }
self.reply(204, headers)
# ----------------------------------------------------------------------------------------------
def do_PUT(self):
Log.info('PUT {}', self.path)
self.reply(405)
# ----------------------------------------------------------------------------------------------
def do_PATCH(self):
Log.info('PATCH {}', self.path)
path = self.normalize_path(self.path)
data_length = int(self.headers['Content-Length'])
try:
data = json.loads(self.rfile.read(data_length).decode('utf-8'))
except Exception as e:
Log.info('invalid PATCH request - JSON improperly formatted {} -> {}', data_length, data)
self.reply(400)
return
#
# If the resource doesn't exist, then 404.
# If the resource is a collection, then 405.
# Otherwise, 204.
#
if path not in self.server.attributes:
status = 404
elif 'Members' in self.server.attributes[path]:
status = 405
else:
status = 204
self.server.node.do_PATCH(path, data)
#
# Reply to user.
#
self.reply(status)
# ----------------------------------------------------------------------------------------------
def do_DEEPPATCH(self):
Log.info('DEEPPATCH {}', self.path)
self.reply(405)
# ----------------------------------------------------------------------------------------------
def do_DELETE(self):
Log.info('DELETE {}', self.path)
path = self.normalize_path(self.path)
parent_path = path.rsplit('/', 1)[0]
#
# If the resource doesn't exist, then 404.
# If the parent doesn't exist, then 405.
# If the parent isn't a collection, then 405.
# Otherwise, 204
#
if path not in self.server.attributes:
status = 404
elif parent_path not in self.server.attributes:
status = 405
elif 'Members' not in self.server.attributes[parent_path]:
status = 405
else:
status = 204
del self.server.attributes[path]
for i,m in enumerate(self.server.attributes[parent_path]['Members']):
if m['@odata.id'] == self.path:
del self.server.attributes[parent_path]['Members'][i]
self.server.attributes[parent_path]['Members@odata.count'] -= 1
break
#
# Reply to user.
#
self.reply(status)
# ----------------------------------------------------------------------------------------------------------------------
class RedfishServer():
def __init__(self, node):
#
# Setup the REDfish server.
#
addr,_,port = node.env['profile']['address'].partition(':')
if not port: port = '8081'
self.server = HTTPServer((addr, int(port)), RestHandler)
self.server.node = node
self.server.env = node.env
self.server.attributes = node.env['attributes']
#
# Create the REDfish thread.
#
self.thread = Thread(target=self.run, daemon=True)
def run(self):
self.server.serve_forever()
def start(self):
self.thread.start()
# ----------------------------------------------------------------------------------------------------------------------
| 32.56129
| 120
| 0.464632
|
acfd3e86ce1faea68ce968ac03212f45a581a707
| 1,443
|
py
|
Python
|
Lista_2/exercicio_9.py
|
carlosjrbk/Logica-de-Programa--o---IFPE
|
2201f56c5a5641dc33895c2e575983be51d87a86
|
[
"MIT"
] | null | null | null |
Lista_2/exercicio_9.py
|
carlosjrbk/Logica-de-Programa--o---IFPE
|
2201f56c5a5641dc33895c2e575983be51d87a86
|
[
"MIT"
] | null | null | null |
Lista_2/exercicio_9.py
|
carlosjrbk/Logica-de-Programa--o---IFPE
|
2201f56c5a5641dc33895c2e575983be51d87a86
|
[
"MIT"
] | null | null | null |
print('='*60)
print('CATEGORIA-------- CODIGIO ---------------MARGEM DE LUCRO')
print()
print('HORTIFRUTI-----------1----------------------------80%---')
print('LATICINIOS-----------2----------------------------80%---')
print('CARNES---------------3---------------------------100%---')
print('PEIXES---------------4---------------------------100%---')
print('AVES-----------------5----------------------------90%---')
print('OVOS-----------------6----------------------------90%---')
print('='*60)
print('')
pc = input('INFORME O PREÇO DE CUSTO DO PRODUTO: ')
pc = pc.replace(",",".")
pc = float(pc)
cod = int(input('INFORME UM CODIGO DA LISTA ACIMA: '))
if cod == 1:
cod = 0.8
preco = pc + (pc * cod)
print('O PREÇO DE VENDA DO PRODUTO É DE: R$:%4.2f' %(preco))
elif cod == 2:
cod = 0.8
preco = pc + (pc * cod)
print('O PREÇO DE VENDA DO PRODUTO É DE: R$:%4.2f' %(preco))
elif cod == 3:
cod = 1
preco = pc + (pc * cod)
print('O PREÇO DE VENDA DO PRODUTO É DE: R$:%4.2f' %(preco))
elif cod == 4:
cod = 1
preco = pc + (pc * cod)
print('O PREÇO DE VENDA DO PRODUTO É DE: R$:%4.2f' %(preco))
elif cod == 5:
cod = 0.9
preco = pc + (pc * cod)
print('O PREÇO DE VENDA DO PRODUTO É DE: R$:%4.2f' %(preco))
elif cod == 6:
cod = 0.9
preco = pc + (pc * cod)
print('O PREÇO DE VENDA DO PRODUTO É DE: R$:%4.2f' %(preco))
elif cod >= 7:
print('ERRO!! INSIRA UM CODIGO DA TABELA')
| 34.357143
| 65
| 0.458073
|
acfd3ed52010c095baf5dc8dfa19d04de7bd467b
| 15
|
py
|
Python
|
examples/01-regular-print.py
|
indradhanush/autopalette
|
66c0b5b50c2c4c8ebc92e031c3a942ca831fd7d4
|
[
"BSD-2-Clause"
] | 11
|
2018-06-01T20:20:59.000Z
|
2020-09-23T01:13:06.000Z
|
examples/01-regular-print.py
|
indradhanush/autopalette
|
66c0b5b50c2c4c8ebc92e031c3a942ca831fd7d4
|
[
"BSD-2-Clause"
] | 3
|
2018-06-01T19:09:16.000Z
|
2018-06-04T01:47:18.000Z
|
examples/01-regular-print.py
|
indradhanush/autopalette
|
66c0b5b50c2c4c8ebc92e031c3a942ca831fd7d4
|
[
"BSD-2-Clause"
] | 1
|
2018-06-02T10:35:41.000Z
|
2018-06-02T10:35:41.000Z
|
print("Tring!")
| 15
| 15
| 0.666667
|
acfd3f31deca0921058bdd196eff1f0fc0ab82a2
| 2,010
|
py
|
Python
|
sdk/core/azure-core-tracing-opentelemetry/samples/sample_storage.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2019-08-23T21:14:00.000Z
|
2021-09-07T18:32:34.000Z
|
sdk/core/azure-core-tracing-opentelemetry/samples/sample_storage.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/core/azure-core-tracing-opentelemetry/samples/sample_storage.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 1
|
2021-12-18T20:01:22.000Z
|
2021-12-18T20:01:22.000Z
|
"""
Examples to show usage of the azure-core-tracing-opentelemetry
with the storage SDK.
This example traces calls for creating a container in storage and exports it
using the ConsoleSpanExporter.
An alternative path to export using AzureMonitor is also mentioned in the sample. Please take
a look at the commented code.
"""
# Declare OpenTelemetry as enabled tracing plugin for Azure SDKs
from azure.core.settings import settings
from azure.core.tracing.ext.opentelemetry_span import OpenTelemetrySpan
settings.tracing_implementation = OpenTelemetrySpan
# In the below example, we use a simple console exporter, uncomment these lines to use
# the Azure Monitor Exporter. It can be installed from https://pypi.org/project/opentelemetry-azure-monitor/
# Example of Azure Monitor exporter, but you can use anything OpenTelemetry supports
# from azure_monitor import AzureMonitorSpanExporter
# exporter = AzureMonitorSpanExporter(
# instrumentation_key="uuid of the instrumentation key (see your Azure Monitor account)"
# )
# Regular open telemetry usage from here, see https://github.com/open-telemetry/opentelemetry-python
# for details
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter
from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor
# Simple console exporter
exporter = ConsoleSpanExporter()
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)
trace.get_tracer_provider().add_span_processor(
SimpleExportSpanProcessor(exporter)
)
# Example with Storage SDKs
import os
from azure.storage.blob import BlobServiceClient
connection_string = os.environ['AZURE_STORAGE_CONNECTION_STRING']
container_name = os.environ['AZURE_STORAGE_BLOB_CONTAINER_NAME']
with tracer.start_as_current_span(name="MyApplication"):
client = BlobServiceClient.from_connection_string(connection_string)
client.create_container(container_name) # Call will be traced
| 38.653846
| 108
| 0.825373
|
acfd3f9e433bb8966833b5962cd90d85c15698f5
| 45,856
|
py
|
Python
|
Science_Project.py
|
herleraja/Holo.lab
|
e3a4568b4a4f0a6b110ce48f516f29a400934859
|
[
"Apache-2.0"
] | null | null | null |
Science_Project.py
|
herleraja/Holo.lab
|
e3a4568b4a4f0a6b110ce48f516f29a400934859
|
[
"Apache-2.0"
] | null | null | null |
Science_Project.py
|
herleraja/Holo.lab
|
e3a4568b4a4f0a6b110ce48f516f29a400934859
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\NUCER\Google Drive\HiWi\Science_Project\Science_Project.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the Holo.lab main window widget tree.

        Creates a movable QTabWidget with two tabs:

        * ``display_tab`` -- five identically structured channel panels laid
          out on ``gridLayout_2``. Each panel is a vertical stack of a
          disabled italic "data not available" label, a large checkable
          colored push button, and a form of five disabled QLineEdits
          (Current / A / B / C / D).
        * ``settings_tab`` -- a group box with a baudrate combo box (13 empty
          items, text presumably filled elsewhere) and a port line edit,
          followed by checkable Read/Stop buttons (Stop starts disabled) and
          a logo image.

        Generated by pyuic5 from ``Science_Project.ui`` -- regenerate from the
        .ui file instead of editing here, or the changes will be lost.

        :param MainWindow: the QMainWindow instance to populate.
        """
        # --- top-level window: title icon, size, drag-and-drop enabled -----
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(600, 566)
        MainWindow.setAcceptDrops(True)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("Images/research4-1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setAcceptDrops(True)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setMovable(True)
        self.tabWidget.setObjectName("tabWidget")
        # --- display tab: channel panels on a grid -------------------------
        self.display_tab = QtWidgets.QWidget()
        self.display_tab.setObjectName("display_tab")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.display_tab)
        self.gridLayout_2.setObjectName("gridLayout_2")
        # Horizontal spacers flanking the lower row of panels.
        spacerItem = QtWidgets.QSpacerItem(109, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem, 2, 6, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(80, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem1, 2, 0, 1, 1)
        # --- channel 5 panel (orange #ffa500), grid cell (2, 5) ------------
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
        self.verticalLayout_2.addItem(spacerItem2)
        self.fifth_data_not_available_label = QtWidgets.QLabel(self.display_tab)
        self.fifth_data_not_available_label.setEnabled(False)
        font = QtGui.QFont()
        font.setItalic(True)
        self.fifth_data_not_available_label.setFont(font)
        self.fifth_data_not_available_label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.fifth_data_not_available_label.setObjectName("fifth_data_not_available_label")
        self.verticalLayout_2.addWidget(self.fifth_data_not_available_label)
        self.fifth_pushButton = QtWidgets.QPushButton(self.display_tab)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fifth_pushButton.sizePolicy().hasHeightForWidth())
        self.fifth_pushButton.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(20)
        self.fifth_pushButton.setFont(font)
        # Checked state switches to a vertical gradient with a thicker border.
        self.fifth_pushButton.setStyleSheet("QPushButton {background-color: #ffa500; border: 1px solid white;border-radius: 10px; } QPushButton:checked{ background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(255, 165, 0, 255), stop:1 rgba(252, 194, 88, 255)); border: 5px solid #ffa500;border-radius: 30px;}")
        self.fifth_pushButton.setCheckable(True)
        self.fifth_pushButton.setObjectName("fifth_pushButton")
        self.verticalLayout_2.addWidget(self.fifth_pushButton)
        # Current / A / B / C / D read-only value fields for channel 5.
        self.formLayout = QtWidgets.QFormLayout()
        self.formLayout.setObjectName("formLayout")
        self.label_41 = QtWidgets.QLabel(self.display_tab)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_41.setFont(font)
        self.label_41.setObjectName("label_41")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_41)
        self.fifth_current_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fifth_current_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fifth_current_lineEdit.sizePolicy().hasHeightForWidth())
        self.fifth_current_lineEdit.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.fifth_current_lineEdit.setFont(font)
        self.fifth_current_lineEdit.setObjectName("fifth_current_lineEdit")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.fifth_current_lineEdit)
        self.label_45 = QtWidgets.QLabel(self.display_tab)
        self.label_45.setObjectName("label_45")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_45)
        self.fifth_a_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fifth_a_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fifth_a_lineEdit.sizePolicy().hasHeightForWidth())
        self.fifth_a_lineEdit.setSizePolicy(sizePolicy)
        self.fifth_a_lineEdit.setObjectName("fifth_a_lineEdit")
        self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.fifth_a_lineEdit)
        self.label_46 = QtWidgets.QLabel(self.display_tab)
        self.label_46.setObjectName("label_46")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_46)
        self.fifth_b_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fifth_b_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fifth_b_lineEdit.sizePolicy().hasHeightForWidth())
        self.fifth_b_lineEdit.setSizePolicy(sizePolicy)
        self.fifth_b_lineEdit.setObjectName("fifth_b_lineEdit")
        self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.fifth_b_lineEdit)
        self.label_47 = QtWidgets.QLabel(self.display_tab)
        self.label_47.setObjectName("label_47")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_47)
        self.fifth_c_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fifth_c_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fifth_c_lineEdit.sizePolicy().hasHeightForWidth())
        self.fifth_c_lineEdit.setSizePolicy(sizePolicy)
        self.fifth_c_lineEdit.setObjectName("fifth_c_lineEdit")
        self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.fifth_c_lineEdit)
        self.label_48 = QtWidgets.QLabel(self.display_tab)
        self.label_48.setObjectName("label_48")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_48)
        self.fifth_d_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fifth_d_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fifth_d_lineEdit.sizePolicy().hasHeightForWidth())
        self.fifth_d_lineEdit.setSizePolicy(sizePolicy)
        self.fifth_d_lineEdit.setObjectName("fifth_d_lineEdit")
        self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.fifth_d_lineEdit)
        self.verticalLayout_2.addLayout(self.formLayout)
        self.gridLayout_2.addLayout(self.verticalLayout_2, 2, 5, 1, 1)
        # --- channel 1 panel (blue #07a2ef), grid cell (0, 1) --------------
        self.first_verticalLayout = QtWidgets.QVBoxLayout()
        self.first_verticalLayout.setObjectName("first_verticalLayout")
        spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
        self.first_verticalLayout.addItem(spacerItem3)
        self.first_data_not_available_label = QtWidgets.QLabel(self.display_tab)
        self.first_data_not_available_label.setEnabled(False)
        font = QtGui.QFont()
        font.setItalic(True)
        self.first_data_not_available_label.setFont(font)
        self.first_data_not_available_label.setAutoFillBackground(False)
        self.first_data_not_available_label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.first_data_not_available_label.setObjectName("first_data_not_available_label")
        self.first_verticalLayout.addWidget(self.first_data_not_available_label)
        self.first_pushButton = QtWidgets.QPushButton(self.display_tab)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.first_pushButton.sizePolicy().hasHeightForWidth())
        self.first_pushButton.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(20)
        self.first_pushButton.setFont(font)
        # NOTE(review): the second gradient stop uses a 3-argument
        # rgba(111, 198, 242) -- alpha omitted, unlike the other panels.
        self.first_pushButton.setStyleSheet("QPushButton {background-color:#07a2ef; border: 1px solid white;border-radius: 10px; } QPushButton:checked{background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(7, 162, 239, 255), stop:1 rgba(111, 198, 242)); border: 5px solid #07a2ef;border-radius: 30px;}")
        self.first_pushButton.setCheckable(True)
        self.first_pushButton.setObjectName("first_pushButton")
        self.first_verticalLayout.addWidget(self.first_pushButton)
        # Current / A / B / C / D read-only value fields for channel 1.
        self.first_formLayout = QtWidgets.QFormLayout()
        self.first_formLayout.setObjectName("first_formLayout")
        self.label_3 = QtWidgets.QLabel(self.display_tab)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.first_formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_3)
        self.first_current_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.first_current_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.first_current_lineEdit.sizePolicy().hasHeightForWidth())
        self.first_current_lineEdit.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.first_current_lineEdit.setFont(font)
        self.first_current_lineEdit.setObjectName("first_current_lineEdit")
        self.first_formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.first_current_lineEdit)
        self.label_7 = QtWidgets.QLabel(self.display_tab)
        self.label_7.setObjectName("label_7")
        self.first_formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_7)
        self.first_a_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.first_a_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.first_a_lineEdit.sizePolicy().hasHeightForWidth())
        self.first_a_lineEdit.setSizePolicy(sizePolicy)
        self.first_a_lineEdit.setObjectName("first_a_lineEdit")
        self.first_formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.first_a_lineEdit)
        self.label_8 = QtWidgets.QLabel(self.display_tab)
        self.label_8.setObjectName("label_8")
        self.first_formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_8)
        self.first_b_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.first_b_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.first_b_lineEdit.sizePolicy().hasHeightForWidth())
        self.first_b_lineEdit.setSizePolicy(sizePolicy)
        self.first_b_lineEdit.setObjectName("first_b_lineEdit")
        self.first_formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.first_b_lineEdit)
        self.label_9 = QtWidgets.QLabel(self.display_tab)
        self.label_9.setObjectName("label_9")
        self.first_formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_9)
        self.first_c_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.first_c_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.first_c_lineEdit.sizePolicy().hasHeightForWidth())
        self.first_c_lineEdit.setSizePolicy(sizePolicy)
        self.first_c_lineEdit.setObjectName("first_c_lineEdit")
        self.first_formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.first_c_lineEdit)
        self.label_10 = QtWidgets.QLabel(self.display_tab)
        self.label_10.setObjectName("label_10")
        self.first_formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_10)
        self.first_d_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.first_d_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.first_d_lineEdit.sizePolicy().hasHeightForWidth())
        self.first_d_lineEdit.setSizePolicy(sizePolicy)
        self.first_d_lineEdit.setObjectName("first_d_lineEdit")
        self.first_formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.first_d_lineEdit)
        self.first_verticalLayout.addLayout(self.first_formLayout)
        self.gridLayout_2.addLayout(self.first_verticalLayout, 0, 1, 1, 1)
        # --- channel 2 panel (red #f44253), grid cell (0, 3) ---------------
        self.verticalLayout_6 = QtWidgets.QVBoxLayout()
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
        self.verticalLayout_6.addItem(spacerItem4)
        self.second_data_not_available_label = QtWidgets.QLabel(self.display_tab)
        self.second_data_not_available_label.setEnabled(False)
        font = QtGui.QFont()
        font.setItalic(True)
        self.second_data_not_available_label.setFont(font)
        self.second_data_not_available_label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.second_data_not_available_label.setObjectName("second_data_not_available_label")
        self.verticalLayout_6.addWidget(self.second_data_not_available_label)
        self.second_pushButton = QtWidgets.QPushButton(self.display_tab)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.second_pushButton.sizePolicy().hasHeightForWidth())
        self.second_pushButton.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(20)
        self.second_pushButton.setFont(font)
        self.second_pushButton.setStyleSheet("QPushButton {background-color: #f44253; border: 1px solid white;border-radius: 10px; } QPushButton:checked{ background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(244, 66, 83, 255), stop:1 rgba(247, 111, 124, 255)); border: 5px solid #f44253;border-radius: 30px;}")
        self.second_pushButton.setCheckable(True)
        self.second_pushButton.setObjectName("second_pushButton")
        self.verticalLayout_6.addWidget(self.second_pushButton)
        # Current / A / B / C / D read-only value fields for channel 2.
        self.formLayout_5 = QtWidgets.QFormLayout()
        self.formLayout_5.setObjectName("formLayout_5")
        self.label_13 = QtWidgets.QLabel(self.display_tab)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_13.setFont(font)
        self.label_13.setObjectName("label_13")
        self.formLayout_5.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_13)
        self.second_current_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.second_current_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.second_current_lineEdit.sizePolicy().hasHeightForWidth())
        self.second_current_lineEdit.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.second_current_lineEdit.setFont(font)
        self.second_current_lineEdit.setObjectName("second_current_lineEdit")
        self.formLayout_5.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.second_current_lineEdit)
        self.label_17 = QtWidgets.QLabel(self.display_tab)
        self.label_17.setObjectName("label_17")
        self.formLayout_5.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_17)
        self.second_a_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.second_a_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.second_a_lineEdit.sizePolicy().hasHeightForWidth())
        self.second_a_lineEdit.setSizePolicy(sizePolicy)
        self.second_a_lineEdit.setObjectName("second_a_lineEdit")
        self.formLayout_5.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.second_a_lineEdit)
        self.label_18 = QtWidgets.QLabel(self.display_tab)
        self.label_18.setObjectName("label_18")
        self.formLayout_5.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_18)
        self.second_b_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.second_b_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.second_b_lineEdit.sizePolicy().hasHeightForWidth())
        self.second_b_lineEdit.setSizePolicy(sizePolicy)
        self.second_b_lineEdit.setObjectName("second_b_lineEdit")
        self.formLayout_5.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.second_b_lineEdit)
        self.label_19 = QtWidgets.QLabel(self.display_tab)
        self.label_19.setObjectName("label_19")
        self.formLayout_5.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_19)
        self.second_c_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.second_c_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.second_c_lineEdit.sizePolicy().hasHeightForWidth())
        self.second_c_lineEdit.setSizePolicy(sizePolicy)
        self.second_c_lineEdit.setObjectName("second_c_lineEdit")
        self.formLayout_5.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.second_c_lineEdit)
        self.label_20 = QtWidgets.QLabel(self.display_tab)
        self.label_20.setObjectName("label_20")
        self.formLayout_5.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_20)
        self.second_d_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.second_d_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.second_d_lineEdit.sizePolicy().hasHeightForWidth())
        self.second_d_lineEdit.setSizePolicy(sizePolicy)
        self.second_d_lineEdit.setObjectName("second_d_lineEdit")
        self.formLayout_5.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.second_d_lineEdit)
        self.verticalLayout_6.addLayout(self.formLayout_5)
        self.gridLayout_2.addLayout(self.verticalLayout_6, 0, 3, 1, 1)
        # --- channel 3 panel (green #00d157), grid cell (0, 5) -------------
        self.verticalLayout_4 = QtWidgets.QVBoxLayout()
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
        self.verticalLayout_4.addItem(spacerItem5)
        self.third_data_not_available_label = QtWidgets.QLabel(self.display_tab)
        self.third_data_not_available_label.setEnabled(False)
        font = QtGui.QFont()
        font.setItalic(True)
        self.third_data_not_available_label.setFont(font)
        self.third_data_not_available_label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.third_data_not_available_label.setObjectName("third_data_not_available_label")
        self.verticalLayout_4.addWidget(self.third_data_not_available_label)
        self.third_pushButton = QtWidgets.QPushButton(self.display_tab)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.third_pushButton.sizePolicy().hasHeightForWidth())
        self.third_pushButton.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(20)
        self.third_pushButton.setFont(font)
        self.third_pushButton.setStyleSheet("QPushButton {background-color: #00d157; border: 1px solid white;border-radius: 10px; } QPushButton:checked{ background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(0, 209, 87, 255), stop:1 rgba(110, 239, 164, 255)); border: 5px solid #00d157;border-radius: 30px;}")
        self.third_pushButton.setCheckable(True)
        self.third_pushButton.setObjectName("third_pushButton")
        self.verticalLayout_4.addWidget(self.third_pushButton)
        # Current / A / B / C / D read-only value fields for channel 3.
        self.formLayout_3 = QtWidgets.QFormLayout()
        self.formLayout_3.setObjectName("formLayout_3")
        self.label_23 = QtWidgets.QLabel(self.display_tab)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_23.setFont(font)
        self.label_23.setObjectName("label_23")
        self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_23)
        self.third_current_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.third_current_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.third_current_lineEdit.sizePolicy().hasHeightForWidth())
        self.third_current_lineEdit.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.third_current_lineEdit.setFont(font)
        self.third_current_lineEdit.setObjectName("third_current_lineEdit")
        self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.third_current_lineEdit)
        self.label_27 = QtWidgets.QLabel(self.display_tab)
        self.label_27.setObjectName("label_27")
        self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_27)
        self.third_a_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.third_a_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.third_a_lineEdit.sizePolicy().hasHeightForWidth())
        self.third_a_lineEdit.setSizePolicy(sizePolicy)
        self.third_a_lineEdit.setObjectName("third_a_lineEdit")
        self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.third_a_lineEdit)
        self.label_28 = QtWidgets.QLabel(self.display_tab)
        self.label_28.setObjectName("label_28")
        self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_28)
        self.third_b_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.third_b_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.third_b_lineEdit.sizePolicy().hasHeightForWidth())
        self.third_b_lineEdit.setSizePolicy(sizePolicy)
        self.third_b_lineEdit.setObjectName("third_b_lineEdit")
        self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.third_b_lineEdit)
        self.label_29 = QtWidgets.QLabel(self.display_tab)
        self.label_29.setObjectName("label_29")
        self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_29)
        self.third_c_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.third_c_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.third_c_lineEdit.sizePolicy().hasHeightForWidth())
        self.third_c_lineEdit.setSizePolicy(sizePolicy)
        self.third_c_lineEdit.setObjectName("third_c_lineEdit")
        self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.third_c_lineEdit)
        self.label_30 = QtWidgets.QLabel(self.display_tab)
        self.label_30.setObjectName("label_30")
        self.formLayout_3.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_30)
        self.third_d_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.third_d_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.third_d_lineEdit.sizePolicy().hasHeightForWidth())
        self.third_d_lineEdit.setSizePolicy(sizePolicy)
        self.third_d_lineEdit.setObjectName("third_d_lineEdit")
        self.formLayout_3.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.third_d_lineEdit)
        self.verticalLayout_4.addLayout(self.formLayout_3)
        self.gridLayout_2.addLayout(self.verticalLayout_4, 0, 5, 1, 1)
        # Spacers separating the lower-row panels.
        spacerItem6 = QtWidgets.QSpacerItem(68, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem6, 2, 4, 1, 1)
        spacerItem7 = QtWidgets.QSpacerItem(68, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem7, 2, 2, 1, 1)
        # --- channel 4 panel (yellow #e9f416), grid cell (2, 1) ------------
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        spacerItem8 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
        self.verticalLayout_3.addItem(spacerItem8)
        self.fourth_data_not_available_label = QtWidgets.QLabel(self.display_tab)
        self.fourth_data_not_available_label.setEnabled(False)
        font = QtGui.QFont()
        font.setItalic(True)
        self.fourth_data_not_available_label.setFont(font)
        self.fourth_data_not_available_label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
        self.fourth_data_not_available_label.setObjectName("fourth_data_not_available_label")
        self.verticalLayout_3.addWidget(self.fourth_data_not_available_label)
        self.fourth_pushButton = QtWidgets.QPushButton(self.display_tab)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fourth_pushButton.sizePolicy().hasHeightForWidth())
        self.fourth_pushButton.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(20)
        self.fourth_pushButton.setFont(font)
        self.fourth_pushButton.setStyleSheet("QPushButton {background-color: #e9f416; border: 1px solid white;border-radius: 10px;}QPushButton:checked{ background-color:qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1, stop:0 rgba(233, 244, 22, 255), stop:1 rgba(244, 252, 100, 255)); border: 5px solid #e9f416;border-radius: 30px;}")
        self.fourth_pushButton.setCheckable(True)
        self.fourth_pushButton.setObjectName("fourth_pushButton")
        self.verticalLayout_3.addWidget(self.fourth_pushButton)
        # Current / A / B / C / D read-only value fields for channel 4.
        self.formLayout_2 = QtWidgets.QFormLayout()
        self.formLayout_2.setObjectName("formLayout_2")
        self.label_32 = QtWidgets.QLabel(self.display_tab)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label_32.setFont(font)
        self.label_32.setObjectName("label_32")
        self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_32)
        self.fourth_current_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fourth_current_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fourth_current_lineEdit.sizePolicy().hasHeightForWidth())
        self.fourth_current_lineEdit.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.fourth_current_lineEdit.setFont(font)
        self.fourth_current_lineEdit.setObjectName("fourth_current_lineEdit")
        self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.fourth_current_lineEdit)
        self.label_36 = QtWidgets.QLabel(self.display_tab)
        self.label_36.setObjectName("label_36")
        self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_36)
        self.fourth_a_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fourth_a_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fourth_a_lineEdit.sizePolicy().hasHeightForWidth())
        self.fourth_a_lineEdit.setSizePolicy(sizePolicy)
        self.fourth_a_lineEdit.setObjectName("fourth_a_lineEdit")
        self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.fourth_a_lineEdit)
        self.label_37 = QtWidgets.QLabel(self.display_tab)
        self.label_37.setObjectName("label_37")
        self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_37)
        self.fourth_b_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fourth_b_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fourth_b_lineEdit.sizePolicy().hasHeightForWidth())
        self.fourth_b_lineEdit.setSizePolicy(sizePolicy)
        self.fourth_b_lineEdit.setObjectName("fourth_b_lineEdit")
        self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.fourth_b_lineEdit)
        self.label_38 = QtWidgets.QLabel(self.display_tab)
        self.label_38.setObjectName("label_38")
        self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_38)
        self.fourth_c_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fourth_c_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fourth_c_lineEdit.sizePolicy().hasHeightForWidth())
        self.fourth_c_lineEdit.setSizePolicy(sizePolicy)
        self.fourth_c_lineEdit.setObjectName("fourth_c_lineEdit")
        self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.fourth_c_lineEdit)
        self.label_39 = QtWidgets.QLabel(self.display_tab)
        self.label_39.setObjectName("label_39")
        self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_39)
        self.fourth_d_lineEdit = QtWidgets.QLineEdit(self.display_tab)
        self.fourth_d_lineEdit.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fourth_d_lineEdit.sizePolicy().hasHeightForWidth())
        self.fourth_d_lineEdit.setSizePolicy(sizePolicy)
        self.fourth_d_lineEdit.setObjectName("fourth_d_lineEdit")
        self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.fourth_d_lineEdit)
        self.verticalLayout_3.addLayout(self.formLayout_2)
        self.gridLayout_2.addLayout(self.verticalLayout_3, 2, 1, 1, 1)
        self.tabWidget.addTab(self.display_tab, "")
        # --- settings tab: serial connection form --------------------------
        self.settings_tab = QtWidgets.QWidget()
        self.settings_tab.setObjectName("settings_tab")
        self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.settings_tab)
        self.verticalLayout_7.setObjectName("verticalLayout_7")
        self.groupBox = QtWidgets.QGroupBox(self.settings_tab)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
        self.groupBox.setSizePolicy(sizePolicy)
        self.groupBox.setObjectName("groupBox")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.formLayout_4 = QtWidgets.QFormLayout()
        self.formLayout_4.setObjectName("formLayout_4")
        self.baudrateLabel = QtWidgets.QLabel(self.groupBox)
        self.baudrateLabel.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.baudrateLabel.setObjectName("baudrateLabel")
        self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.baudrateLabel)
        self.baudrateComboBox = QtWidgets.QComboBox(self.groupBox)
        self.baudrateComboBox.setMaxVisibleItems(20)
        self.baudrateComboBox.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToMinimumContentsLength)
        self.baudrateComboBox.setObjectName("baudrateComboBox")
        # 13 placeholder items; item text presumably assigned in
        # retranslateUi or at runtime -- TODO confirm.
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.baudrateComboBox.addItem("")
        self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.baudrateComboBox)
        self.portLabel = QtWidgets.QLabel(self.groupBox)
        self.portLabel.setObjectName("portLabel")
        self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.portLabel)
        self.portlineEdit = QtWidgets.QLineEdit(self.groupBox)
        self.portlineEdit.setClearButtonEnabled(True)
        self.portlineEdit.setObjectName("portlineEdit")
        self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.portlineEdit)
        self.gridLayout_4.addLayout(self.formLayout_4, 0, 0, 1, 1)
        self.verticalLayout_7.addWidget(self.groupBox)
        # Read / Stop buttons centered between two spacers; Stop starts
        # disabled (presumably enabled once reading begins -- confirm).
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem9)
        self.read_pushButton = QtWidgets.QPushButton(self.settings_tab)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.read_pushButton.sizePolicy().hasHeightForWidth())
        self.read_pushButton.setSizePolicy(sizePolicy)
        self.read_pushButton.setStyleSheet("")
        self.read_pushButton.setCheckable(True)
        self.read_pushButton.setObjectName("read_pushButton")
        self.horizontalLayout_2.addWidget(self.read_pushButton)
        self.stop_pushButton = QtWidgets.QPushButton(self.settings_tab)
        self.stop_pushButton.setEnabled(False)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.stop_pushButton.sizePolicy().hasHeightForWidth())
        self.stop_pushButton.setSizePolicy(sizePolicy)
        self.stop_pushButton.setStyleSheet("")
        self.stop_pushButton.setCheckable(True)
        self.stop_pushButton.setObjectName("stop_pushButton")
        self.horizontalLayout_2.addWidget(self.stop_pushButton)
        spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem10)
        self.verticalLayout_7.addLayout(self.horizontalLayout_2)
        # Logo image, then an expanding spacer pushing content to the top.
        self.ei_logo_label = QtWidgets.QLabel(self.settings_tab)
        self.ei_logo_label.setText("")
        self.ei_logo_label.setPixmap(QtGui.QPixmap("Images/EI_logo_with-icons-300x212.png"))
        self.ei_logo_label.setScaledContents(False)
        self.ei_logo_label.setAlignment(QtCore.Qt.AlignCenter)
        self.ei_logo_label.setObjectName("ei_logo_label")
        self.verticalLayout_7.addWidget(self.ei_logo_label)
        spacerItem11 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_7.addItem(spacerItem11)
        self.tabWidget.addTab(self.settings_tab, "")
        # --- final wiring: central widget, translations, signal hookup -----
        self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Holo.lab"))
self.fifth_data_not_available_label.setText(_translate("MainWindow", "<< Data not available >>"))
self.fifth_pushButton.setText(_translate("MainWindow", "5"))
self.label_41.setText(_translate("MainWindow", "Current"))
self.fifth_current_lineEdit.setText(_translate("MainWindow", "0"))
self.label_45.setText(_translate("MainWindow", "A"))
self.fifth_a_lineEdit.setText(_translate("MainWindow", "0"))
self.label_46.setText(_translate("MainWindow", "B"))
self.fifth_b_lineEdit.setText(_translate("MainWindow", "0"))
self.label_47.setText(_translate("MainWindow", "C"))
self.fifth_c_lineEdit.setText(_translate("MainWindow", "0"))
self.label_48.setText(_translate("MainWindow", "D"))
self.fifth_d_lineEdit.setText(_translate("MainWindow", "0"))
self.first_data_not_available_label.setText(_translate("MainWindow", "<< Data not available >>"))
self.first_pushButton.setText(_translate("MainWindow", "1"))
self.label_3.setText(_translate("MainWindow", "Current"))
self.first_current_lineEdit.setText(_translate("MainWindow", "0"))
self.label_7.setText(_translate("MainWindow", "A"))
self.first_a_lineEdit.setText(_translate("MainWindow", "0"))
self.label_8.setText(_translate("MainWindow", "B"))
self.first_b_lineEdit.setText(_translate("MainWindow", "0"))
self.label_9.setText(_translate("MainWindow", "C"))
self.first_c_lineEdit.setText(_translate("MainWindow", "0"))
self.label_10.setText(_translate("MainWindow", "D"))
self.first_d_lineEdit.setText(_translate("MainWindow", "0"))
self.second_data_not_available_label.setText(_translate("MainWindow", "<< Data not available >>"))
self.second_pushButton.setText(_translate("MainWindow", "2"))
self.label_13.setText(_translate("MainWindow", "Current"))
self.second_current_lineEdit.setText(_translate("MainWindow", "0"))
self.label_17.setText(_translate("MainWindow", "A"))
self.second_a_lineEdit.setText(_translate("MainWindow", "0"))
self.label_18.setText(_translate("MainWindow", "B"))
self.second_b_lineEdit.setText(_translate("MainWindow", "0"))
self.label_19.setText(_translate("MainWindow", "C"))
self.second_c_lineEdit.setText(_translate("MainWindow", "0"))
self.label_20.setText(_translate("MainWindow", "D"))
self.second_d_lineEdit.setText(_translate("MainWindow", "0"))
self.third_data_not_available_label.setText(_translate("MainWindow", "<< Data not available >>"))
self.third_pushButton.setText(_translate("MainWindow", "3"))
self.label_23.setText(_translate("MainWindow", "Current"))
self.third_current_lineEdit.setText(_translate("MainWindow", "0"))
self.label_27.setText(_translate("MainWindow", "A"))
self.third_a_lineEdit.setText(_translate("MainWindow", "0"))
self.label_28.setText(_translate("MainWindow", "B"))
self.third_b_lineEdit.setText(_translate("MainWindow", "0"))
self.label_29.setText(_translate("MainWindow", "C"))
self.third_c_lineEdit.setText(_translate("MainWindow", "0"))
self.label_30.setText(_translate("MainWindow", "D"))
self.third_d_lineEdit.setText(_translate("MainWindow", "0"))
self.fourth_data_not_available_label.setText(_translate("MainWindow", "<< Data not available >>"))
self.fourth_pushButton.setText(_translate("MainWindow", "4"))
self.label_32.setText(_translate("MainWindow", "Current"))
self.fourth_current_lineEdit.setText(_translate("MainWindow", "0"))
self.label_36.setText(_translate("MainWindow", "A"))
self.fourth_a_lineEdit.setText(_translate("MainWindow", "0"))
self.label_37.setText(_translate("MainWindow", "B"))
self.fourth_b_lineEdit.setText(_translate("MainWindow", "0"))
self.label_38.setText(_translate("MainWindow", "C"))
self.fourth_c_lineEdit.setText(_translate("MainWindow", "0"))
self.label_39.setText(_translate("MainWindow", "D"))
self.fourth_d_lineEdit.setText(_translate("MainWindow", "0"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.display_tab), _translate("MainWindow", "Display"))
self.groupBox.setTitle(_translate("MainWindow", "Serial Port Settings"))
self.baudrateLabel.setText(_translate("MainWindow", "Baudrate"))
self.baudrateComboBox.setCurrentText(_translate("MainWindow", "115200"))
self.baudrateComboBox.setItemText(0, _translate("MainWindow", "115200"))
self.baudrateComboBox.setItemText(1, _translate("MainWindow", "57600"))
self.baudrateComboBox.setItemText(2, _translate("MainWindow", "38400"))
self.baudrateComboBox.setItemText(3, _translate("MainWindow", "28800"))
self.baudrateComboBox.setItemText(4, _translate("MainWindow", "19200"))
self.baudrateComboBox.setItemText(5, _translate("MainWindow", "14400"))
self.baudrateComboBox.setItemText(6, _translate("MainWindow", "9600"))
self.baudrateComboBox.setItemText(7, _translate("MainWindow", "4800"))
self.baudrateComboBox.setItemText(8, _translate("MainWindow", "2400"))
self.baudrateComboBox.setItemText(9, _translate("MainWindow", "1200"))
self.baudrateComboBox.setItemText(10, _translate("MainWindow", "900"))
self.baudrateComboBox.setItemText(11, _translate("MainWindow", "600"))
self.baudrateComboBox.setItemText(12, _translate("MainWindow", "300"))
self.portLabel.setText(_translate("MainWindow", "Port"))
self.portlineEdit.setText(_translate("MainWindow", "COM3"))
self.read_pushButton.setText(_translate("MainWindow", "Read"))
self.stop_pushButton.setText(_translate("MainWindow", "Stop"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.settings_tab), _translate("MainWindow", "Settings"))
| 66.170274
| 339
| 0.744766
|
acfd408ca5ad8f06f35dbc8662278490715b954f
| 610
|
py
|
Python
|
ckan/migration/versions/003_add_user_object.py
|
florianm/ckan
|
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
|
[
"Apache-2.0"
] | 12
|
2015-08-28T16:59:07.000Z
|
2020-03-08T01:39:30.000Z
|
ckan/migration/versions/003_add_user_object.py
|
florianm/ckan
|
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
|
[
"Apache-2.0"
] | 13
|
2019-05-02T21:01:28.000Z
|
2020-10-20T23:34:48.000Z
|
ckan/migration/versions/003_add_user_object.py
|
florianm/ckan
|
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
|
[
"Apache-2.0"
] | 10
|
2015-05-08T04:33:20.000Z
|
2020-03-03T15:17:58.000Z
|
from sqlalchemy import *
from migrate import *
import uuid
def make_uuid():
return unicode(uuid.uuid4())
def upgrade(migrate_engine):
metadata = MetaData()
user_table = Table('user', metadata,
Column('id', UnicodeText, primary_key=True, default=make_uuid),
Column('name', UnicodeText),
Column('apikey', UnicodeText, default=make_uuid)
)
metadata.bind = migrate_engine
apikey_table = Table('apikey', metadata, autoload=True)
user_table.create()
apikey_table.drop()
def downgrade(migrate_engine):
raise NotImplementedError()
| 23.461538
| 75
| 0.670492
|
acfd40ae1eed55fab37f9ac9298e113482d1cb0b
| 12,332
|
py
|
Python
|
alipay/aop/api/domain/AlipayCommerceMedicalInstcardCreateandpayModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayCommerceMedicalInstcardCreateandpayModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayCommerceMedicalInstcardCreateandpayModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceMedicalInstcardCreateandpayModel(object):
def __init__(self):
self._bill_no = None
self._body = None
self._buyer_id = None
self._extend_params = None
self._gmt_out_create = None
self._industry = None
self._is_insurance = None
self._medical_card_id = None
self._medical_card_inst_id = None
self._org_name = None
self._org_no = None
self._out_trade_no = None
self._patient_card_no = None
self._patient_card_type = None
self._patient_mobile = None
self._patient_name = None
self._request_content = None
self._scene = None
self._seller_id = None
self._serial_no = None
self._subject = None
self._timeout_express = None
self._total_amount = None
@property
def bill_no(self):
return self._bill_no
@bill_no.setter
def bill_no(self, value):
self._bill_no = value
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = value
@property
def buyer_id(self):
return self._buyer_id
@buyer_id.setter
def buyer_id(self, value):
self._buyer_id = value
@property
def extend_params(self):
return self._extend_params
@extend_params.setter
def extend_params(self, value):
self._extend_params = value
@property
def gmt_out_create(self):
return self._gmt_out_create
@gmt_out_create.setter
def gmt_out_create(self, value):
self._gmt_out_create = value
@property
def industry(self):
return self._industry
@industry.setter
def industry(self, value):
self._industry = value
@property
def is_insurance(self):
return self._is_insurance
@is_insurance.setter
def is_insurance(self, value):
self._is_insurance = value
@property
def medical_card_id(self):
return self._medical_card_id
@medical_card_id.setter
def medical_card_id(self, value):
self._medical_card_id = value
@property
def medical_card_inst_id(self):
return self._medical_card_inst_id
@medical_card_inst_id.setter
def medical_card_inst_id(self, value):
self._medical_card_inst_id = value
@property
def org_name(self):
return self._org_name
@org_name.setter
def org_name(self, value):
self._org_name = value
@property
def org_no(self):
return self._org_no
@org_no.setter
def org_no(self, value):
self._org_no = value
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def patient_card_no(self):
return self._patient_card_no
@patient_card_no.setter
def patient_card_no(self, value):
self._patient_card_no = value
@property
def patient_card_type(self):
return self._patient_card_type
@patient_card_type.setter
def patient_card_type(self, value):
self._patient_card_type = value
@property
def patient_mobile(self):
return self._patient_mobile
@patient_mobile.setter
def patient_mobile(self, value):
self._patient_mobile = value
@property
def patient_name(self):
return self._patient_name
@patient_name.setter
def patient_name(self, value):
self._patient_name = value
@property
def request_content(self):
return self._request_content
@request_content.setter
def request_content(self, value):
self._request_content = value
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, value):
self._scene = value
@property
def seller_id(self):
return self._seller_id
@seller_id.setter
def seller_id(self, value):
self._seller_id = value
@property
def serial_no(self):
return self._serial_no
@serial_no.setter
def serial_no(self, value):
self._serial_no = value
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, value):
self._subject = value
@property
def timeout_express(self):
return self._timeout_express
@timeout_express.setter
def timeout_express(self, value):
self._timeout_express = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
def to_alipay_dict(self):
params = dict()
if self.bill_no:
if hasattr(self.bill_no, 'to_alipay_dict'):
params['bill_no'] = self.bill_no.to_alipay_dict()
else:
params['bill_no'] = self.bill_no
if self.body:
if hasattr(self.body, 'to_alipay_dict'):
params['body'] = self.body.to_alipay_dict()
else:
params['body'] = self.body
if self.buyer_id:
if hasattr(self.buyer_id, 'to_alipay_dict'):
params['buyer_id'] = self.buyer_id.to_alipay_dict()
else:
params['buyer_id'] = self.buyer_id
if self.extend_params:
if hasattr(self.extend_params, 'to_alipay_dict'):
params['extend_params'] = self.extend_params.to_alipay_dict()
else:
params['extend_params'] = self.extend_params
if self.gmt_out_create:
if hasattr(self.gmt_out_create, 'to_alipay_dict'):
params['gmt_out_create'] = self.gmt_out_create.to_alipay_dict()
else:
params['gmt_out_create'] = self.gmt_out_create
if self.industry:
if hasattr(self.industry, 'to_alipay_dict'):
params['industry'] = self.industry.to_alipay_dict()
else:
params['industry'] = self.industry
if self.is_insurance:
if hasattr(self.is_insurance, 'to_alipay_dict'):
params['is_insurance'] = self.is_insurance.to_alipay_dict()
else:
params['is_insurance'] = self.is_insurance
if self.medical_card_id:
if hasattr(self.medical_card_id, 'to_alipay_dict'):
params['medical_card_id'] = self.medical_card_id.to_alipay_dict()
else:
params['medical_card_id'] = self.medical_card_id
if self.medical_card_inst_id:
if hasattr(self.medical_card_inst_id, 'to_alipay_dict'):
params['medical_card_inst_id'] = self.medical_card_inst_id.to_alipay_dict()
else:
params['medical_card_inst_id'] = self.medical_card_inst_id
if self.org_name:
if hasattr(self.org_name, 'to_alipay_dict'):
params['org_name'] = self.org_name.to_alipay_dict()
else:
params['org_name'] = self.org_name
if self.org_no:
if hasattr(self.org_no, 'to_alipay_dict'):
params['org_no'] = self.org_no.to_alipay_dict()
else:
params['org_no'] = self.org_no
if self.out_trade_no:
if hasattr(self.out_trade_no, 'to_alipay_dict'):
params['out_trade_no'] = self.out_trade_no.to_alipay_dict()
else:
params['out_trade_no'] = self.out_trade_no
if self.patient_card_no:
if hasattr(self.patient_card_no, 'to_alipay_dict'):
params['patient_card_no'] = self.patient_card_no.to_alipay_dict()
else:
params['patient_card_no'] = self.patient_card_no
if self.patient_card_type:
if hasattr(self.patient_card_type, 'to_alipay_dict'):
params['patient_card_type'] = self.patient_card_type.to_alipay_dict()
else:
params['patient_card_type'] = self.patient_card_type
if self.patient_mobile:
if hasattr(self.patient_mobile, 'to_alipay_dict'):
params['patient_mobile'] = self.patient_mobile.to_alipay_dict()
else:
params['patient_mobile'] = self.patient_mobile
if self.patient_name:
if hasattr(self.patient_name, 'to_alipay_dict'):
params['patient_name'] = self.patient_name.to_alipay_dict()
else:
params['patient_name'] = self.patient_name
if self.request_content:
if hasattr(self.request_content, 'to_alipay_dict'):
params['request_content'] = self.request_content.to_alipay_dict()
else:
params['request_content'] = self.request_content
if self.scene:
if hasattr(self.scene, 'to_alipay_dict'):
params['scene'] = self.scene.to_alipay_dict()
else:
params['scene'] = self.scene
if self.seller_id:
if hasattr(self.seller_id, 'to_alipay_dict'):
params['seller_id'] = self.seller_id.to_alipay_dict()
else:
params['seller_id'] = self.seller_id
if self.serial_no:
if hasattr(self.serial_no, 'to_alipay_dict'):
params['serial_no'] = self.serial_no.to_alipay_dict()
else:
params['serial_no'] = self.serial_no
if self.subject:
if hasattr(self.subject, 'to_alipay_dict'):
params['subject'] = self.subject.to_alipay_dict()
else:
params['subject'] = self.subject
if self.timeout_express:
if hasattr(self.timeout_express, 'to_alipay_dict'):
params['timeout_express'] = self.timeout_express.to_alipay_dict()
else:
params['timeout_express'] = self.timeout_express
if self.total_amount:
if hasattr(self.total_amount, 'to_alipay_dict'):
params['total_amount'] = self.total_amount.to_alipay_dict()
else:
params['total_amount'] = self.total_amount
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceMedicalInstcardCreateandpayModel()
if 'bill_no' in d:
o.bill_no = d['bill_no']
if 'body' in d:
o.body = d['body']
if 'buyer_id' in d:
o.buyer_id = d['buyer_id']
if 'extend_params' in d:
o.extend_params = d['extend_params']
if 'gmt_out_create' in d:
o.gmt_out_create = d['gmt_out_create']
if 'industry' in d:
o.industry = d['industry']
if 'is_insurance' in d:
o.is_insurance = d['is_insurance']
if 'medical_card_id' in d:
o.medical_card_id = d['medical_card_id']
if 'medical_card_inst_id' in d:
o.medical_card_inst_id = d['medical_card_inst_id']
if 'org_name' in d:
o.org_name = d['org_name']
if 'org_no' in d:
o.org_no = d['org_no']
if 'out_trade_no' in d:
o.out_trade_no = d['out_trade_no']
if 'patient_card_no' in d:
o.patient_card_no = d['patient_card_no']
if 'patient_card_type' in d:
o.patient_card_type = d['patient_card_type']
if 'patient_mobile' in d:
o.patient_mobile = d['patient_mobile']
if 'patient_name' in d:
o.patient_name = d['patient_name']
if 'request_content' in d:
o.request_content = d['request_content']
if 'scene' in d:
o.scene = d['scene']
if 'seller_id' in d:
o.seller_id = d['seller_id']
if 'serial_no' in d:
o.serial_no = d['serial_no']
if 'subject' in d:
o.subject = d['subject']
if 'timeout_express' in d:
o.timeout_express = d['timeout_express']
if 'total_amount' in d:
o.total_amount = d['total_amount']
return o
| 33.239892
| 91
| 0.599578
|
acfd4282cef5115c403dd8583f91de2b2dc0fe1b
| 2,836
|
py
|
Python
|
rllib/agents/marwil/tests/test_bc.py
|
jamesliu/ray
|
11ab412db1fa3603a3006e8ed414e80dd1f11c0c
|
[
"Apache-2.0"
] | 3
|
2021-06-24T17:00:18.000Z
|
2021-09-20T15:49:11.000Z
|
rllib/agents/marwil/tests/test_bc.py
|
jamesliu/ray
|
11ab412db1fa3603a3006e8ed414e80dd1f11c0c
|
[
"Apache-2.0"
] | 227
|
2021-10-01T08:00:01.000Z
|
2021-12-28T16:47:26.000Z
|
rllib/agents/marwil/tests/test_bc.py
|
gramhagen/ray
|
c18caa4db36d466718bdbcb2229aa0b2dc03da1f
|
[
"Apache-2.0"
] | 1
|
2020-12-03T20:36:00.000Z
|
2020-12-03T20:36:00.000Z
|
import os
from pathlib import Path
import unittest
import ray
import ray.rllib.agents.marwil as marwil
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import check_compute_single_action, \
check_train_results, framework_iterator
tf1, tf, tfv = try_import_tf()
class TestBC(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_bc_compilation_and_learning_from_offline_file(self):
"""Test whether a BCTrainer can be built with all frameworks.
And learns from a historic-data file (while being evaluated on an
actual env using evaluation_num_workers > 0).
"""
rllib_dir = Path(__file__).parent.parent.parent.parent
print("rllib dir={}".format(rllib_dir))
data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json")
print("data_file={} exists={}".format(data_file,
os.path.isfile(data_file)))
config = marwil.BC_DEFAULT_CONFIG.copy()
config["num_workers"] = 0 # Run locally.
config["evaluation_interval"] = 3
config["evaluation_num_workers"] = 1
config["evaluation_duration"] = 5
config["evaluation_parallel_to_training"] = True
# Evaluate on actual environment.
config["evaluation_config"] = {"input": "sampler"}
# Learn from offline data.
config["input"] = [data_file]
num_iterations = 350
min_reward = 70.0
# Test for all frameworks.
for _ in framework_iterator(config, frameworks=("tf", "torch")):
trainer = marwil.BCTrainer(config=config, env="CartPole-v0")
learnt = False
for i in range(num_iterations):
results = trainer.train()
check_train_results(results)
print(results)
eval_results = results.get("evaluation")
if eval_results:
print("iter={} R={}".format(
i, eval_results["episode_reward_mean"]))
# Learn until good reward is reached in the actual env.
if eval_results["episode_reward_mean"] > min_reward:
print("learnt!")
learnt = True
break
if not learnt:
raise ValueError(
"BCTrainer did not reach {} reward from expert offline "
"data!".format(min_reward))
check_compute_single_action(
trainer, include_prev_action_reward=True)
trainer.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 34.168675
| 77
| 0.594499
|
acfd42de33079998fe37798b4eb77119ea659f90
| 4,819
|
py
|
Python
|
napari_mosquito_bb_annotations/_reader.py
|
czbiohub/napari-mosquito-species-bb-annotations
|
f05740cb37396bb59c3763a043884b61e1537723
|
[
"BSD-3-Clause"
] | null | null | null |
napari_mosquito_bb_annotations/_reader.py
|
czbiohub/napari-mosquito-species-bb-annotations
|
f05740cb37396bb59c3763a043884b61e1537723
|
[
"BSD-3-Clause"
] | 9
|
2021-06-21T23:54:03.000Z
|
2021-08-03T17:42:39.000Z
|
napari_mosquito_bb_annotations/_reader.py
|
czbiohub/napari-mosquito-species-bb-annotations
|
f05740cb37396bb59c3763a043884b61e1537723
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module is an example of a barebones numpy reader plugin for napari.
It implements the ``napari_get_reader`` hook specification, (to create
a reader plugin) but your plugin may choose to implement any of the hook
specifications offered by napari.
see: https://napari.org/docs/plugins/hook_specifications.html
Replace code below accordingly. For complete documentation see:
https://napari.org/docs/plugins/for_plugin_developers.html
"""
import glob
import os
import numpy as np
import pandas as pd
from PIL import Image
from napari_plugin_engine import napari_hook_implementation
from napari_mosquito_bb_annotations.constants_lumi import IMAGE_FORMATS
def create_dir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
@napari_hook_implementation
def napari_get_reader(path):
"""A basic implementation of the napari_get_reader hook specification.
Parameters
----------
path : str or list of str
Path to file, or list of paths.
Returns
-------
function or None
If the path is a recognized format, return a function that accepts the
same path or list of paths, and returns a list of layer data tuples.
"""
if isinstance(path, list):
# reader plugins may be handed single path, or a list of paths.
# if it is a list, it is assumed to be an image stack...
# so we are only going to look at the first file.
path = path[0]
assert os.path.exists(path)
return reader_function
def reader_function(path):
"""Take a path or list of paths and return a list of LayerData tuples.
Readers are expected to return data as a list of tuples, where each tuple
is (data, [add_kwargs, [layer_type]]), "add_kwargs" and "layer_type" are
both optional.
Parameters
----------
path : str or list of str
Path to file, or list of paths.
Returns
-------
layer_data : list of tuples
A list of LayerData tuples where each tuple in the list contains
(data, metadata, layer_type), where data is a numpy array, metadata is
a dict of keyword arguments for the corresponding viewer.add_* method
in napari,
and layer_type is a lower-case string naming the type of layer.
Both "meta", and "layer_type" are optional. napari will default to
layer_type=="image" if not provided
"""
path = path + os.sep if not path.endswith(os.sep) else path
dirname = os.path.dirname(path)
save_overlay_path = os.path.abspath(os.path.join(dirname, "overlay_dir"))
csv_path = os.path.join(path, "bb_labels.csv")
# If annotated previously, csv path is in the input folder
if os.path.exists(csv_path):
# Modify csv file to replace image_id column's dirname if different
df = pd.read_csv(csv_path, index_col=False)
df.replace("(.+)\/([^\/]+)/", path, regex=True, inplace=True)
df.to_csv(csv_path, index=False)
all_files = []
create_dir_if_not_exists(save_overlay_path)
for format_of_files in IMAGE_FORMATS:
format_of_files = format_of_files.lower()
all_files.extend(glob.glob(os.path.join(path, "*" + format_of_files)))
# stack arrays into single array
if format_of_files in ["tiff"]:
image = Image.open(all_files[0])
if image.n_frames > 1:
raise AssertionError("Multipage tiff not supported")
return
# stack arrays into single array
shape = np.asarray(Image.open(all_files[0]).convert("RGB"), dtype=np.uint8).shape
total_files = len(all_files)
if len(shape) == 3:
stack = np.zeros((total_files, shape[0], shape[1], shape[2]), dtype=np.uint8)
else:
stack = np.zeros((total_files, shape[0], shape[1]), dtype=np.uint8)
for i in range(total_files):
stack[i] = np.asarray(Image.open(all_files[i]).convert("RGB"), dtype=np.uint8)
layer_type = "image" # optional, default is "image"
num_files = len(all_files)
metadata = {
"metadata": {
"save_overlay_path": save_overlay_path,
"all_files": all_files,
"box_annotations": [""],
"new_labels": [],
"loaded": [False] * num_files,
"updated": False,
"inferenced": [False] * num_files,
"model": "",
},
"name": "Image",
}
text_kwargs = {"text": "box_label", "size": 8, "color": "green"}
add_kwargs = dict(
face_color="black",
properties={"box_label": ["class"]},
ndim=3,
text=text_kwargs,
name="Shapes",
edge_color="black",
opacity=0.5,
blending="additive",
shape_type="rectangle",
)
layer_list = [(stack, metadata, layer_type), (None, add_kwargs, "shapes")]
return layer_list
| 35.696296
| 86
| 0.650965
|
acfd4575968fc8c7fc310c4820d98558cb6bd3b6
| 80
|
py
|
Python
|
myproject/__init__.py
|
kolypto/py-_project-template
|
7f03690e4588e8cf9fb2bdb2eb326aa455fa945d
|
[
"MIT"
] | null | null | null |
myproject/__init__.py
|
kolypto/py-_project-template
|
7f03690e4588e8cf9fb2bdb2eb326aa455fa945d
|
[
"MIT"
] | null | null | null |
myproject/__init__.py
|
kolypto/py-_project-template
|
7f03690e4588e8cf9fb2bdb2eb326aa455fa945d
|
[
"MIT"
] | null | null | null |
__version__ = __import__('pkg_resources').get_distribution('myproject').version
| 40
| 79
| 0.825
|
acfd46f860d9a779ab81af8fe376f03ab3dc5e73
| 1,463
|
py
|
Python
|
testing/main.py
|
ajbradberry96/SeniorDesignAntiNN
|
c88cbdf699e68102cb705d8a7000616922cb73ee
|
[
"MIT"
] | null | null | null |
testing/main.py
|
ajbradberry96/SeniorDesignAntiNN
|
c88cbdf699e68102cb705d8a7000616922cb73ee
|
[
"MIT"
] | 12
|
2019-12-16T21:30:53.000Z
|
2022-03-11T23:40:28.000Z
|
testing/main.py
|
ajbradberry96/SeniorDesignAntiNN
|
c88cbdf699e68102cb705d8a7000616922cb73ee
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from testing import forward_model
from testing import plot_results
from testing import adv_example
from testing import image_processing
from testing import detect_adversarial
import sys
from urllib.request import urlretrieve
import PIL
tf.logging.set_verbosity(tf.logging.ERROR)
sess = tf.InteractiveSession()
img = PIL.Image.open("media/norm_image_raw.jpg")
img = img.resize((300, 300), PIL.Image.ANTIALIAS)
img.save("media/norm_img.png")
forward_model.init(sess)
print(forward_model.get_imagenet_labels())
adv_img = adv_example.generate_adversarial_example(img, sess, adv_class='jellyfish')
adv_img.save("media/adv_img.png")
input("Continue? ")
img = PIL.Image.open("media/norm_img.png")
adv_img = PIL.Image.open("media/adv_img.png")
#img = PIL.Image.open("media/cat.png")
#adv_img = PIL.Image.open("media/adversarial_cat.png")
norm_probs = forward_model.predict(img)
adv_probs = forward_model.predict(adv_img)
plot_results.plot(img,norm_probs)
plot_results.plot(adv_img,adv_probs)
plot_results.plt.show()
input("Continue? ")
img = PIL.Image.open("media/norm_img.png")
adv_img = PIL.Image.open("media/adv_img.png")
#img = PIL.Image.open("media/cat.png")
#adv_img = PIL.Image.open("media/adversarial_cat.png")
print("NORMAL IMAGE: ")
detect_adversarial.detect(img)
print()
print()
plot_results.plt.show()
print("ADVERSARIAL IMAGE: ")
detect_adversarial.detect(adv_img)
print()
print()
plot_results.plt.show()
| 20.041096
| 84
| 0.773069
|
acfd47c5b2090f9e252703ba5f8435653dad6868
| 650
|
py
|
Python
|
esmvaltool/utils/nclcodestyle/tokenize.py
|
yifatdzigan/ESMValTool
|
83320b0e0b24ddde965599961bb80428e180a731
|
[
"Apache-2.0"
] | 148
|
2017-02-07T13:16:03.000Z
|
2022-03-26T02:21:56.000Z
|
esmvaltool/utils/nclcodestyle/tokenize.py
|
yifatdzigan/ESMValTool
|
83320b0e0b24ddde965599961bb80428e180a731
|
[
"Apache-2.0"
] | 2,026
|
2017-02-03T12:57:13.000Z
|
2022-03-31T15:11:51.000Z
|
esmvaltool/utils/nclcodestyle/tokenize.py
|
yifatdzigan/ESMValTool
|
83320b0e0b24ddde965599961bb80428e180a731
|
[
"Apache-2.0"
] | 113
|
2017-01-27T13:10:19.000Z
|
2022-02-03T13:42:11.000Z
|
"""Custom tokenize module that is both Python 2 and 3 compatible.
The following commands were used to generate the code:
sed "s/'#/';/g" lib/python2.7/tokenize.py > tokenize2.py_
sed "s/'#/';/g" lib/python3.6/tokenize.py > tokenize3.py_
Changes to the original tokenize.py for Python 2:
add r"->" to Operator at line 83
change "#" to ";" as comment symbol
Changes to the original tokenize.py for Python 3:
change "#" to ";" as comment symbol
"""
import os
import sys
_FILENAME = '{}{}.py_'.format(
os.path.splitext(__file__)[0], sys.version_info.major)
with open(_FILENAME) as src:
exec(compile(src.read(), _FILENAME, mode='exec'))
| 30.952381
| 65
| 0.7
|
acfd48874c62552de1b3f4b76b53b6d44c4d4074
| 662
|
py
|
Python
|
python/manage.py
|
horitaku1124/ml-advisor
|
93902280589aefe0833898cc20b4a770900e8a61
|
[
"MIT"
] | null | null | null |
python/manage.py
|
horitaku1124/ml-advisor
|
93902280589aefe0833898cc20b4a770900e8a61
|
[
"MIT"
] | 1
|
2022-03-28T14:12:45.000Z
|
2022-03-28T14:12:45.000Z
|
python/manage.py
|
horitaku1124/ml-advisor
|
93902280589aefe0833898cc20b4a770900e8a61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testPj.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.782609
| 73
| 0.678248
|
acfd48cd2a3216269540c9104b00e328df1cd054
| 507
|
py
|
Python
|
appConf/serializers/notificationSerializer.py
|
salah-walid/Ecommerce-Shopping-App
|
c828701b03dab74c72538943f7aac5192fa20be6
|
[
"MIT"
] | 1
|
2021-12-12T12:53:14.000Z
|
2021-12-12T12:53:14.000Z
|
appConf/serializers/notificationSerializer.py
|
JetLightStudio/Ecommerce-Shopping-App
|
c828701b03dab74c72538943f7aac5192fa20be6
|
[
"MIT"
] | null | null | null |
appConf/serializers/notificationSerializer.py
|
JetLightStudio/Ecommerce-Shopping-App
|
c828701b03dab74c72538943f7aac5192fa20be6
|
[
"MIT"
] | 1
|
2021-06-08T10:22:08.000Z
|
2021-06-08T10:22:08.000Z
|
from rest_framework import serializers
from appConf.models.notification import notification
from appConf.serializers.userSerializer import userSimplifiedSerialiser
class notificationSerializer(serializers.ModelSerializer):
isRead = serializers.SerializerMethodField("is_read")
class Meta:
model = notification
fields = ('id' ,'title', "content", "isRead")
def is_read(self, obj):
usr = self.context.get("request").user
return usr in obj.notificationRead.all()
| 33.8
| 71
| 0.741617
|
acfd4a450e61351b0fa191c44da5674e20be328b
| 51
|
py
|
Python
|
test_main.py
|
mengzhidu/python-nose-2016-10
|
42199e3e71c15c68f9e534268afff6a848607f1b
|
[
"MIT"
] | null | null | null |
test_main.py
|
mengzhidu/python-nose-2016-10
|
42199e3e71c15c68f9e534268afff6a848607f1b
|
[
"MIT"
] | null | null | null |
test_main.py
|
mengzhidu/python-nose-2016-10
|
42199e3e71c15c68f9e534268afff6a848607f1b
|
[
"MIT"
] | null | null | null |
#!/usr/local/python
def test():
assert 3 == 3
| 10.2
| 19
| 0.568627
|
acfd4b5c1a5178c5845a7329f86bbdb4830e87f9
| 12,260
|
py
|
Python
|
bin/iterate_davis2011kinase.py
|
samgoldman97/kinase-cpi-reanalysis
|
e45e055280b51abee95e2d15da91577a913328f2
|
[
"MIT"
] | 19
|
2020-09-28T06:05:17.000Z
|
2022-03-24T22:27:19.000Z
|
bin/iterate_davis2011kinase.py
|
samgoldman97/kinase-cpi-reanalysis
|
e45e055280b51abee95e2d15da91577a913328f2
|
[
"MIT"
] | 3
|
2020-11-24T15:52:17.000Z
|
2022-02-09T23:53:47.000Z
|
bin/iterate_davis2011kinase.py
|
brianhie/uncertainty
|
2a9dd1966632abefa94ecf4c5cf33020cca9d903
|
[
"MIT"
] | 7
|
2020-08-31T01:46:57.000Z
|
2022-02-26T00:58:59.000Z
|
from utils import tprint, plt
import numpy as np
from scipy.stats import rankdata
import sys
from gaussian_process import SparseGPRegressor
from hybrid import HybridMLPEnsembleGP
from process_davis2011kinase import process, visualize_heatmap
from train_davis2011kinase import train
def acquisition_rank(y_pred, var_pred, beta=1.):
return rankdata(y_pred) + (beta * rankdata(-var_pred))
def acquisition_ucb(y_pred, var_pred, beta=1.):
return y_pred - (beta * var_pred)
def acquisition_scatter(y_unk_pred, var_unk_pred, acquisition, regress_type):
    """Scatter-plot predicted score vs. variance, colored by acquisition.

    Saves the figure to figures/acquisition_unknown_<regress_type>.png.
    Does not modify its arguments.
    """
    # Clip extreme predictions for display only. Bug fix: slicing a numpy
    # array with ``[:]`` returns a *view*, so the original code mutated the
    # caller's prediction array when clipping; make an explicit copy instead.
    y_unk_pred = np.array(y_unk_pred, copy=True)
    y_unk_pred[y_unk_pred > 10000] = 10000
    plt.figure()
    plt.scatter(y_unk_pred, var_unk_pred, alpha=0.5, c=-acquisition,
                cmap='hot')
    plt.title(regress_type.title())
    plt.xlabel('Predicted score')
    plt.ylabel('Variance')
    plt.savefig('figures/acquisition_unknown_{}.png'
                .format(regress_type), dpi=200)
    plt.close()
def debug_selection(regress_type='gp'):
    """Load saved predictions for a model and inspect acquisition behavior.

    Debug helper: reads prediction/variance dumps from target/, plots the
    acquisition landscape for several beta settings, then prints the Kd
    prediction of the top-ranked candidate for beta in 1..10. Exits the
    process when done.
    """
    y_unk_pred = np.loadtxt('target/ypred_unknown_regressors{}.txt'
                            .format(regress_type))
    var_unk_pred = np.loadtxt('target/variance_unknown_regressors{}.txt'
                              .format(regress_type))
    for beta in [ 'rank', 100000, 500000, 1000000, ]:
        if beta == 'rank':
            acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
        else:
            acquisition = acquisition_ucb(y_unk_pred, var_unk_pred, beta=beta)
        acquisition_scatter(y_unk_pred, var_unk_pred, acquisition, regress_type)
    for beta in range(1, 11):
        acquisition = acquisition_rank(y_unk_pred, var_unk_pred, beta=beta)
        # Bug fix: the original referenced an undefined name ``y_obs_pred``
        # here (NameError); the loaded predictions are ``y_unk_pred``.
        print('beta: {}, Kd: {}'.format(beta, y_unk_pred[np.argmax(acquisition)]))
    exit()
def select_candidates(point=False, **kwargs):
    """Select the top-ranked unknown pairs by the rank acquisition function.

    Required kwargs: y_unk_pred/var_unk_pred (predictions and variances),
    X_unk, y_unk, idx_unk (unknown pool), n_candidates, chems, prots
    (name lists). Optional kwarg 'beta' (default 20.) weights the
    uncertainty ranks. If ``point`` is True, uncertainty is ignored
    (beta forced to 0). Logs each acquisition and returns a list of
    selected positions into the unknown pool.
    """
    y_unk_pred = kwargs['y_unk_pred']
    var_unk_pred = kwargs['var_unk_pred']
    X_unk = kwargs['X_unk']
    y_unk = kwargs['y_unk']
    idx_unk = kwargs['idx_unk']
    n_candidates = kwargs['n_candidates']
    chems = kwargs['chems']
    prots = kwargs['prots']
    if 'beta' in kwargs:
        beta = kwargs['beta']
    else:
        beta = 20.
    if point:
        tprint('Exploiting (using point prediction only)...')
        acquisition = acquisition_rank(y_unk_pred, var_unk_pred, beta=0.)
    else:
        tprint('Exploiting...')
        acquisition = acquisition_rank(y_unk_pred, var_unk_pred, beta=beta)
    # Highest acquisition score first.
    max_acqs = np.argsort(-acquisition)[:n_candidates]
    for max_acq in max_acqs:
        # Chemical and protein indices of the selected pair.
        i, j = idx_unk[max_acq]
        chem = chems[i]
        prot = prots[j]
        if y_unk is None:
            # No ground truth available; report prediction and uncertainty.
            tprint('\tAcquire {} {} <--> {} with predicted Kd value {:.3f}'
                   ' and uncertainty {:.3f}'
                   .format((i, j), chem, prot, y_unk_pred[max_acq],
                           var_unk_pred[max_acq]**2))
        else:
            tprint('\tAcquire {} {} <--> {} with real Kd value {}'
                   .format((i, j), chem, prot, y_unk[max_acq]))
    return list(max_acqs)
def select_candidates_per_quadrant(explore=False, **kwargs):
    """Select top candidates separately within each dataset quadrant.

    Quadrants ('side', 'repurpose', 'novel') are provided as index sets in
    kwargs['idx_<name>']. In explore mode, beta is swept from 1 to
    n_candidates and the deduplicated per-beta winners are taken per
    quadrant; otherwise the top n_candidates by rank acquisition are taken.
    Returns a list of selected positions into the full unknown pool.
    """
    y_unk_pred = kwargs['y_unk_pred']
    var_unk_pred = kwargs['var_unk_pred']
    X_unk = kwargs['X_unk']
    y_unk = kwargs['y_unk']
    idx_unk = kwargs['idx_unk']
    n_candidates = kwargs['n_candidates']
    chems = kwargs['chems']
    prots = kwargs['prots']
    acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
    acquired = []
    quad_names = [ 'side', 'repurpose', 'novel' ]
    # Keep original pool positions so per-quadrant picks can be mapped back.
    orig_idx = np.array(list(range(X_unk.shape[0])))
    for quad_name in quad_names:
        if explore:
            tprint('Exploring quadrant {}'.format(quad_name))
        else:
            tprint('Considering quadrant {}'.format(quad_name))
        # Positions (within the unknown pool) belonging to this quadrant.
        quad = [ i for i, idx in enumerate(idx_unk)
                 if idx in set(kwargs['idx_' + quad_name]) ]
        y_unk_quad = y_unk_pred[quad]
        var_unk_quad = var_unk_pred[quad]
        idx_unk_quad = [ idx for i, idx in enumerate(idx_unk)
                         if idx in set(kwargs['idx_' + quad_name]) ]
        if explore:
            # Sweep beta (passed positionally to acquisition_rank) and keep
            # the distinct argmax winners.
            max_acqs = sorted(set([
                np.argmax(acquisition_rank(y_unk_quad, var_unk_quad, cand))
                for cand in range(1, n_candidates + 1)
            ]))
        else:
            max_acqs = np.argsort(-acquisition[quad])[:n_candidates]
        for max_acq in max_acqs:
            i, j = idx_unk_quad[max_acq]
            chem = chems[i]
            prot = prots[j]
            if y_unk is None:
                tprint('\tAcquire {} {} <--> {} with predicted Kd value {}'
                       .format((i, j), chem, prot, y_unk_quad[max_acq]))
            else:
                tprint('\tAcquire {} {} <--> {} with real Kd value {}'
                       .format((i, j), chem, prot, y_unk[quad][max_acq]))
        # Translate quadrant-local picks back to pool positions.
        acquired += list(orig_idx[quad][max_acqs])
    return acquired
def select_candidates_per_protein(**kwargs):
    """Select the top n_candidates unknown pairs for each protein.

    For every protein, ranks only the unknown pairs involving it by the
    rank acquisition function and acquires the best n_candidates. Returns
    a list of selected positions into the full unknown pool.
    """
    y_unk_pred = kwargs['y_unk_pred']
    var_unk_pred = kwargs['var_unk_pred']
    X_unk = kwargs['X_unk']
    y_unk = kwargs['y_unk']
    idx_unk = kwargs['idx_unk']
    chems = kwargs['chems']
    prots = kwargs['prots']
    n_candidates = kwargs['n_candidates']
    acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
    acquired = []
    # Pool positions, used to map boolean-masked picks back to the pool.
    orig_idx = np.array(list(range(X_unk.shape[0])))
    for prot_idx, prot in enumerate(prots):
        # Boolean mask over the unknown pool for pairs with this protein.
        involves_prot = [ j == prot_idx for i, j in idx_unk ]
        idx_unk_prot = [ (i, j) for i, j in idx_unk if j == prot_idx ]
        max_acqs = np.argsort(-acquisition[involves_prot])[:n_candidates]
        tprint('Protein {}'.format(prot))
        for max_acq in max_acqs:
            i, j = idx_unk_prot[max_acq]
            chem = chems[i]
            prot = prots[j]
            if y_unk is None:
                tprint('\tAcquire {} {} <--> {} with predicted Kd value {:.3f}'
                       ' and uncertainty {:.3f}'
                       .format((i, j), chem, prot, y_unk_pred[involves_prot][max_acq],
                               var_unk_pred[involves_prot][max_acq]**2))
            else:
                tprint('\tAcquire {} {} <--> {} with real Kd value {}'
                       .format((i, j), chem, prot, y_unk[involves_prot][max_acq]))
            acquired.append(orig_idx[involves_prot][max_acq])
    return acquired
def select_candidates_per_partition(**kwargs):
    """Acquire one candidate from each partition of the unknown space.

    Partitions are taken from kwargs['partition'] when present; otherwise
    the chemicals are clustered with k-means (n_candidates clusters) and
    each partition holds the unknown pairs whose chemical falls in one
    cluster. After acquisition, partition indices are remapped to match
    the shrunken unknown pool and written back into kwargs.
    Returns (acquired_positions, kwargs).
    """
    y_unk_pred = kwargs['y_unk_pred']
    var_unk_pred = kwargs['var_unk_pred']
    X_unk = kwargs['X_unk']
    y_unk = kwargs['y_unk']
    idx_unk = kwargs['idx_unk']
    n_partitions = kwargs['n_candidates']
    chems = kwargs['chems']
    prots = kwargs['prots']
    chem2feature = kwargs['chem2feature']
    acquisition = acquisition_rank(y_unk_pred, var_unk_pred)
    if 'partition' in kwargs:
        partition = kwargs['partition']
    else:
        # Partition unknown space using k-means on chemicals.
        from sklearn.cluster import KMeans
        labels = KMeans(
            n_clusters=n_partitions,
            init='k-means++',
            n_init=3,
            random_state=10,
        ).fit_predict(np.array([
            chem2feature[chem] for chem in chems
        ]))
        partition = []
        for p in range(n_partitions):
            partition.append([
                idx for idx, (i, j) in enumerate(idx_unk)
                if labels[i] == p
            ])
    # Maps old pool positions to their positions after acquisition removal;
    # acquired positions are marked with None.
    orig2new_idx = { i: i for i in range(X_unk.shape[0]) }
    for pi in range(len(partition)):
        if len(partition[pi]) == 0:
            tprint('Partition {} is empty'.format(pi))
            continue
        partition_pi = set(list(partition[pi]))
        idx_unk_part = [ idx for i, idx in enumerate(idx_unk)
                         if i in partition_pi ]
        # Best-scoring pair within this partition.
        max_acq = np.argmax(acquisition[partition[pi]])
        i, j = idx_unk_part[max_acq]
        chem = chems[i]
        prot = prots[j]
        tprint('Partition {}'.format(pi))
        if y_unk is None:
            tprint('\tAcquire {} {} <--> {} with predicted Kd value {:.3f}'
                   ' and uncertainty {:.3f}'
                   .format((i, j), chem, prot, y_unk_pred[partition[pi]][max_acq],
                           var_unk_pred[partition[pi]][max_acq]**2))
        else:
            tprint('\tAcquire {} {} <--> {} with real Kd value {}'
                   .format((i, j), chem, prot, y_unk[partition[pi]][max_acq]))
        orig_max_acq = partition[pi][max_acq]
        # Shift positions after the acquired one down by one.
        for i in orig2new_idx:
            if i == orig_max_acq:
                orig2new_idx[i] = None
            elif orig2new_idx[i] is None:
                pass
            elif i > orig_max_acq:
                orig2new_idx[i] -= 1
    # Acquire one point per partition.
    acquired = sorted([ i for i in orig2new_idx if orig2new_idx[i] is None ])
    # Make sure new partition indices match new unknown dataset.
    for pi in range(len(partition)):
        partition[pi] = np.array([
            orig2new_idx[p] for p in partition[pi]
            if orig2new_idx[p] is not None
        ])
    kwargs['partition'] = partition
    return acquired, kwargs
def acquire(**kwargs):
    """Dispatch to the acquisition strategy named by kwargs['scheme'].

    Supported schemes: 'exploit' (default), 'pointexploit', 'explore',
    'quad', 'quadexplore', 'perprot', 'partition'. Defaults of
    scheme='exploit' and n_candidates=1 are written back into kwargs so
    later iterations see them. Returns (acquired_positions, kwargs);
    the 'partition' scheme may also update kwargs['partition'].
    Raises ValueError for an unknown scheme (the original fell through
    and raised an opaque NameError instead).
    """
    scheme = kwargs.setdefault('scheme', 'exploit')
    kwargs.setdefault('n_candidates', 1)
    if scheme == 'exploit':
        acquired = select_candidates(**kwargs)
    elif scheme == 'pointexploit':
        acquired = select_candidates(point=True, **kwargs)
    elif scheme == 'explore':
        # NOTE(review): select_candidates has no ``explore`` parameter, so
        # this flag lands in its **kwargs unused -- preserved as-is.
        acquired = select_candidates(explore=True, **kwargs)
    elif scheme == 'quad':
        acquired = select_candidates_per_quadrant(**kwargs)
    elif scheme == 'quadexplore':
        acquired = select_candidates_per_quadrant(explore=True, **kwargs)
    elif scheme == 'perprot':
        acquired = select_candidates_per_protein(**kwargs)
    elif scheme == 'partition':
        acquired, kwargs = select_candidates_per_partition(**kwargs)
    else:
        raise ValueError('Unknown acquisition scheme: {}'.format(scheme))
    return acquired, kwargs
def iterate(**kwargs):
    """Run one active-learning round: predict, acquire, move points.

    Predicts over the unknown pool with kwargs['regressor'] (index-based
    input for 'cmf', feature-based otherwise), selects points via
    acquire(), then moves them from the unknown arrays (X_unk, y_unk,
    idx_unk) into the observed arrays (X_obs, y_obs, idx_obs).
    Returns the updated kwargs.
    """
    prots = kwargs['prots']
    X_obs = kwargs['X_obs']
    y_obs = kwargs['y_obs']
    idx_obs = kwargs['idx_obs']
    X_unk = kwargs['X_unk']
    y_unk = kwargs['y_unk']
    idx_unk = kwargs['idx_unk']
    regressor = kwargs['regressor']
    regress_type = kwargs['regress_type']
    if regress_type == 'cmf':
        # Collaborative matrix factorization predicts from pair indices.
        kwargs['y_unk_pred'] = regressor.predict(idx_unk)
    else:
        kwargs['y_unk_pred'] = regressor.predict(X_unk)
    # Regressors expose per-prediction uncertainties after predict().
    kwargs['var_unk_pred'] = regressor.uncertainties_
    acquired, kwargs = acquire(**kwargs)
    # Reset observations.
    X_acquired = X_unk[acquired]
    y_acquired = y_unk[acquired]
    X_obs = np.vstack((X_obs, X_acquired))
    y_obs = np.hstack((y_obs, y_acquired))
    [ idx_obs.append(idx_unk[a]) for a in acquired ]
    # Reset unknowns.
    unacquired = [ i for i in range(X_unk.shape[0]) if i not in set(acquired) ]
    X_unk = X_unk[unacquired]
    y_unk = y_unk[unacquired]
    idx_unk = [ idx for i, idx in enumerate(idx_unk) if i not in set(acquired) ]
    kwargs['X_obs'] = X_obs
    kwargs['y_obs'] = y_obs
    kwargs['idx_obs'] = idx_obs
    kwargs['X_unk'] = X_unk
    kwargs['y_unk'] = y_unk
    kwargs['idx_unk'] = idx_unk
    return kwargs
if __name__ == '__main__':
    #debug_selection('hybrid')

    # Load the Davis 2011 kinase dataset and its precomputed splits.
    param_dict = process()

    # Command-line interface: model, acquisition strategy, batch size.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('regress_type', help='model to use')
    parser.add_argument('scheme', help='acquisition strategy')
    parser.add_argument('n_candidates', type=int, help='number to acquire')
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--beta', type=float, default=1,
                        help='explore/exploit tradeoff parameter')
    args = parser.parse_args()

    param_dict['regress_type'] = args.regress_type
    param_dict['scheme'] = args.scheme
    param_dict['n_candidates'] = args.n_candidates
    param_dict['seed'] = args.seed
    param_dict['beta'] = args.beta

    # Alternate training and acquisition for n_iter rounds.
    n_iter = 1
    for i in range(n_iter):
        tprint('Iteration {}'.format(i))
        param_dict = train(**param_dict)
        param_dict = iterate(**param_dict)
| 32.010444
| 86
| 0.598777
|
acfd4b79d963b39a99d3995286f57077761d28c9
| 13,693
|
py
|
Python
|
dojo/unittests/test_duplication_loops.py
|
brunoduruzup/django-DefectDojo
|
cd598b44f1c44ca2a05fdf95f99c0d526509f656
|
[
"BSD-3-Clause"
] | 1
|
2021-06-22T21:15:50.000Z
|
2021-06-22T21:15:50.000Z
|
dojo/unittests/test_duplication_loops.py
|
brunoduruzup/django-DefectDojo
|
cd598b44f1c44ca2a05fdf95f99c0d526509f656
|
[
"BSD-3-Clause"
] | 47
|
2021-09-15T12:19:19.000Z
|
2022-03-31T12:20:02.000Z
|
dojo/unittests/test_duplication_loops.py
|
Hijerboa/django-DefectDojo
|
3aea3bc3406f860c0842b0bf8800efe2c86bf81b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.test import TestCase
from dojo.utils import set_duplicate
from dojo.management.commands.fix_loop_duplicates import fix_loop_duplicates
from dojo.models import Finding
import logging
deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
class TestDuplication(TestCase):
    """Exercise finding-duplicate bookkeeping and loop repair.

    Covers set_duplicate() invariants, delete cascades between originals
    and duplicates, and fix_loop_duplicates() breaking self-references
    and 2-, 3- and 4-node duplicate cycles. Several tests call
    super(Finding, ...).save() deliberately to bypass Finding.save() and
    persist inconsistent (looping) state for the repair command to fix.
    """
    fixtures = ['dojo_testdata.json']

    def setUp(self):
        """Clone three fixture findings (A, B, C) with duplicate state reset."""
        self.finding_a = Finding.objects.get(id=2)
        self.finding_a.pk = None
        self.finding_a.duplicate = False
        self.finding_a.duplicate_finding = None
        self.finding_a.save()
        self.finding_b = Finding.objects.get(id=3)
        self.finding_b.pk = None
        self.finding_b.duplicate = False
        self.finding_b.duplicate_finding = None
        self.finding_b.save()
        self.finding_c = Finding.objects.get(id=4)
        self.finding_c.duplicate = False
        self.finding_c.duplicate_finding = None
        self.finding_c.pk = None
        self.finding_c.save()

    def tearDown(self):
        """Delete whichever clones still exist (some tests delete them)."""
        if self.finding_a.id:
            self.finding_a.delete()
        if self.finding_b.id:
            self.finding_b.delete()
        if self.finding_c.id:
            self.finding_c.delete()

    # Set A as duplicate of B and check both directions
    def test_set_duplicate_basic(self):
        set_duplicate(self.finding_a, self.finding_b)
        self.assertTrue(self.finding_a.duplicate)
        self.assertFalse(self.finding_b.duplicate)
        self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_b.id)
        self.assertEqual(self.finding_b.duplicate_finding, None)
        self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id)
        self.assertEqual(self.finding_a.duplicate_finding_set().count(), 1)
        self.assertEqual(self.finding_b.duplicate_finding_set().count(), 1)
        self.assertEqual(self.finding_b.duplicate_finding_set().first().id, self.finding_a.id)

    # A duplicate should not be considered to be an original for another finding
    def test_set_duplicate_exception_1(self):
        self.finding_a.duplicate = True
        self.finding_a.save()
        with self.assertRaisesRegex(Exception, "Existing finding is a duplicate"):
            set_duplicate(self.finding_b, self.finding_a)

    # A finding should never be the duplicate of itself
    def test_set_duplicate_exception_2(self):
        with self.assertRaisesRegex(Exception, "Can not add duplicate to itself"):
            set_duplicate(self.finding_b, self.finding_b)

    # Two duplicate findings can not be duplicates of each other as well
    def test_set_duplicate_exception_3(self):
        set_duplicate(self.finding_a, self.finding_b)
        set_duplicate(self.finding_c, self.finding_b)
        with self.assertRaisesRegex(Exception, "Existing finding is a duplicate"):
            set_duplicate(self.finding_a, self.finding_c)

    # Merge duplicates: If the original of a dupicate is now considered to be a duplicate of a new original the old duplicate should be appended too
    def test_set_duplicate_exception_merge(self):
        # A -> B
        set_duplicate(self.finding_a, self.finding_b)
        # B -> C
        set_duplicate(self.finding_b, self.finding_c)
        # Reload A to pick up the re-pointing done by set_duplicate.
        self.finding_a = Finding.objects.get(id=self.finding_a.id)
        # A -> C and B -> C
        self.assertTrue(self.finding_b.duplicate)
        self.assertTrue(self.finding_a.duplicate)
        self.assertFalse(self.finding_c.duplicate)
        self.assertEqual(self.finding_b.duplicate_finding.id, self.finding_c.id)
        self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_c.id)
        self.assertEqual(self.finding_c.duplicate_finding, None)
        self.assertEqual(self.finding_a.duplicate_finding_set().count(), 2)
        self.assertEqual(self.finding_b.duplicate_finding_set().count(), 2)
        self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_c.id)

    # if a duplicate is deleted the original should still be present
    def test_set_duplicate_exception_delete_1(self):
        set_duplicate(self.finding_a, self.finding_b)
        self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id)
        self.finding_a.delete()
        self.assertEqual(self.finding_a.id, None)
        self.assertEqual(self.finding_b.original_finding.first(), None)

    # if the original is deleted all duplicates should be deleted
    def test_set_duplicate_exception_delete_2(self):
        set_duplicate(self.finding_a, self.finding_b)
        self.assertEqual(self.finding_b.original_finding.first().id, self.finding_a.id)
        self.finding_b.delete()
        with self.assertRaises(Finding.DoesNotExist):
            self.finding_a = Finding.objects.get(id=self.finding_a.id)
        self.assertEqual(self.finding_b.id, None)

    def test_loop_relations_for_one(self):
        """A finding marked as a duplicate of itself is repaired."""
        # B -> B
        self.finding_b.duplicate = True
        self.finding_b.duplicate_finding = self.finding_b
        super(Finding, self.finding_b).save()
        candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count()
        self.assertEqual(candidates, 1)
        loop_count = fix_loop_duplicates()
        self.assertEqual(loop_count, 0)
        candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count()
        self.assertEqual(candidates, 0)

    # if two findings are connected with each other the fix_loop function should detect and remove the loop
    def test_loop_relations_for_two(self):
        # A -> B and B -> A (a two-node cycle)
        set_duplicate(self.finding_a, self.finding_b)
        self.finding_b.duplicate = True
        self.finding_b.duplicate_finding = self.finding_a
        super(Finding, self.finding_a).save()
        super(Finding, self.finding_b).save()
        loop_count = fix_loop_duplicates()
        self.assertEqual(loop_count, 0)
        candidates = Finding.objects.filter(duplicate_finding__isnull=False, original_finding__isnull=False).count()
        self.assertEqual(candidates, 0)
        # Get latest status
        self.finding_a = Finding.objects.get(id=self.finding_a.id)
        self.finding_b = Finding.objects.get(id=self.finding_b.id)
        # assert that A -> B (or B -> A)?
        if self.finding_a.duplicate_finding:
            self.assertTrue(self.finding_a.duplicate)
            self.assertEqual(self.finding_a.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_a.duplicate)
            self.assertEqual(self.finding_a.original_finding.count(), 1)
        if self.finding_b.duplicate_finding:
            self.assertTrue(self.finding_b.duplicate)
            self.assertEqual(self.finding_b.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_b.duplicate)
            self.assertEqual(self.finding_b.original_finding.count(), 1)

    # Similar Loop detection and deletion for three findings
    def test_loop_relations_for_three(self):
        # A -> B, B -> C, C -> A
        set_duplicate(self.finding_a, self.finding_b)
        self.finding_b.duplicate = True
        self.finding_b.duplicate_finding = self.finding_c
        self.finding_c.duplicate = True
        self.finding_c.duplicate_finding = self.finding_a
        super(Finding, self.finding_a).save()
        super(Finding, self.finding_b).save()
        super(Finding, self.finding_c).save()
        loop_count = fix_loop_duplicates()
        self.assertEqual(loop_count, 0)
        # Get latest status
        self.finding_a = Finding.objects.get(id=self.finding_a.id)
        self.finding_b = Finding.objects.get(id=self.finding_b.id)
        self.finding_c = Finding.objects.get(id=self.finding_c.id)
        # Exactly one finding remains the original (with 2 duplicates).
        if self.finding_a.duplicate_finding:
            self.assertTrue(self.finding_a.duplicate)
            self.assertEqual(self.finding_a.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_a.duplicate)
            self.assertEqual(self.finding_a.original_finding.count(), 2)
        if self.finding_b.duplicate_finding:
            self.assertTrue(self.finding_b.duplicate)
            self.assertEqual(self.finding_b.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_b.duplicate)
            self.assertEqual(self.finding_b.original_finding.count(), 2)
        if self.finding_c.duplicate_finding:
            self.assertTrue(self.finding_c.duplicate)
            self.assertEqual(self.finding_c.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_c.duplicate)
            self.assertEqual(self.finding_c.original_finding.count(), 2)

    # Another loop-test for 4 findings
    def test_loop_relations_for_four(self):
        # A fourth clone is needed only for this test.
        self.finding_d = Finding.objects.get(id=4)
        self.finding_d.pk = None
        self.finding_d.duplicate = False
        self.finding_d.duplicate_finding = None
        self.finding_d.save()
        # A -> B, B -> C, C -> D, D -> A
        set_duplicate(self.finding_a, self.finding_b)
        self.finding_b.duplicate = True
        self.finding_b.duplicate_finding = self.finding_c
        self.finding_c.duplicate = True
        self.finding_c.duplicate_finding = self.finding_d
        self.finding_d.duplicate = True
        self.finding_d.duplicate_finding = self.finding_a
        super(Finding, self.finding_a).save()
        super(Finding, self.finding_b).save()
        super(Finding, self.finding_c).save()
        super(Finding, self.finding_d).save()
        loop_count = fix_loop_duplicates()
        self.assertEqual(loop_count, 0)
        # Get latest status
        self.finding_a = Finding.objects.get(id=self.finding_a.id)
        self.finding_b = Finding.objects.get(id=self.finding_b.id)
        self.finding_c = Finding.objects.get(id=self.finding_c.id)
        self.finding_d = Finding.objects.get(id=self.finding_d.id)
        if self.finding_a.duplicate_finding:
            self.assertTrue(self.finding_a.duplicate)
            self.assertEqual(self.finding_a.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_a.duplicate)
            self.assertEqual(self.finding_a.original_finding.count(), 3)
        if self.finding_b.duplicate_finding:
            self.assertTrue(self.finding_b.duplicate)
            self.assertEqual(self.finding_b.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_b.duplicate)
            self.assertEqual(self.finding_b.original_finding.count(), 3)
        if self.finding_c.duplicate_finding:
            self.assertTrue(self.finding_c.duplicate)
            self.assertEqual(self.finding_c.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_c.duplicate)
            self.assertEqual(self.finding_c.original_finding.count(), 3)
        if self.finding_d.duplicate_finding:
            self.assertTrue(self.finding_d.duplicate)
            self.assertEqual(self.finding_d.original_finding.count(), 0)
        else:
            self.assertFalse(self.finding_d.duplicate)
            self.assertEqual(self.finding_d.original_finding.count(), 3)

    # A chain (not a loop) of duplicates is flattened onto one original
    def test_list_relations_for_three(self):
        # A -> B, B -> C
        set_duplicate(self.finding_a, self.finding_b)
        self.finding_b.duplicate = True
        self.finding_b.duplicate_finding = self.finding_c
        super(Finding, self.finding_a).save()
        super(Finding, self.finding_b).save()
        super(Finding, self.finding_c).save()
        loop_count = fix_loop_duplicates()
        self.assertEqual(loop_count, 0)
        self.finding_a = Finding.objects.get(id=self.finding_a.id)
        self.finding_b = Finding.objects.get(id=self.finding_b.id)
        self.finding_c = Finding.objects.get(id=self.finding_c.id)
        # A -> C, B -> C
        self.assertTrue(self.finding_b.duplicate)
        self.assertTrue(self.finding_a.duplicate)
        self.assertFalse(self.finding_c.duplicate)
        self.assertEqual(self.finding_b.duplicate_finding.id, self.finding_c.id)
        self.assertEqual(self.finding_a.duplicate_finding.id, self.finding_c.id)
        self.assertEqual(self.finding_c.duplicate_finding, None)
        self.assertEqual(self.finding_a.duplicate_finding_set().count(), 2)
        self.assertEqual(self.finding_b.duplicate_finding_set().count(), 2)

    def test_list_relations_for_three_reverse(self):
        """Same chain flattening with the chain built in reverse order."""
        # C -> B, B -> A
        set_duplicate(self.finding_c, self.finding_b)
        self.finding_b.duplicate = True
        self.finding_b.duplicate_finding = self.finding_a
        super(Finding, self.finding_a).save()
        super(Finding, self.finding_b).save()
        super(Finding, self.finding_c).save()
        loop_count = fix_loop_duplicates()
        self.assertEqual(loop_count, 0)
        self.finding_a = Finding.objects.get(id=self.finding_a.id)
        self.finding_b = Finding.objects.get(id=self.finding_b.id)
        self.finding_c = Finding.objects.get(id=self.finding_c.id)
        # B -> A, C -> A
        self.assertTrue(self.finding_b.duplicate)
        self.assertTrue(self.finding_c.duplicate)
        self.assertFalse(self.finding_a.duplicate)
        self.assertEqual(self.finding_b.duplicate_finding.id, self.finding_a.id)
        self.assertEqual(self.finding_c.duplicate_finding.id, self.finding_a.id)
        self.assertEqual(self.finding_a.duplicate_finding, None)
        self.assertEqual(self.finding_c.duplicate_finding_set().count(), 2)
        self.assertEqual(self.finding_b.duplicate_finding_set().count(), 2)
| 44.170968
| 148
| 0.688527
|
acfd4c1e2833e28fc9a0f3f8eb93b74cfce8242a
| 5,520
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
klymacks/kinsure
|
3c9598ef615371c43f7c730ed705c9296b4532f6
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
klymacks/kinsure
|
3c9598ef615371c43f7c730ed705c9296b4532f6
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
klymacks/kinsure
|
3c9598ef615371c43f7c730ed705c9296b4532f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Maximum number of seed entries to emit.
NSEEDS=512
# Cap on seeds sharing one autonomous system, to limit network concentration.
MAX_SEEDS_PER_ASN=2
# Nodes must report at least this chain height to be considered synced.
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
    ""
}
import re
import sys
import dns.resolver
import collections
# Address formats accepted from the seeder dump.
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
# Accepted user-agent strings (recent SafeInsureCore releases only).
PATTERN_AGENT = re.compile(r"^(/SafeInsureCore:2.2.(0|1|99)/)$")

def parseline(line):
    """Parse one line of the DNS seeder dump into a node-info dict.

    Returns None for lines that are too short, unparseable or localhost
    addresses, or nodes the seeder flagged as bad.
    """
    sline = line.split()
    if len(sline) < 11:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']: # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check
        ip = 0
        for i in range(0,4):
            if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
                return None
            ip = ip + (int(m.group(i+2)) << (8*(3-i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results. Bug fix: split() yields strings, so the original
    # comparison ``sline[1] == 0`` was always False and bad results were
    # never skipped; compare the parsed integer instead.
    if int(sline[1]) == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent (quoted in the dump; may span two tokens).
    if len(sline) > 11:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    """Drop entries whose sort key (address) appears more than once."""
    by_addr = collections.defaultdict(list)
    for entry in ips:
        by_addr[entry['sortkey']].append(entry)
    return [entries[0] for entries in by_addr.values() if len(entries) == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 results per ASN and in total; pass IPv6/onion through.

    ASNs are resolved via Team Cymru's DNS interface; lookups are
    best-effort and a failed lookup drops that entry with a warning on
    stderr. IPv6 entries are currently not ASN-filtered (see TODO).
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            # Query <d.c.b.a>.origin.asn.cymru.com; the TXT answer begins
            # with the AS number.
            query = '.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com'
            answer = [x.to_text()
                      for x in dns.resolver.query(query, 'TXT').response.answer]
            asn = int(answer[0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate; lookup failures stay best-effort.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read a seeder dump from stdin, filter candidates, print seed list."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries that could not be parsed into a valid address.
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))

if __name__ == '__main__':
    main()
| 32.093023
| 186
| 0.567391
|
acfd4c1efa2962bf46d543b666a7e2eb990af382
| 5,357
|
py
|
Python
|
dailyfresh/settings.py
|
LinkanDawang/FreshMallDemo
|
5b8e2d2e8e137f609e8ac1e29ea013bb3ef34edb
|
[
"Apache-2.0"
] | null | null | null |
dailyfresh/settings.py
|
LinkanDawang/FreshMallDemo
|
5b8e2d2e8e137f609e8ac1e29ea013bb3ef34edb
|
[
"Apache-2.0"
] | 5
|
2020-06-05T18:27:41.000Z
|
2022-01-13T00:48:03.000Z
|
dailyfresh/settings.py
|
LinkanDawang/FreshMallDemo
|
5b8e2d2e8e137f609e8ac1e29ea013bb3ef34edb
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for dailyfresh project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# 把apps文件夹添加到导包路径
sys.path.insert(1, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm)-i2x=@fgggqo&1n@6+lh!6cu)rijx#5ejjeh!%)$pi8o#^n2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
'goods',
'orders',
'cart',
'celery_tasks.celery.CeleryConfig' # 添加celery应用
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'dailyfresh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dailyfresh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dailyfresh',
'HOST': 'localhost', # MySQL数据库地址(主)
'USER': 'root',
'PORT': '3306',
'PASSWORD': 'mysql',
}
}
# 数据库主从的读写分离配置
# DATABASE_ROUTERS = ['utils.db_routers.MasterSlaveDBRouter']
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# django自带用户系统,语法格式:应用.用户模型
AUTH_USER_MODEL = 'users.User'
# 指定静态文件存放的目录
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
# 配置发送邮箱验证邮件发件人的信息
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # 导入邮件模块
EMAIL_HOST = 'smtp.163.com' # 发邮件主机
EMAIL_PORT = 25 # 发邮件端口
EMAIL_HOST_USER = '18312913688@163.com' # 授权的邮箱
EMAIL_HOST_PASSWORD = 'admin666' # 邮箱授权时获得的密码,非注册登录密码
EMAIL_FROM = '天天生鲜<18312913688@163.com>' # 发件人抬头
# 缓存, 配置django-redis
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://192.168.3.168:6379/5",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# Session
# http://django-redis-chs.readthedocs.io/zh_CN/latest/#session-backend
# 设置session存储在redis中
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# 如果用户没有登入,会跳转到登入页面
LOGIN_URL = '/users/login'
# 配置django自定义的存储系统
DEFAULT_FILE_STORAGE = 'utils.fastdfs.storage.FastDFSStorage'
# fdfs默认配置
FDFS_CLIENT_CONF = os.path.join(BASE_DIR, './utils/fastdfs/client.conf')
FDFS_SERVER_IP = 'http://192.168.3.168:8888/'
# # 富文本编辑框的配置
# TINYMCE_DEFAULT_CONFIG = {
# 'theme': 'advanced', # 丰富样式
# 'width': 600,
# 'height': 400,
# }
#
# # 支付宝接口地址
# ALIPAY_URL = 'https://openapi.alipaydev.com/gateway.do'
#
# # 搜集静态文件
# STATIC_ROOT = '/home/python/Desktop/static'
#
# # 配置搜索引擎后端
# HAYSTACK_CONNECTIONS = {
# 'default': {
# # 使用whoosh引擎:提示,如果不需要使用jieba框架实现分词,就使用whoosh_backend
# 'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
# # 索引文件路径
# 'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
# }
# }
#
# # 当添加、修改、删除数据时,自动生成索引
# HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
"""CELERY配置"""
CELERY_ACCEPT_CONTENT = ['msgpack']
CELERY_TASK_SERIALIZER = 'msgpack' # Use msgpack may be more speed than json.
CELERY_RESULT_SERIALIZER = 'msgpack'
CELERY_TASK_RESULT_EXPIRES = 60 * 60 * 24
# Using rabbitmq as broker and redis as result backend is a popular combination.
CELERY_BROKER_URL = 'pyamqp://leslie:admin@192.168.3.168:5672/administrator'
CELERY_RESULT_BACKEND = 'redis://192.168.3.168:6379/4'
CELERY_TIMEZONE = 'Asia/Shanghai'
CELERY_ENABLE_UTC = True
CELERYD_MAX_TASKS_PER_CHILD = 5
| 27.055556
| 80
| 0.699272
|
acfd4c22c528956da4a22b9045cd3bb0cbc9b6a6
| 464
|
py
|
Python
|
api/app/utils/database/redis_db.py
|
lucasfusinato/fusi-short
|
d178c3e08b3f349179d530c35e84ab504c99ff44
|
[
"MIT"
] | 7
|
2022-02-04T23:12:19.000Z
|
2022-03-15T13:04:41.000Z
|
api/app/utils/database/redis_db.py
|
lucasfusinato/fusi-short
|
d178c3e08b3f349179d530c35e84ab504c99ff44
|
[
"MIT"
] | null | null | null |
api/app/utils/database/redis_db.py
|
lucasfusinato/fusi-short
|
d178c3e08b3f349179d530c35e84ab504c99ff44
|
[
"MIT"
] | null | null | null |
from typing import Union
class RedisDb(object):
    """Thin wrapper around a Redis connection used as the short-URL store.

    Keys are URL aliases; values are the original links.
    """

    def __init__(self, host: str, port: str):
        """Open a connection to the Redis server at ``host``:``port``."""
        # Imported lazily so the module can be loaded without redis installed.
        import redis
        self.__db = redis.Redis(host=host, port=port)

    def is_short_url_exists(self, alias: str) -> bool:
        """Tell whether ``alias`` is already registered."""
        return self.__db.exists(alias)

    def set_short_url(self, alias: str, link: str) -> None:
        """Register (or overwrite) the link stored under ``alias``."""
        self.__db.set(alias, link)

    def get_short_url_link(self, alias: str) -> Union[str, None]:
        """Return the link stored under ``alias``, or None when absent."""
        return self.__db.get(alias)
| 29
| 65
| 0.650862
|
acfd4cc5f8c775cc11777ab5e7252a382e2ce138
| 17,962
|
py
|
Python
|
src/rotest/management/base_resource.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 26
|
2017-06-11T18:21:17.000Z
|
2021-02-21T20:36:30.000Z
|
src/rotest/management/base_resource.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 143
|
2017-06-29T11:18:35.000Z
|
2021-06-10T17:23:46.000Z
|
src/rotest/management/base_resource.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 11
|
2017-06-12T09:16:14.000Z
|
2021-07-11T23:20:59.000Z
|
"""Define resources model classes.
Defines the basic attributes & interface of any resource type class,
responsible for the resource static & dynamic information.
"""
# pylint: disable=too-many-instance-attributes,no-self-use,broad-except
# pylint: disable=protected-access,unused-argument,ungrouped-imports
from __future__ import absolute_import
import sys
from bdb import BdbQuit
from threading import Thread
import six
from ipdbugger import debug
from attrdict import AttrDict
from future.utils import iteritems
from future.builtins import zip, object
from rotest.common import core_log
from rotest.common.config import ROTEST_WORK_DIR
from rotest.common.utils import parse_config_file
from rotest.common.utils import get_work_dir, get_class_fields
from rotest.common.constants import default_config, DEFAULT_SCHEMA_PATH
from rotest.management.models.resource_data import ResourceData, DataPointer
class ResourceRequest(object):
    """Describe a single resource requirement of a test.

    Attributes:
        name (str): attribute name the locked resource will be assigned to.
        type (type): requested resource class.
        kwargs (dict): extra parameters for the resource query.
        do_unpack (number): sub-resource unpacking mode, one of
            DONT_UNPACK / UNPACK_ONCE / RECURSIVE_UNPACK.
    """
    # Unpacking modes for sub-resources.
    DONT_UNPACK = 0
    UNPACK_ONCE = 1
    RECURSIVE_UNPACK = 2

    def __init__(self, resource_name=None, resource_class=None, **kwargs):
        """Record the requested name, class and query parameters."""
        self.name = resource_name
        self.type = resource_class
        self.kwargs = kwargs
        # By default sub-resources stay attached to the resource itself.
        self.do_unpack = self.DONT_UNPACK

    def __eq__(self, oth):
        """Requests are considered equal when they target the same name."""
        return self.name == oth.name

    def __repr__(self):
        """Describe the request for debugging purposes."""
        return "%s %r of type %r (kwargs=%r)" % (
            self.__class__.__name__, self.name, self.type, self.kwargs)

    def get_type(self, config):
        """Return the requested resource class.

        Args:
            config (dict): the configuration file being used (unused here;
                kept for subclasses that resolve the type dynamically).
        """
        return self.type

    def unpack(self, recursive=False):
        """Request the sub-resources to be assigned to the test as well.

        Args:
            recursive (number): whether to also unpack nested sub-resources.

        Returns:
            ResourceRequest. self, to allow fluent chaining.
        """
        if recursive:
            self.do_unpack = self.RECURSIVE_UNPACK
        else:
            self.do_unpack = self.UNPACK_ONCE
        return self

    def unpack_sub_resources_names(self, recursive):
        """Yield the names of all sub-resources of the requested type."""
        if recursive == self.DONT_UNPACK:
            return
        # First the direct sub-resource names...
        for field_name, _ in get_class_fields(self.type, ResourceRequest):
            yield field_name
        # ...then, if requested, the nested ones.
        if recursive == self.RECURSIVE_UNPACK:
            for _, nested_request in get_class_fields(self.type,
                                                      ResourceRequest):
                for field_name in nested_request.unpack_sub_resources_names(
                        recursive):
                    yield field_name
class ExceptionCatchingThread(Thread):
    """Thread that remembers the exc_info of a failure in its activity.

    After the thread terminates, `traceback_tuple` is None on success or
    the captured ``sys.exc_info()`` triple on failure, letting the spawning
    thread inspect and re-raise the error.
    """

    def __init__(self, *args, **kwargs):
        super(ExceptionCatchingThread, self).__init__(*args, **kwargs)
        # Filled with sys.exc_info() if the thread's activity raises.
        self.traceback_tuple = None

    def run(self):
        """Delegate to Thread.run, capturing any exception's exc_info."""
        try:
            super(ExceptionCatchingThread, self).run()
        except Exception:
            self.traceback_tuple = sys.exc_info()
            # Re-raise so the default thread error reporting still happens.
            raise
class BaseResource(object):
    """Represent the common interface of all the resources.

    To implement a resource, you may override:
    initialize, connect, finalize, validate, create_sub_resources, store_state.

    Also, assign a data container class by setting the
    attribute 'DATA_CLASS', which should point to a subclass of
    :class:`rotest.management.models.resource_data.ResourceData`.
    Resource without a data class (also called 'Services') will be handled
    locally, without involving the server.

    Attributes:
        DATA_CLASS (class): class of the resource's global data container.
        PARALLEL_INITIALIZATION (bool): whether or not to validate and
            initialize sub-resources in other threads.
        logger (logger): resource's logger instance.
        data (ResourceData): assigned data instance.
        config (AttrDict): run configuration.
        work_dir (str): working directory for this resource.
    """
    DATA_CLASS = None
    PARALLEL_INITIALIZATION = False
    # Shared, lazily-created resource client used by the `lock`/`release`
    # shell helpers below.
    _SHELL_CLIENT = None
    _SHELL_REQUEST_NAME = 'shell_resource'

    def __init__(self, data=None, config=None, base_work_dir=ROTEST_WORK_DIR,
                 **kwargs):
        # We use core_log as default logger in case
        # that resource is used outside case.
        self.logger = core_log
        self._prev_loggers = []
        if data is not None:
            self.data = data
            # Mirror the data fields as attributes for convenient access.
            if isinstance(data, ResourceData):
                for field_name, value in iteritems(self.data.get_fields()):
                    setattr(self, field_name, value)
        else:
            # No server-side data: build a local AttrDict from the kwargs
            # (this is the 'service' flavor of resources).
            self.data = AttrDict(**kwargs)
            if 'name' not in self.data:
                self.data.name = "%s-%d" % (self.__class__.__name__, id(self))
            for field_name, field_value in iteritems(self.data):
                setattr(self, field_name, field_value)
        self.config = config
        self.parent = None
        self.work_dir = None
        self._sub_resources = None
        self.set_work_dir(self.name, base_work_dir)
        self.logger.debug("Resource %r work dir was created under %r",
                          self.name, base_work_dir)
        self.set_sub_resources()

    @classmethod
    def request(cls, **kwargs):
        """Create a resource request for an instance of this class."""
        return ResourceRequest(None, cls, **kwargs)

    def create_sub_resources(self):
        """Create and return the sub resources if needed.

        By default, this method searches for sub-resources declared as
        class fields, where the 'data' attribute in the declaration points
        to the name of the sub-resource's data field under the current's data.

        Override and assign sub-resources to fields in the current resource,
        using the 'data' object.

        Returns:
            dict. sub-resources created, name -> instance.
        """
        sub_resources = {}
        for sub_name, sub_request in get_class_fields(self.__class__,
                                                      ResourceRequest):
            sub_class = sub_request.get_type(self.config)
            # Copy so the request's declared kwargs are never mutated.
            actual_kwargs = sub_request.kwargs.copy()
            actual_kwargs['config'] = self.config
            actual_kwargs['base_work_dir'] = self.work_dir
            # Resolve declared data-pointers against this resource's data.
            for key, value in six.iteritems(sub_request.kwargs):
                if isinstance(value, DataPointer):
                    actual_kwargs[key] = value.unwrap_data_pointer(self.data)
            sub_resource = sub_class(**actual_kwargs)
            setattr(self, sub_name, sub_resource)
            sub_resources[sub_name] = sub_resource
        return sub_resources

    def setup_resource(self, skip_init=False, force_initialize=False):
        """Try to initialize the resource.

        Note:
            Initialization failure will cause a finalization attempt.

        Args:
            force_initialize(bool): True to always initialize.
            skip_init (bool): True to skip initialize and validation.
        """
        try:
            self.connect()
        except Exception:
            self.logger.exception("Connecting to %r failed", self.name)
            raise
        if skip_init and not force_initialize:
            self.logger.debug("Skipping validation and initialization")
            return
        try:
            self.logger.debug("Initializing resource %r", self.name)
            self._initialize_resource(force_initialize)
            self.logger.debug("Resource %r was initialized", self.name)
        except Exception:
            # Clean up whatever was set up before re-raising.
            self.logger.exception("Failed initializing %r, calling finalize",
                                  self.name)
            self.finalize()
            raise

    def is_available(self, user_name=""):
        """Return whether resource is available for the given user.

        Args:
            user_name (str): user name to be checked. Empty string means
                available to all.

        Returns:
            bool. determine whether resource is available for the given user.

        Note:
            If this method is called from code then leaf is equal to 'self'.
            If it is being called by BaseResources table in DB then leaf's
            'is_available' method will be called.
        """
        leaf = self.leaf  # 'leaf' is a property.
        if leaf == self:
            # Available when reserved for this user (or nobody) and unowned.
            return self.reserved in [user_name, ""] and self.owner == ""
        return leaf.is_available(user_name)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.data)

    def get_sub_resource_dict(self):
        """Return the dict of all the resource's sub-resources by name."""
        return self._sub_resources

    def get_sub_resources(self):
        """Return an iterable to the resource's sub-resources."""
        return (sub_resource for sub_resource in self._sub_resources.values())

    def set_sub_resources(self):
        """Create and set the sub resources if needed."""
        self._sub_resources = self.create_sub_resources()
        for resource in self.get_sub_resources():
            resource.parent = self

    def _safe_execute(self, callbacks, *args, **kwargs):
        """Executes all the callbacks, even if one or more fails.

        Args:
            callbacks (list): callbacks to be called one after the other (even
                if one of them fails).
            args (list): args passed for each callback when invoked.
            kwargs (dict): keyword-args passed for each callback when invoked.

        Raises:
            RuntimeError: when one, or more, of the callbacks fail.
        """
        error_messages = []
        for callback in callbacks:
            try:
                self.logger.debug("Starting %s", callback)
                callback(*args, **kwargs)
                self.logger.debug("%s ended successfully", callback)
            except Exception as ex:
                # Collect the failure but keep running remaining callbacks.
                self.logger.exception("%s failed", callback)
                error_messages.append("%s: %s" % (callback, ex))
        if len(error_messages) != 0:
            raise RuntimeError("Some of the callbacks have failed. "
                               "Reasons: %s" % "\n".join(error_messages))

    def set_work_dir(self, resource_name, containing_work_dir):
        """Set the work directory under the given case's work directory.

        Args:
            resource_name (str): name of resource.
            containing_work_dir (str): root work directory.
        """
        self.work_dir = get_work_dir(containing_work_dir, resource_name, None)

    def store_state(self, state_dir_path):
        """Hook method for backing up the resource state.

        Args:
            state_dir_path (str): path of state directory to be saved.
        """
        self.logger.debug("Storing resource %r state", self.name)
        # Delegate to sub-resources; all run even if some fail.
        self._safe_execute([resource.store_state for resource
                            in self.get_sub_resources()], state_dir_path)

    def initialize(self):
        """Hook method for setting up the resource before using it.

        Will be called by the resource client once the connection to the
        resource was successful. Override to specify the resource's
        initialization procedure (remember to call 'super' at the beginning).
        """
        self.logger.debug("Initializing resource %r", self.name)

    def connect(self):
        """Setup a connection session to the resource.

        Will be called by the resource client once the
        resource is locked successfully.
        """
        self.logger.debug("Connecting resource %r", self.name)
        for resource in self.get_sub_resources():
            resource.connect()

    def finalize(self):
        """Hook method for cleaning up the resource after using it.

        Will be called by the resource client before the resource is released.
        Override to specify the resource's finalization procedure
        (remember to call 'super' at the end).
        """
        # Create a list of resources finalize calls in a specific order.
        self.logger.debug("Finalizing resource %r", self.name)
        finalization_methods = [resource.finalize for resource
                                in self.get_sub_resources()]
        self._safe_execute(finalization_methods)

    def validate(self):
        """Validate whether the resource is ready for work or not.

        If this method failed, the resource client will call the 'initialize'
        method to setup the resource.

        Returns:
            bool. True if validation succeeded, False otherwise.
        """
        # Default: always report "not ready", so initialize() always runs.
        return False

    def _initialize_resource(self, force_initialize=False):
        """Validate and initialize if needed the resource and subresources."""
        sub_threads = []
        for sub_resource in self.get_sub_resources():
            if self.PARALLEL_INITIALIZATION:
                sub_resource.logger.debug("Initializing %r in a new thread",
                                          sub_resource.name)
                initialize_thread = ExceptionCatchingThread(
                    target=sub_resource._initialize_resource,
                    args=(force_initialize,))
                initialize_thread.start()
                sub_threads.append(initialize_thread)
            else:
                sub_resource._initialize_resource(force_initialize)
        # Wait for all parallel initializations, logging each failure.
        for sub_thread, sub_resource in zip(sub_threads,
                                            self.get_sub_resources()):
            sub_thread.join()
            if sub_thread.traceback_tuple is not None:
                self.logger.error("Got an error while preparing resource %s",
                                  sub_resource.name,
                                  exc_info=sub_thread.traceback_tuple)
        # Re-raise the first recorded failure after all threads finished.
        for sub_thread in sub_threads:
            if sub_thread.traceback_tuple is not None:
                six.reraise(*sub_thread.traceback_tuple)
        if force_initialize or not self.validate():
            if not force_initialize:
                self.logger.debug("Resource %r validation failed",
                                  self.name)
            self.initialize()
        else:
            self.logger.debug("Resource %r skipped initialization",
                              self.name)

    def override_logger(self, new_logger):
        """Replace the resource's logger.

        Args:
            new_logger (logging.Logger): new logger to set.
        """
        if self.logger is new_logger:
            return
        # Keep the previous logger so release_logger can restore it.
        self._prev_loggers.insert(0, self.logger)
        self.logger = new_logger

    def release_logger(self, logger):
        """Revert logger replacement.

        Args:
            logger (logging.Logger): logger to release.
        """
        if self.logger is logger:
            self.logger = self._prev_loggers.pop(0)

    def enable_debug(self):
        """Wrap the resource methods with debugger."""
        debug(self.connect, ignore_exceptions=[KeyboardInterrupt, BdbQuit])
        debug(self.initialize, ignore_exceptions=[KeyboardInterrupt, BdbQuit])
        debug(self.finalize, ignore_exceptions=[KeyboardInterrupt, BdbQuit])
        debug(self.validate, ignore_exceptions=[KeyboardInterrupt, BdbQuit])
        debug(self.store_state, ignore_exceptions=[KeyboardInterrupt, BdbQuit])
        for resource in self.get_sub_resources():
            resource.enable_debug()

    @classmethod
    def lock(cls, config=None, skip_init=False, **kwargs):
        """Lock an instance of this resource class.

        Args:
            config (str): path to the json config file.
            skip_init (bool): whether to skip initialization or not.
            kwargs (dict): additional query parameters for the request,
                e.g. name or group.

        Returns:
            BaseResource. locked and initialized resource, ready for work.
        """
        # These runtime imports are done to avoid cyclic imports.
        from rotest.management.client.manager import ClientResourceManager
        if BaseResource._SHELL_CLIENT is None:
            BaseResource._SHELL_CLIENT = ClientResourceManager()
        resource_request = ResourceRequest(BaseResource._SHELL_REQUEST_NAME,
                                           cls,
                                           **kwargs)
        config_dict = default_config
        if config is not None:
            # Accept either an already-parsed dict or a path to a json file.
            if isinstance(config, dict):
                config_dict = config
            else:
                config_dict = parse_config_file(config, DEFAULT_SCHEMA_PATH)
        result = BaseResource._SHELL_CLIENT.request_resources(
            [resource_request],
            config=config_dict,
            skip_init=skip_init,
            use_previous=False)
        return result[BaseResource._SHELL_REQUEST_NAME]

    def release(self):
        """Release the resource, assuming it was locked with a shell client."""
        if BaseResource._SHELL_CLIENT is not None:
            BaseResource._SHELL_CLIENT.release_resources([self],
                                                         force_release=True)
| 37.188406
| 79
| 0.610845
|
acfd4df7947abe71941a316481d4ca8f29b81359
| 9,587
|
py
|
Python
|
PreviousVerions/noclassbackup.py
|
cjbcodedump/2084game-finished
|
238c8f7bbaca971cbd2435dbb349852ce61df3d0
|
[
"MIT"
] | null | null | null |
PreviousVerions/noclassbackup.py
|
cjbcodedump/2084game-finished
|
238c8f7bbaca971cbd2435dbb349852ce61df3d0
|
[
"MIT"
] | null | null | null |
PreviousVerions/noclassbackup.py
|
cjbcodedump/2084game-finished
|
238c8f7bbaca971cbd2435dbb349852ce61df3d0
|
[
"MIT"
] | null | null | null |
# Welcome to 2084 ver 0.0.0.4
# Created by Cameron Burton, with help from the Pygame and Python community!
# importing modules
import pygame
from pygame.locals import *
import sys
import time
import pyganim  # used for animations
# initialises pygame, was told this was necessary
pygame.init()
pygame.joystick.init()
pygame.mixer.init()
# Filled with initialized Joystick objects by mainMenu()/mainLoop().
joysticks = []
# by defining these as a variable I can simply change these variables latter to change the resolution rather than edit the program in every bit they're referenced.
WINDOWWIDTH = 1600
WINDOWHEIGHT = 900
# character creation, these are the definitions of what a drawn character will look like, used later.
# Player position (x, y), speed per frame (vel) and sprite size in pixels.
x = 100
y = 100
vel = 10
width = 64
height = 64
direction = 'left'  # start off as left facing
# Movement flags toggled by keyboard/joystick event handlers.
moveleft = moveright = moveup = movedown = False
# loading in characters and enviroment
# Idle sprites are loaded once here and reused every frame.
background = pygame.image.load('sprites/background1.png')
leftidle = pygame.image.load("sprites/left.png")
rightidle = pygame.image.load("sprites/right.png")
upidle = pygame.image.load("sprites/up.png")
downidle = pygame.image.load("sprites/down.png")
# pyganim aminations for character: 3-frame walk cycles per direction.
charanim = {}
charanim["walkleft"] = pyganim.PygAnimation(
    [("sprites/left2.png", 100), ("sprites/left.png", 100), ("sprites/left3.png", 100), ("sprites/left.png", 10)])
charanim["walkright"] = pyganim.PygAnimation(
    [("sprites/right2.png", 100), ("sprites/right.png", 100), ("sprites/right3.png", 100), ("sprites/right.png", 10)])
charanim["walkup"] = pyganim.PygAnimation(
    [("sprites/up2.png", 100), ("sprites/up.png", 100), ("sprites/up3.png", 100), ("sprites/up.png", 10)])
charanim["walkdown"] = pyganim.PygAnimation(
    [("sprites/down2.png", 100), ("sprites/down.png", 100), ("sprites/down3.png", 100), ("sprites/down.png", 10)])
moveConductor = pyganim.PygConductor(charanim)
# mainmenu animations: two-frame blinking splash screen.
mainmenuanim = {}
mainmenuanim["splashscreen"] = pyganim.PygAnimation([("sprites/splash1.png", 500), ("sprites/splash2.png", 500)])
mainmenuConductor = pyganim.PygConductor(mainmenuanim)
# Menu music; mainLoop() swaps in track1.ogg when the game starts.
pygame.mixer.music.load('track0.ogg')
mainwin = pygame.display.set_mode(
    (WINDOWWIDTH, WINDOWHEIGHT))  # this sets height and width of my window and creates a window.
pygame.display.set_caption("2084 Ver 0.1.0.3")  # sets window title
def mainMenu():
    """Show the animated splash screen until the player presses Return
    (which starts the game via mainLoop) or closes the window."""
    # Initialise every attached joystick.
    # Fix: use the loop index i; the original always wrapped joystick 0.
    for i in range(0, pygame.joystick.get_count()):
        joysticks.append(pygame.joystick.Joystick(i))
        joysticks[-1].init()
        print("Detected joystick '", joysticks[-1].get_name(), "'")
    global running
    menulive = True
    pygame.mixer.music.play(-1)
    while menulive:
        mainmenuConductor.play()
        mainmenuanim["splashscreen"].blit(mainwin, (0, 0))
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_RETURN:
                    pygame.mixer.music.stop()
                    mainLoop()
            if event.type == pygame.QUIT:
                # Fix: leave the menu loop before shutting pygame down, so
                # we never call display.update() on a dead display.
                running = False
                menulive = False
                pygame.quit()
                return
        pygame.display.update()
def mainLoop():
    """Run the main game loop: handle keyboard/joystick input, move the
    player inside the window bounds and draw the walking or idle sprite."""
    global running
    global x
    global y
    global vel
    global width
    global height
    global direction
    global moveleft
    global moveright
    global moveup
    global movedown
    # joystick.get_hat( i )
    # Initialise every attached joystick.
    # Fix: use the loop index i; the original always wrapped joystick 0.
    for i in range(0, pygame.joystick.get_count()):
        joysticks.append(pygame.joystick.Joystick(i))
        joysticks[-1].init()
        print("Detected joystick '", joysticks[-1].get_name(), "'")
    running = True
    pygame.mixer.music.load("track1.ogg")
    pygame.mixer.music.play(-1)
    while running:
        mainwin.blit(background, (0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Fix: exit cleanly instead of quitting pygame and then
                # continuing to draw on a dead display.
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:  # KEYDOWN detects if a key is being pressed and is held down
                if event.key == K_ESCAPE:
                    pygame.quit()
                    sys.exit()
                if event.key == K_LEFT or event.key == K_a:
                    moveleft = True
                    moveright = False
                    if not moveup and not movedown:
                        direction = "left"
                elif event.key == K_RIGHT or event.key == K_d:
                    moveleft = False
                    moveright = True
                    if not moveup and not movedown:
                        direction = "right"
                elif event.key == K_UP or event.key == K_w:
                    moveup = True
                    movedown = False
                    if not moveleft and not moveright:
                        direction = "up"
                elif event.key == K_DOWN or event.key == K_s:
                    moveup = False
                    movedown = True
                    if not moveleft and not moveright:
                        direction = "down"
            # The KEYUP event means the user has stopped pressing the key, this is important as it makes movement much more fluid -> see documentation
            elif event.type == KEYUP:
                if event.key == K_LEFT or event.key == K_a:
                    moveleft = False
                    if moveup:
                        direction = "up"
                    if movedown:
                        direction = "down"
                elif event.key == K_RIGHT or event.key == K_d:
                    moveright = False
                    if moveup:
                        direction = "up"
                    if movedown:
                        direction = "down"
                elif event.key == K_UP or event.key == K_w:
                    moveup = False
                    if moveleft:
                        direction = "left"
                    if moveright:
                        direction = "right"
                elif event.key == K_DOWN or event.key == K_s:
                    movedown = False
                    if moveleft:
                        direction = "left"
                    if moveright:
                        direction = "right"
            # Joystick buttons mirror the keyboard handling above.
            elif event.type == pygame.JOYBUTTONDOWN:
                if event.button == 2:
                    moveleft = True
                    moveright = False
                    if not moveup and not movedown:
                        direction = "left"
                elif event.button == 1:
                    moveleft = False
                    moveright = True
                    if not moveup and not movedown:
                        direction = "right"
                elif event.button == 3:
                    moveup = True
                    movedown = False
                    if not moveleft and not moveright:
                        direction = "up"
                elif event.button == 0:
                    moveup = False
                    movedown = True
                    if not moveleft and not moveright:
                        direction = "down"
            elif event.type == pygame.JOYBUTTONUP:
                if event.button == 2:
                    moveleft = False
                    if moveup:
                        direction = "up"
                    if movedown:
                        direction = "down"
                elif event.button == 1:
                    moveright = False
                    if moveup:
                        direction = "up"
                    if movedown:
                        direction = "down"
                elif event.button == 3:
                    moveup = False
                    if moveleft:
                        direction = "left"
                    if moveright:
                        direction = "right"
                elif event.button == 0:
                    movedown = False
                    if moveleft:
                        direction = "left"
                    if moveright:
                        direction = "right"
            elif event.type == pygame.JOYHATMOTION:
                if event.hat == (0, 1):
                    moveup = True
                    movedown = False
                    if not moveleft and not moveright:
                        direction = "up"
        # drawing sprites
        if moveleft or moveright or moveup or movedown:
            moveConductor.play()
            # draws animations for each of the directions
            if direction == "left":
                charanim["walkleft"].blit(mainwin, (x, y))
            elif direction == "right":
                charanim["walkright"].blit(mainwin, (x, y))
            elif direction == "up":
                charanim["walkup"].blit(mainwin, (x, y))
            elif direction == "down":
                charanim["walkdown"].blit(mainwin, (x, y))
            # moving the physicial character, clamped to the window
            if moveleft and x > 0:
                x -= vel
            if moveright and x < (WINDOWWIDTH - 64):
                x += vel
            if moveup and y > 0:
                y -= vel
            if movedown and y < (WINDOWHEIGHT - 64):
                y += vel
        else:
            moveConductor.stop()
            # Fix: blit the idle surfaces loaded once at startup instead of
            # re-reading the image files from disk on every frame.
            if direction == "left":
                mainwin.blit(leftidle, (x, y))
            elif direction == "right":
                mainwin.blit(rightidle, (x, y))
            elif direction == "up":
                mainwin.blit(upidle, (x, y))
            elif direction == "down":
                mainwin.blit(downidle, (x, y))
        pygame.display.update()
# Entry point: show the splash menu (which starts the game loop on Return),
# then shut pygame down when the menu returns.
mainMenu()
pygame.quit()
| 37.893281
| 163
| 0.515385
|
acfd4fe3d90f7021f65d80166f445fc474c89a27
| 1,457
|
py
|
Python
|
input_data.py
|
tonib/tokens-rnn-tensorflow
|
9da815d3f9f4d215dd4d7041e5638eeb3ac51a0a
|
[
"MIT"
] | null | null | null |
input_data.py
|
tonib/tokens-rnn-tensorflow
|
9da815d3f9f4d215dd4d7041e5638eeb3ac51a0a
|
[
"MIT"
] | null | null | null |
input_data.py
|
tonib/tokens-rnn-tensorflow
|
9da815d3f9f4d215dd4d7041e5638eeb3ac51a0a
|
[
"MIT"
] | null | null | null |
from enum import Enum
import os
from typing import List
class InputData:
    """The train data: a plain text file.

    The text is kept either as one string (character mode) or as a list
    of words (word mode), together with its sorted vocabulary.
    """

    def __init__(self, args: object):
        """Read <args.data_dir>/input.txt and build the vocabulary.

        Args:
            args: parsed command-line options; uses args.data_dir and
                args.mode ('word' enables word-level tokens).
        """
        # Input file path
        path = os.path.join(args.data_dir, 'input.txt')
        # The text to train / predict
        print("Reading", path)
        with open(path, 'r', encoding='utf-8') as file:
            self.text = file.read()
        # In word mode the text is stored as a list of words.
        self.word_mode = args.mode == 'word'
        if self.word_mode:
            self.text = self.text.split()
        # Sorted so the token -> index mapping is stable across runs.
        self.vocabulary = sorted(set(self.text))
        print("Vocabulary length:", len(self.vocabulary))
        print("Text length:", len(self.text), "tokens")

    def get_sequence(self, sequence_start_idx: int, sequence_length: int) -> List[str]:
        """Return the tokens in [start, start + length) as a list."""
        end_idx = sequence_start_idx + sequence_length
        return list(self.text[sequence_start_idx:end_idx])

    def get_sequence_output(self, sequence_start_idx: int, sequence_length: int) -> str:
        """Return the single token that follows the sequence."""
        end_idx = sequence_start_idx + sequence_length
        output = self.text[end_idx:end_idx + 1]
        # In word mode the slice is a one-element list; unwrap it.
        return output[0] if self.word_mode else output
| 32.377778
| 105
| 0.615649
|
acfd508a8551cdbe0829e9eb746e82aead9dc021
| 4,886
|
py
|
Python
|
libs/utils.py
|
kyrus/PyMyo
|
0c5c63813b176484083cc50dc0d072264bf7a9ef
|
[
"BSD-3-Clause"
] | 6
|
2015-03-23T19:03:02.000Z
|
2017-10-09T14:42:07.000Z
|
libs/utils.py
|
kyrus/PyMyo
|
0c5c63813b176484083cc50dc0d072264bf7a9ef
|
[
"BSD-3-Clause"
] | null | null | null |
libs/utils.py
|
kyrus/PyMyo
|
0c5c63813b176484083cc50dc0d072264bf7a9ef
|
[
"BSD-3-Clause"
] | null | null | null |
###############################################################################
## File : utils.py
## Description: general purpose library utilities
## :
## Created_On : Tue Sep 25 17:36:21 2012
## Created_By : Rich Smith (rich@kyr.us)
## Modified_On: Sun Apr 14 19:44:54 2013
## Modified_By: Rich Smith (rich@syndis.is)
## License : BSD-3
##
##
###############################################################################
import os
import re
import imp
import sys
import time
def sort_ipv4(ips):
    """Sort a list of IPv4 address strings in ascending numeric order.

    The list is sorted in place (preserving the original contract) and is
    also returned for convenience.

    Args:
        ips: list of dotted-quad IPv4 strings, e.g. "10.0.0.1".

    Returns:
        The same list object, sorted numerically octet by octet.
    """
    # Compare octets numerically instead of the old fixed-width "%3s"
    # padding trick - clearer, and not dependent on octets being <= 3 chars.
    ips.sort(key=lambda ip: tuple(int(octet) for octet in ip.split(".")))
    return ips
class ImportModules(object):
    """
    A pattern for importing all Python modules from a supplied directory root that is walked
    until the leaves, .py, .pyc, .pyo, .pyd will all be imported with the normal import preference
    rules applied

    Optionally supply a regex string, which if supplied must match the PATH of any modules that
    you want to be imported

    return a dictionary of modules keyed on their names qualified by dotted path

    NOTE:
    In the case of the rpyc_implant_server we walk the dir tree from the given start point and
    find any 'Service.py' module. We then import it (which means doing all the parent hierachy imports)
    and then finally add that module to a list of capabilities.

    NOTE(review): this module uses Python 2 syntax (`except ImportError, err`,
    print statements) and the deprecated `imp` module - it will not run under
    Python 3 without porting to `importlib`.
    """
    def __call__(self, path, only_packages = False, filter=None, fail_hard = False):
        """
        Walk the dir structure and import appropriate files

        Return a dictionary with all loaded modules in

        Args (by position):
            path - root directory to walk.
            only_packages - when True, import directories (packages) rather
                than individual module files.
            filter - optional regex; a module file is imported only if the
                regex matches its filename. (Shadows the builtin `filter`.)
            fail_hard - re-raise ImportError instead of just logging it.
        """
        #TODO proper exception handling
        self.path = path
        self.fail_hard = fail_hard
        self.module_dict = {}
        self.only_import_packages = only_packages
        ##Walk the tree to all leaves
        for root, dirs, files in os.walk(path):
            ##Filter out python modules and import
            if self.only_import_packages:
                self._import(root, dirs)
            else:
                self._import(root, self._filter(files, filter_re=filter))
        return self.module_dict
    def _import(self, root, modules_to_import, put_in_dict=True):
        """
        Do the import - raise any errors up for the caller to deal with

        Recursively imports the parent package chain first so that a
        hierarchical (dotted) import of the leaf module succeeds.
        """
        f = None
        for m in modules_to_import:
            ##Do the parent imports required for a hierarchical import
            if root and root != self.path:
                ##We eat up the chain of the directory hierarchy until we hit the path we began
                ## from, we then know we will have imported all parents etc
                ##Snip one level off the root
                parent_root = os.path.sep.join(root.split(os.path.sep)[:-1])
                parent_module = root.split(os.path.sep)[-1]
                self._import(parent_root, [parent_module], False)
            # Qualified dotted name relative to the walk root, e.g. "pkg.sub.mod".
            q_mod = os.path.join(root.replace(self.path,""),m).replace(os.path.sep, ".").strip(".")
            try:
                ##Find the modules whatever the extension
                f, filename, descr = imp.find_module(m, [root])
                loaded_module = imp.load_module(q_mod, f, filename, descr)
                #print "\t Imported %s"%(q_mod)
                ##Load it and add it to our dict representation of this dir tree
                if put_in_dict:
                    #self.module_dict[q_mod] = loaded_module
                    # Keyed on the top-level name only (first dotted component).
                    self.module_dict[q_mod.split(".")[0]] = loaded_module
            except ImportError, err:
                print "[-] Error importing %s : [%s]"%(q_mod, err)
                if self.fail_hard:
                    raise
            finally:
                if f:
                    f.close()
    def _filter(self, modules, filter_re=None):
        """
        Skip __init__ & produce a unique list of modules across all known extensions
        Apply filtering to list of returned files prior to import

        Returns a list of extensionless module names, deduplicated across
        the .py/.pyc/.pyo/.pyd variants of the same module.
        """
        ret = []
        py_exts = [".py", ".pyc", ".pyo", ".pyd"] #todo others
        for m in modules:
            if "__init__" in m or \
               os.path.splitext(m)[-1] not in py_exts or\
               os.path.splitext(m)[0] in ret:
                continue
            ##Check special regex filter - if re supplied and nothing matched return
            if filter_re and not re.search(filter_re, m):
                continue
            ret.append(os.path.splitext(m)[0])
        return ret
| 35.405797
| 103
| 0.535612
|
acfd509fa9ff3d6ee27c454427aa80449c013b08
| 1,597
|
py
|
Python
|
var/spack/repos/builtin/packages/r-xde/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/r-xde/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/r-xde/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RXde(RPackage):
    """XDE: a Bayesian hierarchical model for cross-study analysis of
    differential gene expression

    Multi-level model for cross-study detection of differential gene
    expression."""

    homepage = "https://bioconductor.org/packages/XDE"
    git = "https://git.bioconductor.org/packages/XDE.git"

    # Pinned Bioconductor release commits, newest first.
    version('2.36.0', commit='0277f9dffbd7d1880be77cb8581fc614501b3293')
    version('2.30.0', commit='058af6f1e431522778f970bf61f834620d3d7dd7')
    version('2.28.0', commit='b8cc7d0840ce1324644e8b4a750fbb964884498b')
    version('2.26.0', commit='7bf6368037937c53542447175061c2e2059ee3be')
    version('2.24.0', commit='fd5f245f82893657dc36e5a67a1d3b8255772462')
    version('2.22.0', commit='25bcec965ae42a410dd285a9db9be46d112d8e81')

    # R runtime and package dependencies; `when=` constrains version ranges.
    depends_on('r@2.10.0:', type=('build', 'run'))
    depends_on('r-biobase@2.5.5:', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-gtools', type=('build', 'run'))
    depends_on('r-mvtnorm', type=('build', 'run'))
    depends_on('r-rcolorbrewer', when='@2.24.0:', type=('build', 'run'))
    depends_on('r-genemeta', when='@2.24.0:', type=('build', 'run'))
    depends_on('r-siggenes', when='@2.24.0:', type=('build', 'run'))
    depends_on('r-mergemaid', when='@:2.30.0', type=('build', 'run'))
| 44.361111
| 73
| 0.687539
|
acfd50ca6ce7339823e2f6cddc15a624e0466a6b
| 2,388
|
py
|
Python
|
archives/correlation_analysis_celllevel/scf_configs/configs/config_mop_10x_cells_v3_snatac_gene_ka30_knn50_201120.py
|
mukamel-lab/SingleCellFusion_EnhancerPaper
|
acbfa5184667ca57c333c04c310b0712a0e8e15e
|
[
"MIT"
] | null | null | null |
archives/correlation_analysis_celllevel/scf_configs/configs/config_mop_10x_cells_v3_snatac_gene_ka30_knn50_201120.py
|
mukamel-lab/SingleCellFusion_EnhancerPaper
|
acbfa5184667ca57c333c04c310b0712a0e8e15e
|
[
"MIT"
] | null | null | null |
archives/correlation_analysis_celllevel/scf_configs/configs/config_mop_10x_cells_v3_snatac_gene_ka30_knn50_201120.py
|
mukamel-lab/SingleCellFusion_EnhancerPaper
|
acbfa5184667ca57c333c04c310b0712a0e8e15e
|
[
"MIT"
] | 1
|
2021-11-15T19:03:03.000Z
|
2021-11-15T19:03:03.000Z
|
#!/usr/bin/env python3
"""Configuration for a SingleCellFusion integration run
(10x cells v3 vs snATAC gene-level, ka_smooth=30, knn=50, dated 201120).
"""
import sys
import os
# Assuming the cell order in the metadata tables are the same as those in the gene level matrices
# The output knn matrices follow such order as well
# Core run parameters, also encoded into the run name below.
ka_smooth = 30
knn = 50
date = 201120
# # Configs
name = 'mop_10x_cells_v3_snatac_gene_ka{}_knn{}_{}'.format(ka_smooth, knn, date,)
outdir = '/cndd2/fangming/projects/miniatlas/results'
# Output file templates; the remaining {} placeholders are filled per
# modality / cluster by the pipeline.
output_pcX_all = outdir + '/pcX_all_{}.npy'.format(name)
output_cells_all = outdir + '/cells_all_{}.npy'.format(name)
output_imputed_data_format = outdir + '/imputed_data_{}_{{}}.npy'.format(name)
output_clst_and_umap = outdir + '/intg_summary_{}.tsv'.format(name)
output_figures = outdir + '/figures/{}_{{}}.{{}}'.format(name)
output_cluster_centroids = outdir + '/centroids_{}.pkl'.format(name)
save_knn = True  # new required arguments (7/27/2020)
output_knn_within = outdir + "/knn_within_{}_{{}}.npz".format(name)
output_knn_across = outdir + "/knn_across_{}_{{}}_{{}}.npz".format(name)
# end of new required arguments (7/27/2020)
# required for downsamp (8/7/2020)
output_cells = outdir + "/cells_{{}}_{}.npy".format(name)
DATA_DIR = '/cndd/fangming/CEMBA/data/MOp_all/data_freeze_neurons'
# fixed dataset configs
# __init__datasets (loaded from DATA_DIR) defines the per-modality settings.
sys.path.insert(0, DATA_DIR)
from __init__datasets import *
# Per-modality file templates ({0}=modality, {1}=extension).
meta_f = os.path.join(DATA_DIR, '{0}_metadata.tsv')
hvftrs_f = os.path.join(DATA_DIR, '{0}_hvfeatures.{1}')
hvftrs_gene = os.path.join(DATA_DIR, '{0}_hvfeatures.gene')
hvftrs_cell = os.path.join(DATA_DIR, '{0}_hvfeatures.cell')
# Modalities included in this run; commented entries are other options.
mods_selected = [
    # 'snmcseq_gene',
    'snatac_gene',
    '10x_cells_v3',
    # 'smarter_nuclei',
    # '10x_cells_v2',
    # '10x_cells_v3',
    # '10x_nuclei_v3',
    # '10x_nuclei_v3_macosko',
    ]
# features_selected = ['smarter_cells']
# features_selected = ['snmcseq_gene']
features_selected = ['snatac_gene']
# check features
for features_modality in features_selected:
    assert (features_modality in mods_selected)
# within modality
# ps: fraction-type smoothing parameters per assay family.
ps = {'mc': 0.9,
      'atac': 0.1,
      'rna': 0.7,
     }
# drop_npcs: number of leading PCs to drop per assay family.
drop_npcs = {
      'mc': 0,
      'atac': 0,
      'rna': 0,
     }
ka_smooth = ka_smooth  # default: 5
# across modality
cross_mod_distance_measure = 'correlation'  # cca
knn = knn
relaxation = 3
n_cca = 30
# PCA
npc = 50
# clustering
k = 30  # default: 30
resolutions = [0.1, 1, 2, 4]
# umap
umap_neighbors = 60
min_dist = 0.5
| 25.956522
| 97
| 0.691792
|
acfd50d12a0bbdd189c3b7cc43202bad77020731
| 353
|
py
|
Python
|
experiments/issue469/raw_memory_parser.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2019-04-23T10:41:35.000Z
|
2019-10-27T05:14:42.000Z
|
experiments/issue469/raw_memory_parser.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | null | null | null |
experiments/issue469/raw_memory_parser.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2018-01-16T00:00:22.000Z
|
2019-11-01T23:35:01.000Z
|
#! /usr/bin/env python
"""Lab parser that extracts a run's peak memory from its log output."""
from lab.parser import Parser


class RawMemoryParser(Parser):
    """Adds a single optional ``raw_memory`` attribute (peak memory in KB)."""

    def __init__(self):
        Parser.__init__(self)
        # \d+ instead of the original .+ so that int() is never handed a
        # non-numeric match; the attribute remains optional.
        self.add_pattern('raw_memory', r'Peak memory: (\d+) KB', type=int,
                         required=False)


if __name__ == '__main__':
    parser = RawMemoryParser()
    # print() as a function: the original Python-2 print statement is a
    # SyntaxError under Python 3.
    print('Running RawMemoryParser parser')
    parser.parse()
| 22.0625
| 89
| 0.679887
|
acfd519bbd767841df8fc0d7cc578e28777f4aea
| 1,264
|
py
|
Python
|
source/python/neuropod/backends/torchscript/test/custom_ops/setup.py
|
Moti-H/neuropod
|
6f3e5862230cecf8b0d0b463eb145f187a398e64
|
[
"Apache-2.0"
] | null | null | null |
source/python/neuropod/backends/torchscript/test/custom_ops/setup.py
|
Moti-H/neuropod
|
6f3e5862230cecf8b0d0b463eb145f187a398e64
|
[
"Apache-2.0"
] | null | null | null |
source/python/neuropod/backends/torchscript/test/custom_ops/setup.py
|
Moti-H/neuropod
|
6f3e5862230cecf8b0d0b463eb145f187a398e64
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 The Neuropod Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
# See https://pytorch.org/tutorials/advanced/cpp_extension.html#building-with-setuptools
# and https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html#building-with-setuptools
# We want to avoid depending on `libtorch_python` for torchscript custom ops
ext_module = CppExtension("addition_op", ["addition_op.cc"])
# CppExtension links torch_python by default; strip it so the resulting
# shared library can be loaded by a TorchScript runtime without Python.
ext_module.libraries = [
    libname for libname in ext_module.libraries if libname != "torch_python"
]
setup(
    name="addition_op",
    ext_modules=[ext_module],
    # no_python_abi_suffix=True keeps the output named addition_op.so instead
    # of embedding the CPython ABI tag, so non-Python loaders can find it.
    cmdclass={"build_ext": BuildExtension.with_options(no_python_abi_suffix=True)},
)
| 40.774194
| 98
| 0.778481
|
acfd52367b38f8f25d22dc648a05ea95888f8b0b
| 314
|
py
|
Python
|
deprecated/ProcessDatasetCreator.py
|
Insert-Generic-Name-Here/ReCOn
|
116d1185b47affd77de82194ae7c8293b5ce22b2
|
[
"Apache-2.0"
] | null | null | null |
deprecated/ProcessDatasetCreator.py
|
Insert-Generic-Name-Here/ReCOn
|
116d1185b47affd77de82194ae7c8293b5ce22b2
|
[
"Apache-2.0"
] | null | null | null |
deprecated/ProcessDatasetCreator.py
|
Insert-Generic-Name-Here/ReCOn
|
116d1185b47affd77de82194ae7c8293b5ce22b2
|
[
"Apache-2.0"
] | null | null | null |
import os
import datetime
import lib.monitor.ProcessMonitor as monitor
from time import gmtime, strftime, sleep
# Start background process monitoring that logs samples every 5 seconds under
# ./data (NOTE(review): logging behavior inferred from the logData/interval
# arguments — confirm against lib.monitor.ProcessMonitor).
procMonitor = monitor.ProcessMonitor(data_path=os.path.join('.', 'data'), logData=True, interval=5)
# Keep the process alive indefinitely, redrawing a UTC wall clock on one
# console line (\r); stop with Ctrl+C.
while (True):
    print(strftime("%Y-%m-%d %H:%M:%S", gmtime()), end='\r', flush=True)
    sleep(1)
| 28.545455
| 99
| 0.707006
|
acfd52a34ccdf1c8fb9baffbf0bb28067f6b99bf
| 16,050
|
py
|
Python
|
atom/proton/python/proton_api/models/retirement_calculator_expenses_request.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/proton/python/proton_api/models/retirement_calculator_expenses_request.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
atom/proton/python/proton_api/models/retirement_calculator_expenses_request.py
|
sumit4-ttn/SDK
|
b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Proton API
Financial engineering module of Hydrogen Atom # noqa: E501
OpenAPI spec version: 1.7.18
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RetirementCalculatorExpensesRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute name -> Swagger type, used by to_dict() to walk
    # the model generically.
    swagger_types = {
        'retirement_income': 'float',
        'percent_of_expenses_covered': 'float',
        'deposit_schedule': 'object',
        'retirement_age': 'int',
        'portfolio_return': 'float',
        'inflation_rate': 'float',
        'aggregation_account_ids': 'list[str]',
        'current_age': 'int',
        'account_ids': 'list[str]',
        'retirement_savings': 'float',
        'retirement_tax': 'float',
        'death_age': 'int'
    }
    # Attribute name -> JSON key in the wire format (identical here).
    attribute_map = {
        'retirement_income': 'retirement_income',
        'percent_of_expenses_covered': 'percent_of_expenses_covered',
        'deposit_schedule': 'deposit_schedule',
        'retirement_age': 'retirement_age',
        'portfolio_return': 'portfolio_return',
        'inflation_rate': 'inflation_rate',
        'aggregation_account_ids': 'aggregation_account_ids',
        'current_age': 'current_age',
        'account_ids': 'account_ids',
        'retirement_savings': 'retirement_savings',
        'retirement_tax': 'retirement_tax',
        'death_age': 'death_age'
    }
    def __init__(self, retirement_income=0.0, percent_of_expenses_covered=1.0, deposit_schedule=None, retirement_age=65, portfolio_return=None, inflation_rate=0.0, aggregation_account_ids=None, current_age=None, account_ids=None, retirement_savings=0.0, retirement_tax=0.0, death_age=None):  # noqa: E501
        """RetirementCalculatorExpensesRequest - a model defined in Swagger"""  # noqa: E501
        self._retirement_income = None
        self._percent_of_expenses_covered = None
        self._deposit_schedule = None
        self._retirement_age = None
        self._portfolio_return = None
        self._inflation_rate = None
        self._aggregation_account_ids = None
        self._current_age = None
        self._account_ids = None
        self._retirement_savings = None
        self._retirement_tax = None
        self._death_age = None
        self.discriminator = None
        # Optional fields are only assigned when provided so their setters'
        # range checks run on real values; portfolio_return, current_age and
        # death_age are required and assigned unconditionally (their setters
        # reject None).
        if retirement_income is not None:
            self.retirement_income = retirement_income
        if percent_of_expenses_covered is not None:
            self.percent_of_expenses_covered = percent_of_expenses_covered
        if deposit_schedule is not None:
            self.deposit_schedule = deposit_schedule
        if retirement_age is not None:
            self.retirement_age = retirement_age
        self.portfolio_return = portfolio_return
        if inflation_rate is not None:
            self.inflation_rate = inflation_rate
        if aggregation_account_ids is not None:
            self.aggregation_account_ids = aggregation_account_ids
        self.current_age = current_age
        if account_ids is not None:
            self.account_ids = account_ids
        if retirement_savings is not None:
            self.retirement_savings = retirement_savings
        if retirement_tax is not None:
            self.retirement_tax = retirement_tax
        self.death_age = death_age
    @property
    def retirement_income(self):
        """Gets the retirement_income of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The retirement_income of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: float
        """
        return self._retirement_income
    @retirement_income.setter
    def retirement_income(self, retirement_income):
        """Sets the retirement_income of this RetirementCalculatorExpensesRequest.
        :param retirement_income: The retirement_income of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: float
        """
        if retirement_income is not None and retirement_income < 0:  # noqa: E501
            raise ValueError("Invalid value for `retirement_income`, must be a value greater than or equal to `0`")  # noqa: E501
        self._retirement_income = retirement_income
    @property
    def percent_of_expenses_covered(self):
        """Gets the percent_of_expenses_covered of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The percent_of_expenses_covered of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: float
        """
        return self._percent_of_expenses_covered
    @percent_of_expenses_covered.setter
    def percent_of_expenses_covered(self, percent_of_expenses_covered):
        """Sets the percent_of_expenses_covered of this RetirementCalculatorExpensesRequest.
        :param percent_of_expenses_covered: The percent_of_expenses_covered of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: float
        """
        # Fraction in [0, 1].
        if percent_of_expenses_covered is not None and percent_of_expenses_covered > 1:  # noqa: E501
            raise ValueError("Invalid value for `percent_of_expenses_covered`, must be a value less than or equal to `1`")  # noqa: E501
        if percent_of_expenses_covered is not None and percent_of_expenses_covered < 0:  # noqa: E501
            raise ValueError("Invalid value for `percent_of_expenses_covered`, must be a value greater than or equal to `0`")  # noqa: E501
        self._percent_of_expenses_covered = percent_of_expenses_covered
    @property
    def deposit_schedule(self):
        """Gets the deposit_schedule of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The deposit_schedule of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: object
        """
        return self._deposit_schedule
    @deposit_schedule.setter
    def deposit_schedule(self, deposit_schedule):
        """Sets the deposit_schedule of this RetirementCalculatorExpensesRequest.
        :param deposit_schedule: The deposit_schedule of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: object
        """
        self._deposit_schedule = deposit_schedule
    @property
    def retirement_age(self):
        """Gets the retirement_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The retirement_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: int
        """
        return self._retirement_age
    @retirement_age.setter
    def retirement_age(self, retirement_age):
        """Sets the retirement_age of this RetirementCalculatorExpensesRequest.
        :param retirement_age: The retirement_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: int
        """
        if retirement_age is not None and retirement_age < 0:  # noqa: E501
            raise ValueError("Invalid value for `retirement_age`, must be a value greater than or equal to `0`")  # noqa: E501
        self._retirement_age = retirement_age
    @property
    def portfolio_return(self):
        """Gets the portfolio_return of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The portfolio_return of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: float
        """
        return self._portfolio_return
    @portfolio_return.setter
    def portfolio_return(self, portfolio_return):
        """Sets the portfolio_return of this RetirementCalculatorExpensesRequest.
        :param portfolio_return: The portfolio_return of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: float
        """
        # Required; a rate of return may be negative but not below -100%.
        if portfolio_return is None:
            raise ValueError("Invalid value for `portfolio_return`, must not be `None`")  # noqa: E501
        if portfolio_return is not None and portfolio_return < -1:  # noqa: E501
            raise ValueError("Invalid value for `portfolio_return`, must be a value greater than or equal to `-1`")  # noqa: E501
        self._portfolio_return = portfolio_return
    @property
    def inflation_rate(self):
        """Gets the inflation_rate of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The inflation_rate of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: float
        """
        return self._inflation_rate
    @inflation_rate.setter
    def inflation_rate(self, inflation_rate):
        """Sets the inflation_rate of this RetirementCalculatorExpensesRequest.
        :param inflation_rate: The inflation_rate of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: float
        """
        if inflation_rate is not None and inflation_rate < -1:  # noqa: E501
            raise ValueError("Invalid value for `inflation_rate`, must be a value greater than or equal to `-1`")  # noqa: E501
        self._inflation_rate = inflation_rate
    @property
    def aggregation_account_ids(self):
        """Gets the aggregation_account_ids of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The aggregation_account_ids of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: list[str]
        """
        return self._aggregation_account_ids
    @aggregation_account_ids.setter
    def aggregation_account_ids(self, aggregation_account_ids):
        """Sets the aggregation_account_ids of this RetirementCalculatorExpensesRequest.
        :param aggregation_account_ids: The aggregation_account_ids of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: list[str]
        """
        self._aggregation_account_ids = aggregation_account_ids
    @property
    def current_age(self):
        """Gets the current_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The current_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: int
        """
        return self._current_age
    @current_age.setter
    def current_age(self, current_age):
        """Sets the current_age of this RetirementCalculatorExpensesRequest.
        :param current_age: The current_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: int
        """
        if current_age is None:
            raise ValueError("Invalid value for `current_age`, must not be `None`")  # noqa: E501
        if current_age is not None and current_age < 0:  # noqa: E501
            raise ValueError("Invalid value for `current_age`, must be a value greater than or equal to `0`")  # noqa: E501
        self._current_age = current_age
    @property
    def account_ids(self):
        """Gets the account_ids of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The account_ids of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: list[str]
        """
        return self._account_ids
    @account_ids.setter
    def account_ids(self, account_ids):
        """Sets the account_ids of this RetirementCalculatorExpensesRequest.
        :param account_ids: The account_ids of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: list[str]
        """
        self._account_ids = account_ids
    @property
    def retirement_savings(self):
        """Gets the retirement_savings of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The retirement_savings of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: float
        """
        return self._retirement_savings
    @retirement_savings.setter
    def retirement_savings(self, retirement_savings):
        """Sets the retirement_savings of this RetirementCalculatorExpensesRequest.
        :param retirement_savings: The retirement_savings of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: float
        """
        if retirement_savings is not None and retirement_savings < 0:  # noqa: E501
            raise ValueError("Invalid value for `retirement_savings`, must be a value greater than or equal to `0`")  # noqa: E501
        self._retirement_savings = retirement_savings
    @property
    def retirement_tax(self):
        """Gets the retirement_tax of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The retirement_tax of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: float
        """
        return self._retirement_tax
    @retirement_tax.setter
    def retirement_tax(self, retirement_tax):
        """Sets the retirement_tax of this RetirementCalculatorExpensesRequest.
        :param retirement_tax: The retirement_tax of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: float
        """
        # Tax rate expressed as a fraction in [0, 1].
        if retirement_tax is not None and retirement_tax > 1:  # noqa: E501
            raise ValueError("Invalid value for `retirement_tax`, must be a value less than or equal to `1`")  # noqa: E501
        if retirement_tax is not None and retirement_tax < 0:  # noqa: E501
            raise ValueError("Invalid value for `retirement_tax`, must be a value greater than or equal to `0`")  # noqa: E501
        self._retirement_tax = retirement_tax
    @property
    def death_age(self):
        """Gets the death_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :return: The death_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :rtype: int
        """
        return self._death_age
    @death_age.setter
    def death_age(self, death_age):
        """Sets the death_age of this RetirementCalculatorExpensesRequest.
        :param death_age: The death_age of this RetirementCalculatorExpensesRequest.  # noqa: E501
        :type: int
        """
        if death_age is None:
            raise ValueError("Invalid value for `death_age`, must not be `None`")  # noqa: E501
        if death_age is not None and death_age < 0:  # noqa: E501
            raise ValueError("Invalid value for `death_age`, must be a value greater than or equal to `0`")  # noqa: E501
        self._death_age = death_age
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk declared attributes, recursing into nested models via to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(RetirementCalculatorExpensesRequest, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RetirementCalculatorExpensesRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 37.587822
| 304
| 0.669844
|
acfd541ef124b85139be400b84cbb771a8f1e706
| 2,344
|
py
|
Python
|
tools/nntool/importer/onnx/handlers/backend/gather.py
|
bot-motion/gap_sdk
|
bd117a8a4b5384b2b889f72effbfed4f7f938a88
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/importer/onnx/handlers/backend/gather.py
|
bot-motion/gap_sdk
|
bd117a8a4b5384b2b889f72effbfed4f7f938a88
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/importer/onnx/handlers/backend/gather.py
|
bot-motion/gap_sdk
|
bd117a8a4b5384b2b889f72effbfed4f7f938a88
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from graph.types import GatherParametters, NNEdge
from graph.types.input_output import ConstantInputParameters
from importer.common.constant_mixin import ConstantMixin
from importer.common.provisional_dim import ProvisionalDim
from importer.onnx.common import logger
from ..backend_handler import BackendHandler
from ..handler import onnx_op
@onnx_op("Gather")
class Gather(ConstantMixin, BackendHandler):
    """Importer for the ONNX ``Gather`` operator.

    Folds the op to a constant (via ``np.take``) when the data input is
    itself constant; otherwise emits a ``GatherParametters`` graph node.
    The indices input must always be constant.
    """
    @classmethod
    def _common(cls, node, **kwargs):
        all_nodes = kwargs['all_nodes']
        G = kwargs['G']
        valid_name = kwargs['valid_name']
        # Each entry of all_nodes is a (producer_node, output_idx, prov_dim) tuple.
        inputs = [all_nodes[inp] for inp in node.input]
        x = inputs[0]
        x_shape = x[2].shape
        y = inputs[1]
        # Indices must be resolvable at import time.
        indices = cls.get_constant(y)
        axis = node.attrs.get('axis', 0)  # ONNX default axis is 0
        # Output shape: data shape with the gathered axis replaced by the
        # indices' shape.
        pshape = ProvisionalDim(x_shape[:axis:] + list(indices.shape) + x_shape[axis + 1:])
        if cls.is_constant(x):
            logger.info("reducing %s to a constant", valid_name)
            x_val = cls.get_constant(x)
            params = ConstantInputParameters(valid_name, value=np.take(x_val, indices, axis=axis))
        else:
            # Adjust the axis to the trimmed (batch-less) shape — TODO confirm
            # _trim_axis semantics against importer.common.
            axis = cls._trim_axis(axis, x_shape)
            params = GatherParametters(valid_name, axis=axis, indices=indices)
        G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
        all_nodes[node.output[0]] = (params, 0, pshape)
        return params
    @classmethod
    def version_1(cls, node, **kwargs):
        return cls._common(node, **kwargs)
    @classmethod
    def version_11(cls, node, **kwargs):
        return cls._common(node, **kwargs)
| 38.42623
| 98
| 0.695392
|
acfd54f18d936b38496ccd6b2bfd2d89681e4525
| 1,947
|
py
|
Python
|
utils/gen-numerics.py
|
scwagner/irc
|
2792362d9447e612789d360fe8be915cb457b33a
|
[
"MIT"
] | 109
|
2017-01-15T23:08:56.000Z
|
2022-03-22T10:05:41.000Z
|
utils/gen-numerics.py
|
scwagner/irc
|
2792362d9447e612789d360fe8be915cb457b33a
|
[
"MIT"
] | 34
|
2017-01-10T01:07:45.000Z
|
2021-12-04T19:06:06.000Z
|
utils/gen-numerics.py
|
scwagner/irc
|
2792362d9447e612789d360fe8be915cb457b33a
|
[
"MIT"
] | 16
|
2017-08-05T16:05:58.000Z
|
2022-03-02T19:34:06.000Z
|
import yaml
# Load the IRC numeric definitions; 'values' is the list of numeric records.
data = yaml.safe_load(open('./numerics.yml', 'r'))
vals = data['values']
# Indices of records already emitted, so later passes skip them.
used = set()
# Emit the header of a generated Go source file to stdout.
print('//nolint')
print('package irc')
print()
print('const (')
def print_item(idx, item, ircv3=False, obsolete=None, tablevel=1, **kwargs):
    """Emit one Go constant for `item` if it matches the given filters.

    idx is the record's position in `vals`; records already printed are
    skipped and matching records are marked in the global `used` set.
    Remaining kwargs are compared verbatim against the record's fields.
    """
    if idx in used:
        return
    origin = item.get('origin', '')
    origin_name = kwargs.pop('origin', '')
    if ircv3:
        # IRCv3 records are identified by an ircv3.net contact/information URL.
        if ('ircv3.net' not in item.get('contact', '')
                and 'ircv3.net' not in item.get('information', '')):
            return
    # NOTE(review): precedence here is (origin_name and not origin) or
    # (origin_name not in origin); since '' is a substring of everything the
    # second clause is always False when origin_name is empty, so this
    # behaves like origin_name and (not origin or origin_name not in origin).
    elif origin_name and not origin or origin_name not in origin:
        return
    kwargs['obsolete'] = obsolete
    for k, v in kwargs.items():
        if item.get(k) != v:
            return
    # Mark seen
    used.add(idx)
    print('\t' * tablevel, end='')
    print('{} = "{}"'.format(item['name'], item['numeric']), end='')
    # Append the origin as a Go comment when it adds information.
    if origin and origin != origin_name:
        print(' // {}'.format(origin), end='')
    print()
def print_specific(**filters):
    """Run print_item over every record in `vals` with the given filters."""
    for position, record in enumerate(vals):
        print_item(position, record, **filters)
# Emit numerics grouped by origin; each pass consumes matching records so a
# numeric appears under the first group that claims it.
print('\t// RFC1459')
print_specific(origin='RFC1459')
print()
print('\t// RFC1459 (Obsolete)')
print_specific(origin='RFC1459', obsolete=True)
print()
print('\t// RFC2812')
print_specific(origin='RFC2812')
print()
print('\t// RFC2812 (Obsolete)')
print_specific(origin='RFC2812', obsolete=True)
print()
print('\t// IRCv3')
print_specific(origin='IRCv3', ircv3=True)
print()
#print('\t// IRCv3 (obsolete)')
#print_specific(origin='IRCv3', ircv3=True, obsolete=True)
#print()
print('\t// Other')
print_specific(name='RPL_ISUPPORT')
print()
# Everything left over is printed inside a commented-out block so the
# generated Go still documents it without creating conflicting constants.
print('\t// Ignored')
print('\t//')
print('\t// Anything not in an RFC has not been included because')
print('\t// there are way too many conflicts to deal with.')
print('\t/*')
print_specific(tablevel=2)
print('\t//*/')
print()
print('\t// Obsolete')
print('\t/*')
print_specific(obsolete=True, tablevel=2)
print('\t//*/')
print(')')
| 22.639535
| 76
| 0.62301
|
acfd55151c534fa9458d8f28033b10e315725b5c
| 633
|
py
|
Python
|
tryalgo/merge_ordered_lists.py
|
fuez/tryalgo
|
b21862be294b681640706f983692956219d7b216
|
[
"MIT"
] | 351
|
2015-10-25T18:45:58.000Z
|
2022-03-26T02:06:51.000Z
|
tryalgo/merge_ordered_lists.py
|
fuez/tryalgo
|
b21862be294b681640706f983692956219d7b216
|
[
"MIT"
] | 39
|
2016-02-04T07:01:57.000Z
|
2021-01-26T02:53:54.000Z
|
tryalgo/merge_ordered_lists.py
|
fuez/tryalgo
|
b21862be294b681640706f983692956219d7b216
|
[
"MIT"
] | 119
|
2015-11-18T12:13:01.000Z
|
2022-03-21T04:18:11.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Merge two ordered lists
jill-jênn vie et christoph dürr - 2014-2019
"""
# snip{
def merge(x, y):
    """Merge two ordered lists

    :param x:
    :param y: x, y are nondecreasing ordered lists
    :returns: union of x and y in order
    :complexity: linear
    """
    out = []
    i = j = 0
    while i < len(x) and j < len(y):
        if x[i] <= y[j]:  # ties taken from x first, keeping the merge stable
            out.append(x[i])
            i += 1
        else:
            out.append(y[j])
            j += 1
    out.extend(x[i:])  # at most one of these two tails is non-empty
    out.extend(y[j:])
    return out
# snip}
| 21.1
| 73
| 0.475513
|
acfd5522b6c56539cd9a0027df6370f3a611a8e9
| 1,438
|
py
|
Python
|
Current/clean.py
|
CharlesCarley/HackComputer
|
2f29ec7929ef08f295b5be5810f324acc99604e0
|
[
"MIT"
] | null | null | null |
Current/clean.py
|
CharlesCarley/HackComputer
|
2f29ec7929ef08f295b5be5810f324acc99604e0
|
[
"MIT"
] | null | null | null |
Current/clean.py
|
CharlesCarley/HackComputer
|
2f29ec7929ef08f295b5be5810f324acc99604e0
|
[
"MIT"
] | null | null | null |
# -----------------------------------------------------------------------------
#
# Copyright (c) Charles Carley.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# ------------------------------------------------------------------------------
import os, glob
def dropList(fileList):
    """Delete every file in fileList, announcing each removal on stdout."""
    for path in fileList:
        print("Deleting file =>", path)
        os.remove(path)
def main():
    # Remove all generated documentation artifacts (doxygen-style HTML/XML/
    # markdown output and rendered graphs) so the next build starts clean.
    dropList(glob.glob("html/*.html"))
    dropList(glob.glob("xml/*.xml"))
    dropList(glob.glob("markdown/*.md"))
    dropList(glob.glob("images/dot/*.dot"))
    dropList(glob.glob("images/dot/*.svg"))
if __name__ =='__main__':
    main()
| 37.842105
| 80
| 0.63491
|
acfd55ebeb44bf38521312af1396cb86eb3563db
| 704
|
py
|
Python
|
src/python/grpcio_status/grpc_version.py
|
hanjingo/grpc
|
42b083322f6cf45fc00c801a6e16a013f3edfc99
|
[
"Apache-2.0"
] | null | null | null |
src/python/grpcio_status/grpc_version.py
|
hanjingo/grpc
|
42b083322f6cf45fc00c801a6e16a013f3edfc99
|
[
"Apache-2.0"
] | 4
|
2022-02-27T18:59:37.000Z
|
2022-02-27T18:59:53.000Z
|
src/python/grpcio_status/grpc_version.py
|
hanjingo/grpc
|
42b083322f6cf45fc00c801a6e16a013f3edfc99
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_status/grpc_version.py.template`!!!
# Pre-release marker for the upcoming 1.47.0; change the template, not this file.
VERSION = '1.47.0.dev0'
| 39.111111
| 97
| 0.762784
|
acfd5621bb5b24905809c062abc65567ff981f47
| 9,406
|
py
|
Python
|
arelle/ViewWinTests.py
|
hamscher/Arelle
|
64c1beddcc7163e571011faf07a03d8ffe18bb78
|
[
"Apache-2.0"
] | 1
|
2020-12-29T09:20:24.000Z
|
2020-12-29T09:20:24.000Z
|
arelle/ViewWinTests.py
|
hamscher/Arelle
|
64c1beddcc7163e571011faf07a03d8ffe18bb78
|
[
"Apache-2.0"
] | 3
|
2021-01-07T23:36:40.000Z
|
2021-12-13T20:43:27.000Z
|
arelle/ViewWinTests.py
|
hamscher/Arelle
|
64c1beddcc7163e571011faf07a03d8ffe18bb78
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Oct 17, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from tkinter import *
try:
from tkinter.ttk import *
except ImportError:
from ttk import *
import os
from arelle import (ViewWinTree, ModelDocument, XmlUtil)
def viewTests(modelXbrl, tabWin):
    """Build the "Tests" tree view tab for a loaded testcase/registry DTS.

    Configures the tree columns, chooses which columns are displayed based on
    the document type, populates the tree from the model document, and wires
    up selection/hover event handlers.
    """
    view = ViewTests(modelXbrl, tabWin)
    modelXbrl.modelManager.showStatus(_("viewing Tests"))
    # Declare every possible column; "displaycolumns" below narrows the set
    # actually shown for the current document type.
    view.treeView["columns"] = ("name", "readMeFirst", "infoset", "status", "call", "test", "expected", "actual")
    view.treeView.column("#0", width=150, anchor="w")
    view.treeView.heading("#0", text="ID")
    view.treeView.column("name", width=150, anchor="w")
    view.treeView.heading("name", text="Name")
    view.treeView.column("readMeFirst", width=75, anchor="w")
    view.treeView.heading("readMeFirst", text="ReadMeFirst")
    view.treeView.column("infoset", width=75, anchor="w")
    view.treeView.heading("infoset", text="Infoset File")
    view.treeView.column("status", width=80, anchor="w")
    view.treeView.heading("status", text="Status")
    view.treeView.column("call", width=150, anchor="w")
    view.treeView.heading("call", text="Call")
    view.treeView.column("test", width=100, anchor="w")
    view.treeView.heading("test", text="Test")
    view.treeView.column("expected", width=100, anchor="w")
    view.treeView.heading("expected", text="Expected")
    view.treeView.column("actual", width=100, anchor="w")
    view.treeView.heading("actual", text="Actual")
    view.isTransformRegistry = False
    modelDocument = modelXbrl.modelDocument
    if modelXbrl.modelDocument.type in (ModelDocument.Type.REGISTRY, ModelDocument.Type.REGISTRYTESTCASE):
        # Transform registries get call/test columns instead of name/readMeFirst.
        if modelXbrl.modelDocument.xmlRootElement.namespaceURI == "http://xbrl.org/2011/conformance-rendering/transforms":
            view.treeView["displaycolumns"] = ("status", "call", "test", "expected", "actual")
            view.isTransformRegistry = True
        else:
            view.treeView["displaycolumns"] = ("name", "readMeFirst", "status", "call", "test", "expected", "actual")
    elif modelXbrl.modelDocument.type == ModelDocument.Type.XPATHTESTSUITE:
        view.treeView["displaycolumns"] = ("name", "readMeFirst", "status", "call", "test", "expected", "actual")
    else:
        # check if infoset needed
        if modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.REGISTRY):
            hasInfoset = any(getattr(refDoc, "outpath", None) for refDoc in modelDocument.referencesDocument)
        else:
            hasInfoset = bool(getattr(modelDocument, "outpath", None))
        view.treeView["displaycolumns"] = (("name", "readMeFirst") +
                                           ( ("infoset",) if hasInfoset else () ) +
                                           ( "status", "expected", "actual"))
    menu = view.contextMenu()
    view.menuAddExpandCollapse()
    view.menuAddClipboard()
    # Running counter used to build unique tree item ids during population.
    view.id = 0
    view.viewTestcaseIndexElement(modelDocument, "")
    view.blockSelectEvent = 1
    view.blockViewModelObject = 0
    view.treeView.bind("<<TreeviewSelect>>", view.treeviewSelect, '+')
    view.treeView.bind("<Enter>", view.treeviewEnter, '+')
    view.treeView.bind("<Leave>", view.treeviewLeave, '+')
class ViewTests(ViewWinTree.ViewTree):
    def __init__(self, modelXbrl, tabWin):
        # "Tests" is the tab title; NOTE(review): the trailing True presumably
        # selects the tree-with-columns layout — confirm against
        # ViewWinTree.ViewTree's signature.
        super(ViewTests, self).__init__(modelXbrl, tabWin, "Tests", True)
def viewTestcaseIndexElement(self, modelDocument, parentNode, parentNodeText=None):
self.id += 1
if modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.REGISTRY):
nodeText = os.path.basename(modelDocument.uri)
if nodeText == parentNodeText: # may be same name, index.xml, use directory name instead
nodeText = os.path.basename(os.path.dirname(modelDocument.uri))
node = self.treeView.insert(parentNode, "end", modelDocument.objectId(self.id),
text=nodeText, tags=("odd",))
self.id += 1;
# sort test cases by uri
testcases = []
for referencedDocument, _ref in sorted(modelDocument.referencesDocument.items(),
key=lambda i:i[1].referringModelObject.objectIndex if i[1] else 0):
if referencedDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.REGISTRY):
self.viewTestcaseIndexElement(referencedDocument, node, nodeText)
else:
testcases.append((referencedDocument.uri, referencedDocument.objectId()))
testcases.sort()
for i, testcaseTuple in enumerate(testcases):
self.viewTestcase(self.modelXbrl.modelObject(testcaseTuple[1]), node, i)
elif modelDocument.type in (ModelDocument.Type.TESTCASE, ModelDocument.Type.REGISTRYTESTCASE):
self.viewTestcase(modelDocument, parentNode, 1)
elif modelDocument.type == ModelDocument.Type.XPATHTESTSUITE:
for i, elt in enumerate(modelDocument.xmlRootElement.iterchildren(tag="{http://www.w3.org/2005/02/query-test-XQTSCatalog}test-group")):
self.viewTestGroup(elt, parentNode, i)
else:
pass
def viewTestcase(self, modelDocument, parentNode, n):
node = self.treeView.insert(parentNode, "end", modelDocument.objectId(),
text=os.path.basename(modelDocument.uri),
tags=("odd" if n & 1 else "even",))
self.id += 1;
if hasattr(modelDocument, "testcaseVariations"):
for i, modelTestcaseVariation in enumerate(modelDocument.testcaseVariations):
self.viewTestcaseVariation(modelTestcaseVariation, node, n + i + 1)
def viewTestGroup(self, group, parentNode, n):
node = self.treeView.insert(parentNode, "end", group.objectId(),
text=group.get("name"),
tags=("odd" if n & 1 else "even",))
titleElt = XmlUtil.child(group, None, "title")
if titleElt is not None:
self.treeView.set(node, "name", titleElt.text)
self.id += 1;
i = -1
for elt in group.iterchildren(tag="{http://www.w3.org/2005/02/query-test-XQTSCatalog}test-group"):
i = i + 1
self.viewTestGroup(elt, node, n + i + 1)
for elt in group.iterchildren(tag="{http://www.w3.org/2005/02/query-test-XQTSCatalog}test-case"):
if elt.get("is-XPath2") == "true":
i = i + 1
self.viewTestcaseVariation(elt, node, n + i + 1)
def viewTestcaseVariation(self, modelTestcaseVariation, parentNode, n):
if self.isTransformRegistry or modelTestcaseVariation.localName in ("testGroup", "test-case"):
id = modelTestcaseVariation.name
else:
id = modelTestcaseVariation.id
if id is None:
id = ""
node = self.treeView.insert(parentNode, "end", modelTestcaseVariation.objectId(),
text=id,
tags=("odd" if n & 1 else "even",))
self.treeView.set(node, "name", (modelTestcaseVariation.description or modelTestcaseVariation.name))
self.treeView.set(node, "readMeFirst", ",".join(str(uri) for uri in modelTestcaseVariation.readMeFirstUris))
self.treeView.set(node, "status", modelTestcaseVariation.status)
call = modelTestcaseVariation.cfcnCall
if call: self.treeView.set(node, "call", call[0])
test = modelTestcaseVariation.cfcnTest
if test:
self.treeView.set(node, "test", test[0])
if getattr(self.modelXbrl.modelDocument, "outpath", None) and modelTestcaseVariation.resultIsInfoset:
self.treeView.set(node, "infoset", modelTestcaseVariation.resultInfosetUri)
self.treeView.set(node, "expected", modelTestcaseVariation.expected)
self.treeView.set(node, "actual", ", ".join(modelTestcaseVariation.actual))
self.id += 1;
def treeviewEnter(self, *args):
self.blockSelectEvent = 0
def treeviewLeave(self, *args):
self.blockSelectEvent = 1
def treeviewSelect(self, *args):
if self.blockSelectEvent == 0 and self.blockViewModelObject == 0:
self.blockViewModelObject += 1
self.modelXbrl.viewModelObject(self.treeView.selection()[0])
self.blockViewModelObject -= 1
def viewModelObject(self, modelObject):
if self.blockViewModelObject == 0:
self.blockViewModelObject += 1
testcaseVariationId = modelObject.objectId()
if self.treeView.exists(testcaseVariationId):
if hasattr(modelObject, "status"):
self.treeView.set(testcaseVariationId, "status", modelObject.status)
if hasattr(modelObject, "actual"):
self.treeView.set(testcaseVariationId, "actual", ", ".join(
str(code) for code in modelObject.actual))
self.treeView.see(testcaseVariationId)
self.treeView.selection_set(testcaseVariationId)
self.blockViewModelObject -= 1
| 53.748571
| 147
| 0.625558
|
acfd56362621b6412d9d5935a2b52af362feaf35
| 1,856
|
py
|
Python
|
anthill/discovery/server.py
|
anthill-services/anthill-discovery
|
f61242bd6f853045696aae24a498a615979b3969
|
[
"MIT"
] | null | null | null |
anthill/discovery/server.py
|
anthill-services/anthill-discovery
|
f61242bd6f853045696aae24a498a615979b3969
|
[
"MIT"
] | null | null | null |
anthill/discovery/server.py
|
anthill-services/anthill-discovery
|
f61242bd6f853045696aae24a498a615979b3969
|
[
"MIT"
] | 1
|
2017-12-13T07:46:03.000Z
|
2017-12-13T07:46:03.000Z
|
from anthill.common import handler, server, access, sign, discover
from . model.discovery import DiscoveryModel, ServiceNotFound
from . import handler as h
from . import admin
from . import options as _opts
class DiscoveryServer(server.Server):
    """Discovery service: resolves service names to network locations."""

    def __init__(self):
        super(DiscoveryServer, self).__init__()
        # Model holding the service-name -> location mapping.
        self.services = DiscoveryModel(self)

    def get_admin(self):
        """Admin UI controllers exposed by this service."""
        admin_controllers = {
            "index": admin.RootAdminController,
            "service": admin.ServiceController,
            "new_service": admin.NewServiceController,
            "clone_service": admin.CloneServiceController,
            "services": admin.ServicesController
        }
        return admin_controllers

    def get_models(self):
        """Models whose lifecycle the server manages."""
        return [self.services]

    def get_metadata(self):
        """Human-readable metadata shown in the admin tool."""
        return {
            "title": "Discovery",
            "description": "Map each service location dynamically",
            "icon": "map-signs"
        }

    def get_handlers(self):
        """URL routes, ordered from most to least specific."""
        routes = [
            (r"/@service/(.*?)/(.*)", h.ServiceInternalHandler),
            (r"/@services/(.*)", h.ServiceListInternalHandler),
            (r"/service/(.*?)/(.*)", h.DiscoverNetworkHandler),
            (r"/services/(.*?)/(.*)", h.MultiDiscoverNetworkHandler),
            (r"/service/(.*)", h.DiscoverHandler),
            (r"/services/(.*)", h.MultiDiscoverHandler),
        ]
        return routes

    def get_internal_handler(self):
        """Handler serving internal (service-to-service) requests."""
        return h.InternalHandler(self)

    async def get_auth_location(self, network):
        """Return the location of the 'login' service on *network*, or None
        when it is not registered."""
        try:
            return await self.services.get_service("login", network)
        except ServiceNotFound:
            return None

    def init_discovery(self):
        """Expose the local model as the process-wide discovery cache."""
        discover.cache = self.services
if __name__ == "__main__":
    # Initialize the runtime environment (options, logging, ...).  The return
    # value was previously bound to an unused local `stt`; it is not needed.
    server.init()
    # Anonymous (public) access tokens are sufficient for discovery lookups.
    access.AccessToken.init([access.public()])
    server.start(DiscoveryServer)
| 28.121212
| 72
| 0.608297
|
acfd56e71c1dd024c3d034dbeea701ab069ebb36
| 353
|
py
|
Python
|
content/tampers/space2comment.py
|
magnologan/WhatWaf
|
fc16b5225302a6329ffd4cb78e0ecf1d42cf8acd
|
[
"MIT"
] | 2
|
2018-02-13T13:58:18.000Z
|
2021-05-26T09:55:09.000Z
|
content/tampers/space2comment.py
|
magnologan/WhatWaf
|
fc16b5225302a6329ffd4cb78e0ecf1d42cf8acd
|
[
"MIT"
] | null | null | null |
content/tampers/space2comment.py
|
magnologan/WhatWaf
|
fc16b5225302a6329ffd4cb78e0ecf1d42cf8acd
|
[
"MIT"
] | 1
|
2022-02-09T06:23:40.000Z
|
2022-02-09T06:23:40.000Z
|
# Example payload demonstrating the transformation input (SQLi probe string).
__example_payload__ = '484029") AS xDKy WHERE 5427=5427 UNION ALL SELECT NULL,NULL'
# Human-readable description of what this tamper script does.
__type__ = "changing the payload spaces into a comment"
def tamper(payload, **kwargs):
    """Replace every space in *payload* with an inline SQL comment (``/**/``).

    Many WAF signatures key on literal spaces; substituting ``/**/`` keeps
    the SQL semantics while evading such filters.

    :param payload: payload to transform (coerced to ``str`` first)
    :param kwargs: unused, accepted for tamper-interface compatibility
    :return: the transformed payload string
    """
    # str.replace is a single C-level pass; the previous char-by-char `+=`
    # loop rebuilt the string once per character (quadratic in the worst case).
    return str(payload).replace(" ", "/**/")
| 25.214286
| 83
| 0.592068
|
acfd57b02238a71e8fe62feb47b4ae7998dc5a95
| 228
|
py
|
Python
|
src/cfeapi/restconf/pagination.py
|
ricardoBento/Django-Angular-Ionic
|
fea23986deb613603a150d11787b609971c7152f
|
[
"MIT"
] | 277
|
2017-12-10T19:47:15.000Z
|
2022-03-27T06:27:57.000Z
|
src/cfeapi/restconf/pagination.py
|
avanish981/REST-API
|
b5e4afe61ae1f21cb4928bf1f15be546d9252a5d
|
[
"MIT"
] | 8
|
2020-02-12T03:21:26.000Z
|
2022-01-13T01:51:42.000Z
|
src/cfeapi/restconf/pagination.py
|
hmtanbir/django-angular-ionic
|
2984ce6bdf7a54290671095e60d3bd7bb39b14a8
|
[
"MIT"
] | 160
|
2017-12-10T19:19:17.000Z
|
2022-03-27T06:27:58.000Z
|
from rest_framework import pagination
class CFEAPIPagination(pagination.LimitOffsetPagination):
    """Limit/offset pagination for the API.

    Clients page with the standard ``?limit=`` / ``?offset=`` query
    parameters; ``limit`` defaults to 10 and is capped at 20.
    (Removed stale commented-out remnants of a PageNumberPagination draft.)
    """
    default_limit = 10
    max_limit = 20
| 22.8
| 81
| 0.714912
|
acfd586408bea5eb469b791f673ccca17bb1be6e
| 2,264
|
py
|
Python
|
segmenter/score-segments.py
|
trane293/nlp-project
|
1db71a5cf2572b4a80245a3a545d43cb0e778a84
|
[
"MIT"
] | null | null | null |
segmenter/score-segments.py
|
trane293/nlp-project
|
1db71a5cf2572b4a80245a3a545d43cb0e778a84
|
[
"MIT"
] | null | null | null |
segmenter/score-segments.py
|
trane293/nlp-project
|
1db71a5cf2572b4a80245a3a545d43cb0e778a84
|
[
"MIT"
] | 1
|
2021-01-27T01:20:00.000Z
|
2021-01-27T01:20:00.000Z
|
from __future__ import division
import optparse, sys, codecs
from collections import defaultdict  # NOTE(review): imported but never used below

# Command-line interface: segmenter output (or stdin) vs. reference file.
optparser = optparse.OptionParser()
optparser.add_option("-t", "--testfile", dest="testfile", default=None, help="Output from your segmenter program")
optparser.add_option("-r", "--referencefile", dest="referencefile", default="data/reference", help="Reference segmentation")
(opts, _) = optparser.parse_args()

# Read the segmenter output either from --testfile or from stdin.
if opts.testfile is None:
    test = list(sys.stdin)
else:
    with open(opts.testfile) as f:
        test = list(f)

with open(opts.referencefile) as f:
    reference = list(f)

# Scoring is line-aligned, so both inputs must have the same number of lines.
if len(test) != len(reference):
    raise ValueError("Error: output and reference do not have identical number of lines")
def precision(reference, test):
    """Fraction of entries in *test* that also appear in *reference*.

    Returns None when *test* is empty (precision is undefined there).
    """
    if not test:
        return None
    overlap = reference & test
    return float(len(overlap)) / len(test)
def recall(reference, test):
    """Fraction of entries in *reference* that were found in *test*.

    Returns None when *reference* is empty (recall is undefined there).
    """
    if not reference:
        return None
    overlap = reference & test
    return float(len(overlap)) / len(reference)
def fmeasure(reference, test, alpha=0.5):
    """Weighted harmonic mean of precision and recall.

    *alpha* weights precision; the default 0.5 yields the balanced F1 score.
    Returns None when either component is undefined, and 0 when either is 0.
    """
    prec = precision(reference, test)
    rec = recall(reference, test)
    if prec is None or rec is None:
        return None
    if prec == 0 or rec == 0:
        return 0
    return 1.0 / (alpha / prec + (1 - alpha) / rec)
def corpus_fmeasure(reference, test):
    """
    assumes that the input lines are in UTF-8
    used to compute f-measure for Chinese word segmentation
    sys.stdout is temporarily changed to enable debugging of UTF-8
    """
    # NOTE(review): Python-2-only code -- relies on the `unicode` builtin.
    # Wrap stdout in a UTF-8 writer for the duration of this call.
    old = sys.stdout
    sys.stdout = codecs.lookup('utf-8')[-1](sys.stdout)
    score = 0
    # Per-line F-measure over word sets, averaged over the whole corpus.
    for i in range(len(reference)):
        reference_len, test_len = 0,0
        for w in unicode(reference[i], 'utf-8').split():
            reference_len += len(w)
        for w in unicode(test[i], 'utf-8').split():
            test_len += len(w)
        test_utf8 = set(unicode(test[i], 'utf-8').split())
        reference_utf8 = set(unicode(reference[i], 'utf-8').split())
        # Character-count mismatch or empty output: force a zero-overlap set
        # so the line scores 0 instead of crediting a corrupt segmentation.
        if (reference_len != test_len) or (len(test_utf8) == 0):
            test_utf8 = set(['empty'])
        score += fmeasure(reference_utf8, test_utf8)
    #print "Score: %.2f" % ((score/len(test))*100)
    sys.stdout = old
    # Average per-line score expressed as a percentage.
    return ((score/len(test))*100)
# Python 2 print statement: report the corpus-level segmentation F-score.
print "Score: %.2f" % corpus_fmeasure(reference, test)
| 33.791045
| 124
| 0.639576
|
acfd58dff174aeb36474e49bb5b9cd5e6cb0f0d1
| 4,045
|
py
|
Python
|
tests/functional/test_install_extras.py
|
alexandrul/pip
|
c52b6bf44829dd89c2fa400a0e3496f3754e2305
|
[
"MIT"
] | 1
|
2019-12-20T05:27:25.000Z
|
2019-12-20T05:27:25.000Z
|
tests/functional/test_install_extras.py
|
alexandrul/pip
|
c52b6bf44829dd89c2fa400a0e3496f3754e2305
|
[
"MIT"
] | 7
|
2019-12-27T07:56:50.000Z
|
2022-01-25T03:41:39.000Z
|
tests/functional/test_install_extras.py
|
gudonglaile/pip-gui
|
ac9c49c54b7a882d8091959ba14eec37612a3845
|
[
"MIT"
] | 1
|
2020-02-14T16:53:19.000Z
|
2020-02-14T16:53:19.000Z
|
import textwrap
from os.path import join
import pytest
@pytest.mark.network
def test_simple_extras_install_from_pypi(script):
    """
    Test installing a package from PyPI using extras dependency Paste[openid].
    """
    install_result = script.pip(
        'install', 'Paste[openid]==1.7.5.1', expect_stderr=True,
    )
    # The [openid] extra must pull the openid package in alongside Paste.
    openid_folder = script.site_packages / 'openid'
    assert openid_folder in install_result.files_created, install_result.files_created
def test_extras_after_wheel(script, data):
    """
    Test installing a package with extras after installing from a wheel.
    """
    simple_pkg = script.site_packages / 'simple'

    # Without the extra, 'simple' must not be installed...
    plain = script.pip(
        'install', '--no-index', '-f', data.find_links,
        'requires_simple_extra', expect_stderr=True,
    )
    assert simple_pkg not in plain.files_created, plain.files_created

    # ...but a subsequent install requesting [extra] must add it.
    with_extra = script.pip(
        'install', '--no-index', '-f', data.find_links,
        'requires_simple_extra[extra]', expect_stderr=True,
    )
    assert simple_pkg in with_extra.files_created, with_extra.files_created
@pytest.mark.network
def test_no_extras_uninstall(script):
    """
    No extras dependency gets uninstalled when the root package is uninstalled
    """
    result = script.pip(
        'install', 'Paste[openid]==1.7.5.1', expect_stderr=True,
    )
    assert join(script.site_packages, 'paste') in result.files_created, (
        sorted(result.files_created.keys())
    )
    assert join(script.site_packages, 'openid') in result.files_created, (
        sorted(result.files_created.keys())
    )
    result2 = script.pip('uninstall', 'Paste', '-y')
    # openid should not be uninstalled
    initools_folder = script.site_packages / 'openid'
    # Bug fix: the failure message previously showed result.files_deleted
    # (the *install* result) instead of the uninstall result being asserted.
    assert initools_folder not in result2.files_deleted, result2.files_deleted
def test_nonexistent_extra_warns_user_no_wheel(script, data):
    """
    A warning is logged telling the user that the extra option they requested
    does not exist in the project they are wishing to install.

    This exercises source installs.
    """
    result = script.pip(
        'install', '--no-binary=:all:', '--no-index',
        '--find-links=' + data.find_links,
        'simple[nonexistent]', expect_stderr=True,
    )
    expected_warning = "simple 3.0 does not provide the extra 'nonexistent'"
    assert expected_warning in result.stderr, str(result)
def test_nonexistent_extra_warns_user_with_wheel(script, data):
    """
    A warning is logged telling the user that the extra option they requested
    does not exist in the project they are wishing to install.

    This exercises wheel installs.
    """
    result = script.pip(
        'install', '--no-index',
        '--find-links=' + data.find_links,
        'simplewheel[nonexistent]', expect_stderr=True,
    )
    expected_warning = "simplewheel 2.0 does not provide the extra 'nonexistent'"
    assert expected_warning in result.stderr
def test_nonexistent_options_listed_in_order(script, data):
    """
    Warn the user for each extra that doesn't exist.
    """
    result = script.pip(
        'install', '--no-index',
        '--find-links=' + data.find_links,
        'simplewheel[nonexistent, nope]', expect_stderr=True,
    )
    # Both warnings must appear, in the order the extras were requested.
    expected = (
        " WARNING: simplewheel 2.0 does not provide the extra 'nonexistent'\n"
        " WARNING: simplewheel 2.0 does not provide the extra 'nope'"
    )
    assert expected in result.stderr
def test_install_special_extra(script):
    """Check that uppercase letters and '-' in an extra name are handled."""
    # Build a dummy project on disk whose extra has mixed case and a dash.
    pkga_path = script.scratch_path / 'pkga'
    pkga_path.mkdir()
    pkga_path.joinpath("setup.py").write_text(textwrap.dedent("""
        from setuptools import setup
        setup(name='pkga',
              version='0.1',
              extras_require={'Hop_hOp-hoP': ['missing_pkg']},
        )
    """))

    requirement = '%s[Hop_hOp-hoP]' % pkga_path
    result = script.pip(
        'install', '--no-index', requirement,
        expect_error=True)
    # The extra resolved: pip tried (and failed) to fetch its dependency.
    assert (
        "Could not find a version that satisfies the requirement missing_pkg"
    ) in result.stderr, str(result)
| 31.356589
| 79
| 0.658838
|
acfd59a00c98c72a130aa8fad4743b61e3f06f5d
| 1,497
|
py
|
Python
|
Lib/site-packages/tensorflow_core/_api/v2/compat/v1/autograph/__init__.py
|
caiyongji/py36-tf2.0rc
|
c5b4b364ba14214534228570e58ef96b1a8bb6dc
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/site-packages/tensorflow_core/_api/v2/compat/v1/autograph/__init__.py
|
caiyongji/py36-tf2.0rc
|
c5b4b364ba14214534228570e58ef96b1a8bb6dc
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/site-packages/tensorflow_core/_api/v2/compat/v1/autograph/__init__.py
|
caiyongji/py36-tf2.0rc
|
c5b4b364ba14214534228570e58ef96b1a8bb6dc
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Conversion of plain Python into TensorFlow graph code.

NOTE: In TensorFlow 2.0, AutoGraph is automatically applied when using
`tf.function`. This module contains lower-level APIs for advanced use.

For more information, see the
[AutoGraph guide](https://www.tensorflow.org/guide/autograph).

By equivalent graph code we mean code that generates a TensorFlow graph when
run. The generated graph has the same effects as the original code when executed
(for example with `tf.function` or `tf.compat.v1.Session.run`). In other words,
using AutoGraph can be thought of as running Python in TensorFlow.

"""

from __future__ import print_function as _print_function

import sys as _sys

# Re-export the public autograph API surface under compat.v1.
from tensorflow._api.v2.compat.v1.autograph import experimental
from tensorflow.python.autograph.impl.api import to_code_v1 as to_code
from tensorflow.python.autograph.impl.api import to_graph_v1 as to_graph
from tensorflow.python.autograph.utils.ag_logging import set_verbosity
from tensorflow.python.autograph.utils.ag_logging import trace

del _print_function

from tensorflow.python.util import module_wrapper as _module_wrapper

# Replace this module in sys.modules with a TFModuleWrapper; the isinstance
# guard makes the wrapping idempotent on re-import.
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
  _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
      _sys.modules[__name__], "compat.v1.autograph", public_apis=None, deprecation=False,
      has_lite=False)
|
acfd59b53bf5e878c918a042670c34c09bdb23aa
| 6,558
|
py
|
Python
|
salt/utils/oset.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
salt/utils/oset.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/utils/oset.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
'''
Available at repository https://github.com/LuminosoInsight/ordered-set
salt.utils.oset
~~~~~~~~~~~~~~~~
An OrderedSet is a custom MutableSet that remembers its order, so that every
entry has an index that can be looked up.
Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
and released under the MIT license.
Rob Speer's changes are as follows:
- changed the content from a doubly-linked list to a regular Python list.
Seriously, who wants O(1) deletes but O(N) lookups by index?
- add() returns the index of the added item
- index() just returns the index of an item
- added a __getstate__ and __setstate__ so it can be pickled
- added __getitem__
'''
from __future__ import absolute_import, unicode_literals, print_function
import collections
# slice(None) equals the full slice [:]; OrderedSet.__getitem__ compares
# against it to return self unchanged for a full-slice lookup.
SLICE_ALL = slice(None)
# Version of the vendored ordered-set implementation.
__version__ = '2.0.1'
def is_iterable(obj):
    """
    Are we being asked to look up a list of things, instead of a single thing?

    Strings and tuples are treated as atomic values to look up, not
    iterables (tuples are immutable, hence valid set entries).  Anything
    else exposing ``__iter__`` -- including types this module doesn't know
    about, such as NumPy arrays -- counts as iterable.  There is no need to
    special-case the Python 2 ``unicode`` type: it has no ``__iter__``.
    """
    if isinstance(obj, (str, tuple)):
        return False
    return hasattr(obj, '__iter__')
try:
    # MutableSet lives in collections.abc on Python 3; the bare
    # `collections.MutableSet` alias used previously was removed in
    # Python 3.10, which made this module fail to import.
    from collections.abc import MutableSet
except ImportError:  # Python 2 fallback
    from collections import MutableSet


class OrderedSet(MutableSet):
    """
    An OrderedSet is a custom MutableSet that remembers its order, so that
    every entry has an index that can be looked up.
    """
    def __init__(self, iterable=None):
        # items: insertion-ordered list of entries
        # map: entry -> index into items (kept consistent with items)
        self.items = []
        self.map = {}
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        """
        Get the item at a given index.

        If `index` is a slice, you will get back that slice of items. If it's
        the slice [:], exactly the same object is returned. (If you want an
        independent copy of an OrderedSet, use `OrderedSet.copy()`.)

        If `index` is an iterable, you'll get the OrderedSet of items
        corresponding to those indices. This is similar to NumPy's
        "fancy indexing".
        """
        if index == SLICE_ALL:
            return self
        elif hasattr(index, '__index__') or isinstance(index, slice):
            result = self.items[index]
            if isinstance(result, list):
                return OrderedSet(result)
            else:
                return result
        elif is_iterable(index):
            return OrderedSet([self.items[i] for i in index])
        else:
            raise TypeError("Don't know how to index an OrderedSet by {}".format(repr(index)))

    def copy(self):
        """Return an independent shallow copy preserving order."""
        return OrderedSet(self)

    def __getstate__(self):
        if len(self) == 0:
            # The state can't be an empty list.
            # We need to return a truthy value, or else __setstate__ won't be run.
            #
            # This could have been done more gracefully by always putting the state
            # in a tuple, but this way is backwards- and forwards- compatible with
            # previous versions of OrderedSet.
            return (None,)
        else:
            return list(self)

    def __setstate__(self, state):
        if state == (None,):
            self.__init__([])
        else:
            self.__init__(state)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):  # pylint: disable=arguments-differ
        """
        Add `key` as an item to this OrderedSet, then return its index.

        If `key` is already in the OrderedSet, return the index it already
        had.
        """
        if key not in self.map:
            self.map[key] = len(self.items)
            self.items.append(key)
        return self.map[key]
    append = add

    def update(self, sequence):
        """
        Update the set with the given iterable sequence, then return the index
        of the last element inserted.
        """
        item_index = None
        try:
            for item in sequence:
                item_index = self.add(item)
        except TypeError:
            raise ValueError("Argument needs to be an iterable, got {}".format(type(sequence)))
        return item_index

    def index(self, key):
        """
        Get the index of a given entry, raising an IndexError if it's not
        present.

        `key` can be an iterable of entries that is not a string, in which case
        this returns a list of indices.
        """
        if is_iterable(key):
            return [self.index(subkey) for subkey in key]
        return self.map[key]

    def pop(self):
        """
        Remove and return the last element from the set.

        Raises KeyError if the set is empty.
        """
        if not self.items:
            raise KeyError('Set is empty')

        elem = self.items[-1]
        del self.items[-1]
        del self.map[elem]
        return elem

    def discard(self, key):  # pylint: disable=arguments-differ
        """
        Remove an element.  Do not raise an exception if absent.

        The MutableSet mixin uses this to implement the .remove() method, which
        *does* raise an error when asked to remove a non-existent item.
        """
        if key in self:
            i = self.map[key]
            del self.items[i]
            del self.map[key]
            # shift down the stored index of every entry that followed `key`
            for k, v in self.map.items():
                if v >= i:
                    self.map[k] = v - 1

    def clear(self):
        """
        Remove all items from this OrderedSet.
        """
        del self.items[:]
        self.map.clear()

    def __iter__(self):
        return iter(self.items)

    def __reversed__(self):
        return reversed(self.items)

    def __repr__(self):
        if not self:
            return "{}()".format(self.__class__.__name__)
        return "{}({})".format(self.__class__.__name__, repr(list(self)))

    def __eq__(self, other):
        # Another OrderedSet compares order-sensitively; any other iterable
        # compares as an unordered set.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and self.items == other.items
        try:
            other_as_set = set(other)
        except TypeError:
            # If `other` can't be converted into a set, it's not equal.
            return False
        else:
            return set(self) == other_as_set
| 31.990244
| 95
| 0.598963
|
acfd59b99b5dc327fe8424a9362a0a6a65f94dbb
| 1,197
|
py
|
Python
|
tests/test_action.py
|
guionardo/py-housekeeping
|
3aaedc001b7ddb0b5a3397f99bd6bf4e8958f69c
|
[
"MIT"
] | null | null | null |
tests/test_action.py
|
guionardo/py-housekeeping
|
3aaedc001b7ddb0b5a3397f99bd6bf4e8958f69c
|
[
"MIT"
] | null | null | null |
tests/test_action.py
|
guionardo/py-housekeeping
|
3aaedc001b7ddb0b5a3397f99bd6bf4e8958f69c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import unittest
from src.action import Action
from utils import remove_dir
class TestAction(unittest.TestCase):
    """Unit tests for Action configuration parsing and destiny handling."""

    TEST_PATH = './test_path'

    @classmethod
    def setUpClass(cls):
        # Start from a clean slate in case an earlier run left the dir behind.
        remove_dir(cls.TEST_PATH)

    @classmethod
    def tearDownClass(cls):
        remove_dir(cls.TEST_PATH)

    def test_invalid_action_config(self):
        # An unknown action name must be rejected.
        with self.assertRaises(ValueError):
            Action({'action': 'error'})

    def test_valid_action_config(self):
        delete_action = Action({'action': 'delete', 'action_destiny': '.'})
        self.assertIsInstance(str(delete_action), str)
        move_action = Action({'action': 'move', 'action_destiny': '.'})
        self.assertIsInstance(str(move_action), str)
        self.assertIsInstance(move_action.to_dict(), dict)

    def test_invalid_destiny(self):
        # 'move' requires a non-empty destination.
        with self.assertRaises(ValueError):
            Action({'action': 'move', 'action_destiny': ''})

    def test_create_destiny(self):
        # Constructing a 'move' action must create the destination directory.
        Action({'action': 'move', 'action_destiny': self.TEST_PATH})
        self.assertTrue(os.path.isdir(self.TEST_PATH))
| 27.204545
| 54
| 0.597327
|
acfd59efac9f089d3976f79eddaf1307a803342e
| 12,674
|
py
|
Python
|
tofu/data/_inversions_checks.py
|
ToFuProject/tofu
|
e341d466d934fcce74dfd40a4556509e7439045d
|
[
"MIT"
] | 56
|
2017-07-09T10:29:45.000Z
|
2022-03-31T02:44:50.000Z
|
tofu/data/_inversions_checks.py
|
ToFuProject/tofu
|
e341d466d934fcce74dfd40a4556509e7439045d
|
[
"MIT"
] | 522
|
2017-07-02T21:06:07.000Z
|
2022-03-02T08:07:57.000Z
|
tofu/data/_inversions_checks.py
|
ToFuProject/tofu
|
e341d466d934fcce74dfd40a4556509e7439045d
|
[
"MIT"
] | 9
|
2017-07-02T20:38:53.000Z
|
2021-12-04T00:12:30.000Z
|
# -*- coding: utf-8 -*-
# Built-in
import math

# Common
import numpy as np
import scipy.sparse as scpsp

# specific
from . import _generic_check
# Names of the supported inversion algorithms (validated in _compute_check).
_LALGO = [
    'inv_linear_augTikho_sparse',
    'inv_linear_augTikho_dense',
    'inv_linear_augTikho_chol_dense',
    'inv_linear_augTikho_chol_sparse',
    'inv_linear_augTikho_pos_dense',
    'inv_linear_DisPrinc_sparse',
]


# Regularization-parameter selection strategies embedded in the algo names.
_LREGPARAM_ALGO = [
    'augTikho',
    'DisPrinc',
]
# #############################################################################
# #############################################################################
# main
# #############################################################################
def _compute_check(
    # input data
    coll=None,
    key_matrix=None,
    key_data=None,
    key_sigma=None,
    data=None,
    sigma=None,
    # choice of algo
    isotropic=None,
    sparse=None,
    positive=None,
    cholesky=None,
    regparam_algo=None,
    algo=None,
    # regularity operator
    solver=None,
    operator=None,
    geometry=None,
    # misc
    conv_crit=None,
    chain=None,
    verb=None,
    store=None,
    # algo and solver-specific options
    kwdargs=None,
    method=None,
    options=None,
):
    """Validate and normalize every input of an inversion computation.

    Resolves keys against the collection *coll*, normalizes data/sigma to
    2d (nt, nchan) arrays, picks or validates the algorithm name, fetches
    the regularity operator, enforces a consistent sparse/dense layout,
    fills default misc parameters, and delegates algo-specific keyword
    defaults to _algo_check.  Returns a tuple of all normalized values
    (see the return statement for the exact order).
    """

    # ----
    # keys

    # key_matrix: default to the first registered geometry matrix
    lk = list(coll.dobj.get('matrix', {}).keys())
    if key_matrix is None and len(lk):
        key_matrix = lk[0]
    key_matrix = _generic_check._check_var(
        key_matrix, 'key_matrix',
        types=str,
        allowed=lk,
    )
    keybs = coll.dobj['matrix'][key_matrix]['bsplines']
    keym = coll.dobj['bsplines'][keybs]['mesh']
    matrix = coll.ddata[coll.dobj['matrix'][key_matrix]['data']]['data']
    shapemat = matrix.shape
    crop = coll.dobj['matrix'][key_matrix]['crop']

    if np.any(~np.isfinite(matrix)):
        msg = "Geometry matrix should not contain NaNs or infs!"
        raise Exception(msg)

    # key_data: only data whose last axis matches the matrix channel count
    if key_data is not None or (key_data is None and data is None):
        lk = [
            kk for kk, vv in coll.ddata.items()
            if vv['data'].ndim in [1, 2]
            and vv['data'].shape[-1] == shapemat[0]
        ]
        if key_data is None and len(lk):
            key_data = lk[0]
        key_data = _generic_check._check_var(
            key_data, 'key_data',
            types=str,
            allowed=lk,
        )
        data = coll.ddata[key_data]['data']

    # ------------
    # data, sigma

    # data: coerce to a 2d (nt, nchan) finite ndarray
    data = _generic_check._check_var(
        data, 'data',
        types=(np.ndarray, list, tuple),
    )
    if not isinstance(data, np.ndarray):
        data = np.asarray(data)
    if data.ndim not in [1, 2] or shapemat[0] not in data.shape:
        msg = (
            "Arg data must have dim in [1, 2]"
            f" and {shapemat[0]} must be in shape\n"
            f"\t- data.shape: {data.shape}"
        )
        raise Exception(msg)
    if data.ndim == 1:
        data = data[None, :]
    if data.shape[1] != shapemat[0]:
        data = data.T
    if np.any(~np.isfinite(data)):
        msg = "Arg data should not contain NaNs or inf!"
        raise Exception(msg)

    # key_sigma: optional stored uncertainties, same channel count as data
    if key_sigma is not None:
        lk = [
            kk for kk, vv in coll.ddata.items()
            if vv['data'].ndim in [1, 2]
            and vv['data'].shape[-1] == shapemat[0]
        ]
        key_sigma = _generic_check._check_var(
            key_sigma, 'key_sigma',
            types=str,
            allowed=lk,
        )
        sigma = coll.ddata[key_sigma]['data']

    # sigma: scalar => relative to mean |data|; default 5% of mean |data|
    if np.isscalar(sigma):
        sigma = np.full((shapemat[0],), sigma*np.nanmean(np.abs(data)))

    sigma = _generic_check._check_var(
        sigma, 'sigma',
        default=np.full((shapemat[0],), 0.05*np.nanmean(np.abs(data))),
        types=(np.ndarray, list, tuple),
    )
    if not isinstance(sigma, np.ndarray):
        sigma = np.asarray(sigma)
    if sigma.ndim not in [1, 2] or shapemat[0] not in sigma.shape:
        msg = (
            "Arg sigma must have dim in [1, 2]"
            f" and {shapemat[0]} must be in shape\n"
            f"\t- sigma.shape = {sigma.shape}"
        )
        raise Exception(msg)
    if sigma.ndim == 1:
        sigma = sigma[None, :]
    elif sigma.ndim == 2 and data.shape != sigma.shape:
        # NOTE(review): checked before the transpose below, so a 2d sigma
        # must already be oriented like data -- confirm intended.
        msg = (
            "Arg sigma must have the same shape as data!\n"
            f"\t- data.shape: {data.shape}\n"
            f"\t- sigma.shape: {sigma.shape}\n"
        )
        raise Exception(msg)
    if sigma.shape[1] != shapemat[0]:
        sigma = sigma.T
    if np.any(~np.isfinite(sigma)):
        msg = "Arg sigma should not contain NaNs or inf!"
        raise Exception(msg)

    # --------------
    # choice of algo

    # lc[0]: no explicit algo name; lc[1]: no selection flags provided
    lc = [
        algo is None,
        all([kk is None for kk in [isotropic, positive, sparse, cholesky]])
    ]
    if not any(lc):
        msg = (
            "Please provide either (xor):\n"
            "\t- algo: directly provide the algo name\n"
            "\t- flags for choosing the algo:\n"
            "\t\t- isotropic: whether to perform isotropic regularization\n"
            "\t\t- sparse: whether to use sparse matrices\n"
            "\t\t- positive: whether to enforce a positivity constraint\n"
            "\t\t- cholesky: whether to use cholesky factorization\n"
        )
        raise Exception(msg)

    if all(lc):
        # neither given: fall back to the default algorithm
        algo = 'inv_linear_augTikho_sparse'
        lc[0] = False

    if not lc[0] and lc[1]:
        # extract keywrods from algo name
        isotropic = True
        positive = 'pos' in algo
        sparse = 'sparse' in algo
        cholesky = 'chol' in algo
        for aa in _LREGPARAM_ALGO:
            if f'_{aa}_' in algo:
                regparam_algo = aa
                break
        else:
            # NOTE(review): "Unreckognized" typo in message, kept byte-identical
            msg = 'Unreckognized algo for regularization parameter!'
            raise Exception(msg)

    elif lc[0] and not lc[1]:
        # get algo name from keywords

        # isotropic
        isotropic = _generic_check._check_var(
            isotropic, 'isotropic',
            default=True,
            types=bool,
        )
        if isotropic is False:
            msg = "Anisotropic regularization unavailable yet"
            raise NotImplementedError(msg)

        # sparse and matrix and operator
        sparse = _generic_check._check_var(
            sparse, 'sparse',
            default=True,
            types=bool,
        )

        # positive
        positive = _generic_check._check_var(
            positive, 'positive',
            default=False,
            types=bool,
        )

        # cholesky
        cholesky = _generic_check._check_var(
            cholesky, 'cholesky',
            default=False,
            types=bool,
        )
        if positive and cholesky is False:
            msg = "cholesky cannot be used for positive constraint!"
            raise Exception(msg)

        # regparam_algo
        regparam_algo = _generic_check._check_var(
            regparam_algo, 'regparam_algo',
            default='augTikho',
            types=str,
            allowed=_LREGPARAM_ALGO,
        )

        # assemble the canonical algo name from the flags
        algo = f"inv_linear_{regparam_algo}"
        if cholesky:
            algo += '_chol'
        elif positive:
            algo += '_pos'
        algo += f"_{'sparse' if sparse else 'dense'}"

    # final algo check
    algo = _generic_check._check_var(
        algo, 'algo',
        default=None,
        types=str,
        allowed=_LALGO,
    )

    # -------------------
    # regularity operator

    # get operator
    opmat, operator, geometry, dim, ref, crop = coll.add_bsplines_operator(
        key=keybs,
        operator=operator,
        geometry=geometry,
        returnas=True,
        store=False,
        crop=crop,
    )

    nchan, nbs = matrix.shape
    # the operator must be quadratic: (nbs, nbs) block(s)
    if isinstance(opmat, tuple):
        assert all([op.shape == (nbs, nbs) for op in opmat])
    elif opmat.ndim == 1:
        msg = "Inversion algorithm requires a quadratic operator!"
        raise Exception(msg)
    else:
        assert opmat.shape == (nbs,) or opmat.shape == (nbs, nbs)
        opmat = (opmat,)

    if not scpsp.issparse(opmat[0]):
        assert all([np.all(np.isfinite(op)) for op in opmat])

    assert data.shape[1] == nchan
    # NOTE(review): nt (and dim/ref above) are computed but not returned
    nt = data.shape[0]

    # -------------------
    # consistent sparsity

    # sparse: convert matrix and operator to a matching representation
    if sparse is True:
        if not scpsp.issparse(matrix):
            matrix = scpsp.csc_matrix(matrix)
        if not scpsp.issparse(opmat[0]):
            opmat = [scpsp.csc_matrix(pp) for pp in opmat]
    elif sparse is False:
        if scpsp.issparse(matrix):
            matrix = matrix.toarray()
        if scpsp.issparse(opmat[0]):
            opmat = [scpsp.csc_matrix(pp).toarray() for pp in opmat]

    # -----------------------
    # miscellaneous parameter

    # conv_crit
    conv_crit = _generic_check._check_var(
        conv_crit, 'conv_crit',
        default=1e-4,
        types=float,
    )

    # chain
    chain = _generic_check._check_var(
        chain, 'chain',
        default=True,
        types=bool,
    )

    # verb: normalize booleans to the 0/1/2 verbosity scale
    verb = _generic_check._check_var(
        verb, 'verb',
        default=True,
        types=(bool, int),
        allowed=[False, 0, True, 1, 2],
    )
    if verb is False:
        verb = 0
    if verb is True:
        verb = 1

    # store: only possible when the data came from the collection
    store = _generic_check._check_var(
        store, 'store',
        default=True,
        types=bool,
    )
    if key_data is None:
        store = False

    # solver
    solver = _generic_check._check_var(
        solver, 'solver',
        default='spsolve',
        types=str,
        allowed=['spsolve'],
    )

    # ----------------------------------------
    # algo-specific kwdargs and solver options

    # kwdargs, method, options
    kwdargs, method, options = _algo_check(
        algo,
        kwdargs=kwdargs,
        options=options,
        nchan=shapemat[0],
        nbs=shapemat[1],
        conv_crit=conv_crit,
    )

    return (
        key_matrix, key_data, key_sigma, keybs, keym,
        data, sigma, matrix, opmat, operator, geometry,
        isotropic, sparse, positive, cholesky, regparam_algo, algo,
        conv_crit, crop, chain, kwdargs, method, options,
        solver, verb, store,
    )
# #############################################################################
# #############################################################################
# ikwdargs / options for each algo
# #############################################################################
def _algo_check(
    algo,
    kwdargs=None,
    method=None,
    options=None,
    nchan=None,
    nbs=None,
    conv_crit=None,
):
    """Return algorithm-specific keyword arguments and solver options.

    Parameters
    ----------
    algo : str
        Algorithm name; the substrings 'augTikho', 'DisPrinc' and 'quad'
        select which defaults are filled in.
    kwdargs : dict, optional
        User-provided algorithm parameters; missing entries are filled
        with defaults (the dict is mutated in place and returned).
    method : str, optional
        Low-level scipy.optimize method name (only set for 'quad' algos).
    options : dict, optional
        Low-level solver options (only set for 'quad' algos).
    nchan : int, optional
        Number of data channels (used by 'augTikho').
    nbs : int, optional
        Number of basis functions (used by 'augTikho' when
        kwdargs['nbs_fixed'] is False).
    conv_crit : float, optional
        Convergence criterion; the solver's ftol is derived from it.

    Returns
    -------
    kwdargs : dict
    method : str or None
    options : dict or None
    """
    # local import: ``np.math`` was deprecated and removed in numpy >= 1.25,
    # so use the stdlib math module directly (identical behavior)
    import math

    # ------------------------
    # generic kwdargs

    if kwdargs is None:
        kwdargs = {}

    # generic defaults
    if kwdargs.get('maxiter') is None:
        kwdargs['maxiter'] = 100
    if kwdargs.get('tol') is None:
        kwdargs['tol'] = 1.e-6

    # ------------------------
    # algo specific kwdargs

    # kwdargs specific to aug. tikhonov
    if 'augTikho' in algo:
        a0 = kwdargs.get('a0', 10)
        a1 = kwdargs.get('a1', 2)

        # to have [x]=1
        kwdargs['b0'] = math.factorial(a0)**(1 / (a0 + 1))
        kwdargs['b1'] = math.factorial(a1)**(1 / (a1 + 1))
        kwdargs['a0'] = a0
        kwdargs['a1'] = a1

        # Exponent for rescaling of a0bis
        # typically in [1/3 ; 1/2], but real limits are 0 < d < 1 (or 2 ?)
        if kwdargs.get('d') is None:
            kwdargs['d'] = 0.95

        if kwdargs.get('conv_reg') is None:
            kwdargs['conv_reg'] = True

        if kwdargs.get('nbs_fixed') is None:
            kwdargs['nbs_fixed'] = True

        if kwdargs['nbs_fixed']:
            # NOTE(review): 1200 looks like a hard-coded reference basis
            # size used when nbs is considered fixed - confirm against the
            # original augmented-Tikhonov formulation
            kwdargs['a0bis'] = kwdargs['a0'] - 1. + 1200./2.
        else:
            kwdargs['a0bis'] = kwdargs['a0'] - 1. + nbs/2.
        kwdargs['a1bis'] = kwdargs['a1'] - 1. + nchan/2.

    # kwdargs specific to discrepancy principle
    elif 'DisPrinc' in algo:
        if kwdargs.get('chi2n_obj') is None:
            kwdargs['chi2n_obj'] = 1.
        if kwdargs.get('chi2n_tol') is None:
            kwdargs['chi2n_tol'] = 0.05

    # ------------------------
    # low-level solver options

    if 'quad' in algo:
        if options is None:
            options = {}
        if method is None:
            method = 'L-BFGS-B'
        if method == 'L-BFGS-B':
            if options.get('ftol') is None:
                options['ftol'] = conv_crit/100.
            if options.get('disp') is None:
                options['disp'] = False
        else:
            # only L-BFGS-B is wired up for quadratic algorithms so far
            raise NotImplementedError

    return kwdargs, method, options
| 26.570231
| 79
| 0.516254
|
acfd5a42c4f7072a3db2ae9096ccc53f891c8c76
| 10,753
|
py
|
Python
|
ansible_collection/Juniper/junos/plugins/modules/juniper_junos_srx_cluster.py
|
isometry/ansible-junos-stdlib
|
63345737c43057b425593bb57d7b8a0eaaf766c1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ansible_collection/Juniper/junos/plugins/modules/juniper_junos_srx_cluster.py
|
isometry/ansible-junos-stdlib
|
63345737c43057b425593bb57d7b8a0eaaf766c1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
ansible_collection/Juniper/junos/plugins/modules/juniper_junos_srx_cluster.py
|
isometry/ansible-junos-stdlib
|
63345737c43057b425593bb57d7b8a0eaaf766c1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 1999-2018, Juniper Networks Inc.
# 2014, Patrik Bok
# 2015, Rick Sherman
#
# All rights reserved.
#
# License: Apache 2.0
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the Juniper Networks nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Juniper Networks, Inc. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Juniper Networks, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import, division, print_function
ANSIBLE_METADATA = {'metadata_version': '1.1',
'supported_by': 'community',
'status': ['stableinterface']}
DOCUMENTATION = '''
---
extends_documentation_fragment:
- juniper_junos_common.connection_documentation
- juniper_junos_common.logging_documentation
module: juniper_junos_srx_cluster
version_added: "2.0.0" # of Juniper.junos role
author: "Juniper Networks - Stacy Smith (@stacywsmith)"
short_description: Add or remove SRX chassis cluster configuration
description:
- Add an SRX chassis cluster configuration and reboot the device. Assuming
the device is capable of forming an SRX cluster and has the correct
cables connected, this will form an SRX cluster.
- If an SRX chassis cluster is already present, setting I(cluster_enable) to
C(false) will remove the SRX chassis cluster configuration and reboot
the device causing the SRX cluster to be broken and the device to return
to stand-alone mode.
options:
enable:
description:
- Enable or disable cluster mode. When C(true) cluster mode is enabled
and I(cluster_id) and I(node_id) must also be specified. When C(false)
cluster mode is disabled and the device returns to stand-alone mode.
required: true
default: none
type: bool
aliases:
- cluster_enable
cluster_id:
description:
- The cluster ID to configure.
- Required when I(enable) is C(true).
required: false
default: none
type: int
aliases:
- cluster
node_id:
description:
- The node ID to configure. (C(0) or C(1))
- Required when I(enable) is C(true).
required: false
default: none
type: int
aliases:
- node
'''
EXAMPLES = '''
---
- name: Manipulate the SRX cluster configuration of Junos SRX devices
hosts: junos-all
connection: local
gather_facts: no
roles:
- Juniper.junos
tasks:
- name: Enable an SRX cluster
juniper_junos_srx_cluster:
enable: true
cluster_id: 4
node_id: 0
register: response
- name: Print the response.
debug:
var: response.config_lines
- name: Disable an SRX cluster
juniper_junos_srx_cluster:
enable: false
register: response
- name: Print the response.
debug:
var: response.config_lines
'''
RETURN = '''
changed:
description:
- Indicates if the device's configuration has changed, or would have
changed when in check mode.
returned: success
type: bool
failed:
description:
- Indicates if the task failed.
returned: always
type: bool
msg:
description:
- A human-readable message indicating the result.
returned: always
type: str
reboot:
description:
- Indicates if a reboot of the device has been initiated.
returned: success
type: bool
'''
# Standard library imports
"""From Ansible 2.1, Ansible uses Ansiballz framework for assembling modules
But custom module_utils directory is supported from Ansible 2.3
Reference for the issue: https://groups.google.com/forum/#!topic/ansible-project/J8FL7Z1J1Mw """
# Ansiballz packages module_utils into ansible.module_utils
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.Juniper.junos.plugins.module_utils import juniper_junos_common
def main():
    """Entry point: add or remove SRX chassis cluster configuration.

    Parses the module arguments, validates the cluster_id/node_id ranges,
    compares the desired cluster state against the device facts, and
    (unless in check mode) invokes the chassis-cluster enable/disable RPC,
    which reboots the device. Exits via exit_json/fail_json.
    """
    # Create the module instance.
    junos_module = juniper_junos_common.JuniperJunosModule(
        argument_spec=dict(
            enable=dict(type='bool',
                        required=True,
                        aliases=['cluster_enable'],
                        default=None),
            cluster_id=dict(type='int',
                            required=False,
                            aliases=['cluster'],
                            default=None),
            node_id=dict(type='int',
                         required=False,
                         aliases=['node'],
                         default=None)
        ),
        # Required if options
        # If enable is True, then cluster_id and node_id must be set.
        required_if=[['enable', True, ['cluster_id', 'node_id']]],
        # Check mode is implemented.
        supports_check_mode=True
    )

    # Do additional argument verification.

    # Straight from params
    enable = junos_module.params.get('enable')
    cluster_id = junos_module.params.get('cluster_id')
    node_id = junos_module.params.get('node_id')

    # cluster_id must be between 0 and 255
    if cluster_id is not None:
        if cluster_id < 0 or cluster_id > 255:
            junos_module.fail_json(msg="The cluster_id option (%s) must have "
                                       "an integer value between 0 and 255." %
                                       (cluster_id))

    # node_id must be between 0 and 1
    if node_id is not None:
        if node_id < 0 or node_id > 1:
            junos_module.fail_json(msg="The node_id option (%s) must have a "
                                       "value of 0 or 1." % (node_id))

    # Initialize the results. Assume failure until we know it's success.
    results = {'msg': '',
               'changed': False,
               'reboot': False,
               'failed': True}

    junos_module.logger.debug("Check current SRX cluster operational state.")
    current_cluster_state = junos_module.dev.facts['srx_cluster']
    current_cluster_id = junos_module.dev.facts['srx_cluster_id']
    if current_cluster_id is not None:
        current_cluster_id = int(current_cluster_id)
    # The RE name looks like "node0"/"node1" when clustered; pull the
    # numeric node id out of it (None/empty when not clustered).
    current_node_name = junos_module.dev.re_name
    current_node_id = None
    if current_node_name is not None:
        (_, _, current_node_id) = current_node_name.partition('node')
        if current_node_id:
            current_node_id = int(current_node_id)
    junos_module.logger.debug(
        "Current SRX cluster operational state: %s, cluster_id: %s, "
        "node_id: %s",
        'enabled' if current_cluster_state else 'disabled',
        str(current_cluster_id),
        str(current_node_id))

    # Is a state change needed?
    if current_cluster_state != enable:
        junos_module.logger.debug(
            "SRX cluster configuration change needed. Current state: %s. "
            "Desired state: %s",
            'enabled' if current_cluster_state else 'disabled',
            'enabled' if enable else 'disabled')
        results['changed'] = True

    # Is a cluster ID change needed?
    if (enable is True and current_cluster_id is not None and
            current_cluster_id != cluster_id):
        junos_module.logger.debug(
            "SRX cluster ID change needed. Current cluster ID: %d. "
            "Desired cluster ID: %d",
            current_cluster_id, cluster_id)
        results['changed'] = True

    # Is a node ID change needed?
    if (enable is True and current_node_id is not None and
            current_node_id != node_id):
        junos_module.logger.debug(
            "SRX node ID change needed. Current node ID: %d. "
            "Desired cluster ID: %d",
            current_node_id, node_id)
        results['changed'] = True

    results['msg'] = 'Current state: %s, cluster_id: %s, node_id: %s' % \
                     ('enabled' if current_cluster_state else 'disabled',
                      str(current_cluster_id),
                      str(current_node_id))

    if results['changed'] is True:
        results['msg'] += ' Desired state: %s, cluster_id: %s, ' \
                          'node_id: %s' % \
                          ('enabled' if enable else 'disabled',
                           str(cluster_id),
                           str(node_id))

        if not junos_module.check_mode:
            results['msg'] += ' Initiating change.'
            try:
                output = None
                if enable is True:
                    # Enabling the cluster requires cluster and node ids;
                    # the RPC reboots the device as part of the change.
                    resp = junos_module.dev.rpc.set_chassis_cluster_enable(
                        cluster_id=str(cluster_id), node=str(node_id),
                        reboot=True, normalize=True
                    )
                else:
                    resp = junos_module.dev.rpc.set_chassis_cluster_disable(
                        reboot=True, normalize=True
                    )
                if resp is not None:
                    output = resp.getparent().findtext('.//output')
                    if output is None:
                        output = resp.getparent().findtext('.//message')
                results['msg'] += ' Reboot initiated. Response: %s' % (output)
                results['reboot'] = True
            except (junos_module.pyez_exception.ConnectError,
                    junos_module.pyez_exception.RpcError) as ex:
                junos_module.logger.debug('Error: %s', str(ex))
                results['msg'] += ' Error: %s' % (str(ex))
                junos_module.fail_json(**results)

    # If we made it this far, everything was successful.
    results['failed'] = False

    # Return response.
    junos_module.exit_json(**results)


if __name__ == '__main__':
    main()
| 36.450847
| 96
| 0.63015
|
acfd5b488c4c1b59de364befcf2f7fae854b2d58
| 12,463
|
py
|
Python
|
venv/lib/python2.7/site-packages/ansible/modules/network/onyx/onyx_linkagg.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 37
|
2017-08-15T15:02:43.000Z
|
2021-07-23T03:44:31.000Z
|
ansible/ansible/modules/network/onyx/onyx_linkagg.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 12
|
2018-01-10T05:25:25.000Z
|
2021-11-28T06:55:48.000Z
|
ansible/ansible/modules/network/onyx/onyx_linkagg.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 49
|
2017-08-15T09:52:13.000Z
|
2022-03-21T17:11:54.000Z
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_linkagg
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Manage link aggregation groups on Mellanox ONYX network devices
description:
- This module provides declarative management of link aggregation groups
on Mellanox ONYX network devices.
options:
name:
description:
- Name of the link aggregation group.
required: true
mode:
description:
- Mode of the link aggregation group. A value of C(on) will enable LACP.
C(active) configures the link to actively information about the state of the link,
or it can be configured in C(passive) mode ie. send link state information only when
received them from another link.
default: on
choices: ['on', 'active', 'passive']
members:
description:
- List of members interfaces of the link aggregation group. The value can be
single interface or list of interfaces.
required: true
aggregate:
description: List of link aggregation definitions.
purge:
description:
- Purge link aggregation groups not defined in the I(aggregate) parameter.
default: false
type: bool
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure link aggregation group
onyx_linkagg:
name: Po1
members:
- Eth1/1
- Eth1/2
- name: remove configuration
onyx_linkagg:
name: Po1
state: absent
- name: Create aggregate of linkagg definitions
onyx_linkagg:
aggregate:
- { name: Po1, members: [Eth1/1] }
- { name: Po2, members: [Eth1/2] }
- name: Remove aggregate of linkagg definitions
onyx_linkagg:
aggregate:
- name: Po1
- name: Po2
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always.
type: list
sample:
- interface port-channel 1
- exit
- interface ethernet 1/1 channel-group 1 mode on
- interface ethernet 1/2 channel-group 1 mode on
"""
import re
from copy import deepcopy
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import get_interfaces_config
class OnyxLinkAggModule(BaseOnyxModule):
    """Declarative management of LAG/MLAG port-channels on Mellanox ONYX."""

    # Matches summary headers like "1 Po1(U)": group 1 is the LAG name,
    # group 2 the single-letter state ('U' means up).
    LAG_ID_REGEX = re.compile(r"^\d+ (Po\d+|Mpo\d+)\(([A-Z])\)$")
    LAG_NAME_REGEX = re.compile(r"^(Po|Mpo)(\d+)$")
    IF_NAME_REGEX = re.compile(r"^(Eth\d+\/\d+|Eth\d+\/\d+\/\d+)(.*)$")
    PORT_CHANNEL = 'port-channel'
    CHANNEL_GROUP = 'channel-group'
    MLAG_PORT_CHANNEL = 'mlag-port-channel'
    MLAG_CHANNEL_GROUP = 'mlag-channel-group'
    MLAG_SUMMARY = 'MLAG Port-Channel Summary'
    LAG_TYPE = 'lag'
    MLAG_TYPE = 'mlag'
    # LAG type -> interface type keyword used when querying the device
    IF_TYPE_MAP = dict(
        lag=PORT_CHANNEL,
        mlag=MLAG_PORT_CHANNEL
    )
    _purge = False

    @classmethod
    def _get_element_spec(cls):
        # Argument spec for a single link-aggregation item.
        return dict(
            name=dict(type='str'),
            members=dict(type='list'),
            mode=dict(default='on', choices=['active', 'on', 'passive']),
            state=dict(default='present', choices=['present', 'absent']),
        )

    @classmethod
    def _get_aggregate_spec(cls, element_spec):
        # Spec for items inside the 'aggregate' list: 'name' becomes
        # mandatory and per-item defaults are removed so top-level values
        # can be inherited per item.
        aggregate_spec = deepcopy(element_spec)
        aggregate_spec['name'] = dict(required=True)

        # remove default in aggregate spec, to handle common arguments
        remove_default_spec(aggregate_spec)
        return aggregate_spec

    def init_module(self):
        """ module initialization
        """
        element_spec = self._get_element_spec()
        aggregate_spec = self._get_aggregate_spec(element_spec)
        argument_spec = dict(
            aggregate=dict(type='list', elements='dict',
                           options=aggregate_spec),
            purge=dict(default=False, type='bool'),
        )
        argument_spec.update(element_spec)
        # 'name' and 'aggregate' are alternatives: exactly one is given.
        required_one_of = [['name', 'aggregate']]
        mutually_exclusive = [['name', 'aggregate']]
        self._module = AnsibleModule(
            argument_spec=argument_spec,
            required_one_of=required_one_of,
            mutually_exclusive=mutually_exclusive,
            supports_check_mode=True)

    def _get_lag_type(self, lag_name):
        # Classify a LAG by its name: 'PoN' -> lag, 'MpoN' -> mlag.
        # Fails the module run for any other name.
        match = self.LAG_NAME_REGEX.match(lag_name)
        if match:
            prefix = match.group(1)
            if prefix == "Po":
                return self.LAG_TYPE
            return self.MLAG_TYPE
        self._module.fail_json(
            msg='invalid lag name: %s, lag name should start with Po or '
                'Mpo' % lag_name)

    def get_required_config(self):
        """Build self._required_config from the module parameters."""
        self._required_config = list()
        module_params = self._module.params
        aggregate = module_params.get('aggregate')
        self._purge = module_params.get('purge', False)
        if aggregate:
            for item in aggregate:
                # Fill missing per-item values from the top-level params.
                for key in item:
                    if item.get(key) is None:
                        item[key] = module_params[key]
                self.validate_param_values(item, item)
                req_item = item.copy()
                req_item['type'] = self._get_lag_type(req_item['name'])
                self._required_config.append(req_item)
        else:
            params = {
                'name': module_params['name'],
                'state': module_params['state'],
                'members': module_params['members'],
                'mode': module_params['mode'],
                'type': self._get_lag_type(module_params['name']),
            }
            self.validate_param_values(params)
            self._required_config.append(params)

    @classmethod
    def _extract_lag_name(cls, header):
        # Parse a summary header like "1 Po1(U)" into (name, 'up'/'down').
        # Returns (None, None) when the header doesn't describe a LAG.
        match = cls.LAG_ID_REGEX.match(header)
        state = None
        lag_name = None
        if match:
            state = 'up' if match.group(2) == 'U' else 'down'
            lag_name = match.group(1)
        return lag_name, state

    @classmethod
    def _extract_if_name(cls, member):
        # Strip any trailing annotation from an interface name,
        # e.g. "Eth1/1(S)" -> "Eth1/1"; returns None when no match.
        match = cls.IF_NAME_REGEX.match(member)
        if match:
            return match.group(1)

    @classmethod
    def _extract_lag_members(cls, lag_type, lag_item):
        # Return the member interface names of one LAG summary entry.
        # MLAG entries key the member list under a 'Local Ports...' attr.
        members = ""
        if lag_type == cls.LAG_TYPE:
            members = cls.get_config_attr(lag_item, "Member Ports")
        else:
            for attr_name, attr_val in iteritems(lag_item):
                if attr_name.startswith('Local Ports'):
                    members = attr_val
        return [cls._extract_if_name(member) for member in members.split()]

    def _get_port_channels(self, if_type):
        # Fetch the device's port-channel summary for this interface type.
        return get_interfaces_config(self._module, if_type, flags="summary")

    def _parse_port_channels_summary(self, lag_type, lag_summary):
        """Populate self._current_config from the device summary output."""
        if lag_type == self.MLAG_TYPE:
            if self._os_version >= self.ONYX_API_VERSION:
                # Newer ONYX API wraps the MLAG summary in a list of
                # sections; locate the one we need.
                found_summary = False
                for summary_item in lag_summary:
                    if self.MLAG_SUMMARY in summary_item:
                        lag_summary = summary_item[self.MLAG_SUMMARY]
                        if lag_summary:
                            lag_summary = lag_summary[0]
                        else:
                            lag_summary = dict()
                        found_summary = True
                        break
                if not found_summary:
                    lag_summary = dict()
            else:
                lag_summary = lag_summary.get(self.MLAG_SUMMARY, dict())
        for lag_key, lag_data in iteritems(lag_summary):
            lag_name, state = self._extract_lag_name(lag_key)
            if not lag_name:
                continue
            lag_members = self._extract_lag_members(lag_type, lag_data[0])
            lag_obj = dict(
                name=lag_name,
                state=state,
                members=lag_members
            )
            self._current_config[lag_name] = lag_obj

    def load_current_config(self):
        """Load the current LAG/MLAG state from the device."""
        self._current_config = dict()
        self._os_version = self._get_os_version()
        # Only query the interface types actually requested.
        lag_types = set([lag_obj['type'] for lag_obj in self._required_config])
        for lag_type in lag_types:
            if_type = self.IF_TYPE_MAP[lag_type]
            lag_summary = self._get_port_channels(if_type)
            if lag_summary:
                self._parse_port_channels_summary(lag_type, lag_summary)

    def _get_interface_command_suffix(self, if_name):
        # Map a short interface name to its CLI form,
        # e.g. "Eth1/1" -> "ethernet 1/1", "Po1" -> "port-channel 1".
        if if_name.startswith('Eth'):
            return if_name.replace("Eth", "ethernet ")
        if if_name.startswith('Po'):
            return if_name.replace("Po", "port-channel ")
        if if_name.startswith('Mpo'):
            return if_name.replace("Mpo", "mlag-port-channel ")
        self._module.fail_json(
            msg='invalid interface name: %s' % if_name)

    def _get_channel_group(self, if_name):
        # Map a LAG name to its channel-group CLI keyword plus id,
        # e.g. "Po1" -> "channel-group 1".
        if if_name.startswith('Po'):
            return if_name.replace("Po", "channel-group ")
        if if_name.startswith('Mpo'):
            return if_name.replace("Mpo", "mlag-channel-group ")
        self._module.fail_json(
            msg='invalid interface name: %s' % if_name)

    def _generate_no_linkagg_commands(self, lag_name):
        # Emit the command that removes an existing port-channel.
        suffix = self._get_interface_command_suffix(lag_name)
        command = 'no interface %s' % suffix
        self._commands.append(command)

    def _generate_linkagg_commands(self, lag_name, req_lag):
        """Emit commands reconciling one LAG with its desired state."""
        curr_lag = self._current_config.get(lag_name, {})
        if not curr_lag:
            # LAG does not exist yet: create it first.
            suffix = self._get_interface_command_suffix(lag_name)
            self._commands.append("interface %s" % suffix)
            self._commands.append("exit")
        curr_members = set(curr_lag.get('members', []))
        req_members = set(req_lag.get('members') or [])
        lag_mode = req_lag['mode']
        if req_members != curr_members:
            channel_group = self._get_channel_group(lag_name)
            channel_group_type = channel_group.split()[0]
            # add members that are requested but not yet configured
            for member in req_members:
                if member in curr_members:
                    continue
                suffix = self._get_interface_command_suffix(member)
                self._commands.append(
                    "interface %s %s mode %s" %
                    (suffix, channel_group, lag_mode))
            # drop members that are configured but no longer requested
            for member in curr_members:
                if member in req_members:
                    continue
                suffix = self._get_interface_command_suffix(member)
                self._commands.append(
                    "interface %s no %s" % (suffix, channel_group_type))

        req_state = req_lag.get('state')
        if req_state in ('up', 'down'):
            curr_state = curr_lag.get('state')
            if curr_state != req_state:
                suffix = self._get_interface_command_suffix(lag_name)
                cmd = "interface %s " % suffix
                if req_state == 'up':
                    cmd += 'no shutdown'
                else:
                    cmd += 'shutdown'
                self._commands.append(cmd)

    def generate_commands(self):
        """Produce the command list for all required LAGs (and purge)."""
        req_lags = set()
        for req_conf in self._required_config:
            state = req_conf['state']
            lag_name = req_conf['name']
            if state == 'absent':
                if lag_name in self._current_config:
                    self._generate_no_linkagg_commands(lag_name)
            else:
                req_lags.add(lag_name)
                self._generate_linkagg_commands(lag_name, req_conf)

        if self._purge:
            # Remove device LAGs not present in the requested set.
            for lag_name in self._current_config:
                if lag_name not in req_lags:
                    self._generate_no_linkagg_commands(lag_name)

    def check_declarative_intent_params(self, result):
        # This module has no declarative-intent (wait-for) options.
        pass
def main():
    """ main entry point for module execution
    """
    # Delegate to the BaseOnyxModule-driven workflow: argument parsing,
    # current-config load, command generation and execution.
    OnyxLinkAggModule.main()


if __name__ == '__main__':
    main()
| 35.107042
| 92
| 0.603546
|
acfd5b7aff87e60ce7e431d6470bd1fb0cd7f47f
| 1,926
|
py
|
Python
|
kolibri/core/notifications/migrations/0005_learnerprogressnotification_assignment_collections.py
|
alexMet/kolibri
|
394f94e6e3145089e965aa1b70bfdb5056d38bad
|
[
"MIT"
] | 1
|
2020-10-22T05:54:52.000Z
|
2020-10-22T05:54:52.000Z
|
kolibri/core/notifications/migrations/0005_learnerprogressnotification_assignment_collections.py
|
alexMet/kolibri
|
394f94e6e3145089e965aa1b70bfdb5056d38bad
|
[
"MIT"
] | 2
|
2021-09-24T11:36:21.000Z
|
2021-09-29T16:09:25.000Z
|
kolibri/core/notifications/migrations/0005_learnerprogressnotification_assignment_collections.py
|
alexMet/kolibri
|
394f94e6e3145089e965aa1b70bfdb5056d38bad
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-07-23 16:06
from __future__ import unicode_literals
from django.db import migrations
from django.db.utils import OperationalError
import kolibri.core.fields
def migrate_collection_ids(apps, schema_editor):
    """Backfill ``assignment_collections`` and normalize ``classroom_id``.

    Historical notifications may carry a LearnerGroup or AdHocGroup id in
    ``classroom_id``; replace those with the group's parent collection
    while preserving the original id in the new ``assignment_collections``
    list.
    """
    try:
        LearnerProgressNotification = apps.get_model(
            "notifications", "LearnerProgressNotification"
        )
        LearnerGroup = apps.get_model("kolibriauth", "LearnerGroup")
        AdHocGroup = apps.get_model("kolibriauth", "AdHocGroup")
        # Map each group id to its parent collection id.
        LearnerGroupMap = {
            g["id"]: g["parent_id"]
            for g in LearnerGroup.objects.all().values("id", "parent_id")
        }
        AdHocGroupMap = {
            g["id"]: g["parent_id"]
            for g in AdHocGroup.objects.all().values("id", "parent_id")
        }
        for notification in LearnerProgressNotification.objects.all():
            collection_id = notification.classroom_id
            if collection_id in LearnerGroupMap:
                notification.classroom_id = LearnerGroupMap[collection_id]
            if collection_id in AdHocGroupMap:
                notification.classroom_id = AdHocGroupMap[collection_id]
            # Keep a record of the collection originally targeted.
            notification.assignment_collections = [collection_id]
            notification.save()
    except OperationalError:
        # Can get an error if the collections table doesn't exist yet
        # in which case we can't do any data migrations at all, so just skip
        pass
class Migration(migrations.Migration):
    # Adds the assignment_collections JSON field to
    # LearnerProgressNotification and backfills it from the legacy
    # classroom_id values via migrate_collection_ids.

    dependencies = [
        ("notifications", "0004_learnerprogressnotification_quiz_num_answered"),
    ]

    operations = [
        migrations.AddField(
            model_name="learnerprogressnotification",
            name="assignment_collections",
            # NOTE(review): default=[] is a shared mutable default (Django
            # normally recommends a callable such as ``list``) - left
            # untouched because this migration has already shipped.
            field=kolibri.core.fields.JSONField(default=[], null=True),
        ),
        migrations.RunPython(migrate_collection_ids),
    ]
| 35.018182
| 80
| 0.654206
|
acfd5b839b0e723d9b4305e55ba3aec03b054023
| 6,005
|
py
|
Python
|
streaming/python/runtime/graph.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 21,382
|
2016-09-26T23:12:52.000Z
|
2022-03-31T21:47:45.000Z
|
streaming/python/runtime/graph.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 19,689
|
2016-09-17T08:21:25.000Z
|
2022-03-31T23:59:30.000Z
|
streaming/python/runtime/graph.py
|
gramhagen/ray
|
c18caa4db36d466718bdbcb2229aa0b2dc03da1f
|
[
"Apache-2.0"
] | 4,114
|
2016-09-23T18:54:01.000Z
|
2022-03-31T15:07:32.000Z
|
import enum
import logging
import ray
import ray.streaming.generated.remote_call_pb2 as remote_call_pb
import ray.streaming.operator as operator
import ray.streaming.partition as partition
from ray._raylet import ActorID
from ray.actor import ActorHandle
from ray.streaming.config import Config
from ray.streaming.generated.streaming_pb2 import Language
logger = logging.getLogger(__name__)
class NodeType(enum.Enum):
    """Role of a vertex in the streaming dataflow graph.

    SOURCE: where the program reads its input from.
    TRANSFORM: turns one or more DataStreams into a new DataStream;
        transformations can be combined into sophisticated topologies.
    SINK: consumes DataStreams and forwards them to files, sockets,
        external systems, or prints them.
    """

    SOURCE = 0
    TRANSFORM = 1
    SINK = 2
class ExecutionEdge:
    """Edge of the physical execution graph, linking two execution vertices."""

    def __init__(self, execution_edge_pb, language):
        pb = execution_edge_pb
        self.source_execution_vertex_id = pb.source_execution_vertex_id
        self.target_execution_vertex_id = pb.target_execution_vertex_id
        # Sink vertices carry no partition function, so the payload is only
        # deserialized when it is a non-empty python partition.
        partition_payload = pb.partition
        if language == Language.PYTHON and partition_payload:
            self.partition = partition.load_partition(partition_payload)
class ExecutionVertex:
    """One parallel instance of a job vertex in the physical graph."""

    worker_actor: ActorHandle

    def __init__(self, execution_vertex_pb):
        pb = execution_vertex_pb
        self.execution_vertex_id = pb.execution_vertex_id
        self.execution_job_vertex_id = pb.execution_job_vertex_id
        self.execution_job_vertex_name = pb.execution_job_vertex_name
        self.execution_vertex_index = pb.execution_vertex_index
        self.parallelism = pb.parallelism
        if pb.language == Language.PYTHON:
            # Deserialize the (possibly chained) python operator descriptor.
            if pb.chained:
                logger.info("Load chained operator")
                self.stream_operator = operator.load_chained_operator(
                    pb.operator)
            else:
                logger.info("Load operator")
                self.stream_operator = operator.load_operator(pb.operator)
        self.worker_actor = None
        if pb.worker_actor:
            self.worker_actor = ray.actor.ActorHandle.\
                _deserialization_helper(pb.worker_actor)
        self.container_id = pb.container_id
        self.build_time = pb.build_time
        self.language = pb.language
        self.config = pb.config
        self.resource = pb.resource

    @property
    def execution_vertex_name(self):
        """``<job_vertex_id>_<job_vertex_name>_<vertex_id>``."""
        return "_".join(
            str(part) for part in (self.execution_job_vertex_id,
                                   self.execution_job_vertex_name,
                                   self.execution_vertex_id))
class ExecutionVertexContext:
    """Runtime view of one execution vertex plus its up/downstream
    neighborhood, as received from the job master."""

    actor_id: ActorID
    execution_vertex: ExecutionVertex

    def __init__(
            self,
            execution_vertex_context_pb: remote_call_pb.ExecutionVertexContext
    ):
        pb = execution_vertex_context_pb
        self.execution_vertex = ExecutionVertex(pb.current_execution_vertex)
        self.job_name = self.execution_vertex.config[Config.STREAMING_JOB_NAME]
        self.exe_vertex_name = self.execution_vertex.execution_vertex_name
        self.actor_id = self.execution_vertex.worker_actor._ray_actor_id
        lang = self.execution_vertex.language
        self.upstream_execution_vertices = [
            ExecutionVertex(v) for v in pb.upstream_execution_vertices]
        self.downstream_execution_vertices = [
            ExecutionVertex(v) for v in pb.downstream_execution_vertices]
        self.input_execution_edges = [
            ExecutionEdge(e, lang) for e in pb.input_execution_edges]
        self.output_execution_edges = [
            ExecutionEdge(e, lang) for e in pb.output_execution_edges]

    def get_parallelism(self):
        """Parallelism of this vertex's job vertex."""
        return self.execution_vertex.parallelism

    def get_upstream_parallelism(self):
        """Parallelism of the upstream job vertex (0 when there is none)."""
        upstream = self.upstream_execution_vertices
        return upstream[0].parallelism if upstream else 0

    def get_downstream_parallelism(self):
        """Parallelism of the downstream job vertex (0 when there is none)."""
        downstream = self.downstream_execution_vertices
        return downstream[0].parallelism if downstream else 0

    @property
    def build_time(self):
        return self.execution_vertex.build_time

    @property
    def stream_operator(self):
        return self.execution_vertex.stream_operator

    @property
    def config(self):
        return self.execution_vertex.config

    def get_task_id(self):
        return self.execution_vertex.execution_vertex_id

    def get_source_actor_by_execution_vertex_id(self, execution_vertex_id):
        """Actor handle of the upstream vertex with the given id."""
        return self._find_worker_actor(
            self.upstream_execution_vertices, execution_vertex_id)

    def get_target_actor_by_execution_vertex_id(self, execution_vertex_id):
        """Actor handle of the downstream vertex with the given id."""
        return self._find_worker_actor(
            self.downstream_execution_vertices, execution_vertex_id)

    @staticmethod
    def _find_worker_actor(vertices, execution_vertex_id):
        # Linear scan; vertex lists are small (one per parallel instance).
        for vertex in vertices:
            if vertex.execution_vertex_id == execution_vertex_id:
                return vertex.worker_actor
        raise Exception(
            "Vertex {} does not exist!".format(execution_vertex_id))
| 38.49359
| 79
| 0.706744
|
acfd5becf0c3b8df9b2630d54e5e4c3f87ad90d3
| 2,352
|
py
|
Python
|
bigml/tests/test_29_script.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 137
|
2015-01-12T06:04:10.000Z
|
2022-03-06T21:00:04.000Z
|
bigml/tests/test_29_script.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 78
|
2015-01-13T18:28:51.000Z
|
2022-03-04T19:18:28.000Z
|
bigml/tests/test_29_script.py
|
axellos162/python-4
|
569f6655871ea60b41e9442396433567b9daaea7
|
[
"Apache-2.0"
] | 144
|
2015-01-16T06:13:33.000Z
|
2022-03-29T17:53:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating and updating scripts
"""
from .world import world, setup_module, teardown_module
from . import create_script_steps as script_create
class TestScript(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a whizzml script:
Given I create a whizzml script from a excerpt of code "<source_code>"
And I wait until the script is ready less than <time_1> secs
And I update the script with "<param>", "<param_value>"
And I wait until the script is ready less than <time_2> secs
Then the script code is "<source_code>" and the value of "<param>" is "<param_value>"
Examples:
| source_code | time_1 | time_2 | param | param_value
| (+ 1 1) | 10 | 10 | name | my script
"""
print(self.test_scenario1.__doc__)
examples = [
['(+ 1 1)', '10', '10', 'name', 'my script']]
for example in examples:
print("\nTesting with:\n", example)
script_create.i_create_a_script(self, example[0])
script_create.the_script_is_finished(self, example[1])
script_create.i_update_a_script(self, example[3], example[4])
script_create.the_script_is_finished(self, example[2])
script_create.the_script_code_and_attributes(self, example[0], example[3], example[4])
| 38.557377
| 101
| 0.609269
|
acfd5c3ad0dfa55875d6729c3315a39ccf870395
| 449
|
py
|
Python
|
members/migrations/0008_auto_20181001_1031.py
|
Venus9023/django_for_beginer
|
c12edb9d347f444f9817d0c6e13716284476820c
|
[
"BSD-3-Clause"
] | 1,440
|
2015-01-05T13:06:12.000Z
|
2022-03-30T23:09:24.000Z
|
members/migrations/0008_auto_20181001_1031.py
|
Venus9023/django_for_beginer
|
c12edb9d347f444f9817d0c6e13716284476820c
|
[
"BSD-3-Clause"
] | 711
|
2015-01-01T19:42:33.000Z
|
2022-03-29T08:36:29.000Z
|
members/migrations/0008_auto_20181001_1031.py
|
Venus9023/django_for_beginer
|
c12edb9d347f444f9817d0c6e13716284476820c
|
[
"BSD-3-Clause"
] | 887
|
2015-01-01T03:17:20.000Z
|
2022-03-23T09:15:26.000Z
|
# Generated by Django 2.1.2 on 2018-10-01 10:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('members', '0007_auto_20170216_0837'),
]
operations = [
migrations.RemoveField(
model_name='individualmember',
name='bio',
),
migrations.RemoveField(
model_name='individualmember',
name='website',
),
]
| 20.409091
| 47
| 0.576837
|
acfd5c91641d7e023f388899a7271f3202c799e3
| 1,193
|
py
|
Python
|
tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/forward_rate_agreement/__init__.py
|
slowy07/tf-quant-finance
|
0976f720fb58a2d7bfd863640c12a2425cd2f94f
|
[
"Apache-2.0"
] | 3,138
|
2019-07-24T21:43:17.000Z
|
2022-03-30T12:11:09.000Z
|
tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/forward_rate_agreement/__init__.py
|
Aarif1430/tf-quant-finance
|
9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6
|
[
"Apache-2.0"
] | 63
|
2019-09-07T19:16:03.000Z
|
2022-03-29T19:29:40.000Z
|
tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/forward_rate_agreement/__init__.py
|
Aarif1430/tf-quant-finance
|
9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6
|
[
"Apache-2.0"
] | 423
|
2019-07-26T21:28:05.000Z
|
2022-03-26T13:07:44.000Z
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forward rate agreement."""
from tf_quant_finance.experimental.pricing_platform.framework.rate_instruments.forward_rate_agreement.forward_rate_agreement_impl import ForwardRateAgreement
from tf_quant_finance.experimental.pricing_platform.framework.rate_instruments.forward_rate_agreement.forward_rate_agreement_impl import ForwardRateAgreementConfig
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
"ForwardRateAgreement",
"ForwardRateAgreementConfig",
]
remove_undocumented(__name__, _allowed_symbols)
| 42.607143
| 163
| 0.814753
|
acfd5d6394fdecb06a90d9ab5a8b77b2b7549814
| 980
|
py
|
Python
|
Sidekick_4.5/xgrid_sequences_from_file.py
|
hallba/Sidekick
|
2208ee913e6e2c181d0c27bec85495d3595d980c
|
[
"Apache-2.0"
] | null | null | null |
Sidekick_4.5/xgrid_sequences_from_file.py
|
hallba/Sidekick
|
2208ee913e6e2c181d0c27bec85495d3595d980c
|
[
"Apache-2.0"
] | 3
|
2015-02-24T23:04:51.000Z
|
2015-02-24T23:07:15.000Z
|
Sidekick_4.5/xgrid_sequences_from_file.py
|
hallba/Sidekick
|
2208ee913e6e2c181d0c27bec85495d3595d980c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys,os
import HAConf
from xgrid_tools import *
starting_point = os.getcwd()
input_file = sys.argv[1]
job_list =[]
for line in open(input_file,'r'):
mutant = line[:-1]
print mutant
try:
os.mkdir(mutant)
except:
continue
os.chdir(mutant)
#Now create run directories and run
os.mkdir("Rotation-Translation")
os.chdir("Rotation-Translation")
job_list.append(job_submit(HAConf.programs['hippo_tr'] + " " + mutant + " -dAgg "))
os.chdir("..")
os.mkdir("MARTINI")
os.chdir("MARTINI")
job_list.append(job_submit(HAConf.programs['MARTINI'] + " " + mutant + " " + str(5) + " -dAgg "))
os.chdir("..")
#Finally, return to the starting directory
os.chdir(starting_point)
collect_results_daemon(job_list,starting_point+"/daemon.log",starting_point+"/restart.pickle")
| 25.789474
| 105
| 0.578571
|
acfd5d8732c164f981fe9c2ba1655d0bb822421e
| 2,718
|
py
|
Python
|
contracts/validator_registration.v.py
|
atcswap/atc_chain
|
b5df207605578f1009afb439a5c76ce80800155f
|
[
"MIT"
] | 217
|
2018-06-15T16:35:59.000Z
|
2022-01-23T21:21:13.000Z
|
contracts/validator_registration.v.py
|
KenMan79/beacon_chain
|
fa126de4aff583a9b6dab074ef7d553148f13530
|
[
"MIT"
] | 111
|
2018-06-17T03:14:12.000Z
|
2021-01-12T04:29:26.000Z
|
contracts/validator_registration.v.py
|
KenMan79/beacon_chain
|
fa126de4aff583a9b6dab074ef7d553148f13530
|
[
"MIT"
] | 60
|
2018-06-16T02:55:16.000Z
|
2022-03-28T18:42:10.000Z
|
MIN_DEPOSIT: constant(uint256) = 1 # ETH
MAX_DEPOSIT: constant(uint256) = 32 # ETH
GWEI_PER_ETH: constant(uint256) = 1000000000 # 10**9
CHAIN_START_FULL_DEPOSIT_THRESHOLD: constant(uint256) = 16384 # 2**14
DEPOSIT_CONTRACT_TREE_DEPTH: constant(uint256) = 32
TWO_TO_POWER_OF_TREE_DEPTH: constant(uint256) = 4294967296 # 2**32
SECONDS_PER_DAY: constant(uint256) = 86400
Deposit: event({previous_deposit_root: bytes32, data: bytes[2064], merkle_tree_index: bytes[8]})
ChainStart: event({deposit_root: bytes32, time: bytes[8]})
deposit_tree: map(uint256, bytes32)
deposit_count: uint256
full_deposit_count: uint256
@payable
@public
def deposit(deposit_input: bytes[2048]):
assert msg.value >= as_wei_value(MIN_DEPOSIT, "ether")
assert msg.value <= as_wei_value(MAX_DEPOSIT, "ether")
index: uint256 = self.deposit_count + TWO_TO_POWER_OF_TREE_DEPTH
msg_gwei_bytes8: bytes[8] = slice(concat("", convert(msg.value / GWEI_PER_ETH, bytes32)), start=24, len=8)
timestamp_bytes8: bytes[8] = slice(concat("", convert(block.timestamp, bytes32)), start=24, len=8)
deposit_data: bytes[2064] = concat(msg_gwei_bytes8, timestamp_bytes8, deposit_input)
merkle_tree_index: bytes[8] = slice(concat("", convert(index, bytes32)), start=24, len=8)
log.Deposit(self.deposit_tree[1], deposit_data, merkle_tree_index)
# add deposit to merkle tree
self.deposit_tree[index] = sha3(deposit_data)
for i in range(DEPOSIT_CONTRACT_TREE_DEPTH): # DEPOSIT_CONTRACT_TREE_DEPTH (range of constant var not yet supported)
index /= 2
self.deposit_tree[index] = sha3(concat(self.deposit_tree[index * 2], self.deposit_tree[index * 2 + 1]))
self.deposit_count += 1
if msg.value == as_wei_value(MAX_DEPOSIT, "ether"):
self.full_deposit_count += 1
if self.full_deposit_count == CHAIN_START_FULL_DEPOSIT_THRESHOLD:
timestamp_day_boundary: uint256 = as_unitless_number(block.timestamp) - as_unitless_number(block.timestamp) % SECONDS_PER_DAY + SECONDS_PER_DAY
timestamp_day_boundary_bytes8: bytes[8] = slice(concat("", convert(timestamp_day_boundary, bytes32)), start=24, len=8)
log.ChainStart(self.deposit_tree[1], timestamp_day_boundary_bytes8)
@public
@constant
def get_deposit_root() -> bytes32:
return self.deposit_tree[1]
@public
@constant
def get_branch(leaf: uint256) -> bytes32[32]: # size is DEPOSIT_CONTRACT_TREE_DEPTH (symbolic const not supported)
branch: bytes32[32] # size is DEPOSIT_CONTRACT_TREE_DEPTH
index: uint256 = leaf + TWO_TO_POWER_OF_TREE_DEPTH
for i in range(DEPOSIT_CONTRACT_TREE_DEPTH):
branch[i] = self.deposit_tree[bitwise_xor(index, 1)]
index /= 2
return branch
| 46.862069
| 155
| 0.740986
|
acfd5dc9954bfded9b0a280121fba2b1f04d0d9a
| 31,241
|
py
|
Python
|
webalchemy/tornado/template.py
|
PythonJS/webalchemy
|
c245685daceb69b9fcc95704e7682e428bf80d16
|
[
"MIT"
] | 1
|
2015-02-09T17:54:29.000Z
|
2015-02-09T17:54:29.000Z
|
webalchemy/tornado/template.py
|
PythonJS/webalchemy
|
c245685daceb69b9fcc95704e7682e428bf80d16
|
[
"MIT"
] | null | null | null |
webalchemy/tornado/template.py
|
PythonJS/webalchemy
|
c245685daceb69b9fcc95704e7682e428bf80d16
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
`Loader` is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error-reporting is currently... uh,
interesting. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks get
translated exactly into Python, so you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the ``escape()`` function in the examples above. You can pass
functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of
`webalchemy.tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `.Application` setting.
Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``. These tags may be escaped as ``{{!``
and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
Note that as an implementation detail apply blocks are implemented
as nested functions and thus may interact strangely with variables
set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
within loops.
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `.Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~webalchemy.tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``UIModules`` are a feature of the `webalchemy.tornado.web.RequestHandler`
class (and specifically its ``render`` method) and will not work
when the template system is used on its own in other contexts.
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% finally %}...{% else %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from webalchemy.tornado import escape
from webalchemy.tornado.log import app_log
from webalchemy.tornado.util import bytes_type, ObjectDict, exec_in, unicode_type
try:
from cStringIO import StringIO # py2
except ImportError:
from io import StringIO # py3
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
class Template(object):
"""A compiled template.
We compile into Python from the given template_string. You can generate
the template from variables with generate().
"""
# note that the constructor's signature is not extracted with
# autodoc because _UNSET looks like garbage. When changing
# this signature update website/sphinx/template.rst too.
def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=None, autoescape=_UNSET):
self.name = name
if compress_whitespace is None:
compress_whitespace = name.endswith(".html") or \
name.endswith(".js")
if autoescape is not _UNSET:
self.autoescape = autoescape
elif loader:
self.autoescape = loader.autoescape
else:
self.autoescape = _DEFAULT_AUTOESCAPE
self.namespace = loader.namespace if loader else {}
reader = _TemplateReader(name, escape.native_str(template_string))
self.file = _File(self, _parse(reader, self))
self.code = self._generate_python(loader, compress_whitespace)
self.loader = loader
try:
# Under python2.5, the fake filename used here must match
# the module name used in __name__ below.
# The dont_inherit flag prevents template.py's future imports
# from being applied to the generated code.
self.compiled = compile(
escape.to_unicode(self.code),
"%s.generated.py" % self.name.replace('.', '_'),
"exec", dont_inherit=True)
except Exception:
formatted_code = _format_code(self.code).rstrip()
app_log.error("%s code:\n%s", self.name, formatted_code)
raise
def generate(self, **kwargs):
"""Generate this template with the given arguments."""
namespace = {
"escape": escape.xhtml_escape,
"xhtml_escape": escape.xhtml_escape,
"url_escape": escape.url_escape,
"json_encode": escape.json_encode,
"squeeze": escape.squeeze,
"linkify": escape.linkify,
"datetime": datetime,
"_tt_utf8": escape.utf8, # for internal use
"_tt_string_types": (unicode_type, bytes_type),
# __name__ and __loader__ allow the traceback mechanism to find
# the generated source code.
"__name__": self.name.replace('.', '_'),
"__loader__": ObjectDict(get_source=lambda name: self.code),
}
namespace.update(self.namespace)
namespace.update(kwargs)
exec_in(self.compiled, namespace)
execute = namespace["_tt_execute"]
# Clear the traceback module's cache of source data now that
# we've generated a new template (mainly for this module's
# unittests, where different tests reuse the same name).
linecache.clearcache()
return execute()
def _generate_python(self, loader, compress_whitespace):
buffer = StringIO()
try:
# named_blocks maps from names to _NamedBlock objects
named_blocks = {}
ancestors = self._get_ancestors(loader)
ancestors.reverse()
for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template,
compress_whitespace)
ancestors[0].generate(writer)
return buffer.getvalue()
finally:
buffer.close()
def _get_ancestors(self, loader):
ancestors = [self.file]
for chunk in self.file.body.chunks:
if isinstance(chunk, _ExtendsBlock):
if not loader:
raise ParseError("{% extends %} block found, but no "
"template loader")
template = loader.load(chunk.name, self.name)
ancestors.extend(template._get_ancestors(loader))
return ancestors
class BaseLoader(object):
"""Base class for template loaders.
You must use a template loader to use template constructs like
``{% extends %}`` and ``{% include %}``. The loader caches all
templates after they are loaded the first time.
"""
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None):
"""``autoescape`` must be either None or a string naming a function
in the template namespace, such as "xhtml_escape".
"""
self.autoescape = autoescape
self.namespace = namespace or {}
self.templates = {}
# self.lock protects self.templates. It's a reentrant lock
# because templates may load other templates via `include` or
# `extends`. Note that thanks to the GIL this code would be safe
# even without the lock, but could lead to wasted work as multiple
# threads tried to compile the same template simultaneously.
self.lock = threading.RLock()
def reset(self):
"""Resets the cache of compiled templates."""
with self.lock:
self.templates = {}
def resolve_path(self, name, parent_path=None):
"""Converts a possibly-relative path to absolute (used internally)."""
raise NotImplementedError()
def load(self, name, parent_path=None):
"""Loads a template."""
name = self.resolve_path(name, parent_path=parent_path)
with self.lock:
if name not in self.templates:
self.templates[name] = self._create_template(name)
return self.templates[name]
def _create_template(self, name):
raise NotImplementedError()
class Loader(BaseLoader):
"""A template loader that loads from a single root directory.
"""
def __init__(self, root_directory, **kwargs):
super(Loader, self).__init__(**kwargs)
self.root = os.path.abspath(root_directory)
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
current_path = os.path.join(self.root, parent_path)
file_dir = os.path.dirname(os.path.abspath(current_path))
relative_path = os.path.abspath(os.path.join(file_dir, name))
if relative_path.startswith(self.root):
name = relative_path[len(self.root) + 1:]
return name
def _create_template(self, name):
path = os.path.join(self.root, name)
f = open(path, "rb")
template = Template(f.read(), name=name, loader=self)
f.close()
return template
class DictLoader(BaseLoader):
"""A template loader that loads from a dictionary."""
def __init__(self, dict, **kwargs):
super(DictLoader, self).__init__(**kwargs)
self.dict = dict
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
file_dir = posixpath.dirname(parent_path)
name = posixpath.normpath(posixpath.join(file_dir, name))
return name
def _create_template(self, name):
return Template(self.dict[name], name=name, loader=self)
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
def __init__(self, template, body):
self.template = template
self.body = body
self.line = 0
def generate(self, writer):
writer.write_line("def _tt_execute():", self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
def each_child(self):
return (self.body,)
class _ChunkList(_Node):
def __init__(self, chunks):
self.chunks = chunks
def generate(self, writer):
for chunk in self.chunks:
chunk.generate(writer)
def each_child(self):
return self.chunks
class _NamedBlock(_Node):
def __init__(self, name, body, template, line):
self.name = name
self.body = body
self.template = template
self.line = line
def each_child(self):
return (self.body,)
def generate(self, writer):
block = writer.named_blocks[self.name]
with writer.include(block.template, self.line):
block.body.generate(writer)
def find_named_blocks(self, loader, named_blocks):
named_blocks[self.name] = self
_Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
def __init__(self, name):
self.name = name
class _IncludeBlock(_Node):
def __init__(self, name, reader, line):
self.name = name
self.template_name = reader.name
self.line = line
def find_named_blocks(self, loader, named_blocks):
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
def generate(self, writer):
included = writer.loader.load(self.name, self.template_name)
with writer.include(included, self.line):
included.file.body.generate(writer)
class _ApplyBlock(_Node):
def __init__(self, method, line, body=None):
self.method = method
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
method_name = "_tt_apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name, self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
self.method, method_name), self.line)
class _ControlBlock(_Node):
def __init__(self, statement, line, body=None):
self.statement = statement
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.write_line("%s:" % self.statement, self.line)
with writer.indent():
self.body.generate(writer)
# Just in case the body was empty
writer.write_line("pass", self.line)
class _IntermediateControlBlock(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
# In case the previous block was empty
writer.write_line("pass", self.line)
writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
class _Statement(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
writer.write_line(self.statement, self.line)
class _Expression(_Node):
def __init__(self, expression, line, raw=False):
self.expression = expression
self.line = line
self.raw = raw
def generate(self, writer):
writer.write_line("_tt_tmp = %s" % self.expression, self.line)
writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
" _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
if not self.raw and writer.current_template.autoescape is not None:
# In python3 functions like xhtml_escape return unicode,
# so we have to convert to utf8 again.
writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
writer.current_template.autoescape, self.line)
writer.write_line("_tt_append(_tt_tmp)", self.line)
class _Module(_Expression):
def __init__(self, expression, line):
super(_Module, self).__init__("_tt_modules." + expression, line,
raw=True)
class _Text(_Node):
def __init__(self, value, line):
self.value = value
self.line = line
def generate(self, writer):
value = self.value
# Compress lots of white space to a single character. If the whitespace
# breaks a line, have it continue to break a line, but just with a
# single \n character
if writer.compress_whitespace and "<pre>" not in value:
value = re.sub(r"([\t ]+)", " ", value)
value = re.sub(r"(\s*\n\s*)", "\n", value)
if value:
writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception):
"""Raised for template syntax errors."""
pass
class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template,
compress_whitespace):
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.compress_whitespace = compress_whitespace
self.apply_counter = 0
self.include_stack = []
self._indent = 0
def indent_size(self):
return self._indent
def indent(self):
class Indenter(object):
def __enter__(_):
self._indent += 1
return self
def __exit__(_, *args):
assert self._indent > 0
self._indent -= 1
return Indenter()
def include(self, template, line):
self.include_stack.append((self.current_template, line))
self.current_template = template
class IncludeTemplate(object):
def __enter__(_):
return self
def __exit__(_, *args):
self.current_template = self.include_stack.pop()[0]
return IncludeTemplate()
def write_line(self, line, line_number, indent=None):
if indent is None:
indent = self._indent
line_comment = ' # %s:%d' % (self.current_template.name, line_number)
if self.include_stack:
ancestors = ["%s:%d" % (tmpl.name, lineno)
for (tmpl, lineno) in self.include_stack]
line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
print(" " * indent + line + line_comment, file=self.file)
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None, in_loop=None):
body = _ChunkList([])
while True:
# Find next template directive
curly = 0
while True:
curly = reader.find("{", curly)
if curly == -1 or curly + 1 == reader.remaining():
# EOF
if in_block:
raise ParseError("Missing {%% end %%} block for %s" %
in_block)
body.chunks.append(_Text(reader.consume(), reader.line))
return body
# If the first curly brace is not the start of a special token,
# start searching from the character after it
if reader[curly + 1] not in ("{", "%", "#"):
curly += 1
continue
# When there are more than 2 curlies in a row, use the
# innermost ones. This is useful when generating languages
# like latex where curlies are also meaningful
if (curly + 2 < reader.remaining() and
reader[curly + 1] == '{' and reader[curly + 2] == '{'):
curly += 1
continue
break
# Append any text before the special token
if curly > 0:
cons = reader.consume(curly)
body.chunks.append(_Text(cons, reader.line))
start_brace = reader.consume(2)
line = reader.line
# Template directives may be escaped as "{{!" or "{%!".
# In this case output the braces and consume the "!".
# This is especially useful in conjunction with jquery templates,
# which also use double braces.
if reader.remaining() and reader[0] == "!":
reader.consume(1)
body.chunks.append(_Text(start_brace, line))
continue
# Comment
if start_brace == "{#":
end = reader.find("#}")
if end == -1:
raise ParseError("Missing end expression #} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
continue
# Expression
if start_brace == "{{":
end = reader.find("}}")
if end == -1:
raise ParseError("Missing end expression }} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty expression on line %d" % line)
body.chunks.append(_Expression(contents, line))
continue
# Block
assert start_brace == "{%", start_brace
end = reader.find("%}")
if end == -1:
raise ParseError("Missing end block %%} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
operator, space, suffix = contents.partition(" ")
suffix = suffix.strip()
# Intermediate ("else", "elif", etc) blocks
intermediate_blocks = {
"else": set(["if", "for", "while", "try"]),
"elif": set(["if"]),
"except": set(["try"]),
"finally": set(["try"]),
}
allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None:
if not in_block:
raise ParseError("%s outside %s block" %
(operator, allowed_parents))
if in_block not in allowed_parents:
raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents, line))
continue
# End tag
elif operator == "end":
if not in_block:
raise ParseError("Extra {%% end %%} block on line %d" % line)
return body
elif operator in ("extends", "include", "set", "import", "from",
"comment", "autoescape", "raw", "module"):
if operator == "comment":
continue
if operator == "extends":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("extends missing file path on line %d" % line)
block = _ExtendsBlock(suffix)
elif operator in ("import", "from"):
if not suffix:
raise ParseError("import missing statement on line %d" % line)
block = _Statement(contents, line)
elif operator == "include":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("include missing file path on line %d" % line)
block = _IncludeBlock(suffix, reader, line)
elif operator == "set":
if not suffix:
raise ParseError("set missing statement on line %d" % line)
block = _Statement(suffix, line)
elif operator == "autoescape":
fn = suffix.strip()
if fn == "None":
fn = None
template.autoescape = fn
continue
elif operator == "raw":
block = _Expression(suffix, line, raw=True)
elif operator == "module":
block = _Module(suffix, line)
body.chunks.append(block)
continue
elif operator in ("apply", "block", "try", "if", "for", "while"):
# parse inner body recursively
if operator in ("for", "while"):
block_body = _parse(reader, template, operator, operator)
elif operator == "apply":
# apply creates a nested function so syntactically it's not
# in the loop.
block_body = _parse(reader, template, operator, None)
else:
block_body = _parse(reader, template, operator, in_loop)
if operator == "apply":
if not suffix:
raise ParseError("apply missing method name on line %d" % line)
block = _ApplyBlock(suffix, line, block_body)
elif operator == "block":
if not suffix:
raise ParseError("block missing name on line %d" % line)
block = _NamedBlock(suffix, block_body, template, line)
else:
block = _ControlBlock(contents, line, block_body)
body.chunks.append(block)
continue
elif operator in ("break", "continue"):
if not in_loop:
raise ParseError("%s outside %s block" % (operator, set(["for", "while"])))
body.chunks.append(_Statement(contents, line))
continue
else:
raise ParseError("unknown operator: %r" % operator)
| 36.033449
| 98
| 0.593771
|
acfd60132911c2b2e86fa8462e546648eb314d27
| 249
|
py
|
Python
|
topCoder/srms/200s/srm262/div2/div_to_zero.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 25
|
2015-01-21T16:39:18.000Z
|
2021-05-24T07:01:24.000Z
|
topCoder/srms/200s/srm262/div2/div_to_zero.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 2
|
2020-09-30T19:39:36.000Z
|
2020-10-01T17:15:16.000Z
|
topCoder/srms/200s/srm262/div2/div_to_zero.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 15
|
2015-01-21T16:39:27.000Z
|
2020-10-01T17:00:22.000Z
|
class DivToZero:
    """TopCoder SRM 262 Div 2: make a number divisible by replacing its last two digits."""

    def lastTwo(self, num, factor):
        """Return the smallest two-digit suffix making the number divisible.

        Replaces the last two digits of ``num`` with each candidate
        ``"00"``..``"99"`` in increasing order and returns the first
        suffix (as a zero-padded string) for which the resulting number
        is divisible by ``factor``, or ``None`` if no suffix works.
        """
        prefix = str(num)[:-2]  # may be "" when num < 100; int("" + "07") == 7
        for candidate in range(100):  # range(), not Py2-only xrange()
            suffix = "%02d" % candidate
            if int(prefix + suffix) % factor == 0:
                return suffix
        return None  # explicit: no valid two-digit suffix exists
| 24.9
| 40
| 0.37751
|
acfd6218fdf794d06d011b67b8ba356b355efe78
| 1,887
|
py
|
Python
|
src/fixedincome/yield_curve.py
|
shreysrins/fixedincome
|
26c21b991a9e2a8f1cd9916517aa9e5502487efa
|
[
"MIT"
] | 2
|
2022-02-10T06:16:29.000Z
|
2022-02-16T17:12:33.000Z
|
src/fixedincome/yield_curve.py
|
shreysrins/FixedIncome
|
26c21b991a9e2a8f1cd9916517aa9e5502487efa
|
[
"MIT"
] | 3
|
2022-02-11T00:50:09.000Z
|
2022-02-17T18:43:38.000Z
|
src/fixedincome/yield_curve.py
|
shreysrins/fixedincome
|
26c21b991a9e2a8f1cd9916517aa9e5502487efa
|
[
"MIT"
] | 2
|
2022-03-28T15:19:30.000Z
|
2022-03-29T00:23:14.000Z
|
"""
Fixed income analytics for the yield curve
Implements various functions useful for yield curve analytics.
"""
import numpy as np
def bootstrap(cash_flows : np.ndarray, prices : np.ndarray) -> np.ndarray:
    """
    Calculates the spot yield curve from a series of bonds.

    Parameters
    ----------
    cash_flows : np.ndarray
        Nonsingular payoff matrix of a series of bonds.
    prices : np.ndarray
        Corresponding price of each bond.

    Returns
    -------
    np.ndarray
        The corresponding spot yield curve derived from no-arbitrage relationships.
    """
    assert cash_flows.shape[1] == prices.shape[0], "Check shapes of input matrices."

    # Solve cash_flows @ d = prices directly; np.linalg.solve is more
    # numerically stable (and cheaper) than forming the explicit inverse.
    discount_factors = np.ravel(np.linalg.solve(cash_flows, prices))

    # Invert the discount relationship d_i = 1 / (1 + y_i)^i at each maturity i.
    periods = np.arange(start=1, stop=discount_factors.shape[0] + 1)
    yields = np.power(discount_factors, -1.0 / periods) - 1
    return yields  # already flat (1-D)
def regression():
    """Fit a yield curve by regression (placeholder — not yet implemented)."""
    raise NotImplementedError("Regression not yet implemented.")
def spline():
    """Fit a yield curve with splines (placeholder — not yet implemented)."""
    raise NotImplementedError("Spline not yet implemented.")
def nelson_siegel(T : np.ndarray, theta0 : float, theta1 : float, theta2 : float, lambda_ : float) -> np.ndarray:
    """
    Evaluates the Nelson-Siegel yield curve at the given times.

    Parameters
    ----------
    T : np.ndarray
        List of times at which to calculate yield (nonzero; T = 0 would divide by zero).
    theta0 : float
        A Nelson-Siegel Model parameter (long-run level term).
    theta1 : float
        A Nelson-Siegel Model parameter.
    theta2 : float
        A Nelson-Siegel Model parameter.
    lambda_ : float
        A Nelson-Siegel Model parameter (decay scale).

    Returns
    -------
    np.ndarray
        The Nelson-Siegel yield curve evaluated at each time in ``T``.
    """
    # y(T) = theta0 + (theta1 + theta2) * (1 - e^{-T/lambda}) / (T/lambda)
    #        - theta2 * e^{-T/lambda}
    scaled_time = T / lambda_
    decay = np.exp(-scaled_time)
    loading = (1 - decay) / scaled_time
    return theta0 + (theta1 + theta2) * loading - theta2 * decay
| 28.164179
| 174
| 0.648649
|
acfd62f124a22cb5a04f7a226934b5e1325ad450
| 5,043
|
py
|
Python
|
tests/unittest/test_crypto.py
|
Kjwon15/torpy
|
db185f7a251b0c5a89ef96d27fe8b2374b6d3e8d
|
[
"Apache-2.0"
] | 230
|
2019-07-18T14:22:28.000Z
|
2022-03-28T10:12:37.000Z
|
tests/unittest/test_crypto.py
|
Kjwon15/torpy
|
db185f7a251b0c5a89ef96d27fe8b2374b6d3e8d
|
[
"Apache-2.0"
] | 37
|
2019-07-26T16:20:12.000Z
|
2022-03-03T23:24:04.000Z
|
tests/unittest/test_crypto.py
|
Kjwon15/torpy
|
db185f7a251b0c5a89ef96d27fe8b2374b6d3e8d
|
[
"Apache-2.0"
] | 49
|
2019-08-08T11:48:20.000Z
|
2022-03-11T21:07:06.000Z
|
# Copyright 2019 James Brown
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# flake8: noqa: E501
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import dh
from torpy.crypto import kdf_tor
from torpy.crypto_common import dh_shared, dh_public_to_bytes
def to_bytes(hex_str):
    """Decode a hex string (ASCII whitespace between byte pairs is allowed)."""
    decoded = bytes.fromhex(hex_str)
    return decoded
def simple_urandom(length):
    """Deterministic stand-in for os.urandom: always returns the same 16-byte key.

    Asserts that the requested ``length`` matches the fixed key length.
    """
    fixed_key = bytes.fromhex('30 DB E6 14 B4 96 05 9F 52 47 8D 36 9B 2A 03 9E')
    assert length == len(fixed_key)
    return fixed_key
def test_dh():  # noqa: E501
    """End-to-end check of the Diffie-Hellman handshake primitives against fixed vectors.

    Verifies that:
      * the hard-coded 1024-bit DH prime serializes to the expected bytes,
      * public-key serialization (dh_public_to_bytes) round-trips,
      * dh_shared reproduces the expected shared secret, and
      * kdf_tor derives the expected auth digest + key material.
    The hex vectors below are fixed test data (apparently taken from the
    mini_tor project, per the variable name) — do not edit them.
    """
    bend = default_backend()
    # 1024-bit prime and generator g=2 for the DH group.
    p = 179769313486231590770839156793787453197860296048756011706444423684197180216158519368947833795864925541502180565485980503646440548199239100050792877003355816639229553136239076508735759914822574862575007425302077447712589550957937778424442426617334727629299387668709205606050270810842907692932019128194467627007
    g = 2
    mini_tor_p_bytes = b'\xff\xff\xff\xff\xff\xff\xff\xff\xc9\x0f\xda\xa2\x21\x68\xc2\x34\xc4\xc6\x62\x8b\x80\xdc\x1c\xd1\x29\x02\x4e\x08\x8a\x67\xcc\x74\x02\x0b\xbe\xa6\x3b\x13\x9b\x22\x51\x4a\x08\x79\x8e\x34\x04\xdd\xef\x95\x19\xb3\xcd\x3a\x43\x1b\x30\x2b\x0a\x6d\xf2\x5f\x14\x37\x4f\xe1\x35\x6d\x6d\x51\xc2\x45\xe4\x85\xb5\x76\x62\x5e\x7e\xc6\xf4\x4c\x42\xe9\xa6\x37\xed\x6b\x0b\xff\x5c\xb6\xf4\x06\xb7\xed\xee\x38\x6b\xfb\x5a\x89\x9f\xa5\xae\x9f\x24\x11\x7c\x4b\x1f\xe6\x49\x28\x66\x51\xec\xe6\x53\x81\xff\xff\xff\xff\xff\xff\xff\xff'
    # Sanity check: the integer prime and the byte-level constant agree.
    assert p.to_bytes(128, 'big') == mini_tor_p_bytes
    dh_parameters_numbers = dh.DHParameterNumbers(p, g)
    # Our private exponent x and its corresponding public value y = g^x mod p.
    dh_private_bytes = to_bytes(
        '81 2A 69 3A CD 75 6B 3F 3E 9A 8C 64 A4 ED 8C B5 2A 43 8B E7 6B 0D FB F5 CB 0D 70 E1 DB 2A 5F 57 4F 36 7D F1 B0 D1 E5 57 32 57 92 03 E4 0C 08 EF BA 7B 82 25 10 00 94 F0 3A E9 3F 2C AF 24 D6 FB A7 E0 DE 97 18 B5 DB E9 15 44 69 F8 CA 42 E8 87 18 16 BB F6 F8 8E D0 C9 B1 41 D4 02 02 E8 1A EC B3 E2 EB 06 04 86 EB 3D 6E A4 5E D7 4C ED EB B5 C6 7A A7 4F 13 99 D4 50 C8 BA 1E 9B 79 66 36 1D'
    )
    dh_private_x = int.from_bytes(dh_private_bytes, 'big')
    dh_public_bytes = to_bytes(
        '8F 61 59 22 DC 09 BF AB EF 79 3B 2F 3C 6D D3 51 2D FB 29 41 B2 45 59 B7 BF 64 17 41 9B 17 5F F3 7C 5E C8 A8 A9 87 19 72 4D 94 8A 7F 3A 7B D8 30 8C F3 79 88 4F 72 55 DA 7F A7 DC 93 26 C4 16 92 DB 14 C5 34 94 5C 48 4A 0F 54 39 EF 77 8F D1 64 EF BE 0F B4 55 B8 C1 DF DA 9F D5 60 03 B2 C5 34 4C 46 23 00 A8 89 47 F0 2F 5A 26 FC 5E 1A BB 63 49 25 19 BB BD 5F 69 6E 7D A0 00 50 28 06 21 CC'
    )
    dh_public_y = int.from_bytes(dh_public_bytes, 'big')
    dh_public_num = dh.DHPublicNumbers(dh_public_y, dh_parameters_numbers)
    dh_public = dh_public_num.public_key(bend)
    # Check public serialization
    assert dh_public_to_bytes(dh_public) == dh_public_bytes
    dh_private_num = dh.DHPrivateNumbers(dh_private_x, dh_public_num)
    dh_private = dh_private_num.private_key(bend)
    # The peer's public value, from the same fixed test vectors.
    other_public_bytes = to_bytes(
        '69 47 FC C9 54 60 AE F6 F6 99 C1 E2 FA 9A 6F FA A2 76 FD 0B 89 6C CD 6F 0C 73 99 20 F6 38 64 83 54 09 61 F4 48 F4 90 9D 41 BB D7 72 E5 B0 C1 B7 9D B2 DD ED E2 C8 50 D8 49 EE 61 DA D0 6E 73 02 4B B4 A9 66 CE 83 AF 97 01 2D 08 9C 83 63 9A AB 33 D9 0C 80 2B 26 E9 6B D0 C9 9D 53 FF 53 C0 24 8F 73 5A 71 15 CC 6D 20 92 80 00 4E EA FD 11 25 C9 74 44 8A 86 3F 27 BC 5F 4C B3 D7 98 DB 7A 7F'
    )
    dh_other_public_y = int.from_bytes(other_public_bytes, 'big')
    dh_other_public_num = dh.DHPublicNumbers(dh_other_public_y, dh_parameters_numbers)
    dh_other_public = dh_other_public_num.public_key(bend)
    # Check shared
    shared = dh_shared(dh_private, dh_other_public)
    expected_shared = to_bytes(
        'CF 67 62 D4 65 96 A0 B0 E0 A9 C2 32 7E 09 E5 B4 81 6B 30 6B 9B 7B 75 65 BE 91 0E 59 F0 96 D8 95 AA 51 89 AE CB 07 50 DE E7 9B 53 A9 29 06 16 65 6C F1 F1 4D F8 B9 94 23 5E FE C5 64 83 F2 40 AD 92 7D 63 76 47 37 6F DE 67 16 CE 38 B7 5C BD 36 C6 99 00 00 09 DE 6E E2 5A 9D 9B BB EC 71 43 1C 41 1A 39 C0 C5 21 88 A0 BB 0E C4 BF 46 F3 30 FC 47 5B 05 45 F9 49 59 3B 63 1C ED C0 EF 21 F0 44'
    )
    assert shared == expected_shared
    # Check derived
    computed_auth, key_material = kdf_tor(shared)
    expected_derived = to_bytes(
        '01 41 3D 3A 4A 06 55 4E 27 76 42 EA D4 44 F5 D8 A3 50 CD DD 60 2B 4D BD 97 76 7C CE DF E9 05 29 40 C6 14 EA E0 05 40 2D 08 8C B9 34 BD 24 16 E9 97 E6 8A 76 C1 FB C9 25 EA 77 D5 F6 19 9C 0E 65 A1 C2 D9 9E 70 B4 39 7F 60 C2 9D 8C A8 BE C0 E3 77 7D 05 FC A8 5A 6C F2 BD 46 05 CB 83 37 B4 96 4A 6C 2F 8F'
    )
    assert computed_auth + key_material == expected_derived
| 60.035714
| 538
| 0.723181
|
acfd62f64604313e778367c9856e8c30840ab45f
| 9,377
|
py
|
Python
|
clang-tidy-diff.py
|
asarium/clang-tidy-action-images
|
3d1d66a54bce8251da01f2b02d9bdac2f7023fa7
|
[
"MIT"
] | null | null | null |
clang-tidy-diff.py
|
asarium/clang-tidy-action-images
|
3d1d66a54bce8251da01f2b02d9bdac2f7023fa7
|
[
"MIT"
] | null | null | null |
clang-tidy-diff.py
|
asarium/clang-tidy-action-images
|
3d1d66a54bce8251da01f2b02d9bdac2f7023fa7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
#===- clang-tidy-diff.py - ClangTidy Diff Checker ------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
r"""
ClangTidy Diff Checker
======================
This script reads input from a unified diff, runs clang-tidy on all changed
files and outputs clang-tidy warnings in changed lines only. This is useful to
detect clang-tidy regressions in the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-tidy-diff.py -p1
svn diff --diff-cmd=diff -x-U0 | \
clang-tidy-diff.py -fix -checks=-*,modernize-use-override
"""
import argparse
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
# PyYAML is optional: it is only needed for the -export-fixes feature.
try:
    import yaml
except ImportError:
    yaml = None

# Detect Python 2 via version_info — comparing sys.version[0] (a string
# slice of the human-readable banner) is fragile; version_info is the
# documented way to test the major version.
is_py2 = sys.version_info[0] == 2

if is_py2:
    import Queue as queue
else:
    import queue as queue
def run_tidy(task_queue, lock, timeout):
    """Worker loop: pull clang-tidy command lines off *task_queue* and run them.

    Intended as the target of a daemon thread; loops forever. When *timeout*
    is not None, a watchdog timer kills each invocation after that many
    seconds. Output is serialized through *lock*.
    """
    watchdog = None
    while True:
        invocation = task_queue.get()
        try:
            process = subprocess.Popen(invocation,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            if timeout is not None:
                watchdog = threading.Timer(timeout, process.kill)
                watchdog.start()

            out, err = process.communicate()

            with lock:
                sys.stdout.write(out.decode('utf-8') + '\n')
                sys.stdout.flush()
                if err:
                    sys.stderr.write(err.decode('utf-8') + '\n')
                    sys.stderr.flush()
        except Exception as exc:
            with lock:
                sys.stderr.write('Failed: ' + str(exc) + ': '.join(invocation) + '\n')
        finally:
            with lock:
                if timeout is not None and watchdog is not None:
                    # A dead timer means it already fired and killed the process.
                    if not watchdog.is_alive():
                        sys.stderr.write('Terminated by timeout: ' +
                                         ' '.join(invocation) + '\n')
                    watchdog.cancel()
            task_queue.task_done()
def start_workers(max_tasks, tidy_caller, task_queue, lock, timeout):
    """Spawn *max_tasks* daemon threads, each running *tidy_caller*."""
    for _ in range(max_tasks):
        worker = threading.Thread(target=tidy_caller,
                                  args=(task_queue, lock, timeout))
        worker.daemon = True  # do not block interpreter exit
        worker.start()
def merge_replacement_files(tmpdir, mergefile):
    """Merge all replacement files in a directory into a single file."""
    # The fixes suggested by clang-tidy >= 4.0.0 are given under
    # the top level key 'Diagnostics' in the output yaml files
    mergekey = "Diagnostics"
    merged = []
    for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
        # Context manager closes the handle promptly (the original left
        # it open until garbage collection).
        with open(replacefile, 'r') as handle:
            content = yaml.safe_load(handle)
        if not content:
            continue  # Skip empty files.
        merged.extend(content.get(mergekey, []))

    if merged:
        # MainSourceFile: The key is required by the definition inside
        # include/clang/Tooling/ReplacementsYaml.h, but the value
        # is actually never used inside clang-apply-replacements,
        # so we set it to '' here.
        output = {'MainSourceFile': '', mergekey: merged}
        with open(mergefile, 'w') as out:
            yaml.safe_dump(output, out)
    else:
        # Truncate/create the merge file so stale results never linger.
        open(mergefile, 'w').close()
def main():
    """Parse a unified diff from stdin and run clang-tidy on the changed lines only."""
    parser = argparse.ArgumentParser(description=
                                     'Run clang-tidy against changed files, and '
                                     'output diagnostics only for modified '
                                     'lines.')
    parser.add_argument('-clang-tidy-binary', metavar='PATH',
                        default='clang-tidy',
                        help='path to clang-tidy binary')
    parser.add_argument('-p', metavar='NUM', default=0,
                        help='strip the smallest prefix containing P slashes')
    parser.add_argument('-regex', metavar='PATTERN', default=None,
                        help='custom pattern selecting file paths to check '
                        '(case sensitive, overrides -iregex)')
    parser.add_argument('-iregex', metavar='PATTERN', default=
                        r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc)',
                        help='custom pattern selecting file paths to check '
                        '(case insensitive, overridden by -regex)')
    parser.add_argument('-j', type=int, default=1,
                        help='number of tidy instances to be run in parallel.')
    parser.add_argument('-timeout', type=int, default=None,
                        help='timeout per each file in seconds.')
    parser.add_argument('-fix', action='store_true', default=False,
                        help='apply suggested fixes')
    parser.add_argument('-checks',
                        help='checks filter, when not specified, use clang-tidy '
                        'default',
                        default='')
    parser.add_argument('-path', dest='build_path',
                        help='Path used to read a compile command database.')
    parser.add_argument('-vfsoverlay', dest='vfsoverlay', metavar='VFSFILE',
                        help='Specified a VFS overlay configuration file')
    if yaml:
        parser.add_argument('-export-fixes', metavar='FILE', dest='export_fixes',
                            help='Create a yaml file to store suggested fixes in, '
                            'which can be applied with clang-apply-replacements.')
    parser.add_argument('-extra-arg', dest='extra_arg',
                        action='append', default=[],
                        help='Additional argument to append to the compiler '
                        'command line.')
    parser.add_argument('-extra-arg-before', dest='extra_arg_before',
                        action='append', default=[],
                        help='Additional argument to prepend to the compiler '
                        'command line.')
    parser.add_argument('-quiet', action='store_true', default=False,
                        help='Run clang-tidy in quiet mode')

    # Everything after a literal '--' is forwarded to clang-tidy verbatim.
    clang_tidy_args = []
    argv = sys.argv[1:]
    if '--' in argv:
        clang_tidy_args.extend(argv[argv.index('--'):])
        argv = argv[:argv.index('--')]
    args = parser.parse_args(argv)

    # Extract changed lines for each file.
    filename = None
    lines_by_file = {}
    for line in sys.stdin:
        # Raw strings: these patterns contain regex escapes (\+, \d, '\ ')
        # that are invalid *string* escapes and raise SyntaxWarning on
        # Python 3.12+ when written in a non-raw literal.
        match = re.search(r'^\+\+\+\ \"?(.*?/){%s}([^ \t\n\"]*)' % args.p, line)
        if match:
            filename = match.group(2)
        if filename is None:
            continue

        if args.regex is not None:
            if not re.match('^%s$' % args.regex, filename):
                continue
        else:
            if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
                continue

        match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line)
        if match:
            start_line = int(match.group(1))
            line_count = 1
            if match.group(3):
                line_count = int(match.group(3))
            if line_count == 0:
                continue
            end_line = start_line + line_count - 1
            lines_by_file.setdefault(filename, []).append([start_line, end_line])

    if not any(lines_by_file):
        print("No relevant changes found.")
        sys.exit(0)

    max_task_count = args.j
    if max_task_count == 0:
        max_task_count = multiprocessing.cpu_count()
    max_task_count = min(len(lines_by_file), max_task_count)

    tmpdir = None
    if yaml and args.export_fixes:
        tmpdir = tempfile.mkdtemp()

    # Tasks for clang-tidy.
    task_queue = queue.Queue(max_task_count)
    # A lock for console output.
    lock = threading.Lock()

    # Run a pool of clang-tidy workers.
    start_workers(max_task_count, run_tidy, task_queue, lock, args.timeout)

    # Form the common args list.
    common_clang_tidy_args = []
    if args.fix:
        common_clang_tidy_args.append('-fix')
    if args.checks != '':
        common_clang_tidy_args.append('-checks=' + args.checks)
    if args.quiet:
        common_clang_tidy_args.append('-quiet')
    if args.build_path is not None:
        common_clang_tidy_args.append('-p=%s' % args.build_path)
    if args.vfsoverlay is not None:
        common_clang_tidy_args.append('--vfsoverlay=%s' % args.vfsoverlay)
    for arg in args.extra_arg:
        common_clang_tidy_args.append('-extra-arg=%s' % arg)
    for arg in args.extra_arg_before:
        common_clang_tidy_args.append('-extra-arg-before=%s' % arg)

    for name in lines_by_file:
        line_filter_json = json.dumps(
            [{"name": name, "lines": lines_by_file[name]}],
            separators=(',', ':'))

        # Run clang-tidy on files containing changes.
        command = [args.clang_tidy_binary]
        command.append('-line-filter=' + line_filter_json)
        if yaml and args.export_fixes:
            # Get a temporary file. We immediately close the handle so clang-tidy can
            # overwrite it.
            (handle, tmp_name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
            os.close(handle)
            command.append('-export-fixes=' + tmp_name)
        command.extend(common_clang_tidy_args)
        command.append(name)
        command.extend(clang_tidy_args)

        task_queue.put(command)

    # Wait for all threads to be done.
    task_queue.join()

    if yaml and args.export_fixes:
        print('Writing fixes to ' + args.export_fixes + ' ...')
        try:
            merge_replacement_files(tmpdir, args.export_fixes)
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; Exception is broad enough for
            # this best-effort export step.
            sys.stderr.write('Error exporting fixes.\n')
            traceback.print_exc()

    if tmpdir:
        shutil.rmtree(tmpdir)
# Standard entry-point guard: run only when executed as a script.
if __name__ == '__main__':
    main()
| 34.347985
| 80
| 0.618001
|
acfd632239d0ef73ce9527b427909b205192c5b5
| 18,210
|
py
|
Python
|
src/setup_phase.py
|
sporreking/Evaluation-of-Synthetic-Face-Generation-Approaches
|
480608524751c8f48445404ad92708133472cd0e
|
[
"MIT"
] | 1
|
2022-03-10T13:40:05.000Z
|
2022-03-10T13:40:05.000Z
|
src/setup_phase.py
|
sporreking/Evaluation-of-Synthetic-Face-Generation-Approaches
|
480608524751c8f48445404ad92708133472cd0e
|
[
"MIT"
] | 55
|
2022-03-09T13:59:11.000Z
|
2022-03-31T14:58:47.000Z
|
src/setup_phase.py
|
sporreking/Evaluation-of-Synthetic-Face-Generation-Approaches
|
480608524751c8f48445404ad92708133472cd0e
|
[
"MIT"
] | null | null | null |
from src.controller.ControllerRegistry import ControllerRegistry
from src.metric.SampleMetricRegistry import SampleMetricRegistry
from src.metric.SampleMetricManager import SampleMetricManager
from src.metric.CompoundMetricManager import CompoundMetricManager
from src.core.Setupable import Setupable
import src.util.PromptUtil as PU
from typing import Any
from src.phase_utils import (
confirm_lists,
select_dataset,
select_attributes,
select_controllers,
select_generators,
select_sample_metrics,
select_filters,
select_compound_metrics,
)
def setup_phase() -> None:
    """
    Allows the user to perform setups for their desired controllers, generators, filters, and metrics.

    Interactively prompts for a dataset, attributes, controllers, generators,
    sample metrics, filters and compound metrics; lets the user customize the
    setup modes/parameters per target; runs the selected setups; and finally
    prints a SUCCESS/FAILURE summary for each target. Aborts (returns early)
    if either confirmation prompt is declined.
    """
    # * Initial setupable selection
    dataset = select_dataset()
    attributes = select_attributes(dataset)
    controller_names, controller_types = select_controllers()
    generator_names, generator_types = select_generators()
    sample_metric_names, sample_metrics_types = select_sample_metrics()
    filter_names, filter_types = select_filters()
    compound_metric_names, compound_metrics_types = select_compound_metrics()

    # * Final presentation before config
    if not confirm_lists(
        "Initial Selection",
        ("Dataset", [dataset.get_name(dataset.get_resolution())]),
        ("Attributes", attributes),
        ("Controllers", controller_names),
        ("Generators", generator_names),
        ("Sample Metrics", sample_metric_names),
        ("Filters", filter_names),
        ("Compound Metrics", compound_metric_names),
    ):
        print("Aborting.")
        return

    # * Create setupable instances
    generators = [g(dataset) for g in generator_types]
    # Pair each controller type only with generators the registry declares
    # compatible with it.
    controllers = [
        c(g, attributes)
        for i, c in enumerate(controller_types)
        for g in generators
        if g.get_name()
        in ControllerRegistry.get_compatible_generator_names(controller_names[i])
    ]
    sample_metric_manager = SampleMetricManager(sample_metrics_types, None, dataset)
    # Separate manager over *all* registered sample metrics (used by filters
    # and compound metrics below).
    sample_metric_manager_all = SampleMetricManager(
        SampleMetricRegistry.get_resources(), None, dataset
    )
    filters = [f(_setup_only=True, smm=sample_metric_manager_all) for f in filter_types]
    # NOTE(review): the trailing -1 argument's meaning is not visible here —
    # presumably a placeholder/sentinel; confirm against CompoundMetricManager.
    compound_metric_managers = [
        CompoundMetricManager(
            compound_metrics_types, None, dataset, sample_metric_manager_all, c, -1
        )
        for c in controllers
    ]

    # * Configure controller setup
    (
        selected_controllers,
        selected_controller_names,
        controller_setup_config,
    ) = _select_setup_modes(
        "Controller",
        controllers,
        [c.get_name() + "_" + c.get_generator().get_name() for c in controllers],
    )

    # * Configure sample metric setup
    (
        selected_sample_metrics,
        selected_sample_metric_names,
        sample_metric_setup_config,
    ) = _select_setup_modes(
        "Sample Metric",
        sample_metric_manager.get_metric_instances(),
        [m.get_name() for m in sample_metric_manager.get_metric_instances()],
    )

    # * Configure filter setup
    (
        selected_filters,
        selected_filter_names,
        filter_setup_config,
    ) = _select_setup_modes(
        "Filter",
        filters,
        [f.get_name() for f in filter_types],
    )

    # * Configure compound metric setup
    (
        selected_compound_metrics,
        selected_compound_metric_names,
        compound_metric_setup_config,
    ) = _select_setup_modes(
        "Compound Metric",
        [m for cmm in compound_metric_managers for m in cmm.get_metric_instances()],
        [
            m.get_name()
            + "_"
            + cmm.get_controller().get_name()
            + "_"
            + cmm.get_controller().get_generator().get_name()
            for cmm in compound_metric_managers
            for m in cmm.get_metric_instances()
        ],
    )

    # * Final confirmation before launch
    if not confirm_lists(
        "Run setups for the following targets?",
        ("Controllers", selected_controller_names),
        ("Sample Metrics", selected_sample_metric_names),
        ("Filters", selected_filter_names),
        ("Compound Metrics", selected_compound_metric_names),
    ):
        print("Aborting.")
        return

    # Each status_* variable is assigned only when its selection is non-empty;
    # the identical guards below keep the later reads safe.
    # * Perform controller setups
    status_controllers: dict[str, dict[str, bool]]
    if len(selected_controllers) > 0:
        PU.push_indent(1)
        PU.print_with_border("Performing Setup for Controllers")
        status_controllers = _run_setup_modes(
            selected_controllers, selected_controller_names, controller_setup_config
        )
        PU.pop_indent()

    # * Perform sample metric setups
    status_sample_metrics: dict[str, dict[str, bool]]
    if len(selected_sample_metrics) > 0:
        PU.push_indent(1)
        PU.print_with_border("Performing Setup for Sample Metrics")
        status_sample_metrics = _run_setup_modes(
            selected_sample_metrics,
            selected_sample_metric_names,
            sample_metric_setup_config,
        )
        PU.pop_indent()

    # * Perform filter setups
    status_filters: dict[str, dict[str, bool]]
    if len(selected_filters) > 0:
        PU.push_indent(1)
        PU.print_with_border("Performing Setup for Filters")
        status_filters = _run_setup_modes(
            selected_filters, selected_filter_names, filter_setup_config
        )
        PU.pop_indent()

    # * Perform compound metric setups
    status_compound_metrics: dict[str, dict[str, bool]]
    if len(selected_compound_metrics) > 0:
        PU.push_indent(1)
        PU.print_with_border("Performing Setup for Compound Metrics")
        status_compound_metrics = _run_setup_modes(
            selected_compound_metrics,
            selected_compound_metric_names,
            compound_metric_setup_config,
        )
        PU.pop_indent()

    # * Print status
    PU.push_indent(1)

    PU.push_indent(1)
    PU.print_with_border("Finished", "=", "||")
    PU.pop_indent()

    if len(selected_controllers) > 0:
        _display_status("Controller", status_controllers)
    if len(selected_sample_metrics) > 0:
        _display_status("Sample Metric", status_sample_metrics)
    if len(selected_filters) > 0:
        _display_status("Filter", status_filters)
    if len(selected_compound_metrics) > 0:
        _display_status("Compound Metric", status_compound_metrics)

    PU.pop_indent()
def _display_status(
    type_name: str, status_setupables: dict[str, dict[str, bool]]
) -> None:
    """Print a SUCCESS/FAILURE table for every setupable of the given type."""
    PU.push_indent(1)
    PU.print_with_border(f"{type_name} Status")

    for name, mode_results in status_setupables.items():
        # Nothing was attempted for this setupable.
        if not mode_results:
            PU.print_with_indent("No setup modes were performed.")
            continue

        mode_names = list(mode_results.keys())
        verdicts = ["SUCCESS" if ok else "FAILURE" for ok in mode_results.values()]
        PU.print_list(
            name,
            PU.tablify([mode_names, verdicts]),
            bullet_symbol=" *",
            header_border_symbol="-",
            header_border_side_symbol="|",
        )

    PU.pop_indent()
def _run_with_prerequisite_modes(
    setupable: Setupable, config: dict[str, tuple[bool, dict[str, Any]]], mode: str
) -> None:
    """Run ``mode`` on ``setupable``, recursively satisfying its prerequisites first.

    Raises a ``RuntimeError`` if a prerequisite mode was neither selected for
    execution nor already ready.
    """
    # Depth-first: handle every required mode before the mode itself.
    for prerequisite in setupable.get_required_modes(mode):
        _run_with_prerequisite_modes(setupable, config, prerequisite)

    should_run, parameters = config[mode]
    if should_run:
        setupable.setup(mode, parameters, skip_if_completed=True)
        return
    if not setupable.is_ready(mode):
        raise RuntimeError(
            f"Required mode: '{mode}' is not ready, yet it was not queried for execution."
        )
def _run_setup_modes(
    setupables: list[Setupable],
    setupable_names: list[str],
    setup_config: list[dict[str, tuple[bool, dict[str, Any]]]],
) -> dict[str, dict[str, bool]]:
    """Execute every selected setup mode and collect per-mode success flags.

    Returns a mapping ``{setupable_name: {mode: succeeded}}`` with entries
    only for modes that were actually attempted.
    """
    # One (initially empty) result dict per setupable.
    status: dict[str, dict[str, bool]] = {name: {} for name in setupable_names}

    for setupable, name, config in zip(setupables, setupable_names, setup_config):
        for mode, (selected, _) in config.items():
            if not selected:
                continue  # Mode was not queried for execution.
            try:
                _run_with_prerequisite_modes(setupable, config, mode)
            except Exception as error:
                # Record and report the failure, then keep going with the rest.
                status[name][mode] = False
                PU.push_indent(3)
                PU.print_with_border(f"Failed to setup {name} | {mode}", "!")
                PU.print_with_indent(repr(error))
                PU.pop_indent()
            else:
                status[name][mode] = True

    return status
def _select_setup_modes(
    type_name: str, setupables: list[Setupable], setupable_names: list[str]
) -> tuple[list[Setupable], list[str], list[dict[str, tuple[bool, dict[str, Any]]]]]:
    """Interactively choose which setupables/modes to run and with what parameters.

    Returns the selected setupables, their display names, and one config dict
    per selection mapping ``mode -> [run_flag, parameters]``. Despite the
    ``tuple`` in the return annotation, the per-mode entries are built as
    two-element *lists* so the run flag can be mutated in place below.
    """
    assert len(setupables) == len(setupable_names)
    PU.push_indent(1)

    # Compile table of setupable names and 'ready' status
    setupable_names_ready = PU.tablify(
        [
            setupable_names,
            [
                "SETUP COMPLETE" if setupable.is_ready() else "SETUP NOT COMPLETE"
                for setupable in setupables
            ],
        ]
    )

    # Select what to setup; not-yet-ready setupables are preselected.
    setupable_selection = PU.prompt_multi_options(
        f"What {type_name.lower()}(s) would you like to setup?",
        setupable_names_ready,
        default_indices=[i for i, s in enumerate(setupables) if not s.is_ready()],
        allow_empty=True,
        return_index=True,
    )

    # TODO: Warn if completed setupables were selected (same for modes?)

    # Selection context: default run flag = mode not yet ready.
    setupable_config = [  # [setupable{mode: (run, params), ...}, ...]
        {
            m: [not setupables[i].is_ready(m), setupables[i].get_setup_parameters(m)]
            for m in setupables[i].get_setup_modes()
        }
        for i in setupable_selection
    ]
    setupable_config_modified = [False for _ in setupable_config]

    # Early exit if there was no selection
    if len(setupable_selection) <= 0:
        PU.pop_indent()
        return [], [], setupable_config

    # Select what to configure; loop until the user picks "confirm".
    config_index = -1
    while config_index != len(setupable_selection):
        config_index = PU.prompt_options(
            f"Would you like to customize the setup of any {type_name.lower()}(s)?",
            PU.tablify(
                [
                    [setupable_names_ready[i] for i in setupable_selection],
                    [
                        "RUN CUSTOMIZED SETUP"
                        if modified
                        else "RUN ALL UNCOMPLETED MODES WITH DEFAULT PARAMETERS"
                        for modified in setupable_config_modified
                    ],
                ]
            )
            + ["confirm"],
            default_index=len(setupable_selection),
            return_index=True,
        )

        # Configure setupable if applicable
        if config_index < len(setupable_selection):
            setupable = setupables[setupable_selection[config_index]]
            name = setupable_names[setupable_selection[config_index]]
            config = setupable_config[config_index]

            # Print indent
            PU.push_indent(1)
            PU.print_with_border(f"Customization of {type_name} Setup: {name}")

            # Pick setup modes; re-prompt until the prerequisites check passes.
            valid_selection = False
            while not valid_selection:
                mode_selection = PU.prompt_multi_options(
                    f"What setup modes would you like to run? | {name}",
                    PU.tablify(
                        [
                            setupable.get_setup_modes(),  # Mode name
                            [  # Ready?
                                (
                                    "SETUP COMPLETE"
                                    if setupable.is_ready(m)
                                    else "SETUP NOT COMPLETE"
                                )
                                for m in setupable.get_setup_modes()
                            ],
                            [  # Requirements
                                (
                                    "Info: " + setupable.get_setup_info(m)
                                    if setupable.is_ready(m)
                                    else "UNSATISFIED PREREQUISITES: "
                                    + (
                                        "None"
                                        if all(
                                            setupable.is_ready(r)
                                            for r in setupable.get_required_modes(m)
                                        )
                                        else ", ".join(
                                            [
                                                r
                                                for r in setupable.get_required_modes(m)
                                                if not setupable.is_ready(r)
                                            ]
                                        )
                                    )
                                )
                                for m in setupable.get_setup_modes()
                            ],
                        ]
                    ),
                    default_indices=[
                        i
                        for i, m in enumerate(setupable.get_setup_modes())
                        if config[m][0]
                    ],
                    allow_empty=True,
                    return_index=True,
                )

                # Confirm selection
                valid_selection = True
                for i, m in enumerate(setupable.get_setup_modes()):
                    if i in mode_selection:
                        # Check if selection is valid w.r.t. prerequisites:
                        # every required mode must be selected too or already ready.
                        if any(
                            r
                            not in [
                                sm
                                for j, sm in enumerate(setupable.get_setup_modes())
                                if j in mode_selection
                            ]
                            and not setupable.is_ready(r)
                            for r in setupable.get_required_modes(m)
                        ):
                            valid_selection = False
                            PU.print_with_border(
                                f"Prerequisites of '{m}' are not met", "!"
                            )
                            PU.input_continue()
                            continue

                        # Selection valid -> add mode
                        config[m][0] = True
                        setupable_config_modified[
                            config_index
                        ] = True  # Signal modification
                    else:
                        # Remove mode (always allowed)
                        config[m][0] = False
                        setupable_config_modified[
                            config_index
                        ] = True  # Signal modification

            # Get selected setup modes
            setup_mode_selection = list(
                enumerate(m for m in setupable.get_setup_modes() if config[m][0])
            )

            # Check if no modes were selected
            if len(setup_mode_selection) <= 0:
                PU.pop_indent()
                continue

            # Configure parameters for selected modes
            mode_config_index = -1
            while True:
                # Pick mode to configure
                mode_config_index = PU.prompt_options(
                    f"Would you like to configure any parameters? | {name}",
                    PU.tablify(
                        [
                            [m for _, m in setup_mode_selection],
                            [
                                ", ".join(
                                    [f"{p} = {v}" for p, v in config[m][1].items()]
                                )
                                for _, m in setup_mode_selection
                            ],
                        ]
                    )
                    + ["confirm"],
                    default_index=len(setup_mode_selection),
                    return_index=True,
                )

                # Check if done
                if mode_config_index == len(setup_mode_selection):
                    break

                # Print indent
                mode = setup_mode_selection[mode_config_index][1]
                PU.push_indent(1)
                PU.print_with_border(
                    f"Configuration of Parameters for Setup Mode: {name} | {mode}"
                )

                # Pick parameter to configure
                param_index = -1
                while True:
                    # Pick parameter
                    param_index = PU.prompt_options(
                        f"What parameter would you like to configure? | {name} | {mode}",
                        [f"{p} = {v}" for p, v in config[mode][1].items()]
                        + ["confirm"],
                        default_index=len(config[mode][1]),
                        return_index=True,
                    )

                    # Check if done
                    if param_index == len(config[mode][1]):
                        break

                    # Update parameter; new value is coerced to the current value's type.
                    param_name = list(config[mode][1].keys())[param_index]
                    param_type = type(config[mode][1][param_name])
                    config[mode][1][param_name] = PU.input_type(
                        f"New value for '{param_name}'", param_type
                    )

                PU.pop_indent()  # Parameter config

            PU.pop_indent()  # Setupable config

    PU.pop_indent()  # Done
    return (
        [setupables[i] for i in setupable_selection],
        [setupable_names[i] for i in setupable_selection],
        setupable_config,
    )
| 35.636008
| 102
| 0.529819
|
acfd632728c3bdea09a1f0365e6472a4c6766224
| 9,855
|
py
|
Python
|
detectron2/layers/batch_norm.py
|
PedroUria/detectron2
|
53d838d1528c3511838a1a3b35c642291e0df495
|
[
"Apache-2.0"
] | 780
|
2021-03-01T08:04:08.000Z
|
2022-03-30T04:29:17.000Z
|
detectron2/layers/batch_norm.py
|
PedroUria/detectron2
|
53d838d1528c3511838a1a3b35c642291e0df495
|
[
"Apache-2.0"
] | 89
|
2021-03-05T01:22:18.000Z
|
2022-03-30T19:28:33.000Z
|
detectron2/layers/batch_norm.py
|
PedroUria/detectron2
|
53d838d1528c3511838a1a3b35c642291e0df495
|
[
"Apache-2.0"
] | 119
|
2021-03-04T07:30:41.000Z
|
2022-03-31T11:32:22.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torch
import torch.distributed as dist
from torch import nn
from torch.autograd.function import Function
from torch.nn import functional as F
from detectron2.utils import comm, env
from .wrappers import BatchNorm2d
class FrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.
    It contains non-trainable buffers called
    "weight" and "bias", "running_mean", "running_var",
    initialized to perform identity transformation.
    The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
    which are computed from the original four parameters of BN.
    The affine transform `x * weight + bias` will perform the equivalent
    computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
    When loading a backbone model from Caffe2, "running_mean" and "running_var"
    will be left unchanged as identity transformation.
    Other pre-trained backbone models may contain all 4 parameters.
    The forward is implemented by `F.batch_norm(..., training=False)`.
    """
    # Checkpoint-format version. Bumped whenever the meaning of the buffers
    # changes; old checkpoints are migrated in ``_load_from_state_dict``.
    _version = 3
    def __init__(self, num_features, eps=1e-5):
        """
        Args:
            num_features (int): number of channels ``C`` of the input.
            eps (float): value added to the variance for numerical stability.
        """
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        # All four tensors are registered as buffers (not parameters), so they
        # are saved/loaded with the state dict but never receive gradients.
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        # Initialized to ``1 - eps`` so that ``running_var + eps == 1`` and the
        # initial transform is exactly the identity.
        self.register_buffer("running_var", torch.ones(num_features) - eps)
    def forward(self, x):
        """Apply the frozen affine/normalization transform to ``x``."""
        if x.requires_grad:
            # When gradients are needed, F.batch_norm will use extra memory
            # because its backward op computes gradients for weight/bias as well.
            scale = self.weight * (self.running_var + self.eps).rsqrt()
            bias = self.bias - self.running_mean * scale
            scale = scale.reshape(1, -1, 1, 1)
            bias = bias.reshape(1, -1, 1, 1)
            return x * scale + bias
        else:
            # When gradients are not needed, F.batch_norm is a single fused op
            # and provide more optimization opportunities.
            return F.batch_norm(
                x,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                eps=self.eps,
            )
    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # Migrate checkpoints written by older versions of this class before
        # delegating to the default loading logic.
        version = local_metadata.get("version", None)
        if version is None or version < 2:
            # No running_mean/var in early versions
            # This will silent the warnings
            if prefix + "running_mean" not in state_dict:
                state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
            if prefix + "running_var" not in state_dict:
                state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
        if version is not None and version < 3:
            logger = logging.getLogger(__name__)
            logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
            # In version < 3, running_var are used without +eps.
            state_dict[prefix + "running_var"] -= self.eps
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )
    def __repr__(self):
        return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
    @classmethod
    def convert_frozen_batchnorm(cls, module):
        """
        Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
        Args:
            module (torch.nn.Module):
        Returns:
            If module is BatchNorm/SyncBatchNorm, returns a new module.
            Otherwise, in-place convert module and return it.
        Similar to convert_sync_batchnorm in
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
        """
        bn_module = nn.modules.batchnorm
        bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
        res = module
        if isinstance(module, bn_module):
            # Leaf case: build a frozen copy carrying over the learned stats.
            res = cls(module.num_features)
            if module.affine:
                res.weight.data = module.weight.data.clone().detach()
                res.bias.data = module.bias.data.clone().detach()
            res.running_mean.data = module.running_mean.data
            res.running_var.data = module.running_var.data
            res.eps = module.eps
        else:
            # Recurse into children and swap in any converted submodules.
            for name, child in module.named_children():
                new_child = cls.convert_frozen_batchnorm(child)
                if new_child is not child:
                    res.add_module(name, new_child)
        return res
def get_norm(norm, out_channels):
    """
    Resolve a normalization spec into a normalization layer.

    Args:
        norm (str or callable): one of "BN", "SyncBN", "FrozenBN", "GN",
            "nnSyncBN", "naiveSyncBN", the empty string (meaning no norm),
            or a callable mapping a channel count to a ``nn.Module``.
        out_channels (int): number of channels the layer will normalize.

    Returns:
        nn.Module or None: the normalization layer (``None`` for ``""``).
    """
    # A non-string spec is assumed to already be a layer factory.
    if not isinstance(norm, str):
        return norm(out_channels)
    if not norm:
        return None
    # Unknown names deliberately raise KeyError, same as a direct dict lookup.
    factory = {
        "BN": BatchNorm2d,
        # Fixed in https://github.com/pytorch/pytorch/pull/36382
        "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
        "FrozenBN": FrozenBatchNorm2d,
        "GN": lambda channels: nn.GroupNorm(32, channels),
        # for debugging:
        "nnSyncBN": nn.SyncBatchNorm,
        "naiveSyncBN": NaiveSyncBatchNorm,
    }[norm]
    return factory(out_channels)
class AllReduce(Function):
    """
    Differentiable sum across all distributed workers.

    Forward sums the input over all ranks; backward all-reduces the incoming
    gradient, so each rank receives the summed gradient.
    """
    @staticmethod
    def forward(ctx, input):
        # Gather a copy from every rank, then sum locally.
        input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]
        # Use allgather instead of allreduce since I don't trust in-place operations ..
        dist.all_gather(input_list, input, async_op=False)
        inputs = torch.stack(input_list, dim=0)
        return torch.sum(inputs, dim=0)
    @staticmethod
    def backward(ctx, grad_output):
        # Sum gradients across ranks in place before returning them.
        dist.all_reduce(grad_output, async_op=False)
        return grad_output
class NaiveSyncBatchNorm(BatchNorm2d):
    """
    In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
    when the batch size on each worker is different.
    (e.g., when scale augmentation is used, or when it is applied to mask head).
    This is a slower but correct alternative to `nn.SyncBatchNorm`.
    Note:
    There isn't a single definition of Sync BatchNorm.
    When ``stats_mode==""``, this module computes overall statistics by using
    statistics of each worker with equal weight. The result is true statistics
    of all samples (as if they are all on one worker) only when all workers
    have the same (N, H, W). This mode does not support inputs with zero batch size.
    When ``stats_mode=="N"``, this module computes overall statistics by weighting
    the statistics of each worker by their ``N``. The result is true statistics
    of all samples (as if they are all on one worker) only when all workers
    have the same (H, W). It is slower than ``stats_mode==""``.
    Even though the result of this module may not be the true statistics of all samples,
    it may still be reasonable because it might be preferrable to assign equal weights
    to all workers, regardless of their (H, W) dimension, instead of putting larger weight
    on larger images. From preliminary experiments, little difference is found between such
    a simplified implementation and an accurate computation of overall mean & variance.
    """
    def __init__(self, *args, stats_mode="", **kwargs):
        """
        Args:
            stats_mode (str): "" or "N"; see the class docstring for semantics.
        """
        super().__init__(*args, **kwargs)
        assert stats_mode in ["", "N"]
        self._stats_mode = stats_mode
    def forward(self, input):
        # Plain (local) BatchNorm suffices when not distributed or in eval mode.
        if comm.get_world_size() == 1 or not self.training:
            return super().forward(input)
        B, C = input.shape[0], input.shape[1]
        # Per-worker first and second moments over (N, H, W).
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])
        if self._stats_mode == "":
            assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
            # Pack both moments into one tensor so a single collective suffices,
            # then average equally over all workers.
            vec = torch.cat([mean, meansqr], dim=0)
            vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
            mean, meansqr = torch.split(vec, C)
            momentum = self.momentum
        else:
            if B == 0:
                vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
                vec = vec + input.sum()  # make sure there is gradient w.r.t input
            else:
                # Append the local batch size so moments can be weighted by N.
                vec = torch.cat(
                    [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
                )
            vec = AllReduce.apply(vec * B)
            total_batch = vec[-1].detach()
            momentum = total_batch.clamp(max=1) * self.momentum  # no update if total_batch is 0
            total_batch = torch.max(total_batch, torch.ones_like(total_batch))  # avoid div-by-zero
            mean, meansqr, _ = torch.split(vec / total_batch, C)
        # Standard BN math using the synchronized statistics.
        var = meansqr - mean * mean
        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        # Update running statistics in place with the (possibly damped) momentum.
        self.running_mean += momentum * (mean.detach() - self.running_mean)
        self.running_var += momentum * (var.detach() - self.running_var)
        return input * scale + bias
| 40.892116
| 99
| 0.627499
|
acfd6351fbdf0ede7c174dc5c8e0f19eab08e969
| 4,667
|
py
|
Python
|
pypureclient/flasharray/FA_2_4/models/volume_post.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_4/models/volume_post.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_4/models/volume_post.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_4 import models
class VolumePost(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared type (used by to_dict for serialization).
    swagger_types = {
        'destroyed': 'bool',
        'provisioned': 'int',
        'qos': 'Qos',
        'source': 'Reference',
        'subtype': 'str'
    }
    # Attribute name -> JSON key in the REST API definition.
    attribute_map = {
        'destroyed': 'destroyed',
        'provisioned': 'provisioned',
        'qos': 'qos',
        'source': 'source',
        'subtype': 'subtype'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        destroyed=None,  # type: bool
        provisioned=None,  # type: int
        qos=None,  # type: models.Qos
        source=None,  # type: models.Reference
        subtype=None,  # type: str
    ):
        """
        Keyword args:
            destroyed (bool): If set to `true`, destroys a resource. Once set to `true`, the `time_remaining` value will display the amount of time left until the destroyed resource is permanently eradicated. Before the `time_remaining` period has elapsed, the destroyed resource can be recovered by setting `destroyed=false`. Once the `time_remaining` period has elapsed, the resource is permanently eradicated and can no longer be recovered.
            provisioned (int): Sets the virtual size of the volume. Measured in bytes.
            qos (Qos): Sets QoS limits.
            source (Reference): The source volume of a volume copy.
            subtype (str): The type of volume. Valid values are `protocol_endpoint` and `regular`.
        """
        # Only set attributes that were provided; unset ones stay absent so
        # to_dict() omits them (hasattr check there).
        if destroyed is not None:
            self.destroyed = destroyed
        if provisioned is not None:
            self.provisioned = provisioned
        if qos is not None:
            self.qos = qos
        if source is not None:
            self.source = source
        if subtype is not None:
            self.subtype = subtype
    def __setattr__(self, key, value):
        # Reject attributes outside the swagger schema and enforce the
        # documented upper bound on `provisioned` (4 PiB in bytes).
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `VolumePost`".format(key))
        if key == "provisioned" and value is not None:
            if value > 4503599627370496:
                raise ValueError("Invalid value for `provisioned`, value must be less than or equal to `4503599627370496`")
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Unresolved lazy `Property` placeholders behave as absent attributes.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                # Recursively serialize nested models, lists and dicts.
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(VolumePost, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VolumePost):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 33.57554
| 443
| 0.569531
|
acfd64261f06881b29195e26f115e410e30aac82
| 546
|
py
|
Python
|
django_tea_do/tea_do/migrations/0002_alter_teado_body_alter_teado_title.py
|
utkueray/TeaDo
|
b568d5fd50855270037cc44d0731eeacdb36ade3
|
[
"Apache-2.0"
] | null | null | null |
django_tea_do/tea_do/migrations/0002_alter_teado_body_alter_teado_title.py
|
utkueray/TeaDo
|
b568d5fd50855270037cc44d0731eeacdb36ade3
|
[
"Apache-2.0"
] | null | null | null |
django_tea_do/tea_do/migrations/0002_alter_teado_body_alter_teado_title.py
|
utkueray/TeaDo
|
b568d5fd50855270037cc44d0731eeacdb36ade3
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-17 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow NULL on TeaDo.body and TeaDo.title (both remain max_length=100)."""
    dependencies = [
        ('tea_do', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='teado',
            name='body',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='teado',
            name='title',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
| 22.75
| 62
| 0.567766
|
acfd644bb511ac832affc37dc6da6057ce23f2ca
| 1,338
|
py
|
Python
|
Trie_build_data.py
|
utkarsh23/RadixTrees
|
20cd5d56441cda7294407e64b38d1057e96584e0
|
[
"MIT"
] | null | null | null |
Trie_build_data.py
|
utkarsh23/RadixTrees
|
20cd5d56441cda7294407e64b38d1057e96584e0
|
[
"MIT"
] | null | null | null |
Trie_build_data.py
|
utkarsh23/RadixTrees
|
20cd5d56441cda7294407e64b38d1057e96584e0
|
[
"MIT"
] | 2
|
2017-11-30T13:36:42.000Z
|
2018-04-15T12:37:57.000Z
|
import time
from Trie import *
def main():
    """Benchmark Trie insert and search times over word lists of growing size.

    Writes one timing value per line to ``data/trie_insert.txt`` and
    ``data/trie_search.txt``. The search benchmark runs against the trie built
    from the *last* (largest) word list, matching the original behavior.
    """
    word_lists = ['words_44k.txt', 'words_109k.txt', 'words_178k.txt', 'words_263k.list', 'words_370k.txt']
    # insert
    print("Insertion:\n")
    insert_times = []
    T = None
    for counter, file in enumerate(word_lists, start=1):
        print("Processing file " + str(counter))
        T = Trie()
        # Context manager guarantees the file is closed even if insert raises.
        with open('word_list/' + file) as f_read:
            start = time.time()
            # Iterate the file lazily instead of materializing all lines.
            for word in f_read:
                T.insert(word.rstrip())
            endtime = time.time() - start
        print(endtime)
        insert_times.append(endtime)
    with open('data/trie_insert.txt', 'w') as f_write_insertradix:
        for timetaken in insert_times:
            f_write_insertradix.write(str(timetaken) + '\n')
    # search (uses the trie from the last word list)
    print("\nSearch:\n")
    word_searches = ['to', 'unappeasableness', 'pseudopseudohypoparathyroidism', 'hippopotomonstrosesquippedaliophobia', 'pneumonoultramicroscopicsilicovolcanoconiosis']
    search_times = []
    for counter, word in enumerate(word_searches, start=1):
        print("Processing word " + str(counter))
        start = time.time()
        T.search(word)
        endtime = time.time() - start
        print(endtime)
        search_times.append(endtime)
    with open('data/trie_search.txt', 'w') as f_write_searchradix:
        for timetaken in search_times:
            f_write_searchradix.write(str(timetaken) + '\n')
if __name__ == '__main__':
    main()
| 28.468085
| 166
| 0.713004
|
acfd6487a4c87104d11da229f01c06f297cd24d8
| 150
|
py
|
Python
|
cases/__init__.py
|
mavahedinia/s1d-csp-sa
|
1b04e06b277baff198c653e3827c1b1bed4a485e
|
[
"MIT"
] | null | null | null |
cases/__init__.py
|
mavahedinia/s1d-csp-sa
|
1b04e06b277baff198c653e3827c1b1bed4a485e
|
[
"MIT"
] | null | null | null |
cases/__init__.py
|
mavahedinia/s1d-csp-sa
|
1b04e06b277baff198c653e3827c1b1bed4a485e
|
[
"MIT"
] | null | null | null |
from .base import *
from .case1 import case_1
from .case2 import case_2
from .case3 import case_3
from .case4 import case_4
from .case5 import case_5
| 21.428571
| 25
| 0.793333
|
acfd65834a218be6b680fcb1227dd1b4dec89e4a
| 14,401
|
py
|
Python
|
tests/unit/test_curve.py
|
dibir-magomedsaygitov/bezier
|
a3c408d11133aa1b97fb6dd673888cf56f03178e
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_curve.py
|
dibir-magomedsaygitov/bezier
|
a3c408d11133aa1b97fb6dd673888cf56f03178e
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_curve.py
|
dibir-magomedsaygitov/bezier
|
a3c408d11133aa1b97fb6dd673888cf56f03178e
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock
import numpy as np
try:
import sympy
except ImportError: # pragma: NO COVER
sympy = None
from tests.unit import test__symbolic
from tests.unit import utils
class TestCurve(utils.NumPyTestCase):
    """Unit tests for ``bezier.curve.Curve``.

    Covers construction/validation, evaluation, plotting (with mocked axes),
    subdivision, intersection, degree elevation/reduction, specialization,
    point location and the optional SymPy-backed symbolic helpers.
    """
    # Shared degenerate 2x2 Fortran-ordered node array used by several tests.
    ZEROS = np.zeros((2, 2), order="F")
    @staticmethod
    def _get_target_class():
        # Imported lazily so a collection-time import error does not mask tests.
        from bezier import curve
        return curve.Curve
    def _make_one(self, *args, **kwargs):
        # Helper: construct the class under test.
        klass = self._get_target_class()
        return klass(*args, **kwargs)
    def test_constructor(self):
        nodes = np.asfortranarray([[0.0, 0.625, 1.0], [0.0, 0.5, 0.75]])
        curve = self._make_one(nodes, 2, copy=False)
        self.assertEqual(curve._degree, 2)
        self.assertEqual(curve._dimension, 2)
        # copy=False must keep the exact array object.
        self.assertIs(curve._nodes, nodes)
    def test_constructor_invalid_num_nodes(self):
        nodes = np.empty((1, 3), order="F")
        with self.assertRaises(ValueError) as exc_info:
            self._make_one(nodes, 7, copy=False)
        exc_args = exc_info.exception.args
        self.assertEqual(
            exc_args, ("A degree 7 curve should have 8 nodes, not 3.",)
        )
    def test_constructor_wrong_dimension(self):
        nodes = np.asfortranarray([1.0, 2.0])
        with self.assertRaises(ValueError):
            self._make_one(nodes, None)
        nodes = np.zeros((2, 2, 2), order="F")
        with self.assertRaises(ValueError):
            self._make_one(nodes, None)
    def test_from_nodes_factory(self):
        nodes = np.asfortranarray([[1.0, 1.0], [2.0, 3.0], [0.0, 0.0]])
        klass = self._get_target_class()
        curve = klass.from_nodes(nodes)
        self.assertIsInstance(curve, klass)
        self.assertEqual(curve._degree, 1)
        self.assertEqual(curve._dimension, 3)
        self.assertEqual(curve._nodes, nodes)
    def test_from_nodes_factory_non_array(self):
        # The factory should accept plain nested lists, not only ndarrays.
        nodes = [[1.0, 1.0, 2.0], [2.0, 3.0, 4.0]]
        klass = self._get_target_class()
        curve = klass.from_nodes(nodes)
        self.assertIsInstance(curve, klass)
        self.assertEqual(curve._degree, 2)
        self.assertEqual(curve._dimension, 2)
        self.assertTrue(np.all(curve._nodes == nodes))
    def test__get_degree(self):
        # Degree is (number of nodes) - 1.
        klass = self._get_target_class()
        self.assertEqual(0, klass._get_degree(1))
        self.assertEqual(1, klass._get_degree(2))
    def test_length_property(self):
        # 3-4-5 right triangle hypotenuse as a line segment.
        nodes = np.asfortranarray([[0.0, 3.0], [0.0, 4.0]])
        curve = self._make_one(nodes, 1)
        self.assertEqual(curve.length, 5.0)
    def test___dict___property(self):
        curve = self._make_one(self.ZEROS, 1, copy=False)
        props_dict = curve.__dict__
        expected = {"_nodes": self.ZEROS, "_dimension": 2, "_degree": 1}
        self.assertEqual(props_dict, expected)
        # Check that modifying ``props_dict`` won't modify ``curve``.
        expected["_dimension"] = 47
        self.assertNotEqual(curve._dimension, expected["_dimension"])
    def test_copy(self):
        nodes = np.asfortranarray([[2.0, 3.5, 4.0], [0.0, 1.0, 0.0]])
        curve = self._make_one(nodes, 2)
        new_curve = curve.copy()
        self.assertEqual(curve._degree, new_curve._degree)
        self.assertEqual(curve._dimension, new_curve._dimension)
        self.assertTrue(np.all(curve._nodes == new_curve._nodes))
        # A copy must not share the underlying node array.
        self.assertIsNot(curve._nodes, new_curve._nodes)
    def test_evaluate(self):
        s = 0.25
        nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 0.5, 1.25]])
        curve = self._make_one(nodes, 2)
        expected = np.asfortranarray([[0.25], [0.265625]])
        result = curve.evaluate(s)
        self.assertEqual(expected, result)
    def test_evaluate_multi(self):
        # Includes parameter values outside [0, 1] (extrapolation).
        s_vals = np.asfortranarray([0.0, 0.25, 0.5, 1.0, 1.25])
        nodes = np.asfortranarray([[0.0, 0.375, 1.0], [0.0, 0.375, 1.0]])
        curve = self._make_one(nodes, 2)
        expected = np.asfortranarray(
            [
                [0.0, 0.203125, 0.4375, 1.0, 1.328125],
                [0.0, 0.203125, 0.4375, 1.0, 1.328125],
            ]
        )
        result = curve.evaluate_multi(s_vals)
        self.assertEqual(expected, result)
    def test_evaluate_hodograph(self):
        s = 0.25
        nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, 0.5, 1.25]])
        curve = self._make_one(nodes, 2)
        expected = np.asfortranarray([[1.0], [1.125]])
        result = curve.evaluate_hodograph(s)
        self.assertEqual(expected, result)
    def test_plot_wrong_dimension(self):
        # Only planar (2D) curves can be plotted.
        nodes = np.asfortranarray([[0.0, 1.0], [0.0, 3.0], [0.0, 4.0]])
        curve = self._make_one(nodes, 1)
        with self.assertRaises(NotImplementedError):
            curve.plot(32)
    @unittest.mock.patch("bezier._plot_helpers.new_axis")
    def test_plot_defaults(self, new_axis_mock):
        ax = unittest.mock.Mock(spec=["plot"])
        new_axis_mock.return_value = ax
        nodes = np.asfortranarray([[0.0, 1.0], [1.0, 3.0]])
        curve = self._make_one(nodes, 1, copy=False)
        num_pts = 2  # This value is crucial for the plot call.
        result = curve.plot(num_pts)
        self.assertIs(result, ax)
        # Verify mocks.
        new_axis_mock.assert_called_once_with()
        # Check the call to ax.plot(). We can't assert_any_call()
        # since == breaks on NumPy arrays.
        self.assertEqual(ax.plot.call_count, 1)
        call = ax.plot.mock_calls[0]
        utils.check_plot_call(self, call, nodes, color=None, alpha=None)
    @unittest.mock.patch("bezier._plot_helpers.new_axis")
    def test_plot_explicit(self, new_axis_mock):
        nodes = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
        curve = self._make_one(nodes, 1, copy=False)
        num_pts = 2  # This value is crucial for the plot call.
        ax = unittest.mock.Mock(spec=["plot"])
        color = (0.75, 1.0, 1.0)
        alpha = 0.625
        result = curve.plot(num_pts, color=color, alpha=alpha, ax=ax)
        self.assertIs(result, ax)
        # Verify mocks.
        new_axis_mock.assert_not_called()
        # Check the call to ax.plot(). We can't assert_any_call()
        # since == breaks on NumPy arrays.
        self.assertEqual(ax.plot.call_count, 1)
        call = ax.plot.mock_calls[0]
        utils.check_plot_call(self, call, nodes, color=color, alpha=alpha)
    def test_subdivide(self):
        nodes = np.asfortranarray([[0.0, 4.0], [1.0, 6.0]])
        klass = self._get_target_class()
        curve = klass.from_nodes(nodes)
        # Call ``subdivide()`` and then compare.
        left, right = curve.subdivide()
        # Check the "left" sub-curve.
        self.assertEqual(left._degree, 1)
        self.assertIsInstance(left, klass)
        expected_l = np.asfortranarray([[0.0, 2.0], [1.0, 3.5]])
        self.assertEqual(left._nodes, expected_l)
        # Check the "right" sub-curve.
        self.assertIsInstance(right, klass)
        expected_r = np.asfortranarray([[2.0, 4.0], [3.5, 6.0]])
        self.assertEqual(right._nodes, expected_r)
    def test_intersect_bad_strategy(self):
        curve = self._make_one(self.ZEROS, 1)
        strategy = unittest.mock.sentinel.bad_strategy
        with self.assertRaises(ValueError) as exc_info:
            curve.intersect(curve, strategy=strategy)
        exc_args = exc_info.exception.args
        self.assertEqual(exc_args, ("Unexpected strategy.", strategy))
    def test_intersect_algebraic(self):
        from bezier.hazmat import intersection_helpers
        nodes1 = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
        curve1 = self._make_one(nodes1, 1)
        nodes2 = np.asfortranarray([[0.0, 1.0], [1.0, 0.0]])
        curve2 = self._make_one(nodes2, 1)
        strategy = intersection_helpers.IntersectionStrategy.ALGEBRAIC
        intersections = curve1.intersect(curve2, strategy=strategy)
        # Diagonal lines y=x and y=1-x cross at their midpoints.
        expected = np.asfortranarray([[0.5], [0.5]])
        self.assertEqual(intersections, expected)
    def test_intersect_empty(self):
        # Disjoint segments: result has shape (2, 0).
        nodes1 = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
        curve1 = self._make_one(nodes1, 1)
        nodes2 = np.asfortranarray([[3.0, 2.0], [0.0, 1.0]])
        curve2 = self._make_one(nodes2, 1)
        result = curve1.intersect(curve2)
        self.assertEqual(result.shape, (2, 0))
    def test_intersect_at_boundary(self):
        nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, -0.25, 0.0]])
        curve = self._make_one(nodes, 2)
        left, right = curve.subdivide()
        result = left.intersect(right)
        # The "right" end of ``left`` and the "left" end of ``right``.
        expected = np.asfortranarray([[1.0], [0.0]])
        self.assertEqual(result, expected)
    def _intersect_helper(self, **kwargs):
        # NOTE: ``nodes1`` is a specialization of [0, 0], [1/2, 1], [1, 1]
        #       onto the interval [1/4, 1] and ``nodes`` is a specialization
        #       of [0, 1], [1/2, 1], [1, 0] onto the interval [0, 3/4].
        #       We expect them to intersect at s = 1/3, t = 2/3.
        nodes_left = np.asfortranarray(
            [[0.25, 0.625, 1.0], [0.4375, 1.0, 1.0]]
        )
        left = self._make_one(nodes_left, 2)
        nodes_right = np.asfortranarray(
            [[0.0, 0.375, 0.75], [1.0, 1.0, 0.4375]]
        )
        right = self._make_one(nodes_right, 2)
        result = left.intersect(right, **kwargs)
        expected = np.asfortranarray([[1.0], [2.0]]) / 3.0
        self.assertTrue(
            np.allclose(result, expected, atol=0.0, rtol=0.5 ** 52)
        )
    def test_intersect(self):
        self._intersect_helper()
    def test_intersect_no_verify(self):
        self._intersect_helper(_verify=False)
    def test_intersect_non_curve(self):
        nodes = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, -0.25, 0.0]])
        curve = self._make_one(nodes, 2)
        with self.assertRaises(TypeError):
            curve.intersect(object())
    def test_intersect_unsupported_dimension(self):
        # Intersection is only implemented for planar curves (both orders).
        nodes = np.asfortranarray(
            [[0.0, 0.5, 1.0], [0.0, -0.25, 0.0], [0.0, 0.75, 1.25]]
        )
        curve1 = self._make_one(nodes, 2)
        curve2 = self._make_one(nodes[:, :2], 1)
        with self.assertRaises(NotImplementedError):
            curve1.intersect(curve2)
        with self.assertRaises(NotImplementedError):
            curve2.intersect(curve1)
    def test_elevate(self):
        # Degree elevation must not change the curve pointwise.
        nodes = np.asfortranarray([[0.0, 1.0, 3.0, 3.5], [0.5, 1.0, 2.0, 4.0]])
        curve = self._make_one(nodes, 3)
        self.assertEqual(curve.degree, 3)
        elevated = curve.elevate()
        self.assertEqual(elevated.degree, 4)
        s_vals = np.linspace(0.0, 1.0, 64 + 1)
        orig_vals = curve.evaluate_multi(s_vals)
        new_vals = elevated.evaluate_multi(s_vals)
        self.assertEqual(orig_vals, new_vals)
    def test_reduce_(self):
        # This degree-3 curve is exactly representable at degree 2.
        nodes = np.asfortranarray([[0.0, 1.0, 2.0, 3.0], [0.0, 3.0, 3.0, 0.0]])
        curve = self._make_one(nodes, 3)
        self.assertEqual(curve.degree, 3)
        reduced = curve.reduce_()
        expected = np.asfortranarray([[0.0, 1.5, 3.0], [0.0, 4.5, 0.0]])
        self.assertEqual(reduced.nodes, expected)
        s_vals = np.linspace(0.0, 1.0, 64 + 1)
        orig_vals = curve.evaluate_multi(s_vals)
        new_vals = reduced.evaluate_multi(s_vals)
        self.assertEqual(orig_vals, new_vals)
    def test_specialize(self):
        nodes = np.asfortranarray([[0.0, 1.0, 5.0], [0.0, 6.0, 2.0]])
        curve = self._make_one(nodes, 2)
        new_curve = curve.specialize(0.25, 0.875)
        expected = np.asfortranarray(
            [[0.6875, 1.78125, 4.046875], [2.375, 4.5625, 2.84375]]
        )
        self.assertEqual(new_curve.nodes, expected)
    def test_locate_wrong_shape(self):
        nodes = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
        curve = self._make_one(nodes, 1)
        point = np.asfortranarray([0.0, 1.0, 2.0])
        with self.assertRaises(ValueError):
            curve.locate(point)
    def test_locate(self):
        # Round trip: evaluate at s, then locate the resulting point.
        nodes = np.asfortranarray(
            [[0.0, 1.0, 2.0, 5.0], [0.0, 1.0, -1.0, 1.0]]
        )
        curve = self._make_one(nodes, 3)
        s_val = 0.75
        point = curve.evaluate(s_val)
        result = curve.locate(point)
        self.assertEqual(result, s_val)
    @unittest.skipIf(sympy is None, "SymPy not installed")
    def test_to_symbolic(self):
        nodes = np.asfortranarray([[3, 3, 4, 6], [3, 3, 3, 0]])
        curve = self._make_one(nodes, 3, copy=False)
        b_polynomial = curve.to_symbolic()
        s = sympy.Symbol("s")
        expected = 3 * sympy.Matrix([[1 + s ** 2, 1 - s ** 3]]).T
        self.assertTrue(
            test__symbolic.sympy_matrix_equal(b_polynomial, expected)
        )
    @unittest.skipIf(sympy is None, "SymPy not installed")
    def test_implicitize(self):
        nodes = np.asfortranarray([[3, 3, 4, 6], [3, 3, 3, 0]])
        curve = self._make_one(nodes, 3, copy=False)
        f_polynomial = curve.implicitize()
        x_sym, y_sym = sympy.symbols("x, y")
        expected = -9 * (
            x_sym ** 3
            - 9 * x_sym ** 2
            + 27 * x_sym
            - 3 * y_sym ** 2
            + 18 * y_sym
            - 54
        )
        self.assertTrue(test__symbolic.sympy_equal(f_polynomial, expected))
    @unittest.skipIf(sympy is None, "SymPy not installed")
    def test_implicitize_bad_dimension(self):
        nodes = np.empty((1, 2), order="F")
        curve = self._make_one(nodes, 1, copy=False)
        with self.assertRaises(ValueError) as exc_info:
            curve.implicitize()
        exc_args = exc_info.exception.args
        self.assertEqual(
            exc_args,
            (
                "Only a planar (2D) curve can be implicitized",
                "Current dimension",
                1,
            ),
        )
| 38.816712
| 79
| 0.600583
|
acfd65d56920e148b456e6aa4e0d03bc2f417ac1
| 2,657
|
py
|
Python
|
Local_Cell_Density_Project/Unit_Test_Density/Unit_Test.py
|
The-Kristina/CellComp
|
29ec7690e0d9adb1a6214937ca41fd1dadce18c6
|
[
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | 7
|
2019-05-13T10:07:44.000Z
|
2022-03-01T16:20:48.000Z
|
Local_Cell_Density_Project/Unit_Test_Density/Unit_Test.py
|
The-Kristina/CellComp
|
29ec7690e0d9adb1a6214937ca41fd1dadce18c6
|
[
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | null | null | null |
Local_Cell_Density_Project/Unit_Test_Density/Unit_Test.py
|
The-Kristina/CellComp
|
29ec7690e0d9adb1a6214937ca41fd1dadce18c6
|
[
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | 3
|
2020-04-23T18:13:20.000Z
|
2020-11-11T18:46:48.000Z
|
# TODO: Calculate the densities of the cells present in frame #552-554
# of slice movie (pos0, '17_07_24', 'MDCK_90WT_10Sc_NoComp') manually
import numpy as np
import scipy.spatial as sp
import matplotlib.pyplot as plt
from SegClass_HDF_Output_Files.HDF_Format_New.HDF5_Data_Functions import GetXandYcoordsPerFrameSLOW
# Initiate coordinates of all cells per frame:
# NOTE(review): the TODO mentions frames #552-554 but frame=775 is fetched here
# (and the plot title says slice #552) — confirm which frame is intended.
hdf5_file = "/Volumes/lowegrp/Data/Kristina/MDCK_90WT_10Sc_NoComp/17_07_24/pos0/HDF/segmented.hdf5"
print ("Processing the hdf5 file: {}".format(hdf5_file))
x_gfp, y_gfp, x_rfp, y_rfp = GetXandYcoordsPerFrameSLOW(hdf5_file=hdf5_file, frame=775)
print ("Done with the hdf5 file: {}".format(hdf5_file))
print (x_gfp)
print (y_gfp)
print (x_rfp)
print (y_rfp)
# Pool GFP and RFP channels into a single coordinate list.
x_coords = x_gfp + x_rfp
y_coords = y_gfp + y_rfp
cells = []
# Debug probe for one particular cell of interest.
for x, y in zip(x_coords, y_coords):
    if x > 597.0 and x < 598.0 and y > 730.0 and y < 732.0:
        print ("HERE", x, y)
if len(x_coords) == len(y_coords):
    for x, y in zip(x_coords, y_coords):
        cells.append([x, y])
else:
    raise ValueError("Length of 'x-coords' and 'y-coords' vectors are not identical.")
cells = tuple(cells)
print (cells)
# One density accumulator per cell (sum of 1/area over incident triangles).
areas = [0 for _ in range(len(cells))]
print (areas)
# Now calculate their densities - unit test:
points = np.array(cells)
print (points)
tri = sp.Delaunay(points)
fig = sp.delaunay_plot_2d(tri=tri)
plt.title("Unit Test - Local Density Analysis\n'MDCK_90WT_10Sc_NoComp', '17_07_24', 'pos0', slice #552")
plt.show()
print (tri.points)
print ("Uno")
print (tri.simplices)
print ("Dos")
print (points[tri.simplices])
print ("Tres")
triangles_all = points[tri.simplices]
print ("Total # of triangles: {}".format(len(triangles_all)))
for number, triangle in enumerate(triangles_all):
    # Triangle vertices A, B, C.
    a_x = triangle[0][0]
    a_y = triangle[0][1]
    b_x = triangle[1][0]
    b_y = triangle[1][1]
    c_x = triangle[2][0]
    c_y = triangle[2][1]
    # Edge lengths and Heron's formula for the triangle area.
    a_edge = np.sqrt( (b_x - c_x) ** 2 + (b_y - c_y) ** 2 )
    b_edge = np.sqrt( (a_x - c_x) ** 2 + (a_y - c_y) ** 2 )
    c_edge = np.sqrt( (a_x - b_x) ** 2 + (a_y - b_y) ** 2 )
    s = (a_edge + b_edge + c_edge) / 2
    area = np.sqrt(s*(s-a_edge)*(s-b_edge)*(s-c_edge))
    print("Triangle #{} = {}\tEdges = {}\tArea = {} pixels^2".format(number + 1, triangle, [a_edge, b_edge, c_edge], area))
    # Credit 1/area to each vertex cell.
    # NOTE(review): list.index() returns the FIRST occurrence, so if two cells
    # share an x (or y) coordinate the contribution may go to the wrong cell,
    # or be silently dropped when index_x != index_y — verify on real data.
    for point in triangle:
        index_x = x_coords.index(point[0])
        index_y = y_coords.index(point[1])
        if index_x == index_y:
            areas[index_x] += 1/area
    #break
print (cells)
print (areas)
for cell, area in zip(cells, areas):
    #if cell[0] > 597.0 and cell[0] < 598.0 and cell[1] > 730.0 and cell[1] < 732.0:
    print (cell, area)
| 29.197802
| 123
| 0.662778
|
acfd66941a9a87f6047c6f7c9f49e095ee544ed7
| 955
|
py
|
Python
|
tests/unit/test_delivery_method_setup.py
|
elin1231/htmap
|
b9c43ec1d86e90730210c3317409b75595061d91
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_delivery_method_setup.py
|
elin1231/htmap
|
b9c43ec1d86e90730210c3317409b75595061d91
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_delivery_method_setup.py
|
elin1231/htmap
|
b9c43ec1d86e90730210c3317409b75595061d91
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pathlib import Path
import htmap
from htmap.options import run_delivery_setup
def test_unknown_delivery_method_for_delivery_setup_raises(tmp_path):
    """Delivery setup must reject a delivery method name HTMap does not know."""
    bogus_method = "definitely-not-real"
    with pytest.raises(htmap.exceptions.UnknownPythonDeliveryMethod):
        run_delivery_setup("foo", tmp_path, bogus_method)
| 35.37037
| 74
| 0.78534
|
acfd66bcb61445a129eca8caa9567e7932f733e3
| 3,777
|
py
|
Python
|
iaso/api/source_versions.py
|
BLSQ/iaso
|
95c8087c0182bdd576598eb8cd39c440e58e15d7
|
[
"MIT"
] | 29
|
2020-12-26T07:22:19.000Z
|
2022-03-07T13:40:09.000Z
|
iaso/api/source_versions.py
|
BLSQ/iaso
|
95c8087c0182bdd576598eb8cd39c440e58e15d7
|
[
"MIT"
] | 150
|
2020-11-09T15:03:27.000Z
|
2022-03-07T15:36:07.000Z
|
iaso/api/source_versions.py
|
BLSQ/iaso
|
95c8087c0182bdd576598eb8cd39c440e58e15d7
|
[
"MIT"
] | 4
|
2020-11-09T10:38:13.000Z
|
2021-10-04T09:42:47.000Z
|
from django.http import HttpResponse
from rest_framework.decorators import action
from rest_framework.response import Response
from iaso.models import SourceVersion
from .common import ModelViewSet
from iaso.models import DataSource
from rest_framework import serializers, permissions
from .source_versions_serializers import DiffSerializer, ExportSerializer
from .tasks import TaskSerializer
class SourceVersionSerializer(serializers.ModelSerializer):
    """Source versions API

    This API is restricted to authenticated users (no specific permission check)

    GET /api/sourceversions/
    """

    # Convenience read-only name of the parent data source.
    data_source_name = serializers.SlugRelatedField(source="data_source", slug_field="name", read_only=True)
    # Default version for source not global
    is_default = serializers.SerializerMethodField()

    def get_is_default(self, source_version: SourceVersion) -> bool:
        # True when this version is the one its data source points at as default.
        return source_version.data_source.default_version == source_version

    class Meta:
        model = SourceVersion
        fields = [
            "id",
            "data_source",
            "number",
            "description",
            "created_at",
            "updated_at",
            "data_source_name",
            "is_default",
        ]

    def validate_data_source(self, value):
        """
        Check that data source belongs to the account
        """
        # Account is resolved from the authenticated request user's profile.
        account = self.context["request"].user.iaso_profile.account
        sources = DataSource.objects.filter(projects__account=account)
        if value not in sources:
            raise serializers.ValidationError("Source does not belong to this account ")
        return value
class SourceVersionViewSet(ModelViewSet):
    """Data source API

    This API is restricted to authenticated users having at least one of the "menupermissions.iaso_mappings",
    "menupermissions.iaso_org_units", and "menupermissions.iaso_links" permissions

    GET /api/datasources/
    GET /api/datasources/<id>
    """

    permission_classes = [permissions.IsAuthenticated]
    serializer_class = SourceVersionSerializer
    results_key = "versions"
    queryset = DataSource.objects.all()
    http_method_names = ["get", "post", "put", "head", "options", "trace", "delete"]

    def get_queryset(self):
        """Return the versions visible to the requesting user's account,
        optionally filtered by a `source` URL kwarg."""
        profile = self.request.user.iaso_profile
        # prefetch_related avoids one query per version when the serializer
        # reads data_source_name.
        versions = SourceVersion.objects.filter(data_source__projects__account=profile.account).prefetch_related(
            "data_source"
        )
        source_id = self.kwargs.get("source", None)
        if source_id:
            versions = versions.filter(data_source_id=source_id)
        return versions.order_by("id")

    @action(methods=["GET", "POST"], detail=False, serializer_class=DiffSerializer, url_path="diff.csv")
    def diff_csv(self, request):
        """Stream a CSV diff between two source versions as an attachment."""
        # GET passes parameters in the query string, POST in the body.
        serializer: DiffSerializer = self.get_serializer(
            data=request.data if request.method == "POST" else request.query_params
        )
        serializer.is_valid(raise_exception=True)
        # FIXME: FileResponse don't work, no idea why, not a priority
        filename = "comparison.csv"
        response = HttpResponse(serializer.generate_csv(), content_type="text/csv")
        response["Content-Disposition"] = "attachment; filename=%s" % filename
        return response

    @action(methods=["POST"], detail=False, serializer_class=ExportSerializer)
    def export_dhis2(self, request):
        """Export diff between two source to the DHIS2 server"""
        serializer: ExportSerializer = self.get_serializer(
            data=request.data if request.method == "POST" else request.query_params
        )
        serializer.is_valid(raise_exception=True)
        # The export runs asynchronously; hand the caller the task descriptor.
        task = serializer.launch_export(user=request.user)
        return Response({"task": TaskSerializer(instance=task).data})
| 35.632075
| 113
| 0.693407
|
acfd673d9aba996399d24ca5a9e113c696432724
| 4,506
|
py
|
Python
|
maskrcnn_benchmark/engine/trainer.py
|
lichuanqi/MPSR
|
3e4ecdbeb02faf1857ada74858b38187213d676e
|
[
"MIT"
] | 120
|
2020-07-21T02:10:01.000Z
|
2022-03-25T12:51:52.000Z
|
maskrcnn_benchmark/engine/trainer.py
|
zhangfx123/MPSR
|
3e4ecdbeb02faf1857ada74858b38187213d676e
|
[
"MIT"
] | 28
|
2020-07-21T09:42:05.000Z
|
2021-04-18T01:07:27.000Z
|
maskrcnn_benchmark/engine/trainer.py
|
zhangfx123/MPSR
|
3e4ecdbeb02faf1857ada74858b38187213d676e
|
[
"MIT"
] | 18
|
2020-08-01T12:21:21.000Z
|
2022-03-25T07:19:24.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import time
import torch
import torch.distributed as dist
from maskrcnn_benchmark.utils.comm import get_world_size
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
from apex import amp
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return loss_dict
    with torch.no_grad():
        # Fix a deterministic key order so every rank stacks tensors identically.
        names = sorted(loss_dict.keys())
        stacked = torch.stack([loss_dict[name] for name in names], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            # Only the destination rank holds the accumulated sum,
            # so only it divides to get the average.
            stacked /= world_size
        reduced = dict(zip(names, stacked))
    return reduced
def do_train(
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    data_loader_closeup,
):
    """Main training loop.

    Iterates over ``data_loader`` starting at ``arguments["iteration"]``,
    optionally mixing in "closeup" batches from ``data_loader_closeup``,
    logs reduced losses every 20 iterations, and checkpoints every
    ``checkpoint_period`` iterations plus a final ``model_final`` save.
    """
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    start_training_time = time.time()
    end = time.time()
    # Closeup loader is optional; when absent the model is called with Nones.
    if data_loader_closeup is not None:
        data_iter_closeup = iter(data_loader_closeup)
    else:
        data_iter_closeup = None
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        data_time = time.time() - end
        # 1-based iteration count, persisted so training can resume.
        iteration = iteration + 1
        arguments["iteration"] = iteration

        # NOTE(review): scheduler.step() is called before optimizer.step()
        # each iteration — confirm this ordering is intentional.
        scheduler.step()

        images = images.to(device)
        if data_iter_closeup is not None:
            closeups, closeup_targets = next(data_iter_closeup)
            closeups = [closeup.to(device) for closeup in closeups]
            closeup_targets = closeup_targets.to(device)
        else:
            closeups, closeup_targets = None, None
        targets = [target.to(device) for target in targets]

        loss_dict = model(images, targets, closeups, closeup_targets)

        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        # Note: If mixed precision is not used, this ends up doing nothing
        # Otherwise apply loss scaling for mixed-precision recipe
        with amp.scale_loss(losses, optimizer) as scaled_losses:
            scaled_losses.backward()
        optimizer.step()

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)

        # ETA from the running average of per-iteration time.
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))

        if iteration % 20 == 0 or iteration == max_iter:
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
        if iteration % checkpoint_period == 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
        if iteration == max_iter:
            checkpointer.save("model_final", **arguments)

    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
| 34.396947
| 79
| 0.611851
|
acfd68d6b1a0e2b4d263b187621260b730d11ae2
| 2,660
|
py
|
Python
|
janus/tests/test_repair.py
|
josepablocam/janus-public
|
4713092b27d02386bdb408213d8edc0dc5859eec
|
[
"MIT"
] | null | null | null |
janus/tests/test_repair.py
|
josepablocam/janus-public
|
4713092b27d02386bdb408213d8edc0dc5859eec
|
[
"MIT"
] | null | null | null |
janus/tests/test_repair.py
|
josepablocam/janus-public
|
4713092b27d02386bdb408213d8edc0dc5859eec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pytest
import numpy as np
import sklearn.linear_model
import sklearn.datasets
import tqdm
from janus.pipeline import pipeline_to_tree as pt
from janus.repair.repairer import PipelineRepairer
from janus.repair.rule_sampler import get_rule_sampler
from janus.repair.tree_enumerator import get_tree_enumerator
from janus import utils
from tests.utils import PipelineGenerator
import tqdm
import copy
import sys
sys.path.append(".")
data = PipelineGenerator(nrows=100, ncols=10, seed=42, max_time_mins=0.5)
def get_repair_hashes(repairer, seed, num_passes=2, num_pipelines=3):
    """Run `num_passes` identical repair passes and collect result hashes.

    Each pass re-seeds the RNGs with `seed`, repairs up to `num_pipelines`
    pipelines from the shared `data` fixture, and records the md5 of every
    repair that actually changed its pipeline. Returns a list of one hash
    list per pass, so callers can compare passes for determinism.
    """
    # BUG FIX: the original used `[[]] * num_passes`, which creates
    # num_passes references to ONE shared list — every pass appended to the
    # same list, so passes[0] == passes[1] held trivially and the
    # determinism checks downstream were vacuous. Build independent lists.
    passes = [[] for _ in range(num_passes)]
    for i in range(0, num_passes):
        print("Pass: {}".format(i))
        # Re-seed so each pass sees an identical random stream.
        utils.set_seed(seed)
        num_remaining = num_pipelines
        pbar = tqdm.tqdm(total=num_pipelines)
        for p in data.pipelines:
            if num_remaining <= 0:
                break
            repaired = repairer.repair(p, data.X, data.y, bound_num_repairs=1)
            orig_md5 = pt.md5(p)
            if repaired is None:
                # Repair failed outright; skip without counting.
                continue
            repaired_md5 = pt.md5(repaired)
            if orig_md5 == repaired_md5:
                # Repair was a no-op; only count real changes.
                continue
            passes[i].append(repaired_md5)
            num_remaining -= 1
            pbar.update(1)
        pbar.close()
    return passes
def test_repair_deterministic():
    """Two repair passes with the same seed must produce identical hashes,
    for both the random-mutation and the weighted-transducer repairers."""
    rules = []
    seed = 42
    # Random-mutation repairer: no learned rules, forced rule application.
    random_rule_sampler = get_rule_sampler(
        "mutation",
        None,
    )
    random_enumerator = get_tree_enumerator(
        "beam", random_rule_sampler, force_apply=True)
    random_repairer = PipelineRepairer(random_enumerator)
    print("Random-mutation")
    random_passes = get_repair_hashes(random_repairer, seed)
    # Sanity: repairs happened and produced more than one distinct result.
    assert len(random_passes[0]) > 0
    assert len(set(random_passes[0])) > 1
    assert random_passes[0] == random_passes[
        1], "random-mutation should be deterministic"

    # hack up some "fake" rules: reuse the rules traced during the random
    # run, assigning each a random score delta.
    rules = []
    for _, lineage in random_enumerator.statistics.trace:
        for r in lineage:
            r._score_delta = np.random.random()
            rules.append(r)

    # Weighted-transducer repairer built from the fake rules above.
    weighted_rule_sampler = get_rule_sampler("weighted", rules)
    weighted_enumerator = get_tree_enumerator(
        "beam",
        weighted_rule_sampler,
        force_apply=False,
    )
    weighted_repairer = PipelineRepairer(weighted_enumerator)
    print("Weighted-transducer")
    weighted_passes = get_repair_hashes(weighted_repairer, seed)
    assert len(weighted_passes[0]) > 0
    assert len(set(weighted_passes[0])) > 1
    assert weighted_passes[0] == weighted_passes[
        1], "weighted-transducer should be deterministic"
| 31.294118
| 78
| 0.673684
|
acfd6ae6bf0fea2c7e9eb762cdc5fd304982fe06
| 805
|
py
|
Python
|
gui.py
|
joelkaret/Sudoku_Solver
|
8243d38695ff093d4335c66e08370bcfda7874f3
|
[
"MIT"
] | null | null | null |
gui.py
|
joelkaret/Sudoku_Solver
|
8243d38695ff093d4335c66e08370bcfda7874f3
|
[
"MIT"
] | null | null | null |
gui.py
|
joelkaret/Sudoku_Solver
|
8243d38695ff093d4335c66e08370bcfda7874f3
|
[
"MIT"
] | null | null | null |
# import sudokuSolver
from SudokuSolverGui import Ui_MainWindow
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
class Solver(qtw.QMainWindow):
    """Main window of the Sudoku solver GUI (Designer-generated layout)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Wire every button to its handler in one place.
        for button, handler in (
            (self.ui.edit_board_button, self.edit_board),
            (self.ui.all_solutions_button, self.all_solutions),
            (self.ui.solve_button, self.solve),
        ):
            button.clicked.connect(handler)

    def edit_board(self):
        # Placeholder handler.
        print("yay")

    def all_solutions(self):
        # Placeholder handler.
        print("all solutions")

    def solve(self):
        # Placeholder handler.
        print("solve the board")
# Script entry point: create the Qt application, show a single Solver
# window and hand control to the Qt event loop.
if __name__ == '__main__':
    app = qtw.QApplication([])
    widget = Solver()
    widget.show()
    app.exec_()
| 22.361111
| 72
| 0.654658
|
acfd6b14fee49bcbe9cefaa8bc68ee6c30d5c24a
| 341
|
py
|
Python
|
twitter/api/migrations/0004_alter_tweet_options.py
|
JollyBanny/sample-django
|
bc001fa4fd6d547bc7ef4704c4954980862a91e7
|
[
"MIT"
] | null | null | null |
twitter/api/migrations/0004_alter_tweet_options.py
|
JollyBanny/sample-django
|
bc001fa4fd6d547bc7ef4704c4954980862a91e7
|
[
"MIT"
] | null | null | null |
twitter/api/migrations/0004_alter_tweet_options.py
|
JollyBanny/sample-django
|
bc001fa4fd6d547bc7ef4704c4954980862a91e7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-04-23 01:14
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: default Tweet ordering becomes newest-first."""

    dependencies = [
        ('api', '0003_alter_tweet_photo'),
    ]

    operations = [
        # Schema-free change: only the model's Meta options are altered.
        migrations.AlterModelOptions(
            name='tweet',
            options={'ordering': ['-created']},
        ),
    ]
| 18.944444
| 47
| 0.583578
|
acfd6be898b08eaffcfdc2c9c4011d4115be1d37
| 5,268
|
py
|
Python
|
builder/s3_trigger_handler.py
|
drilonibrahimi/ExcelLexBot
|
5abe780a7ab9f7e9e3ac6fd74b0c485516b516a8
|
[
"Apache-2.0"
] | 15
|
2018-03-06T05:39:50.000Z
|
2021-09-04T05:00:12.000Z
|
builder/s3_trigger_handler.py
|
drilonibrahimi/ExcelLexBot
|
5abe780a7ab9f7e9e3ac6fd74b0c485516b516a8
|
[
"Apache-2.0"
] | 3
|
2021-05-11T11:16:18.000Z
|
2021-06-17T15:49:14.000Z
|
builder/s3_trigger_handler.py
|
drilonibrahimi/ExcelLexBot
|
5abe780a7ab9f7e9e3ac6fd74b0c485516b516a8
|
[
"Apache-2.0"
] | 11
|
2018-03-10T23:48:07.000Z
|
2021-09-06T07:56:40.000Z
|
import sys
sys.path.append("/opt/")
import os
import zipfile
from shutil import copyfile
from time import sleep
import boto3
from boto3.s3.transfer import S3Transfer
from BotBuilder import BotBuilder
# Shared AWS clients and configuration, resolved once per Lambda container
# and reused across invocations.
s3 = boto3.client('s3')
region = os.environ['AWS_REGION']
dynamodbAutoScaling = os.environ['DynamodbAutoScaling']
lib_layer = os.environ['LibLayer']
cloudformation = boto3.client('cloudformation')
tmp_folder = "/tmp/"  # the only writable path inside AWS Lambda
def lambda_handler(event, context):
    """S3 trigger entry point: create or delete the Lex chatbot stack
    named after the uploaded/removed .xlsx workbook."""
    record = event['Records'][0]
    source_bucket = record['s3']['bucket']['name']
    xlsx_file_name = record['s3']['object']['key']
    event_name = record["eventName"]
    print(xlsx_file_name)
    # Stack name mirrors the workbook name, lower-cased, extension stripped.
    stack_name = xlsx_file_name.lower().replace(".xlsx", "")
    # The AWS account id is the fifth field of the invoked Lambda's ARN.
    aws_account = context.invoked_function_arn.split(":")[4]
    if event_name == "ObjectRemoved:Delete":
        print("Delete Excel Lex chatbot stack " + stack_name)
        response = cloudformation.delete_stack(StackName=stack_name)
        print(response)
    elif event_name == "ObjectCreated:Put":
        print("Create Excel Lex chatbot stack " + stack_name)
        create_excel_lex_chatbot_stack(
            aws_account, source_bucket, stack_name, xlsx_file_name)
def create_excel_lex_chatbot_stack(aws_account, source_bucket, stack_name,
                                   xlsx_file_name):
    """Build and deploy the CloudFormation stack for one Excel workbook.

    Downloads the workbook, generates Lex JSON plus a CloudFormation
    template from it, zips and uploads the artefacts to the source bucket,
    then creates and executes a CloudFormation change set for the stack.
    """
    base_path = os.environ['LAMBDA_TASK_ROOT']
    change_set_name = stack_name + "ChangeSet"
    s3.download_file(source_bucket, xlsx_file_name,
                     tmp_folder + xlsx_file_name)
    # Fresh (emptied) working directories under /tmp for generated output.
    json_base_folder = os.path.join(tmp_folder, "json")
    json_output = os.path.join(tmp_folder, "json", "lexjson")
    deployment_output = os.path.join(tmp_folder, "deployment")
    ensure_path_exists(json_base_folder)
    ensure_path_exists(json_output)
    ensure_path_exists(deployment_output)
    print("Generate Lex Json and cloudformation from Excel")
    xlsx_file_path = os.path.join(tmp_folder, xlsx_file_name)
    bot_builder = BotBuilder(
        xlsx_file_path, json_output,
        "arn:aws:lambda:{0}:{1}:function:".format(region, aws_account))
    bot_builder.generate_cloudformation_resources()
    print("Copy Cloudformation")
    source = os.path.join(json_output, "lexbot.yaml")
    destination = os.path.join(deployment_output, stack_name + ".yaml")
    copyfile(source, destination)
    print("Create Deployment packages including Lex Json and dependencies")
    # Bundle the Lambda code (base_path) together with the generated JSON.
    lex_builder_function_zip = os.path.join(deployment_output, stack_name)
    zip_dir([base_path, json_base_folder], lex_builder_function_zip)
    print("Upload Package")
    upload_to_s3(source_bucket,
                 os.path.join(deployment_output, stack_name + ".zip"))
    print("Upload Cloudformation Template")
    upload_to_s3(source_bucket,
                 os.path.join(deployment_output, stack_name + ".yaml"))
    response = cloudformation.create_change_set(
        StackName=stack_name,
        TemplateURL='https://s3.amazonaws.com/{0}/code/{1}.yaml'.format(
            source_bucket, stack_name),
        Parameters=[{
            'ParameterKey': 'SourceBucket',
            'ParameterValue': source_bucket,
        }, {
            'ParameterKey': 'DynamodbAutoScaling',
            'ParameterValue': dynamodbAutoScaling,
        }, {
            'ParameterKey': 'LibLayer',
            'ParameterValue': lib_layer,
        }],
        Capabilities=[
            'CAPABILITY_IAM',
        ],
        ChangeSetName=change_set_name,
        ChangeSetType='CREATE')
    print(response)
    # Poll up to 9 times (5 s apart) for the change set to become available.
    execution_status = 'UNAVAILABLE'
    for i in range(1, 10):
        sleep(5)
        print(execution_status)
        response = cloudformation.describe_change_set(
            ChangeSetName=change_set_name, StackName=stack_name)
        execution_status = response["ExecutionStatus"]
        if execution_status == "AVAILABLE":
            break
    if execution_status != "AVAILABLE":
        # Timed out waiting — clean up the dangling change set.
        cloudformation.delete_change_set(ChangeSetName=change_set_name,
                                         StackName=stack_name)
        print("Cannot create change Set, so delete it!")
    else:
        response = cloudformation.execute_change_set(
            ChangeSetName=change_set_name, StackName=stack_name)
        print(response)
def ensure_path_exists(path):
    """Recreate *path* as an empty directory, wiping any previous contents."""
    # dirname(path + "/") normalises away any trailing slash.
    target = os.path.dirname(path + "/")
    if os.path.exists(target):
        import shutil
        shutil.rmtree(target)
    os.makedirs(target)
def upload_to_s3(bucket: str, file_path_name: str):
    """Upload *file_path_name* into the bucket under the code/ prefix."""
    path, filename = os.path.split(file_path_name)
    S3Transfer(s3).upload_file(file_path_name, bucket, "code/" + filename)
def zip_dir(folders: list, dst):
    """Zip the contents of every folder in *folders* into ``dst + ".zip"``.

    Each file is stored under its path relative to its own source folder,
    so entries from multiple folders are merged at the archive root.
    """
    # Context manager guarantees the archive is finalised and the handle
    # closed even if a write raises (the original leaked it on error).
    with zipfile.ZipFile("%s.zip" % (dst), "w", zipfile.ZIP_DEFLATED) as zf:
        for src in folders:
            abs_src = os.path.abspath(src)
            for dirname, _subdirs, files in os.walk(src):
                for filename in files:
                    absname = os.path.abspath(os.path.join(dirname, filename))
                    # Archive name = path relative to the source root.
                    arcname = absname[len(abs_src) + 1:]
                    zf.write(absname, arcname)
| 37.361702
| 88
| 0.66306
|
acfd6bf64dfd75b671ee17490ec33d57c199fc70
| 2,725
|
py
|
Python
|
DetectionOnset.py
|
gamaievsky/DescripteursHarmoniquesAudio
|
551e253058502049a91803da8b0412b5ffb1bd60
|
[
"MIT"
] | null | null | null |
DetectionOnset.py
|
gamaievsky/DescripteursHarmoniquesAudio
|
551e253058502049a91803da8b0412b5ffb1bd60
|
[
"MIT"
] | null | null | null |
DetectionOnset.py
|
gamaievsky/DescripteursHarmoniquesAudio
|
551e253058502049a91803da8b0412b5ffb1bd60
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statistics as stat
from scipy import signal
import math
#from scipy import signal
import librosa
import librosa.display
import params
# --- Analysis parameters, all pulled from the shared params module ---
WINDOW = params.WINDOW      # STFT window
NFFT = int(params.NFFT)     # FFT size in samples
STEP = int(params.STEP)     # hop length in samples
ALPHA = params.ALPHA        # additive offset of the onset threshold
BETA = params.BETA          # weight of the median term in the threshold
H = params.H                # median-filter window length, in frames
triFreq = params.triFreq    # if True, restrict the deviation sum to [Notemin, Notemax]

title = 'Palestrina'
#Palestrina, Cadence4VMaj
y, sr = librosa.load('/Users/manuel/Dropbox (TMG)/Thèse/code/DescripteursHarmoniquesAudio/'+title+'.wav', duration = 6)
Notemin = 'D3'  # lower bound of the analysed pitch band
Notemax = 'D8'  # upper bound of the analysed pitch band
def detectionOnsets(y):
    """Detect onsets in signal *y* via phase-prediction deviation.

    For each STFT frame, the phase is linearly extrapolated from the two
    previous frames; the per-bin prediction error is summed into a
    deviation curve, which is thresholded by a running median
    (ALPHA + BETA * median over H frames) to mark onsets. Plots the
    spectrogram, deviation, threshold and detected onsets.
    """
    S = librosa.stft(y, n_fft = NFFT,hop_length= STEP, window=WINDOW)
    Ampl = np.abs(S)
    Phas = np.angle(S)
    Nf = len(Ampl)      # number of frequency bins
    N = len(Ampl[0])    # number of frames
    Ampl_predict = np.zeros((Nf,N))
    Phas_predict = np.zeros((Nf,N))
    Erreur = np.zeros((Nf,N))
    Dev = np.zeros(N)
    if triFreq:
        # Find the bin indices bracketing the [Notemin, Notemax] band.
        freqs = librosa.fft_frequencies(sr=sr,n_fft=NFFT)
        fmin = librosa.note_to_hz(Notemin)
        fmax = librosa.note_to_hz(Notemax)
        imin = 0
        while freqs[imin]<fmin:
            imin = imin+1
        imax = imin
        while freqs[imax]<fmax:
            imax = imax+1
    # Start at frame 2: the phase prediction needs two previous frames.
    for j in range(2,N):
        for i in range(Nf):
            Ampl_predict[i,j] = Ampl[i,j-1]
            # Linear phase extrapolation from the two previous frames.
            Phas_predict[i,j] = 2*Phas[i,j-1]-Phas[i,j-2]
            #Erreur[i,j] = (Ampl[i,j]**2 + Ampl_predict[i,j]**2 - 2*Ampl_predict[i,j]*Ampl[i,j]*math.cos(Phas[i,j]-Phas_predict[i,j]))**(1/2)
            Erreur[i,j] = math.cos(Phas[i,j]-Phas_predict[i,j])
        if triFreq:
            Dev[j] = sum(Erreur[imin:(imax+1),j])
        else: Dev[j] = sum(Erreur[:,j])

    # Threshold function
    # Pad with zeros at the head and tail (half the median window each side)
    l = []
    Seuil = []
    Onsets = []
    for k in range(int(H/2)):
        l.append(0)
    for val in Dev:
        l.append(val)
    for k in range(int(H/2)):
        l.append(0)
    # Compute the running median threshold and collect onset frames
    for i in range(N):
        Seuil.append(ALPHA + BETA*stat.median(l[i:i+H]))
        if Dev[i] > Seuil[i]:
            Onsets.append(i)

    # Plot the spectrogram with the deviation/threshold/onset overlay.
    times = librosa.frames_to_time(np.arange(N), sr=sr, hop_length=STEP, n_fft=NFFT)
    plt.figure()
    ax1 = plt.subplot(2, 1, 1)
    librosa.display.specshow(librosa.amplitude_to_db(Ampl, ref=np.max), sr=sr, hop_length=STEP, x_axis='time', y_axis='log')
    plt.title('Power spectrogram')
    plt.subplot(2, 1, 2, sharex=ax1)
    plt.plot(times, Dev, label='Deviation')
    plt.plot(times, Seuil, color='g', label='Seuil')
    plt.vlines(times[Onsets], 0, Dev.max(), color='r', alpha=0.9, linestyle='--', label='Onsets')
    plt.axis('tight')
    plt.legend(frameon=True, framealpha=0.75)
    plt.show()

# Run the detection on the audio loaded at module level.
detectionOnsets(y)
| 27.525253
| 141
| 0.611743
|
acfd6c0b4b8869d8323fbab1f08d9c48b85ec7e6
| 4,304
|
py
|
Python
|
torch/ao/quantization/fx/_lower_to_native_backend.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 1
|
2022-01-20T03:49:23.000Z
|
2022-01-20T03:49:23.000Z
|
torch/ao/quantization/fx/_lower_to_native_backend.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 14
|
2021-10-14T06:58:50.000Z
|
2021-12-17T11:51:07.000Z
|
torch/ao/quantization/fx/_lower_to_native_backend.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | null | null | null |
import torch
from torch.nn.quantized.modules.utils import ReferenceableQuantizedModule
from . import subgraph_rewriter_FORKED_DO_NOT_USE
from .graph_module import QuantizedGraphModule
from .quantized_fusion_patterns_and_replacements import get_fbgemm_patterns_and_replacements
from .match_utils import is_match
from .match_utils import MatchAllNode
from ..utils import _parent_name
from typing import Dict, Type
# Mapping from reference module class to the replacement quantized module class for lowering
LOWER_MODULE_MAP: Dict[Type[torch.nn.Module], Type[ReferenceableQuantizedModule]] = {
    torch.nn.quantized._reference.Linear: torch.nn.quantized.Linear,
    torch.nn.quantized._reference.Conv1d: torch.nn.quantized.Conv1d,
    torch.nn.quantized._reference.Conv2d: torch.nn.quantized.Conv2d,
    torch.nn.quantized._reference.Conv3d: torch.nn.quantized.Conv3d,
}
def _lower_weighted_ref_module(model: QuantizedGraphModule, ref_class: Type[torch.nn.Module]) -> QuantizedGraphModule:
    """
    Traverse the graph and find dequantize - ref module - quantize patterns
    and replace them with the quantized version of the ref module.
    """
    if ref_class not in LOWER_MODULE_MAP:
        raise ValueError("Lowering is currently not supported for reference module %s" % ref_class.__name__)
    q_class = LOWER_MODULE_MAP[ref_class]
    # Pattern: quantize_per_tensor(ref_module(dequantize(x)), scale, zp, dtype)
    pattern = (torch.quantize_per_tensor,
               (ref_class, "dequantize"),
               MatchAllNode, MatchAllNode, MatchAllNode)
    modules = dict(model.named_modules(remove_duplicate=False))
    nodes = list(model.graph.nodes)
    # TODO: maybe organize this better (e.g. break down to more functions)
    # to make this function more readable
    for n in model.graph.nodes:
        if not is_match(modules, n, pattern):
            continue
        # Matched nodes: q_node -> ref_node -> dq_node (innermost).
        q_node = n
        ref_node = q_node.args[0]
        dq_node = ref_node.args[0]
        # get output scale/zero_point/dtype from the quantize node
        scale_node = q_node.args[1]
        zero_point_node = q_node.args[2]
        dtype = q_node.args[3]
        # this can be removed if we add support for "get_attr" in is_match
        if scale_node.op != "get_attr" or zero_point_node.op != "get_attr":
            print("Find the pattern but scale_node and zero_point node are not `get_attr`,"
                  f"got: {scale_node.format_node} {zero_point_node.format_node()}")
            continue
        # this can be removed if we add support for constants in is_match
        if dtype != torch.quint8:
            print(f"Only qint8 output for quantized op is supported, got: {dtype}")
            continue
        # change this pattern to use the corresponding quantized module
        ref_module = modules[ref_node.target]
        output_scale = getattr(model, scale_node.target)
        output_zero_point = getattr(model, zero_point_node.target)
        assert issubclass(q_class, ReferenceableQuantizedModule)  # suppress mypy warnings
        q_module = q_class.from_reference(ref_module, output_scale, output_zero_point)
        # replace reference module with quantized module
        parent_name, module_name = _parent_name(ref_node.target)
        setattr(modules[parent_name], module_name, q_module)
        # remove dq node:
        dq_node_input = dq_node.args[0]
        dq_node.replace_all_uses_with(dq_node_input)
        model.graph.erase_node(dq_node)
        # remove q node and args:
        q_node.replace_all_uses_with(ref_node)
        model.graph.erase_node(q_node)
        model.graph.erase_node(scale_node)
        model.graph.erase_node(zero_point_node)
    model.recompile()
    return model
def _lower_to_native_backend(model: QuantizedGraphModule) -> QuantizedGraphModule:
    """Lower a reference-quantized model to PyTorch's native backend.

    fbgemm and qnnpack share the same operator signatures, so one lowering
    pass serves both: first swap each reference weighted module for its
    quantized counterpart, then rewrite the remaining reference operator
    patterns in the graph.
    """
    # Module-level lowering: one pass per supported reference class.
    for reference_cls in LOWER_MODULE_MAP:
        model = _lower_weighted_ref_module(model, reference_cls)
    model.recompile()
    # Pattern-level lowering through the (forked) subgraph rewriter.
    for pattern, replacement in get_fbgemm_patterns_and_replacements():
        subgraph_rewriter_FORKED_DO_NOT_USE.replace_pattern(model, pattern, replacement)
    model.graph.lint()
    return model
| 46.27957
| 118
| 0.725139
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.