content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ray
from ray.util.sgd.torch import TorchTrainer
class PyTorchTrainer(object):
    """Distributed PyTorch trainer built on Ray SGD's TorchTrainer.

    The TorchTrainer is lifted to a Ray actor so that its local worker is
    created on the cluster rather than on the driver process.
    """

    def __init__(self, model_creator, data_creator, optimizer_creator,
                 loss_creator=None, scheduler_creator=None, training_operator_cls=None,
                 initialization_hook=None, config=None, num_workers=1,
                 use_fp16=False, use_tqdm=False, scheduler_step_freq="batch"):
        # Lift TorchTrainer to an Actor so that its local worker would be
        # created on the cluster as well.
        RemoteTrainer = ray.remote(TorchTrainer)
        self.trainer = RemoteTrainer.remote(model_creator=model_creator,
                                            data_creator=data_creator,
                                            optimizer_creator=optimizer_creator,
                                            loss_creator=loss_creator,
                                            scheduler_creator=scheduler_creator,
                                            training_operator_cls=training_operator_cls,
                                            initialization_hook=initialization_hook,
                                            config=config,
                                            num_workers=num_workers,
                                            backend="gloo",
                                            use_fp16=use_fp16,
                                            use_tqdm=use_tqdm,
                                            scheduler_step_freq=scheduler_step_freq)

    def train(self, nb_epoch=1):
        """Trains a PyTorch model for several epochs.

        Returns the statistics of the last epoch, or None if nb_epoch <= 0
        (the original raised UnboundLocalError in that case).
        """
        stats = None
        for _ in range(nb_epoch):
            stats = ray.get(self.trainer.train.remote())
        return stats

    def shutdown(self, force=False):
        """Shuts down the remote trainer and all of its workers."""
        # self.trainer is an actor handle: methods must be invoked through
        # .remote().  The original called self.trainer.shutdown(force)
        # directly, which fails on an ActorHandle.  Block until done.
        ray.get(self.trainer.shutdown.remote(force))
|
nilq/baby-python
|
python
|
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
import pytest
import logging
from ucloud.core import exc
from ucloud.testing import env, funcs, op, utest
logger = logging.getLogger(__name__)
scenario = utest.Scenario(279)
@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())
def test_set_279(client: utest.Client, variables: dict):
    """Entry point for test set 279: seed the scenario variables and run all steps."""
    scenario.initial(variables)
    # Resolved at run time by the test framework into a concrete image id
    # for the current region/zone.
    scenario.variables["Image_Id"] = "#{u_get_image_resource($Region,$Zone)}"
    scenario.run(client)
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "DescribeImageResponse"),
    ],
    action="DescribeImage",
)
def describe_image_00(client: utest.Client, variables: dict):
    """Look up a base Linux image and record its id in Image_Id."""
    request = {
        "Zone": variables.get("Zone"),
        "Region": variables.get("Region"),
        "OsType": "Linux",
        "ImageType": "Base",
    }
    try:
        resp = client.uhost().describe_image(request)
    except exc.RetCodeException as err:
        resp = err.json()
    variables["Image_Id"] = utest.value_at_path(resp, "ImageSet.0.ImageId")
    return resp
@scenario.step(
    max_retries=10,
    retry_interval=10,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("len_ge", "UHostIds", 0),
    ],
    action="CreateUHostInstance",
)
def create_uhost_instance_01(client: utest.Client, variables: dict):
    """Create a small UHost instance and record its id in UHostId."""
    request = {
        "Zone": variables.get("Zone"),
        "TimemachineFeature": "No",
        "Tag": "Default",
        "Region": variables.get("Region"),
        "Password": "VXFhNzg5VGVzdCFAIyQ7LA==",
        "Name": "eip-s1-bgp",
        "Memory": 1024,
        "LoginMode": "Password",
        "ImageId": variables.get("Image_Id"),
        "HotplugFeature": False,
        "DiskSpace": 0,
        "CPU": 1,
    }
    try:
        resp = client.uhost().create_uhost_instance(request)
    except exc.RetCodeException as err:
        resp = err.json()
    variables["UHostId"] = utest.value_at_path(resp, "UHostIds.0")
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=120,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="AllocateEIP",
)
def allocate_eip_02(client: utest.Client, variables: dict):
    """Allocate the first BGP EIP and record its id in EIPId_01."""
    request = {
        "Tag": "Default",
        "Remark": "test",
        "Region": variables.get("Region"),
        "Quantity": 1,
        "PayMode": "Bandwidth",
        "OperatorName": "Bgp",
        "Name": "eip-bgp-01",
        "ChargeType": "Dynamic",
        "Bandwidth": 2,
    }
    try:
        resp = client.unet().allocate_eip(request)
    except exc.RetCodeException as err:
        resp = err.json()
    variables["EIPId_01"] = utest.value_at_path(resp, "EIPSet.0.EIPId")
    return resp
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "EIPSet.0.EIPId", variables.get("EIPId_01")),
    ],
    action="DescribeEIP",
)
def describe_eip_03(client: utest.Client, variables: dict):
    """Verify the freshly allocated EIP can be described by id."""
    request = {
        "Region": variables.get("Region"),
        "EIPIds": [variables.get("EIPId_01")],
    }
    try:
        return client.unet().describe_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="UpdateEIPAttribute",
)
def update_eip_attribute_04(client: utest.Client, variables: dict):
    """Rename/retag the first EIP."""
    request = {
        "Tag": "huangchao",
        "Remark": "test-gai",
        "Region": variables.get("Region"),
        "Name": "eip-auto-gai",
        "EIPId": variables.get("EIPId_01"),
    }
    try:
        return client.unet().update_eip_attribute(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="GetEIPPrice",
)
def get_eip_price_05(client: utest.Client, variables: dict):
    """Query the price of a 2 Mbps BGP EIP."""
    request = {
        "Region": variables.get("Region"),
        "OperatorName": "Bgp",
        "Bandwidth": 2,
    }
    try:
        return client.unet().get_eip_price(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="BindEIP",
)
def bind_eip_06(client: utest.Client, variables: dict):
    """Bind the first EIP to the UHost instance."""
    request = {
        "ResourceType": "uhost",
        "ResourceId": variables.get("UHostId"),
        "Region": variables.get("Region"),
        "EIPId": variables.get("EIPId_01"),
    }
    try:
        return client.unet().bind_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "EIPSet.0.Resource.ResourceID", variables.get("UHostId")),
    ],
    action="DescribeEIP",
)
def describe_eip_07(client: utest.Client, variables: dict):
    """Check that the EIP now reports the UHost as its bound resource."""
    request = {
        "Region": variables.get("Region"),
        "EIPIds": [variables.get("EIPId_01")],
    }
    try:
        return client.unet().describe_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="ModifyEIPBandwidth",
)
def modify_eip_bandwidth_08(client: utest.Client, variables: dict):
    """Raise the EIP bandwidth to 3 Mbps."""
    request = {
        "Region": variables.get("Region"),
        "EIPId": variables.get("EIPId_01"),
        "Bandwidth": 3,
    }
    try:
        return client.unet().modify_eip_bandwidth(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="SetEIPPayMode",
)
def set_eip_pay_mode_09(client: utest.Client, variables: dict):
    """Switch the EIP billing mode to Traffic."""
    request = {
        "Region": variables.get("Region"),
        "PayMode": "Traffic",
        "EIPId": variables.get("EIPId_01"),
        "Bandwidth": 2,
    }
    try:
        return client.unet().set_eip_pay_mode(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=0,
    retry_interval=0,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="ModifyEIPWeight",
)
def modify_eip_weight_10(client: utest.Client, variables: dict):
    """Set the EIP outbound weight to 100."""
    request = {
        "Weight": 100,
        "Region": variables.get("Region"),
        "EIPId": variables.get("EIPId_01"),
    }
    try:
        return client.unet().modify_eip_weight(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "Action", "UnBindEIPResponse"),
    ],
    action="UnBindEIP",
)
def un_bind_eip_11(client: utest.Client, variables: dict):
    """Unbind the first EIP from the UHost instance."""
    request = {
        "ResourceType": "uhost",
        "ResourceId": variables.get("UHostId"),
        "Region": variables.get("Region"),
        "EIPId": variables.get("EIPId_01"),
    }
    try:
        return client.unet().un_bind_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="AllocateEIP",
)
def allocate_eip_12(client: utest.Client, variables: dict):
    """Allocate a second BGP EIP and record its id in EIPId_02."""
    request = {
        "Tag": "Default",
        "Remark": "test",
        "Region": variables.get("Region"),
        "Quantity": 1,
        "PayMode": "Bandwidth",
        "OperatorName": "Bgp",
        "Name": "eip-bgp-01",
        "ChargeType": "Dynamic",
        "Bandwidth": 2,
    }
    try:
        resp = client.unet().allocate_eip(request)
    except exc.RetCodeException as err:
        resp = err.json()
    variables["EIPId_02"] = utest.value_at_path(resp, "EIPSet.0.EIPId")
    return resp
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("gt", "TotalCount", 1),
        ("len_eq", "EIPSet", 1),
    ],
    action="DescribeEIP",
)
def describe_eip_13(client: utest.Client, variables: dict):
    """List EIPs with Limit=1 and check paging (TotalCount > returned count)."""
    request = {"Region": variables.get("Region"), "Limit": 1}
    try:
        return client.unet().describe_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=10,
    retry_interval=10,
    startup_delay=5,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 8039)],
    action="ReleaseEIP",
)
def release_eip_14(client: utest.Client, variables: dict):
    """Release the first EIP (validator expects RetCode 8039)."""
    request = {"Region": variables.get("Region"), "EIPId": variables.get("EIPId_01")}
    try:
        return client.unet().release_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=10,
    retry_interval=10,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [
        ("str_eq", "RetCode", 0),
        ("str_eq", "TotalCount", 0),
    ],
    action="DescribeEIP",
)
def describe_eip_15(client: utest.Client, variables: dict):
    """Confirm the released EIP is no longer listed (TotalCount == 0)."""
    request = {
        "Region": variables.get("Region"),
        "EIPIds": [variables.get("EIPId_01")],
    }
    try:
        return client.unet().describe_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=0,
    fast_fail=False,
    validators=lambda variables: [("str_eq", "RetCode", 0)],
    action="ReleaseEIP",
)
def release_eip_16(client: utest.Client, variables: dict):
    """Release the second EIP."""
    request = {"Region": variables.get("Region"), "EIPId": variables.get("EIPId_02")}
    try:
        return client.unet().release_eip(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=5,
    fast_fail=False,
    action="PoweroffUHostInstance",
)
def poweroff_uhost_instance_17(client: utest.Client, variables: dict):
    """Force power-off of the test UHost instance (cleanup)."""
    request = {
        "Zone": variables.get("Zone"),
        "UHostId": variables.get("UHostId"),
        "Region": variables.get("Region"),
    }
    try:
        return client.uhost().poweroff_uhost_instance(request)
    except exc.RetCodeException as err:
        return err.json()
@scenario.step(
    max_retries=3,
    retry_interval=1,
    startup_delay=60,
    fast_fail=False,
    action="TerminateUHostInstance",
)
def terminate_uhost_instance_18(client: utest.Client, variables: dict):
    """Terminate the test UHost instance (final cleanup)."""
    request = {
        "Zone": variables.get("Zone"),
        "UHostId": variables.get("UHostId"),
        "Region": variables.get("Region"),
    }
    try:
        return client.uhost().terminate_uhost_instance(request)
    except exc.RetCodeException as err:
        return err.json()
|
nilq/baby-python
|
python
|
# deliverable_spec.py
# This file is auto-generated from the same code that generates
# https://docs.patreon.com. Community pull requests against this
# file may not be accepted.
import pytest
from patreon.schemas import deliverable
@pytest.fixture
def attributes():
    """Attribute names expected on the deliverable schema."""
    return ['completed_at', 'delivery_status', 'due_at']
@pytest.fixture
def relationships():
    """Relationship names expected on the deliverable schema."""
    return ['campaign', 'benefit', 'member', 'user']
def test_schema_attributes_are_properly_formatted(attributes):
    """Each declared attribute must exist on deliverable.Attributes and
    have its own name as its value."""
    for attribute_name in attributes:
        value = getattr(deliverable.Attributes, attribute_name, None)
        # Compare strings by equality: `is` tests object identity and only
        # passes when CPython happens to intern both strings.
        assert value is not None and value == attribute_name
def test_schema_relationships_are_properly_formatted(relationships):
    """Each declared relationship must exist on deliverable.Relationships and
    have its own name as its value."""
    for relationship_name in relationships:
        value = getattr(deliverable.Relationships, relationship_name, None)
        # Equality, not identity: string interning is an implementation detail.
        assert value is not None and value == relationship_name
|
nilq/baby-python
|
python
|
from base_automation import BaseAutomation
from lib.core.monitored_callback import monitored_callback
HOME_ZONE = "home"
AWAY = "not_home"
MESSAGE_LEFT_ZONE = "{} left {}"
MESSAGE_ARRIVED_ZONE = "{} arrived {}"
class ZoneChangeNotificationAutomation(BaseAutomation):
    """Notifies configured targets whenever a tracked device changes zone."""

    def initialize(self):
        # config args
        self.device_entity_ids = self.cfg.value("device_entity_ids")
        self.notify_entity_ids = self.cfg.value("notify_entity_ids")
        for device_entity_id in self.device_entity_ids:
            self.listen_state(self.device_state_change_handler, device_entity_id)

    @monitored_callback
    def device_state_change_handler(self, entity, attribute, old, new, kwargs):
        # A transition into AWAY means the device left the old zone;
        # a transition out of AWAY means it arrived in the new zone.
        if old != AWAY and new == AWAY:
            self.log("{} left {}".format(entity, old))
            self.person_left_zone(entity, old)
        elif old == AWAY and new != AWAY:
            self.log("{} arrived {}".format(entity, new))
            self.person_arrived_zone(entity, new)

    def person_left_zone(self, person_entity_id, zone):
        self._notify_zone_change(person_entity_id, zone,
                                 MESSAGE_LEFT_ZONE, "left_home")

    def person_arrived_zone(self, person_entity_id, zone):
        self._notify_zone_change(person_entity_id, zone,
                                 MESSAGE_ARRIVED_ZONE, "arrived_home")

    def _notify_zone_change(self, person_entity_id, zone, template, category):
        """Shared body of the left/arrived handlers."""
        person = self.get_state(person_entity_id, attribute="friendly_name")
        # Only home-zone transitions carry a push category.
        data = {"push": {"category": category}} if zone == HOME_ZONE else {}
        self.notify(template.format(person, zone.lower()), data)

    def notify(self, message, data):
        for notify_entity_id in self.notify_entity_ids:
            self.call_service("notify/{}".format(notify_entity_id),
                              message=message, data=data)
|
nilq/baby-python
|
python
|
"""All widgets related to editing channels are here."""
from PyQt4 import QtGui, QtCore
from ..ramps import Channel
from CommonWidgets import QMultipleSpinBoxEdit, QNamedPushButton
import rampage.format as fmt
class QEditChannelInfoDialog(QtGui.QDialog):
    """Dialog to edit channel info.

    This dialog is called when the user right clicks on the channel name and
    selects edit.
    """

    def __init__(self, ch_name, dct, parent):
        super(QEditChannelInfoDialog, self).__init__(parent)
        self.setWindowTitle('Edit channel info')
        self.text_name = QtGui.QLineEdit(ch_name, self)
        self.text_comment = QtGui.QLineEdit(dct['comment'], self)
        self.text_id = QtGui.QLineEdit(dct['id'], self)
        self.button_ok = QtGui.QPushButton('Ok', self)
        # Connect exactly once: the original connected `accept` a second
        # time after creating the Cancel button, which made Qt invoke the
        # slot twice per click.
        self.button_ok.clicked.connect(self.accept)
        self.button_cancel = QtGui.QPushButton('Cancel', self)
        self.button_cancel.clicked.connect(self.reject)
        self.grid = QtGui.QGridLayout(self)
        self.grid.addWidget(QtGui.QLabel('Name'), 0, 0)
        self.grid.addWidget(self.text_name, 0, 1)
        self.grid.addWidget(QtGui.QLabel('Comment'), 1, 0)
        self.grid.addWidget(self.text_comment, 1, 1)
        self.grid.addWidget(QtGui.QLabel('id'), 2, 0)
        self.grid.addWidget(self.text_id, 2, 1)
        if dct['type'] == 'analog':
            # analog channels additionally carry a conversion expression
            # (stray debug print removed)
            self.conversion = QtGui.QLineEdit(dct['conversion'], self)
            self.grid.addWidget(QtGui.QLabel('conversion'), 3, 0)
            self.grid.addWidget(self.conversion, 3, 1)
        self.grid.addWidget(self.button_ok, 4, 0)
        self.grid.addWidget(self.button_cancel, 4, 1)
        self.setLayout(self.grid)
        self.dct = dct
        self.ch_name = ch_name

    def exec_(self):
        """Run the dialog modally.

        Returns (exec_return_code, name, comment, id_string,
        conversion_string); conversion_string is None for non-analog
        channels.
        """
        execReturn = super(QEditChannelInfoDialog, self).exec_()
        name = str(self.text_name.text())
        comment = str(self.text_comment.text())
        id_string = str(self.text_id.text())
        if self.dct['type'] == 'analog':
            conversion_string = str(self.conversion.text())
        else:
            conversion_string = None
        return execReturn, name, comment, id_string, conversion_string
class QChannelInfoBox(QtGui.QWidget):
    """Displays channel name, comment and other info.
    This widget sits on the left-most column of every channel row.
    Signals:
    edit_signal(ch_name) - Emits this with its channel name whenever the user
    clicks the edit menu item on the right-click menu. It is the job of the
    parent widget to do something afterwards.
    view_signal(ch_name) - Same as edit, but emitted when the user clicks view
    """
    edit_signal = QtCore.pyqtSignal(object)
    view_signal = QtCore.pyqtSignal(object)

    def __init__(self, ch_name, dct, parent):
        super(QChannelInfoBox, self).__init__(parent)
        self.ch_name = ch_name
        self.dct = dct
        self.vbox = QtGui.QVBoxLayout(self)
        self.setLayout(self.vbox)
        self.ch_name_label = QtGui.QLabel(self)
        self.vbox.addWidget(self.ch_name_label)
        # color-code the label by channel type: green = analog, blue = digital
        if dct['type'] == 'analog':
            fmter = fmt.green
        else:
            fmter = fmt.blue
        self.ch_name_label.setText(fmt.b(fmter(ch_name)))
        self.generateToolTip()
        # create actions to edit the keyframe
        self.edit_action = QtGui.QAction('&Edit', self)
        self.view_action = QtGui.QAction('&View Ramp', self)
        # connect actions to slots
        self.edit_action.triggered.connect(self.edit)
        self.view_action.triggered.connect(self.view)
        # create context menu
        self.pop_menu = QtGui.QMenu(self)
        self.pop_menu.addAction(self.edit_action)
        self.pop_menu.addAction(self.view_action)
        # right clicking on this will bring up the context menu
        # (old-style string-based signal connection, PyQt4 idiom)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        signal_str = 'customContextMenuRequested(const QPoint&)'
        self.connect(self, QtCore.SIGNAL(signal_str), self.onContextMenu)

    def generateToolTip(self):
        """Rebuild the HTML tooltip from the current channel dict."""
        tt = fmt.b(fmt.red(self.ch_name)) + '<br>'
        tt += fmt.i(self.dct['comment']) + '<br>\n'
        tt += fmt.b(self.dct['id']) + '<br>\n'
        if self.dct['type'] == 'analog':
            tt += 'Conversion: ' + fmt.b(self.dct['conversion']) + '<br>\n'
        tt += '<br><i>right-click label to edit...</i>'
        self.setToolTip(tt)

    def edit(self):
        # forward to the parent via edit_signal
        self.edit_signal.emit(self.ch_name)

    def view(self):
        # forward to the parent via view_signal
        self.view_signal.emit(self.ch_name)

    def onContextMenu(self, point):
        # show context menu
        self.pop_menu.exec_(self.mapToGlobal(point))

    def edit_channel_info(self, new_ch_name, ch_dct):
        """Parent widget calls this whenever the user edits channel info.
        """
        self.ch_name = new_ch_name
        self.dct = ch_dct
        if ch_dct['type'] == 'analog':
            fmter = fmt.green
        else:
            fmter = fmt.blue
        self.ch_name_label.setText(fmt.b(fmter(self.ch_name)))
        self.generateToolTip()
class QChannelSegment(QtGui.QWidget):
    """One editable ramp segment of a channel: a ramp-type combo box plus
    spin boxes for the ramp's parameters.

    Signals:
        delete_segment(keyname) - emitted when the user selects 'delete' in
            the ramp-type combo box.
        edit_segment() - emitted whenever the ramp type or any parameter
            value changes.
    """
    delete_segment = QtCore.pyqtSignal(object)
    edit_segment = QtCore.pyqtSignal()

    def __init__(self, keyname, dct, parent, ramp_types):
        super(QChannelSegment, self).__init__(parent)
        self.dct = dct
        self.vbox = QtGui.QVBoxLayout(self)
        self.vbox.setSpacing(0)
        self.setLayout(self.vbox)
        self.keyname = keyname
        self.ramp_types = ramp_types
        # combo entries: sorted ramp-type names, a separator, then 'delete'
        self.ramp_type_list = sorted(self.ramp_types.keys())
        self.curr_ramp_index = self.ramp_type_list.index(self.dct['ramp_type'])
        self.ramp_type_combo = QtGui.QComboBox(self)
        self.ramp_type_combo.addItems(sorted(self.ramp_types.keys()))
        self.ramp_type_combo.insertSeparator(len(self.ramp_types))
        self.ramp_type_combo.addItem('delete')
        self.ramp_type_combo.setCurrentIndex(self.curr_ramp_index)
        self.ramp_type_combo.currentIndexChanged.connect(self.handleRampTypeChanged)
        # one spin box per parameter of the current ramp type
        ramp_parm_names = self.ramp_types[self.dct['ramp_type']]
        ramp_parm_values = [self.dct['ramp_data'][k] for k in ramp_parm_names]
        self.spin_boxes = QMultipleSpinBoxEdit(ramp_parm_names, self,
                                               ramp_parm_values)
        self.spin_boxes.valueChanged.connect(self.handleValueChanged)
        self.vbox.addWidget(self.ramp_type_combo)
        self.vbox.addWidget(self.spin_boxes)

    def handleRampTypeChanged(self, new_ramp_type_index):
        """Switch to a new ramp type, or request deletion of this segment."""
        item_text = str(self.ramp_type_combo.itemText(new_ramp_type_index))
        if item_text == 'delete':
            self.delete_segment.emit(self.keyname)
        else:
            ramp_parm_names = self.ramp_types[item_text]
            self.spin_boxes.editAttributes(ramp_parm_names)
            self.dct['ramp_type'] = item_text
            # reset all parameters of the new ramp type to 0.0
            ramp_data_dct = {}
            for rpn in ramp_parm_names:
                ramp_data_dct[rpn] = 0.0
            self.dct['ramp_data'] = ramp_data_dct
            self.edit_segment.emit()

    def handleValueChanged(self, new_values):
        """Copy edited spin-box values back into this segment's ramp data."""
        ramp_parm_names = self.ramp_types[self.dct['ramp_type']]
        for rpn, val in zip(ramp_parm_names, new_values):
            self.dct['ramp_data'][rpn] = val
        self.edit_segment.emit()
class QDigitalChannelSegment(QChannelSegment):
    """Channel segment for digital channels: adds an ON/OFF toggle button."""

    def __init__(self, keyname, dct, parent, ramp_types):
        super(QDigitalChannelSegment, self).__init__(keyname, dct,
                                                     parent, ramp_types)
        self.boolButton = QtGui.QPushButton(self)
        self.boolButton.setCheckable(True)
        self.state = self.dct['state']
        self.boolButton.setChecked(self.state)
        self.boolButton.setText('ON' if self.state else 'OFF')
        self.boolButton.clicked.connect(self.handleBoolButtonClicked)
        # green background when checked (ON), red otherwise
        self.boolButton.setStyleSheet(
            'QPushButton:checked { background-color:'
            'rgb(100,255,125); }'
            'QPushButton { background-color:'
            'rgb(255,125,100); }')
        self.vbox.addWidget(self.boolButton)

    def handleBoolButtonClicked(self, checked):
        """Sync the button state into the segment dict and notify listeners."""
        self.state = bool(checked)
        self.boolButton.setText('ON' if self.state else 'OFF')
        self.dct['state'] = self.state
        self.edit_segment.emit()
class QChannel(Channel):
    """Edits channels.

    parent widget should have the following slots:
        handleEditChannelInfo(self, ch_name)
        handleViewChannel(self, ch_name)
    and a `ramp_changed` signal.
    """

    def __init__(self, ch_name, dct, key_frame_list, settings, grid, parent,
                 ramp_types, start_pos=(0, 0)):
        super(QChannel, self).__init__(ch_name, dct, key_frame_list)
        self.start_pos = start_pos
        self.parent = parent
        self.grid = grid
        self.ramp_types = ramp_types
        self.channel_type = dct['type']
        self.setupUi()

    def setupUi(self):
        """Create the info box and one segment or '+' button per key frame."""
        self.ch_info = QChannelInfoBox(self.ch_name, self.dct, self.parent)
        self.ch_info.edit_signal.connect(self.parent.handleEditChannelInfo)
        self.ch_info.view_signal.connect(self.parent.handleViewChannel)
        self.grid.addWidget(self.ch_info, self.start_pos[0], self.start_pos[1])
        # cycle through all keys in the key list and find out which ones
        # we have in our channel
        self.ch_segments = []
        self.add_buttons = []
        for i, key in enumerate(self.key_frame_list.sorted_key_list()):
            if key in self.dct['keys']:
                ch_seg = self._create_segment(key)
                self.grid.addWidget(ch_seg, self.start_pos[0],
                                    self.start_pos[1] + i + 1)
                self.ch_segments.append(ch_seg)
            else:
                self._add_plus_button(key, i)

    def _key_index(self, keyname):
        """Column index of keyname in the sorted key frame list, or -1."""
        keyindex = -1
        for i, key in enumerate(self.key_frame_list.sorted_key_list()):
            if keyname == key:
                keyindex = i
        return keyindex

    def _create_segment(self, keyname):
        """Build the segment widget for keyname (analog or digital) and wire
        up its signals."""
        if self.channel_type == 'analog':
            ch_seg = QChannelSegment(keyname, self.dct['keys'][keyname],
                                     self.parent, self.ramp_types)
        elif self.channel_type == 'digital':
            ch_seg = QDigitalChannelSegment(keyname,
                                            self.dct['keys'][keyname],
                                            self.parent, self.ramp_types)
        ch_seg.delete_segment.connect(self.handleDeleteSegment)
        # evil hack: forward segment edits straight to the parent's signal
        ch_seg.edit_segment.connect(self.parent.ramp_changed)
        return ch_seg

    def _add_plus_button(self, keyname, keyindex):
        """Place a '+' button in the column for keyname."""
        add_button = QNamedPushButton('+', keyname, self.parent)
        add_button.clicked_name.connect(self.handleAddSegment)
        self.grid.addWidget(add_button, self.start_pos[0],
                            self.start_pos[1] + keyindex + 1)
        self.add_buttons.append(add_button)

    def edit_channel_info(self, new_ch_name, ch_dct):
        """Propagate an edited name/dict to this channel and its info box."""
        self.set_name(new_ch_name)
        self.dct = ch_dct
        self.ch_info.edit_channel_info(new_ch_name, ch_dct)

    def handleDeleteSegment(self, keyname):
        """Remove the segment for keyname and replace it with a '+' button."""
        index = -1
        for i, ch_seg in enumerate(self.ch_segments):
            if ch_seg.keyname == keyname:
                index = i
        # Fixed: original used `index is not -1`, an identity comparison on
        # an int literal that only works due to CPython small-int caching
        # (SyntaxWarning on Python 3.8+).
        if index != -1:
            ch_del = self.ch_segments.pop(index)
            self.grid.removeWidget(ch_del)
            ch_del.deleteLater()
            self.dct['keys'].pop(keyname)
            self._add_plus_button(keyname, self._key_index(keyname))
            self.parent.ramp_changed.emit()

    def handleAddSegment(self, keyname):
        """Replace the '+' button for keyname with a fresh default segment."""
        index = -1
        for i, add_button in enumerate(self.add_buttons):
            if add_button.name == keyname:
                index = i
                break
        # Fixed: `is not -1` -> `!= -1` (see handleDeleteSegment).
        if index != -1:
            add_button = self.add_buttons.pop(index)
            self.grid.removeWidget(add_button)
            add_button.deleteLater()
            # default new segment: first ramp type, every parameter 0.0
            segment_dct = {}
            ramp_type = sorted(self.ramp_types.keys())[0]
            segment_dct['ramp_type'] = ramp_type
            segment_dct['ramp_data'] = {}
            if self.channel_type == 'digital':
                segment_dct['state'] = False
            for rpn in self.ramp_types[ramp_type]:
                segment_dct['ramp_data'][rpn] = 0.0
            self.dct['keys'][keyname] = segment_dct
            ch_seg = self._create_segment(keyname)
            keyindex = self._key_index(keyname)
            self.grid.addWidget(ch_seg, self.start_pos[0],
                                self.start_pos[1] + keyindex + 1)
            self.ch_segments.append(ch_seg)
            self.parent.ramp_changed.emit()
|
nilq/baby-python
|
python
|
import os
import bpy
import bpy_extras
from ... import ops, plugin, plugin_prefs, registry, utils
from ...version_utils import assign_props, IS_28
from .. import imp
from . import utils as imp_utils, props
# Operator properties for OpImportObject.  Kept in a dict so they can be
# injected as class attributes via exec() on Blender <2.8 (see the IS_28
# branch in the class body) or attached through assign_props() otherwise.
op_import_object_props = {
    'filter_glob': bpy.props.StringProperty(
        default='*.object', options={'HIDDEN'}
    ),
    'directory': bpy.props.StringProperty(subtype="DIR_PATH"),
    'files': bpy.props.CollectionProperty(
        type=bpy.types.OperatorFileListElement
    ),
    'import_motions': props.PropObjectMotionsImport(),
    'mesh_split_by_materials': props.PropObjectMeshSplitByMaterials(),
    'use_motion_prefix_name': props.PropObjectUseMotionPrefixName(),
    'shaped_bones': props.PropObjectBonesCustomShapes(),
    'fmt_version': plugin_prefs.PropSDKVersion()
}
@registry.module_thing
class OpImportObject(ops.BaseOperator, bpy_extras.io_utils.ImportHelper):
    """Blender operator that imports X-Ray *.object files."""
    bl_idname = 'xray_import.object'
    bl_label = 'Import .object'
    bl_description = 'Imports X-Ray object'
    bl_options = {'UNDO', 'PRESET'}
    if not IS_28:
        # Blender <2.8 requires operator properties to be class attributes,
        # so inject each entry of op_import_object_props under its own name.
        for prop_name, prop_value in op_import_object_props.items():
            exec('{0} = op_import_object_props.get("{0}")'.format(prop_name))

    @utils.execute_with_logger
    @utils.set_cursor_state
    def execute(self, _context):
        """Import every selected *.object file using the current options."""
        textures_folder = plugin_prefs.get_preferences().textures_folder_auto
        objects_folder = plugin_prefs.get_preferences().objects_folder_auto
        if not textures_folder:
            # import can proceed without textures; warn only
            self.report({'WARNING'}, 'No textures folder specified')
        if not self.files or (len(self.files) == 1 and not self.files[0].name):
            self.report({'ERROR'}, 'No files selected')
            return {'CANCELLED'}
        # collect the UI options into the import context consumed by imp
        import_context = imp_utils.ImportObjectContext()
        import_context.textures_folder=textures_folder
        import_context.soc_sgroups=self.fmt_version == 'soc'
        import_context.import_motions=self.import_motions
        import_context.split_by_materials=self.mesh_split_by_materials
        import_context.operator=self
        import_context.use_motion_prefix_name=self.use_motion_prefix_name
        import_context.objects_folder=objects_folder
        for file in self.files:
            ext = os.path.splitext(file.name)[-1].lower()
            if ext == '.object':
                import_context.before_import_file()
                imp.import_file(
                    os.path.join(self.directory, file.name), import_context
                )
            else:
                self.report(
                    {'ERROR'}, 'Format of "{}" not recognised'.format(file.name)
                )
        return {'FINISHED'}

    def draw(self, _context):
        """Draw the import options panel in the file browser."""
        layout = self.layout
        row = layout.row()
        row.enabled = False
        row.label(text='%d items' % len(self.files))
        row = layout.split()
        row.label(text='Format Version:')
        row.row().prop(self, 'fmt_version', expand=True)
        layout.prop(self, 'import_motions')
        row = layout.row()
        # prefix-name option only matters when motions are imported
        row.active = self.import_motions
        row.prop(self, 'use_motion_prefix_name')
        layout.prop(self, 'mesh_split_by_materials')
        layout.prop(self, 'shaped_bones')

    def invoke(self, context, event):
        """Seed the operator options from the add-on preferences."""
        prefs = plugin_prefs.get_preferences()
        self.fmt_version = prefs.sdk_version
        self.import_motions = prefs.object_motions_import
        self.mesh_split_by_materials = prefs.object_mesh_split_by_mat
        self.shaped_bones = prefs.object_bones_custom_shapes
        self.use_motion_prefix_name = prefs.use_motion_prefix_name
        return super().invoke(context, event)
# Attach the declared property dict to the operator class
# (see version_utils.assign_props for the version-specific mechanics).
assign_props([
    (op_import_object_props, OpImportObject),
])
def menu_func_import(self, _context):
    """File > Import menu entry for X-Ray *.object files."""
    self.layout.operator(
        OpImportObject.bl_idname,
        text='X-Ray object (.object)',
        icon_value=plugin.get_stalker_icon()
    )
|
nilq/baby-python
|
python
|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Provides a picker for icons."""
from __future__ import annotations
import math
from typing import TYPE_CHECKING
import _ba
import ba
from bastd.ui import popup
if TYPE_CHECKING:
from typing import Any, Tuple, Sequence
class IconPicker(popup.PopupWindow):
"""Picker for icons."""
def __init__(self,
parent: ba.Widget,
position: Tuple[float, float] = (0.0, 0.0),
delegate: Any = None,
scale: float = None,
offset: Tuple[float, float] = (0.0, 0.0),
tint_color: Sequence[float] = (1.0, 1.0, 1.0),
tint2_color: Sequence[float] = (1.0, 1.0, 1.0),
selected_icon: str = None):
# pylint: disable=too-many-locals
from ba.internal import get_purchased_icons
del parent # unused here
del tint_color # unused_here
del tint2_color # unused here
if scale is None:
scale = (1.85
if ba.app.small_ui else 1.65 if ba.app.med_ui else 1.23)
self._delegate = delegate
self._transitioning_out = False
self._icons = [ba.charstr(ba.SpecialChar.LOGO)] + get_purchased_icons()
count = len(self._icons)
columns = 4
rows = int(math.ceil(float(count) / columns))
button_width = 50
button_height = 50
button_buffer_h = 10
button_buffer_v = 5
self._width = (10 + columns * (button_width + 2 * button_buffer_h) *
(1.0 / 0.95) * (1.0 / 0.8))
self._height = self._width * (0.8 if ba.app.small_ui else 1.06)
self._scroll_width = self._width * 0.8
self._scroll_height = self._height * 0.8
self._scroll_position = ((self._width - self._scroll_width) * 0.5,
(self._height - self._scroll_height) * 0.5)
# creates our _root_widget
popup.PopupWindow.__init__(self,
position=position,
size=(self._width, self._height),
scale=scale,
bg_color=(0.5, 0.5, 0.5),
offset=offset,
focus_position=self._scroll_position,
focus_size=(self._scroll_width,
self._scroll_height))
self._scrollwidget = ba.scrollwidget(parent=self.root_widget,
size=(self._scroll_width,
self._scroll_height),
color=(0.55, 0.55, 0.55),
highlight=False,
position=self._scroll_position)
ba.containerwidget(edit=self._scrollwidget, claims_left_right=True)
self._sub_width = self._scroll_width * 0.95
self._sub_height = 5 + rows * (button_height +
2 * button_buffer_v) + 100
self._subcontainer = ba.containerwidget(parent=self._scrollwidget,
size=(self._sub_width,
self._sub_height),
background=False)
index = 0
for y in range(rows):
for x in range(columns):
pos = (x * (button_width + 2 * button_buffer_h) +
button_buffer_h, self._sub_height - (y + 1) *
(button_height + 2 * button_buffer_v) + 0)
btn = ba.buttonwidget(parent=self._subcontainer,
button_type='square',
size=(button_width, button_height),
autoselect=True,
text_scale=1.2,
label='',
color=(0.65, 0.65, 0.65),
on_activate_call=ba.Call(
self._select_icon,
self._icons[index]),
position=pos)
ba.textwidget(parent=self._subcontainer,
h_align='center',
v_align='center',
size=(0, 0),
position=(pos[0] + 0.5 * button_width - 1,
pos[1] + 15),
draw_controller=btn,
text=self._icons[index],
scale=1.8)
ba.widget(edit=btn, show_buffer_top=60, show_buffer_bottom=60)
if self._icons[index] == selected_icon:
ba.containerwidget(edit=self._subcontainer,
selected_child=btn,
visible_child=btn)
index += 1
if index >= count:
break
if index >= count:
break
self._get_more_icons_button = btn = ba.buttonwidget(
parent=self._subcontainer,
size=(self._sub_width * 0.8, 60),
position=(self._sub_width * 0.1, 30),
label=ba.Lstr(resource='editProfileWindow.getMoreIconsText'),
on_activate_call=self._on_store_press,
color=(0.6, 0.6, 0.6),
textcolor=(0.8, 0.8, 0.8),
autoselect=True)
ba.widget(edit=btn, show_buffer_top=30, show_buffer_bottom=30)
    def _on_store_press(self) -> None:
        """Open the icon tab of the store, closing this picker first.

        Requires a signed-in account; otherwise only prompts for sign-in.
        """
        # Imported lazily to avoid a circular import at module load time.
        from bastd.ui import account
        from bastd.ui.store import browser
        if _ba.get_account_state() != 'signed_in':
            account.show_sign_in_prompt()
            return
        self._transition_out()
        browser.StoreBrowserWindow(modal=True,
                                   show_tab='icons',
                                   origin_widget=self._get_more_icons_button)
def _select_icon(self, icon: str) -> None:
if self._delegate is not None:
self._delegate.on_icon_picker_pick(icon)
self._transition_out()
def _transition_out(self) -> None:
if not self._transitioning_out:
self._transitioning_out = True
ba.containerwidget(edit=self.root_widget, transition='out_scale')
def on_popup_cancel(self) -> None:
ba.playsound(ba.getsound('swish'))
self._transition_out()
|
nilq/baby-python
|
python
|
import shutil
import optparse
from os import listdir, mkdir
from os.path import abspath, join, exists
from codecs import open
from lxml import etree
from math import floor
def voc2yolo(output_p, imgs_p, labels_p, split_ratios=None):
    """Convert a PascalVOC dataset (XML labels + images) to YOLO format.

    Parameters:
        output_p: output directory; must already contain 'images' and
            'labels' sub-directories (plus train/test/val when splitting).
        imgs_p: directory holding the source images.
        labels_p: directory holding the PascalVOC .xml annotation files.
        split_ratios: None for no split, otherwise a (train, test, val)
            tuple of integers out of 10, e.g. (8, 8, 2).

    Side effects: writes one .txt label file per image, copies the matching
    image, and writes an 'dataset_meta.txt' summary into output_p.
    """
    img_filetype = ['.jpg', '.jpeg', '.png', '.tiff', '.tif']
    classes_name = {}  # class name -> numeric YOLO class id
    yolo_labels = {}   # file stem -> YOLO label file content
    xmlFiles = list(filter(lambda x: x.endswith('.xml'), listdir(labels_p)))
    n_imgs = len(xmlFiles)
    counter = 0
    for xmlFile in xmlFiles:
        trees = etree.parse(labels_p + '/' + xmlFile)
        width = int(trees.find('size/width').text)
        height = int(trees.find('size/height').text)
        yolo_labels[xmlFile[:-4]] = ''
        for obj in trees.findall('object'):
            bdb = obj.find('bndbox')
            name = obj.find('name').text
            xmin = int(bdb.find('xmin').text)
            ymin = int(bdb.find('ymin').text)
            xmax = int(bdb.find('xmax').text)
            ymax = int(bdb.find('ymax').text)
            # YOLO wants the box centre and size normalised to [0, 1].
            center_x = (xmin + (xmax - xmin) / 2) / width
            center_y = (ymin + (ymax - ymin) / 2) / height
            bdb_w = (xmax - xmin) / width
            bdb_h = (ymax - ymin) / height
            if name not in classes_name:
                classes_name[name] = len(classes_name)
            yolo_labels[xmlFile[:-4]] += '{} {:6f} {:6f} {:6f} {:6f}\n'.format(
                classes_name[name], center_x, center_y, bdb_w, bdb_h)
    if split_ratios is None:
        save_path = join(output_p, 'labels')
        for filename in yolo_labels:
            # BUGFIX: 'with' guarantees the label file handle is closed; the
            # original leaked every handle via open(...).write(...).
            with open(join(save_path, filename + '.txt'), 'w',
                      encoding='utf-8-sig') as out:
                out.write(yolo_labels[filename])
            for ft in img_filetype:
                # BUGFIX: the image is looked up by the label's stem; the
                # original contained a mangled '(unknown)' placeholder here.
                src = join(imgs_p, f'{filename}{ft}')
                dst = join(output_p, f'images/{filename}{ft}')
                if exists(src):
                    shutil.copy(src, dst)
                    break
    else:
        n_train = floor(n_imgs * (split_ratios[0] / 10))
        n_val = round(n_train * (split_ratios[2] / 10))
        for filename in yolo_labels:
            # First n_train stems go to train/val, the remainder to test.
            if counter < n_train:
                split_to = 'train' if counter < n_train - n_val else 'val'
            else:
                split_to = 'test'
            save_path = join(output_p, f'labels/{split_to}')
            with open(join(save_path, f'{filename}.txt'), 'w',
                      encoding='utf-8-sig') as out:
                out.write(yolo_labels[filename])
            for ft in img_filetype:
                src = join(imgs_p, f'{filename}{ft}')
                dst = join(output_p, f'images/{split_to}/{filename}{ft}')
                if exists(src):
                    shutil.copy(src, dst)
                    break
            counter += 1
    with open(f'{output_p}/dataset_meta.txt', 'w', encoding='utf-8-sig') as f:
        content = 'Class Name\tID\n'
        for class_name in classes_name:
            content += f'{class_name}\t{classes_name[class_name]}\n'
        if split_ratios is not None:
            # Same arithmetic as the split loop above, spelled out once.
            content += (f'Number of Training Set : {n_train - n_val}\n'
                        f'Number of Testing Set : {counter - n_train}\n'
                        f'Number of Validation Set : {n_val}')
        else:
            content += f'Number of Dataset : {n_imgs}'
        f.write(content)
    print('Convert completed!')
# Command-line interface for the VOC -> YOLO converter (optparse is legacy;
# argparse is the modern replacement — noted for a future migration).
option_list = [
    optparse.make_option('-o', '--output-path', help='Relative path to directory that used to store converted result.', type='string', dest='outpath'),
    optparse.make_option('-i', '--imgs-path', help='Relative path to directory that contains PascalVoc images', type='string', dest='imgspath'),
    optparse.make_option('-l', '--labels-path', help='Relative path to directory that contains PascalVoc labels', type='string', dest='labelspath'),
    optparse.make_option('-t', '--ttv', help='Train Test Validation ratio, seperate each ratio with comma(,) e.g. 8,2,2 is mean 80% of the dataset is split into training set, 20% is split into testing set and 20% of training set is split into validation set', type='string', dest='ttv', default='8,8,2'),
    optparse.make_option('-s', '--split', help='Split dataset into train, test and validation, input as boolean', type='string', dest='split', default='false')
]
parser = optparse.OptionParser(option_list=option_list)
if __name__ == "__main__":
    curr_path = abspath('.')
    opts, args = parser.parse_args()
    imgs_path = join(curr_path, abspath(opts.imgspath))
    labels_path = join(curr_path, abspath(opts.labelspath))
    if exists(imgs_path) and exists(labels_path):
        # NOTE(review): conversion only runs when the output directory does
        # NOT already exist; re-running against an existing directory silently
        # does nothing — confirm that is intended.
        if not exists(join(curr_path, abspath(opts.outpath))):
            out_path = join(curr_path, abspath(opts.outpath))
            out_imgs_path = join(out_path, 'images')
            out_labels_path = join(out_path, 'labels')
            mkdir(out_path)
            mkdir(out_imgs_path)
            mkdir(out_labels_path)
            if opts.split in ['true', 'True', '1', 'yes']:
                mkdir(join(out_imgs_path, 'train'))
                mkdir(join(out_imgs_path, 'test'))
                mkdir(join(out_imgs_path, 'val'))
                mkdir(join(out_labels_path, 'train'))
                mkdir(join(out_labels_path, 'test'))
                mkdir(join(out_labels_path, 'val'))
                # Validate the -t ratio triple; any malformed input falls
                # back to the (8, 8, 2) default.
                split_ratios = (opts.ttv).split(',')
                if len(split_ratios) != 3:
                    split_ratios = (8, 8, 2)
                if all(list(map(lambda x: x in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'], split_ratios))):
                    try:
                        # train + test must cover the whole dataset (10/10).
                        if int(split_ratios[0])+int(split_ratios[1]) != 10:
                            split_ratios = (8, 8, 2)
                        if int(split_ratios[2]) == int(split_ratios[0]): # Has no training set
                            split_ratios = (8, 8, 2)
                        split_ratios = (int(split_ratios[0]), int(split_ratios[1]), int(split_ratios[2]))
                    except:
                        split_ratios = (8, 8, 2)
                else:
                    split_ratios = (8, 8, 2)
                voc2yolo(out_path, imgs_path, labels_path, split_ratios)
            else:
                voc2yolo(out_path, imgs_path, labels_path)
    else:
        print('Please correct -i and -l')
|
nilq/baby-python
|
python
|
from flask_wtf import FlaskForm
from wtforms import SelectMultipleField, StringField, SubmitField, \
TextAreaField, ValidationError
from wtforms.validators import DataRequired, Optional
class GroupForm(FlaskForm):
    """Main form for Group GUI"""
    name = StringField('Name', validators=[DataRequired()])
    description = TextAreaField('Description', validators=[Optional()])
    users = SelectMultipleField(
        'Assigned users',
        coerce=int, validators=[Optional()]
    )
    roles = SelectMultipleField(
        'Assigned roles',
        coerce=int, validators=[Optional()]
    )
    submit = SubmitField('Save')

    def __init__(self, config_models, **kwargs):
        """Constructor

        :param ConfigModels config_models: Helper for ORM models
        """
        self.config_models = config_models
        self.Group = self.config_models.model('groups')
        # remember the group being edited, if one was passed in via 'obj'
        self.obj = kwargs.get('obj')
        super().__init__(**kwargs)

    def validate_name(self, field):
        """Reject a name already used by a different group."""
        session = self.config_models.session()
        duplicate_query = session.query(self.Group).filter_by(name=field.data)
        if self.obj:
            # editing an existing group: its own name is not a duplicate
            duplicate_query = duplicate_query.filter(
                self.Group.id != self.obj.id)
        duplicate = duplicate_query.first()
        session.close()
        if duplicate is not None:
            raise ValidationError('Name has already been taken.')
|
nilq/baby-python
|
python
|
# URL configuration for the face_api app.
from face_api.views import FaceApi
from django.urls import path,include

# Route the app root ('') to the face-recognition API view.
urlpatterns = [
    path('',FaceApi.as_view())
]
|
nilq/baby-python
|
python
|
from time import sleep

# Count down from 10 to 0, one number per second, then go boom.
print('CONTAGEM REGRESSIVA')
for remaining in reversed(range(11)):
    print(remaining)
    sleep(1)
print('KABUM!!!')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from .didtx import ItemFromConfirmationId, ItemFromDid, Create, RecentItemsFromDid
from .did_document import GetDidDocumentsFromDid, GetDidDocumentsFromCryptoname
from .servicecount import GetServiceCountSpecificDidAndService, GetServiceCountAllServices
|
nilq/baby-python
|
python
|
import sys
import argparse
import collections
from os import remove, path
from subprocess import call
def _compress_png(source_path, destination_path, is_quantization_allowed):
    """Run the external pngquant (optional) and pngcrush tools on one PNG."""
    PNG_CRUSH_TOOL = "./lib/pngcrush_1_8_11_w64.exe"
    PNG_QUANT_TOOL = "./lib/pngquant.exe"
    if path.isfile(destination_path):
        remove(destination_path)
    crush_input = source_path
    quantized_file = None
    if is_quantization_allowed:
        # Quantize into a temp file first, then crush that result.
        quantized_file = source_path + ".quant"
        call([PNG_QUANT_TOOL, "--strip", "--quality=45-75", "--speed", "1",
              source_path, "-o", quantized_file])
        crush_input = quantized_file
    call([PNG_CRUSH_TOOL, "-rem", "alla", "-rem", "text", "-reduce", "-q",
          crush_input, destination_path])
    if quantized_file:
        remove(quantized_file)
def optimize_png(source_path, destination_path, quantization_blacklist_path):
    """Recompress source into destination unless the output is up to date
    and was produced with the same quantization decision as this run."""
    with open(quantization_blacklist_path, 'r') as blacklist_file:
        current_blacklist = set(blacklist_file.read().split())
    previous_path = quantization_blacklist_path + ".prev"
    previous_blacklist = set()
    if path.isfile(previous_path):
        with open(previous_path, 'r') as previous_file:
            previous_blacklist = set(previous_file.read().split())
    key = source_path.lower()
    can_quantize = key not in current_blacklist
    # Without a previous blacklist we assume the last run made the same call.
    if previous_blacklist:
        quantized_last_time = key not in previous_blacklist
    else:
        quantized_last_time = can_quantize
    output_is_stale = (
        not path.isfile(destination_path)
        or path.getmtime(source_path) > path.getmtime(destination_path)
        or can_quantize != quantized_last_time
    )
    if output_is_stale:
        _compress_png(source_path, destination_path, can_quantize)
if __name__ == '__main__':
    # CLI entry point: optimize one PNG, honouring the quantization blacklist.
    parser = argparse.ArgumentParser(description='optimize a png file for the web')
    parser.add_argument('--unquantizable_textures', "-u", help='path to file with a list of unquantizable textures', required=True)
    parser.add_argument('--input', "-i", type=str, help='path to input png', required=True)
    parser.add_argument('--output', "-o", type=str, help='path to output png', required=True)
    args = parser.parse_args()
    optimize_png(args.input, args.output, args.unquantizable_textures)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from dehazer.core.DarkChannel import getDarkChannel
def getTransmission(I, A, w=0.95, patchSize=15):
    """
    Estimate the transmission map t of an RGB image.

    # Arguments
        - I: 3 * M * N numpy array of the input image (RGB channels first,
          M = height, N = width)
        - A: 3-element atmosphere light ([0, L-1]) per RGB channel
        - w: constant (0 < w <= 1) keeping a little haze for distant
          objects (aerial perspective)
        - patchSize: patch size forwarded to the dark-channel computation

    # Returns
        - M * N numpy array of transmission rates in [0.0, 1.0]
    """
    # t = 1 - w * darkchannel(I / A); assumes I / A broadcasts as intended
    # for the shapes documented above — TODO confirm with getDarkChannel.
    return 1 - w * getDarkChannel(I / A, patchSize)
|
nilq/baby-python
|
python
|
import telebot

# Read the bot token from disk so it never lives in source control.
with open("token.txt") as token_file:
    TOKEN = token_file.read().strip()

bot = telebot.TeleBot(TOKEN)

@bot.message_handler(commands=['help'])
def send_welcome(message):
    """Reply to /help with the work-in-progress notice."""
    bot.reply_to(message, "sorry. v1.1 it's not finish")

bot.polling()
|
nilq/baby-python
|
python
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
"""Lookup operations."""
from tensorflow.python.data.experimental.ops.cardinality import assert_cardinality
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
def _check_table_initializer_element_spec(element_spec):
  """Raises an error if the given table initializer element spec is invalid.

  A valid spec is a pair of `tf.TensorSpec`s, both of rank 0 (or unknown
  rank), describing the (key, value) elements of the initializing dataset.
  """
  base_error = ("Datasets used to initialize lookup tables must "
                "produce elements in the form (key, value), where "
                "the keys and values are scalar tensors. ")
  # BUGFIX: removed the dead local `specific_error`, which was assigned
  # None and never used.
  if len(element_spec) != 2:
    raise ValueError(base_error + "However, the given dataset produces "
                     f"{len(element_spec)} components instead of two "
                     "(key, value) components. Full dataset element spec: "
                     f"{element_spec}.")
  if not isinstance(element_spec[0], tensor_spec.TensorSpec):
    raise ValueError(base_error + "However, the given dataset produces "
                     f"non-Tensor keys of type {type(element_spec[0])}.")
  if not isinstance(element_spec[1], tensor_spec.TensorSpec):
    raise ValueError(base_error + "However, the given dataset produces "
                     f"non-Tensor values of type {type(element_spec[1])}.")
  # rank None (unknown) is tolerated; any known rank other than 0 is not.
  if element_spec[0].shape.rank not in (None, 0):
    raise ValueError(
        base_error + "However, the given dataset produces "
        f"non-scalar key Tensors of rank {element_spec[0].shape.rank}.")
  if element_spec[1].shape.rank not in (None, 0):
    raise ValueError(
        base_error + "However, the given dataset produces "
        f"non-scalar value Tensors of rank {element_spec[1].shape.rank}.")
@tf_export("data.experimental.DatasetInitializer")
class DatasetInitializer(lookup_ops.TableInitializerBase):
  """Creates a table initializer from a `tf.data.Dataset`.

  Sample usage:

  >>> keys = tf.data.Dataset.range(100)
  >>> values = tf.data.Dataset.range(100).map(
  ...     lambda x: tf.strings.as_string(x * 2))
  >>> ds = tf.data.Dataset.zip((keys, values))
  >>> init = tf.data.experimental.DatasetInitializer(ds)
  >>> table = tf.lookup.StaticHashTable(init, "")
  >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy()
  array([b'0', b'2', b'4'], dtype=object)

  Attributes:
    dataset: A `tf.data.Dataset` object that produces tuples of scalars. The
      first scalar is treated as a key and the second as value.

  Raises: ValueError if `dataset` doesn't conform to specifications.
  """

  def __init__(self, dataset):
    """Creates a table initializer from a `tf.data.Dataset`.

    Args:
      dataset: A `tf.data.Dataset` object that produces tuples of scalars. The
        first scalar is treated as a key and the second as value.

    Raises: ValueError if `dataset` doesn't conform to specifications.
    Returns: A `DatasetInitializer` object
    """
    # Assert that the dataset element spec is a tuple of TensorSpecs where
    # each tensor is a scalar.
    self.dataset = dataset
    elem_spec = self.dataset.element_spec
    _check_table_initializer_element_spec(elem_spec)
    key_type = elem_spec[0].dtype
    value_type = elem_spec[1].dtype
    super(DatasetInitializer, self).__init__(key_type, value_type)

  def initialize(self, table):
    # Check key/value dtype compatibility before wiring up the init op.
    lookup_ops.check_table_dtypes(table, self._key_dtype, self._value_dtype)
    init_op = ged_ops.initialize_table_from_dataset(
        table.resource_handle, self.dataset._variant_tensor)  # pylint: disable=protected-access
    # Registering in the collection lets TF1 sessions run the initializer.
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
@tf_export("data.experimental.table_from_dataset")
def table_from_dataset(dataset=None,
                       num_oov_buckets=0,
                       vocab_size=None,
                       default_value=None,
                       hasher_spec=lookup_ops.FastHashSpec,
                       key_dtype=dtypes.string,
                       name=None):
  """Returns a lookup table based on the given dataset.

  This operation constructs a lookup table based on the given dataset of pairs
  of (key, value).

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.

  Sample Usages:

  >>> keys = tf.data.Dataset.range(100)
  >>> values = tf.data.Dataset.range(100).map(
  ...     lambda x: tf.strings.as_string(x * 2))
  >>> ds = tf.data.Dataset.zip((keys, values))
  >>> table = tf.data.experimental.table_from_dataset(
  ...     ds, default_value='n/a', key_dtype=tf.int64)
  >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy()
  array([b'0', b'2', b'4'], dtype=object)

  Args:
    dataset: A dataset containing (key, value) pairs.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignation of out-of-vocabulary buckets.
    key_dtype: The `key` data type.
    name: A name for this op (optional).

  Returns:
    The lookup table based on the given dataset.

  Raises:
    ValueError: If
      * `dataset` does not contain pairs
      * The 2nd item in the `dataset` pairs has a dtype which is incompatible
        with `default_value`
      * `num_oov_buckets` is negative
      * `vocab_size` is not greater than zero
      * The `key_dtype` is not integer or string
  """
  elem_spec = dataset.element_spec
  _check_table_initializer_element_spec(elem_spec)
  if default_value is None:
    # -1 only makes sense for numeric value types; anything else must have
    # an explicit default supplied by the caller.
    default_value = -1
    if not (elem_spec[1].dtype.is_integer or elem_spec[1].dtype.is_floating):
      raise ValueError("`default_value` must be specified when creating a "
                       "table from a dataset that produces values of type "
                       f"{elem_spec[1].dtype}.")
  if num_oov_buckets < 0:
    raise ValueError("`num_oov_buckets` must be greater than or equal to 0, "
                     f"got {num_oov_buckets}.")
  # A symbolic (Tensor) vocab_size cannot be validated here; only concrete
  # Python values are range-checked.
  if (not isinstance(vocab_size, ops.Tensor) and vocab_size is not None and
      vocab_size < 1):
    raise ValueError(f"`vocab_size` must be greater than 0, got {vocab_size}.")
  if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
    raise TypeError("`key_dtype` must be either an integer or string type, "
                    f"but got {key_dtype}")
  if vocab_size is not None:
    if isinstance(vocab_size, ops.Tensor):
      vocab_size = math_ops.cast(vocab_size, dtypes.int64)
    # Truncate to the vocabulary size and assert it at runtime.
    dataset = dataset.take(vocab_size)
    dataset = dataset.apply(assert_cardinality(vocab_size))
  with ops.name_scope(name, "string_to_index"):
    initializer = DatasetInitializer(dataset)
    with ops.name_scope(None, "hash_table"):
      table = lookup_ops.StaticHashTableV1(initializer, default_value)
      if num_oov_buckets:
        # Wrap the static table so OOV keys hash into the extra buckets.
        table = lookup_ops.IdTableWithHashBuckets(
            table,
            num_oov_buckets=num_oov_buckets,
            hasher_spec=hasher_spec,
            key_dtype=key_dtype)
      return table
@tf_export("data.experimental.index_table_from_dataset")
def index_table_from_dataset(dataset=None,
                             num_oov_buckets=0,
                             vocab_size=None,
                             default_value=-1,
                             hasher_spec=lookup_ops.FastHashSpec,
                             key_dtype=dtypes.string,
                             name=None):
  """Returns an index lookup table based on the given dataset.

  This operation constructs a lookup table based on the given dataset of keys.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.

  Sample Usages:

  >>> ds = tf.data.Dataset.range(100).map(lambda x: tf.strings.as_string(x * 2))
  >>> table = tf.data.experimental.index_table_from_dataset(
  ...     ds, key_dtype=dtypes.int64)
  >>> table.lookup(tf.constant(['0', '2', '4'], dtype=tf.string)).numpy()
  array([0, 1, 2])

  Args:
    dataset: A dataset of keys.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignation of out-of-vocabulary buckets.
    key_dtype: The `key` data type.
    name: A name for this op (optional).

  Returns:
    The lookup table based on the given dataset.

  Raises:
    ValueError: If
      * `num_oov_buckets` is negative
      * `vocab_size` is not greater than zero
      * The `key_dtype` is not integer or string
  """
  # Pair every key with its enumeration index, flipping (index, key) into
  # the (key, value) order that table_from_dataset expects.
  keyed_dataset = dataset.enumerate().map(lambda index, key: (key, index))
  return table_from_dataset(keyed_dataset, num_oov_buckets, vocab_size,
                            default_value, hasher_spec, key_dtype, name)
|
nilq/baby-python
|
python
|
# Read a number and report its parity (remainder modulo 2 decides).
value = float(input('A number: '))
print('Even' if value % 2 == 0 else 'Odd')
|
nilq/baby-python
|
python
|
from machine import UART, Pin
import time
from httpParser import HttpParser
# Terminal status strings the ESP8266 AT firmware ends its replies with.
ESP8266_OK_STATUS = "OK\r\n"
ESP8266_ERROR_STATUS = "ERROR\r\n"
ESP8266_FAIL_STATUS = "FAIL\r\n"
ESP8266_WIFI_CONNECTED="WIFI CONNECTED\r\n"
ESP8266_WIFI_GOT_IP_CONNECTED="WIFI GOT IP\r\n"
ESP8266_WIFI_DISCONNECTED="WIFI DISCONNECT\r\n"
# Statuses synthesised by this driver (returned from connectWiFi()).
ESP8266_WIFI_AP_NOT_PRESENT="WIFI AP NOT FOUND\r\n"
ESP8266_WIFI_AP_WRONG_PWD="WIFI AP WRONG PASSWORD\r\n"
ESP8266_BUSY_STATUS="busy p...\r\n"
# UART FIFO sizes (bytes) passed to the machine.UART constructor.
UART_Tx_BUFFER_LENGTH = 1024
UART_Rx_BUFFER_LENGTH = 1024*2
class ESP8266:
    """
    This is a class for access ESP8266 using AT commands
    Using this class, you access WiFi and do HTTP Post/Get operations.

    Attributes:
        uartPort (int): The Uart port number of the RPI Pico's UART BUS [Default UART0]
        baudRate (int): UART Baud-Rate for communicating between RPI Pico's & ESP8266 [Default 115200]
        txPin (int): RPI Pico's Tx pin [Default Pin 0]
        rxPin (int): RPI Pico's Rx pin [Default Pin 1]
    """
    # Raw bytes most recently received from / sent to the module.
    __rxData=None
    __txData=None
    # HttpParser holding the most recent HTTP response (see _createHTTPParseObj).
    __httpResponse=None

    def __init__(self, uartPort=0 ,baudRate=115200, txPin=(0), rxPin=(1)):
        """
        The constructor for ESP8266 class

        Parameters:
            uartPort (int): The Uart port number of the RPI Pico's UART BUS [Default UART0]
            baudRate (int): UART Baud-Rate for communicating between RPI Pico's & ESP8266 [Default 115200]
            txPin (int): RPI Pico's Tx pin [Default Pin 0]
            rxPin (int): RPI Pico's Rx pin [Default Pin 1]
        """
        self.__uartPort=uartPort
        self.__baudRate=baudRate
        self.__txPin=txPin
        self.__rxPin=rxPin
        #print(self.__uartPort, self.__baudRate, self.__txPin, self.__rxPin)
        # One UART object per driver instance; buffer sizes are module constants.
        self.__uartObj = UART(self.__uartPort, baudrate=self.__baudRate, tx=Pin(self.__txPin), rx=Pin(self.__rxPin), txbuf=UART_Tx_BUFFER_LENGTH, rxbuf=UART_Rx_BUFFER_LENGTH)
        #print(self.__uartObj)
def _createHTTPParseObj(self):
"""
This is private function for create HTTP response every time
before doing the HTTP Post/Get operation
"""
if(self.__httpResponse != None):
del self.__httpResponse
self.__httpResponse=HttpParser()
else:
#del self.__httpResponse
self.__httpResponse=HttpParser()
    def _sendToESP8266(self, atCMD, delay=1):
        """
        This is private function for complete ESP8266 AT command Send/Receive operation.

        Parameters:
            atCMD: full AT command string including the trailing "\\r\\n"
            delay (int): seconds to wait after writing before polling for data

        Return:
            The raw response when it ends in OK/ERROR/FAIL, the literal
            "ESP BUSY\\r\\n" when the module reports busy, else None.
        """
        self.__rxData=str()
        self.__txData=atCMD
        #print("---------------------------"+self.__txData)
        self.__uartObj.write(self.__txData)
        self.__rxData=bytes()
        time.sleep(delay)
        #while self.__uartObj.any()>0:
        #    self.__rxData += self.__uartObj.read(1)
        # NOTE(review): this busy-wait has no timeout — it hangs forever if
        # the module never answers; confirm whether a timeout is wanted.
        while True:
            #print(".")
            if self.__uartObj.any()>0:
                #print(self.__uartObj.any())
                break
        while self.__uartObj.any()>0:
            self.__rxData += self.__uartObj.read(UART_Rx_BUFFER_LENGTH)
        #print(self.__rxData)
        # NOTE(review): __rxData is bytes while the status constants are str;
        # `str in bytes` raises TypeError on CPython — verify on MicroPython.
        if ESP8266_OK_STATUS in self.__rxData:
            return self.__rxData
        elif ESP8266_ERROR_STATUS in self.__rxData:
            return self.__rxData
        elif ESP8266_FAIL_STATUS in self.__rxData:
            return self.__rxData
        elif ESP8266_BUSY_STATUS in self.__rxData:
            return "ESP BUSY\r\n"
        else:
            return None
def startUP(self):
"""
This funtion use to check the communication between ESP8266 & RPI Pico
Return:
True if communication success with the ESP8266
False if unable to communication with the ESP8266
"""
retData = self._sendToESP8266("AT\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
False
def reStart(self):
"""
This funtion use to Reset the ESP8266
Return:
True if Reset successfully done with the ESP8266
False if unable to reset the ESP8266
"""
retData = self._sendToESP8266("AT+RST\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
time.sleep(5)
#self.startUP()
return self.startUP()
else:
return False
else:
False
def echoING(self, enable=False):
"""
This function use to enable/diable AT command echo [Default set as false for diable Echo]
Return:
True if echo off/on command succefully initiate with the ESP8266
False if echo off/on command failed to initiate with the ESP8266
"""
if enable==False:
retData = self._sendToESP8266("ATE0\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
else:
retData = self._sendToESP8266("ATE1\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
    def getVersion(self):
        """
        This function use to get AT command Version details

        Return:
            Version details (first three "\\r\\n"-separated lines of the
            AT+GMR reply) on success else None
        """
        retData = self._sendToESP8266("AT+GMR\r\n")
        if(retData != None):
            if ESP8266_OK_STATUS in retData:
                #print(str(retData,"utf-8"))
                # Keep everything before the trailing "OK" marker.
                retData = str(retData).partition(r"OK")[0]
                #print(str(retData,"utf-8"))
                # Split on the literal backslash-r-backslash-n text of the
                # repr, then strip the bytes-literal prefix.
                retData = retData.split(r"\r\n")
                retData[0] = retData[0].replace("b'","")
                # NOTE(review): assumes at least three version lines are
                # present; an unexpected reply raises IndexError.
                retData=str(retData[0]+"\r\n"+retData[1]+"\r\n"+retData[2])
                return retData
            else:
                return None
        else:
            return None
def reStore(self):
"""
This function use to reset the ESP8266 into the Factory reset mode & delete previous configurations
Return:
True on ESP8266 restore succesfully
False on failed to restore ESP8266
"""
retData = self._sendToESP8266("AT+RESTORE\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return None
"""
def chcekSYSRAM(self):
#retData = self._sendToESP8266("AT+SYSRAM?\r\n")
self.__rxData=b''
self.__txData="AT+SYSRAM?\r\n"
self.__uartObj.write(self.__txData)
self.__rxData=bytes()
time.sleep(2)
while self.__uartObj.any()>0:
self.__rxData += self.__uartObj.read(1)
print(self.__rxData.decode())
if ESP8266_OK_STATUS in self.__rxData:
return self.__rxData
else:
return 1
"""
    def getCurrentWiFiMode(self):
        """
        This fucntion use to query ESP8266 WiFi's current mode [STA: Station, SoftAP: Software AccessPoint, or Both]

        Return:
            STA if ESP8266's wifi's current mode pre-config as Station
            SoftAP if ESP8266's wifi's current mode pre-config as SoftAP
            SoftAP+STA if ESP8266's wifi's current mode set pre-config Station & SoftAP
            None failed to detect the wifi's current pre-config mode
        """
        retData = self._sendToESP8266("AT+CWMODE_CUR?\r\n")
        if(retData != None):
            # NOTE(review): these are plain substring tests on the whole
            # reply ("+CWMODE_CUR:<n>"), not a parse of the digit after the
            # colon — any stray 1/2/3 in the buffer would match first.
            if "1" in retData:
                return "STA"
            elif "2" in retData:
                return "SoftAP"
            elif "3" in retData:
                return "SoftAP+STA"
            else:
                return None
        else:
            return None
def setCurrentWiFiMode(self, mode=3):
"""
This fucntion use to set ESP8266 WiFi's current mode [STA: Station, SoftAP: Software AccessPoint, or Both]
Parameter:
mode (int): ESP8266 WiFi's [ 1: STA, 2: SoftAP, 3: SoftAP+STA(default)]
Return:
True on successfully set the current wifi mode
False on failed set the current wifi mode
"""
txData="AT+CWMODE_CUR="+str(mode)+"\r\n"
retData = self._sendToESP8266(txData)
if(retData!=None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
def getDefaultWiFiMode(self):
"""
This fucntion use to query ESP8266 WiFi's default mode [STA: Station, SoftAP: Software AccessPoint, or Both]
Return:
STA if ESP8266's wifi's default mode pre-config as Station
SoftAP if ESP8266's wifi's default mode pre-config as SoftAP
SoftAP+STA if ESP8266's wifi's default mode set pre-config Station & SoftAP
None failed to detect the wifi's default pre-config mode
"""
retData = self._sendToESP8266("AT+CWMODE_DEF?\r\n")
if(retData!=None):
if "1" in retData:
return "STA"
elif "2" in retData:
return "SoftAP"
elif "3" in retData:
return "SoftAP+STA"
else:
return None
else:
return None
def setDefaultWiFiMode(self, mode=3):
"""
This fucntion use to set ESP8266 WiFi's default mode [STA: Station, SoftAP: Software AccessPoint, or Both]
Parameter:
mode (int): ESP8266 WiFi's [ 1: STA, 2: SoftAP, 3: SoftAP+STA(default)]
Return:
True on successfully set the default wifi mode
False on failed set the default wifi mode
"""
txData="AT+CWMODE_DEF="+str(mode)+"\r\n"
retData = self._sendToESP8266(txData)
if(retData!=None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
def getAvailableAPs(self):
"""
This fucntion use to query ESP8266 for available WiFi AccessPoins
Retuns:
List of Available APs or None
"""
retData = str(self._sendToESP8266("AT+CWLAP\r\n", delay=10))
if(retData != None):
retData = retData.replace("+CWLAP:", "")
retData = retData.replace(r"\r\n\r\nOK\r\n", "")
retData = retData.replace(r"\r\n","@")
retData = retData.replace("b'(","(").replace("'","")
retData = retData.split("@")
retData =list(retData)
apLists=list()
for items in retData:
data=str(items).replace("(","").replace(")","").split(",")
data=tuple(data)
apLists.append(data)
return apLists
else:
return None
    def connectWiFi(self,ssid,pwd):
        """
        This fucntion use to connect ESP8266 with a WiFi AccessPoins

        Parameters:
            ssid : WiFi AP's SSID
            pwd : WiFi AP's Password

        Retuns:
            WIFI DISCONNECT when ESP8266 failed connect with target AP's credential
            WIFI AP WRONG PASSWORD when ESP8266 tried connect with taget AP with wrong password
            WIFI AP NOT FOUND when ESP8266 cann't find the target AP
            WIFI CONNECTED when ESP8266 successfully connect with the target AP
        """
        txData="AT+CWJAP_CUR="+'"'+ssid+'"'+','+'"'+pwd+'"'+"\r\n"
        #print(txData)
        # Connecting can take a while; poll with a generous 15 s delay.
        retData = self._sendToESP8266(txData, delay=15)
        #print(".....")
        #print(retData)
        if(retData!=None):
            if "+CWJAP" in retData:
                # "+CWJAP:<code>" error reply; map the code to a status.
                # NOTE(review): these are substring tests, not a parse of
                # the digit after the colon — fragile if 1-4 appears
                # elsewhere in the buffer (e.g. inside the SSID echo).
                if "1" in retData:
                    return ESP8266_WIFI_DISCONNECTED
                elif "2" in retData:
                    return ESP8266_WIFI_AP_WRONG_PWD
                elif "3" in retData:
                    return ESP8266_WIFI_AP_NOT_PRESENT
                elif "4" in retData:
                    return ESP8266_WIFI_DISCONNECTED
                else:
                    return None
            elif ESP8266_WIFI_CONNECTED in retData:
                # Connected is only reported once an IP was also obtained.
                if ESP8266_WIFI_GOT_IP_CONNECTED in retData:
                    return ESP8266_WIFI_CONNECTED
                else:
                    return ESP8266_WIFI_DISCONNECTED
            else:
                return ESP8266_WIFI_DISCONNECTED
        else:
            return ESP8266_WIFI_DISCONNECTED
def disconnectWiFi(self):
"""
This fucntion use to disconnect ESP8266 with a connected WiFi AccessPoins
Return:
False on failed to disconnect the WiFi
True on successfully disconnected
"""
retData = self._sendToESP8266("AT+CWQAP\r\n")
if(retData!=None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
def _createTCPConnection(self, link, port=80):
"""
This fucntion use to create connect between ESP8266 and Host.
Just like create a socket before complete the HTTP Get/Post operation.
Return:
False on failed to create a socket connection
True on successfully create and establish a socket connection.
"""
#self._sendToESP8266("AT+CIPMUX=0")
txData="AT+CIPSTART="+'"'+"TCP"+'"'+','+'"'+link+'"'+','+str(port)+"\r\n"
#print(txData)
retData = self._sendToESP8266(txData)
#print(".....")
#print(retData)
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
False
    def doHttpGet(self,host,path,user_agent="RPi-Pico", port=80):
        """
        This fucntion use to complete a HTTP Get operation

        Parameter:
            host (str): Host URL [ex: get operation URL: www.httpbin.org/ip. so, Host URL only "www.httpbin.org"]
            path (str): Get operation's URL path [ex: get operation URL: www.httpbin.org/ip. so, the path "/ip"]
            user-agent (str): User Agent Name [Default "RPi-Pico"]
            post (int): HTTP post number [Default port number 80]

        Return:
            HTTP error code & HTTP response[If error not equal to 200 then the response is None]
            On failed return 0 and None
        """
        if(self._createTCPConnection(host, port) == True):
            self._createHTTPParseObj()
            #getHeader="GET "+path+" HTTP/1.1\r\n"+"Host: "+host+":"+str(port)+"\r\n"+"User-Agent: "+user_agent+"\r\n"+"\r\n";
            getHeader="GET "+path+" HTTP/1.1\r\n"+"Host: "+host+"\r\n"+"User-Agent: "+user_agent+"\r\n"+"\r\n";
            #print(getHeader,len(getHeader))
            # AT+CIPSEND announces the byte count; the module answers ">"
            # when it is ready to receive the payload.
            txData="AT+CIPSEND="+str(len(getHeader))+"\r\n"
            retData = self._sendToESP8266(txData)
            if(retData != None):
                if ">" in retData:
                    retData = self._sendToESP8266(getHeader, delay=2)
                    # Close the TCP connection before parsing the response.
                    self._sendToESP8266("AT+CIPCLOSE\r\n")
                    retData=self.__httpResponse.parseHTTP(retData)
                    return retData, self.__httpResponse.getHTTPResponse()
                else:
                    return 0, None
            else:
                return 0, None
        else:
            self._sendToESP8266("AT+CIPCLOSE\r\n")
            return 0, None
def doHttpPost(self, host, path, content_type, content, user_agent="RPi-Pico", port=80):
    """
    Perform an HTTP POST through the ESP8266's TCP socket.

    Bug fix: the original signature placed the defaulted `user_agent`
    before the required `content_type`/`content` parameters, which is a
    SyntaxError in Python ("non-default argument follows default
    argument"); required parameters now come first.

    Parameters:
        host (str): host only, e.g. "www.httpbin.org"
        path (str): URL path, e.g. "/post"
        content_type (str): e.g. "application/json", "text/plain"
        content (str): request body to upload
        user_agent (str): User-Agent header value [default "RPi-Pico"]
        port (int): TCP port [default 80]
    Return:
        (status, body): HTTP status code and response body; body is None
        unless the status is 200. (0, None) on any failure.
    """
    if not self._createTCPConnection(host, port):
        self._sendToESP8266("AT+CIPCLOSE\r\n")
        return 0, None
    self._createHTTPParseObj()
    postHeader = (
        f"POST {path} HTTP/1.1\r\n"
        f"Host: {host}\r\n"
        f"User-Agent: {user_agent}\r\n"
        f"Content-Type: {content_type}\r\n"
        f"Content-Length: {len(content)}\r\n"
        "\r\n"
        f"{content}\r\n"
    )
    prompt = self._sendToESP8266(f"AT+CIPSEND={len(postHeader)}\r\n")
    # the module answers with '>' when it is ready to accept the payload
    if prompt is None or ">" not in prompt:
        return 0, None
    raw = self._sendToESP8266(postHeader, delay=2)
    self._sendToESP8266("AT+CIPCLOSE\r\n")
    status = self.__httpResponse.parseHTTP(raw)
    return status, self.__httpResponse.getHTTPResponse()
def __del__(self):
    """
    Destructor for the ESP8266 class: only logs that the object is gone.
    """
    print('Destructor called, ESP8266 deleted.')
    pass
|
nilq/baby-python
|
python
|
from easydict import EasyDict
from copy import deepcopy
# DI-engine Decision Transformer (DT) config for D4RL hopper-medium-expert.
hopper_dt_config = dict(
    exp_name='hopper_medium_expert_dt_seed0',
    env=dict(
        env_id='Hopper-v3',
        norm_obs=dict(use_norm=False, ),
        norm_reward=dict(use_norm=False, ),
        collector_env_num=1,
        evaluator_env_num=8,
        use_act_scale=True,
        n_evaluator_episode=8,
        stop_value=6000,
    ),
    policy=dict(
        stop_value=6000,
        cuda=True,
        env_name='Hopper-v3',
        rtg_target=6000,  # max target return to go
        max_eval_ep_len=1000,  # max length of one episode
        num_eval_ep=10 ,  # num of evaluation episode
        batch_size= 64,
        wt_decay=1e-4,
        warmup_steps=10000,
        num_updates_per_iter=100,
        context_len=20,
        n_blocks=3,
        embed_dim=128,
        n_heads=1,
        dropout_p=0.1,
        # NOTE(review): absolute, machine-specific path -- must exist on the host
        log_dir='/home/wangzilin/research/dt/DI-engine/dizoo/d4rl/dt_data/hopper_medium_expert_dt_log',
        model=dict(
            state_dim=11,  # Hopper-v3 observation dim
            act_dim=3,     # Hopper-v3 action dim
            n_blocks=3,
            h_dim=128,
            context_len=20,
            n_heads=1,
            drop_p=0.1,
            continuous=True,
        ),
        discount_factor=0.999,
        nstep=3,
        learn=dict(
            # NOTE(review): absolute, machine-specific path -- must exist on the host
            dataset_path='/mnt/lustre/wangzilin/d4rl_data/hopper-medium-expert-v2.pkl',
            learning_rate=0.0001,
            target_update_freq=100,
            kappa=1.0,
            min_q_weight=4.0,
        ),
        collect=dict(
            unroll_len=1,
        ),
        eval=dict(evaluator=dict(evalu_freq=100,),),  # NOTE(review): DI-engine usually spells this key 'eval_freq' -- confirm it is read
        other=dict(
            eps=dict(
                type='exp',
                start=0.95,
                end=0.1,
                decay=10000,
            ),
            replay_buffer=dict(
                replay_buffer_size=1000,
            ),
        ),
    ),
)
hopper_dt_config = EasyDict(hopper_dt_config)
main_config = hopper_dt_config
hopper_dt_create_config = dict(
    env=dict(
        type='mujoco',
        import_names=['dizoo.mujoco.envs.mujoco_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='dt'),
)
hopper_dt_create_config = EasyDict(hopper_dt_create_config)
create_config = hopper_dt_create_config
if __name__ == "__main__":
    from ding.entry import serial_pipeline_dt
    # serial_pipeline_dt consumes the [main_config, create_config] pair
    config = deepcopy([main_config, create_config])
    serial_pipeline_dt(config, seed=0, max_train_iter=1000)
|
nilq/baby-python
|
python
|
# import libraries
import urllib2
import json
#retrieve the information of a character an parse it into an character object name.data
def getCharacter(name):
    """Fetch a Tibia character from the TibiaData v2 API.

    Returns a list [name, level, vocation, world] plus the guild name as a
    fifth element when the character belongs to a guild.
    (Python 2 module: uses urllib2.)
    """
    Char = []
    name = name.replace(' ', '+')
    charUrl = 'https://api.tibiadata.com/v2/characters/' + name + '.json'
    charPage = urllib2.urlopen(charUrl)
    html = charPage.read()
    j = json.loads(html)
    Char.append(j['characters']['data']['name'])
    Char.append(j['characters']['data']['level'])
    Char.append(j['characters']['data']['vocation'])
    Char.append(j['characters']['data']['world'])
    # guild is optional in the payload; fix: only swallow the expected
    # failures (missing key / null guild) instead of a bare `except:`
    try:
        Char.append(j['characters']['data']['guild']['name'])
    except (KeyError, TypeError):
        pass
    return Char
#the argument in item specifies the column to sort the matrix of characters
def getKey(item):
    """Sort key for character rows: column 1 holds the level."""
    level_column = item[1]
    return level_column
def getHuntingPals(name):
Char = getCharacter(name)
level = Char[1]
voc = Char[2]
world = Char[3]
if len(Char) == 5:
guild = Char[4]
else:
guild = ' '
#get the world players
worldUrl = 'https://api.tibiadata.com/v2/world/' + world + '.json'
worldPage = urllib2.urlopen(worldUrl)
html = worldPage.read()
j = json.loads(html)
#get the number of players
cOnline = len(j['world']['players_online'])
#create a matrix to temporarily store the online players information
#and sorth the players by vocation
w, h = 4, cOnline;
charactersOnline = [[0 for x in range(w)] for y in range(h)]
knights = []
sorcerers = []
druids = []
paladins = []
print str(cOnline), 'Players online'
getGuild = 0
for i in range(0, cOnline):
#print j['world']['players_online'][i]['name'], j['world']['players_online'][i]['level'], j['world']['players_online'][i]['vocation']
charactersOnline[i][0] = j['world']['players_online'][i]['name']
charactersOnline[i][1] = j['world']['players_online'][i]['level']
charactersOnline[i][2] = j['world']['players_online'][i]['vocation']
#sort by vocation jus tthe characters with sharing level min of your char (2/3) and max (3/2) or yours
if ( int(charactersOnline[i][1]) >= (int(level)*2/3) and int(charactersOnline[i][1]) <= (int(level)*3/2)):
#check if they have a guild or not
if getGuild == 1:
try:
charactersOnline[i][3] = getCharacter(charactersOnline[i][0])[4]
except:
charactersOnline[i][3] = ' '
else:
charactersOnline[i][3] = ' '
if (charactersOnline[i][2] == 'Elite Knight' or charactersOnline[i][2] == 'Knight'):
knights.append([charactersOnline[i][0],charactersOnline[i][1],charactersOnline[i][3]])
elif (charactersOnline[i][2] == 'Master Sorcerer' or charactersOnline[i][2] == 'Sorcerer'):
sorcerers.append([charactersOnline[i][0],charactersOnline[i][1],charactersOnline[i][3]])
elif (charactersOnline[i][2] == 'Elder Druid' or charactersOnline[i][2] == 'Druid'):
druids.append([charactersOnline[i][0],charactersOnline[i][1],charactersOnline[i][3]])
elif (charactersOnline[i][2] == 'Royal Paladin' or charactersOnline[i][2] == 'paladin'):
paladins.append([charactersOnline[i][0],charactersOnline[i][1],charactersOnline[i][3]])
return charactersOnline, knights,sorcerers,paladins,druids
def main():
    # Demo entry point: look up partners for a hard-coded character and
    # print each vocation list sorted by level (getKey sorts on column 1).
    doIt = getHuntingPals('General Direction')
    print 'Knights:',sorted(doIt[1],key=getKey)
    print 'Sorcerers:',sorted(doIt[2],key=getKey)
    print 'Paladins',sorted(doIt[3],key=getKey)
    print 'Druids:',sorted(doIt[4],key=getKey)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import typing
from collections import defaultdict
import torch.nn as nn
from .jit_handles import (
addmm_flop_jit,
conv_flop_jit,
einsum_flop_jit,
get_jit_model_analysis,
matmul_flop_jit,
)
# A dictionary that maps supported operations to their flop count jit handles.
_SUPPORTED_OPS: typing.Dict[str, typing.Callable] = {
    "aten::addmm": addmm_flop_jit,          # fully-connected (Linear) layers
    "aten::_convolution": conv_flop_jit,    # convolution layers
    "aten::einsum": einsum_flop_jit,
    "aten::matmul": matmul_flop_jit,
}
def flop_count(
    model: nn.Module,
    inputs: typing.Tuple[object, ...],
    supported_ops: typing.Union[typing.Dict[str, typing.Callable], None] = None,
) -> typing.Tuple[typing.DefaultDict[str, float], typing.Counter[str]]:
    """
    Given a model and an input to the model, compute the Gflops of the given
    model. Note the input should have a batch size of 1.
    Args:
        model (nn.Module): The model to compute flop counts.
        inputs (tuple): Inputs that are passed to `model` to count flops.
            Inputs need to be in a tuple.
        supported_ops (dict(str,Callable) or None) : By default, we count
            flops for convolution layers, fully connected layers,
            torch.matmul and torch.einsum operations. We define a FLOP as a
            single atomic Multiply-Add. Users can provide customized
            supported_ops for counting flops if desired.
    Returns:
        tuple[defaultdict, Counter]: A dictionary that records the number of
            gflops for each operation and a Counter that records the number
            of skipped operations.
    """
    assert isinstance(inputs, tuple), "Inputs need to be in a tuple."
    # Fall back to the default handle table when no (or an empty) mapping
    # is supplied.
    ops = supported_ops if supported_ops else _SUPPORTED_OPS.copy()
    flop_counter, skipped = get_jit_model_analysis(model, inputs, ops)
    # Report every operation the analysis could not attribute flops to.
    for op_name, times in skipped.items():
        logging.warning("Skipped operation {} {} time(s)".format(op_name, times))
    # Scale raw flop counts down to gigaflops.
    gflops = defaultdict(float)
    for op_name, count in flop_counter.items():
        gflops[op_name] = count / 1e9
    return gflops, skipped
|
nilq/baby-python
|
python
|
from direct.directnotify import DirectNotifyGlobal
from otp.chat.TalkAssistant import TalkAssistant
from otp.chat.ChatGlobals import *
class TTTalkAssistant(TalkAssistant):
    # Toontown-specific talk assistant: adds SpeedChat support for
    # toontask-progress phrases on top of the generic TalkAssistant.
    notify = DirectNotifyGlobal.directNotify.newCategory('TTTalkAssistant')
    def sendToonTaskSpeedChat(self, taskId, toNpcId, toonProgress, msgIndex):
        # Fire the generic SpeedChat event, then the toontask-specific one
        # carrying the task parameters (messenger is the Panda3D global
        # event bus -- provided as a builtin at runtime).
        messenger.send(SCChatEvent)
        messenger.send('chatUpdateSCToontask', [taskId, toNpcId, toonProgress, msgIndex])
|
nilq/baby-python
|
python
|
import logging
from aiocron import crontab
@crontab("*/1 * * * *")
def test_task():
    # Scheduled by aiocron to run every minute on the running event loop.
    # Only emits a DEBUG-level line, so nothing is visible unless logging
    # is configured for DEBUG.
    logging.debug("Example task")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/shp_info.py
Description: This code gives shapefile information.
Author: Maziyar Boustani (github.com/MBoustani)
'''
try:
import ogr
except ImportError:
from osgeo import ogr
#example shapefile file (NOTE: Python 2 script -- print statements below).
shp_file = '../static_files/shapefile/populated_places/ne_50m_populated_places.shp'
#set the driver to ESRI Shapefile
driver = ogr.GetDriverByName('ESRI Shapefile')
#open shapefile
shp_datasource = driver.Open(shp_file)
#get shapefile name
shp_name = shp_datasource.GetName()
#get driver name
driver_name = shp_datasource.GetDriver().GetName()
#get number of layers
layer_num = shp_datasource.GetLayerCount()
#store layer info (only the last layer's info survives the loop below)
layer_info = {}
layer_table = {}
if layer_num > 0:
    for i in range(layer_num):
        #get shapefile layer
        layer = shp_datasource.GetLayerByIndex(i)
        #get layer name
        layer_name = layer.GetName()
        layer_info['name'] = layer_name
        #get layer type
        geom_type = layer.GetGeomType()
        #convert layer type to geometry name
        geom_name = ogr.GeometryTypeToName(geom_type)
        layer_info['geometry'] = geom_name
        #get number of features in layer
        num_feature = layer.GetFeatureCount()
        layer_info['number of features'] = num_feature
        #get layer extent
        layer_extent = layer.GetExtent()
        layer_info['extent'] = layer_extent
        #get layer spatial reference (projection info) as WKT
        layer_spatial_ref = layer.GetSpatialRef()
        spatial_ref_name = layer_spatial_ref.ExportToWkt()
        layer_info['spatial reference'] = spatial_ref_name
        #get layer unit
        layer_unit = layer_spatial_ref.GetLinearUnitsName()
        layer_info['unit'] = layer_unit
        #get layer number of columns in shp attribute table
        layer_defn = layer.GetLayerDefn()
        num_field_col = layer_defn.GetFieldCount()
        layer_info['number of fields'] = num_field_col
        #record name, type and width for every attribute-table field
        for field in range(num_field_col):
            field_name = layer_defn.GetFieldDefn(field).GetName()
            field_width = layer_defn.GetFieldDefn(field).GetWidth()
            field_code = layer_defn.GetFieldDefn(field).GetType()
            field_type = layer_defn.GetFieldDefn(field).GetFieldTypeName(field_code)
            layer_table[field_name] = [field_type, field_width]
#print all shapefile information
print "Shapefile Name: {0}".format(shp_name)
print "Driver Name: {0}".format(driver_name)
print "Number of Layer: {0}".format(layer_num)
print "    Layer Name: {0}".format(layer_info['name'])
print "        Geometry: {0}".format(layer_info['geometry'])
print "        Number of Features: {0}".format(layer_info['number of features'])
print "        Layer Extent: {0}".format(layer_info['extent'])
print "        Spatial Reference: {0}".format(layer_info['spatial reference'])
print "        Unit: {0}".format(layer_info['unit'])
print "        Number of Fields: {0}".format(layer_info['number of fields'])
print "            <Name>: <Type>(<width>)"
for field in layer_table:
    print "            {0}: {1}({2})".format(field, layer_table[field][0], layer_table[field][1])
|
nilq/baby-python
|
python
|
import tkinter as Tk
import sys
sys.path.append("/Users/PeterLevett/Documents/My Actual Documents/SideProjects/ORDERM8/ORDERM8_V2/SQL_functions")
import editentry
class CustomerpageWindow(Tk.Frame):
    """Tk frame showing one customer's details plus an inline field editor.

    `customer` rows are indexed tuples; `rolodex_converter` maps the
    human-readable field label to the row/column index used by the SQL
    edit helper.
    """
    def __init__(self, parent):
        Tk.Frame.__init__(self, parent)
        self.parent = parent
        self.basic_information_window = Tk.Frame(self)
        self.update_information_frame = Tk.Frame(self)
        self.update_entry = Tk.Entry(self.update_information_frame)
        self.edit_entry = editentry.EditEntry()
        # label -> database column index for EditEntry.edit_rolodex_entry
        self.rolodex_converter = {"First Name": 3,
                                  "Last Name": 2,
                                  "Address": 5,
                                  "Phone Number": 4,
                                  "Payment Method": 6,
                                  "Order Method": 8,}

    def clear_customer_page_window(self):
        """Destroy all child widgets and recreate the two empty sub-frames."""
        for widget in self.winfo_children():
            widget.destroy()
        self.basic_information_window = Tk.Frame(self)
        self.update_information_frame = Tk.Frame(self)

    def generate_customer_page(self, customer):
        """Render the read-only summary labels for `customer`."""
        customer_name = customer[2] + " " + customer[1]
        self.basic_information_window.grid(row=0, column=0)
        Tk.Label(self.basic_information_window, text=customer_name).grid(row=0, column=0, sticky=Tk.W)
        Tk.Label(self.basic_information_window, text="Address: " + customer[4]).grid(row=1, column=0, sticky=Tk.W)
        Tk.Label(self.basic_information_window, text="Phone Number: " + customer[3]).grid(row=2, column=0, sticky=Tk.W)
        Tk.Label(self.basic_information_window, text="Payment Method: " + customer[5]).grid(row=3, column=0, sticky=Tk.W)
        Tk.Label(self.basic_information_window, text="Order Method: " + customer[7]).grid(row=4, column=0, sticky=Tk.W)

    def update_customer_information(self, customer):
        """Render the field-picker + entry used to edit one customer field."""
        self.update_information_frame.grid(row=1, column=0)
        self.update_entry.grid(row=0, column=0, columnspan=2)
        option_variable = Tk.StringVar(self.update_information_frame)
        option_variable.set('First Name')
        # Fix: the originals assigned `x = Widget(...).grid(...)`, which
        # binds None (grid() returns None); the dead assignments are gone.
        Tk.OptionMenu(self.update_information_frame,
                      option_variable,
                      "First Name",
                      "Last Name",
                      "Address",
                      "Phone Number",
                      "Payment Method",
                      "Order Method").grid(row=1, column=0)
        Tk.Button(self.update_information_frame,
                  text="Update",
                  command=lambda: self.update_db(customer, option_variable)).grid(row=1, column=1)

    def update_db(self, customer, option_variable):
        """Persist the entry's value to the chosen field, then re-render."""
        desired_update = self.update_entry.get()
        self.edit_entry.edit_rolodex_entry(self.rolodex_converter[option_variable.get()], desired_update, customer[0])
        self.clear_customer_page_window()
        self.parent.display_customerpage(customer)
|
nilq/baby-python
|
python
|
from fractions import Fraction as fr, gcd
def cancel_digits(num, den):
    """
    "Cancel" the digits num and den share (Project Euler #33 style): every
    nonzero digit appearing in both numbers is struck out once from each.

    Returns the resulting Fraction, or -1 when nothing cancels or the
    cancelled result is invalid (all digits gone, or a zero denominator).
    """
    is_cancelled = False
    numstr, denstr = str(num), str(den)
    for i in range(len(numstr)):
        for j in range(len(denstr)):
            if numstr[i] == denstr[j] and numstr[i] != '0':
                is_cancelled = True
                # mark (not delete) so positions i/j stay valid in the loops
                numstr = numstr.replace(numstr[i], 'a', 1)
                denstr = denstr.replace(denstr[j], 'b', 1)
    numstr = numstr.replace('a', '')
    denstr = denstr.replace('b', '')
    if is_cancelled == False:
        return -1
    try:
        return fr(int(numstr), int(denstr))
    # fix: was a bare `except:`; only the expected failures are swallowed --
    # ValueError for an empty string, ZeroDivisionError for a 0 denominator
    except (ValueError, ZeroDivisionError):
        return -1
# Project Euler 33: multiply the non-trivial two-digit fractions whose
# naive digit-cancelled form equals their true value; the answer is the
# denominator of that product in lowest terms. (Python 2 print below.)
ans = 1
for den in range(10, 100):
    for num in range(10, den):
        frac = fr(num, den)
        frac2 = cancel_digits(num, den)
        if frac == frac2:
            ans *= frac
print ans.denominator
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
Layer
'''
import poller
class Layer(poller.Callback):
    '''
    Abstract class, derived from poller.Callback, used to add
    layers to the protocol stack.
    '''
    def __init__(self):
        # neighbouring layers in the stack; wired up via setTop/setBottom
        self._top = None
        self._bottom = None
    def setBottom(self, bottom):
        ''' Set this layer's lower layer.
        bottom: the lower-layer object
        '''
        self._bottom = bottom
    def setTop(self, top):
        ''' Set this layer's upper layer.
        top: the upper-layer object
        '''
        self._top = top
    def handle(self):
        '''Handle the event associated with this callback. Typically
        reads fileobj and processes the data read. Subclasses must
        override this method.'''
        pass
    def handle_timeout(self):
        ''' Handle the internal timeout interrupt.
        '''
        pass
    def receiveFromBottom(self, data):
        ''' Receive a frame from the lower layer.
        data: bytearray with the received frame
        '''
        pass
    def receiveFromTop(self, data):
        ''' Receive a frame from the upper layer for transmission.
        data: bytearray with the frame to be sent
        '''
        pass
    def sendToLayer(self, data):
        ''' Hand the outgoing frame to the lower layer.
        data: bytearray with the frame to transmit
        '''
        pass
    def notifyLayer(self, data):
        ''' Deliver the received frame to the upper layer.
        data: bytearray with the frame to deliver
        '''
        pass
|
nilq/baby-python
|
python
|
"""
Orkut OAuth support.
This contribution adds support for Orkut OAuth service. The scope is
limited to http://orkut.gmodules.com/social/ by default, but can be
extended with ORKUT_EXTRA_SCOPE on project settings. Also name, display
name and emails are the default requested user data, but extra values
can be specified by defining ORKUT_EXTRA_DATA setting.
OAuth settings ORKUT_CONSUMER_KEY and ORKUT_CONSUMER_SECRET are needed
to enable this service support.
"""
import urllib
from django.utils import simplejson
from social_auth.utils import setting
from social_auth.backends import OAuthBackend, USERNAME
from social_auth.backends.google import BaseGoogleOAuth
# Orkut configuration
# default scope, specify extra scope in settings as in:
# ORKUT_EXTRA_SCOPE = ['...']
ORKUT_SCOPE = ['http://orkut.gmodules.com/social/']
ORKUT_REST_ENDPOINT = 'http://www.orkut.com/social/rpc'
ORKUT_DEFAULT_DATA = 'name,displayName,emails'
class OrkutBackend(OAuthBackend):
    """Orkut OAuth authentication backend"""
    name = 'orkut'

    def get_user_details(self, response):
        """Build the user-details dict from an Orkut profile response."""
        # the primary email is optional in the payload
        try:
            email = response['emails'][0]['value']
        except (KeyError, IndexError):
            email = ''
        display_name = response['displayName']
        real_name = response['name']
        details = {USERNAME: display_name}
        details['email'] = email
        details['fullname'] = display_name
        details['first_name'] = real_name['givenName']
        details['last_name'] = real_name['familyName']
        return details
class OrkutAuth(BaseGoogleOAuth):
    """Orkut OAuth authentication mechanism"""
    AUTH_BACKEND = OrkutBackend
    # Django settings holding the OAuth consumer credentials
    SETTINGS_KEY_NAME = 'ORKUT_CONSUMER_KEY'
    SETTINGS_SECRET_NAME = 'ORKUT_CONSUMER_SECRET'
    def user_data(self, access_token):
        """Load user data from the Orkut RPC endpoint.

        Requests the default fields plus any configured via
        ORKUT_EXTRA_DATA; returns the parsed 'data' payload, or None when
        the response is not valid JSON or lacks that key.
        (Python 2 module: uses urllib.urlopen.)
        """
        fields = ORKUT_DEFAULT_DATA
        if setting('ORKUT_EXTRA_DATA'):
            fields += ',' + setting('ORKUT_EXTRA_DATA')
        scope = ORKUT_SCOPE + setting('ORKUT_EXTRA_SCOPE', [])
        params = {'method': 'people.get',
                  'id': 'myself',
                  'userId': '@me',
                  'groupId': '@self',
                  'fields': fields,
                  'scope': ' '.join(scope)}
        request = self.oauth_request(access_token, ORKUT_REST_ENDPOINT, params)
        response = urllib.urlopen(request.to_url()).read()
        try:
            return simplejson.loads(response)['data']
        except (ValueError, KeyError):
            return None
    def oauth_request(self, token, url, extra_params=None):
        # Always inject the (possibly extended) Orkut scope before
        # delegating to the Google OAuth request builder.
        extra_params = extra_params or {}
        scope = ORKUT_SCOPE + setting('ORKUT_EXTRA_SCOPE', [])
        extra_params['scope'] = ' '.join(scope)
        return super(OrkutAuth, self).oauth_request(token, url, extra_params)
# Backend definition: maps the backend name to its auth class so
# social-auth can discover it.
BACKENDS = {
    'orkut': OrkutAuth,
}
|
nilq/baby-python
|
python
|
import os.path
import tempfile
from youtube_dl import YoutubeDL
def download_subtitles(video_id, lang):
    """Return the .vtt subtitle text for a YouTube video, or None.

    Subtitles are cached in the system temp directory; the download is
    skipped when the .vtt file already exists.
    """
    temp_dir = tempfile.gettempdir()
    file_name = f'{temp_dir}/{video_id}.{lang}.vtt'

    def _read_cached():
        with open(file_name) as fh:
            return fh.read()

    # cache hit: no network access needed
    if os.path.isfile(file_name):
        return _read_cached()

    ydl_opts = {
        'subtitleslangs': [lang],
        'writesubtitles': True,
        'writeautomaticsub': True,
        'outtmpl': temp_dir + "/" + "%(id)s.%(ext)s",
        'skip_download': True,  # subtitles only, not the video itself
        'quiet': True,
    }
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download([f'https://www.youtube.com/watch?v={video_id}'])

    # the requested language may simply not exist for this video
    return _read_cached() if os.path.isfile(file_name) else None
|
nilq/baby-python
|
python
|
"""MongoDB related commands for dhutil's CLI."""
import click
from dhutil.drive_ops import (
sync_google_drive_acceptance_status_to_mongo,
sync_uptodate_teams_from_mongo,
send_conf_confirm_emails,
)
@click.group(help="Google Drive related commands.")
def drive():
    """Google Drive related commands."""
    # Root click group; the @drive.command() functions below attach to it.
    pass
_SYNC_ACCEPTED_MSG = "Sync Google Drive acceptance status to MongoDB"


@drive.command(help=_SYNC_ACCEPTED_MSG)
def sync_accepted():
    """Sync Google Drive acceptance status to MongoDB."""
    # Fix: the former `__doc__ = _SYNC_ACCEPTED_MSG` inside the body was a
    # no-op local assignment, not a docstring.
    sync_google_drive_acceptance_status_to_mongo()
_SYNC_TEAM_MSG = "Sync Google Drive user team from MongoDB"


@drive.command(help=_SYNC_TEAM_MSG)
def sync_team():
    """Sync Google Drive user team from MongoDB."""
    # Fix: the former `__doc__ = _SYNC_TEAM_MSG` inside the body was a
    # no-op local assignment, not a docstring.
    sync_uptodate_teams_from_mongo()
_CONF_CONFORM_MSG = "Send DataConf confirmation email."


@drive.command(help=_CONF_CONFORM_MSG)
def conf_confirm():
    """Send DataConf confirmation email."""
    # Fix: the former `__doc__ = _CONF_CONFORM_MSG` inside the body was a
    # no-op local assignment, not a docstring.
    send_conf_confirm_emails()
|
nilq/baby-python
|
python
|
from battleship.interface.tty import TTY
from battleship.player.player import Player
__author__ = 'jitrixis'
class BattleShip:
    # Runs the whole game from the constructor: each player enters a name
    # and places a field, then the two alternate attacks in an endless
    # loop (presumably Player.attack terminates the process on victory --
    # TODO confirm; nothing here breaks the loop).
    def __init__(self):
        self.__players = (Player(), Player())
        # interactive setup for player 1
        TTY.cls()
        TTY.player_show_turn('PLAYER 1')
        self.__players[0].view_set_name()
        self.__players[0].view_set_field()
        # interactive setup for player 2
        TTY.cls()
        TTY.player_show_turn('PLAYER 2')
        self.__players[1].view_set_name()
        self.__players[1].view_set_field()
        TTY.cls()
        while True:
            self.__players[0].attack(self.__players[1])
            self.__players[1].attack(self.__players[0])
|
nilq/baby-python
|
python
|
# various Amiga Math utils
import struct
import math
def int32(x):
    """Reinterpret an unsigned 32-bit value as a signed 32-bit integer."""
    return struct.unpack("i", struct.pack("I", x))[0]
def int16(x):
    """Reinterpret an unsigned 16-bit value as a signed 16-bit integer."""
    return struct.unpack("h", struct.pack("H", x))[0]
def int8(x):
    """Reinterpret an unsigned 8-bit value as a signed 8-bit integer."""
    return struct.unpack("b", struct.pack("B", x))[0]
def signext16(x):
    """Sign-extend a 16-bit value to 32 bits (unsigned representation)."""
    return (0xFFFF0000 | x) if x & 0x8000 else x
def double_to_regs(number):
    """convert Python double to (hi, lo) reg pair"""
    # big-endian IEEE-754 double split into two 32-bit big-endian words
    return struct.unpack(">LL", struct.pack(">d", number))
def regs_to_double(hi, lo):
    """convert (hi, lo) Amiga reg pair to double"""
    packed = struct.pack(">LL", hi, lo)
    (value,) = struct.unpack(">d", packed)
    return value
def float_to_reg(number):
    """convert Python float to reg value (IEEE single bit pattern)"""
    try:
        packed = struct.pack(">f", number)
    except OverflowError:
        # magnitude exceeds float32 range: clamp to signed infinity
        packed = struct.pack(">f", float("inf") if number > 0.0 else float("-inf"))
    return struct.unpack(">L", packed)[0]
def reg_to_float(reg):
    """convert reg value (IEEE single bit pattern) to Python float"""
    (value,) = struct.unpack(">f", struct.pack(">L", reg))
    return value
# Motorola FFP
#
# 32 bit:
# 31 23 15 7 0
# MMMMMMMM MMMMMMMM MMMMMMMM SEEEEEEE
#
# - leading one in mantissa visible (one bit less accuracy)
# - 7bit exponent in excess-64 notation
#
# Single IEEE:
# SEEEEEEE EMMMMMMM MMMMMMMM MMMMMMMM
#
# - leading one is omitted
# - 8bit exponent in excess-128 notation
def float_to_ffp_reg(number):
    """convert Python number to ffp register

    Motorola FFP layout (see comment block above): 24-bit mantissa with
    the leading one visible in bits 31..8, sign in bit 7, 7-bit excess-64
    exponent in bits 6..0.  NaN maps to 0, infinities to the largest
    representable magnitude.
    """
    # zero
    if number == 0.0:
        return 0
    # nan -> zero
    if math.isnan(number):
        return 0
    # inf -> largest value
    if math.isinf(number):
        if number < 0.0:
            return 0xFFFFFFFF
        else:
            return 0xFFFFFF7F
    # convert float to 32bit num (IEEE single precision bit pattern)
    b = struct.pack(">f", number)
    i = struct.unpack(">L", b)[0]
    # extract sign bit (b[0] is an int under Python 3 bytes indexing)
    sign = b[0] & 0x80
    # extract 8 bit exponent
    exp = (i >> 23) & 0xFF
    # too small?  (below the smallest FFP exponent -> underflow to 0)
    if exp <= 0x3E:
        return 0
    # too large?  (above the largest FFP exponent -> clamp to max)
    elif exp >= 0xBD:
        exp = 0x7F
        mantissa = 0xFFFFFF00
    # convert: rebias exponent from excess-126 (effective) to excess-64
    else:
        exp -= 0x3E
        # mantissa (add leading one, which IEEE omits but FFP keeps)
        mantissa = (i << 8) & 0x7FFFFF00
        mantissa |= 0x80000000
    # resulting ffp: mantissa | sign(bit 7) | exponent(bits 6..0)
    ffp = mantissa | sign | exp
    return ffp
def ffp_reg_to_float(ffp):
    """convert ffp register to Python float

    Reverses float_to_ffp_reg: sign sits in bit 7, the excess-64 exponent
    in bits 6..0, and the mantissa (leading one included) in bits 31..8.
    """
    if not ffp:
        return 0.0
    sign = ffp & 0x80
    # rebias 7-bit excess-64 exponent into the IEEE 8-bit range
    exp = (ffp & 0x7F) + 0x3E
    # drop FFP's explicit leading one; IEEE keeps it implicit
    mantissa = (ffp >> 8) & 0x007FFFFF
    ieee_bits = (sign << 24) | (exp << 23) | mantissa
    return struct.unpack(">f", struct.pack(">L", ieee_bits))[0]
# Amiga constants
Amiga_INT_MAX = 2147483647              # signed 32-bit max
Amiga_INT_MIN = -2147483648             # signed 32-bit min
Amiga_DBL_POS_MAX = regs_to_double(0x7FEFFFFF, 0xFFFFFFFF)  # largest finite double
Amiga_DBL_NEG_MAX = regs_to_double(0xFFEFFFFF, 0xFFFFFFFF)  # most negative finite double
Amiga_DBL_NAN1 = regs_to_double(0x7FF10000, 0x00000000)     # one NaN bit pattern
Amiga_FLT_POS_MAX = reg_to_float(0x7F7FFFFF)                # largest finite single
Amiga_FLT_NEG_MAX = reg_to_float(0xFF7FFFFF)                # most negative finite single
|
nilq/baby-python
|
python
|
from .consts import ActionType, ClaimingType, TILE_SET
from .player_data import Action, Claiming
# Botzone protocol keyword -> internal ActionType.  CHI/PENG/GANG/BUGANG
# are the protocol's (romanized Chinese) names for chow/pung/kong/meld-kong.
str2act_dict = {
    'PASS': ActionType.PASS,
    'DRAW': ActionType.DRAW,
    'PLAY': ActionType.PLAY,
    'CHI': ActionType.CHOW,
    'PENG': ActionType.PUNG,
    'GANG': ActionType.KONG,
    'BUGANG': ActionType.MELD_KONG,
    'HU': ActionType.HU
}
# Inverse mapping: internal ActionType -> Botzone protocol keyword.
act2str_dict = {
    ActionType.PASS: 'PASS',
    ActionType.DRAW: 'DRAW',
    ActionType.PLAY: 'PLAY',
    ActionType.CHOW: 'CHI',
    ActionType.PUNG: 'PENG',
    ActionType.KONG: 'GANG',
    ActionType.MELD_KONG: 'BUGANG',
    ActionType.HU: 'HU'
}
def str2act(s: str) -> ActionType:
    """Map a Botzone action keyword to its ActionType (KeyError if unknown)."""
    return str2act_dict[s]
def act2str(act: ActionType) -> str:
    """Map an ActionType back to its Botzone keyword."""
    return act2str_dict[act]
def response2str(act: Action) -> str:
    """Serialize an Action into a Botzone response string ('TYPE [tile]')."""
    parts = [act2str(act.act_type)]
    if act.tile is not None:
        parts.append(act.tile)
    return ' '.join(parts)
def request2str(act: Action, player_id: int) -> str:
    """Serialize an Action as the Botzone request line seen by player_id."""
    if act.act_type == ActionType.DRAW:
        # our own draw reveals the tile ('2 <tile>'); other players' draws
        # are announced without it ('3 <player> DRAW')
        if act.player == player_id:
            return f'2 {act.tile}'
        return f'3 {act.player} DRAW'
    line = f'3 {act.player} {act2str(act.act_type)}'
    return line if act.tile is None else f'{line} {act.tile}'
def request2obs(request: dict) -> dict:
    """Rebuild the observation state by replaying the Botzone history.

    Replays every request line in `request['requests']` (seat/wind info,
    initial hand, then one action per turn) to reconstruct the hand,
    claims, discard counts and last-action fields for this bot.  Returns
    {} during the first two setup rounds.

    NOTE(review): the replay mutates per-turn state in a strict order
    (last_* fields are snapshotted before being overwritten) -- do not
    reorder statements here.
    """
    if len(request['requests']) <= 2:
        # pass first two rounds
        return {}
    obs = {
        'player_id': None,
        'tiles': [],                 # our concealed hand
        'tile_count': [21] * 4,      # remaining wall share per seat
        'claimings': [],             # our own claims (alias into all_claimings)
        'all_claimings': [[] for _ in range(4)],
        'played_tiles': {t: 0 for t in TILE_SET},  # publicly visible copies
        'last_player': None,
        'last_tile': None,
        'last_operation': None,
        'round_wind': None,
        'request_hist': [],
        'response_hist': []
    }
    request_hist = request['requests']
    # request 0: "<msgtype> <seat> <round_wind>"
    general_info = request_hist[0].split()
    player_id = obs['player_id'] = int(general_info[1])
    obs['round_wind'] = int(general_info[2])
    # request 1: flower counts (4 tokens) then our 13 starting tiles
    obs['tiles'] = request_hist[1].split()[5:]
    for act in request_hist[2:]:
        act = act.split()
        msgtype = int(act[0])
        if msgtype == 2: # self draw
            obs['tiles'].append(act[1])
            obs['tile_count'][player_id] -= 1
            obs['request_hist'].append(Action(player_id, ActionType.DRAW, act[1]))
            obs['last_player'] = player_id
            obs['last_operation'] = ActionType.DRAW
            obs['last_tile'] = act[1]
            continue
        # msgtype 3: "<3> <player> <ACTION> [tiles...]"
        player = int(act[1])
        is_self = player == player_id
        act_type = str2act(act[2])
        # snapshot the previous turn's state before overwriting it
        last_player = obs['last_player']
        last_op = obs['last_operation']
        last_tile = obs['last_tile']
        obs['last_player'] = player
        obs['last_operation'] = act_type
        if len(act) == 3:
            # tile-less actions: kong, or another player's draw
            obs['request_hist'].append(Action(player, act_type, None))
            if act_type == ActionType.KONG:
                # a kong right after a draw is concealed -- tile unknown
                claim = Claiming(ClaimingType.KONG, last_tile or '<conceal>', last_player)
                obs['all_claimings'][player].append(claim)
                is_conceal = last_op == ActionType.DRAW
                if not is_conceal:
                    obs['played_tiles'][last_tile] = 4
                if is_self:
                    for _ in range(4 if is_conceal else 3):
                        obs['tiles'].remove(last_tile)
            else:
                obs['tile_count'][player] -= 1
            obs['last_tile'] = None
            continue
        # play, chow, pung, meld kong -- all end with the discarded tile
        obs['request_hist'].append(Action(player, act_type, ' '.join(act[3:])))
        play_tile = act[-1]
        obs['played_tiles'][play_tile] += 1
        obs['last_tile'] = play_tile
        if is_self:
            obs['tiles'].remove(play_tile)
        if act_type == ActionType.PLAY:
            # already removed!
            pass
        elif act_type == ActionType.MELD_KONG:
            # upgrade our earlier pung claim on this tile to a kong
            for claim in obs['all_claimings'][player]:
                if claim.tile == play_tile:
                    claim.claiming_type = ClaimingType.KONG
                    break
        elif act_type == ActionType.CHOW:
            # act[-2] is the middle tile of the chow, e.g. 'W3'
            chow_tile = act[-2]
            chow_t, chow_v = chow_tile[0], int(chow_tile[1])
            offer_card = int(last_tile[1]) - chow_v + 2
            claim = Claiming(ClaimingType.CHOW, chow_tile, offer_card)
            obs['all_claimings'][player].append(claim)
            # the two tiles supplied from hand become publicly visible
            for v in range(chow_v - 1, chow_v + 2):
                cur_tile = f'{chow_t}{v}'
                if cur_tile != last_tile:
                    obs['played_tiles'][cur_tile] += 1
                    if is_self:
                        obs['tiles'].remove(cur_tile)
        elif act_type == ActionType.PUNG:
            claim = Claiming(ClaimingType.PUNG, last_tile, last_player)
            obs['all_claimings'][player].append(claim)
            obs['played_tiles'][last_tile] += 2
            if is_self:
                for _ in range(2):
                    obs['tiles'].remove(last_tile)
        else:
            raise TypeError(f"Wrong action {' '.join(act)}!")
    # replay our own past responses verbatim
    for res in request['responses']:
        res = res.split()
        act_type = str2act(res[0])
        tile = None if len(res) == 1 else ' '.join(res[1:])
        obs['response_hist'].append(Action(player_id, act_type, tile))
    obs['tiles'].sort()
    obs['claimings'] = obs['all_claimings'][player_id]
    if obs['last_operation'] == ActionType.DRAW and obs['last_player'] == player_id:
        # remove last draw (for calculating fan)
        obs['tiles'].remove(obs['last_tile'])
    return obs
def act2response(act: Action) -> dict:
    """Wrap an Action as a Botzone response payload dict."""
    text = act2str(act.act_type)
    if act.tile is not None:
        text = f'{text} {act.tile}'
    return {'response': text}
def response2act(response: str, player_id: int) -> Action:
    """Parse a response string back into an Action owned by player_id."""
    parts = response.split()
    # everything after the keyword (possibly several tokens) is the tile spec
    tile_spec = ' '.join(parts[1:]) if len(parts) > 1 else None
    return Action(player_id, str2act(parts[0]), tile_spec)
def json2simple(request: dict) -> str:
    """Flatten a Botzone JSON turn into the simple line-based format:
    turn count, then alternating request/response lines, ending with the
    latest (unanswered) request."""
    requests = request['requests']
    responses = request['responses']
    lines = [str(len(requests))]
    for req_line, res_line in zip(requests, responses):
        lines.extend((req_line, res_line))
    lines.append(requests[-1])
    return '\n'.join(lines)
|
nilq/baby-python
|
python
|
import logging
import requests
from collections import namedtuple
from contextlib import suppress
from getpass import getuser
from re import DOTALL, IGNORECASE, MULTILINE, compile as Regex
from time import time, ctime
from tkinter import _default_root, Label, Menu, PhotoImage, BOTH, DISABLED, END, NORMAL, RIGHT
from tkinter.messagebox import showinfo
from tkinter.ttk import Frame
from threading import Timer
try:
    from thonny import get_workbench
    from thonny.codeview import CodeView
    from thonny.shell import ShellView
    # Read-only mirrors of another student's shell/editor, rendered with
    # Thonny's own text widgets but permanently DISABLED.
    class ShellMirrorView(CodeView): # CodeView(tktextext.EnhancedTextFrame(tktextext.TextFrame(ttk.Frame)))
        def __init__(self, *args, **kwargs):
            # Syntax highlighting here should be different from a normal CodeView... maybe? Or maybe it really doesn't matter, as long as it's disabled?
            kwargs['state'] = DISABLED
            super().__init__(*args, **kwargs)
            # clicking the mirror still gives it keyboard focus
            self.text.bind('<1>', lambda event: self.text.focus_set())
        def destroy(self):
            self.text.unbind('<1>')
            super().destroy()
    class CodeMirrorView(ShellMirrorView):
        # Same as ShellMirrorView plus line numbers and the editor font.
        def __init__(self, *args, **kwargs):
            kwargs['line_numbers'] = True
            kwargs['font'] = 'EditorFont'
            super().__init__(*args, **kwargs)
except ImportError:
    # We're probably running unit tests outside of Thonny, so it's fine.
    pass
# Region between '# COPYABLE' and '# END COPYABLE' markers -- presumably
# the only part students may copy (see clipboard enforcer notes below).
copyablePattern = Regex(r'#\s*COPYABLE.*?#\s*END\s*COPYABLE', DOTALL | IGNORECASE)
# Single word character; used to mask out characters when blurring.
blurCharPattern = Regex(r'\w')
# A line ending in '# BLUR': group 1 is the code to obscure.
blurLinePattern = Regex(r'^(.+)#\s*BLUR(\s*)$', IGNORECASE | MULTILINE)
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#
# Logs go to Thonny/frontend.log in ~/Library (mac) or ~\AppData\Roaming (win)
# This file gets installed in ~\AppData\Roaming\Python\Python37\site-packages\thonnycontrib (win)
# or in /Applications/Thonny.app/Contents/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages
#
# To Install:
# 1a - Windows: Need to install git first - can get it from here: https://git-scm.com/download/win
# 1b - Mac: Prefix the below command with sudo. It will prompt for the password (which won't be shown) after. May have to install Xcode command line tools if prompted.
# 2 - Everyone: pip3 install git+https://github.com/TaylorSMarks/classroom_sync.git
#
# BUGS SOMETIMES SEEN:
# 1 - Shutdown sometimes hangs on the Mac, or the window closes but the application keeps running on Windows.
# - Might have something to do with unsaved files?
# - Might have been because I lacked a destroy method for the mirror views?
# 2 - Explicitly picking something to view doesn't always work? <<< I think I prioritized a file from Windows, then the Mac couldn't request another?
# ^^^ This occurred for both Nicole and Matt during Lesson 4. I must figure this out immediately.
# There's also periodically a popup about clipboard enforcer failing?
# 3 - Files vanish after they're ten minutes old and never show up again?
#
# OPTIONAL STEPS LEFT:
# 1 - Fix inconsistent font issues in CodeMirrorView. <<< Seems to be related to it not viewing everything as code? Probably doesn't matter since we shouldn't edit it anyways.
# 2 - Fix the weird scroll bar in CodeMirrorView.
# 3 - Add an ability to un-request files.
# 4 - Add in an assistant mirror view.
#
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class ImageView(Frame):
    """A Tk frame that displays a single static image (currently unused).

    Addresses the old TODO: the image file is now configurable via the
    keyword-only *image_path* argument; the default preserves the original
    hard-coded value, so existing callers are unaffected.
    """
    # What I have written here worked - I just decided uploading/downloading
    # images would be pretty complicated and that I could get most of the same
    # benefits from the blur function for much lesser complexity.
    def __init__(self, *args, image_path=r'C:\Users\Taylor\Downloads\fuckyou.gif', **kwargs):
        super().__init__(*args, **kwargs)
        # The PhotoImage must be kept referenced on self: if it is garbage
        # collected, Tk stops rendering it and the label goes blank.
        self.image = PhotoImage(file=image_path)
        self.label = Label(self, bg='pink', image=self.image)
        self.label.pack(side=RIGHT, fill=BOTH, expand=True)
# Record of one uploaded file: its contents plus the time() it was last sent
# to the server (used by addIfChanged to decide whether to resend).
SentFile = namedtuple('SentFile', ['contents', 'time'])
def updateRequestedFile(username, file):
    """Record (username, file) as the remote file to ask for on the next sync."""
    sync.requestUser = username
    sync.requestFile = file
def updatePrioritizeFile(filename):
    """Flag *filename* to be pushed to everyone on the next sync (menu callback)."""
    sync.prioritizeFile = filename
    logging.info('Set prioritize file to: ' + filename)
def requestablePairToName(username, file):
    """Return the menu display name for a (user, file) pair, e.g. "Alice's foo.py"."""
    return f"{username}'s {file}"
def updateMenu(wb):
    """Rebuild the 'Classroom Sync' menu cascades to match the current state.

    Keeps two cascades in step with reality: 'View Remote...' (files other
    users have offered) and 'Show Everyone...' (locally open files).  Uses
    function attributes (viewMenu, showMenu, oldRequestable, oldSharable) as
    persistent state between calls.

    wb: the Thonny workbench.
    """
    syncMenu = wb.get_menu('Classroom Sync')
    if not hasattr(updateMenu, 'viewMenu'):
        updateMenu.viewMenu = Menu(syncMenu, tearoff = 0)
        syncMenu.add_cascade(label = 'View Remote...', menu = updateMenu.viewMenu)
    if not hasattr(updateMenu, 'showMenu'):
        updateMenu.showMenu = Menu(syncMenu, tearoff = 0)
        syncMenu.add_cascade(label = 'Show Everyone...', menu = updateMenu.showMenu)
    currentRequestables = [f for f in sync.requestableFiles]
    # Remove everything that was requestable, but isn't anymore.
    for oldRequestable in updateMenu.oldRequestable:
        if oldRequestable not in currentRequestables:
            updateMenu.viewMenu.delete(updateMenu.viewMenu.index(requestablePairToName(*oldRequestable)))
    # Add everything new that's now requestable.
    for newRequestable in currentRequestables:
        if newRequestable not in updateMenu.oldRequestable:
            # BUG FIX: bind the loop variable as a default argument. A bare
            # "lambda: updateRequestedFile(*newRequestable)" late-binds the
            # closure, so every menu entry added in this loop would request
            # the *last* pair instead of its own.
            updateMenu.viewMenu.add_command(label = requestablePairToName(*newRequestable),
                                            command = lambda pair = newRequestable: updateRequestedFile(*pair))
    updateMenu.oldRequestable = currentRequestables
    currentFiles = [f for f in getAllFiles(wb)]
    # Remove everything that was sharable, but isn't anymore.
    for oldFile in updateMenu.oldSharable:
        if oldFile not in currentFiles:
            updateMenu.showMenu.delete(updateMenu.showMenu.index(oldFile))
    # Add everything new that's now sharable.
    for filename in currentFiles:
        if filename not in updateMenu.oldSharable:
            # Same late-binding fix as above.
            updateMenu.showMenu.add_command(label = filename, command = lambda name = filename: updatePrioritizeFile(name))
    updateMenu.oldSharable = currentFiles
updateMenu.oldRequestable = []
updateMenu.oldSharable = []
def getAllFiles(wb):
    """Return {unique title: contents} for every editor open in the workbench.

    Thonny marks modified editors with a trailing '*', which is stripped.
    Duplicate titles are disambiguated with a '-1', '-2', ... suffix so every
    key is unique.
    """
    collected = {}
    for editor in wb.get_editor_notebook().get_all_editors():
        title = editor.get_title()
        if editor.is_modified() and title.endswith('*'):
            title = title[:-1]
        title = title.strip()
        candidate, suffix = title, 1
        while candidate in collected:
            candidate = title + '-' + str(suffix)
            suffix += 1
        collected[candidate] = editor.get_code_view().get_content()
    return collected
def blur(unblurredContents):
    """Return *unblurredContents* with every '# BLUR'-marked line obscured.

    On each matching line, every word character before the marker is replaced
    with an underscore; trailing whitespace is preserved. All other lines pass
    through unchanged.
    """
    def _obscure(match):
        return blurCharPattern.sub('_', match.group(1)) + match.group(2)
    return blurLinePattern.sub(_obscure, unblurredContents)
def syncHelper(wb, viewName, tabName, contents, syncKey, scrollToEnd = False):
    """Display *contents* (blurred) in the mirror view *viewName*.

    Retitles the view's notebook tab to *tabName*, preserves the user's scroll
    position across the content swap (unless *scrollToEnd*), and records the
    raw contents under *syncKey* so clipboardEnforcer can detect forbidden
    copies from this view.
    """
    wb.show_view(viewName, False) # Don't take the focus.
    view = wb.get_view(viewName)
    notebook = view.home_widget.master # Instance of ttk.Notebook
    notebook.tab(view.home_widget, text = tabName)
    viewText = view.text
    # Default scroll position: whole document visible (Tk scrollbar fractions).
    xlo = ylo = '0.0'
    xhi = yhi = '1.0'
    # _hbar/_vbar are private Thonny widgets and may not exist on every view,
    # hence the blanket suppress.
    with suppress(Exception): xlo, xhi = view._hbar.get()
    with suppress(Exception): ylo, yhi = view._vbar.get()
    logging.debug("The scroll position was retrieved as: {}-{}, {}-{}".format(xlo, xhi, ylo, yhi))
    # The text widget is kept read-only; temporarily enable it to swap content.
    viewText['state'] = NORMAL
    viewText.set_content(blur(contents))
    viewText['state'] = DISABLED
    # Restore the previous scroll position (or jump to the end for shell mirrors).
    with suppress(Exception): view._hbar.set(xlo, xhi)
    if scrollToEnd:
        viewText.see(END)
    else:
        with suppress(Exception): view._vertical_scrollbar_update(ylo, yhi)
    # Store the *unblurred* text: the enforcer compares clipboard text against it.
    clipboardEnforcer.syncText[syncKey] = contents
def addIfChanged(name, contents, building):
    """Add *name* -> *contents* to *building* when an upload is due.

    An upload is due if the file was never sent, its contents differ from the
    last send, or the last send is at least 10 minutes old.
    """
    previous = sync.lastSentFiles.get(name)
    stale = previous is not None and previous.time <= time() - 600
    if previous is None or previous.contents != contents or stale:
        building[name] = contents
def sync():
    """One round-trip with the classroom server; reschedules itself every 5s.

    Uploads changed/stale local files (and the shell), retracts files that
    were closed, forwards any view/prioritize requests, then displays whatever
    remote file/shell the server sent back in the mirror views.  All state
    lives in function attributes initialized below.
    """
    wb = get_workbench()
    allFiles = getAllFiles(wb)
    changedFiles = {}
    for filename in allFiles:
        addIfChanged(filename, allFiles[filename], changedFiles)
    shellContents = ''
    # The shell view may not exist yet; treat it as empty in that case.
    with suppress(Exception):
        shellContents = wb.get_view('ShellView').text.get('1.0', 'end-1c')
    addIfChanged(':shell:', shellContents, changedFiles)
    # Everything the user wrote locally is always fair game for copying.
    clipboardEnforcer.copyableText['files'] = ''.join(allFiles.values()) + shellContents
    request = {'user': getuser()}
    if changedFiles:
        request['files'] = changedFiles
    retractFiles = [f for f in sync.lastSentFiles if f != ':shell:' and f not in allFiles]
    if retractFiles:
        request['retract'] = retractFiles
    for var in 'lastVersion', 'lastUser', 'lastFile', 'prioritizeFile', 'requestUser', 'requestFile', 'lastShell':
        val = getattr(sync, var)
        if val is not None:
            request[var] = val
    try:
        r = requests.post('https://marksfam.com/class_sync/class_sync', json = request)
        try:
            response = r.json()
        except Exception:
            logging.exception('Failed to convert from json: ' + r.text)
            raise
        for f in changedFiles:
            sync.lastSentFiles[f] = SentFile(changedFiles[f], time())
        for f in retractFiles:
            sync.lastSentFiles.pop(f, None) # Delete if it's there, ignore if it's not.
        # BUG FIX: this used to assign "sync.prioritizeFiles" (trailing s), a
        # typo that created a brand-new attribute and never cleared the real
        # flag - so a prioritized file was re-declared a priority on every
        # sync forever. Ensure it's only ever declared as a priority once.
        sync.prioritizeFile = None
        sync.requestableFiles = response['files']
        updateMenu(wb)
        if 'version' in response:
            sync.lastVersion = response['version']
            sync.lastUser = response['user']
            sync.lastFile = response['file']
            syncHelper(wb, 'CodeMirrorView', 'Code Mirror - ' + requestablePairToName(sync.lastUser, sync.lastFile), response['body'], 'main')
            # Only "# COPYABLE" regions of the remote file may be copied.
            clipboardEnforcer.copyableText['allowed'] = ''.join(copyablePattern.findall(response['body']))
        if 'shellVersion' in response:
            sync.lastShell = response['shellVersion']
            sync.lastUser = response['user']
            syncHelper(wb, 'ShellMirrorView', sync.lastUser + "'s Shell", response['shellBody'], 'shell', scrollToEnd = True)
    except Exception:
        logging.exception('Failure during sync.', exc_info = True)
    finally:
        # Keep the loop alive unless Thonny is shutting down.
        if not get_workbench()._closing:
            logging.debug('Will kick off another sync in 5 seconds since there is no mention of the app closing as of: ' + ctime())
            Timer(5, sync).start()
        else:
            logging.info('No more syncing - time for the app to die: ' + ctime())
# Persistent state for the sync loop, stored as function attributes.
sync.requestableFiles = []
sync.lastSentFiles = {}
sync.lastVersion = None
sync.lastUser = None
sync.lastFile = None
sync.lastShell = None
sync.prioritizeFile = None
sync.requestUser = None
sync.requestFile = None
def clipboardEnforcer():
    """Poll the clipboard every 200ms and roll back forbidden copies.

    A copy is forbidden when the clipboard text appears in some mirrored
    remote content (syncText) but not in any whitelisted text (copyableText:
    the user's own files/shell, plus '# COPYABLE' regions of the remote file).
    Reschedules itself via Tk's after() until the workbench starts closing.
    """
    try:
        # clipboard_get raises when the clipboard is empty or non-text; the
        # broad except below treats that as "nothing to check this tick".
        clipboardContents = _default_root.clipboard_get()
        if clipboardContents != clipboardEnforcer.previousClipboardContents:
            stripped = clipboardContents.strip()
            if any(stripped in t for t in clipboardEnforcer.syncText.values()) and not any(stripped in t for t in clipboardEnforcer.copyableText.values()):
                # Forbidden: restore the last known-good clipboard contents.
                _default_root.clipboard_clear()
                _default_root.clipboard_append(clipboardEnforcer.previousClipboardContents)
                showinfo('Forbidden copy detected!', "You weren't allowed to copy that! Your clipboard has been rolled back!")
            else:
                clipboardEnforcer.previousClipboardContents = clipboardContents
    except Exception:
        logging.exception('Clipboard enforcer got an error.', exc_info = True)
    finally:
        if not get_workbench()._closing:
            # Heartbeat log roughly every 6 seconds (30 ticks x 200ms).
            clipboardEnforcer.counter += 1
            if clipboardEnforcer.counter > 30:
                clipboardEnforcer.counter = 0
                logging.debug('Clipboard enforcer is still running since there is no mention of the app closing as of: ' + ctime())
            _default_root.after(200, clipboardEnforcer)
        else:
            logging.info('No more clipboard enforcing - time for the app to die: ' + ctime())
# Persistent state for the enforcer loop, stored as function attributes.
clipboardEnforcer.counter = 0
clipboardEnforcer.syncText = {}
clipboardEnforcer.copyableText = {}
def afterLoad():
    """Run once, ~7 seconds after plugin load.

    Seeds the clipboard snapshot used by clipboardEnforcer, then starts the
    two self-rescheduling loops (sync and clipboardEnforcer).
    """
    try:
        clipboardEnforcer.previousClipboardContents = _default_root.clipboard_get()
    except Exception:
        # Narrowed from a bare "except:", which would also swallow
        # SystemExit/KeyboardInterrupt. clipboard_get typically raises a
        # TclError when the clipboard is empty or holds non-text data.
        clipboardEnforcer.previousClipboardContents = '<Failed to load clipboard.>'
        get_workbench().report_exception("Failed to get the clipboard while loading the plugin.")
    try:
        sync()
        clipboardEnforcer()
        logging.info('Finished loading classroom_sharing.py')
    except Exception:
        # Narrowed from a bare "except:" for the same reason as above.
        get_workbench().report_exception("Error while loading the plugin.")
def load_plugin():
    """Thonny entry point: register the mirror views and schedule startup."""
    logging.info('Loading classroom_sharing.py - will involve a 7 second wait.')
    workbench = get_workbench()
    for viewClass, label, corner in ((CodeMirrorView, 'Code Mirror', 'ne'),
                                     (ShellMirrorView, 'Shell Mirror', 'se')):
        workbench.add_view(viewClass, label, corner, visible_by_default = True)
    #workbench.add_view(ImageView, 'Image View', 'se', visible_by_default = True)
    # Give Thonny some time (7 seconds) to finish initializing before we
    # start talking to the server and touching the clipboard.
    _default_root.after(7000, afterLoad)
|
nilq/baby-python
|
python
|
"""Tests API to manage moderators."""
import json
from django.contrib.auth import get_user_model
from django.test import TestCase
from machina.apps.forum_permission.shortcuts import assign_perm
from ashley import SESSION_LTI_CONTEXT_ID
from ashley.defaults import _FORUM_ROLE_MODERATOR
from ashley.factories import ForumFactory, LTIContextFactory, UserFactory
User = get_user_model()
class ManageModeratorApiTest(TestCase):
"""Test the API to manage moderators."""
def test_access_basic_api_manage_moderator_list_users(self):
    """Anonymous users should not be allowed to retrieve list of users."""
    response = self.client.get("/api/v1.0/users/")
    self.assertEqual(403, response.status_code)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_access_can_manage_moderators_moderator_list_users(self):
    """Users that can manage moderators should be able to use the API to request
    list of users. Both an active LTI-context session and the
    `can_manage_moderator` permission are required; access is granted only
    once both are in place."""
    user = UserFactory()
    lti_context = LTIContextFactory(lti_consumer=user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    self.client.force_login(user, "ashley.auth.backend.LTIBackend")
    response = self.client.get("/api/v1.0/users/")
    # First it's forbidden: logged in, but no session and no permission yet.
    self.assertEqual(403, response.status_code)
    # Add the LTI context id to the session.
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    self.assertEqual(
        self.client.session.get(SESSION_LTI_CONTEXT_ID), lti_context.id
    )
    response = self.client.get("/api/v1.0/users/")
    # Still forbidden: session ok but missing permission.
    self.assertEqual(response.status_code, 403)
    assign_perm("can_manage_moderator", user, forum, True)
    # Should now be authorized.
    response = self.client.get("/api/v1.0/users/")
    self.assertEqual(response.status_code, 200)
def test_access_basic_api_manage_moderator_list_students(self):
    """Anonymous users should not be allowed to retrieve list of students."""
    response = self.client.get("/api/v1.0/users/?role=student")
    self.assertEqual(403, response.status_code)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_access_can_manage_moderators_moderator_list_students(self):
    """Users that can manage moderators should be able to use the API to request
    list of students. Requires both an LTI-context session and the
    `can_manage_moderator` permission."""
    user = UserFactory()
    lti_context = LTIContextFactory(lti_consumer=user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    self.client.force_login(user, "ashley.auth.backend.LTIBackend")
    response = self.client.get("/api/v1.0/users/?role=student")
    # First it's forbidden: no session and no permission yet.
    self.assertEqual(403, response.status_code)
    # Add the LTI context id to the session.
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    self.assertEqual(
        self.client.session.get(SESSION_LTI_CONTEXT_ID), lti_context.id
    )
    response = self.client.get("/api/v1.0/users/?role=student")
    # Still forbidden: session ok but missing permission.
    self.assertEqual(response.status_code, 403)
    assign_perm("can_manage_moderator", user, forum, True)
    # Should now be authorized.
    response = self.client.get("/api/v1.0/users/?role=student")
    self.assertEqual(response.status_code, 200)
def test_access_basic_api_manage_moderator_list_moderators(self):
    """Anonymous users should not be allowed to retrieve list of moderators."""
    response = self.client.get("/api/v1.0/users/?role=moderator")
    self.assertEqual(403, response.status_code)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_access_can_manage_moderators_list_moderators(self):
    """Users that can manage moderators should be able to use the API to request
    list of moderators. Unlike the sibling tests, this one grants the
    permission first and the session second, proving neither is sufficient
    alone."""
    user = UserFactory()
    lti_context = LTIContextFactory(lti_consumer=user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    self.client.force_login(user, "ashley.auth.backend.LTIBackend")
    response = self.client.get("/api/v1.0/users/?role=moderator")
    # First it's forbidden: no permission and no session yet.
    self.assertEqual(403, response.status_code)
    # Add permission
    assign_perm("can_manage_moderator", user, forum, True)
    # Still forbidden, missing the session
    response = self.client.get("/api/v1.0/users/?role=moderator")
    self.assertEqual(403, response.status_code)
    # Add session
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    self.assertEqual(
        self.client.session.get(SESSION_LTI_CONTEXT_ID), lti_context.id
    )
    response = self.client.get("/api/v1.0/users/?role=moderator")
    # Permission + session added, it should be allowed
    self.assertEqual(response.status_code, 200)
def test_access_basic_api_manage_moderator_list_instructors(self):
    """Anonymous users should not be allowed to retrieve list of instructors."""
    response = self.client.get("/api/v1.0/users/?role=instructor")
    self.assertEqual(403, response.status_code)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_access_can_manage_moderators_moderator_list_instructors(self):
    """Users that can manage moderators should be able to use the API to request
    list of instructors. Requires both an LTI-context session and the
    `can_manage_moderator` permission."""
    user = UserFactory()
    lti_context = LTIContextFactory(lti_consumer=user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    self.client.force_login(user, "ashley.auth.backend.LTIBackend")
    response = self.client.get("/api/v1.0/users/?role=instructor")
    # First it's forbidden: no session and no permission yet.
    self.assertEqual(403, response.status_code)
    # Add the LTI context id to the session.
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    self.assertEqual(
        self.client.session.get(SESSION_LTI_CONTEXT_ID), lti_context.id
    )
    response = self.client.get("/api/v1.0/users/?role=instructor")
    # Still forbidden: session ok but missing permission.
    self.assertEqual(response.status_code, 403)
    assign_perm("can_manage_moderator", user, forum, True)
    # Should now be authorized.
    response = self.client.get("/api/v1.0/users/?role=instructor")
    self.assertEqual(response.status_code, 200)
def test_access_basic_api_manage_moderator_list_not_moderators(self):
    """Anonymous users should not be allowed to retrieve list of non moderators."""
    response = self.client.get("/api/v1.0/users/?role=!moderator")
    self.assertEqual(403, response.status_code)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_access_can_manage_moderators_list_non_moderators(self):
    """Users that can manage moderators should be able to use the API to request
    list of users that are not moderators."""
    user = UserFactory()
    lti_context = LTIContextFactory(lti_consumer=user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    self.client.force_login(user, "ashley.auth.backend.LTIBackend")
    response = self.client.get("/api/v1.0/users/?role=!moderator")
    # First it's forbidden
    self.assertEqual(403, response.status_code)
    # Add permission
    assign_perm("can_manage_moderator", user, forum, True)
    # Still forbidden, missing the session.
    # BUG FIX: this request previously used the malformed query string
    # "?!role=moderator", which is not the "!moderator" filter under test.
    response = self.client.get("/api/v1.0/users/?role=!moderator")
    self.assertEqual(403, response.status_code)
    # Add session
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    self.assertEqual(
        self.client.session.get(SESSION_LTI_CONTEXT_ID), lti_context.id
    )
    response = self.client.get("/api/v1.0/users/?role=!moderator")
    # Permission + session added, it should be allowed
    self.assertEqual(response.status_code, 200)
def test_access_api_can_manage_moderators_update_student_promote(self):
    """
    Promote and then revoke a user's moderator role with the right context,
    permission and group in place. Validates the happy path of the update
    API: the moderator group is added to, then removed from, the target
    user's groups.
    """
    update_user = UserFactory(public_username="Thérèse")
    api_user = UserFactory(lti_consumer=update_user.lti_consumer)
    lti_context = LTIContextFactory(lti_consumer=update_user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    # Assign student group to user
    lti_context.sync_user_groups(update_user, ["student"])
    # Check the user's starting groups: base group + student role only.
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
    # Assign the permission
    assign_perm("can_manage_moderator", api_user, forum, True)
    # Creates the session
    self.client.force_login(api_user, "ashley.auth.backend.LTIBackend")
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    # Promote user to moderator
    data = {"roles": ["student", "moderator"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(content, {"success": True})
    # Check the moderator group is now part of the user's groups.
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
            f"{lti_context.base_group_name}:role:moderator",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
    # Then revoke the user's moderator role.
    data = {
        "roles": ["student"],
    }
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(content, {"success": True})
    # Check the moderator group is no longer part of the user's groups.
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
def test_access_api_basic_manage_moderator_update_student(self):
    """An unauthenticated request must not be able to update a student's roles."""
    target = UserFactory()
    payload = json.dumps({"roles": ["moderator"]})
    response = self.client.post(
        f"/api/v1.0/users/{target.id}/",
        payload,
        content_type="application/json",
    )
    self.assertEqual(403, response.status_code)
    self.assertEqual(
        json.loads(response.content),
        {"detail": "Authentication credentials were not provided."},
    )
def test_access_api_can_manage_moderators_update_student_no_group_context(self):
    """Users that don't have a group from this LTI context can't be promoted
    moderator; once they get a role group in the context, promotion works."""
    update_user = UserFactory()
    api_user = UserFactory(lti_consumer=update_user.lti_consumer)
    lti_context = LTIContextFactory(lti_consumer=update_user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    # Assign the permission
    assign_perm("can_manage_moderator", api_user, forum, True)
    # Creates the session
    self.client.force_login(api_user, "ashley.auth.backend.LTIBackend")
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    # Data to promote user to moderator
    data = {
        "roles": ["moderator"],
    }
    # update_user has no group in this context yet: forbidden.
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 403)
    content = json.loads(response.content)
    self.assertEqual(
        content, {"detail": "You do not have permission to perform this action."}
    )
    # Add group student and it should work
    lti_context.sync_user_groups(update_user, ["student"])
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
def test_access_api_can_manage_moderators_update_student_no_group_moderator(self):
    """If the context's moderator group doesn't exist yet, the user can still
    be updated and the group is created on the fly. This covers forums
    created before the moderator feature existed."""
    update_user = UserFactory()
    api_user = UserFactory(lti_consumer=update_user.lti_consumer)
    lti_context = LTIContextFactory(lti_consumer=update_user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    # Add group student
    lti_context.sync_user_groups(update_user, ["student"])
    # Assign the permission
    assign_perm("can_manage_moderator", api_user, forum, True)
    # Creates the session
    self.client.force_login(api_user, "ashley.auth.backend.LTIBackend")
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    # Data to promote user to moderator
    data = {
        "roles": ["moderator"],
    }
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
def test_access_api_can_manage_moderators_update_student_no_session(self):
    """Users with no LTI-context session can't update a user, even with the
    `can_manage_moderator` permission; adding the session unlocks access."""
    update_user = UserFactory()
    api_user = UserFactory(lti_consumer=update_user.lti_consumer)
    lti_context = LTIContextFactory(lti_consumer=update_user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    # Assign student group to user
    lti_context.sync_user_groups(update_user, ["student"])
    # Assign the permission
    assign_perm("can_manage_moderator", api_user, forum, True)
    # No login/session yet: the request must be rejected as unauthenticated.
    data = {
        "roles": ["moderator"],
    }
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 403)
    content = json.loads(response.content)
    self.assertEqual(
        content, {"detail": "Authentication credentials were not provided."}
    )
    # Create the session and it should work
    self.client.force_login(api_user, "ashley.auth.backend.LTIBackend")
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
def _login_authorized_user_to_manage_moderators(self):
    """
    Access to the API has been tested in previous tests. This helper is a
    shortcut for the tests below: it builds a forum bound to a fresh LTI
    context, logs in a user granted `can_manage_moderator` on that forum,
    stores the context id in the session, and returns
    (api_user, lti_context).
    """
    api_user = UserFactory()
    lti_context = LTIContextFactory(lti_consumer=api_user.lti_consumer)
    forum = ForumFactory()
    forum.lti_contexts.add(lti_context)
    # Assign the permission for API user
    assign_perm("can_manage_moderator", api_user, forum, True)
    # Create the session so API calls are authorized.
    self.client.force_login(api_user, "ashley.auth.backend.LTIBackend")
    session = self.client.session
    session[SESSION_LTI_CONTEXT_ID] = lti_context.id
    session.save()
    return api_user, lti_context
def test_access_api_can_manage_moderators_update_student_no_role(self):
    """If "roles" is not present, or is set to an unexpected value (anything
    other than moderator/student), promoting is not allowed."""
    api_user, lti_context = self._login_authorized_user_to_manage_moderators()
    # Creates user to update
    update_user = UserFactory(lti_consumer=api_user.lti_consumer)
    lti_context.sync_user_groups(update_user, ["student"])
    # No "roles" key at all: forbidden.
    data = {
        "id": update_user.id,
    }
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 403)
    content = json.loads(response.content)
    self.assertEqual(
        content, {"detail": "You do not have permission to perform this action."}
    )
    # Change data, set roles to an unknown value 'whatever': forbidden.
    data = {"roles": ["whatever"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 403)
    # Change data, set roles to 'instructor': granting instructor is not allowed.
    data = {"roles": ["instructor"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 403)
    # Change data, set roles to 'moderator' and it should work.
    data = {"roles": ["moderator"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
def test_revoke_moderator_on_student(self):
    """A user that is not a moderator can't be revoked: sending roles without
    "moderator" is rejected until the user actually holds the moderator
    group, after which revocation succeeds and removes the group."""
    api_user, lti_context = self._login_authorized_user_to_manage_moderators()
    # Creates user to update
    update_user = UserFactory(lti_consumer=api_user.lti_consumer)
    lti_context.sync_user_groups(update_user, ["student"])
    # Try to "revoke" a user who is not a moderator: forbidden.
    data = {"roles": ["student"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 403)
    content = json.loads(response.content)
    self.assertEqual(
        content, {"detail": "You do not have permission to perform this action."}
    )
    # Assign moderator group to user
    lti_context.sync_user_groups(update_user, ["student", "moderator"])
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
            f"{lti_context.base_group_name}:role:moderator",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
    # Revoke should now be ok
    data = {"id": update_user.id, "roles": ["student"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
    # Check the moderator group is no longer part of the user's groups.
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
def test_promote_on_moderator_student(self):
    """A user that is already a moderator can't be promoted again: promotion
    is rejected until the role is revoked, after which promoting succeeds."""
    api_user, lti_context = self._login_authorized_user_to_manage_moderators()
    # Assign moderator group to user
    update_user = UserFactory(lti_consumer=api_user.lti_consumer)
    lti_context.sync_user_groups(update_user, ["student", "moderator"])
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
            f"{lti_context.base_group_name}:role:moderator",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
    # Promote shouldn't work: the user is already a moderator.
    data = {"roles": ["moderator"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 403)
    content = json.loads(response.content)
    self.assertEqual(
        content, {"detail": "You do not have permission to perform this action."}
    )
    # Revoke should work
    data = {"id": update_user.id, "roles": ["student"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
    # Now promote should work
    data = {"id": update_user.id, "roles": ["moderator"]}
    response = self.client.put(
        f"/api/v1.0/users/{update_user.id}/",
        json.dumps(data),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 200)
    self.assertCountEqual(
        [
            lti_context.base_group_name,
            f"{lti_context.base_group_name}:role:student",
            f"{lti_context.base_group_name}:role:moderator",
        ],
        list(update_user.groups.values_list("name", flat=True)),
    )
def test_list_users(self):
    """Controls that the list returned by the API contains the expected users
    for each role filter, ordered by public_username; users with no role
    group in the context are excluded."""
    api_user, lti_context = self._login_authorized_user_to_manage_moderators()
    user1 = UserFactory(
        public_username="Thomas", lti_consumer=api_user.lti_consumer
    )
    user2 = UserFactory(
        public_username="Aurélie", lti_consumer=api_user.lti_consumer
    )
    user3 = UserFactory(public_username="Abba", lti_consumer=api_user.lti_consumer)
    user4 = UserFactory(
        public_username="Silvio", lti_consumer=api_user.lti_consumer
    )
    UserFactory(public_username="Abdel", lti_consumer=api_user.lti_consumer)
    lti_context.sync_user_groups(user1, ["student"])
    lti_context.sync_user_groups(user2, ["student"])
    # FIX: removed a stray trailing comma here that turned the statement
    # into a pointless one-element tuple expression.
    lti_context.sync_user_groups(user3, ["student", "moderator"])
    lti_context.sync_user_groups(user4, ["instructor"])
    # Request with no filter returns the list of users but user5 that has no roles
    # list ordered by public_username
    response = self.client.get("/api/v1.0/users/", content_type="application/json")
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [
            {
                "id": user3.id,
                "public_username": "Abba",
                "roles": ["moderator", "student"],
            },
            {"id": user2.id, "public_username": "Aurélie", "roles": ["student"]},
            {"id": user4.id, "public_username": "Silvio", "roles": ["instructor"]},
            {"id": user1.id, "public_username": "Thomas", "roles": ["student"]},
        ],
    )
    response = self.client.get(
        "/api/v1.0/users/?role=student", content_type="application/json"
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [
            {
                "id": user3.id,
                "public_username": "Abba",
                "roles": ["moderator", "student"],
            },
            {"id": user2.id, "public_username": "Aurélie", "roles": ["student"]},
            {"id": user1.id, "public_username": "Thomas", "roles": ["student"]},
        ],
    )
    response = self.client.get(
        "/api/v1.0/users/?role=moderator", content_type="application/json"
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [
            {
                "id": user3.id,
                "public_username": "Abba",
                "roles": ["moderator", "student"],
            },
        ],
    )
    response = self.client.get(
        "/api/v1.0/users/?role=instructor", content_type="application/json"
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [
            {"id": user4.id, "public_username": "Silvio", "roles": ["instructor"]},
        ],
    )
def test_list_moderators_with_student_groups(self):
    """Creates a user with both the student and moderator roles: the user
    should appear in the student list, the moderator list, and the global
    list, but not in the "!moderator" (non-moderators) list."""
    api_user, lti_context = self._login_authorized_user_to_manage_moderators()
    user1 = UserFactory(
        public_username="Thomas", lti_consumer=api_user.lti_consumer
    )
    lti_context.sync_user_groups(user1, ["student", "moderator"])
    # should be part of list student
    response = self.client.get(
        "/api/v1.0/users/?role=student", content_type="application/json"
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [
            {
                "id": user1.id,
                "public_username": "Thomas",
                "roles": ["moderator", "student"],
            },
        ],
    )
    # should be part of list moderator
    response = self.client.get(
        "/api/v1.0/users/?role=moderator", content_type="application/json"
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [
            {
                "id": user1.id,
                "public_username": "Thomas",
                "roles": ["moderator", "student"],
            },
        ],
    )
    # should not be part of list !moderator
    response = self.client.get(
        "/api/v1.0/users/?role=!moderator", content_type="application/json"
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [],
    )
    # should be part of list of users
    response = self.client.get("/api/v1.0/users/", content_type="application/json")
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [
            {
                "id": user1.id,
                "public_username": "Thomas",
                "roles": ["moderator", "student"],
            },
        ],
    )
    # should not be part of list of not moderators
    response = self.client.get(
        "/api/v1.0/users/?role=!moderator", content_type="application/json"
    )
    self.assertEqual(response.status_code, 200)
    content = json.loads(response.content)
    self.assertEqual(
        content,
        [],
    )
    def test_list_moderators_with_instructor_groups(self):
        """Creates users with roles instructor and moderator, this user should be part
        of the list of instructor only. Instructors are excluded from moderator list."""
        api_user, lti_context = self._login_authorized_user_to_manage_moderators()
        user1 = UserFactory(
            public_username="Thomas", lti_consumer=api_user.lti_consumer
        )
        # Give the user both roles; the instructor role takes precedence in
        # the role filters below.
        lti_context.sync_user_groups(user1, ["instructor", "moderator"])
        # student list should be empty
        response = self.client.get(
            "/api/v1.0/users/?role=student", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [],
        )
        # moderator list should be empty because user1 is instructor
        response = self.client.get(
            "/api/v1.0/users/?role=moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [],
        )
        # instructor list should contain user1
        response = self.client.get(
            "/api/v1.0/users/?role=instructor", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [
                {
                    "id": user1.id,
                    "public_username": "Thomas",
                    "roles": ["instructor", "moderator"],
                },
            ],
        )
        # !moderator list should not contain user1 because user1 is instructor and excluded
        # from not moderators
        response = self.client.get(
            "/api/v1.0/users/?role=!moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [],
        )
        # the unfiltered user list still contains the instructor
        response = self.client.get("/api/v1.0/users/", content_type="application/json")
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [
                {
                    "id": user1.id,
                    "public_username": "Thomas",
                    "roles": ["instructor", "moderator"],
                },
            ],
        )
    def test_list_users_no_moderator_if_no_group_in_context(self):
        """Controls that list of moderators only concerns users that are part of
        users that have group in this context.

        The user below gets the moderator role group directly, without the
        context's base group, so no listing should include them.
        """
        api_user, lti_context = self._login_authorized_user_to_manage_moderators()
        user1 = UserFactory(
            public_username="Thomas", lti_consumer=api_user.lti_consumer
        )
        # add group moderator (but NOT the context base group — that is the
        # point of this test)
        group_moderator = lti_context.get_role_group(_FORUM_ROLE_MODERATOR)
        user1.groups.add(group_moderator)
        user1.save()
        # check user has group moderator
        self.assertCountEqual(
            [f"{lti_context.base_group_name}:role:moderator"],
            list(user1.groups.values_list("name", flat=True)),
        )
        # request users that are moderator
        response = self.client.get(
            "/api/v1.0/users/?role=moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        # should be empty because user has no other groups from this context
        self.assertEqual(content, [])
        # request all users
        response = self.client.get("/api/v1.0/users/", content_type="application/json")
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        # should be empty because user has no other groups from this context
        self.assertEqual(
            content,
            [],
        )
        # request all users that are not moderators
        response = self.client.get(
            "/api/v1.0/users/?role=!moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        # should be empty because user has no other groups from this context
        self.assertEqual(
            content,
            [],
        )
    def test_list_users_moderator_if_group_in_context(self):
        """Controls moderator list: a moderator holding the context base group
        appears in the moderator and unfiltered lists, not in !moderator."""
        api_user, lti_context = self._login_authorized_user_to_manage_moderators()
        user1 = UserFactory(
            public_username="Thomas", lti_consumer=api_user.lti_consumer
        )
        # add group moderator and base group of this context
        # (sync_user_groups also adds the base group, unlike the direct
        # groups.add used in the previous test)
        lti_context.sync_user_groups(user1, ["moderator"])
        # check user has group moderator
        self.assertCountEqual(
            [
                lti_context.base_group_name,
                f"{lti_context.base_group_name}:role:moderator",
            ],
            list(user1.groups.values_list("name", flat=True)),
        )
        # request users that are moderator
        response = self.client.get(
            "/api/v1.0/users/?role=moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        # user should be in the list because user is moderator and has the base group
        # from this context
        self.assertEqual(
            content,
            [{"id": user1.id, "public_username": "Thomas", "roles": ["moderator"]}],
        )
        # request all users
        response = self.client.get("/api/v1.0/users/", content_type="application/json")
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        # user should be in the list because user is moderator and has the base group
        # from this context
        self.assertEqual(
            content,
            [{"id": user1.id, "public_username": "Thomas", "roles": ["moderator"]}],
        )
        # request all users that are not moderators
        response = self.client.get(
            "/api/v1.0/users/?role=!moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        # should be empty because user is moderator
        self.assertEqual(
            content,
            [],
        )
    def test_list_users_are_active_users(self):
        """Controls that list of users and moderators only contains active
        users."""
        api_user, lti_context = self._login_authorized_user_to_manage_moderators()
        user1 = UserFactory(
            public_username="Thomas", lti_consumer=api_user.lti_consumer
        )
        user2 = UserFactory(
            public_username="Aurélie", lti_consumer=api_user.lti_consumer
        )
        # user3, user4 and user6 are inactive and must never be listed
        # (creation order of user4/user3 is irrelevant here).
        user4 = UserFactory(is_active=False, lti_consumer=api_user.lti_consumer)
        user3 = UserFactory(is_active=False, lti_consumer=api_user.lti_consumer)
        user5 = UserFactory(public_username="Théo", lti_consumer=api_user.lti_consumer)
        user6 = UserFactory(is_active=False, lti_consumer=api_user.lti_consumer)
        lti_context.sync_user_groups(user1, ["student"])
        lti_context.sync_user_groups(user2, ["student", "moderator"])
        lti_context.sync_user_groups(user3, ["student"])
        lti_context.sync_user_groups(user4, ["student", "moderator"])
        lti_context.sync_user_groups(user5, ["instructor", "moderator"])
        lti_context.sync_user_groups(user6, ["instructor", "moderator"])
        # only active student is listed
        response = self.client.get(
            "/api/v1.0/users/?role=student", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [
                {
                    "id": user2.id,
                    "public_username": "Aurélie",
                    "roles": ["moderator", "student"],
                },
                {"id": user1.id, "public_username": "Thomas", "roles": ["student"]},
            ],
        )
        # only active moderator is listed
        response = self.client.get(
            "/api/v1.0/users/?role=moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [
                {
                    "id": user2.id,
                    "public_username": "Aurélie",
                    "roles": ["moderator", "student"],
                }
            ],
        )
        # only active instructor is listed
        response = self.client.get(
            "/api/v1.0/users/?role=instructor", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [
                {
                    "id": user5.id,
                    "public_username": "Théo",
                    "roles": ["instructor", "moderator"],
                }
            ],
        )
        # only active user not moderator is listed
        response = self.client.get(
            "/api/v1.0/users/?role=!moderator", content_type="application/json"
        )
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(
            content,
            [{"id": user1.id, "public_username": "Thomas", "roles": ["student"]}],
        )
def test_api_can_manage_moderators_update_student_public_username_readonly(
self,
):
"""If public_username is present and changed it's not updating the user as its a
read only data"""
api_user, lti_context = self._login_authorized_user_to_manage_moderators()
# Creates user to update
update_user = UserFactory(
public_username="Théo", lti_consumer=api_user.lti_consumer
)
lti_context.sync_user_groups(update_user, ["student"])
# Check group moderator is not part of group list of the user
self.assertCountEqual(
[
lti_context.base_group_name,
f"{lti_context.base_group_name}:role:student",
],
list(update_user.groups.values_list("name", flat=True)),
)
data = {"roles": ["moderator"], "public_username": "Salomé"}
response = self.client.put(
f"/api/v1.0/users/{update_user.id}/",
json.dumps(data),
content_type="application/json",
)
self.assertEqual(response.status_code, 200)
# Check public_username has been ignored
self.assertEqual(update_user.public_username, "Théo")
# Check group moderator is now part of user's groups
self.assertCountEqual(
[
lti_context.base_group_name,
f"{lti_context.base_group_name}:role:student",
f"{lti_context.base_group_name}:role:moderator",
],
list(update_user.groups.values_list("name", flat=True)),
)
    def test_api_can_manage_moderators_update_student_id_param_ignored(
        self,
    ):
        """If id in body of request api is different from the id in the url is ignored.
        Only the user targeted in the url is updated."""
        api_user, lti_context = self._login_authorized_user_to_manage_moderators()
        # Creates user to update
        update_user = UserFactory(
            public_username="Théo", lti_consumer=api_user.lti_consumer
        )
        useless_user = UserFactory(lti_consumer=api_user.lti_consumer)
        lti_context.sync_user_groups(update_user, ["student"])
        lti_context.sync_user_groups(useless_user, ["student"])
        # Both users start as plain students (base group + student role group)
        self.assertCountEqual(
            [
                lti_context.base_group_name,
                f"{lti_context.base_group_name}:role:student",
            ],
            list(update_user.groups.values_list("name", flat=True)),
        )
        self.assertCountEqual(
            [
                lti_context.base_group_name,
                f"{lti_context.base_group_name}:role:student",
            ],
            list(useless_user.groups.values_list("name", flat=True)),
        )
        # in the body we target the other user
        # NOTE(review): "roles" is a bare string here while sibling tests send
        # a list — presumably the serializer accepts both; confirm.
        data = {"id": useless_user.id, "roles": "moderator"}
        response = self.client.put(
            f"/api/v1.0/users/{update_user.id}/",
            json.dumps(data),
            content_type="application/json",
        )
        self.assertEqual(response.status_code, 200)
        # Check group moderator is now part of user's groups
        self.assertCountEqual(
            [
                lti_context.base_group_name,
                f"{lti_context.base_group_name}:role:student",
                f"{lti_context.base_group_name}:role:moderator",
            ],
            list(update_user.groups.values_list("name", flat=True)),
        )
        # useless_user didn't get updated and still has no moderator group
        self.assertCountEqual(
            [
                lti_context.base_group_name,
                f"{lti_context.base_group_name}:role:student",
            ],
            list(useless_user.groups.values_list("name", flat=True)),
        )
|
nilq/baby-python
|
python
|
"""
33. How to import only every nth row from a csv file to create a dataframe?
"""
"""
Difficiulty Level: L2
"""
"""
Import every 50th row of BostonHousing dataset as a dataframe.
"""
"""
"""
|
nilq/baby-python
|
python
|
"""
Example prediction file.
"""
import os
import torch
import csv
import logging
import hydra
from tmb.data import AcclerationDataSetSchmutter
from tmb.model import FC_FFT
from sklearn.preprocessing import MaxAbsScaler
from hydra.core.config_store import ConfigStore
from config import tmbConfig
from pathlib import Path
# Setup Logger
logging.basicConfig(level=logging.INFO)
# Create ConfigStore
cs = ConfigStore.instance()
cs.store(name="tmb_config", node=tmbConfig)
@hydra.main(config_path="conf", config_name="config")
def predict_schmutter(cfg: tmbConfig) -> None:
    """Run a pretrained FC_FFT model over the Schmutter acceleration dataset
    and write one ``[target, prediction, absolute_error]`` CSV row per sample.

    All paths (checkpoint, output CSV, dataset) come from the hydra config.
    """
    model_name = "transfer_fft_15_hz.pt"
    output_name = "schmutter_15Hz_loaded"
    log_dir = cfg.dirs.log_dir
    model_path = Path(f"{cfg.setup.project_dir}/{cfg.dirs.model_dir}/{model_name}")
    output_path = Path(
        f"{cfg.setup.project_dir}/{cfg.dirs.output_dir}/{output_name}_predict.csv"
    )
    # Resolve the inference device once; it is reused for the checkpoint
    # map_location, the model and every input tensor.
    # BUG FIX: the original used torch.device("gpu"), which is not a valid
    # PyTorch device type and raised RuntimeError whenever CUDA was available.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    with open(output_path.resolve(), "w") as csvfile:
        writer = csv.writer(csvfile, delimiter=",")
        model = FC_FFT(log_path=os.path.join(log_dir, model_name), input_size=1000)
        model.load_state_dict(
            torch.load(model_path.resolve(), map_location=device)
        )
        model.to(device)
        model.eval()
        model.transfer_learning()
        accPath = Path(f"{cfg.dirs.schmutter_acc}").resolve()
        freqPath = Path(f"{cfg.dirs.schmutter_freq}/{cfg.paths.ssi_decay}").resolve()
        data_schmutter = AcclerationDataSetSchmutter(
            accPath,
            sampling_rate=1200,
            freq_path=freqPath,
            repeats_f=8,
            load_from_txt=True,
            x_scaler=MaxAbsScaler(),
        )
        # The zip only bounds the iteration count to the dataset length; the
        # actual samples come from __getitem__.
        for i, (_, _) in enumerate(zip(data_schmutter.x.T, data_schmutter.y)):
            x, y = data_schmutter.__getitem__(i)
            with torch.no_grad():
                # assumes __getitem__ returns a flat 1-D sample — TODO confirm
                x = torch.tensor(x.reshape(shape=(1, len(x))))
                x = x.to(device)
                predict_f = model(x)
            f = predict_f.item()
            logging.info(
                f"Frequenz: {y.item():.3f}, Prediction: {f:.3f}, Diff: {abs(y.item() - f):.3f}"
            )
            writer.writerow([y.item(), f, abs(y.item() - f)])


if __name__ == "__main__":
    predict_schmutter()
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class SerialConfig(AppConfig):
    """Django application configuration for the serial controllers app."""
    name = "controllers.serial"
|
nilq/baby-python
|
python
|
from typing import NamedTuple, Dict, List, Tuple
import re
from collections import defaultdict
class Bag(NamedTuple):
    # A bag color plus the bags (color -> count) it must directly contain.
    color: str
    contains: Dict[str, int]


def parse_line(line: str) -> Bag:
    """Parse one puzzle rule such as
    'light red bags contain 1 bright white bag, 2 muted yellow bags.'
    into a Bag record."""
    outer, _, inner = line.partition(" contain ")
    color = outer[:-5]  # strip the trailing " bags"
    inner = inner.rstrip(".")
    if inner == "no other bags":
        return Bag(color, {})
    contents: Dict[str, int] = {}
    for chunk in inner.split(", "):
        # Drop the trailing "bag"/"bags", then split "<count> <color>".
        chunk = re.sub(r"bags?$", "", chunk).strip()
        count_text, _, child_color = chunk.partition(" ")
        contents[child_color.strip()] = int(count_text)
    return Bag(color, contents)
def make_bags(raw: str) -> List[Bag]:
    """Parse one rule per line of *raw* into Bag records."""
    return list(map(parse_line, raw.split("\n")))
def parents(bags: List[Bag]) -> Dict[str, List[str]]:
    """Map each bag color to the list of colors that directly contain it."""
    child_to_parents: Dict[str, List[str]] = defaultdict(list)
    for bag in bags:
        for inner_color in bag.contains:
            child_to_parents[inner_color].append(bag.color)
    return child_to_parents
def can_eventually_contain(bags: List[Bag], color: str) -> List[str]:
    """Return every color that can (directly or transitively) contain *color*."""
    parent_map = parents(bags)
    # Depth-first walk up the containment graph starting from *color*.
    frontier = [color]
    reachable = set()
    while frontier:
        current = frontier.pop()
        for container in parent_map.get(current, []):
            if container not in reachable:
                reachable.add(container)
                frontier.append(container)
    return list(reachable)
def num_bags_inside(
    bags: List[Bag],
    color: str
) -> int:
    """Count how many bags must be carried inside one *color* bag."""
    by_color = {bag.color: bag for bag in bags}

    def _count(inner_color: str) -> int:
        # Each required child contributes itself plus everything nested in it.
        return sum(
            count * (1 + _count(child))
            for child, count in by_color[inner_color].contains.items()
        )

    return _count(color)
#
# UNIT TESTS
#
RAW = """light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags."""

RAW2 = """shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags."""

BAGS1 = make_bags(RAW)
BAGS2 = make_bags(RAW2)

# The fixtures above were parsed but never actually checked; assert the
# example answers from the puzzle statement so the "UNIT TESTS" header holds.
assert len(can_eventually_contain(BAGS1, "shiny gold")) == 4
assert num_bags_inside(BAGS1, "shiny gold") == 32
assert num_bags_inside(BAGS2, "shiny gold") == 126
#
# PROBLEMS
#
with open('input.txt') as f:
    raw = f.read()

bags = make_bags(raw)
# Part 1: number of colors that can eventually contain a shiny gold bag.
print(len(can_eventually_contain(bags, 'shiny gold')))
# Part 2: number of bags required inside one shiny gold bag.
print(num_bags_inside(bags, "shiny gold"))
|
nilq/baby-python
|
python
|
import torchvision
import os
# Export the train/test images and their labels from the downloaded archives.
# Training images go to data\train_pic, their labels to data\train_label.txt.
# Test images go to data\test_pic, their labels to data\test_label.txt.
# Labels are comma-separated; images are stored as i.jpg, meaning the i-th
# entry of the label file is that image's label.
def data_visual():
    """Download the MNIST train/test splits and export them as individual
    JPEG images plus one comma-separated label file per split.

    Images are written as ``<i>.jpg`` where ``i`` is the position of the
    sample's label inside the corresponding ``*_label.txt`` file.
    """
    # Download (or reuse a cached copy of) both splits.
    mnist_train = torchvision.datasets.MNIST('./data', train=True, download=True)
    mnist_test = torchvision.datasets.MNIST('./data', train=False, download=True)

    def _export_split(dataset, pic_dir, label_path):
        # Shared helper for both splits: save each image, append its label.
        # `with` guarantees the label file is closed even on error.
        os.makedirs(pic_dir, exist_ok=True)
        with open(label_path, 'w') as f:
            for i, (img, label) in enumerate(dataset):
                img.save(pic_dir + "/" + str(i) + ".jpg")
                f.write(str(label) + ',')

    _export_split(mnist_train, './data/train_pic', './data/train_label.txt')
    _export_split(mnist_test, './data/test_pic', './data/test_label.txt')
|
nilq/baby-python
|
python
|
"""
Python utilities to use it from ein.el
Copyright (C) 2012- Takafumi Arakaki
Author: Takafumi Arakaki <aka.tkf at gmail.com>
ein.py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ein.py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ein.py. If not, see <http://www.gnu.org/licenses/>.
"""
def export_nb(nb_json, format):
    """Convert a notebook given as a JSON string and print the export to stdout.

    :param nb_json: notebook content as a JSON string
    :param format: exporter name understood by nbconvert (e.g. "python", "html")
    """
    import IPython.nbconvert as nbconvert
    import IPython.nbformat as nbformat
    nb = nbformat.reads(nb_json, nbformat.NO_CONVERT)
    output = nbconvert.export_by_name(format, nb)
    # export_by_name returns a (body, resources) tuple; only the body is printed.
    print(output[0])
def _find_edit_target_012(*args, **kwds):
    """%edit target resolution for IPython 0.12, where _find_edit_target is an
    instance method on the shell."""
    from IPython.core.interactiveshell import InteractiveShell
    inst = InteractiveShell.instance()
    return inst._find_edit_target(*args, **kwds)
def _find_edit_target_013(*args, **kwds):
    """%edit target resolution for IPython >= 0.13, where the logic moved to
    CodeMagics (imported at module level further down this file)."""
    from IPython.core.interactiveshell import InteractiveShell
    inst = InteractiveShell.instance()
    return CodeMagics._find_edit_target(inst, *args, **kwds)
def _find_edit_target_python(name):
from inspect import getsourcefile, getsourcelines
try:
obj = eval(name)
except NameError:
return False
else:
sfile = getsourcefile(obj)
sline = getsourcelines(obj)[-1]
if sfile and sline:
return(sfile, sline, False)
else:
return False
# IPython >= 0.13 moved %edit target resolution onto the CodeMagics class;
# fall back to the 0.12 instance-method variant when it is unavailable.
try:
    from IPython.core.magics import CodeMagics
    _find_edit_target = _find_edit_target_013
except ImportError:
    _find_edit_target = _find_edit_target_012
def set_figure_size(*dim):
    """Set matplotlib's default figure size to *dim* (width, height in inches).

    Raises RuntimeError when matplotlib is not importable.
    """
    try:
        from matplotlib.pyplot import rcParams
    except ImportError as exc:
        # BUG FIX: only a missing matplotlib should be reported as "not
        # installed" — the original bare `except:` masked every other failure
        # (and even KeyboardInterrupt) behind the same message.
        raise RuntimeError("Matplotlib not installed in this instance of python!") from exc
    rcParams['figure.figsize'] = dim
def find_source(name):
    """Given an object as string, `name`, print its place in source code."""
    # FIXME: use JSON display object instead of stdout
    # Try plain-Python introspection first, then fall back to IPython's
    # %edit target resolution.
    ret = _find_edit_target_python(name) or _find_edit_target(name, {}, [])
    if ret:
        (filename, lineno, use_temp) = ret
        # A temp-file target means there is no real on-disk source to report.
        if not use_temp:
            print(filename)
            print(lineno)
            return
    raise RuntimeError("Source code for {0} cannot be found".format(name))
def run_docstring_examples(obj, verbose=True):
    """Run *obj*'s doctest examples against the current IPython user namespace."""
    from IPython.core.interactiveshell import InteractiveShell
    import doctest
    inst = InteractiveShell.instance()
    # Use the interactive namespace so examples can reference user variables.
    globs = inst.user_ns
    return doctest.run_docstring_examples(obj, globs, verbose=verbose)
def print_object_info_for(obj):
    """Print IPython's inspection info for *obj* as JSON on stdout."""
    import IPython.core.oinspect
    import json
    inspector = IPython.core.oinspect.Inspector()
    try:
        print(json.dumps(inspector.info(obj)))
    except NameError:
        # NOTE(review): it is unclear how inspector.info raises NameError
        # here — kept as-is; falls back to the "no info" payload.
        print(json.dumps(inspector.noinfo()))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
if "DJANGO_SETTINGS_MODULE" not in os.environ:
sys.exit("Settings cannot be imported, because environment variable DJANGO_SETTINGS_MODULE is undefined")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename the weather fields to drop unit suffixes and add a
    precipitation-type field (presumably aligning field names with the
    upstream weather API — confirm against the weather client code)."""

    dependencies = [
        ('mapApp', '0006_auto_20150820_1631'),
    ]

    operations = [
        migrations.RenameField(
            model_name='weather',
            old_name='precip_mmh',
            new_name='precip_intensity',
        ),
        migrations.RenameField(
            model_name='weather',
            old_name='precip_prob',
            new_name='precip_probability',
        ),
        migrations.RenameField(
            model_name='weather',
            old_name='temperature_c',
            new_name='temperature',
        ),
        migrations.RenameField(
            model_name='weather',
            old_name='wind_dir_deg',
            new_name='wind_bearing',
        ),
        migrations.RenameField(
            model_name='weather',
            old_name='wind_dir_str',
            new_name='wind_bearing_str',
        ),
        migrations.RenameField(
            model_name='weather',
            old_name='windspeed_kmh',
            new_name='wind_speed',
        ),
        migrations.AddField(
            model_name='weather',
            name='precip_type',
            field=models.CharField(default='', max_length=50, verbose_name=b'Type of precipitation'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='weather',
            name='wind_bearing',
            field=models.FloatField(verbose_name=b'Wind bearing (deg)'),
        ),
        migrations.AlterField(
            model_name='weather',
            name='wind_bearing_str',
            field=models.CharField(max_length=5, verbose_name=b'Wind bearing'),
        ),
    ]
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from .quicksectx import Interval, IntervalNode, IntervalTree, distancex
from .version import __version__
|
nilq/baby-python
|
python
|
from pydantic.dataclasses import dataclass
from typing import List
from .cost_center import CostCenter
@dataclass
class CostCenters:
    """One paginated page of cost centers."""
    offset: int  # index of the first item of this page in the full result set
    limit: int  # maximum number of items in this page
    cost_centers: List[CostCenter]  # the page contents
|
nilq/baby-python
|
python
|
from contextlib import contextmanager
import logging
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
    """
    Temporarily suppress every logging message emitted inside the ``with``
    body, restoring the previous module-level disable threshold on exit.

    :param highest_level: the maximum logging level in use.
        This would only need to be changed if a custom level greater than
        CRITICAL is defined.
    """
    # logging.root.manager.disable is undocumented but non-private; it is the
    # only way to read back the current module-level override so it can be
    # restored afterwards.
    saved_disable_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        logging.disable(saved_disable_level)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 14:24:59 2019
@author: chalbeisen
This program is used for saving filenames of .wav files which are already downloaded into a numpy file
"""
import numpy as np
import os
import re
# global variables
# audio_src defines directories where .wav files are saved
# audio_dest defines where the numpy file should be saved
dirs = {'audio_src':['D:/Bachelorarbeit/audio/audio_speech/500000/','D:/Bachelorarbeit/audio/audio_speech/600000/'],'audio_dest':'D:/Bachelorarbeit/audio_download_500000_600000/'}#'D:/audio_speech/'}
audios = []
# get audios from different directories
# (runs at import time: collects every .wav filename from all source dirs)
for i in range(0, len(dirs['audio_src'])):
    for file in os.listdir(dirs['audio_src'][i]):
        if file.endswith(".wav"):
            audios.append(file)
audios.sort()
'''
------------------------------------------------------------------------------
desc: save filenames of .wav files which are already downloaded into a numpy file
param: -
return: -
------------------------------------------------------------------------------
'''
def ids_to_np():
    """Extract the leading numeric id from every downloaded .wav filename in
    the module-level ``audios`` list and save the sorted ids to
    ``ids_finished.npy`` in ``dirs['audio_dest']``."""
    # e.g. "12345_foo.wav" -> 12345: split the stem on its leading digit run.
    # (replaces the original index-based loop with per-item debug prints and
    # a post-hoc in-place sort)
    idx = sorted(
        int(re.split(r'(^\d+)', os.path.splitext(name)[0])[1])
        for name in audios
    )
    np.save(dirs['audio_dest'] + 'ids_finished.npy', idx)
def main():
    """Entry point: persist the ids of already-downloaded audio files."""
    ids_to_np()

if __name__== "__main__":
    main()
|
nilq/baby-python
|
python
|
import common.networking as networking
from tqdm import tqdm
from multiprocessing import Process, Manager, Semaphore
from pymongo import MongoClient
import os
import zlib
import re
import Levenshtein
from datetime import datetime
# Compare the levenshtein distance difference percentage with threshold
def compare_difference_percentage(str1, str2, percentage):
    """Return True when the Levenshtein distance between the two strings,
    expressed as a percentage of the longer string's length, exceeds
    *percentage*."""
    longest = max(len(str1), len(str2))
    if longest == 0:
        # BUG FIX: two empty strings are identical (0% difference); the
        # original divided by zero here.
        return False
    return 100 * Levenshtein.distance(str1, str2) / float(longest) > percentage
# Filter the document to match the specified date
def search_filter_date(arg, doc, spec_date):
    """Return the version of ``doc[arg.lower()]`` in effect at *spec_date*
    (format 'dd/mm/YYYY'), or the most recent version when no date is given.

    Assumes doc[arg.lower()] is a list of versions ordered oldest-to-newest,
    each carrying a datetime under 'date' — TODO confirm against the schema.
    Returns None when every stored version is newer than *spec_date*.
    """
    arg_key = arg.lower()
    # Handle if no spec_date was specified
    if spec_date != None:
        # Transform spec_date into a datetime object
        spec_date_obj = datetime.strptime(spec_date, '%d/%m/%Y')
        # Get the version of the document being first older or same than spec_date
        # (walks the version list from newest to oldest)
        for i in range(len(doc[arg_key])):
            doc_date = doc[arg_key][len(
                doc[arg_key]) - i - 1]['date'].strftime('%Y-%m-%d')
            doc_date_obj = datetime.strptime(doc_date, '%Y-%m-%d')
            if doc_date_obj <= spec_date_obj:
                return doc[arg_key][len(doc[arg_key]) - i - 1]
        # If no document was stored at spec_date
        return None
    # If no date was specified perform search on the most recent version
    else:
        return doc[arg_key][-1]
# Identify the regex and set parameters
def search_regex_setup(arg_param):
    """Build a compiled regex from a '/pattern/FLAG/' style argument.

    The optional single-letter flag maps onto the corresponding ``re`` flag;
    a missing or unknown flag falls back to case-insensitive matching.
    """
    # Split on '/' and drop empty pieces, so '/pat/I/' -> ['pat', 'I'].
    parts = [piece for piece in arg_param.split('/') if piece != '']
    pattern_text = parts[0]
    flag_map = {
        'S': re.S, 'I': re.I, 'M': re.M,
        'L': re.L, 'U': re.U, 'X': re.X, 'A': re.A,
    }
    # BUG FIX: the original looped over a flag table and left `flag` unbound
    # (UnboundLocalError) whenever an unknown flag letter was supplied; a
    # dict lookup with a default fixes that.
    flag = flag_map.get(parts[1], re.I) if len(parts) > 1 else re.I
    pattern = re.compile(pattern_text, flag)
    return (pattern)
# Perform OR operation between 2 lists
def search_or_operator(list1, list2):
    """Return the order-preserving union of both lists, duplicates dropped.

    Membership is tested with ``in`` (linear) because the elements are
    documents (dicts), which are unhashable and rule out a set-based union.
    """
    union = []
    for element in list1 + list2:
        if element not in union:
            union.append(element)
    return union
# Perform AND operation between 2 lists
def search_and_operator(list1, list2):
    """Return the elements of list1 that also appear in list2, preserving
    list1's order and duplicates."""
    return [element for element in list1 if element in list2]
# Remove the html and _id from a database document
def search_clean_doc(doc):
    """Return a shallow copy of *doc* without its 'html' and '_id' fields.

    The input document itself is left untouched.
    """
    # Make a copy so the caller's document is not mutated.
    cleaned = doc.copy()
    # dict.pop with a default replaces the original try/del/except-pass
    # pattern, which silently swallowed *every* exception, not just a
    # missing key.
    cleaned.pop('html', None)
    cleaned.pop('_id', None)
    return cleaned
def remove_specific_key(the_dict, rubbish):
    """Recursively delete every occurrence of the key *rubbish* from
    *the_dict*, descending into nested dicts and into dicts held inside
    lists. Mutates in place; returns None."""
    the_dict.pop(rubbish, None)
    for value in the_dict.values():
        # Recurse into nested dicts ...
        if isinstance(value, dict):
            remove_specific_key(value, rubbish)
        # ... and into dicts stored inside list values.
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, dict):
                    remove_specific_key(element, rubbish)
def file_input(args_argparse):
    """Crawl every onion URL listed (one per line) in the file named by
    ``args_argparse.URL``, fanning work out over processes.

    A semaphore caps concurrency at 20 live processes; each worker runs
    ``args_argparse.func`` and records its findings in a shared dict.
    Returns a plain-dict copy with all 'html' fields stripped.
    """
    manager = Manager()
    onion_sites = manager.dict()
    sema = Semaphore(20)
    jobs = []
    session = networking.session_tor()
    with open(args_argparse.URL, 'r') as f:
        contents_list = f.read().splitlines()
    for content in tqdm(contents_list):
        sema.acquire()
        # NOTE(review): args_argparse.URL is overwritten each iteration and
        # the same namespace object is handed to every process — presumably
        # the fork snapshot makes this safe; confirm.
        args_argparse.URL = content
        p = Process(target=args_argparse.func, args=(
            args_argparse, onion_sites, sema, session))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    result = onion_sites.copy()
    remove_specific_key(result, 'html')
    return result
def db_input(args_argparse):
    """Crawl every onion address stored in the ``uncrawled_onions`` Mongo
    collection, fanning work out over up to 64 processes.

    Mongo credentials and host come from the environment; each worker runs
    ``args_argparse.func`` and records results into a shared dict.
    Returns a plain-dict copy with all 'html' fields stripped.
    """
    manager = Manager()
    onion_sites = manager.dict()
    sema = Semaphore(64)
    jobs = []
    session = networking.session_tor()
    client = MongoClient(
        'mongodb://%s:%s@' % (os.environ['ME_CONFIG_BASICAUTH_USERNAME'], os.environ['ME_CONFIG_BASICAUTH_PASSWORD']) +
        os.environ["MONGO_IP"])
    db = client.uncrawled_onions
    try:
        uncrawled_onions = db.uncrawled_onions.find()
    except:
        print('No addresses stored in database, aborting.')
        exit()
    for url in tqdm(uncrawled_onions):
        sema.acquire()
        # NOTE(review): the shared namespace is mutated per iteration while
        # earlier workers may still hold it — presumably safe via fork
        # snapshotting; confirm.
        args_argparse.URL = url['site']
        args_argparse.indexers = url['indexers']
        p = Process(target=args_argparse.func, args=(
            args_argparse, onion_sites, sema, session))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    result = onion_sites.copy()
    remove_specific_key(result, 'html')
    return result
def compress_text(text: str) -> bytes:
    """Deflate-compress UTF-8 encoded *text* as a raw stream (wbits=-15, i.e.
    no zlib header/trailer), matching what decompress_text expects."""
    deflater = zlib.compressobj(zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, -15)
    payload = deflater.compress(text.encode('utf-8'))
    return payload + deflater.flush()
def decompress_text(compressed_text: bytes) -> str:
    """Inverse of compress_text: inflate a raw deflate stream (wbits=-15) and
    decode it as UTF-8."""
    raw = zlib.decompress(compressed_text, wbits=-15, bufsize=zlib.DEF_BUF_SIZE)
    return raw.decode("utf-8")
|
nilq/baby-python
|
python
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from utils import utils
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, need_spectral_norm=False):
    """3x3 convolution with padding, optionally wrapped in spectral norm."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)
    if not need_spectral_norm:
        return conv
    # Spectral normalization constrains the layer's Lipschitz constant.
    return nn.utils.spectral_norm(conv, name='weight')
def norm2d(planes, num_groups=0):
    '''
    Return GroupNorm when num_groups > 0, plain BatchNorm2d otherwise.
    copy and modify from: https://github.com/chengyangfu/pytorch-groupnormalization/blob/master/resnet.py
    '''
    return nn.GroupNorm(num_groups, planes) if num_groups > 0 else nn.BatchNorm2d(planes)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs with a residual skip."""
    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, opt, inplanes, planes, stride=1, downsample=None, group_norm=0):
        super(BasicBlock, self).__init__()
        # Optional spectral norm on both 3x3 convs is driven by the
        # experiment options object.
        self.conv1 = conv3x3(inplanes, planes, stride, need_spectral_norm=opt.need_spectral_norm)
        self.bn1 = norm2d(planes, group_norm)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, need_spectral_norm=opt.need_spectral_norm)
        self.bn2 = norm2d(planes, group_norm)
        # When given, downsample projects the identity to the output shape.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        # Residual connection, then the final activation.
        out += identity
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + skip.

    BUG FIX: ``opt`` is now the first constructor argument, mirroring
    BasicBlock. ResNet._make_layer always constructs blocks as
    ``block(self.opt, self.inplanes, planes, ...)``, so the original
    signature (without ``opt``) received the options object in place of
    ``inplanes`` and crashed for resnet50/101/152. The 3x3 conv now also
    honors opt.need_spectral_norm, consistent with BasicBlock.
    """
    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, opt, inplanes, planes, stride=1, downsample=None, group_norm=0):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = norm2d(planes, group_norm)
        self.conv2 = conv3x3(planes, planes, stride,
                             need_spectral_norm=opt.need_spectral_norm)
        self.bn2 = norm2d(planes, group_norm)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = norm2d(planes * self.expansion, group_norm)
        self.relu = nn.ReLU(inplace=True)
        # When given, downsample projects the identity to the output shape.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        # Residual connection, then the final activation.
        out += identity
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """ResNet backbone built from BasicBlock/Bottleneck stages.

    Args:
        opt: options namespace forwarded to every residual block.
        block: residual block class (must expose ``expansion``).
        layers: number of blocks in each of the four stages.
        input_features: channels of the input image (default 3).
        num_classes: output size of the final fc layer.
        zero_init_residual: zero the last norm layer of each block so each
            block starts as an identity mapping (arXiv:1706.02677).
        group_norm: if > 0, use GroupNorm with this many groups instead of
            BatchNorm throughout (via norm2d).
    """
    def __init__(self, opt, block, layers, input_features: int = 3, num_classes: int = 1000, zero_init_residual=False, group_norm=0):
        super(ResNet, self).__init__()
        self.opt = opt
        # Running channel count; mutated by each _make_layer call below,
        # so the four calls must stay in this order.
        self.inplanes = 64
        self.conv1 = nn.Conv2d(input_features, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm2d(64, group_norm)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], group_norm=group_norm)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, group_norm=group_norm)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, group_norm=group_norm)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, group_norm=group_norm)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Kaiming init for convs; unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        # (must run after the generic init loop above, which it overrides)
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, group_norm=0):
        """Stack ``blocks`` residual blocks; only the first may downsample."""
        downsample = None
        # A projection shortcut is needed whenever the spatial size or the
        # channel width changes across the block.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm2d(planes * block.expansion, group_norm),
            )
        layers = []
        layers.append(block(self.opt, self.inplanes,
                            planes, stride, downsample, group_norm))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.opt, self.inplanes, planes, group_norm=group_norm))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Stem -> 4 residual stages -> global average pool -> logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet18(opt, pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        opt: options namespace forwarded to the residual blocks (must
            provide ``need_spectral_norm`` and ``group_norm``).
        pretrained (bool): If True, loads ImageNet weights. When a strict
            load fails (e.g. different ``num_classes`` or
            ``input_features``), falls back to a partial load that skips
            the final fc layer.
    """
    model = ResNet(opt, BasicBlock, [2, 2, 2, 2], group_norm=opt.group_norm, **kwargs)
    if pretrained:
        try:
            model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
            content_list = []
            content_list += ['Imagenet pretrained weights fully loaded']
            utils.print_notification(content_list)
        # A strict load_state_dict raises RuntimeError on key/shape
        # mismatch; the original bare except also swallowed
        # KeyboardInterrupt and unrelated errors.
        except RuntimeError:
            pretrained_dict = model_zoo.load_url(model_urls['resnet18'])
            model_dict = model.state_dict()
            # 1. filter out unnecessary keys
            pretrained_dict = {k: v for k,
                               v in pretrained_dict.items() if k in model_dict}
            # 2. pop-out fc (num_classes may differ)
            pretrained_dict.pop('fc.weight', None)
            pretrained_dict.pop('fc.bias', None)
            # 3. overwrite entries in the existing state dict
            model_dict.update(pretrained_dict)
            # 4. load the new state dict
            model.load_state_dict(model_dict)
            content_list = []
            content_list += ['Imagenet pretrained weights partially loaded']
            content_list += [str(pretrained_dict.keys())]
            utils.print_notification(content_list)
    return model
def resnet34(opt=None, pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        opt: options namespace forwarded to the residual blocks. The
            original call omitted it, so ``ResNet`` received
            ``opt=BasicBlock, block=[3, 4, 6, 3]`` and raised TypeError.
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(opt, BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return model
def resnet50(opt=None, pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        opt: options namespace forwarded to the residual blocks. The
            original call omitted it, so ``ResNet`` received
            ``opt=Bottleneck, block=[3, 4, 6, 3]`` and raised TypeError.
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(opt, Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
def resnet101(opt=None, pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        opt: options namespace forwarded to the residual blocks. The
            original call omitted it, so ``ResNet`` received
            ``opt=Bottleneck, block=[3, 4, 23, 3]`` and raised TypeError.
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(opt, Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
def resnet152(opt=None, pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        opt: options namespace forwarded to the residual blocks. The
            original call omitted it, so ``ResNet`` received
            ``opt=Bottleneck, block=[3, 8, 36, 3]`` and raised TypeError.
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(opt, Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model
|
nilq/baby-python
|
python
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Module under test
import bokeh.palettes as pal # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_cmap_generator_function() -> None:
    """Each generator called with 256 must return its full 256-color palette."""
    generators = [
        (pal.viridis, pal.Viridis256),
        (pal.magma, pal.Magma256),
        (pal.plasma, pal.Plasma256),
        (pal.inferno, pal.Inferno256),
        (pal.gray, pal.Greys256),
        (pal.grey, pal.Greys256),
        (pal.turbo, pal.Turbo256),
    ]
    for fn, palette in generators:
        assert fn(256) == palette
    assert pal.diverging_palette(pal.Reds9, pal.Greys9, n=18, midpoint=0.5) == pal.Reds9 + pal.Greys9[::-1]
def test_all_palettes___palettes__() -> None:
    """__palettes__ must enumerate every palette in all_palettes exactly once."""
    total = sum(len(group) for group in pal.all_palettes.values())
    assert total == len(pal.__palettes__)
def test_palettes_dir() -> None:
    """dir(pal) exposes the generator functions and hides dunder noise."""
    exposed = dir(pal)
    for generator_name in ('viridis', 'cividis', 'magma', 'inferno', 'turbo'):
        assert generator_name in exposed
    assert '__new__' not in exposed
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
nilq/baby-python
|
python
|
# Package name constant, presumably consumed by setup/packaging tooling
# for these Advent-of-Code utilities -- confirm against the build config.
name = "aoc_utils"
|
nilq/baby-python
|
python
|
import random
import torch
from collections import namedtuple
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))


class ReplayMemory(object):
    """Fixed-capacity ring buffer of transitions with uniform random sampling."""

    def __init__(self, args, capacity):
        self.device = args.device
        self.capacity = capacity
        self.transitions = []
        self.position = 0  # next write index; wraps at capacity
        self.discount = args.discount

    def append(self, state, action, next_state, reward):
        """Store one transition, overwriting the oldest entry once full."""
        entry = Transition(state, action, next_state, reward)
        if self.position < len(self.transitions):
            self.transitions[self.position] = entry
        else:
            self.transitions.append(entry)
        # advance the write cursor through the ring
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return ``batch_size`` random transitions, column-wise.

        States stay as plain tuples; actions/rewards become tensors on the
        configured device.
        """
        chosen = random.sample(self.transitions, batch_size)
        states, actions, next_states, rewards = zip(*chosen)
        return (
            states,
            torch.tensor(actions, dtype=torch.int64, device=self.device),
            next_states,
            torch.tensor(rewards, dtype=torch.float32, device=self.device),
        )

    def __len__(self):
        return len(self.transitions)
|
nilq/baby-python
|
python
|
import argparse
import json
import pandas as pd
from detoxify.bias_metrics import (
IDENTITY_COLUMNS,
MODEL_NAME,
TOXICITY_COLUMN,
calculate_overall_auc,
compute_bias_metrics_for_model,
convert_dataframe_to_bool,
get_final_metric,
)
def main():
    """Compute and print unintended-bias metrics for saved model scores.

    Reads the module-level ``TEST`` path (set in the __main__ guard).
    """
    with open(TEST) as f:
        results = json.load(f)

    test_private_path = "jigsaw_data/jigsaw-unintended-bias-in-toxicity-classification/test_private_expanded.csv"
    test_private = convert_dataframe_to_bool(pd.read_csv(test_private_path))
    test_private[MODEL_NAME] = [s[0] for s in results["scores"]]

    bias_metrics_df = compute_bias_metrics_for_model(test_private, IDENTITY_COLUMNS, MODEL_NAME, TOXICITY_COLUMN)
    print(bias_metrics_df)

    overall_auc = calculate_overall_auc(test_private, MODEL_NAME)
    print(get_final_metric(bias_metrics_df, overall_auc))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("res_path", type=str, help="path to the saved results file")
    args = parser.parse_args()
    # main() reads the module-level TEST global rather than taking a parameter.
    TEST = args.res_path
    main()
|
nilq/baby-python
|
python
|
#%%
import pandas as pd
from pandas.core.frame import DataFrame
import requests
from bs4 import BeautifulSoup
import Levenshtein as lev
# %%
# Load the menu search results scraped earlier.
search_result = pd.read_csv("Web Crawling Data/메뉴검색결과.csv", index_col=0)
search_result
# %%
# Strip every non-Hangul character.
# regex=True is required: pandas >= 2.0 treats str.replace patterns as
# literal strings by default, so this character class would not match.
search_result['검색결과'] = search_result['검색결과'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", regex=True)
search_result
# %%
# Score how similar each menu name is to its search result using the
# Levenshtein ratio, and store the score as a new column.
search_result['Lev Ratio'] = [
    lev.ratio(str(row['메뉴이름']), str(row['검색결과']))
    for _, row in search_result.iterrows()
]
search_result
# %%
# For each menu name keep only the row with the highest Levenshtein score.
is_best = search_result.groupby(['메뉴이름'])['Lev Ratio'].transform(max) == search_result['Lev Ratio']
info_data = search_result[is_best]
info_data = info_data.drop_duplicates(subset=['메뉴이름'], keep='first')
info_data = info_data.reset_index(drop=True)
info_data
# %%
# Build the absolute URLs that need to be requested.
urls = ['https://www.myfitnesspal.com' + str(part_url) for part_url in info_data['URL']]
urls
# %%
# Fetch the URLs concurrently, 10 workers at a time.
from concurrent.futures import ThreadPoolExecutor

def get_url(url):
    # Thin wrapper so ThreadPoolExecutor.map can fan out requests.get.
    return requests.get(url)

with ThreadPoolExecutor(max_workers=10) as pool:
    # Responses come back in the same order as ``urls``.
    response_list = list(pool.map(get_url,urls))
# %%
# Parse the nutrition facts out of a MyFitnessPal food page.
def get_nutrition_value(response_content: str) -> list:
    """Extract [serving, calories, carbs, fat, protein, sodium, cholesterol]
    from a MyFitnessPal food-page body (all values kept as strings).

    NOTE(review): the CSS class names and positional indices below are tied
    to MyFitnessPal's current page markup -- re-verify after site changes.
    """
    soup = BeautifulSoup(response_content, 'lxml')
    # serving size (grams)
    quantity = soup.find("div", {"class": "MuiSelect-root MuiSelect-select MuiSelect-selectMenu MuiInputBase-input MuiInput-input"})
    quantity = quantity.text
    # calories
    calorie = soup.find("span", {"class": "title-cgZqW"})
    calorie = calorie.text
    # carbs, fat, protein -- [:-1] strips the trailing unit/percent character
    carb_fat_protein = soup.find_all("span", {"class": "title-1P2uF"})
    carb = carb_fat_protein[0].text
    carb = carb[:-1]
    fat = carb_fat_protein[1].text
    fat = fat[:-1]
    protein = carb_fat_protein[2].text
    protein = protein[:-1]
    # sodium, cholesterol -- indices 2 and 3 of the subtext rows; the first
    # whitespace-separated token is the numeric value
    sodium_cholesterol = soup.find_all("div", {"class": "subtext-2_Vtc"})
    sodium = sodium_cholesterol[2].text
    sodium = sodium.split()[0]
    cholesterol = sodium_cholesterol[3].text
    cholesterol = cholesterol.split()[0]
    # return everything as one flat list
    return [quantity, calorie, carb, fat, protein, sodium, cholesterol]
# %%
# Collect the nutrition info for every menu item.
# DataFrame.append was deprecated and removed in pandas 2.0; collect the
# rows in a plain list and build the frame once instead.
_nutrition_rows = []
for index, response in enumerate(response_list):
    nutritions = get_nutrition_value(response.content)
    nutritions.insert(0, info_data['메뉴이름'][index])
    _nutrition_rows.append(nutritions)
nutrition_value = pd.DataFrame(
    _nutrition_rows,
    columns=['메뉴이름', '기준양', '기준열량', '탄수화물', '지방', '단백질', '나트륨', '콜레스트롤'])
nutrition_value
# %%
# Load the current menu table.
all_menu = pd.read_csv("All Menu (Various Versions)/국방부메뉴_v1.0.csv", index_col=0)
all_menu
# %%
# Strip non-Hangul characters. regex=True is required: pandas >= 2.0
# treats str.replace patterns as literal strings by default.
all_menu['메뉴이름'] = all_menu['메뉴이름'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", regex=True)
all_menu
# %%
# Join the nutrition facts onto the menu table by menu name.
all_menu_with_nutrition = all_menu.merge(nutrition_value, how = 'inner', on = ['메뉴이름'])
all_menu_with_nutrition
# %%
# Drop duplicate menu names and renumber the index.
all_menu_with_nutrition = (
    all_menu_with_nutrition
    .drop_duplicates(subset=['메뉴이름'], keep='first')
    .reset_index(drop=True)
)
all_menu_with_nutrition
# %%
# Persist the merged table.
all_menu_with_nutrition.to_csv("All Menu (Various Versions)/국방부메뉴_v2.0.csv")
|
nilq/baby-python
|
python
|
from discord import TextChannel, Role, User, utils
from discord.ext.commands import Cog, Context, group, check, guild_only
from bot.core.bot import Bot
class Dungeon(Cog):
    """Quarantine users whose accounts are younger than a configured age.

    Per-guild settings (status, announcement channel, message, minimum
    account age, bypass list) are persisted in the ``public.dungeon``
    table through the bot's asyncpg pool.
    """

    def __init__(self, bot: Bot) -> None:
        self.bot = bot

    @group(autohelp=True)
    @guild_only()
    # commands.check() expects a predicate callable; the original
    # check(manage_guild=True) raised TypeError. Gate on the invoker's
    # Manage Guild permission instead.
    @check(lambda ctx: ctx.author.guild_permissions.manage_guild)
    async def dungeon(self, ctx: Context) -> None:
        """Main dungeon commands."""
        pass

    @dungeon.command()
    async def toggle(self, ctx: Context) -> None:
        """Toggle the dungeon on or off."""
        # the cog never defined self.pool; the pool lives on the bot
        kick_lock = ban_lock = False  # defaults when no lock row exists yet
        async with self.bot.pool.acquire(timeout=5) as database:
            for row in await database.fetch("SELECT * FROM public.lock"):
                kick_lock = row["kick_lock"]
                ban_lock = row["ban_lock"]
        if kick_lock and ban_lock:
            await ctx.send("Please disable the server locks, before toggling the dungeon lock!")
            return
        dungeon_enabled = False  # default when no dungeon row exists yet
        async with self.bot.pool.acquire(timeout=5) as database:
            for row in await database.fetch("SELECT * FROM public.dungeon"):
                dungeon_enabled = row["dungeon_status"]
        new_status = not dungeon_enabled
        async with self.bot.pool.acquire() as database:
            await database.execute(
                """
                INSERT INTO public.dungeon (guild_id, dungeon_status) VALUES ($1, $2)
                ON CONFLICT (guild_id) DO UPDATE SET dungeon_status=$2
                """,
                ctx.guild.id,
                new_status
            )
        # report the state we just wrote (the original reported the old one)
        await ctx.send(f"Dungeon enabled: {new_status}.")

    @dungeon.command()
    async def announce(self, ctx: Context, channel: TextChannel) -> None:
        """Sets the announcement channel for users moved to the dungeon."""
        async with self.bot.pool.acquire() as database:
            await database.execute(
                """
                INSERT INTO public.dungeon (guild_id, announcement_channel) VALUES ($1, $2)
                ON CONFLICT (guild_id) DO UPDATE SET announcement_channel=$2
                """,
                ctx.guild.id,
                channel.id
            )
        await ctx.send(
            f"Quarantined User announcement channel set to: {channel.mention}."
        )

    @dungeon.command()
    async def message(self, ctx: Context, message: str = None) -> None:
        """Message to be sent when kick or ban is enabled for Dungeoned users."""
        if not message:
            message = "You have been banned due to lack of required days, to join the Server."
        async with self.bot.pool.acquire() as database:
            await database.execute(
                """
                INSERT INTO public.dungeon (guild_id, mod_message) VALUES ($1, $2)
                ON CONFLICT (guild_id) DO UPDATE SET mod_message=$2
                """,
                ctx.guild.id,
                message
            )
        await ctx.send(f"Message has been turned on.\nMessage to send on ban:\n{message}")

    @dungeon.command()
    async def joindays(self, ctx: Context, days: int = None) -> None:
        """Set how old an account needs to be a trusted user."""
        if not days:
            days = 1
        if days <= 0:
            await ctx.send("The number of days must be atleast 1!")
            days = 1
        async with self.bot.pool.acquire() as database:
            await database.execute(
                """
                INSERT INTO public.dungeon (guild_id, minimum_days) VALUES ($1, $2)
                ON CONFLICT (guild_id) DO UPDATE SET minimum_days=$2
                """,
                ctx.guild.id,
                days
            )
        await ctx.send(
            f"Users must have accounts older than {days} day(s) to be awarded the member role instead of the dungeon role on join."
        )

    @dungeon.command()
    async def role(self, ctx: Context, role: Role = None) -> None:
        """Sets the role to use for the dungeon."""
        if not role:
            await ctx.send("Please specify a Role!")
            return
        if isinstance(role, Role):
            role = role.id
        # TODO(review): this statement writes the role id into the
        # ``minimum_days`` column -- almost certainly a copy/paste slip from
        # joindays(); kept as-is because the real column name is not visible
        # here. Fix against the actual schema.
        async with self.bot.pool.acquire() as database:
            await database.execute(
                """
                INSERT INTO public.dungeon (guild_id, minimum_days) VALUES ($1, $2)
                ON CONFLICT (guild_id) DO UPDATE SET minimum_days=$2
                """,
                ctx.guild.id,
                role
            )
        # utils.get matches by attribute keywords; the positional attribute
        # form raised TypeError.
        dungeon_role = utils.get(ctx.guild.roles, id=role)
        await ctx.send(f"Dungeon role set to: {dungeon_role.name}.")

    @dungeon.command()
    async def add_bypass(self, ctx: Context, user: User) -> None:
        """Add a user to the dungeon bypass list."""
        bypassers = set()  # default when no dungeon row exists yet
        async with self.bot.pool.acquire(timeout=5) as database:
            for row in await database.fetch("SELECT * FROM public.dungeon"):
                bypassers = set(row["bypass_list"])
        if isinstance(user, User):
            user = user.id
        # sets have no .append(); the original call raised AttributeError
        bypassers.add(user)
        async with self.bot.pool.acquire() as database:
            await database.execute(
                """
                INSERT INTO public.dungeon (guild_id, bypass_list) VALUES ($1, $2)
                ON CONFLICT (guild_id) DO UPDATE SET bypass_list=$2
                """,
                ctx.guild.id,
                list(bypassers)  # asyncpg array parameters expect a list, not a set
            )
        await ctx.send(f"<@!{user}> Successfully added to the bypass list.")

    @dungeon.command()
    async def remove_bypass(self, ctx: Context, user: User) -> None:
        """Remove a user from the dungeon bypass list."""
        bypassers = set()
        async with self.bot.pool.acquire(timeout=5) as database:
            for row in await database.fetch("SELECT * FROM public.dungeon"):
                bypassers = set(row["bypass_list"])
        if isinstance(user, User):
            user = user.id
        # discard (not remove) so an absent user does not raise KeyError
        bypassers.discard(user)
        async with self.bot.pool.acquire() as database:
            await database.execute(
                """
                INSERT INTO public.dungeon (guild_id, bypass_list) VALUES ($1, $2)
                ON CONFLICT (guild_id) DO UPDATE SET bypass_list=$2
                """,
                ctx.guild.id,
                list(bypassers)
            )
        # the original reused the "added" wording here
        await ctx.send(f"<@!{user}> Successfully removed from the bypass list.")

    @dungeon.command()
    async def show_bypass(self, ctx: Context, user: User) -> None:
        """Show the current dungeon bypass list."""
        bypassers = set()
        async with self.bot.pool.acquire(timeout=5) as database:
            for row in await database.fetch("SELECT * FROM public.dungeon"):
                bypassers = set(row["bypass_list"])
        # entries are int ids; str.join needs strings
        bp_list = '\n'.join(str(b) for b in bypassers)
        await ctx.send(f"Bypass List:\n```{bp_list}```")
def setup(bot: Bot) -> None:
    """Entry point used by discord.py to register this cog."""
    cog = Dungeon(bot)
    bot.add_cog(cog)
|
nilq/baby-python
|
python
|
import numpy
import perfplot
def test():
    """Exercise perfplot.bench/show across every axis-scaling combination."""
    kernels = [lambda a: numpy.c_[a, a]]
    sizes = [2**k for k in range(4)]
    common = dict(
        setup=numpy.random.rand,
        kernels=kernels,
        labels=['c_'],
        n_range=sizes,
        xlabel='len(a)',
    )
    # default axis scaling first
    out = perfplot.bench(**common)
    out.show()
    # then every explicit log/linear combination
    for logx, logy in ((True, False), (False, True), (True, True)):
        out = perfplot.bench(logx=logx, logy=logy, **common)
        out.show()
    return
def test_no_labels():
    """bench() should fall back to kernel names when labels are omitted."""
    def mytest(a):
        return numpy.c_[a, a]

    out = perfplot.bench(
        setup=numpy.random.rand,
        kernels=[mytest],
        n_range=[2**k for k in range(4)],
        xlabel='len(a)',
    )
    out.show()
    return
def test_save():
    """A benchmark result must be savable to an image file."""
    def mytest(a):
        return numpy.c_[a, a]

    out = perfplot.bench(
        setup=numpy.random.rand,
        kernels=[mytest],
        n_range=[2**k for k in range(4)],
        xlabel='len(a)',
        title='mytest',
    )
    out.save('out.png')
    return
|
nilq/baby-python
|
python
|
from sympy import I, Matrix, symbols, conjugate, Expr, Integer
from sympy.physics.quantum.dagger import Dagger
def test_scalars():
    """Dagger on scalars: conjugation for complex, identity for real/Integer."""
    z = symbols('x', complex=True)
    assert Dagger(z) == conjugate(z)
    assert Dagger(I*z) == -I*conjugate(z)

    r = symbols('i', real=True)
    assert Dagger(r) == r

    generic = symbols('p')
    # with no assumptions, Dagger stays unevaluated
    assert isinstance(Dagger(generic), Dagger)

    three = Integer(3)
    assert Dagger(three) == three
def test_matrix():
    """Dagger of a Matrix equals its Hermitian conjugate (.H)."""
    s = symbols('x')
    mat = Matrix([[I, s*I], [2, 4]])
    assert Dagger(mat) == mat.H
class Foo(Expr):
    """Minimal Expr subclass used to test the ``_eval_dagger`` hook."""

    def _eval_dagger(self):
        # Dagger(Foo()) should evaluate to I through this hook.
        return I
def test_eval_dagger():
    """Dagger delegates to the object's _eval_dagger hook."""
    obj = Foo()
    assert Dagger(obj) == I
# Optional interop test: only defined when numpy is installed, so plain
# sympy environments skip it silently.
try:
    import numpy as np
except ImportError:
    pass
else:
    def test_numpy_dagger():
        # NOTE(review): np.matrix is deprecated in favour of ndarray; kept
        # because matrix-input support is presumably what is under test.
        a = np.matrix([[1.0,2.0j],[-1.0j,2.0]])
        # expected Hermitian conjugate, computed independently of Dagger
        adag = a.copy().transpose().conjugate()
        assert (Dagger(a) == adag).all()
# Optional interop test: only defined when scipy (and numpy) are installed.
try:
    from scipy import sparse
    import numpy as np
except ImportError:
    pass
else:
    def test_scipy_sparse_dagger():
        a = sparse.csr_matrix([[1.0+0.0j,2.0j],[-1.0j,2.0+0.0j]])
        # expected Hermitian conjugate, computed independently of Dagger
        adag = a.copy().transpose().conjugate()
        # sparse matrices lack elementwise ==().all(); compare via norm
        assert np.linalg.norm((Dagger(a) - adag).todense()) == 0.0
|
nilq/baby-python
|
python
|
import handler.handler as handler
class ScalingHandler(handler.TemplateHandler):
    """Request handler for the "scaling" informational page.

    Inherits from handler.TemplateHandler and serves a static template
    describing how to scale up a web application.
    """
    def get(self):
        # GET request -> render the static informational template.
        self.render("scaling.html")
|
nilq/baby-python
|
python
|
""" Database description.
Session is performed by a subject.
Session has multiple blocks.
Block has multiple trials.
Sessions, blocks, and trials have events of certain event type at certain time.
Trial has parameters.
"""
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Float, DateTime, Time, Enum, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, reconstructor
from sqlalchemy import create_engine
from sqlalchemy_utils import drop_database
import json
Base = declarative_base()
class Subject(Base):
    """An experiment participant; owns sessions via the ``sessions`` backref."""
    __tablename__ = "subject"
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False, unique=True)
    age = Column(Float, nullable=False)
    sex = Column(Enum("male", "female", name="sex_types"))
    # NOTE(review): "handness" is likely a typo for "handedness"; renaming
    # it would change the DB column name, so it is kept as-is.
    handness = Column(Enum("left", "right", name="hand_types"))
class Session(Base):
    """A recording session performed by one subject (see module docstring)."""
    __tablename__ = "session"
    id = Column(Integer, primary_key=True)
    subject_id = Column(Integer, ForeignKey("subject.id"), index=True)
    # deleting a subject cascades to its sessions
    subject = relationship(
        Subject,
        backref=backref("sessions", uselist=True, cascade="delete,all"))
class EventType(Base):
    """Lookup table naming the kinds of events that can be recorded."""
    __tablename__ = "event_type"
    id = Column(Integer, primary_key=True)
    desc = Column(String(250), nullable=False, unique=True)
class Block(Base):
    """A block of trials within a session, tied to its recording files."""
    __tablename__ = "block"
    id = Column(Integer, primary_key=True)
    session_id = Column(Integer, ForeignKey("session.id"))
    # serialized block parameters -- presumably JSON like Trial.paramslist,
    # but no decoder is attached here; verify against callers
    paramslist = Column(String(10000), nullable=False)
    number = Column(Integer, nullable=False, index=True)  # block number within session
    opto_filename = Column(String(1000), nullable=False)
    odau_filename = Column(String(1000), nullable=False)
    # deleting a session cascades to its blocks
    session = relationship(
        Session,
        backref=backref("blocks", uselist=True, cascade="delete,all"))
class BlockEvent(Base):
    """A timestamped event of some EventType attached to a block."""
    __tablename__ = "block_event"
    id = Column(Integer, primary_key=True)
    block_id = Column(Integer, ForeignKey("block.id"), index=True)
    event_type_id = Column(Integer, ForeignKey("event_type.id"), index=True)
    time = Column(DateTime, nullable=False, index=True)
    event_type = relationship(EventType)
    # deleting a block cascades to its events
    block = relationship(
        Block,
        backref=backref("events", uselist=True, cascade="delete,all"))
class Trial(Base):
    """A single trial within a block, with JSON-encoded parameters."""
    __tablename__ = "trial"
    id = Column(Integer, primary_key=True)
    block_id = Column(Integer, ForeignKey("block.id"), index=True)
    # JSON-encoded parameter dict; may be NULL (nullable=True)
    paramslist = Column(String(10000), nullable=True)
    number = Column(Integer, nullable=False, index=True)  # trial number within block
    disturbance_mode = Column(Integer, nullable=False, index=True)
    feedback_delay = Column(Float, nullable=False, index=True)
    valid = Column(Boolean, nullable=True, index=True, default=None)  # trial is valid for analysis
    opto_start = Column(Integer)
    opto_stop = Column(Integer)
    odau_start = Column(Integer)
    odau_stop = Column(Integer)
    # deleting a block cascades to its trials
    block = relationship(
        Block,
        backref=backref("trials", uselist=True, cascade="delete,all"))

    @reconstructor
    def init_on_load(self):
        """Decode ``paramslist`` into ``self.params`` after ORM load.

        ``paramslist`` is nullable, so guard against json.loads(None),
        which raised TypeError on load of any trial without parameters.
        """
        self.params = json.loads(self.paramslist) if self.paramslist is not None else None
class TrialEvent(Base):
    """A timestamped event of some EventType attached to a trial."""
    __tablename__ = "trial_event"
    id = Column(Integer, primary_key=True)
    trial_id = Column(Integer, ForeignKey("trial.id"), index=True)
    event_type_id = Column(Integer, ForeignKey("event_type.id"), index=True)
    time = Column(DateTime, nullable=False, index=True)
    event_type = relationship(EventType)
    # deleting a trial cascades to its events
    trial = relationship(
        Trial,
        backref=backref("events", uselist=True, cascade="delete,all"))
class SessionEvent(Base):
    """A timestamped event attached to a whole session."""
    __tablename__ = "session_event"
    id = Column(Integer, primary_key=True)
    session_id = Column(Integer, ForeignKey("session.id"), index=True)
    # NOTE(review): unlike BlockEvent/TrialEvent, this FK column is named
    # ``event_type`` (not ``event_type_id``) and has no relationship();
    # looks like an oversight, but renaming would change the DB schema.
    event_type = Column(Integer, ForeignKey("event_type.id"), index=True)
    time = Column(DateTime, nullable=False, index=True)
    session = relationship(
        Session,
        # NOTE(review): the backref name "trials" is misleading here --
        # Session.trials will contain SessionEvent rows, not Trial rows.
        backref=backref("trials", uselist=True, cascade="delete,all"))
|
nilq/baby-python
|
python
|
import client

# Smoke-test the Client observation round-trip: encode the stored
# observations, decode them back, then show the raw stored value.
new_client = client.Client(observations='Billy/#nHello Billy')
print(new_client.encode_observations())
print(new_client.decode_observations(new_client.observations))
print(new_client.observations)
|
nilq/baby-python
|
python
|
import tkinter as tk
from tkinter import *
from tkinter.ttk import *
from datetime import datetime, timedelta, date
from app.models import session
from app.models.booking import Booking
from app.models.returns import Returns
from app.models.tools import Tools
from app.models.users import Users
class Bookings(tk.Frame):
    """Frame showing the logged-in user's tool bookings.

    Lists each booking's tool, dates, cost and delivery flag in a
    treeview, and offers a 'return tool' form that records the tool's
    condition.
    """

    def __init__(self, root, *args, **kwargs):
        self.FONT = 'Helvetica'
        self.TITLE_SIZE = 24
        # grab the user with the specified id to query for his bookings
        self.CURRENT_USER = session.query(Users).filter_by(id=kwargs['user_id']).first()
        # create a new frame
        tk.Frame.__init__(self, root)
        self.title_label = Label(self, text="Bookings", font=(self.FONT, self.TITLE_SIZE))
        self.title_label.pack(side='top')
        self.return_button = Button(self, text="Return tool", command=self.return_tool_frame)
        self.return_button.pack(anchor='w')
        self.report_button = Button(self, text="Report tool")
        self.report_button.pack(anchor='w')
        self.createTable()
        self.loadTable()

    def return_tool_frame(self):
        """Replace the bookings list with the 'return tool' form."""
        # remember which booking row was selected before tearing the list down
        self.booking_id = self.treeview.item(self.treeview.selection(), "text")
        # destroy the list widgets within the frame
        self.title_label.destroy()
        self.return_button.destroy()
        self.report_button.destroy()
        self.treeview.destroy()
        # repopulate the window with the return-form widgets
        self.title_label = Label(self, text="Return tool", font=(self.FONT, self.TITLE_SIZE))
        self.title_label.pack(side='top')
        self.subtitle = Label(self, text="Please write a few words to describe the tool condition")
        self.subtitle.pack()
        self.feedback = tk.Text(self, height=10, width=20, font=(self.FONT, 12))
        self.feedback.pack(fill=tk.BOTH)
        self.return_tool_button = Button(self, command=self.return_tool, text="Return tool")
        self.return_tool_button.pack()
        self.error_label = Label(self, text="")
        self.error_label.pack()

    def return_tool(self):
        """Persist a Returns row for the selected booking."""
        feedback_text = self.feedback.get('1.0', END)
        # Text.get always appends a trailing newline, so the original
        # equality check against "" could never trigger; strip instead,
        # and bail out so an empty note is never written to the database.
        if not feedback_text.strip():
            self.error_label.config(text="Please fill all fields")
            return
        # local renamed from 'return_tool', which shadowed this method
        tool_return = Returns(returned=True, booking_id=self.booking_id,
                              tool_condition=feedback_text, date=str(date.today()))
        session.add(tool_return)
        session.commit()
        self.error_label.config(text="Item returned successfully")

    def createTable(self):
        """Build the (initially empty) bookings treeview with a scrollbar."""
        tv = Treeview(self)
        tv.pack(side='left')
        vsb = Scrollbar(self, orient="vertical", command=tv.yview)
        vsb.pack(side='right', fill='y')
        tv.configure(yscrollcommand=vsb.set)
        tv['columns'] = ('tool_name', 'booked_date', 'return_date', 'cost', 'delivery')
        tv.heading("#0", text='ID', anchor='w')
        tv.column("#0", anchor="w", width=10)
        tv.heading('tool_name', text='Tool name')
        tv.column('tool_name', anchor='center', width=100)
        tv.heading('booked_date', text='Booked date')
        tv.column('booked_date', anchor='center', width=100)
        tv.heading('return_date', text='Due return date')
        tv.column('return_date', anchor='center', width=100)
        tv.heading('cost', text='Cost')
        tv.column('cost', anchor='center', width=100)
        tv.heading('delivery', text='Delivery')
        tv.column('delivery', anchor='center', width=100)
        tv.pack(fill=tk.BOTH, expand=1)
        self.treeview = tv

    def loadTable(self):
        """Fill the treeview with this user's bookings joined to tool data."""
        _user_bookings = []
        user_bookings = session.query(Booking).filter(Booking.user_id == self.CURRENT_USER.id)
        # join each booking with its tool row
        for book in user_bookings:
            tool = session.query(Tools).filter_by(id=book.tool_id).first()
            data = {
                "id": book.id,
                "booked_date": book.booked_date,
                "duration_of_booking": book.duration_of_booking,
                "tool_id": book.tool_id,
                "user_id": book.user_id,
                "delivery": book.delivery,
                "tool_name": tool.name,
                "tool_daily_price": tool.daily_price,
                "tool_half_day_price": tool.half_day_price,
            }
            # bookings of "x days + half day" are stored as the string 'x.5';
            # charge the whole days at the daily price plus one half-day fee
            if '.' in book.duration_of_booking:
                data['cost'] = (int(book.duration_of_booking[:book.duration_of_booking.find('.')]) *
                                float(tool.daily_price)
                                + float(tool.half_day_price))
            else:
                data['cost'] = (int(book.duration_of_booking) * float(tool.daily_price))
            # A missing Returns row means the tool is still out. The
            # original used a bare except that relied on AttributeError
            # and hid real query errors.
            tool_return = session.query(Returns).filter_by(booking_id=book.id).first()
            if tool_return is not None and tool_return.returned:
                data['return_date'] = "Returned"
            else:
                data['return_date'] = datetime.strptime(book.booked_date, '%Y-%m-%d') +\
                    timedelta(round(float(book.duration_of_booking)))
            if book.delivery:
                data['cost'] += float(tool.delivery_cost)
            _user_bookings.append(data)
        for booking in _user_bookings:
            self.treeview.insert('', 'end', text=booking['id'],
                                 values=(booking['tool_name'],
                                         booking['booked_date'], booking['return_date'], booking['cost'], booking['delivery']))
|
nilq/baby-python
|
python
|
#coding:utf-8
"""DjangoAdmin URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from system.dict_view import (DictAdd, DictDel, DictEdit, DictList,
DictValueDel, DictValueForm, DictValueList,
DictView)
from system.log_view import LogForm, LogList
from system.menu_view import (GenerateSubMenu, GetChildren, MenuAdd, MenuDel,
MenuEdit, MenuIconSelect, MenuList, MenuSort,
MenuTree, MenuView)
from system.office_view import (OfficeAdd, OfficeDel, OfficeEdit,
OfficeGetChildren, OfficeList, OfficeTreeData,
OfficeView)
from system.role_view import (RoleAdd, RoleCheckName, RoleDel, RoleEdit,
RoleList, RolePermissionView,
RolePersmissionSave, RolePersmissionTree,
RoleUserList, RoleUserOut, RoleView)
from system.script_template_view import (script_template_list,
script_template_list_api,
script_template_update)
from system.user_view import (Login, Logout, UserAdd, UserCheckLoginName,
UserDel, UserEdit, UserList, UserSelect,
UserView)
from system.views import Home, Index, TreeSelect, permission_error
urlpatterns = [
    # user account operations
    url(r'^login/$', Login.as_view()),
    url(r'^logout/$', Logout.as_view()),
    url(r'^user/list/$', UserList.as_view()),
    url(r'^user/add/$', UserAdd.as_view()),
    url(r'^user/view/$', UserView.as_view()),
    url(r'^user/edit/$', UserEdit.as_view()),
    url(r'^user/del/$', UserDel.as_view()),
    url(r'^user_check_loginname/$', UserCheckLoginName.as_view()),
    url(r'^user/select/$', UserSelect.as_view()),
    # operation log
    # NOTE(review): these two patterns lack the leading '^' anchor used by
    # every other route, so they match any URL ending in log/list/ etc.
    url(r'log/list/$',LogList.as_view()),
    url(r'log/form/$',LogForm.as_view()),
    # script template operations
    url(r'^script_template_list/$', script_template_list),
    url(r'^script_template_update/$', script_template_update),
    url(r'^script_template_list_api/$', script_template_list_api),
    #dictionary operation
    url(r'^dict/list/$', DictList.as_view()),
    url(r'^dict/add/$', DictAdd.as_view()),
    url(r'^dict/view/$', DictView.as_view()),
    url(r'^dict/edit/$', DictEdit.as_view()),
    url(r'^dict/del/$', DictDel.as_view()),
    url(r'^dict/value/form/$', DictValueForm.as_view()),
    url(r'^dict/value/list/$', DictValueList.as_view()),
    url(r'^dict/value/del/$', DictValueDel.as_view()),
    #role operation
    url(r'^role/list/$', RoleList.as_view()),
    url(r'^role/add/$', RoleAdd.as_view()),
    url(r'^role/view/$', RoleView.as_view()),
    url(r'^role/edit/$', RoleEdit.as_view()),
    url(r'^role/del/$', RoleDel.as_view()),
    url(r'^role/check/name/$', RoleCheckName.as_view()),
    url(r'^role/user/list/$', RoleUserList.as_view()),
    url(r'^role/user/out/$', RoleUserOut.as_view()),
    url(r'^role/permission/view/$', RolePermissionView.as_view()),
    url(r'^role/permission/tree/$', RolePersmissionTree.as_view()),
    url(r'^role/permission/save/$', RolePersmissionSave.as_view()),
    #office opration url
    url(r'^office/list/$', OfficeList.as_view()),
    url(r'^office/add/$', OfficeAdd.as_view()),
    url(r'^office/view/$', OfficeView.as_view()),
    url(r'^office/edit/$', OfficeEdit.as_view()),
    url(r'^office/del/$', OfficeDel.as_view()),
    url(r'^office/tree/data/$', OfficeTreeData.as_view()),
    url(r'^office/getChildren/$', OfficeGetChildren.as_view()),
    #tree select
    url(r'^tree/select/$', TreeSelect.as_view()),
    #menu url
    url(r'^menu/list/$', MenuList.as_view()),
    url(r'^menu/add/$', MenuAdd.as_view()),
    url(r'^menu/view/$', MenuView.as_view()),
    url(r'^menu/edit/$', MenuEdit.as_view()),
    url(r'^menu/del/$', MenuDel.as_view()),
    url(r'^menu/tree/$', MenuTree.as_view()),
    url(r'^menu/getChildren/$', GetChildren.as_view()),
    url(r'^menu/iconselect/$', MenuIconSelect.as_view()),
    url(r'^menu/sort/$', MenuSort.as_view()),
    url(r'^menu/generateSubMenu/$', GenerateSubMenu.as_view()),
    #permission error
    url(r'^permission_error/$', permission_error),
]
|
nilq/baby-python
|
python
|
"""
Encountering some difficulties getting pytest to
generate standard logging files. Exploring here
"""
import logging
def test_logfile():
    """Emit a single INFO record through the root logger.

    This exists only to explore how pytest captures/propagates standard
    logging output; there is nothing to assert.
    """
    logging.info('whattup test')
|
nilq/baby-python
|
python
|
# coding=utf-8
from mxnet.gluon import nn
from .base import SegBaseResNet
from mxnetseg.nn import FCNHead, PPModule
from mxnetseg.utils import MODELS
@MODELS.add_component
class PSPNet(SegBaseResNet):
    """
    Dilated ResNet50/101/152 based PSPNet.
    Reference: Zhao, H., Shi, J., Qi, X., Wang, X., & Jia, J. (2017).
    Pyramid Scene Parsing Network. In IEEE Conference on Computer Vision and
    Pattern Recognition (pp. 6230–6239). https://doi.org/10.1109/CVPR.2017.660
    """
    def __init__(self, nclass, backbone='resnet50', aux=True, height=None, width=None,
                 base_size=520, crop_size=480, pretrained_base=False,
                 norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
        # dilate=True keeps a denser feature map from the backbone for the
        # pyramid pooling head (see SegBaseResNet)
        super(PSPNet, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                                     pretrained_base, dilate=True, norm_layer=norm_layer,
                                     norm_kwargs=norm_kwargs)
        with self.name_scope():
            # main pyramid-pooling segmentation head on the last backbone stage
            self.head = _PyramidHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            if self.aux:
                # auxiliary FCN head on the 1024-channel stage-3 features
                self.aux_head = FCNHead(nclass=nclass, in_channels=1024, norm_layer=norm_layer,
                                        norm_kwargs=norm_kwargs)
    def hybrid_forward(self, F, x, *args, **kwargs):
        # only the last two backbone stages are used: c3 for aux, c4 for main
        _, _, c3, c4 = self.base_forward(x)
        outputs = []
        x = self.head(c4)
        # upsample logits back to the target resolution stored in _up_kwargs
        x = F.contrib.BilinearResize2D(x, **self._up_kwargs)
        outputs.append(x)
        if self.aux:
            aux_out = self.aux_head(c3)
            aux_out = F.contrib.BilinearResize2D(aux_out, **self._up_kwargs)
            outputs.append(aux_out)
        # returns (main,) or (main, aux) depending on self.aux
        return tuple(outputs)
class _PyramidHead(nn.HybridBlock):
    """Pyramid pooling followed by an FCN classification head.

    The pooling module takes the 2048-channel backbone output and (per the
    4096 input channels of the classifier) doubles the channel count before
    the per-pixel classification.
    """

    def __init__(self, nclass, norm_layer=nn.BatchNorm, norm_kwargs=None):
        super(_PyramidHead, self).__init__()
        with self.name_scope():
            self.pool = PPModule(2048, norm_layer, norm_kwargs, reduction=4)
            self.seg_head = FCNHead(nclass, 4096, norm_layer, norm_kwargs)

    def hybrid_forward(self, F, x, *args, **kwargs):
        # pool context, then classify per pixel
        return self.seg_head(self.pool(x))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Write a Python script in a different directory (not the one containing mytest).
a. Verify that you can import mytest and call the three functions func1(),
func2(), and func3().
b. Create an object that uses MyClass. Verify that you call the hello() and
not_hello() methods.
"""
import mytest
from mytest import *
from mytest import func3
# banner before the function demos
print '*' * 80
print "Print out 3 functions"
print '*' * 80
# each call demonstrates a different import style
mytest.simple.func1() # Work with 'import mytest'
func2() # Work with 'from mytest import *'
func3() # Work with 'from mytest import func3'
# NOTE(review): the module docstring mentions hello()/not_hello(), but the
# calls below use Hello()/NonHello() -- confirm against mytest's actual API
aMyClassObj = MyClass("It", "will", "be a beautiful day tommorrow!")
print
print '*' * 80
print "Print out Hello and NonHello for a MyClass object"
print '*' * 80
aMyClassObj.Hello()
aMyClassObj.NonHello()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from api.models import Category, Comment, Genre, Review, Title
# register every content model with the default admin site
for _model in (Category, Genre, Title, Comment, Review):
    admin.site.register(_model)
|
nilq/baby-python
|
python
|
class Temperature:
    """A temperature value with unit-aware comparisons.

    Supported scales: "C" (Celsius), "F" (Fahrenheit) and "K" (Kelvin).
    Comparisons convert both operands to Kelvin, so temperatures expressed
    on different scales compare correctly.
    """

    def __init__(self, value, scale):
        """
        :param value: numeric temperature reading
        :param scale: one of "C", "F", "K"
        :raises ValueError: if *scale* is not recognized (previously this
            silently left ``value_kelvin`` unset, deferring the failure to
            the first comparison)
        """
        self.value = value
        self.scale = scale
        if scale == "C":
            self.value_kelvin = value + 273.15
        elif scale == "F":
            self.value_kelvin = (value - 32) * 5 / 9 + 273.15
        elif scale == "K":
            self.value_kelvin = value
        else:
            raise ValueError("unknown temperature scale: %r" % (scale,))

    def __repr__(self):
        return f"Temperature({self.value}, {self.scale!r})"

    def __str__(self):
        return f"Temperature is {self.value} °{self.scale}"

    def __eq__(self, other):
        # defer to the other operand for non-Temperature comparisons
        if not isinstance(other, Temperature):
            return NotImplemented
        return self.value_kelvin == other.value_kelvin

    def __lt__(self, other):
        if not isinstance(other, Temperature):
            return NotImplemented
        return self.value_kelvin < other.value_kelvin
# quick sanity checks: 25 °C equals 77 °F, and 25 °C is below 100 °F
tc = Temperature(25, "C")
tf = Temperature(77, "F")
tf2 = Temperature(100, "F")
print(tc == tf) # True
print(tc < tf2) # True
|
nilq/baby-python
|
python
|
import numpy as np
from scipy.sparse.linalg import svds
from numpy_groupies import aggregate # for accumarray type functionality
import sparse
def sparse_unfold(data, mode):
    '''
    Unfolding (matricization) of a sparse tensor along *mode*.

    :param data: sparse.COO tensor (not modified; a copy is taken)
    :param mode: axis whose fibers become the rows of the unfolding
    :return: sparse matrix of shape (data.shape[mode], prod(other dims))
    '''
    data = data.copy()
    # first step: swap axis with first
    if mode != 0:
        # the swap is done directly on the COO coordinate rows
        row_mode = data.coords[mode, :]
        data.coords[mode, :] = data.coords[0, :]
        data.coords[0, :] = row_mode
    # second step: reshape
    # NOTE(review): column ordering follows sparse's reshape convention after
    # the axis swap -- confirm against a dense unfolding if the exact column
    # order matters downstream
    return data.reshape((data.shape[mode], -1))
#@jit(nopython=False, parallel=True)
def sparse_mttkrp(X, U, mode):
    '''
    Matricized (sparse) tensor times Khatri-Rao product of matrices.
    Inspired by code from TensorToolbox and slide set by Kolda:
    https://www.osti.gov/servlets/purl/1146123

    :param X: sparse.COO tensor
    :param U: list of factor matrices, one per mode of X
    :param mode: the mode left un-contracted (rows of the result)
    :return: ndarray of shape (X.shape[mode], R)
    '''
    t = X.ndim
    if len(U) != t:
        raise Exception("Factor list is wrong length")
    # rank is read from any factor other than `mode` (U[mode] is unused)
    if mode == 0:
        R = U[1].shape[1]
    else:
        R = U[0].shape[1]
    n = X.shape[mode]
    V = np.zeros((n, R))
    for r in range(R):
        # scale each nonzero by the matching factor entries of all other modes
        data = X.data.copy()
        for i in [x for x in range(t) if x != mode]:
            data *= U[i][X.coords[i, :], r]
        # size=n guarantees a full-length column even when the trailing
        # indices of `mode` carry no data (matches the later redefinition
        # of this function in this module)
        V[:, r] = aggregate(X.coords[mode, :], data, func="sum", fill_value=0, size=n)
    return V
#@jit(nopython=False, parallel=True)
def sparse_resid(U, data):
    '''
    Compute residuals between a Kruskal tensor and sparse observed data.

    :param U: list of factor matrices representing the Kruskal tensor
    :param data: sparse.COO tensor of observations
    :return: sparse.COO with the same coords as *data* whose values are
        (model prediction - observed value)
    '''
    # evaluate the Kruskal model only at the observed coordinates
    resid_array = kr_get_items(U, data.coords) - data.data
    return sparse.COO(data.coords, resid_array, shape=data.shape)
def sparse_unfold_svs(data, mode, nsv):
    '''
    Compute the top *nsv* left singular vectors of a sparse tensor unfolding.

    If *nsv* exceeds what svds can deliver (min(shape) - 1), the remaining
    columns are padded with random Gaussian vectors of matching scale so the
    caller always receives nsv columns.
    '''
    A = sparse_unfold(data, mode).to_scipy_sparse()
    # svds requires k < min(A.shape)
    max_sv = np.min(A.shape) - 1
    if nsv > max_sv:
        u, s, _ = svds(A, max_sv, tol=1e-8, return_singular_vectors='u')
        # pad with random directions (normalized by sqrt(rows)) to reach nsv
        u_rand = np.random.randn(u.shape[0], nsv - max_sv) / np.sqrt(u.shape[0])
        u = np.hstack((u, u_rand))
    else:
        u, s, _ = svds(A, nsv, tol=1e-8, return_singular_vectors='u')
    return u
def kr_dot(U1, U2):
    r"""Hilbert-Schmidt inner product :math:`\langle T_1, T_2 \rangle`
    between two Kruskal tensors represented by their factor lists.

    Bader and Kolda, 2007. "Efficient MATLAB Computations with Sparse and
    Factored Tensors".

    Raw docstring fixes the invalid ``\l``/``\r`` escape sequences of the
    original.

    :param U1: list of t factor matrices with r1 columns each
    :param U2: list of t factor matrices with r2 columns each
    :return: scalar inner product
    """
    r1 = U1[0].shape[1]
    r2 = U2[0].shape[1]
    t = len(U1)
    assert t == len(U2), "tensor order mismatch"
    # Hadamard product of the mode-wise cross-Gram matrices U1[i]^T U2[i]
    hadamard_prod = np.ones((r1, r2))
    for i in range(t):
        hadamard_prod *= U1[i].T @ U2[i]
    # summing all entries of the r1 x r2 matrix gives the inner product
    # (np.sum over the full array; no need to flatten first)
    return np.sum(hadamard_prod)
def kr_rescale(U, desired_norm=1., norm='hs'):
    """Rescale a Kruskal tensor to a desired norm.

    The common scale factor is distributed evenly (t-th root) across all t
    factor matrices; the input factors are not modified.

    :param U: list of factor matrices
    :param desired_norm: target norm of the represented tensor
    :param norm: 'hs' for the Hilbert-Schmidt norm, 'std' to scale relative
        to sqrt(rank)
    :return: list of new, rescaled factor matrices
    :raises Exception: on an unrecognized *norm*
    """
    t = len(U)
    if norm == 'hs':
        current_norm = np.sqrt(kr_dot(U, U))
        scale_factor = (desired_norm / current_norm) ** (1. / t)
    elif norm == 'std':
        r = U[0].shape[1]
        scale_factor = (desired_norm / np.sqrt(r)) ** (1. / t)
    else:
        raise Exception("norm %s unknown" % str(norm))
    # copy() preserves the original behavior of returning fresh arrays
    return [Ui.copy() * scale_factor for Ui in U]
def kr_balance_factors(U):
    '''
    Equalize the column-wise 2-norms across the factor matrices.

    Each factor's columns are first normalized to unit norm; the collected
    per-column scales are then redistributed evenly (t-th root) over all t
    factors, so the represented tensor is unchanged.
    '''
    n_modes = len(U)
    n_cols = U[0].shape[1]
    total_scale = np.ones(n_cols)
    normalized = []
    for factor in U:
        norms = np.linalg.norm(factor, axis=0)
        normalized.append(factor / norms[np.newaxis, :])
        total_scale = total_scale * norms
    redistributed = total_scale[np.newaxis, :] ** (1. / n_modes)
    return [factor * redistributed for factor in normalized]
def kr_get_items(U, coords):
'''
Get entries in kruskal tensor by coordinates
coords : (nmodes x ndata)
'''
r = U[0].shape[1]
t = len(U)
n_items = coords.shape[1]
if n_items > 1:
values = np.zeros((n_items,))
for r in range(r):
summand = np.ones((n_items,))
for i in range(t):
summand *= U[i][coords[i, :], r]
values += summand
return values
else:
row = np.ones(r)
for k in range(t):
row *= U[k][coords[k], :]
return np.sum(row)
def sparse_mttkrp(X, U, mode):
    '''
    Matricized (sparse) tensor times Khatri-Rao product of matrices
    Inspired by code from TensorToolbox and slide set by Kolda:
    https://www.osti.gov/servlets/purl/1146123

    NOTE(review): this redefinition shadows the earlier sparse_mttkrp in
    this module; the substantive difference is the explicit size=n passed
    to aggregate, which guarantees X.shape[mode] output rows even when the
    trailing indices of `mode` carry no data.

    :param X: sparse.COO tensor
    :param U: list of factor matrices, one per mode of X
    :param mode: the mode left un-contracted (rows of the result)
    :return: ndarray of shape (X.shape[mode], R)
    '''
    t = X.ndim
    if len(U) != t:
        raise Exception("Factor list is wrong length")
    # rank is read from any factor other than `mode` (U[mode] is unused)
    if mode == 0:
        R = U[1].shape[1]
    else:
        R = U[0].shape[1]
    n = X.shape[mode]
    V = np.zeros((n, R))
    for r in range(R):
        # Z = (U[i][:,r] for i in [x for x in range(t) if x != n])
        # V[:, r] = sparse_ttv(X, Z, n)
        # scale each nonzero by the matching factor entries of all other modes
        data = X.data.copy()
        for i in [x for x in range(t) if x != mode]:
            data *= U[i][X.coords[i, :], r]
        V[:, r] = aggregate(X.coords[mode, :], data, func="sum", fill_value=0, size=n)
    return V
def kr_random(n, t, r, rescale=False, normalize_cols=False, rvs='gaussian'):
    """Draw a random Kruskal tensor as a list of t factor matrices (n x r).

    :param n: rows of each factor
    :param t: number of modes (factors)
    :param r: rank (columns of each factor)
    :param rescale: if truthy, passed to kr_rescale as the desired norm
    :param normalize_cols: normalize every column to unit 2-norm
    :param rvs: 'gaussian' (standard normal) or 'unif' (uniform on [-1, 1])
    :raises Exception: on an unrecognized *rvs*
    """
    factors = []
    for _ in range(t):
        if rvs == 'gaussian':
            factor = np.random.randn(n, r)
        elif rvs == 'unif':
            factor = 2 * (np.random.rand(n, r) - 0.5)
        else:
            raise Exception('unrecognized type of random variable in rvs')
        if normalize_cols:
            factor = factor / np.linalg.norm(factor, axis=0)[np.newaxis, :]
        factors.append(factor)
    if rescale:
        factors = kr_rescale(factors, rescale)
    return factors
|
nilq/baby-python
|
python
|
import os
import math
import json
import shutil
import tempfile
import subprocess
import numpy as np
from celery import shared_task
from PIL import Image, ImageDraw
from imagekit.utils import open_image, save_image
from pilkit.utils import extension_to_format
from pilkit.processors import ResizeToFit
from django.db.models import Sum, Q
from photos.models import FlickrUser, Photo
from licenses.models import License
from common.geom import homo_line, unit_to_sphere, sphere_to_unit, normalized_cross, abs_dot
from common.utils import progress_bar
from common.http import download
from pyquery import PyQuery as pq
@shared_task
def update_photo_license(photo_id):
    """Refresh the cached license of one photo from its Flickr metadata."""
    photo = Photo.objects.get(id=photo_id)
    photo.license = License.get_for_flickr_photo(photo.flickr_user, photo.flickr_id)
    photo.save()
@shared_task
def update_flickr_users(ids, show_progress=False):
    """ Scrape Flickr for information about Flickr User profiles.
    :param ids: list of database ids (not Flick usernames)
    :param show_progress: if True, wrap the iteration in a progress bar and
        print each scraped profile
    """
    values = FlickrUser.objects \
        .filter(id__in=ids) \
        .values_list('id', 'username')
    if show_progress:
        values = progress_bar(values)
    for (id, username) in values:
        html = download('https://www.flickr.com/people/%s/' % username)
        if not html:
            # download failure or missing profile page: skip this user
            continue
        d = pq(html)
        # profile section holds the real-world name and website link
        profile = d('div.profile-section')
        given_name = profile('span.given-name').text().strip()
        family_name = profile('span.family-name').text().strip()
        website_name = profile('a.url').text().strip()
        website_url = profile('a.url').attr('href')
        if website_url:
            website_url = website_url.strip()
        else:
            website_url = ""
        # person section holds the Flickr display name
        person = d('div.person')
        display_name = person('span.character-name-holder').text().strip()
        sub_name = person('h2').text().strip()
        FlickrUser.objects.filter(id=id).update(
            display_name=display_name,
            sub_name=sub_name,
            given_name=given_name,
            family_name=family_name,
            website_name=website_name,
            website_url=website_url,
        )
        if show_progress:
            print '%s: display: "%s" (%s), name: "%s" "%s", web: "%s" (%s)' % (
                username, display_name, sub_name, given_name, family_name,
                website_name, website_url)
@shared_task
def update_photos_num_shapes(photo_ids):
    """Recompute the cached shape and vertex counts for the given photos."""
    # imported here to avoid a circular import at module load time
    from shapes.models import MaterialShape
    for photo_id in photo_ids:
        num_shapes = (
            MaterialShape.objects.filter(photo_id=photo_id)
            .filter(**MaterialShape.DEFAULT_FILTERS).count())
        if not num_shapes:
            num_shapes = 0
        num_vertices = (
            MaterialShape.objects.filter(photo_id=photo_id)
            .filter(**MaterialShape.DEFAULT_FILTERS)
            .aggregate(s=Sum('num_vertices'))['s'])
        if not num_vertices:
            # Sum aggregate returns None when no rows match
            num_vertices = 0
        Photo.objects.filter(id=photo_id).update(
            num_shapes=num_shapes,
            num_vertices=num_vertices,
        )
@shared_task
def update_photos_num_intrinsic(photo_ids, show_progress=False):
    """Recompute cached intrinsic-image statistics for the given photos."""
    # imported here to avoid a circular import at module load time
    from intrinsic.models import IntrinsicPoint, \
        IntrinsicPointComparison, IntrinsicImagesDecomposition
    iterator = progress_bar(photo_ids) if show_progress else photo_ids
    for photo_id in iterator:
        # count comparisons that have a scored answer or were auto-labelled
        # (darker_method='A')
        num_comparisons = IntrinsicPointComparison.objects \
            .filter(photo_id=photo_id) \
            .filter(Q(darker__isnull=False, darker_score__gt=0) |
                    Q(darker_method='A')) \
            .count()
        num_points = IntrinsicPoint.objects \
            .filter(photo_id=photo_id) \
            .count()
        # NOTE(review): filters on mean_sum_error but reads mean_error --
        # possibly intentional (rows having both), worth confirming
        errors = IntrinsicImagesDecomposition.objects \
            .filter(photo_id=photo_id,
                    algorithm__active=True,
                    mean_sum_error__isnull=False) \
            .values_list('mean_error')
        if errors:
            median_intrinsic_error = np.median(errors)
        else:
            median_intrinsic_error = None
        Photo.objects.filter(id=photo_id).update(
            num_intrinsic_comparisons=num_comparisons,
            num_intrinsic_points=num_points,
            median_intrinsic_error=median_intrinsic_error,
        )
@shared_task
def detect_vanishing_points(photo_id, dim=800):
    """ Detects vanishing points for a photo and stores it in the database in
    the photo model.

    :param photo_id: database id of the Photo to process
    :param dim: image is resized to fit within dim x dim before detection
    """
    # load photo
    photo = Photo.objects.get(id=photo_id)
    orig_image = open_image(photo.image_2048)
    # remember the previous result so we can keep it if it was better
    old_vanishing_lines = photo.vanishing_lines
    old_vanishing_points = photo.vanishing_points
    old_vanishing_length = photo.vanishing_length
    detect_vanishing_points_impl(
        photo, ResizeToFit(dim, dim).process(orig_image),
        save=False)
    # prefer whichever run explained more total line length
    if old_vanishing_length > photo.vanishing_length:
        photo.vanishing_lines = old_vanishing_lines
        photo.vanishing_points = old_vanishing_points
        photo.vanishing_length = old_vanishing_length
    if photo.vanishing_length:
        photo.save()
def detect_vanishing_points_impl(photo, image, save=True):
    """Detect vanishing points in *image* and store them on *photo*.

    Pipeline: LSD line-segment detection (run through MATLAB), J-linkage
    clustering of segments into vanishing-point candidates, then
    least-squares (optionally EM) refinement of one vanishing direction per
    cluster.  Results are written to photo.vanishing_lines /
    vanishing_points / vanishing_length with coordinates normalized to
    [0, 1].

    :param photo: Photo model (provides vector <-> vanishing point mapping)
    :param image: PIL image to analyze
    :param save: if True, call photo.save() at the end
    """
    # algorithm parameters
    max_em_iter = 0 # if 0, don't do EM
    min_cluster_size = 10
    min_line_len2 = 4.0
    residual_stdev = 0.75
    max_clusters = 8
    outlier_weight = 0.2
    weight_clamp = 0.1
    lambda_perp = 1.0
    verbose = False
    width, height = image.size
    print 'size: %s x %s' % (width, height)
    vpdetection_dir = os.path.abspath(os.path.join(
        os.path.dirname(__file__), os.pardir, os.pardir, 'opt', 'vpdetection', 'matlab'
    ))
    tmpdir = tempfile.mkdtemp()
    try:
        # save image to local tmpdir
        localname = os.path.join(tmpdir, 'image.jpg')
        with open(tmpdir + '/image.jpg', 'wb') as target:
            save_image(image, target, format='JPEG', options={'quality': 90})
        # detect line segments using LSD (Grompone, G., Jakubowicz, J., Morel,
        # J. and Randall, G. (2010). LSD: A Fast Line Segment Detector with a
        # False Detection Control. IEEE Transactions on Pattern Analysis and
        # Machine Intelligence, 32, 722.)
        linesname = os.path.join(tmpdir, 'lines.txt')
        matlab_command = ";".join([
            "try",
            "addpath('../lsd-1.5/')",
            "lines = lsd(double(rgb2gray(imread('%s'))))" % localname,
            "save('%s', 'lines', '-ascii', '-tabs')" % linesname,
            "catch",
            "end",
            "quit",
        ])
        print 'matlab command: %s' % matlab_command
        subprocess.check_call(args=[
            'matlab', '-nodesktop', '-nosplash', '-nodisplay',
            '-r', matlab_command
        ], cwd=vpdetection_dir)
        # cluster lines using J-linkage (Toldo, R. and Fusiello, A. (2008).
        # Robust multiple structures estimation with J-Linkage. European
        # Conference on Computer Vision(ECCV), 2008.)
        # and (Tardif J.-P., Non-iterative Approach for Fast and Accurate
        # Vanishing Point Detection, 12th IEEE International Conference on
        # Computer Vision, Kyoto, Japan, September 27 - October 4, 2009.)
        clustername = os.path.join(tmpdir, 'clusters.txt')
        subprocess.check_call(
            args=['./vpdetection', linesname, clustername],
            cwd=vpdetection_dir)
        # collect line clusters
        clusters_dict = {}
        all_lines = []
        for row in open(clustername, 'r').readlines():
            # each row: x1 y1 x2 y2 cluster_index
            cols = row.split()
            idx = int(cols[4])
            line = [float(f) for f in cols[0:4]]
            # discard small lines
            x1, y1, x2, y2 = line
            len2 = (x1 - x2) ** 2 + (y2 - y1) ** 2
            if len2 < min_line_len2:
                continue
            if idx in clusters_dict:
                clusters_dict[idx].append(line)
                all_lines.append(line)
            else:
                clusters_dict[idx] = [line]
        finally:
            shutil.rmtree(tmpdir)
    # discard invalid clusters and sort by cluster length
    thresh = 3 if max_em_iter else min_cluster_size
    clusters = filter(lambda x: len(x) >= thresh, clusters_dict.values())
    clusters.sort(key=line_cluster_length, reverse=True)
    if max_em_iter and len(clusters) > max_clusters:
        clusters = clusters[:max_clusters]
    print "Using %s clusters and %s lines" % (len(clusters), len(all_lines))
    if not clusters:
        print "Not enough clusters"
        return
    # Solve for optimal vanishing point using V_GS in 5.2 section of
    # (http://www-etud.iro.umontreal.ca/~tardif/fichiers/Tardif_ICCV2009.pdf).
    # where "optimal" minimizes algebraic error.
    vectors = []
    for lines in clusters:
        # Minimize 'algebraic' error to get an initial solution
        A = np.zeros((len(lines), 3))
        for i in xrange(0, len(lines)):
            x1, y1, x2, y2 = lines[i]
            A[i, :] = [y1 - y2, x2 - x1, x1 * y2 - y1 * x2]
        # the optimal homogeneous point is the smallest right singular vector
        __, __, VT = np.linalg.svd(A, full_matrices=False, compute_uv=True)
        if VT.shape != (3, 3):
            raise ValueError("Invalid SVD shape (%s)" % VT.size)
        x, y, w = VT[2, :]
        p = [x / w, y / w]
        v = photo.vanishing_point_to_vector(
            (p[0] / width, p[1] / height)
        )
        vectors.append(v)
    # EM
    if max_em_iter:
        # complete orthonormal system
        if len(vectors) >= 2:
            vectors.append(normalized_cross(vectors[0], vectors[1]))
        ### EM refinement ###
        x0 = None
        x_opt = None
        exp_coeff = 0.5 / (residual_stdev ** 2)
        num_weights_nnz = 0
        num_weights = 0
        for em_iter in xrange(max_em_iter):
            ### E STEP ###
            # convert back to vanishing points
            points = vectors_to_points(photo, image, vectors)
            # last column is the outlier cluster
            weights = np.zeros((len(all_lines), len(vectors) + 1))
            # estimate weights (assume uniform prior)
            for i_p, p in enumerate(points):
                weights[:, i_p] = [line_residual(l, p) for l in all_lines]
            weights = np.exp(-exp_coeff * np.square(weights))
            # outlier weight
            weights[:, len(points)] = outlier_weight
            # normalize each row (each line segment) to have unit sum
            weights_row_sum = weights.sum(axis=1)
            weights /= weights_row_sum[:, np.newaxis]
            # add sparsity
            weights[weights < weight_clamp] = 0
            num_weights += weights.size
            num_weights_nnz += np.count_nonzero(weights)
            # check convergence
            if (em_iter >= 10 and len(x0) == len(x_opt) and
                    np.linalg.norm(np.array(x0) - np.array(x_opt)) <= 1e-5):
                break
            # sort by weight
            if len(vectors) > 1:
                vectors_weights = [
                    (v, weights[:, i_v].sum()) for i_v, v in enumerate(vectors)
                ]
                vectors_weights.sort(key=lambda x: x[1], reverse=True)
                vectors = [x[0] for x in vectors_weights]
            ### M STEP ###
            # objective function to minimize
            def objective_function(x, *args):
                cur_vectors = unpack_x(x)
                cur_points = vectors_to_points(photo, image, cur_vectors)
                # line-segment errors
                residuals = [
                    weights[i_l, i_p] * line_residual(all_lines[i_l], p)
                    for i_p, p in enumerate(cur_points)
                    for i_l in np.flatnonzero(weights[:, i_p])
                ]
                # penalize deviations from 45 or 90 degree angles
                if lambda_perp:
                    residuals += [
                        lambda_perp * math.sin(4 * math.acos(abs_dot(v, w)))
                        for i_v, v in enumerate(cur_vectors)
                        for w in cur_vectors[:i_v]
                    ]
                return residuals
            # slowly vary parameters
            t = min(1.0, em_iter / 20.0)
            # vary tol from 1e-2 to 1e-6
            tol = math.exp(math.log(1e-2) * (1 - t) + math.log(1e-6) * t)
            from scipy.optimize import leastsq
            x0 = pack_x(vectors)
            x_opt, __ = leastsq(objective_function, x0, ftol=tol, xtol=tol)
            vectors = unpack_x(x_opt)
            ### BETWEEN ITERATIONS ###
            if verbose:
                print 'EM: %s iters, %s clusters, weight sparsity: %s%%' % (
                    em_iter, len(vectors), 100.0 * num_weights_nnz / num_weights)
                print 'residual: %s' % sum(y ** 2 for y in objective_function(x_opt))
            # complete orthonormal system if missing
            if len(vectors) == 2:
                vectors.append(normalized_cross(vectors[0], vectors[1]))
            # merge similar clusters
            cluster_merge_dot = math.cos(math.radians(t * 20.0))
            vectors_merged = []
            for v in vectors:
                if (not vectors_merged or
                        all(abs_dot(v, w) < cluster_merge_dot for w in vectors_merged)):
                    vectors_merged.append(v)
            if verbose and len(vectors) != len(vectors_merged):
                print 'Merging %s --> %s vectors' % (len(vectors), len(vectors_merged))
            vectors = vectors_merged
            residual = sum(r ** 2 for r in objective_function(x_opt))
            print 'EM: %s iters, residual: %s, %s clusters, weight sparsity: %s%%' % (
                em_iter, residual, len(vectors), 100.0 * num_weights_nnz / num_weights)
        # final points
        points = vectors_to_points(photo, image, vectors)
        # sanity checks
        assert len(vectors) == len(points)
        # re-assign clusters
        clusters_points = [([], p) for p in points]
        line_map_cluster = np.argmax(weights, axis=1)
        for i_l, l in enumerate(all_lines):
            i_c = line_map_cluster[i_l]
            if i_c < len(points):
                clusters_points[i_c][0].append(l)
        # throw away small clusters
        clusters_points = filter(
            lambda x: len(x[0]) >= min_cluster_size, clusters_points)
        # reverse sort by cluster length
        clusters_points.sort(
            key=lambda x: line_cluster_length(x[0]), reverse=True)
        # split into two parallel arrays
        clusters = [cp[0] for cp in clusters_points]
        points = [cp[1] for cp in clusters_points]
    else: # no EM
        # refine each cluster's vanishing direction independently by
        # least-squares on the line residuals
        for i_v, lines in enumerate(clusters):
            def objective_function(x, *args):
                p = vectors_to_points(photo, image, unpack_x(x))[0]
                return [line_residual(l, p) for l in lines]
            from scipy.optimize import leastsq
            x0 = pack_x([vectors[i_v]])
            x_opt, __ = leastsq(objective_function, x0)
            vectors[i_v] = unpack_x(x_opt)[0]
        # delete similar vectors
        cluster_merge_dot = math.cos(math.radians(20.0))
        vectors_merged = []
        clusters_merged = []
        for i_v, v in enumerate(vectors):
            if (not vectors_merged or
                    all(abs_dot(v, w) < cluster_merge_dot for w in vectors_merged)):
                vectors_merged.append(v)
                clusters_merged.append(clusters[i_v])
        vectors = vectors_merged
        clusters = clusters_merged
        # clamp number of vectors
        if len(clusters) > max_clusters:
            vectors = vectors[:max_clusters]
            clusters = clusters[:max_clusters]
        points = vectors_to_points(photo, image, vectors)
    # normalize to [0, 0], [1, 1]
    clusters_normalized = [[
        [l[0] / width, l[1] / height, l[2] / width, l[3] / height]
        for l in lines
    ] for lines in clusters]
    points_normalized = [
        (x / width, y / height) for (x, y) in points
    ]
    # save result
    photo.vanishing_lines = json.dumps(clusters_normalized)
    photo.vanishing_points = json.dumps(points_normalized)
    photo.vanishing_length = sum(line_cluster_length(c)
                                 for c in clusters_normalized)
    if save:
        photo.save()
def pack_x(vecs):
    """Pack unit vectors into a flat solver vector of spherical coordinates."""
    return [coord for v in vecs for coord in unit_to_sphere(v)]
def unpack_x(x):
    """Inverse of pack_x: rebuild unit vectors from consecutive coordinate pairs."""
    vecs = []
    for i in xrange(0, len(x), 2):
        vecs.append(np.array(sphere_to_unit(x[i:i + 2])))
    return vecs
def vectors_to_points(photo, image, vectors):
    """Map unit vectors to pixel-space vanishing points for *image*."""
    img_width, img_height = image.size
    result = []
    for vec in vectors:
        point = photo.vector_to_vanishing_point(vec)
        result.append((point[0] * img_width, point[1] * img_height))
    return result
def line_cluster_length(lines):
    """Total Euclidean length of line segments given as (x1, y1, x2, y2)."""
    total = 0
    for x1, y1, x2, y2 in lines:
        total += math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    return total
def line_residual(l, p):
    """ Returns the distance between an endpoint of l and the line from p to
    the midpoint of l. Based on Equation (2) of [Tardif, ICCV 2009
    http://www-etud.iro.umontreal.ca/~tardifj/fichiers/Tardif_ICCV2009.pdf]

    Note: the value is signed; callers square it for least squares, so the
    sign is irrelevant there.
    """
    x1, y1, x2, y2 = l
    midpoint = (0.5 * (x1 + x2), 0.5 * (y1 + y2))
    # homogeneous line through the candidate point p and the segment midpoint
    e = homo_line(p, midpoint)
    # clamp the normalizer to avoid division by ~zero in degenerate cases
    d = max(1e-4, e[0] ** 2 + e[1] ** 2)
    return (e[0] * x1 + e[1] * y1 + e[2]) / math.sqrt(d)
@shared_task
def do_gist_tmp(pk, s, s2):
    # deliberately a no-op: gist computation is currently disabled
    # (see commented-out call below)
    return
    #from photos.management.commands import gist2
    #gist2.do_gist(pk, s, s2)
@shared_task
def download_photo_task(photo_id, filename, format=None, larger_dimension=None):
    """ Downloads a photo and stores it, potentially downsampling it and
    potentially converting formats.

    :param photo_id: database id of the Photo to export
    :param filename: destination path (parent dirs are created as needed)
    :param format: PIL format name; if None it is derived from the
        filename's extension
    :param larger_dimension: if given, downsample so the larger side is at
        most this many pixels
    """
    parent_dir = os.path.dirname(filename)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    photo = Photo.objects.get(id=photo_id)
    if not larger_dimension and not format:
        # plain byte-for-byte copy of the original upload
        photo.image_orig.seek(0)
        with open(filename, 'wb') as f:
            shutil.copyfileobj(photo.image_orig, f)
    else:
        # start from the smallest stored rendition that is still big enough
        if larger_dimension <= 512:
            image = open_image(photo.image_512)
        elif larger_dimension <= 1024:
            image = open_image(photo.image_1024)
        elif larger_dimension <= 2048:
            image = open_image(photo.image_2048)
        else:
            image = open_image(photo.image_orig)
        if max(image.size) > larger_dimension:
            # scale so the larger side equals larger_dimension, keeping aspect
            if image.size[0] > image.size[1]:
                image = image.resize((
                    larger_dimension,
                    larger_dimension * image.size[1] / image.size[0]), Image.ANTIALIAS)
            else:
                image = image.resize((
                    larger_dimension * image.size[0] / image.size[1],
                    larger_dimension), Image.ANTIALIAS)
        if not format:
            # BUG FIX: splitext returns a (root, ext) tuple; the original
            # called .lower() on the tuple, which always raised
            # AttributeError.  We want the lowercased extension.
            format = extension_to_format(os.path.splitext(filename)[1].lower())
        image.save(filename, format)
@shared_task
def photo_to_label_image_task(photo_id, color_map, attr='substance', larger_dimension=320,
                              filename=None, format=None, next_task=None):
    """ Returns a PIL image where each pixel corresponds to a label.
    filename: if specified, save the result to this filename with the specified format
    (instead of returning it since PIL objects often can't be pickled)
    next_task: task to start when this finishes

    :param color_map: dict mapping `<attr>_id` values to fill colors
    :param attr: shape attribute whose id selects the color
    :param larger_dimension: output size of the larger image side
    :return: PIL image, or None if no shape had a mapped label
    """
    # imported here to avoid a circular import at module load time
    from shapes.models import MaterialShape
    from shapes.utils import parse_vertices, parse_triangles
    photo = Photo.objects.get(id=photo_id)
    image = open_image(photo.image_orig)
    w, h = image.size
    # keep aspect ratio; NOTE(review): relies on Python 2 integer division
    if w > h:
        size = (larger_dimension, larger_dimension * h / w)
    else:
        size = (larger_dimension * w / h, larger_dimension)
    label_image = Image.new(mode='RGB', size=size, color=(0, 0, 0))
    drawer = ImageDraw.Draw(label_image)
    # draw larger shapes first so smaller ones paint over them
    shapes = MaterialShape.objects \
        .filter(**{
            'photo_id': photo.id,
            attr + '__isnull': False,
        }) \
        .filter(**MaterialShape.DEFAULT_FILTERS) \
        .order_by('-area')
    has_labels = False
    for shape in shapes:
        # vertices are stored normalized; scale to output pixel coords
        vertices = parse_vertices(shape.vertices)
        vertices = [(int(x * size[0]), int(y * size[1])) for (x, y) in vertices]
        for tri in parse_triangles(shape.triangles):
            points = [vertices[tri[t]] for t in (0, 1, 2)]
            val = getattr(shape, attr + '_id')
            if val in color_map:
                color = color_map[val]
                drawer.polygon(points, fill=color, outline=None)
                has_labels = True
    if not has_labels:
        return None
    if filename:
        label_image.save(filename, format=format)
        if next_task:
            next_task()
    else:
        return label_image
|
nilq/baby-python
|
python
|
from collections import namedtuple
from unittest.mock import MagicMock, patch, PropertyMock
from onap_data_provider.resources.esr_system_info_resource import (
CloudRegion,
EsrSystemInfoResource,
)
# request payload used to construct EsrSystemInfoResource in every test below
ESR_RESOURCE_DATA = {
    "esr-system-info-id": "Test ID",
    "user-name": "Test name",
    "password": "testpass",
    "system-type": "test type",
    "service-url": "test url",
    "cloud-domain": "test cloud domain",
}
# minimal stand-in for the SDK's EsrSystemInfo object: only the id is read
EsrSystemInfoNamedtuple = namedtuple("EsrSystemInfo", ["esr_system_info_id"])
@patch(
    "onap_data_provider.resources.esr_system_info_resource.CloudRegion.esr_system_infos",
    new_callable=PropertyMock,
)
def test_esr_system_info_resource_esr_system_info(mock_cloud_region_esr_system_infos):
    """esr_system_info resolves only when the region lists a matching id."""
    cloud_region = CloudRegion(
        cloud_owner="test",
        cloud_region_id="test",
        orchestration_disabled=True,
        in_maint=True,
    )
    esr_resource = EsrSystemInfoResource(ESR_RESOURCE_DATA, cloud_region)
    # region exposes no esr system infos -> property resolves to None
    mock_cloud_region_esr_system_infos.return_value = iter([])
    assert esr_resource.esr_system_info is None
    # one info whose id matches ESR_RESOURCE_DATA -> property resolves to it
    mock_cloud_region_esr_system_infos.return_value = iter(
        [EsrSystemInfoNamedtuple("Test ID")]
    )
    assert esr_resource.esr_system_info is not None
@patch(
    "onap_data_provider.resources.esr_system_info_resource.EsrSystemInfoResource.esr_system_info",
    new_callable=PropertyMock,
)
def test_esr_system_info_resource_exists(mock_esr_system_info):
    """exists mirrors the truthiness of the esr_system_info property."""
    mock_esr_system_info.return_value = None
    cloud_region_mock = MagicMock()
    esr_resource = EsrSystemInfoResource(ESR_RESOURCE_DATA, cloud_region_mock)
    assert esr_resource.exists is False
    mock_esr_system_info.return_value = 1  # any truthy value counts
    assert esr_resource.exists is True
@patch(
    "onap_data_provider.resources.esr_system_info_resource.EsrSystemInfoResource.exists",
    new_callable=PropertyMock,
)
def test_esr_system_info_resource_create(mock_exists):
    """create() registers the esr system info only when it does not exist yet."""
    cloud_region_mock = MagicMock()
    esr_resource = EsrSystemInfoResource(ESR_RESOURCE_DATA, cloud_region_mock)
    # already present -> create() must be a no-op
    mock_exists.return_value = True
    esr_resource.create()
    cloud_region_mock.add_esr_system_info.assert_not_called()
    # absent -> create() forwards the mapped ESR_RESOURCE_DATA fields
    mock_exists.return_value = False
    esr_resource.create()
    cloud_region_mock.add_esr_system_info.assert_called_once_with(
        esr_system_info_id="Test ID",
        user_name="Test name",
        password="testpass",
        system_type="test type",
        system_status="active",
        service_url="test url",
        cloud_domain="test cloud domain",
        default_tenant=None,
    )
|
nilq/baby-python
|
python
|
import urwid
class UserInfoBar(urwid.Text):
    """Single-line urwid status bar showing a user's name and stats.

    The text refreshes itself whenever the user emits a 'profile_update'
    or 'stats_update' signal.
    """

    PARTS = (
        ('name', '{u.name}'),
        ('level', '{s.level} level'),
        ('hp', '{s.hp}/{s.max_hp} hp'),
        ('exp', '{s.exp}/{s.max_exp} exp'),
        ('mp', '{s.mp}/{s.max_mp} mp'),
        ('gold', '{s.gold:.2f} gold'),
    )

    @classmethod
    def info_markup_for(cls, user):
        """Build urwid text markup for *user*; '' when stats are missing."""
        if not user.stats:
            return ''
        # format every part and separate consecutive parts with ', '
        pieces = []
        for attr_name, template in cls.PARTS:
            if pieces:
                pieces.append(', ')
            pieces.append((attr_name, template.format(u=user, s=user.stats)))
        return ('info_bar', pieces)

    def __init__(self, user):
        super().__init__(self.info_markup_for(user), align=urwid.CENTER)
        self.user = user
        # re-render on any profile or stats change
        urwid.connect_signal(user, 'profile_update', self.on_stats_update)
        urwid.connect_signal(user, 'stats_update', self.on_stats_update)

    def on_stats_update(self):
        self.set_text(self.info_markup_for(self.user))
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import json; from pprint import pprint
# data_root points at the directory holding the CAD-120 dataset
Settings = json.load(open('settings.txt'))
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.insert(0,'../')
from os.path import isdir
root = Settings['data_root']
from pak.datasets.CAD_120 import CAD_120
# load subject 1 and pick one video of the 'arranging_objects' activity
data = CAD_120(root)
suj1 = data.get_subject(1)
vid = suj1['arranging_objects']['0510175431']
pts3d = data.get_3d_points_from_skel(vid['skeleton'])
actions = vid['actions']
person = pts3d[0]
# =========
# animate the skeleton frame by frame, titling each frame with its action
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(111, projection='3d')
n_frames = len(pts3d)
for t in range(n_frames):
    action = actions[t]
    person = pts3d[t]
    ax.clear()  # redraw from scratch each frame
    ax.set_title(action)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    # fixed axis limits so the camera does not jump between frames
    ax.set_xlim([-800, 800])
    ax.set_ylim([-2100, -500])
    ax.set_zlim([-800, 800])
    data.plot(ax, person, plot_jids=True)
    plt.pause(33/1000)  # ~30 fps playback
plt.show()
|
nilq/baby-python
|
python
|
class Counter:
    """Edge counter for GPIO callbacks, comparable against plain numbers."""

    def __init__(self):
        self.count = 0

    def count_up(self, channel):
        """Increment the counter and report the edge seen on *channel*."""
        self.count = self.count + 1
        message = 'GPIO%02d count=%d' % (channel, self.count)
        print(message)

    def __eq__(self, other):
        # compare the raw count so `counter == 3` works directly
        return self.count == other

    def __lt__(self, other):
        return self.count < other
def main():
    """Walk through the RPi.GPIO edge-detection APIs on pin 10, lighting LEDs
    on pins 5/6/13/19/26 as each stage completes. Requires real Pi hardware."""
    import RPi.GPIO as GPIO
    import time
    try:
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        # LEDs as outputs; three input pins with no internal pull resistor.
        GPIO.setup([5, 6, 13, 19, 26], GPIO.OUT)
        GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        GPIO.setup(9, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        GPIO.setup(11, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        print('Press GPIO10.')
        # Stage 1: busy-poll until the button goes high.
        while GPIO.input(10) == GPIO.LOW:
            time.sleep(0.1)
        GPIO.output(5, GPIO.HIGH)
        # Stage 2: blocking wait_for_edge for each edge kind in turn.
        channel = GPIO.wait_for_edge(10, GPIO.FALLING)
        if channel == 10:
            GPIO.output(6, GPIO.HIGH)
        channel = GPIO.wait_for_edge(10, GPIO.RISING)
        if channel == 10:
            GPIO.output(13, GPIO.HIGH)
        channel = GPIO.wait_for_edge(10, GPIO.BOTH)
        if channel == 10:
            GPIO.output(19, GPIO.HIGH)
        # Stage 3: wait_for_edge with a timeout; None means the timeout fired.
        while True:
            print('Wait timeout.')
            channel = GPIO.wait_for_edge(10, GPIO.BOTH, timeout=1000)
            if channel is None:
                GPIO.output(26, GPIO.HIGH)
                break
        print('Press GPIO10.')
        # Stage 4: non-blocking add_event_detect + event_detected polling.
        GPIO.add_event_detect(10, GPIO.RISING)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(5, GPIO.LOW)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.FALLING)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(6, GPIO.LOW)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.BOTH)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(13, GPIO.LOW)
        GPIO.remove_event_detect(10)
        # Stage 5: same, but with callbacks attached at registration time.
        def risen(ch):
            print('risen GPIO%02d' % ch)
        def fallen(ch):
            print('fallen GPIO%02d' % ch)
        GPIO.add_event_detect(10, GPIO.RISING, risen)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(5, GPIO.HIGH)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.FALLING, fallen)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(6, GPIO.HIGH)
        GPIO.remove_event_detect(10)
        GPIO.add_event_detect(10, GPIO.BOTH, risen)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(13, GPIO.HIGH)
        GPIO.remove_event_detect(10)
        # Stage 6: multiple callbacks registered after add_event_detect.
        def changed(ch):
            print('changed GPIO%02d' % ch)
        GPIO.add_event_detect(10, GPIO.BOTH)
        GPIO.add_event_callback(10, fallen)
        GPIO.add_event_callback(10, changed)
        while not GPIO.event_detected(10):
            time.sleep(1)
        GPIO.output(26, GPIO.LOW)
        GPIO.remove_event_detect(10)
        print('Press! Press! Press!')
        # Stage 7: debounced callbacks feeding the Counter class; loop until
        # ten presses have been counted (Counter.__lt__ compares to an int).
        counter = Counter()
        GPIO.add_event_detect(10, GPIO.RISING, callback=counter.count_up, bouncetime=100)
        GPIO.add_event_callback(10, counter.count_up, bouncetime=500)
        while counter < 10:
            time.sleep(1)
        GPIO.output(19, GPIO.LOW)
        GPIO.remove_event_detect(10)
        time.sleep(1)
    finally:
        # Always restore pins to their default state, even on Ctrl-C.
        GPIO.cleanup()
if __name__ == '__main__':
    import logging
    # Verbose logging helps when debugging edge detection on real hardware.
    logging.basicConfig(level=logging.DEBUG)
    main()
|
nilq/baby-python
|
python
|
import cv2 as cv
import time
import datetime
import requests
import json
import pathlib
import re
API_ROUTE_URL = "http://localhost:5000/sendEmail"
SECONDS_TO_RECORD_AFTER_DETECTION = 15
SECONDS_BETWEEN_RECORDINGS = 60
def drawRectangle(obj, frame):
    """Draw a green 2px bounding box on *frame* for each (x, y, w, h) in *obj*."""
    for (left, top, w, h) in obj:
        cv.rectangle(frame, (left, top), (left + w, top + h), (0, 255, 0), 2)
def makeApiRequest(fileName, email):
    """POST the recording's location to the e-mail service; return the response."""
    body = {
        "to": email,
        "path": f"{pathlib.Path(__file__).parent.resolve()}/videos",
        "fileName": fileName
    }
    headers = {
        "Content-Type": "application/json"
    }
    return requests.request("POST", API_ROUTE_URL, headers=headers, data=json.dumps(body))
def validateEmail(email):
    """Return True when *email* matches a basic ``local@domain.tld`` shape.

    Local part: word characters and hyphens. Domain: one or more dot-separated
    labels ending in an alphabetic TLD of at least two letters.

    Uses ``fullmatch`` instead of ``match`` with ``^...$`` so that a trailing
    newline (which ``$`` tolerates) is rejected.
    """
    pattern = re.compile(r'[\w-]+@(?:[a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}')
    return pattern.fullmatch(email) is not None
def requestEmail():
    """Prompt on stdin until a valid alert e-mail address is entered."""
    prompt_shown = False
    while True:
        # Show the initial instruction only once; later rounds show the error.
        if not prompt_shown:
            print("Insira o e-mail para onde vamos enviar os alertas:")
            prompt_shown = True
        email = input()
        if validateEmail(email):
            print("\nE-mail cadastrado com sucesso!\n")
            return email
        print("\nO e-mail inserido não é valido! Tente novamente:")
def main():
    """Watch the camera, record a clip whenever a face or body is detected,
    and e-mail each finished clip to the configured address."""
    email = requestEmail()
    # Setup
    cap = cv.VideoCapture(2)  # É necessário encontrar o indice da sua camera
    frame_size = (int(cap.get(3)), int(cap.get(4)))
    fourcc = cv.VideoWriter_fourcc(*"mp4v")
    face_cascade = cv.CascadeClassifier(
        cv.data.haarcascades + "haarcascade_frontalface_default.xml")
    body_cascade = cv.CascadeClassifier(
        cv.data.haarcascades + "haarcascade_fullbody.xml")
    # Declaring variables
    detection = False
    last_time_saved = None
    start_time = None
    current_time = None
    # BUG FIX: out must exist even if no detection ever happens, otherwise the
    # final release() below raised NameError.
    out = None
    print("Sistema iniciando...\n")
    # Main loop
    while True:
        _, frame = cap.read()
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.25, 5)
        bodies = body_cascade.detectMultiScale(gray, 1.25, 5)
        if len(faces) + len(bodies) > 0:
            if not detection:
                # Start a new recording only after the cooldown period.
                if last_time_saved is None or time.time() - last_time_saved >= SECONDS_BETWEEN_RECORDINGS:
                    detection = True
                    start_time = time.time()
                    current_time = datetime.datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
                    out = cv.VideoWriter(
                        f"./videos/{current_time}.mp4", fourcc, 20, frame_size
                    )
                    print(f"Gravação iniciada!")
        if detection and time.time() - start_time >= SECONDS_TO_RECORD_AFTER_DETECTION:
            # Recording window elapsed: close the file and e-mail it.
            detection = False
            out.release()
            last_time_saved = time.time()
            print("Gravação finalizada!")
            fileName = current_time + ".mp4"
            try:
                response = makeApiRequest(fileName, email)
                if response.status_code == 200:
                    print(f"E-mail com arquivo {current_time}.mp4 enviado!\n")
            except Exception:
                print("Algum erro ocorreu no envio do e-mail!")
        drawRectangle(faces, frame)
        drawRectangle(bodies, frame)
        if detection:
            out.write(frame)
        # Mostra a camera na tela
        cv.imshow("Camera", frame)
        if (cv.waitKey(1) == ord("q")):
            break
    if out is not None:
        out.release()
    cap.release()
    cv.destroyAllWindows()
if __name__ == "__main__":
    # Entry point: runs the surveillance loop until 'q' is pressed.
    main()
|
nilq/baby-python
|
python
|
"""Optimizer for inlining constant values."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ast
from ..asttools import name as nametools
from ..asttools import visitor
from ..astwrappers import name as namewrap
def _resolve_constant_value(name):
    """Get the AST node representing the constant value of a name.

    Returns None when the declaration is not an ``ast.Assign`` or when a
    tuple-unpacking assignment does not contain the name.
    """
    decl = name.declaration
    if not isinstance(decl, ast.Assign):
        return None
    target = decl.targets[0]
    value = decl.value
    # Tuple-to-tuple assignment: pick the value aligned with the name's slot.
    if isinstance(target, ast.Tuple) and isinstance(value, ast.Tuple):
        for idx, elt in enumerate(target.elts):
            if isinstance(elt, ast.Name) and elt.id == name.token:
                return value.elts[idx]
        return None
    return value
class ConstantInliner(visitor.NodeTransformer):
    """NodeTransformer which places constant values in-line."""
    def visit_Name(self, node):
        """Replace ast.Name with a value if it is a constant reference."""
        n = namewrap.Name(node)
        # Skip if not loading the value from memory.
        if not isinstance(node.ctx, ast.Load):
            return node
        # Skip if value is not constant.
        if not n.constant:
            return node
        # Skip if the node represents the initial assignment.
        if n.node is n.declaration:
            return node
        # Skip if the constant value cannot be found.
        if n.source in (
            nametools.NAME_SOURCE.BUILTIN,
            nametools.NAME_SOURCE.IMPORTED,
        ):
            return node
        value = _resolve_constant_value(n)
        # Skip if no value could be resolved from the declaration.
        # (Checked before the type test so None is handled explicitly.)
        if value is None:
            return node
        # Skip if the value is a complex type.
        if not (
            isinstance(value, ast.Num) or
            isinstance(value, ast.Str) or
            isinstance(value, ast.Name)
        ):
            return node
        return value
    def visit_BinOp(self, node):
        """Perform binary ops if all values are constant."""
        self.generic_visit(node)
        if isinstance(node.op, ast.Add):
            if (
                isinstance(node.left, ast.Num) and
                isinstance(node.right, ast.Num)
            ):
                return ast.Num(n=node.left.n + node.right.n)
            if (
                isinstance(node.left, ast.Str) and
                isinstance(node.right, ast.Str)
            ):
                # BUG FIX: concatenate the string payloads (.s), not the AST
                # node objects themselves (which raised TypeError).
                return ast.Str(s=node.left.s + node.right.s)
        if isinstance(node.op, ast.Sub):
            if (
                isinstance(node.left, ast.Num) and
                isinstance(node.right, ast.Num)
            ):
                return ast.Num(n=node.left.n - node.right.n)
        if isinstance(node.op, ast.Mult):
            if (
                isinstance(node.left, ast.Num) and
                isinstance(node.right, ast.Num)
            ):
                return ast.Num(n=node.left.n * node.right.n)
            if (
                isinstance(node.left, ast.Str) and
                isinstance(node.right, ast.Num)
            ):
                # BUG FIX: repeat the string payload by the numeric payload,
                # not the AST node by the AST node.
                return ast.Str(s=node.left.s * node.right.n)
        if isinstance(node.op, ast.Div):
            if (
                isinstance(node.left, ast.Num) and
                isinstance(node.right, ast.Num)
            ):
                return ast.Num(n=node.left.n / node.right.n)
        if isinstance(node.op, ast.FloorDiv):
            if (
                isinstance(node.left, ast.Num) and
                isinstance(node.right, ast.Num)
            ):
                return ast.Num(n=node.left.n // node.right.n)
        if isinstance(node.op, ast.Mod):
            if (
                isinstance(node.left, ast.Num) and
                isinstance(node.right, ast.Num)
            ):
                return ast.Num(n=node.left.n % node.right.n)
        if isinstance(node.op, ast.Pow):
            if (
                isinstance(node.left, ast.Num) and
                isinstance(node.right, ast.Num)
            ):
                return ast.Num(n=node.left.n ** node.right.n)
        return node
def optimize(node):
    """Optimize an AST by in-lining constant values.

    Repeats the inlining pass until a run reports no modifications.
    """
    while True:
        inliner = ConstantInliner(node)
        inliner.visit()
        if inliner.modified is not True:
            break
|
nilq/baby-python
|
python
|
from ctypes import *
from vcx.common import do_call, create_cb, error_message
from vcx.error import VcxError, ErrorCode
from vcx.api.vcx_base import VcxBase
import json
class Schema(VcxBase):
    """
    Object that represents a schema written on the ledger.

    Wraps a native vcx handle; all ledger interaction goes through the
    ``vcx_schema_*`` C calls via :func:`do_call`.

    Attributes:
        source_id: user generated unique identifier
        schema_id: the ledger ID of the schema
        attrs: attribute/value pairs
        version: version of the schema
    """
    def __init__(self, source_id: str, name: str, version: str, attrs: list):
        """Initialise the Python-side wrapper only; no ledger call happens here."""
        VcxBase.__init__(self, source_id)
        self._source_id = source_id
        # Ledger ID is unknown until create()/deserialize() fetches it.
        self._schema_id = None
        self._attrs = attrs
        self._name = name
        self._version = version
    def __del__(self):
        # Free the native handle when the Python wrapper is garbage collected.
        self.release()
        self.logger.debug("Deleted {} obj: {}".format(Schema, self.handle))
    @property
    def schema_id(self):
        """Ledger ID of the schema (None until created or deserialized)."""
        return self._schema_id
    @schema_id.setter
    def schema_id(self, x):
        self._schema_id = x
    @property
    def name(self):
        """Human-readable schema name."""
        return self._name
    @name.setter
    def name(self, x):
        self._name = x
    @property
    def attrs(self):
        """List of attribute names defined by the schema."""
        return self._attrs
    @attrs.setter
    def attrs(self, x):
        self._attrs = x
    @property
    def version(self):
        """Schema version string, e.g. '1.0'."""
        return self._version
    @version.setter
    def version(self, x):
        self._version = x
    @staticmethod
    async def create(source_id: str, name: str, version: str, attrs: list, payment_handle: int):
        """
        Creates a new schema object that is written to the ledger

        :param source_id: Institution's unique ID for the schema
        :param name: Name of schema
        :param version: Version of the schema
        :param attrs: Atttributes of the schema
        :param payment_handle: NYI - payment of ledger fee is taken from wallet automatically
        Example:
            source_id = 'foobar123'
            name = 'Address Schema'
            version = '1.0'
            attrs = ['address', 'city', 'state']
            payment_handle = 0
            schema = await Schema.create(source_id, name, version, attrs, payment_handle)
        :return: schema object, written to ledger
        """
        constructor_params = (source_id, name, version, attrs)
        # Marshal each argument into the C types the native call expects.
        c_source_id = c_char_p(source_id.encode('utf-8'))
        c_name = c_char_p(name.encode('utf-8'))
        c_version = c_char_p(version.encode('utf-8'))
        c_schema_data = c_char_p(json.dumps(attrs).encode('utf-8'))
        c_payment = c_uint32(payment_handle)
        c_params = (c_source_id, c_name, c_version, c_schema_data, c_payment)
        schema = await Schema._create("vcx_schema_create", constructor_params, c_params)
        # Fetch the ledger-assigned ID so the wrapper is immediately usable.
        schema.schema_id = await schema.get_schema_id()
        return schema
    @staticmethod
    async def deserialize(data: dict):
        """
        Create the object from a previously serialized object.

        :param data: The output of the "serialize" call
        Example:
            schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
            data1 = await schema1.serialize()
        :return: A re-instantiated object
        :raises VcxError: with InvalidSchema when expected keys are missing.
        """
        try:
            # Todo: Find better way to access attr_names. Potential for issues.
            schema = await Schema._deserialize("vcx_schema_deserialize",
                                               json.dumps(data),
                                               data['data']['source_id'],
                                               data['data']['name'],
                                               data['data']['version'],
                                               data['data']['data'])
            schema.schema_id = await schema.get_schema_id()
            return schema
        except KeyError:
            raise VcxError(ErrorCode.InvalidSchema, error_message(ErrorCode.InvalidSchema))
    @staticmethod
    async def lookup(source_id: str, schema_id: str):
        """
        Create a new schema object from an existing ledger schema

        :param source_id: Institution's personal identification for the schema
        :param schema_id: Ledger schema ID for lookup
        Example:
            data = await Schema.lookup(source_id, schema_id)
        :return: schema object populated from the ledger
        :raises VcxError: with InvalidSchema when the response lacks expected keys.
        """
        try:
            # Placeholder wrapper; its fields are filled from the ledger below.
            schema = Schema(source_id, '', '', [])
            # The native callback is created once and cached on the function.
            if not hasattr(Schema.lookup, "cb"):
                schema.logger.debug("vcx_schema_get_attributes: Creating callback")
                Schema.lookup.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p))
            c_source_id = c_char_p(source_id.encode('utf-8'))
            c_schema_id = c_char_p(schema_id.encode('utf-8'))
            handle, data = await do_call('vcx_schema_get_attributes',
                                         c_source_id,
                                         c_schema_id,
                                         Schema.lookup.cb)
            schema.logger.debug("created schema object")
            schema_result = json.loads(data.decode())
            schema.attrs = schema_result['data']
            schema.name = schema_result['name']
            schema.version = schema_result['version']
            schema.handle = handle
            return schema
        except KeyError:
            raise VcxError(ErrorCode.InvalidSchema)
    async def serialize(self) -> dict:
        """
        Serialize the object for storage

        Example:
            schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
            data1 = await schema1.serialize()
        :return: serialized object
        """
        return await self._serialize(Schema, 'vcx_schema_serialize')
    def release(self) -> None:
        """
        destroy the object and release any memory associated with it

        :return: None
        """
        self._release(Schema, 'vcx_schema_release')
    async def get_schema_id(self):
        """
        Get the ledger ID of the object

        Example:
            schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
            id1 = await schema.get_schema_id()
        :return: ID string
        """
        cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
        c_handle = c_uint32(self.handle)
        id = await do_call('vcx_schema_get_schema_id', c_handle, cb)
        return id.decode()
    async def get_payment_txn(self):
        """
        Get the payment transaction information generated when paying the ledger fee

        Example:
            schema1 = await Schema.create(source_id, name, version, attrs, payment_handle)
            txn = await schema1.get_payment_txn()
        :return: JSON object with input address and output UTXOs
        """
        # Callback is created once and cached on the function object.
        if not hasattr(Schema.get_payment_txn, "cb"):
            self.logger.debug("vcx_schema_get_payment_txn: Creating callback")
            Schema.get_payment_txn.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
        c_credential_handle = c_uint32(self.handle)
        payment_txn = await do_call('vcx_schema_get_payment_txn',
                                    c_credential_handle,
                                    Schema.get_payment_txn.cb)
        return json.loads(payment_txn.decode())
|
nilq/baby-python
|
python
|
# import socket
# import time
# from threading import *
# def send_message(str):
# s.send(str.encode())
# # data = ''
# # data = s.recv(1024).decode()
# # print (data)
# print("START")
# # Initialize host and port
# host = "10.0.0.1"
# port = 8000
# print (host)
# print (port)
# # Initialize window variables
# nextSeqNum = 1
# nextAckExpected = 1
# windowSize = 7
# lastPacket = 100
# lastAckReceived=-1
# # Create Client Socket
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # s.settimeout(0.1)
# print("Trying to connect with server")
# # Connect Socket to server
# s.connect((host, port))
# print("Connection established with server")
# done = False
# while not done:
# if(nextSeqNum < nextAckExpected + windowSize)and nextSeqNum <= lastPacket and not done:
# # Create Packet (Data is Packet Number here)
# pkt = str(nextSeqNum) + ',' + 'Custom Data Here'
# # Send Packet
# send_message(pkt)
# print("Packet sent to server")
# # Increment nextSeqNum
# nextSeqNum = nextSeqNum + 1
# try:
# packet = s.recvfrom(1024).decode().split(',')
# print('Client received- '+str(packet))
# if packet[0] == nextAckExpected:
# nextAckExpected += 1
# lastAckReceived = time.time()
# if packet[0] == lastPacket:
# done = True
# except:
# if(time.time() - lastAckReceived > 0.1):
# for i in range(windowSize):
# pkt = str(i+nextAckExpected) + ',' + 'Custom Data Here'
# send_message(pkt)
# # send_message("hello there!")
# # print('server sent:', s.recv(1024).decode())
# s.close()
import socket
import time
from threading import *
def send_message(message):
    """Encode *message* and send it over the already-connected socket.

    (Parameter renamed from ``str`` — the original shadowed the builtin.)
    """
    s.send(message.encode())


s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
host = "10.0.0.1"
port = 8000
print(host)
print(port)
s.connect((host, port))
done = False
# Initialize window variables
nextSeqNum = 1
nextAckExpected = 1
windowSize = 7
lastPacket = 100
lastAckReceived = -1
while not done:
    if (nextSeqNum < nextAckExpected + windowSize) and nextSeqNum <= lastPacket:
        # Create Packet (Data is Packet Number here)
        pkt = str(nextSeqNum) + ',' + 'Custom Data Here'
        # Send Packet
        send_message(pkt)
        print("Packet sent to server")
        packet = s.recv(1024).decode().split(',')
        print('Client received- ' + str(packet))
        # Increment nextSeqNum
        nextSeqNum = nextSeqNum + 1
    else:
        # BUG FIX: `done` was never set in the original, so once the window
        # closed or all packets were sent the loop spun forever.
        done = True
    time.sleep(1)
send_message("hello there!")
print('server sent:', s.recv(1024).decode())
s.close()
|
nilq/baby-python
|
python
|
import json
import click
import pandas as pd
@click.command()
@click.argument('jsonfile')
@click.argument('outfile')
def main(jsonfile, outfile):
    """Convert a GeoJSON FeatureCollection of points into a Name/Long/Lat CSV.

    Features without a ``name`` property are labelled "Unbenannt".
    Assumes each feature's geometry is a Point — TODO confirm for other inputs.
    """
    with open(jsonfile, "r") as f:
        features = json.load(f)["features"]
    # Build all rows first; filling a pre-allocated DataFrame row by row with
    # .iloc (as the original did) is slow and error prone.
    rows = [
        (
            feat["properties"].get("name", "Unbenannt"),
            feat["geometry"]["coordinates"][0],
            feat["geometry"]["coordinates"][1],
        )
        for feat in features
    ]
    data = pd.DataFrame(rows, columns=["Name", "Long", "Lat"])
    with open(outfile, "w") as f:
        data.to_csv(f, index=False)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
'''
Vector Space Model stuff
'''
from .base import Space, AggSpace
from .mmspace import MMSpace
import sim
__all__ = ['Space', 'AggSpace', 'MMSpace']
|
nilq/baby-python
|
python
|
import os
import redis
from flask import Flask, g
from flask_bootstrap import Bootstrap
from app.facility_loader import load_facilities
from app.mod_api import mod_api
from app.mod_frontend import mod_frontend
def create_app(config_filename):
    """Creates and initialize app.

    :param config_filename: Config to load.
    :type config_filename: str
    :return: App
    :rtype:flask.app.Flask: App
    """
    app = Flask(__name__)
    # extensions
    Bootstrap(app)
    # configuration
    app.config.from_pyfile(config_filename)
    # Default to a local redis when not running beside a linked container
    # (setdefault replaces the original `not in os.environ.keys()` check).
    os.environ.setdefault('REDIS_PORT_6379_TCP_ADDR', 'localhost')
    # logging
    if not app.debug:
        import logging
        # loading this actually sets the logger up
        from app.logger import HANDLER
        app.logger.addHandler(HANDLER)
        app.logger.setLevel(logging.DEBUG)
    # register blueprints
    app.register_blueprint(mod_frontend)
    app.register_blueprint(mod_api, url_prefix='/api')
    # create redis connection before every request
    @app.before_request
    def before_request():
        g.redis = redis.StrictRedis(
            host=os.environ['REDIS_PORT_6379_TCP_ADDR'])
    # add facilities
    app.facilities = load_facilities()
    return app
|
nilq/baby-python
|
python
|
"""Runs tests (encapsulated in a Test class) and documents results in `test_output/logs.txt`"""
from datetime import datetime # for recording data and time of tests
from selenium import webdriver # for testing web app
import sqlite3 # for testing database
import os # for navigating between folders
import sys
import db_commands as db
import reader
import literature as lit
class Test():
    """Runs every check in sequence on instantiation and appends the results
    (plus a pass/fail summary) to test_output/logs.txt."""
    def __init__(self):
        self.tests = 0    # number of checks attempted
        self.success = 0  # number of checks that passed
        with open(os.path.join(sys.path[0], 'test_output', 'logs.txt'), 'a') as log:
            now = datetime.now()
            log.write("\n" + now.strftime("%d/%m/%Y %H:%M:%S"))
            log.write(self.logic())
            # ~ log.write(self.flask_app("localhost:5000/test", "Running!"))
            log.write(self.db_commands())
            log.write(self.bib_reader())
            log.write(self.pdf_reader())
            # ~ log.write(self.api_interactions())
            log.write(self.lit_classes())
            # ~ log.write(self.jamstack_gui())
            # ~ log.write(self.web_scraper())
            summary = f"\nRan {self.tests} tests: {self.success}/{self.tests} were successful."
            print(summary)
            log.write(summary)
            log.write("\n")
    def logic(self):
        """
        Shows that the basic logic of my Test framework works.
        """
        self.tests += 1
        try:
            assert True == True
            self.success += 1
            result = "\nTest Successful: Logic working as expected."
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit —
        # consider narrowing to `except Exception` throughout this class.
        except:
            e = sys.exc_info()
            print(f"\n\n\nERROR: {e}")
            result = "\nTest Failed: Logic not working as expected."
        print(result)
        return result
    def flask_app(self, page_location, confirmation):
        """
        Tests that Flask App is running as expected.
        Uses Selenium Webdriver to check Flask App is running as expected.
        Args:
            URL (string): the address where the Flask App is running.
            page_title (string): the title of the webpage,
                as it should be defined by <title> tags.
        """
        self.tests += 1
        driver = webdriver.Chrome(os.path.join(sys.path[0], 'chromedriver.exe'))
        driver.get(page_location)
        print(driver)
        if driver.title == confirmation:
            self.success += 1
            result = "\nTest Successful: Flask App running as expected."
        else:
            e = sys.exc_info()
            print(f"\n\n\nERROR: {e}")
            result = """\nTest Failed: Flask App not running as expected.
            (It may not be broken -- you need to run it explicitly.)"""
        print(result)
        return result
    def db_commands(self):
        """
        Tests that database commands from ../db_commands.py are working as expected.
        Args:
            file (string): file location for db_commands.py
        """
        self.tests += 1
        details = ""
        try:
            q = db.Query(os.path.join(sys.path[0], 'citation_graph.db'))
            assert q.test() == "connected"
            details += '\n>>>Connection working'
            q.create_table('test_table', ('key', 'other_column'))
            details += '\n>>>create_table() working'
            q.save_row_to_table('test_table', ('test_key', 'testing_testing'))
            details += '\n>>>save_row_to_table() working'
            assert q.search('test_table', 'key', 'test_key')[0] == ('test_key', 'testing_testing')
            details += '\n>>>search() working'
            q.remove_row('test_table', 1)
            details += '\n>>>remove_row() working'
            assert len(q.search('test_table', 'key', 'test_key')) == 0
            # Duplicate-row handling: insert the same row twice, then dedupe.
            q.save_row_to_table('test_table', ('test_key', 'testing_testing'), allow_duplicate=True)
            q.save_row_to_table('test_table', ('test_key', 'testing_testing'), allow_duplicate=True)
            assert len(q.search('test_table', 'key', 'test_key')) == 2
            details += '\n>>>testing remove_duplicate_rows()'
            q.remove_duplicate_rows('test_table', 'test_key')
            assert len(q.search('test_table', 'key', 'test_key')) == 1
            details += '\n>>>remove_duplicate_rows() working'
            q.drop_table('test_table')
            details += '\n>>>drop_table() working'
            self.success += 1
            result = "\nTest Successful: Database Commands working as expected."
        except:
            e = sys.exc_info()
            print(f"\n\n\nERROR: {e}")
            result = "\nTest Failed: Database Commands not working as expected."
        result += details
        print(result)
        return result
    def bib_reader(self):
        """
        Tests that reader.py is able to read in .bib files by running a check
        on the _references.bib and _citations.bib files for @RWebberBurrows2018.
        """
        self.tests += 1
        try:
            db_file = os.path.join(sys.path[0], 'citation_graph.db')
            start = reader.Bib(db_file, 'RWebberBurrows2018')
            self.success += 1
            result = "\nTest Successful: .bib Reader working as expected."
        except:
            e = sys.exc_info()
            print(f"\n\n\nERROR: {e}")
            result = """\nTest Failed: .bib Reader not working as expected.
            (Check that the _references.bib and _citations.bib files
            for @RWebberBurrows2018 are still in the bib_files folder)"""
        print(result)
        return result
    def pdf_reader(self):
        """
        Tests that reader.py is able to read and interpret .pdf files by
        running a check on the pdf file of references for @RWebberBurrows2018.
        """
        self.tests += 1
        try:
            db_file = os.path.join(sys.path[0], 'citation_graph.db')
            get = reader.Pdf(db_file, 'RWebberBurrows2018')
            data = get.refs()
            # The reference PDF for @RWebberBurrows2018 contains 216 entries.
            assert len(data) == 216
            self.success += 1
            result = "\nTest Successful: PDF Reader working as expected."
        except:
            e = sys.exc_info()
            print(f"\n\n\nERROR: {e}")
            result = "\nTest Failed: PDF Reader not working as expected."
        print(result)
        return result
    def api_interactions(self):
        """
        Tests that the DOI & OCI APIs are working by testing for doi='10.1186/ar4086'.
        """
        self.tests += 1
        try:
            get = reader.Api('10.1186/ar4086')
            responses = get.data()
            for r in responses: assert r.status_code == 200
            self.success += 1
            result = "\nTest Successful: DOI & OCI API interactions working as expected."
        except:
            e = sys.exc_info()
            print(f"\n\n\nERROR: {e}")
            result = "\nTest Failed: DOI & OCI API interactions not working as expected."
        print(result)
        return result
    def lit_classes(self):
        """
        Tests that the literature.py classes are working.
        """
        self.tests += 1
        db_file=os.path.join(sys.path[0], 'citation_graph.db')
        details = ""
        try:
            # Each class is instantiated then removed so no rows are left behind.
            test_text = lit.Text(db_file, key='TEST_TEXT')
            test_text.remove()
            details += "\n>>>lit.Text() class working"
            test_book = lit.Book(db_file, key='TEST_BOOK')
            test_book.remove()
            details += "\n>>>lit.Book() class working"
            test_chapter = lit.Chapter(db_file, key='TEST_CHAPTER')
            test_chapter.remove()
            details += "\n>>>lit.Chapter() class working"
            test_article = lit.Article(db_file, key='TEST_ARTICLE')
            test_article.remove()
            details += "\n>>>lit.Article() class working"
            test_creator = lit.Creator(db_file, surname='TEST_CREATOR')
            test_creator.remove()
            details += "\n>>>lit.Creator() class working"
            test_citation = lit.Citation(db_file, citing='TEST_CITATION', cited='TEST_REFERENCE')
            test_citation.remove()
            details += "\n>>>lit.Citation() class working"
            self.success += 1
            result = "\nTest Successful: Literature Classes working as expected."
        except:
            e = sys.exc_info()
            print(f"\n\n\nERROR: {e}")
            result = "\nTest Failed: Literature Classes not working as expected."
        result += details
        print(result)
        return result
if __name__ == '__main__':
    # Instantiating Test runs the whole suite and appends to test_output/logs.txt.
    Test()
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
HPC Web API
Preview # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import hpc_acm
from hpc_acm.api.default_api import DefaultApi # noqa: E501
from hpc_acm.rest import ApiException
class TestDefaultApi(unittest.TestCase):
    """DefaultApi unit test stubs

    NOTE: swagger-codegen generated placeholders — every test body is `pass`
    until an implementation is filled in.
    """
    def setUp(self):
        # Fresh API client per test; construction performs no network I/O here.
        self.api = hpc_acm.api.default_api.DefaultApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_cancel_clusrun_job(self):
        """Test case for cancel_clusrun_job

        Cancel a clusrun  # noqa: E501
        """
        pass
    def test_cancel_diagnostic_job(self):
        """Test case for cancel_diagnostic_job

        Cancel a diagnostic test run  # noqa: E501
        """
        pass
    def test_create_clusrun_job(self):
        """Test case for create_clusrun_job

        Create a clusrun  # noqa: E501
        """
        pass
    def test_create_diagnostic_job(self):
        """Test case for create_diagnostic_job

        Create a diagnostic test run  # noqa: E501
        """
        pass
    def test_get_clus_run_job_summary(self):
        """Test case for get_clus_run_job_summary

        Get summary of ClusRun jobs  # noqa: E501
        """
        pass
    def test_get_clusrun_job(self):
        """Test case for get_clusrun_job

        Get a clusrun  # noqa: E501
        """
        pass
    def test_get_clusrun_job_aggregation_result(self):
        """Test case for get_clusrun_job_aggregation_result

        Get aggregation result of a clusrun job  # noqa: E501
        """
        pass
    def test_get_clusrun_jobs(self):
        """Test case for get_clusrun_jobs

        Get a list of clusruns  # noqa: E501
        """
        pass
    def test_get_clusrun_output(self):
        """Test case for get_clusrun_output

        Get the whole output of a task  # noqa: E501
        """
        pass
    def test_get_clusrun_output_in_page(self):
        """Test case for get_clusrun_output_in_page

        Get partial output of a task  # noqa: E501
        """
        pass
    def test_get_clusrun_task(self):
        """Test case for get_clusrun_task

        Get a task of a clusrun  # noqa: E501
        """
        pass
    def test_get_clusrun_task_result(self):
        """Test case for get_clusrun_task_result

        Get a task result of a clusrun  # noqa: E501
        """
        pass
    def test_get_clusrun_tasks(self):
        """Test case for get_clusrun_tasks

        Get tasks of a clusrun  # noqa: E501
        """
        pass
    def test_get_diagnostic_job(self):
        """Test case for get_diagnostic_job

        Get a diagnostic test run  # noqa: E501
        """
        pass
    def test_get_diagnostic_job_aggregation_result(self):
        """Test case for get_diagnostic_job_aggregation_result

        Get aggregation result of a diagnostic job  # noqa: E501
        """
        pass
    def test_get_diagnostic_job_summary(self):
        """Test case for get_diagnostic_job_summary

        Get summary of diagnostic jobs  # noqa: E501
        """
        pass
    def test_get_diagnostic_jobs(self):
        """Test case for get_diagnostic_jobs

        Get a list of diagnostic test runs  # noqa: E501
        """
        pass
    def test_get_diagnostic_task(self):
        """Test case for get_diagnostic_task

        Get a task of a diagnostic test run  # noqa: E501
        """
        pass
    def test_get_diagnostic_task_result(self):
        """Test case for get_diagnostic_task_result

        Get a task result of a diagnostic test run  # noqa: E501
        """
        pass
    def test_get_diagnostic_tasks(self):
        """Test case for get_diagnostic_tasks

        Get tasks of a diagnostic test run  # noqa: E501
        """
        pass
    def test_get_diagnostic_tests(self):
        """Test case for get_diagnostic_tests

        Get available diagnostic tests  # noqa: E501
        """
        pass
    def test_get_metric_categories(self):
        """Test case for get_metric_categories

        Get node metric categories  # noqa: E501
        """
        pass
    def test_get_metrics_of_category(self):
        """Test case for get_metrics_of_category

        Get node merics in a category  # noqa: E501
        """
        pass
    def test_get_node(self):
        """Test case for get_node

        Get a node  # noqa: E501
        """
        pass
    def test_get_node_events(self):
        """Test case for get_node_events

        Get events of a node  # noqa: E501
        """
        pass
    def test_get_node_jobs(self):
        """Test case for get_node_jobs

        Get jobs of a node  # noqa: E501
        """
        pass
    def test_get_node_metadata(self):
        """Test case for get_node_metadata

        get metadata of a node  # noqa: E501
        """
        pass
    def test_get_node_metric_history(self):
        """Test case for get_node_metric_history

        Get metric history of a node  # noqa: E501
        """
        pass
    def test_get_node_scheduled_events(self):
        """Test case for get_node_scheduled_events

        get scheduled events of a node  # noqa: E501
        """
        pass
    def test_get_node_summary(self):
        """Test case for get_node_summary

        Get summary of nodes  # noqa: E501
        """
        pass
    def test_get_nodes(self):
        """Test case for get_nodes

        Get a list of nodes  # noqa: E501
        """
        pass
if __name__ == '__main__':
    # Discover and run all TestDefaultApi stubs.
    unittest.main()
|
nilq/baby-python
|
python
|
from export_env_variables import *
import os
import sys
from os import path
from utils import *
from defs import *
import shutil
def save_logs_recursively(logs_root, dst_folder_name):
    """Back up logs under *logs_root* into ``saved_logs_path/dst_folder_name``.

    Copies every ``*.log`` / ``*.png`` file plus the dataset list files
    (``val.txt``, ``train.txt``, ``shuffled_imgs_list_order.txt``),
    preserving the directory structure relative to *logs_root*.  If the
    destination already exists the user chooses to merge or replace it.
    Any ``*.log``/``*.png`` that would not be backed up is reported and
    confirmed interactively before copying starts.
    """
    if not path.exists(logs_root):
        print(logs_root + " doesn't exist")
        return
    logs_root_basename = path.basename(logs_root)
    if path.exists(path.join(saved_logs_path, dst_folder_name)):
        ans = raw_input("{} logs backup folder already exist. Merge 'm' or Replace 'r'? ".format(dst_folder_name))
        if ans.lower() == 'r':
            shutil.rmtree(path.join(saved_logs_path, dst_folder_name))
    makedirs_ok(path.join(saved_logs_path, dst_folder_name))
    logs_to_save_dst_path = []
    logs_to_save_src_path = []
    # File names (besides *.log / *.png) that are worth backing up.
    kept_names = ("val.txt", "train.txt", "shuffled_imgs_list_order.txt")
    for root, dirnames, filenames in os.walk(logs_root):
        for filename in filenames:
            if filename.endswith(".log") or filename.endswith(".png") or filename in kept_names:
                # Path of `root` relative to logs_root, so the backup mirrors the tree.
                curr_dir = root[root.find(logs_root_basename) + len(logs_root_basename) + 1:]
                logs_to_save_dst_path.append(path.join(saved_logs_path, dst_folder_name, curr_dir, filename))
                logs_to_save_src_path.append(path.join(root, filename))
    # Sanity pass: warn about log/png files that somehow matched no pattern.
    existing_logs = []
    for root, dirnames, filenames in os.walk(logs_root):
        for filename in filenames:
            if filename.endswith(".log") or filename.endswith(".png"):
                existing_logs.append(path.join(root, filename))
    logs_that_will_be_lost = set(existing_logs) - set(logs_to_save_src_path)
    for log in logs_that_will_be_lost:
        print(log + " will be lost")
    if len(logs_that_will_be_lost) > 0:
        # BUG FIX: raw_input() takes a single prompt string; the original call
        # passed three positional arguments and raised TypeError.
        raw_input("\n\n\n{0} Logs Will Be lost. Are you sure?".format(len(logs_that_will_be_lost)))
    for src_log, dst_log in zip(logs_to_save_src_path, logs_to_save_dst_path):
        makedirs_ok(path.dirname(dst_log))
        shutil.copy2(src_log, dst_log)
# -------------------------------------------------------------------------------------------------------
if __name__=="__main__":
    # Default: back up everything under modes_path into "modes";
    # an optional CLI argument selects a single subdirectory instead.
    # (modes_path presumably comes from the star-imported `defs` module —
    # confirm.)
    src_path = modes_path
    out_path = "modes"
    if len(sys.argv) > 1:
        src_path += "/" + sys.argv[1]
        out_path = sys.argv[1]
    save_logs_recursively(src_path, out_path)
|
nilq/baby-python
|
python
|
from __future__ import print_function
import logging
from ..address_translator import AT
l = logging.getLogger('cle.backends.relocation')
class Relocation(object):
    """
    A representation of a relocation in a binary file. Smart enough to
    relocate itself.

    :ivar owner_obj: The binary this relocation was originally found in, as a cle object
    :ivar symbol: The Symbol object this relocation refers to
    :ivar relative_addr: The address in owner_obj this relocation would like to write to
    :ivar rebased_addr: The address in the global memory space this relocation would like to write to
    :ivar resolvedby: If the symbol this relocation refers to is an import symbol and that import has been resolved,
                      this attribute holds the symbol from a different binary that was used to resolve the import.
    :ivar resolved: Whether the application of this relocation was successful
    """
    def __init__(self, owner, symbol, relative_addr):
        super(Relocation, self).__init__()
        self.owner_obj = owner
        self.arch = owner.arch
        self.symbol = symbol
        self.relative_addr = relative_addr
        self.resolvedby = None
        self.resolved = False
        self.resolvewith = None
        # Import symbols register themselves on the owner so the loader can
        # later find unresolved imports by name.
        if self.symbol is not None and self.symbol.is_import:
            self.owner_obj.imports[self.symbol.name] = self

    def resolve_symbol(self, solist, bypass_compatibility=False):  # pylint: disable=unused-argument
        """Try to resolve this relocation's symbol against the objects in *solist*.

        Resolution priority: static symbols resolve to themselves; then the
        first non-weak export (or, for the owner object itself, a non-import
        definition); then any weak match; finally an extern stub is fabricated
        unless the symbol itself is weak.  Returns True on success.
        """
        if self.symbol.is_static:
            # A static symbol should only be resolved by itself.
            self.resolve(self.symbol)
            return True
        weak_result = None
        for so in solist:
            symbol = so.get_symbol(self.symbol.name)
            if symbol is not None and symbol.is_export:
                if not symbol.is_weak:
                    self.resolve(symbol)
                    return True
                elif weak_result is None:
                    weak_result = symbol
            # TODO: Was this check obsolted by the addition of is_static?
            # I think right now symbol.is_import = !symbol.is_export
            elif symbol is not None and not symbol.is_import and so is self.owner_obj:
                if not symbol.is_weak:
                    self.resolve(symbol)
                    return True
                elif weak_result is None:
                    weak_result = symbol
        if weak_result is not None:
            self.resolve(weak_result)
            return True
        if self.symbol.is_weak:
            # Unresolved weak symbols are allowed to stay unresolved.
            return False
        # No definition anywhere: fabricate an extern symbol to bind against.
        new_symbol = self.owner_obj.loader.extern_object.make_extern(self.symbol.name)
        self.resolve(new_symbol)
        return True

    def resolve(self, obj):
        """Mark this relocation as resolved by symbol *obj*."""
        self.resolvedby = obj
        self.resolved = True
        if self.symbol is not None:
            if obj is not None:
                l.debug('%s from %s resolved by %s from %s at %#x', self.symbol.name, self.owner_obj.provides, obj.name, obj.owner_obj.provides, obj.rebased_addr)
            self.symbol.resolve(obj)

    @property
    def rebased_addr(self):
        # Target address mapped into the loader's global memory space.
        return AT.from_rva(self.relative_addr, self.owner_obj).to_mva()

    @property
    def linked_addr(self):
        # Target address in the binary's own linked address space.
        return AT.from_rva(self.relative_addr, self.owner_obj).to_lva()

    warned_addr = False  # class-wide: print the deprecation warning only once

    @property
    def addr(self):
        # Deprecated alias for linked_addr.
        if not Relocation.warned_addr:
            print("\x1b[31;1mDeprecation warning: Relocation.addr is ambiguous, please use relative_addr, linked_addr, or rebased_addr\x1b[0m")
            Relocation.warned_addr = True
        return self.linked_addr

    @property
    def dest_addr(self):
        return self.relative_addr

    @property
    def value(self):  # pylint: disable=no-self-use
        # Subclasses compute the actual word to be written at dest_addr.
        l.error('Value property of Relocation must be overridden by subclass!')
        return 0

    def relocate(self, solist, bypass_compatibility=False):
        """
        Applies this relocation. Will make changes to the memory object of the
        object it came from.
        This implementation is a generic version that can be overridden in subclasses.

        :param solist: A list of objects from which to resolve symbols.
        """
        if not self.resolve_symbol(solist, bypass_compatibility):
            return False
        # NOTE(review): the success path returns None (falsy); callers that
        # treat the return value as a boolean will misread success — confirm.
        self.owner_obj.memory.write_addr_at(self.dest_addr, self.value)
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
import os
from willow.image import Image, RGBImageBuffer
def _cv2():
    """Lazily import OpenCV; fall back to the legacy ``cv.cv2`` location."""
    try:
        import cv2
        return cv2
    except ImportError:
        # Older OpenCV installations exposed cv2 inside the `cv` package.
        from cv import cv2 as legacy_cv2
        return legacy_cv2
def _numpy():
import numpy
return numpy
class BaseOpenCVImage(Image):
    """Common Willow wrapper around an OpenCV image array plus its pixel size."""

    def __init__(self, image, size):
        self.image = image  # underlying OpenCV/numpy image data
        self.size = size    # (width, height) tuple

    @classmethod
    def check(cls):
        # Raises ImportError if OpenCV is unavailable; Willow uses this to
        # decide whether the backend can be used.
        _cv2()

    @Image.operation
    def get_size(self):
        return self.size

    @Image.operation
    def has_alpha(self):
        # Alpha is not supported by OpenCV
        return False

    @Image.operation
    def has_animation(self):
        # Animation is not supported by OpenCV
        return False
class OpenCVColorImage(BaseOpenCVImage):
    """BGR colour image backed by OpenCV (requires numpy as well)."""

    @classmethod
    def check(cls):
        super(OpenCVColorImage, cls).check()
        _numpy()

    @classmethod
    @Image.converter_from(RGBImageBuffer)
    def from_buffer_rgb(cls, image_buffer):
        """
        Converts a Color Image buffer into a numpy array suitable for use with OpenCV
        """
        numpy = _numpy()
        cv2 = _cv2()
        # Raw RGB bytes -> (height, width, 3) array, then swap channel order
        # because OpenCV works in BGR.
        image = numpy.frombuffer(image_buffer.data, dtype=numpy.uint8)
        image = image.reshape(image_buffer.size[1], image_buffer.size[0], 3)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        return cls(image, image_buffer.size)
class OpenCVGrayscaleImage(BaseOpenCVImage):
    """Grayscale OpenCV image exposing feature- and face-detection operations."""

    # Parameters for Haar-cascade face detection (see detect_faces).
    face_haar_flags = 0
    face_min_neighbors = 3
    face_haar_scale = 1.1
    face_min_size = (40, 40)

    @Image.operation
    def detect_features(self):
        """
        Find interesting features of an image suitable for cropping to.
        """
        numpy = _numpy()
        cv2 = _cv2()
        points = cv2.goodFeaturesToTrack(self.image, 20, 0.04, 1.0)
        if points is None:
            return []
        else:
            points = numpy.reshape(points, (-1, 2))  # Numpy returns it with an extra third dimension
            return points.tolist()

    @Image.operation
    def detect_faces(self, cascade_filename='haarcascade_frontalface_alt2.xml'):
        """
        Run OpenCV face detection on the image. Returns a list of coordinates representing a box around each face.
        """
        cv2 = _cv2()
        cascade_filename = self._find_cascade(cascade_filename)
        cascade = cv2.CascadeClassifier(cascade_filename)
        # Histogram equalisation improves detection on low-contrast images.
        equalised_image = cv2.equalizeHist(self.image)
        # NOTE(review): arguments are passed positionally in the legacy
        # (scaleFactor, minNeighbors, flags, minSize) order — confirm this
        # matches the installed cv2's detectMultiScale signature.
        faces = cascade.detectMultiScale(equalised_image,
                                         self.face_haar_scale,
                                         self.face_min_neighbors,
                                         self.face_haar_flags,
                                         self.face_min_size)
        # Convert (x, y, w, h) rectangles to (left, top, right, bottom).
        return [(face[0],
                 face[1],
                 face[0] + face[2],
                 face[1] + face[3],
                 ) for face in faces]

    def _find_cascade(self, cascade_filename):
        """
        Find the requested OpenCV cascade file. If a relative path was provided, check local cascades directory.
        """
        if not os.path.isabs(cascade_filename):
            cascade_filename = os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                'data/cascades',
                cascade_filename,
            )
        return cascade_filename

    @classmethod
    @Image.converter_from(OpenCVColorImage)
    def from_color(cls, colour_image):
        """
        Convert OpenCVColorImage to an OpenCVGrayscaleImage.
        """
        cv2 = _cv2()
        image = cv2.cvtColor(colour_image.image, cv2.COLOR_BGR2GRAY)
        return cls(image, colour_image.size)
# Image classes this plugin module registers with Willow.
willow_image_classes = [OpenCVColorImage, OpenCVGrayscaleImage]
|
nilq/baby-python
|
python
|
# Django settings for clickwork project.
import os
# Development defaults; production deployments override these via
# local_settings (imported at the bottom of this module).
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# BASE_PATH normally comes from local_settings; fall back to the current
# directory so this module stays importable in isolation (e.g. in tests).
try:
    from local_settings import BASE_PATH
except ImportError:
    BASE_PATH = '.'

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASES = {
    "default": {
        "NAME": "default_db",
        "ENGINE": "django.db.backends.postgresql_psycopg2"
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

DEFAULT_CHARSET = "utf-8"

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(BASE_PATH, '')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/uploads/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '7wwt(!57n(mx5)@v61^(7#a66hhtq_*51sqn+6l78-t*f=d)45'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.transaction.TransactionMiddleware'
)

ROOT_URLCONF = 'clickwork.urls'

# List of strings corresponding to task types.
# These are the task types that are exercised by unit tests, so
# these are included by default. To add others, change the
# TASK_TYPES variable in local_settings.py.
TASK_TYPES = ['simple']

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'main',
    'user_management',
    #'django_hudson'
)

# should be changed if you're going to user management
EMAIL_FROM = "Clickwork <clickwork@example.com>"

## Use this octal file permission number for the files
## in the archive that is created when a project is exported.
# BUG FIX: `0444` is Python-2-only octal syntax (SyntaxError on Python 3);
# `0o444` is the same value (292) and valid on Python 2.6+ and 3.x.
CLICKWORK_EXPORT_FILE_PERMISSIONS = 0o444

## These are the usernames of people who should not both annotate the
## same task. E.g., (("a", "b"), ("c", "d", "e")) means that if "a"
## is one annotator for a task, then "b" should not be the other, and
## if "c" is one annotator, then neither "d" nor "e" should be the
## other.
CLICKWORK_KEEP_APART = (("TEST_EXCLUSION_1", "TEST_EXCLUSION_2"),)

# Allow local_settings to override anything defined above.
try:
    from local_settings import *
except ImportError:
    pass
|
nilq/baby-python
|
python
|
import scipy.spatial.distance as distance
import numpy as np
import cPickle as pkl
import os
import sys
# Working directories for the track-3 pipeline: per-track mean features and
# the pairwise distance caches derived from them.
root_dir = 'home_directory/VIC/track3/new_15000'
fea_dir = os.path.join(root_dir, 'filtered_mot_mean_features')
dis_dir = os.path.join(root_dir, 'filtered_mot_distances')
# if not os.path.exists(fea_dir):
#     os.makedirs(fea_dir)
# if not os.path.exists(dis_dir):
#     os.makedirs(dis_dir)
'''
cache_files = ['Loc1_1_ave_1.pkl',
'Loc1_2_ave_1.pkl',
'Loc1_3_ave_1.pkl',
'Loc1_4_ave_1.pkl',
'Loc2_1_ave_1.pkl',
'Loc2_2_ave_1.pkl',
'Loc2_3_ave_1.pkl',
'Loc2_4_ave_1.pkl',
'Loc2_5_ave_1.pkl',
'Loc2_6_ave_1.pkl',
'Loc3_1_ave_1.pkl',
'Loc3_2_ave_1.pkl',
'Loc4_1_ave_1.pkl',
'Loc4_3_ave_1.pkl',
'Loc4_2_ave_1.pkl']
# concat all cache
all_fea_lst = []
all_idx_lst = []
for i in cache_files:
cache_f = os.path.join(fea_dir, i)
with open(cache_f, 'r') as f:
cache = pkl.load(f)
for k, v in cache.iteritems():
all_idx_lst.append(i[:7] + str(k))
all_fea_lst.append(v[2])
print i, 'concat finished'
print 'concat done!'
# all_lst = [all_idx_lst, np.array(all_fea_lst)]
with open(os.path.join(fea_dir, 'all_fea.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(all_fea_lst, f)
with open(os.path.join(fea_dir, 'all_idx.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(all_idx_lst, f)
print 'all ave fea dump finished!'
# with open(os.path.join(fea_dir, 'all_ave.pkl'), 'r') as f:
# all_lst = pkl.load(f)
#
# print 'cache loaded'
#
#
# # split cache
# all_fea_arr = all_lst[1]
# with open('home_directory/VIC/track3/filtered_mot_mean_features/all_fea.pkl', 'r') as f:
# all_fea_lst = pkl.load(f)
#
all_fea_arr = np.array(all_fea_lst)
num = len(all_fea_arr)
split_num = 50
each = num / split_num
for i in range(split_num):
sid = each * i
eid = each * (i + 1)
if i == split_num - 1:
eid = num
fea_arr_each = all_fea_arr[sid:eid]
all_ave_split_path = os.path.join(fea_dir, 'all_ave_split')
if not os.path.exists(all_ave_split_path):
os.makedirs(all_ave_split_path)
with open(os.path.join(all_ave_split_path, str(i)+'.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(fea_arr_each, f)
print i, 'split finished'
print 'all split finished'
exit(0)
'''
'''
with open(os.path.join(fea_dir, 'all_fea.pkl'), 'r') as f:
all_fea_lst = pkl.load(f)
all_fea_arr = np.array(all_fea_lst)
# calcualte distance
all_ave_split_path = os.path.join(fea_dir, 'all_ave_split')
if not os.path.exists(all_ave_split_path):
os.makedirs(all_ave_split_path)
with open(os.path.join(all_ave_split_path, sys.argv[1]+'.pkl'), 'r') as f:
split_arr = pkl.load(f)
all_res = distance.cdist(split_arr, all_fea_arr, 'cosine')
with open(os.path.join(dis_dir, 'dis_' + sys.argv[1] + '.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(all_res, f)
print 'split', sys.argv[1], 'done!'
exit(0)
'''
'''
# concat all splited distance
split_num = 50
with open(os.path.join(dis_dir, 'dis_0.pkl'), 'r') as f:
res = pkl.load(f)
print '0 /', split_num, 'cancat finished'
for i in range(1, split_num):
with open(os.path.join(dis_dir, 'dis_' + str(i) + '.pkl'), 'r') as f:
split_cache = pkl.load(f)
res = np.concatenate((res, split_cache), axis=0)
print i, '/', split_num, 'cancat finished'
print 'all concat finished, shape is', res.shape
with open(os.path.join(dis_dir, 'dis_all.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(res, f)
print 'concat done!'
'''
'''
# # calcute distance marix
# with open('home_directory/VIC/track3/filtered_mot_mean_features/all_fea.pkl', 'r') as f:
# all_fea = pkl.load(f)
# with open('home_directory/VIC/track3/filtered_mot_mean_features/all_idx.pkl', 'r') as f:
# all_idx = pkl.load(f)
#
# all_dis = distance.cdist(all_fea, all_fea, 'cosine')
# with open('home_directory/VIC/track3/filtered_mot_distances/dis_all.pkl', 'wb', pkl.HIGHEST_PROTOCOL) as f:
# pkl.dump(all_dis, f)
# print 'all dis calculate finished'
# split_cache = all_dis
'''
# filter the distance matrix with max_num=15 and threshould sys.argv[1]
split_num = int(sys.argv[1])
# # thresh = 0.3 # int(sys.argv[1])
#
res = []
cache_name = 'dis_' + str(split_num) + '.pkl'
# load each split cache
with open(os.path.join(dis_dir, cache_name), 'r') as f:
split_cache = pkl.load(f)
# load index cache
with open(os.path.join(fea_dir, 'all_idx.pkl'), 'r') as f:
all_idx = pkl.load(f)
print split_num, 'loaded cache'
all_locs = ['Loc1_1', 'Loc1_2', 'Loc1_3', 'Loc1_4', 'Loc2_1', 'Loc2_2', 'Loc2_3', 'Loc2_4',
'Loc2_5', 'Loc2_6', 'Loc3_1', 'Loc3_2', 'Loc4_1', 'Loc4_2', 'Loc4_3']
loc_to_idx = dict(zip((all_locs), xrange(len(all_locs))))
res_inds = []
res_diss = []
each_split_num = len(all_idx) / 50
for idx in range(split_cache.shape[0]):
each = split_cache[idx]
this_loc = all_idx[idx + split_num * each_split_num][:6]
# this_loc_idxs = [t_i for t_i in range(len(all_idx)) if all_idx[t_i][:6] == this_loc]
# other_loc_idx = list(set(range(len(all_idx))) - set(this_loc_idxs))
# each = each[other_loc_idx]
each_ind = []
each_dis = []
for loc in all_locs:
if loc == this_loc:
continue
loc_idxs = [t_i for t_i in range(len(all_idx)) if all_idx[t_i][:6] == loc]
ii = np.argsort(each[loc_idxs])[:4]
each_ind += list(np.array(loc_idxs)[ii])
# each_ind += list(np.argsort(each[loc_idxs])[:4] + loc_idxs[0])
each_dis += list(np.sort(each[loc_idxs])[:4])# list(each[np.array(loc_idxs)[ii]]) # list(np.sort(each[loc_idxs])[:4])
# print list(each[np.array(loc_idxs)[ii]])
# print list(np.sort(each[loc_idxs])[:4])
res_inds.append(each_ind)
res_diss.append(each_dis)
filtered_dis_dir = os.path.join(root_dir, 'filtered_mot_final_distance')
# with open(os.path.join(filtered_dis_dir, 'diss.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
# pkl.dump(res_diss, f)
# with open(os.path.join(filtered_dis_dir, 'inds.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
# pkl.dump(res_inds, f)
with open(os.path.join(filtered_dis_dir, 'diss/filtered_' + cache_name), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(res_diss, f)
with open(os.path.join(filtered_dis_dir, 'inds/filtered_idx_' + str(split_num) + '.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(res_inds, f)
print split_num, 'done'
'''
filtered_dis_dir = os.path.join(root_dir, 'filtered_mot_final_distance/diss')
# concat all filterd cache
all_split_num = 50
all_res = []
for i in range(all_split_num):
with open(os.path.join(filtered_dis_dir, 'filtered_dis_' + str(i) + '.pkl'), 'r') as f:
split_cache = pkl.load(f)
all_res += split_cache
print i, 'loaded'
with open(os.path.join(filtered_dis_dir, 'all_dis.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(all_res, f)
print 'dis done'
filtered_idx_dir = os.path.join(root_dir, 'filtered_mot_final_distance/inds')
all_res = []
for i in range(all_split_num):
with open(os.path.join(filtered_idx_dir, 'filtered_idx_' + str(i) + '.pkl'), 'r') as f:
split_cache = pkl.load(f)
all_res += split_cache
print i, 'loaded'
with open(os.path.join(filtered_idx_dir, 'all_inx.pkl'), 'wb', pkl.HIGHEST_PROTOCOL) as f:
pkl.dump(all_res, f)
print 'done'
'''
|
nilq/baby-python
|
python
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class EmbedIDFunction(function.Function):
    """Forward/backward implementation of the embedding lookup (see embed_id)."""

    def check_type_forward(self, in_types):
        # Expect exactly (x: int32 ID array with ndim >= 1, W: float32 matrix).
        type_check.expect(in_types.size() == 2)
        x_type, w_type = in_types
        type_check.expect(
            x_type.dtype == numpy.int32,
            x_type.ndim >= 1,
        )
        type_check.expect(
            w_type.dtype == numpy.float32,
            w_type.ndim == 2
        )

    def forward(self, inputs):
        # Row gather: output[i] = W[x[i]] along axis 0.
        x, W = inputs
        return W.take(x, axis=0),

    def backward(self, inputs, grad_outputs):
        # Scatter-add the output gradients back into the rows of gW selected
        # by x; x itself is not differentiable, hence the leading None.
        xp = cuda.get_array_module(*inputs)
        x, W = inputs
        gy = grad_outputs[0]
        gW = xp.zeros_like(W)
        if xp is numpy:
            # numpy.add.at performs an unbuffered add, so duplicate indices
            # in x accumulate correctly.
            numpy.add.at(gW, x, gy)
        else:
            # GPU path: atomicAdd gives the same duplicate-index safety.
            cuda.elementwise(
                'T gy, int32 x, int32 n_out', 'raw T gW',
                'int w_ind[] = {x, i % n_out}; atomicAdd(&gW[w_ind], gy)',
                'embed_id_bwd')(
                    gy, xp.expand_dims(x, -1), gW.shape[1], gW)
        return None, gW
def embed_id(x, W):
    r"""Efficient linear function for one-hot input.

    This function implements so called *word embedding*. It takes two
    arguments: a set of IDs (words) ``x`` in :math:`B` dimensional integer
    vector, and a set of all ID (word) embeddings ``W`` in :math:`V \times d`
    float32 matrix. It outputs :math:`B \times d` matrix whose ``i``-th
    column is the ``x[i]``-th column of ``W``.

    This function is only differentiable on the input ``W``.

    Args:
        x (~chainer.Variable): Input variable with one-hot representation.
        W (~chainer.Variable): Representation of each ID (a.k.a.
            word embeddings).

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :class:`EmbedID`
    """
    # Docstring made raw: ``\times`` in a non-raw string turned into a tab
    # escape and rendered incorrectly.
    return EmbedIDFunction()(x, W)
|
nilq/baby-python
|
python
|
from chainmodel.models.steem.operation import Operation
class RequestAccountRecovery(Operation):
    """Chain model for the Steem ``request_account_recovery`` operation."""
    # Accounts this transaction involves (presumably used by Operation for
    # account-based indexing — confirm against the base class).
    tx_involves = ['account_to_recover', 'recovery_account']
    # Account treated as the originator of the transaction.
    tx_originator = 'account_to_recover'
nilq/baby-python
|
python
|
# Import standard Python Modules
import sys
import os
import datetime
# Import paho MQTT Client
import paho.mqtt.client as mqtt
# Import RPi.GPIO Module
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need \
superuser privileges. You can achieve \
this by using 'sudo' to run your script")
# Define callback functions which will be called when certain events happen.
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: report the result code and (re)subscribe."""
    # rc is the broker's CONNACK result code (0 == success).
    print("Conectado con codigo resultante: " + str(rc))
    # Subscribing here (rather than after connect()) means the subscription
    # is automatically renewed whenever the client reconnects.
    topic = "area0/luces"
    print("Suscribiendose al topic ->{0}".format(topic))
    client.subscribe(topic)
def on_message(client, userdata, message):
    """paho-mqtt message callback: drive GPIO pins and append to the log.

    Expected payload format: "<python-list-of-pins>| <ON|OFF>",
    e.g. "[2, 3]| ON".  Payloads without the "| " separator are ignored.
    """
    messageStr = str(message.payload.decode("utf-8"))
    print("message received ", str(message.payload.decode("utf-8")))
    print("message topic=", message.topic)
    try:
        if "| " in messageStr:
            pinNums, newStatePin = messageStr.split("| ")
            # Convert string to integer list.
            # NOTE(review): eval() on network input is unsafe — consider
            # json.loads / ast.literal_eval instead.
            pinNums = eval(pinNums)
            with open("log suscriptor.txt", "a", encoding="utf8") as logFile:
                if newStatePin == "OFF":
                    GPIO.output(pinNums, GPIO.LOW)
                    logFile.write("{}~{}~{}~{}\n".format(datetime.datetime.now(), messageStr, message.topic, "Apagando LEDs {}".format(pinNums)))
                elif newStatePin == "ON":
                    GPIO.output(pinNums, GPIO.HIGH)
                    logFile.write("{}~{}~{}~{}\n".format(datetime.datetime.now(), messageStr, message.topic, "Encendiendo LEDs {}".format(pinNums)))
                else:
                    print("Estado recibido incorrecto")
                    logFile.write("{}~{}~{}~{}\n".format(datetime.datetime.now(), messageStr, message.topic, "Error - Estado recibido incorrecto"))
    except Exception as e:
        print("Error al ejecutar el mensaje recibido:\n{} line {}".format(e, sys.exc_info()[-1].tb_lineno))
        # BUG FIX: the error entry used '",format(...)' (a comma) instead of
        # '".format(...)', which called builtin format(e, <int>) and raised
        # TypeError while logging the failure.
        # NOTE(review): this path writes to "log publish.txt" while the rest
        # of this subscriber logs to "log suscriptor.txt" — confirm intended.
        with open("log publish.txt", "a", encoding="utf8") as logFile:
            logFile.write("{}~{}~{}~{}\n".format(datetime.datetime.now(), messageStr, message.topic, "Error - No se puedo ejecutar el mensaje recibido debido a:\n{} line {}".format(e, sys.exc_info()[-1].tb_lineno)))
def main():
    """Configure all GPIO pins as outputs, connect to the MQTT broker given
    on the command line, and serve subscription callbacks forever."""
    if (len(sys.argv) != 2):
        sys.stderr.write('Usage: "{0}" $hostAddress\n'.format(sys.argv[0]))
        os._exit(1)
    GPIO.setmode(GPIO.BCM)
    # List with all GPIO pin numbers
    pinList = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]
    # Set GPIO pin signal OUT and initial value "shutdown"
    GPIO.setup(pinList, GPIO.OUT, initial=GPIO.LOW)
    # Create an MQTT client instance.
    print("Creando instancia MQTT")
    client = mqtt.Client()
    # Setup the callback functions
    client.on_message = on_message
    client.on_connect = on_connect
    print("conectando al broker")
    client.connect(host=sys.argv[1], port=1883, keepalive=60)
    # Write on log file
    logFile = open("log suscriptor.txt", "a", encoding="utf8")
    logFile.write("{}~{}~{}~{}\n".format(datetime.datetime.now(), "Nulo", "Nulo", "Aplicación iniciada"))
    logFile.close()
    # Blocks forever, dispatching on_connect/on_message callbacks.
    client.loop_forever()
if __name__ == "__main__":
    try:
        main()
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit —
        # presumably intended so GPIO.cleanup() always runs on exit; confirm.
        print("{} line {}".format(sys.exc_info()[0], sys.exc_info()[-1].tb_lineno))
        GPIO.cleanup()
|
nilq/baby-python
|
python
|
import sys
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import QPalette, QBrush, QPixmap
import os
scanning_face_path = os.path.join(os.path.dirname(__file__),'fw\FaceSwap')
sys.path.append(scanning_face_path)
from scanning_face import show_face_information
# NOTE(review): `global` at module scope is a no-op; this flag is shadowed by
# the local `flag` variables inside the class methods below — confirm it is
# still needed.
global flag
flag = True
class Ui_MainWindow(QtWidgets.QWidget):
    """Main window: displays webcam frames in a QLabel, driven by a QTimer."""

    def __init__(self, parent=None):
        super(Ui_MainWindow, self).__init__(parent)
        self.timer_camera = QtCore.QTimer()  # fires the frame-grab slots
        self.cap = cv2.VideoCapture()        # opened lazily on button click
        self.CAM_NUM = 0                     # default capture device index
        self.set_ui()
        self.slot_init()
        self.__flag_work = 0
        self.x = 0
        self.count = 0

    def set_ui(self):
        """Build the layout: a button column on the left, the video label on the right."""
        self.__layout_main = QtWidgets.QHBoxLayout()
        self.__layout_fun_button = QtWidgets.QVBoxLayout()
        self.__layout_data_show = QtWidgets.QVBoxLayout()
        self.button_open_camera = QtWidgets.QPushButton(u'打开相机')
        self.button_change = QtWidgets.QPushButton(u'切換功能')
        self.button_close = QtWidgets.QPushButton(u'退出')
        self.button_open_camera.setMinimumHeight(50)
        self.button_change.setMinimumHeight(50)
        self.button_close.setMinimumHeight(50)
        self.label_show_camera = QLabel()
        self.label_move = QtWidgets.QLabel()
        self.label_move.setFixedSize(80, 100)          # Camera frame size
        self.label_show_camera.setFixedSize(1060, 1000)  # Main frame size
        self.label_show_camera.setAutoFillBackground(False)
        self.__layout_main.addLayout(self.__layout_fun_button)
        self.__layout_main.addLayout(self.__layout_data_show)
        self.__layout_data_show.addWidget(self.label_show_camera)
        self.__layout_fun_button.addWidget(self.button_open_camera)
        self.__layout_fun_button.addWidget(self.button_change)
        self.__layout_fun_button.addWidget(self.button_close)
        self.setLayout(self.__layout_main)
        self.label_move.raise_()
        self.setWindowTitle(u'攝像頭')

    def show_camera(self):
        """Timer slot: grab one frame and display it unprocessed."""
        flag, bgr_image = self.cap.read()
        if flag:
            print("camera start")
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            show = cv2.resize(rgb_image, (1080, 960))
            showImage = QtGui.QImage(show, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
            self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))

    def show_view(self):
        """Timer slot: grab one frame and display it with face annotation."""
        flag, bgr_image = self.cap.read()
        if flag:
            # rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            print("camera start")
            # NOTE(review): `scanning_face` is not defined or imported here
            # (only `show_face_information` is imported) — this raises
            # NameError when the timer fires; confirm the intended class name.
            faces_indention = scanning_face(flag, bgr_image)
            rgb_image = faces_indention.show_face_information()
            show = cv2.resize(rgb_image, (1080, 960))
            showImage = QtGui.QImage(show, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
            self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))

    def slot_init(self):
        """Connect buttons and timer to their slots."""
        self.button_open_camera.clicked.connect(self.button_open_camera_click)
        self.button_change.clicked.connect(self.button_add_face_emotion)
        # NOTE(review): both slots are connected to the same timeout; each
        # calls cap.read(), so they compete for frames — confirm intended.
        self.timer_camera.timeout.connect(self.show_camera)
        self.timer_camera.timeout.connect(self.show_view)
        # self.button_close.clicked.connect(self.close)
        # self.button_test.clicked.connect(self.test_click)

    def button_open_camera_click(self):
        """Toggle the camera: open device + start timer, or stop + release."""
        if self.timer_camera.isActive() == False:
            flag = self.cap.open(self.CAM_NUM)
            if flag == False:
                msg = QtWidgets.QMessageBox.warning(self, u"Warning", u"請檢測相機與電腦是否連線正確", buttons=QtWidgets.QMessageBox.Ok,
                                                    defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(30)
                self.button_open_camera.setText(u'123')
        else:
            self.timer_camera.stop()
            self.cap.release()
            self.label_show_camera.clear()

    def button_add_face_emotion(self):
        """Toggle slot for the "switch function" button.

        NOTE(review): currently an exact duplicate of
        button_open_camera_click — confirm whether distinct behaviour was
        intended here.
        """
        if self.timer_camera.isActive() == False:
            flag = self.cap.open(self.CAM_NUM)
            if flag == False:
                msg = QtWidgets.QMessageBox.warning(self, u"Warning", u"請檢測相機與電腦是否連線正確", buttons=QtWidgets.QMessageBox.Ok,
                                                    defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(30)
                self.button_open_camera.setText(u'123')
        else:
            self.timer_camera.stop()
            self.cap.release()
            self.label_show_camera.clear()
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the Qt event loop until it exits.
    App = QApplication(sys.argv)
    ex = Ui_MainWindow()
    ex.show()
    sys.exit(App.exec_())
|
nilq/baby-python
|
python
|
import logging
import time
from enum import Enum
from iot.devices import DeviceType
from iot.devices.base.multimedia import (
MultimediaKeyboardInterface,
MultimediaDevice
)
from iot.devices.errors import (
CommandNotFound, InvalidArgument,
BrandNotFound
)
logger = logging.getLogger(__name__)
class SetTopBoxBrands(Enum):
    # Supported manufacturers; values are the lower-case brand strings used
    # as keys in SetTopBoxFactory.mappings.
    SAMSUNG = "samsung"
class SetTopBoxFactory:
    """Builds set-top-box device objects keyed by (case-insensitive) brand name."""

    __slots__ = ("mappings",)

    def __init__(self):
        # Brand string -> implementation class.
        self.mappings = {
            SetTopBoxBrands.SAMSUNG.value: Samsung
        }

    def get_brand(self, brand):
        """Return the class registered for *brand*, or None if unknown."""
        return self.mappings.get(brand.lower())

    def create(self, room, id, brand, model):
        """Instantiate the device for *brand*; raise BrandNotFound if unsupported."""
        kls = self.get_brand(brand)
        if kls is None:
            raise BrandNotFound
        return kls(room, id, brand, model)
class SetTopBoxKeyboardInterface(MultimediaKeyboardInterface):
    # Placeholder (no-op) implementations of the multimedia keyboard
    # interface; concrete device classes are expected to override these.
    def mute(self):
        pass

    def unmute(self):
        pass

    def channel_up(self):
        pass

    def channel_down(self):
        pass

    def volume_up(self):
        pass

    def volume_down(self):
        pass
class BaseSetTopBox(MultimediaDevice, SetTopBoxKeyboardInterface):
    """Generic set-top-box device: digit lookup and channel entry via IR commands."""
    # TODO: Network provider channel mappings
    # Maybe curl from this to create mapping?
    # https://www.tvchannellists.com/Main_Page
    # Should ideally do it another class too
    device_type = DeviceType.SET_TOP_BOX

    def get_digit(self, digit):
        """Return the command for a single digit (0-9).

        Raises CommandNotFound unless all ten digit commands are defined.
        """
        digits = self.get_command("digits")
        # If not all digits (0-9) (10 numbers) are populated
        # We will raise error
        if not digits or len(digits) != 10:
            raise CommandNotFound
        return digits[digit]

    def channel(self, chan_number):
        """Experimental function, may not work in some cases.

        Sends *chan_number* (string of digits) one digit at a time with a
        pause between sends.  Raises InvalidArgument for empty or non-digit
        input; CommandNotFound propagates from get_digit.
        """
        cmd_to_send = []
        for digit in list(chan_number):
            try:
                d = int(digit)
                command_digit = self.get_digit(d)
                cmd_to_send.append(command_digit)
            except ValueError:
                logger.error("Unable to convert digit to string, %s", digit)
                raise InvalidArgument
            except CommandNotFound:
                raise
        if len(cmd_to_send) > 0:
            for cmd in cmd_to_send:
                self.room.send(cmd)
                # Introduce delay so that the IR receiver can work
                time.sleep(2.25)
        else:
            raise InvalidArgument
class Samsung(BaseSetTopBox):
    # Samsung boxes need no behaviour beyond the generic base class.
    pass
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# Copyright 2014 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Raw network client for HTTP(S) communication with ERMREST service.
"""
import os
import subprocess
import hashlib
import json
import base64
import urlparse
from httplib import HTTPConnection, HTTPSConnection, HTTPException, OK, CREATED, ACCEPTED, NO_CONTENT, CONFLICT, FORBIDDEN, INTERNAL_SERVER_ERROR, SERVICE_UNAVAILABLE, BadStatusLine, CannotSendRequest, GATEWAY_TIMEOUT, METHOD_NOT_ALLOWED, NOT_FOUND
import sys
import traceback
import time
import shutil
import smtplib
import urllib
import re
import mimetypes
from email.mime.text import MIMEText
import socket
from dateutil.parser import parse
# Footer appended to notification emails sent by this module.
mail_footer = 'Do not reply to this message. This is an automated message generated by the system, which does not receive email messages.'
class ErmrestHTTPException(Exception):
    """Error raised for unexpected HTTP status codes from the ERMREST service."""

    def __init__(self, value, status, retry=False):
        super(ErmrestHTTPException, self).__init__(value)
        self.value = value    # human-readable description
        self.status = status  # HTTP status code received
        self.retry = retry    # whether the caller may safely retry

    def __str__(self):
        return "%s." % self.value
class ErmrestException(Exception):
    """Base class for ERMREST client errors, optionally wrapping a cause."""

    def __init__(self, value, cause=None):
        super(ErmrestException, self).__init__(value)
        self.value = value
        self.cause = cause

    def __str__(self):
        if self.cause:
            return "%s." % self.value + " Caused by: %s." % self.cause
        return "%s." % self.value
class MalformedURL(ErmrestException):
    """MalformedURL indicates a malformed URL.
    """
    def __init__(self, cause=None):
        # Fixed message; only the underlying cause varies.
        super(MalformedURL, self).__init__("URL was malformed", cause)
class UnresolvedAddress(ErmrestException):
    """Raised when the service host name cannot be resolved.

    Wraps a low-level socket.gaierror.
    """
    def __init__(self, cause=None):
        super(UnresolvedAddress, self).__init__("Could not resolve address of host", cause)
class NetworkError(ErmrestException):
    """Raised for low-level network I/O failures (wraps socket.error)."""
    def __init__(self, cause=None):
        super(NetworkError, self).__init__("Network I/O failure", cause)
class ProtocolError(ErmrestException):
    """Raised for protocol-level failures reported by the service,
    optionally carrying the HTTP error number and response body.
    """
    def __init__(self, message='Network protocol failure', errorno=-1, response=None, cause=None):
        super(ProtocolError, self).__init__("Ermrest protocol failure", cause)
        self._errorno = errorno
        self._response = response

    def __str__(self):
        if self._errorno >= 0:
            return "%s. HTTP ERROR %d: %s" % (self.value, self._errorno, self._response)
        return "%s." % self.value
class NotFoundError(ErmrestException):
    """Raised for HTTP NOT_FOUND (i.e., ERROR 404) responses."""
    pass
class ErmrestClient (object):
"""Network client for ERMREST.
"""
## Derived from the ermrest iobox service client
    def __init__(self, **kwargs):
        """Initialize the client from keyword configuration.

        Expected keys: baseuri, mail_server, mail_sender, mail_receiver,
        logger, watermark, ffmpeg, video_resources, data_scratch,
        timeout (in minutes), cookie, chunk_size.
        """
        self.baseuri = kwargs.get("baseuri")
        o = urlparse.urlparse(self.baseuri)
        self.scheme = o[0]
        host_port = o[1].split(":")
        self.host = host_port[0]
        self.path = o.path
        self.port = None
        if len(host_port) > 1:
            self.port = host_port[1]
        self.mail_server = kwargs.get("mail_server")
        self.mail_sender = kwargs.get("mail_sender")
        self.mail_receiver = kwargs.get("mail_receiver")
        self.logger = kwargs.get("logger")
        self.watermark = kwargs.get("watermark")
        self.ffmpeg = kwargs.get("ffmpeg")
        self.video_resources = kwargs.get("video_resources")
        self.data_scratch = kwargs.get("data_scratch")
        # "timeout" is configured in minutes; stored in seconds.
        self.timeout = kwargs.get("timeout") * 60
        self.cookie = kwargs.get("cookie")
        self.chunk_size = kwargs.get("chunk_size")
        self.header = None   # auth header dict, set by connect()
        self.webconn = None  # HTTP(S)Connection, set by connect()
        self.logger.debug('Client initialized.')
    def send_request(self, method, url, body='', headers={}, sendData=False, ignoreErrorCodes=[]):
        """Send an HTTP request, reconnecting and resending once on transient errors.

        :param method: HTTP verb ('GET', 'PUT', 'POST', 'HEAD', 'DELETE')
        :param url: request path; url_cid() appends a "cid=video" query marker
        :param body: request body (string, or a file chunk when sendData is True)
        :param headers: extra request headers; NOTE: mutated in place when
            self.header (the auth cookie) is set
        :param sendData: when True, send the request step by step
            (putrequest/putheader/send) instead of a single request() call
        :param ignoreErrorCodes: status codes logged without request details
        :return: the HTTPResponse object (body not yet consumed)
        :raise ErmrestHTTPException: for any non-2xx response
        """
        try:
            request_headers = headers.copy()
            url = self.url_cid(url)
            if self.header:
                headers.update(self.header)
            self.logger.debug('Sending request: method="%s", url="%s://%s%s", headers="%s"' % (method, self.scheme, self.host, url, request_headers))
            retry = False
            try:
                if sendData == False:
                    self.webconn.request(method, url, body, headers)
                else:
                    """
                    For file upload send the request step by step
                    """
                    self.webconn.putrequest(method, url)
                    for key,value in headers.iteritems():
                        self.webconn.putheader(key,value)
                    self.webconn.endheaders()
                    self.webconn.send(body)
                resp = self.webconn.getresponse()
                self.logger.debug('Response: %d' % resp.status)
            except socket.error, e:
                # Connection-level failure: flag for one reconnect-and-resend.
                retry = True
                self.logger.debug('Socket error: %d' % (e.errno))
            except (BadStatusLine, CannotSendRequest):
                retry = True
            except:
                raise
            if retry:
                """
                Resend the request
                """
                self.close()
                self.connect()
                self.sendMail('WARNING Video: The HTTPSConnection has been restarted', 'The HTTPSConnection has been restarted on "%s://%s".\n' % (self.scheme, self.host))
                self.logger.debug('Resending request: method="%s", url="%s://%s%s"' % (method, self.scheme, self.host, url))
                if sendData == False:
                    self.webconn.request(method, url, body, headers)
                else:
                    self.webconn.putrequest(method, url)
                    for key,value in headers.iteritems():
                        self.webconn.putheader(key,value)
                    self.webconn.endheaders()
                    self.webconn.send(body)
                resp = self.webconn.getresponse()
                self.logger.debug('Response: %d' % resp.status)
            # 5xx gateway-style errors also get one reconnect-and-resend.
            if resp.status in [INTERNAL_SERVER_ERROR, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT]:
                """
                Resend the request
                """
                self.close()
                self.connect()
                self.sendMail('WARNING Video: The HTTPSConnection has been restarted', 'HTTP exception: %d.\nThe HTTPSConnection has been restarted on "%s://%s".\n' % (resp.status, self.scheme, self.host))
                self.logger.debug('Resending request: method="%s", url="%s://%s%s", headers="%s"' % (method, self.scheme, self.host, url, request_headers))
                if sendData == False:
                    self.webconn.request(method, url, body, headers)
                else:
                    self.webconn.putrequest(method, url)
                    for key,value in headers.iteritems():
                        self.webconn.putheader(key,value)
                    self.webconn.endheaders()
                    self.webconn.send(body)
                resp = self.webconn.getresponse()
                self.logger.debug('Response: %d' % resp.status)
            if resp.status not in [OK, CREATED, ACCEPTED, NO_CONTENT]:
                errmsg = resp.read()
                # NOTE(review): ignored codes are still logged (short form) and
                # still raise; "ignore" only suppresses the detailed log line.
                if resp.status not in ignoreErrorCodes:
                    self.logger.error('Error response: method="%s", url="%s://%s%s", status=%i, error: %s' % (method, self.scheme, self.host, url, resp.status, errmsg))
                else:
                    self.logger.error('Error response: %s' % (errmsg))
                raise ErmrestHTTPException("Error response (%i) received: %s" % (resp.status, errmsg), resp.status, retry)
            return resp
        except ErmrestHTTPException:
            raise
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got HTTP exception: method="%s", url="%s://%s%s", error="%s"' % (method, self.scheme, self.host, url, str(ev)))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Video: Unexpected Exception', 'Error generated during the HTTP request: method="%s", url="%s://%s%s", error="\n%s\n%s"' % (method, self.scheme, self.host, url, str(ev), ''.join(traceback.format_exception(et, ev, tb))))
            raise
    def connect(self, reconnect=False):
        """Open the HTTP(S) connection and install the cookie auth header.

        :param reconnect: unused in the active code path; kept for the
            commented-out goauth/session login flow below.
        :raise ValueError: if the URI scheme is neither http nor https
        """
        if self.scheme == 'https':
            self.webconn = HTTPSConnection(host=self.host, port=self.port)
        elif self.scheme == 'http':
            self.webconn = HTTPConnection(host=self.host, port=self.port)
        else:
            raise ValueError('Scheme %s is not supported.' % self.scheme)
        """
        if self.use_goauth:
            auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '')
            headers = dict(Authorization='Basic %s' % auth)
            resp = self.send_request('GET', '/service/nexus/goauth/token?grant_type=client_credentials', '', headers, reconnect)
            goauth = json.loads(resp.read())
            self.access_token = goauth['access_token']
            self.header = dict(Authorization='Globus-Goauthtoken %s' % self.access_token)
        else:
            #headers = {}
            #headers["Content-Type"] = "application/x-www-form-urlencoded"
            #resp = self.send_request("POST", "/ermrest/authn/session", "username=%s&password=%s" % (self.username, self.password), headers, reconnect)
            #self.header = dict(Cookie=resp.getheader("set-cookie"))
        """
        self.header = {'Cookie': self.cookie}
    def close(self):
        """Closes the connection to the Ermrest service.
        The underlying python documentation is not very helpful but it would
        appear that the HTTP[S]Connection.close() could raise a socket.error.
        Thus, this method potentially raises a 'NetworkError'.
        """
        assert self.webconn
        try:
            self.webconn.close()
        except socket.error as e:
            raise NetworkError(e)
        finally:
            # Always drop the stale handle so connect() can rebuild it.
            self.webconn = None
    def sendMail(self, subject, text):
        """Send a notification email, retrying on transient DNS failures.

        No-op unless mail_server, mail_sender and mail_receiver are all
        configured. Retries up to 10 times (100s apart) on EAI_AGAIN;
        any other error is logged and swallowed (best-effort delivery).
        """
        if self.mail_server and self.mail_sender and self.mail_receiver:
            retry = 0
            ready = False
            while not ready:
                try:
                    msg = MIMEText('%s\n\n%s' % (text, mail_footer), 'plain')
                    msg['Subject'] = subject
                    msg['From'] = self.mail_sender
                    msg['To'] = self.mail_receiver
                    s = smtplib.SMTP(self.mail_server)
                    s.sendmail(self.mail_sender, self.mail_receiver.split(','), msg.as_string())
                    s.quit()
                    self.logger.debug('Sent email notification.')
                    ready = True
                except socket.gaierror as e:
                    if e.errno == socket.EAI_AGAIN:
                        # Temporary DNS failure: back off and retry.
                        time.sleep(100)
                        retry = retry + 1
                        ready = retry > 10
                    else:
                        ready = True
                    if ready:
                        # Giving up: log the DNS error before returning.
                        et, ev, tb = sys.exc_info()
                        self.logger.error('got exception "%s"' % str(ev))
                        self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                except:
                    et, ev, tb = sys.exc_info()
                    self.logger.error('got exception "%s"' % str(ev))
                    self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                    ready = True
    def start(self):
        """Entry point: connect and run the video-processing pass.

        Any unexpected exception is logged, emailed, and re-raised.
        """
        self.connect()
        try:
            self.processVideo()
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got unexpected exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Video Processing: unexpected exception', '%s\nThe process might have been stopped\n' % str(traceback.format_exception(et, ev, tb)))
            raise
    def processVideo(self):
        """Transcode (and optionally watermark) all pending Slide_Video rows.

        Queries ERMREST for videos with no MP4_URI yet, downloads each from
        hatrac, runs ffmpeg, uploads the result, and records success/error
        in the Slide_Video table. The TRANSCODE_ONLY environment variable
        ('t'/'true') disables the watermark overlay.
        """
        transcode_only = os.getenv('TRANSCODE_ONLY', 'f').lower() in ['t', 'true']
        self.logger.debug('TRANSCODE_ONLY: %s.' % (transcode_only))
        url = '%s/entity/Immunofluorescence:Slide_Video/!Bytes::null::&Media_Type=video%%2Fmp4&MP4_URI::null::&Processing_Status::null::' % (self.path)
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        resp = self.send_request('GET', url, '', headers, False)
        movies = json.loads(resp.read())
        movieids = []
        for movie in movies:
            movieids.append((movie['Accession_ID'], movie['Name'], movie['MD5'], movie['Identifier']))
        self.logger.debug('Processing %d video(s).' % (len(movieids)))
        for movieId,fileName,md5,uri in movieids:
            #year = parse(rct).strftime("%Y")
            f = self.getMovieFile(fileName, uri)
            if f == None:
                self.reportFailure(movieId, 'error')
                continue
            self.logger.debug('Transcoding and adding watermark to the video "%s"' % (fileName))
            try:
                #args = [self.ffmpeg, '-y', '-i', f, '-i', self.watermark, '-filter_complex', 'overlay=x=(main_w-overlay_w):y=0', '%s/%s' % (self.data_scratch, fileName)]
                if transcode_only == True:
                    args = [self.ffmpeg, '-y', '-i', f, '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', '%s/%s' % (self.data_scratch, fileName)]
                else:
                    args = [self.ffmpeg, '-y', '-i', f, '-i', self.watermark, '-vcodec', 'libx264', '-pix_fmt', 'yuv420p', '-filter_complex', 'pad=width=iw:height=ih+90:color=#71cbf4,overlay=(main_w-overlay_w)/2:main_h-90', '%s/%s' % (self.data_scratch, fileName)]
                p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdoutdata, stderrdata = p.communicate()
                returncode = p.returncode
            except:
                et, ev, tb = sys.exc_info()
                self.logger.error('got unexpected exception "%s"' % str(ev))
                self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                self.sendMail('FAILURE Video: ffmpeg ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))
                returncode = 1
            if returncode != 0:
                self.logger.error('Can not transcode and add watermark to the "%s" file.\nstdoutdata: %s\nstderrdata: %s\n' % (fileName, stdoutdata, stderrdata))
                self.sendMail('FAILURE Video', 'Can not transcode and add watermark to the "%s" file.\nstdoutdata: %s\nstderrdata: %s\n' % (fileName, stdoutdata, stderrdata))
                os.remove(f)
                try:
                    os.remove('%s/%s' % (self.data_scratch, fileName))
                except:
                    et, ev, tb = sys.exc_info()
                    self.logger.error('Can not remove file "%s/%s"\n%s"' % (self.data_scratch, fileName, str(ev)))
                    self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                """
                Update the Slide_Video table with the failure result.
                """
                self.reportFailure(movieId, 'error')
                continue
            if transcode_only == True:
                self.logger.debug('Uploading in hatrac the transcoded movie file "%s".' % (fileName))
            else:
                self.logger.debug('Uploading in hatrac the transcoded movie file "%s" with the watermark.' % (fileName))
            newFile = '%s/%s' % (self.data_scratch, fileName)
            file_size = os.path.getsize(newFile)
            new_md5sum = self.md5sum(newFile, self.chunk_size)
            new_md5 = self.md5hex(newFile)
            new_sha256 = self.sha256sum(newFile)
            # assumes the hatrac uri ends with the original MD5 — the new
            # object name is derived by swapping in the new hex MD5.
            new_uri = '%s%s' % (uri[0:-len(md5)], new_md5)
            metadata = {"content_disposition": "filename*=UTF-8''%s" % fileName}
            """
            Check if this file exists in hatrac
            """
            if self.get_md5sum(new_uri) == new_md5sum:
                self.logger.info('Skipping the upload of the file "%s" as it already exists hatrac.' % fileName)
                os.remove(newFile)
            else:
                try:
                    self.uploadFile(new_uri, newFile, self.chunk_size, metadata)
                    os.remove(newFile)
                except:
                    et, ev, tb = sys.exc_info()
                    self.logger.error('Can not transfer file "%s" in namespace "%s". Error: "%s"' % (fileName, new_uri, str(ev)))
                    self.sendMail('Failure Video: HATRAC Error', 'Can not upload file "%s" in namespace "%s". Error: "%s"' % (fileName, new_uri, str(ev)))
                    self.reportFailure(movieId, 'error')
                    os.remove(newFile)
                    os.remove(f)
                    continue
            columns = ["MP4_URI", "Bytes", "MD5", "SHA256", "Processing_Status"]
            os.remove(f)
            columns = ','.join([urllib.quote(col, safe='') for col in columns])
            url = '%s/attributegroup/Immunofluorescence:Slide_Video/Accession_ID;%s' % (self.path, columns)
            body = []
            obj = {'Accession_ID': movieId,
                   'MP4_URI': new_uri,
                   'Bytes': file_size,
                   'MD5': new_md5,
                   'SHA256': new_sha256,
                   "Processing_Status": 'success'
                   }
            body.append(obj)
            headers = {'Content-Type': 'application/json'}
            resp = self.send_request('PUT', url, json.dumps(body), headers, False)
            resp.read()
            self.logger.debug('SUCCEEDED updated the entry for the "%s" file.' % (fileName))
        self.logger.debug('Ended Transcoding and Adding Watermarkers.')
"""
Upload a file.
"""
    def uploadFile(self, object_url, filePath, chunk_size, metadata):
        """Upload a file to hatrac via the chunked-upload protocol.

        Creates an upload job, PUTs the chunks, then finalizes the job.
        Any failure is logged and re-raised.
        """
        try:
            job_id = self.createUploadJob(object_url, filePath, chunk_size, metadata)
            self.chunksUpload(object_url, filePath, job_id, chunk_size)
            self.chunksUploadFinalization(object_url, job_id)
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('Can not upload file "%s" in namespace "%s://%s%s". Error: "%s"' % (filePath, self.scheme, self.host, object_url, str(ev)))
            raise
"""
Get the base64 digest string like sha256 utility would compute.
"""
def sha256base64(self, fpath, chunk_size):
h = hashlib.sha256()
try:
f = open(fpath, 'rb')
try:
b = f.read(chunk_size)
while b:
h.update(b)
b = f.read(chunk_size)
return base64.b64encode(h.digest())
finally:
f.close()
except:
return None
"""
Encode the content-disposition.
"""
def encode_disposition(self, orig):
m = re.match("^filename[*]=UTF-8''(?P<name>[-_.~A-Za-z0-9%]+)$", orig)
if m:
return orig
elif not orig.startswith("filename*=UTF-8''"):
raise ValueError('Cannot accept content-disposition "%s"; it must start with "filename*=UTF-8\'\'".' % orig)
else:
ret = ["filename*=UTF-8''"]
for c in orig[len("filename*=UTF-8''"):]:
m = m = re.match("(?P<name>[-_.~A-Za-z0-9%]+)$", c)
if m:
ret.append(c)
else:
#In case we want an URL encoding
#ret.append('%%%s' % c.encode('hex').upper())
ret.append('_')
return ''.join(ret)
"""
Create a job for uploading a file.
"""
    def createUploadJob(self, object_url, filePath, chunk_size, metadata):
        """Create a hatrac chunked-upload job and return its job id.

        Computes content checksums and guesses the content type, then
        POSTs the job description to "<object_url>;upload".
        """
        try:
            md5,sha256 = self.content_checksum(filePath, chunk_size)
            content_checksum = {"content-md5": md5,
                                "content-sha256": sha256}
            content_disposition = metadata.get('content_disposition', None)
            file_size = os.path.getsize(filePath)
            url = '%s;upload' % object_url
            headers = {'Content-Type': 'application/json'}
            if mimetypes.inited == False:
                mimetypes.init()
            content_type,encoding = mimetypes.guess_type(filePath)
            if content_type == None:
                content_type = 'application/octet-stream'
            obj = {"chunk-length": chunk_size,
                   "content-length": file_size,
                   "content-type": content_type}
            obj.update(content_checksum)
            if content_disposition != None:
                obj['content-disposition'] = self.encode_disposition(content_disposition)
            self.logger.debug('hatrac metadata: "%s"\n' % (json.dumps(obj)))
            resp = self.send_request('POST', url, body=json.dumps(obj), headers=headers)
            res = resp.read()
            # The response body holds the job URL; the job id is its last
            # path segment ([:-1] strips the trailing newline).
            job_id = res.split('/')[-1][:-1]
            self.logger.debug('Created job_id "%s" for url "%s://%s%s".' % (job_id, self.scheme, self.host, url))
            return job_id
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('Can not create job for uploading file "%s" in object "%s://%s%s". Error: "%s"' % (filePath, self.scheme, self.host, object_url, str(ev)))
            raise
"""
Upload a file through chunks.
"""
    def chunksUpload(self, object_url, filePath, job_id, chunk_size):
        """PUT the file content chunk by chunk; cancel the job on failure.

        NOTE: relies on Python 2 integer division for chunk_no.
        """
        try:
            file_size = os.path.getsize(filePath)
            chunk_no = file_size / chunk_size
            last_chunk_size = file_size % chunk_size
            f = open(filePath, "rb")
            for index in range(chunk_no):
                position = index
                body = f.read(chunk_size)
                url = '%s;upload/%s/%d' % (object_url, job_id, position)
                headers = {'Content-Type': 'application/octet-stream', 'Content-Length': '%d' % chunk_size}
                resp = self.send_request('PUT', url, body=body, headers=headers, sendData=True)
                res = resp.read()
            if last_chunk_size > 0:
                # Trailing partial chunk.
                position = chunk_no
                body = f.read(chunk_size)
                url = '%s;upload/%s/%d' % (object_url, job_id, position)
                headers = {'Content-Type': 'application/octet-stream', 'Content-Length': '%d' % last_chunk_size}
                resp = self.send_request('PUT', url, body=body, headers=headers, sendData=True)
                res = resp.read()
            f.close()
        except:
            et, ev, tb = sys.exc_info()
            # NOTE(review): if os.path.getsize() itself fails, "url" and "f"
            # are unbound here and this handler raises NameError instead.
            self.logger.error('Can not upload chunk for file "%s" in namespace "%s://%s%s" and job_id "%s". Error: "%s"' % (filePath, self.scheme, self.host, url, job_id, str(ev)))
            try:
                f.close()
                self.cancelJob(object_url, job_id)
            except:
                pass
            raise
"""
Cancel a job.
"""
    def cancelJob(self, object_url, job_id):
        """DELETE an in-progress chunked-upload job."""
        try:
            url = '%s;upload/%s' % (object_url, job_id)
            headers = {}
            resp = self.send_request('DELETE', url, headers=headers)
            res = resp.read()
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('Can not cancel job "%s" for object "%s://%s%s". Error: "%s"' % (job_id, self.scheme, self.host, url, str(ev)))
            raise
"""
Finalize the chunks upload.
"""
    def chunksUploadFinalization(self, object_url, job_id):
        """POST to the job URL to finalize a chunked upload.

        :return: the response body
        """
        try:
            url = '%s;upload/%s' % (object_url, job_id)
            headers = {}
            resp = self.send_request('POST', url, headers=headers)
            res = resp.read()
            return res
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('Can not finalize job "%s" for object "%s://%s%s". Error: "%s"' % (job_id, self.scheme, self.host, url, str(ev)))
            raise
"""
Get the hexa md5 checksum of the file.
"""
def md5hex(self, fpath):
h = hashlib.md5()
try:
f = open(fpath, 'rb')
try:
b = f.read(4096)
while b:
h.update(b)
b = f.read(4096)
return h.hexdigest()
finally:
f.close()
except:
return None
"""
Get the checksum of the file.
"""
def sha256sum(self, fpath):
h = hashlib.sha256()
try:
f = open(fpath, 'rb')
try:
b = f.read(4096)
while b:
h.update(b)
b = f.read(4096)
return h.hexdigest()
finally:
f.close()
except:
return None
"""
Get the md5sum file from hatrac.
"""
    def get_md5sum(self, url):
        """
        Retrieve the content-md5 header of a hatrac object via a HEAD
        request; returns None if the object is missing or on any error.
        """
        ret = None
        if url != None:
            headers = {'Accept': '*'}
            try:
                # 404 is expected for not-yet-uploaded objects; suppress
                # the detailed error log for it.
                resp = self.send_request('HEAD', url, '', headers=headers, ignoreErrorCodes=[NOT_FOUND])
                resp.read()
                ret = resp.getheader('content-md5', None)
            except:
                pass
        return ret
    def reportFailure(self, movieId, error_message):
        """
        Update the Slide_Video table with the transcode/watermark failure result.
        Errors here are logged and emailed but never raised.
        """
        try:
            columns = ["Processing_Status"]
            columns = ','.join([urllib.quote(col, safe='') for col in columns])
            url = '%s/attributegroup/Immunofluorescence:Slide_Video/Accession_ID;%s' % (self.path, columns)
            body = []
            obj = {'Accession_ID': movieId,
                   "Processing_Status": '%s' % error_message
                   }
            body.append(obj)
            headers = {'Content-Type': 'application/json'}
            resp = self.send_request('PUT', url, json.dumps(body), headers, False)
            resp.read()
            self.logger.debug('SUCCEEDED updated the Slide_Video table for the movie Accession_ID "%s" with the Processing_Status result "%s".' % (movieId, error_message))
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got unexpected exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Video: reportFailure ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))
    def getMovieFile(self, fileName, uri):
        """Download the original video from hatrac into the scratch area.

        :return: the local path "<data_scratch>/original_<fileName>",
            or None on any failure (which is logged and emailed).
        """
        try:
            self.logger.debug('Processing file: "%s".' % (fileName))
            movieFile = '%s/original_%s' % (self.data_scratch, fileName)
            url = '%s' % (uri)
            headers = {'Accept': '*'}
            resp = self.send_request('GET', url, '', headers, False)
            self.logger.debug('content-length: %s.' % (resp.getheader('content-length')))
            #self.logger.debug('response headers: %s.' % (resp.getheaders()))
            block_sz = 8192
            f = open(movieFile, 'wb')
            while True:
                buffer = resp.read(block_sz)
                if not buffer:
                    break
                f.write(buffer)
            f.close()
            self.logger.debug('File "%s", %d bytes.' % (movieFile, os.stat(movieFile).st_size))
            return movieFile
            """
            url = '%s' % (uri)
            headers = {'Accept': '*'}
            resp = self.send_request('HEAD', url, '', headers, False)
            resp.read()
            content_location = resp.getheader('content-location', None)
            if content_location != None:
                self.logger.debug('content_location: %s.' % (content_location))
                srcFile = urllib.unquote('%s%s' % (self.video_resources, content_location))
                shutil.copyfile(srcFile, movieFile)
                return movieFile
            else:
                self.logger.error('Can not get video file "%s"."%s"' % (fileName))
                self.sendMail('FAILURE Video: reportFailure ERROR', 'Can not get hatrac location for the file "%s".' % fileName)
                return None
            """
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('Can not get video file "%s"\n"%s"' % (fileName, str(ev)))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Video: reportFailure ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))
            return None
"""
Get the base64 digest string like md5 utility would compute.
"""
def md5sum(self, fpath, chunk_size):
h = hashlib.md5()
try:
f = open(fpath, 'rb')
try:
b = f.read(chunk_size)
while b:
h.update(b)
b = f.read(chunk_size)
return base64.b64encode(h.digest())
finally:
f.close()
except:
return None
"""
Append the cid=video string to the url query
"""
    def url_cid(self, url):
        """
        Append a "cid=video" query parameter to /ermrest/ URLs so the
        server can attribute the requests to this agent; other URLs are
        returned unchanged.
        """
        ret = url
        o = urlparse.urlparse(url)
        if o.path.startswith('/ermrest/'):
            delimiter = '?'
            try:
                o = urlparse.urlparse(url)
                if o.query != '':
                    delimiter = '&'
                ret = '%s%scid=video' % (url, delimiter)
            except:
                pass
        return ret
"""
Get the base64 digest strings like the sha256 and the md5 utilities would compute.
"""
def content_checksum(self, fpath, chunk_size):
hmd5 = hashlib.md5()
hsha256 = hashlib.sha256()
try:
f = open(fpath, 'rb')
try:
b = f.read(chunk_size)
while b:
hmd5.update(b)
hsha256.update(b)
b = f.read(chunk_size)
return (base64.b64encode(hmd5.digest()), base64.b64encode(hsha256.digest()))
finally:
f.close()
except:
return (None, None)
|
nilq/baby-python
|
python
|
import setuptools
from ipregistry import __version__
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as readme:
    long_description = readme.read()

setuptools.setup(
    name="ipregistry",
    version=__version__,
    author="Ipregistry",
    author_email="support@ipregistry.co",
    description="Official Python library for Ipregistry",
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=["cachetools", "requests", "six"],
    url="https://github.com/ipregistry/ipregistry-python",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Localization",
    ],
)
|
nilq/baby-python
|
python
|
from .openchemistryquery import OpenChemistryQuery, DEFAULT_BASE_URL
|
nilq/baby-python
|
python
|
"""
QEMU machine module:
The machine module primarily provides the QEMUMachine class,
which provides facilities for managing the lifetime of a QEMU VM.
"""
# Copyright (C) 2015-2016 Red Hat Inc.
# Copyright (C) 2012 IBM Corp.
#
# Authors:
# Fam Zheng <famz@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Based on qmp.py.
#
import errno
from itertools import chain
import logging
import os
import shutil
import signal
import socket
import subprocess
import tempfile
from types import TracebackType
from typing import (
Any,
BinaryIO,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
)
from . import console_socket, qmp
from .qmp import QMPMessage, QMPReturnValue, SocketAddrT
# Module-level logger for QEMU machine lifecycle events.
LOG = logging.getLogger(__name__)
class QEMUMachineError(Exception):
    """Raised when an error occurs while operating a QEMUMachine."""
class QEMUMachineAddDeviceError(QEMUMachineError):
    """Raised when a request to add a device cannot be fulfilled.

    These failures stem from limitations, missing information, or
    conflicting requests on QEMUMachine methods — not from errors
    reported by the QEMU binary itself.
    """
class AbnormalShutdown(QEMUMachineError):
    """Raised when a requested graceful shutdown was not performed."""
class QEMUMachine:
"""
A QEMU VM.
Use this object as a context manager to ensure
the QEMU process terminates::
with VM(binary) as vm:
...
# vm is guaranteed to be shut down here
"""
    def __init__(self,
                 binary: str,
                 args: Sequence[str] = (),
                 wrapper: Sequence[str] = (),
                 name: Optional[str] = None,
                 test_dir: str = "/var/tmp",
                 monitor_address: Optional[SocketAddrT] = None,
                 socket_scm_helper: Optional[str] = None,
                 sock_dir: Optional[str] = None,
                 drain_console: bool = False,
                 console_log: Optional[str] = None):
        '''
        Initialize a QEMUMachine

        @param binary: path to the qemu binary
        @param args: list of extra arguments
        @param wrapper: list of arguments used as prefix to qemu binary
        @param name: prefix for socket and log file names (default: qemu-PID)
        @param test_dir: where to create socket and log file
        @param monitor_address: address for QMP monitor
        @param socket_scm_helper: helper program, required for send_fd_scm()
        @param sock_dir: where to create socket (overrides test_dir for sock)
        @param drain_console: (optional) True to drain console socket to buffer
        @param console_log: (optional) path to console log file
        @note: Qemu process is not started until launch() is used.
        '''
        # Direct user configuration
        self._binary = binary
        self._args = list(args)
        self._wrapper = wrapper
        self._name = name or "qemu-%d" % os.getpid()
        self._test_dir = test_dir
        self._sock_dir = sock_dir or self._test_dir
        self._socket_scm_helper = socket_scm_helper
        if monitor_address is not None:
            self._monitor_address = monitor_address
            self._remove_monitor_sockfile = False
        else:
            # Auto-generated monitor socket gets cleaned up at shutdown.
            self._monitor_address = os.path.join(
                self._sock_dir, f"{self._name}-monitor.sock"
            )
            self._remove_monitor_sockfile = True
        self._console_log_path = console_log
        if self._console_log_path:
            # In order to log the console, buffering needs to be enabled.
            self._drain_console = True
        else:
            self._drain_console = drain_console
        # Runstate
        self._qemu_log_path: Optional[str] = None
        self._qemu_log_file: Optional[BinaryIO] = None
        self._popen: Optional['subprocess.Popen[bytes]'] = None
        self._events: List[QMPMessage] = []
        self._iolog: Optional[str] = None
        self._qmp_set = True   # Enable QMP monitor by default.
        self._qmp_connection: Optional[qmp.QEMUMonitorProtocol] = None
        self._qemu_full_args: Tuple[str, ...] = ()
        self._temp_dir: Optional[str] = None
        self._launched = False
        self._machine: Optional[str] = None
        self._console_index = 0
        self._console_set = False
        self._console_device_type: Optional[str] = None
        self._console_address = os.path.join(
            self._sock_dir, f"{self._name}-console.sock"
        )
        self._console_socket: Optional[socket.socket] = None
        self._remove_files: List[str] = []
        self._user_killed = False
    def __enter__(self) -> 'QEMUMachine':
        # Context-manager entry: no implicit setup; launch() stays explicit.
        return self
    def __exit__(self,
                 exc_type: Optional[Type[BaseException]],
                 exc_val: Optional[BaseException],
                 exc_tb: Optional[TracebackType]) -> None:
        # Guarantee the QEMU process is terminated when the context exits.
        self.shutdown()
def add_monitor_null(self) -> None:
"""
This can be used to add an unused monitor instance.
"""
self._args.append('-monitor')
self._args.append('null')
    def add_fd(self, fd: int, fdset: int,
               opaque: str, opts: str = '') -> 'QEMUMachine':
        """
        Pass a file descriptor to the VM

        @param fd: the file descriptor number to pass
        @param fdset: the fd-set id to add it to
        @param opaque: free-form tag identifying the fd
        @param opts: extra -add-fd option string, appended verbatim
        @return: self, to allow call chaining
        """
        options = ['fd=%d' % fd,
                   'set=%d' % fdset,
                   'opaque=%s' % opaque]
        if opts:
            options.append(opts)
        # This did not exist before 3.4, but since then it is
        # mandatory for our purpose
        if hasattr(os, 'set_inheritable'):
            os.set_inheritable(fd, True)
        self._args.append('-add-fd')
        self._args.append(','.join(options))
        return self
def send_fd_scm(self, fd: Optional[int] = None,
file_path: Optional[str] = None) -> int:
"""
Send an fd or file_path to socket_scm_helper.
Exactly one of fd and file_path must be given.
If it is file_path, the helper will open that file and pass its own fd.
"""
# In iotest.py, the qmp should always use unix socket.
assert self._qmp.is_scm_available()
if self._socket_scm_helper is None:
raise QEMUMachineError("No path to socket_scm_helper set")
if not os.path.exists(self._socket_scm_helper):
raise QEMUMachineError("%s does not exist" %
self._socket_scm_helper)
# This did not exist before 3.4, but since then it is
# mandatory for our purpose
if hasattr(os, 'set_inheritable'):
os.set_inheritable(self._qmp.get_sock_fd(), True)
if fd is not None:
os.set_inheritable(fd, True)
fd_param = ["%s" % self._socket_scm_helper,
"%d" % self._qmp.get_sock_fd()]
if file_path is not None:
assert fd is None
fd_param.append(file_path)
else:
assert fd is not None
fd_param.append(str(fd))
devnull = open(os.path.devnull, 'rb')
proc = subprocess.Popen(
fd_param, stdin=devnull, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=False
)
output = proc.communicate()[0]
if output:
LOG.debug(output)
return proc.returncode
@staticmethod
def _remove_if_exists(path: str) -> None:
"""
Remove file object at path if it exists
"""
try:
os.remove(path)
except OSError as exception:
if exception.errno == errno.ENOENT:
return
raise
def is_running(self) -> bool:
"""Returns true if the VM is running."""
return self._popen is not None and self._popen.poll() is None
    @property
    def _subp(self) -> 'subprocess.Popen[bytes]':
        # Accessor for the child process; raises if the VM was never launched.
        if self._popen is None:
            raise QEMUMachineError('Subprocess pipe not present')
        return self._popen
def exitcode(self) -> Optional[int]:
"""Returns the exit code if possible, or None."""
if self._popen is None:
return None
return self._popen.poll()
def get_pid(self) -> Optional[int]:
"""Returns the PID of the running process, or None."""
if not self.is_running():
return None
return self._subp.pid
    def _load_io_log(self) -> None:
        """Capture the contents of the QEMU log file into self._iolog."""
        if self._qemu_log_path is not None:
            with open(self._qemu_log_path, "r") as iolog:
                self._iolog = iolog.read()
    @property
    def _base_args(self) -> List[str]:
        """Standard argv prefix: display, QMP monitor, machine type, consoles."""
        args = ['-display', 'none', '-vga', 'none']
        if self._qmp_set:
            if isinstance(self._monitor_address, tuple):
                # TCP monitor address (host, port).
                moncdev = "socket,id=mon,host={},port={}".format(
                    *self._monitor_address
                )
            else:
                # Unix-domain socket path.
                moncdev = f"socket,id=mon,path={self._monitor_address}"
            args.extend(['-chardev', moncdev, '-mon',
                         'chardev=mon,mode=control'])
        if self._machine is not None:
            args.extend(['-machine', self._machine])
        # Pad earlier serial slots so the console lands at _console_index.
        for _ in range(self._console_index):
            args.extend(['-serial', 'null'])
        if self._console_set:
            chardev = ('socket,id=console,path=%s,server=on,wait=off' %
                       self._console_address)
            args.extend(['-chardev', chardev])
            if self._console_device_type is None:
                args.extend(['-serial', 'chardev:console'])
            else:
                device = '%s,chardev=console' % self._console_device_type
                args.extend(['-device', device])
        return args
    def _pre_launch(self) -> None:
        """Create the temp dir, log file and (if enabled) the QMP server socket."""
        self._temp_dir = tempfile.mkdtemp(prefix="qemu-machine-",
                                          dir=self._test_dir)
        self._qemu_log_path = os.path.join(self._temp_dir, self._name + ".log")
        self._qemu_log_file = open(self._qemu_log_path, 'wb')
        if self._console_set:
            self._remove_files.append(self._console_address)
        if self._qmp_set:
            if self._remove_monitor_sockfile:
                assert isinstance(self._monitor_address, str)
                self._remove_files.append(self._monitor_address)
            # Listen before QEMU starts so it can connect immediately.
            self._qmp_connection = qmp.QEMUMonitorProtocol(
                self._monitor_address,
                server=True,
                nickname=self._name
            )
    def _post_launch(self) -> None:
        """Accept the QMP monitor connection after QEMU has started."""
        if self._qmp_connection:
            self._qmp.accept()
    def _post_shutdown(self) -> None:
        """
        Called to cleanup the VM instance after the process has exited.
        May also be called after a failed launch.
        """
        # Comprehensive reset for the failed launch case:
        self._early_cleanup()
        if self._qmp_connection:
            self._qmp.close()
            self._qmp_connection = None
        if self._qemu_log_file is not None:
            self._qemu_log_file.close()
            self._qemu_log_file = None
        # Preserve the log contents before removing the temp dir.
        self._load_io_log()
        self._qemu_log_path = None
        if self._temp_dir is not None:
            shutil.rmtree(self._temp_dir)
            self._temp_dir = None
        while len(self._remove_files) > 0:
            self._remove_if_exists(self._remove_files.pop())
        exitcode = self.exitcode()
        # Warn if QEMU died from a signal, unless it was our own SIGKILL.
        if (exitcode is not None and exitcode < 0
                and not (self._user_killed and exitcode == -signal.SIGKILL)):
            msg = 'qemu received signal %i; command: "%s"'
            if self._qemu_full_args:
                command = ' '.join(self._qemu_full_args)
            else:
                command = ''
            LOG.warning(msg, -int(exitcode), command)
        self._user_killed = False
        self._launched = False
def launch(self) -> None:
    """
    Launch the VM and make sure we cleanup and expose the
    command line/output in case of exception
    """
    if self._launched:
        raise QEMUMachineError('VM already launched')

    self._iolog = None
    # Reset so a failed launch does not report a stale command line.
    self._qemu_full_args = ()
    try:
        self._launch()
        self._launched = True
    except:
        # Bare except on purpose: clean up and re-raise whatever happened,
        # including KeyboardInterrupt/SystemExit.
        self._post_shutdown()

        LOG.debug('Error launching VM')
        if self._qemu_full_args:
            LOG.debug('Command: %r', ' '.join(self._qemu_full_args))
        if self._iolog:
            LOG.debug('Output: %r', self._iolog)
        raise
def _launch(self) -> None:
    """
    Launch the VM and establish a QMP connection.

    Builds the full command line from the wrapper, binary, base args and
    extra args, spawns QEMU with its output captured in the log file, then
    accepts the QMP connection.
    """
    self._pre_launch()
    self._qemu_full_args = tuple(
        chain(self._wrapper,
              [self._binary],
              self._base_args,
              self._args)
    )
    LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args))
    # BUG FIX: the original opened os.devnull manually and never closed it,
    # leaking a file descriptor per launch; subprocess.DEVNULL is managed
    # by the subprocess module itself.
    self._popen = subprocess.Popen(self._qemu_full_args,
                                   stdin=subprocess.DEVNULL,
                                   stdout=self._qemu_log_file,
                                   stderr=subprocess.STDOUT,
                                   shell=False,
                                   close_fds=False)
    self._post_launch()
def _early_cleanup(self) -> None:
    """
    Cleanup that must run before waiting for the VM process to exit.

    Invoked by both soft and hard shutdown in failover scenarios, and by
    _post_shutdown for comprehensive cleanup.

    Rationale: keeping the console socket open can deadlock us — QEMU
    blocks until the socket becomes writeable while we block waiting
    for QEMU to exit.
    """
    if self._console_socket is not None:
        self._console_socket.close()
        self._console_socket = None
def _hard_shutdown(self) -> None:
    """
    Run early cleanup, SIGKILL the QEMU process, and reap it.

    :raise subprocess.TimeoutExpired: if the process has still not
        terminated after 60 seconds.
    """
    self._early_cleanup()
    self._subp.kill()
    self._subp.wait(timeout=60)
def _soft_shutdown(self, timeout: Optional[int],
                   has_quit: bool = False) -> None:
    """
    Run early cleanup, ask the guest to quit via QMP, and reap the process.

    :param timeout: seconds to wait for termination; None waits forever.
    :param has_quit: when True, assume 'quit' was already issued and skip it.
    :raise ConnectionReset: on QMP communication errors.
    :raise subprocess.TimeoutExpired: when the timeout elapses before the
        QEMU process terminates.
    """
    self._early_cleanup()

    # Might raise ConnectionReset
    if self._qmp_connection and not has_quit:
        self._qmp.cmd('quit')

    # May raise subprocess.TimeoutExpired
    self._subp.wait(timeout=timeout)
def _do_shutdown(self, timeout: Optional[int],
                 has_quit: bool = False) -> None:
    """
    Try a graceful shutdown; fall back to a hard shutdown on any failure.

    :param timeout: seconds allowed for the graceful attempt (None: wait
        forever).
    :param has_quit: when True, skip issuing the 'quit' QMP command.
    :raise AbnormalShutdown: the graceful path failed; the original cause
        (typically ConnectionReset or subprocess.TimeoutExpired) is chained.
        In rare cases the hard fallback may raise on its own, most likely
        subprocess.TimeoutExpired.
    """
    try:
        self._soft_shutdown(timeout, has_quit)
    except Exception as soft_failure:
        self._hard_shutdown()
        raise AbnormalShutdown("Could not perform graceful shutdown") \
            from soft_failure
def shutdown(self, has_quit: bool = False,
             hard: bool = False,
             timeout: Optional[int] = 30) -> None:
    """
    Terminate the VM (gracefully if possible) and perform cleanup.
    Cleanup will always be performed.

    If the VM has not yet been launched, or shutdown(), wait(), or kill()
    have already been called, this method does nothing.

    :param has_quit: When true, do not attempt to issue 'quit' QMP command.
    :param hard: When true, do not attempt graceful shutdown, and
                 suppress the SIGKILL warning log message.
    :param timeout: Optional timeout in seconds for graceful shutdown.
                    Default 30 seconds, A `None` value is an infinite wait.
    """
    if not self._launched:
        return

    try:
        if hard:
            # Remember the SIGKILL was deliberate so _post_shutdown does
            # not warn about the resulting negative exit code.
            self._user_killed = True
            self._hard_shutdown()
        else:
            self._do_shutdown(timeout, has_quit)
    finally:
        # Cleanup runs regardless of how (or whether) termination worked.
        self._post_shutdown()
def kill(self) -> None:
    """SIGKILL the VM, wait for it to exit, and run post-shutdown cleanup."""
    # Hard shutdown: no QMP 'quit', no SIGKILL warning in the logs.
    self.shutdown(hard=True)
def wait(self, timeout: Optional[int] = 30) -> None:
    """
    Wait for an already-quitting VM to power off, then clean up.

    :param timeout: seconds to wait (default 30); `None` waits forever.
    """
    # has_quit=True: the guest is expected to exit on its own, so no
    # 'quit' QMP command is sent.
    self.shutdown(has_quit=True, timeout=timeout)
def set_qmp_monitor(self, enabled: bool = True) -> None:
    """
    Enable or disable the QMP monitor.

    @param enabled: when False, QMP monitor options are left out of the
                    base arguments of the generated QEMU command line.
                    Default is True.
    @note: must be called before launch().
    """
    self._qmp_set = enabled
@property
def _qmp(self) -> qmp.QEMUMonitorProtocol:
    """The active QMP connection; raises if none has been established."""
    conn = self._qmp_connection
    if conn is None:
        raise QEMUMachineError("Attempt to access QMP with no connection")
    return conn
@classmethod
def _qmp_args(cls, _conv_keys: bool = True, **args: Any) -> Dict[str, Any]:
    """Build a QMP argument dict, optionally mapping '_' in keys to '-'."""
    if _conv_keys:
        return {key.replace('_', '-'): value for key, value in args.items()}
    return dict(args)
def qmp(self, cmd: str,
        conv_keys: bool = True,
        **args: Any) -> QMPMessage:
    """Send a QMP command and return the full response dict."""
    prepared = self._qmp_args(conv_keys, **args)
    return self._qmp.cmd(cmd, args=prepared)
def command(self, cmd: str,
            conv_keys: bool = True,
            **args: Any) -> QMPReturnValue:
    """
    Send a QMP command.

    On success return the command's 'return' value; on failure raise.
    """
    prepared = self._qmp_args(conv_keys, **args)
    return self._qmp.command(cmd, **prepared)
def get_qmp_event(self, wait: bool = False) -> Optional[QMPMessage]:
    """Return one queued QMP event, polling the monitor if none is cached."""
    pending = self._events
    if pending:
        return pending.pop(0)
    return self._qmp.pull_event(wait=wait)
def get_qmp_events(self, wait: bool = False) -> List[QMPMessage]:
    """Drain the monitor and the local cache; return all pending events."""
    result = self._qmp.get_events(wait=wait)
    result.extend(self._events)
    self._events.clear()
    self._qmp.clear_events()
    return result
@staticmethod
def event_match(event: Any, match: Optional[Any]) -> bool:
    """
    Check if an event matches optional match criteria.

    The match criteria takes the form of a matching subdict. The event is
    checked to be a superset of the subdict, recursively, with matching
    values whenever the subdict values are not None.

    This has a limitation that you cannot explicitly check for None values.

    Examples, with the subdict queries on the left:
     - None matches any object.
     - {"foo": None} matches {"foo": {"bar": 1}}
     - {"foo": None} matches {"foo": 5}
     - {"foo": {"abc": None}} does not match {"foo": {"bar": 1}}
     - {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}}
    """
    if match is None:
        return True

    try:
        for key in match:
            if key not in event:
                return False
            if not QEMUMachine.event_match(event[key], match[key]):
                return False
        return True
    except TypeError:
        # either match or event wasn't iterable (not a dict)
        return bool(match == event)
def event_wait(self, name: str,
               timeout: float = 60.0,
               match: Optional[QMPMessage] = None) -> Optional[QMPMessage]:
    """
    Wait for (and return) the named QMP event, with a timeout.

    name: the event to wait for.
    timeout: QEMUMonitorProtocol.pull_event timeout parameter.
    match: optional match criteria; see event_match for details.
    """
    # Single-event convenience wrapper around events_wait().
    return self.events_wait([(name, match)], timeout)
def events_wait(self,
                events: Sequence[Tuple[str, Any]],
                timeout: float = 60.0) -> Optional[QMPMessage]:
    """
    events_wait waits for and returns a single named event from QMP.
    In the case of multiple qualifying events, this function returns the
    first one.

    :param events: A sequence of (name, match_criteria) tuples.
                   The match criteria are optional and may be None.
                   See event_match for details.
    :param timeout: Optional timeout, in seconds.
                    See QEMUMonitorProtocol.pull_event.

    :raise QMPTimeoutError: If timeout was non-zero and no matching events
                            were found.
    :return: A QMP event matching the filter criteria.
             If timeout was 0 and no event matched, None.
    """
    def _match(event: QMPMessage) -> bool:
        # True when the event satisfies any of the (name, match) filters.
        for name, match in events:
            if event['event'] == name and self.event_match(event, match):
                return True
        return False

    # Annotation-only declaration for the loop variable used below.
    event: Optional[QMPMessage]

    # Search cached events
    for event in self._events:
        if _match(event):
            self._events.remove(event)
            return event

    # Poll for new events
    while True:
        event = self._qmp.pull_event(wait=timeout)
        if event is None:
            # NB: None is only returned when timeout is false-ish.
            # Timeouts raise QMPTimeoutError instead!
            break
        if _match(event):
            return event
        # Non-matching events stay cached for later consumers.
        self._events.append(event)

    return None
def get_log(self) -> Optional[str]:
    """
    Return the captured output of the QEMU process, available after
    shutdown() or a failed launch; None if nothing was captured yet.
    """
    return self._iolog
def add_args(self, *args: str) -> None:
    """Append extra command-line arguments for the QEMU binary."""
    self._args += list(args)
def set_machine(self, machine_type: str) -> None:
    """
    Select the machine type.

    When set, it is added to the base arguments of the generated QEMU
    command line.
    """
    self._machine = machine_type
def set_console(self,
                device_type: Optional[str] = None,
                console_index: int = 0) -> None:
    """
    Request a console device (plus its backing character device) in the
    base arguments of the generated QEMU command line.

    When *device_type* is None, a plain "-serial chardev:console" argument
    is used, leaving the device type up to the machine default. Actual
    command-line assembly is deferred to launch time because it depends on
    the temporary directory being created.

    @param device_type: the device type, e.g. "isa-serial"; None (the
                        default) selects the machine's default serial
                        device via "-serial chardev:console".
    @param console_index: index of the console device to use. If not
                          zero, 'index - 1' placeholder consoles backed
                          by the 'null' character device are created
                          first.
    """
    self._console_set = True
    self._console_device_type = device_type
    self._console_index = console_index
@property
def console_socket(self) -> socket.socket:
    """Return a socket connected to the console, creating it lazily."""
    sock = self._console_socket
    if sock is None:
        sock = console_socket.ConsoleSocket(
            self._console_address,
            file=self._console_log_path,
            drain=self._drain_console)
        self._console_socket = sock
    return sock
|
nilq/baby-python
|
python
|
import os
import json
import numpy as np
import pandas as pd
import time
from hydroDL import kPath, utils
from hydroDL.data import usgs, transform, dbBasin
import statsmodels.api as sm
sn = 1e-5
def loadSite(siteNo, freq='D', trainSet='B10', the=[150, 50], codeLst=usgs.varC):
    """Load one site's saved WRTDS output, computing it on the fly if missing.

    :param siteNo: USGS site number (string).
    :param freq: time-step code used in the output directory name.
    :param trainSet: train/test split scheme name.
    :param the: minimum train/test observation counts, forwarded to calWRTDS.
    :param codeLst: water-quality codes to return (and to fit if needed).
    :return: DataFrame restricted to the *codeLst* columns.
    """
    dirRoot = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-{}'.format(freq))
    dirOut = os.path.join(dirRoot, trainSet)
    saveName = os.path.join(dirOut, siteNo+'.csv')
    if os.path.exists(saveName):
        dfW = pd.read_csv(saveName, index_col=None).set_index('date')
    else:
        print('do calWRTDS before')
        # BUG FIX: forward the caller's codeLst — the original hard-coded
        # codeLst=usgs.varC here, silently ignoring the argument.
        dfW = calWRTDS(siteNo, freq, trainSet=trainSet,
                       the=the, codeLst=codeLst)
    return dfW[codeLst]
def loadMat(siteNoLst, codeLst, freq='D', trainSet='B10'):
    """Stack WRTDS outputs for many sites into a [time, site, code] array.

    Assumes every site shares the same time axis as the first site
    (lengths must agree) — TODO confirm against the saved files.

    :return: numpy array of shape [nt, len(siteNoLst), len(codeLst)],
        NaN where a site/code has no output.
    """
    # Size the time dimension from the first site.
    dfW = loadSite(siteNoLst[0], freq=freq, trainSet=trainSet,
                   codeLst=codeLst)
    nt = len(dfW)
    # BUG FIX: third dimension was len(siteNoLst); it must be len(codeLst).
    # Also use np.full(nan) instead of np.ndarray (uninitialized memory).
    out = np.full([nt, len(siteNoLst), len(codeLst)], np.nan)
    for indS, siteNo in enumerate(siteNoLst):
        # Load each site once, not once per code as the original did.
        dfW = loadSite(siteNo, freq=freq,
                       trainSet=trainSet, codeLst=codeLst)
        for indC, code in enumerate(codeLst):
            # BUG FIX: select the single code column (was dfW[codeLst],
            # the whole frame, which cannot broadcast into one column).
            out[:, indS, indC] = dfW[code].values
    # BUG FIX: the original built `out` but never returned it.
    return out
def calWRTDS(siteNo, freq='D', trainSet='B10', the=[150, 50], fitAll=True, codeLst=usgs.varC, reCal=False):
    """Fit WRTDS (Weighted Regression on Time, Discharge and Season) for
    one site and save/return predicted concentrations.

    For each code with enough observations, a locally weighted WLS model
    log(C) ~ [yr, logQ, sinT, cosT] is refit around every prediction time
    using tri-cube weights (see calWeight).

    :param siteNo: USGS site number (string).
    :param freq: time-step code ('D' for daily).
    :param trainSet: split scheme understood by defineTrainSet.
    :param the: minimum [train, test] observation counts per code.
    :param fitAll: when True, predict at every date with predictors;
        otherwise only at test-set observation dates.
    :param codeLst: water-quality codes to fit.
    :param reCal: when False, return early if saved output already exists.
    :return: DataFrame of predictions indexed like the site time series
        (None when skipped because output already exists).
    """
    dirRoot = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-{}'.format(freq))
    dirOut = os.path.join(dirRoot, trainSet)
    # NOTE(review): saved without a '.csv' extension, while loadSite reads
    # siteNo+'.csv' — verify the cache file names actually agree.
    saveName = os.path.join(dirOut, siteNo)
    if os.path.exists(saveName):
        print('calculated {}'.format(siteNo))
        if reCal is False:
            return
    t0 = time.time()
    varQ = '00060'  # USGS streamflow parameter code
    varLst = codeLst+[varQ]
    df = dbBasin.readSiteTS(siteNo, varLst=varLst, freq=freq)
    dfYP = pd.DataFrame(index=df.index, columns=codeLst)
    # Predictor table: log-flow plus seasonal harmonics of decimal year.
    dfX = pd.DataFrame({'date': df.index}).set_index('date')
    dfX = dfX.join(np.log(df[varQ]+sn)).rename(
        columns={varQ: 'logQ'})
    yr = dfX.index.year.values
    t = yr+dfX.index.dayofyear.values/365
    dfX['sinT'] = np.sin(2*np.pi*t)
    dfX['cosT'] = np.cos(2*np.pi*t)
    dfX['yr'] = yr
    dfX['t'] = t
    xVarLst = ['yr', 'logQ', 'sinT', 'cosT']
    ind1, ind2 = defineTrainSet(df.index, trainSet)
    # train / test: keep only codes with enough samples on both sides
    fitCodeLst = list()
    for code in codeLst:
        b1 = df.iloc[ind1][code].dropna().shape[0] > the[0]
        b2 = df.iloc[ind2][code].dropna().shape[0] > the[1]
        if b1 and b2:
            fitCodeLst.append(code)
    for code in fitCodeLst:
        dfXY = dfX.join(np.log(df[code]+sn))
        df1 = dfXY.iloc[ind1].dropna()
        if fitAll:
            df2 = dfXY[xVarLst+['t']].dropna()
        else:
            df2 = dfXY.iloc[ind2].dropna()  # only fit for observations now
        n = len(df1)
        if n == 0:
            # NOTE(review): `break` abandons all remaining codes as well;
            # `continue` may have been intended — confirm.
            break
        # calculate weight
        h = np.array([7, 2, 0.5])  # window [Y Q S] from EGRET
        tLst = df2.index.tolist()
        for t in tLst:
            # Distances in decimal years, log-flow, and season (the season
            # distance wraps around the year boundary).
            dY = np.abs((df2.loc[t]['t']-df1['t']).values)
            dQ = np.abs((df2.loc[t]['logQ']-df1['logQ']).values)
            dS = np.min(
                np.stack([abs(np.ceil(dY)-dY), abs(dY-np.floor(dY))]), axis=0)
            d = np.stack([dY, dQ, dS])
            ww, ind = calWeight(d)
            # fit WLS on the weighted neighborhood and predict at t
            Y = df1.iloc[ind][code].values
            X = df1.iloc[ind][xVarLst].values
            model = sm.WLS(Y, X, weights=ww).fit()
            xp = df2.loc[t][xVarLst].values
            yp = model.predict(xp)[0]
            # Back-transform from log space.
            dfYP.loc[t][code] = np.exp(yp)-sn
        t1 = time.time()
        print(siteNo, code, t1-t0)
    saveName = os.path.join(dirOut, siteNo)
    dfYP.to_csv(saveName)
    return dfYP
def defineTrainSet(t, trainSet):
    """Split a time index into (train, test) positional index arrays.

    'B10' splits at 2010: training is every position Before 2010, testing
    is 2010 and later.

    :param t: pandas DatetimeIndex (anything exposing ``.year.values``).
    :param trainSet: split scheme name; only 'B10' is implemented.
    :return: tuple (ind1, ind2) of integer position arrays.
    :raises ValueError: for an unknown scheme — the original silently
        returned None, which crashed callers at tuple unpacking.
    """
    if trainSet == 'B10':
        yr = t.year.values
        ind1 = np.where(yr < 2010)[0]
        ind2 = np.where(yr >= 2010)[0]
        return ind1, ind2
    raise ValueError('unknown trainSet: {}'.format(trainSet))
def testWRTDS(dataName, trainSet, testSet, codeLst):
    """Fit WRTDS on *trainSet* and predict every site/code in *testSet*.

    For each (site, code) a locally weighted WLS model of log-concentration
    on [logQ, yr, sinT, cosT] is refit at every test time step.

    :param dataName: name of the saved DataFrameBasin dataset.
    :param trainSet: subset name used for fitting.
    :param testSet: subset name to predict; 'all' predicts at every time
        step with valid predictors, observed or not.
    :param codeLst: water-quality codes to predict.
    :return: numpy array [time, site, code] of predictions (NaN where a
        site/code had too few training samples).
    """
    DF = dbBasin.DataFrameBasin(dataName)
    # Calculate WRTDS from train and test set
    varX = ['00060']  # streamflow is the only predictor variable
    varY = codeLst
    d1 = dbBasin.DataModelBasin(DF, subset=trainSet, varX=varX, varY=varY)
    d2 = dbBasin.DataModelBasin(DF, subset=testSet, varX=varX, varY=varY)
    # Decimal-year time and seasonal harmonics, train (1) and test (2).
    tt1 = pd.to_datetime(d1.t)
    yr1 = tt1.year.values
    t1= yr1+tt1.dayofyear.values/365
    sinT1 = np.sin(2*np.pi*t1)
    cosT1 = np.cos(2*np.pi*t1)
    tt2 = pd.to_datetime(d2.t)
    yr2 = tt2.year.values
    t2= yr2+tt2.dayofyear.values/365
    sinT2 = np.sin(2*np.pi*t2)
    cosT2 = np.cos(2*np.pi*t2)
    ###
    yOut = np.full([len(d2.t), len(d2.siteNoLst), len(varY)], np.nan)
    t0 = time.time()
    for indS, siteNo in enumerate(d2.siteNoLst):
        for indC, code in enumerate(varY):
            print('{} {} {} {}'.format(indS, siteNo, code, time.time()-t0))
            y1 = d1.Y[:, indS, indC].copy()
            q1 = d1.X[:, indS, 0].copy()
            # Clamp negative flow values before the log transform.
            q1[q1 < 0] = 0
            logq1 = np.log(q1+sn)
            x1 = np.stack([logq1, yr1, sinT1, cosT1]).T
            y2 = d2.Y[:, indS, indC].copy()
            q2 = d2.X[:, indS, 0].copy()
            q2[q2 < 0] = 0
            logq2 = np.log(q2+sn)
            x2 = np.stack([logq2, yr2, sinT2, cosT2]).T
            # Drop rows with NaN predictors/targets; keep the positions.
            [xx1, yy1], ind1 = utils.rmNan([x1, y1])
            if testSet == 'all':
                # Predict wherever predictors exist, observed or not.
                [xx2], ind2 = utils.rmNan([x2])
            else:
                [xx2, yy2], ind2 = utils.rmNan([x2, y2])
            if len(ind1) < 40:
                # Too few training samples; leave NaN for this site/code.
                continue
            for k in ind2:
                # Distances in years, log-flow, and season (wrap-around).
                dY = np.abs(t2[k]-t1[ind1])
                dQ = np.abs(logq2[k]-logq1[ind1])
                dS = np.min(
                    np.stack([abs(np.ceil(dY)-dY), abs(dY-np.floor(dY))]), axis=0)
                d = np.stack([dY, dQ, dS])
                ww, ind = calWeight(d)
                # Weighted least squares fit around time step k.
                model = sm.WLS(yy1[ind], xx1[ind], weights=ww).fit()
                yp = model.predict(x2[k, :])[0]
                # NOTE(review): prediction appears to stay in log space here
                # (no exp back-transform, unlike calWRTDS) — confirm intended.
                yOut[k, indS, indC] = yp
    return yOut
def calWeight(d, h=[7, 2, 0.5], the=100):
    """Tri-cube WRTDS weights for a stack of candidate distances.

    :param d: array [3, n] of |distances| in [years, logQ, season]
        between the prediction point and each of n training samples.
    :param h: initial half-window widths [Y, Q, S] (from EGRET).
    :param the: target minimum number of samples inside the window; when
        n > the, windows are widened by 10% until enough samples fit.
        Otherwise the windows are set just beyond the farthest sample.
    :return: (ww, ind) — positive tri-cube weights and the positions of
        the samples they belong to.
    """
    n = d.shape[1]
    if n > the:
        hh = np.tile(h, [n, 1]).T
        bW = False
        # BUG FIX: the original looped `while ~bW:` — bitwise NOT on a
        # Python bool (~True == -2, truthy) — which only worked by accident
        # because bW became a np.bool_ after the first pass. Use `not`.
        while not bW:
            bW = np.sum(np.all(hh > d, axis=0)) > the
            if not bW:
                # Widen every window by 10% and re-count.
                hh = hh * 1.1
    else:
        # Few samples: open the windows just beyond the farthest one.
        htemp = np.max(d, axis=1) * 1.1
        hh = np.repeat(htemp[:, None], n, axis=1)
    # Tri-cube kernel per dimension; negative values mean "outside window".
    w = (1 - (d / hh) ** 3) ** 3
    w[w < 0] = 0
    # Combined weight is the product over the three dimensions.
    wAll = w[0] * w[1] * w[2]
    ind = np.where(wAll > 0)[0]
    ww = wAll[ind]
    return ww, ind
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
# URL routes for the payment app.
# NOTE: the literal 'payment/status/' route must stay before the
# 'payment/<order_id>/' route, or "status" would be captured as an order id.
urlpatterns = [
    path('payment/status/', views.PaycommitView.as_view()),
    # <order_id> is captured as a string and passed to the view as a kwarg.
    path('payment/<order_id>/', views.PayURLView.as_view()),
]
|
nilq/baby-python
|
python
|
# generate a random topo
import sys
import os
import random
import json
class Node():
    """A graph vertex identified by an integer id, with weighted edges."""

    def __init__(self, nodeid):
        self.nodeid = nodeid
        # Maps neighbor nodeid -> edge weight.
        self.neighbors = dict()

    def connect(self, node, weight):
        """Record a directed edge to *node* with the given weight."""
        self.neighbors[node.nodeid] = weight
def topo_to_file(topo):
    """Write the topology edge list plus random BGP preference/announcement
    files for *topo* under dataset/fm/ (directories must already exist).

    :param topo: dict mapping nodeid -> Node.
    """
    size = len(topo)
    # Edge list: one "src|dst|weight" line per directed edge.
    lines = []
    for node in topo.values():
        for n, weight in node.neighbors.items():
            lines.append('%d|%d|%d\n' % (node.nodeid, n, weight))
    # `with` closes the file; the original also called f.close() inside
    # the with-block, which was redundant.
    with open('dataset/fm/topo/fm_%d.txt' % size, 'w') as f:
        f.write(''.join(lines))

    # write some announcements: ~20% of nodes speak BGP, at least one.
    bgp_nodes = random.sample(list(topo.keys()), int(len(topo) * 0.2))
    if not bgp_nodes:
        bgp_nodes = [random.randint(1, len(topo))]
    bgp_pref, bgp_announcements = {}, {}
    for b in bgp_nodes:
        # Roughly 50/50 split between preference levels 4 and 3.
        bgp_pref[b] = 4 if random.randint(1, 10) > 5 else 3
        bgp_announcements[b] = {
            "length": random.randint(1, 5),
            "med": random.randint(1, 10)
        }
    with open('dataset/fm/bgp/pref_%d.json' % size, 'w') as fpref:
        json.dump(bgp_pref, fpref)
    with open('dataset/fm/bgp/announcement_%d.json' % size, 'w') as fann:
        json.dump(bgp_announcements, fann)
if __name__ == '__main__':
    # Number of nodes in the generated topology, from the command line.
    topo_size = int(sys.argv[1])
    node_list = {}
    for i in range(1, topo_size + 1):
        node_list[i] = Node(i)
    # Fully connected topology with random edge weights in [1, 10];
    # edges are only recorded in the lower-id -> higher-id direction.
    for i in range(1, topo_size + 1):
        for j in range(i + 1, topo_size + 1):
            node_list[i].connect(node_list[j], random.randint(1, 10))
    topo_to_file(node_list)
    #os.makedirs('conf/%s/configs'%sys.argv[1].split('.')[0])
    #topoToConf(node_list, 'conf', sys.argv[1].split('.')[0])

# topoToConf(topo, 'tmp/', 'bin', 2)
# topoToFile(topo, 'files/topo_test_%d.txt'%totalNode, 'files/connections_test_%d.txt'%totalNode)
# vis(topo)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import click
import clio
# These would be ideally stored in some secure persistence
# Module-level token cache, populated by the `login` command below.
accessToken = ''
refreshToken = ''
@click.group()
@click.option('--debug/--no-debug', default=False)
def cli(debug):
    # Root command group; reports whether debug mode was requested.
    # (Help text intentionally left unset, matching the original.)
    mode = 'on' if debug else 'off'
    click.echo(f"Debug mode is {mode}")
@cli.command()
@click.option(
    '--domain', prompt=True,
    default=lambda: os.getenv('CLIO_AUTH_DOMAIN'))
@click.option(
    '--client_id', prompt=True,
    default=lambda: os.getenv('CLIO_AUTH_CLIENTID'))
def login(domain, client_id):
    """Authenticate against IDP"""
    # BUG FIX: the --client_id option was declared but `client_id` was
    # missing from the function signature, so click refused the extra
    # argument and the body's `client_id` reference raised NameError.
    global accessToken, refreshToken
    click.echo('Login')
    # OAuth device flow: the tokens are cached in the module globals.
    auth = clio.Authorization(domain, client_id)
    auth.DeviceFlow()
    accessToken, refreshToken = auth.Tokens()
@cli.command()
def status():
    # Placeholder: will eventually report token/authentication state.
    click.echo('Status')
if __name__ == '__main__':
    # Entry point: dispatch to the click command group.
    cli()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.