content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
"""Define the setup function using setup.cfg."""
from setuptools import setup
setup()
|
nilq/baby-python
|
python
|
"""
====================
einsteinpy_geodesics
====================
Julia wrapper for Geodesics
"""
__version__ = "0.2.dev0"
from .geodesics_wrapper import solveSystem
|
nilq/baby-python
|
python
|
from leek.api.routes.api_v1 import api_v1_blueprint
from leek.api.routes.manage import manage_bp
from leek.api.routes.users import users_bp
from leek.api.routes.applications import applications_bp
from leek.api.routes.events import events_bp
from leek.api.routes.search import search_bp
from leek.api.routes.agent import agent_bp
from leek.api.routes.control import control_bp
def register_blueprints(app):
    """Attach every leek API blueprint to the Flask *app*."""
    blueprints = (
        api_v1_blueprint,
        manage_bp,
        users_bp,
        applications_bp,
        events_bp,
        search_bp,
        agent_bp,
        control_bp,
    )
    for blueprint in blueprints:
        app.register_blueprint(blueprint)
|
nilq/baby-python
|
python
|
from __future__ import division
from __future__ import absolute_import
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import atexit
# Pool of reusable CUDA streams, recycled by AsyncInnerProduct below.
STREAM_POOL = []


def get_stream():
    """Return a CUDA stream, reusing a pooled one when available."""
    if not STREAM_POOL:
        return drv.Stream()
    return STREAM_POOL.pop()
class AsyncInnerProduct:
    """Asynchronously computed GPU dot product with lazy host transfer.

    The dot product is launched on construction; get_host_result() polls
    GPU events and starts, then completes, the device-to-host copy without
    blocking. It implicitly returns None until the value is available.
    """

    def __init__(self, a, b, pagelocked_allocator):
        # Launch the reduction on the GPU and record an event so completion
        # can be polled later without synchronizing.
        self.gpu_result = gpuarray.dot(a, b)
        self.gpu_finished_evt = drv.Event()
        self.gpu_finished_evt.record()
        self.gpu_finished = False
        # Callable returning page-locked host memory (required for async copies).
        self.pagelocked_allocator = pagelocked_allocator

    def get_host_result(self):
        """Poll for completion; return the host array once ready, else None.

        First successful poll kicks off the async device-to-host copy on a
        pooled stream; a later call returns the result after the copy event
        has fired and recycles the stream.
        """
        if not self.gpu_finished:
            # query() is non-blocking; only proceed once the dot product is done.
            if self.gpu_finished_evt.query():
                self.gpu_finished = True
                self.copy_stream = get_stream()
                self.host_dest = self.pagelocked_allocator(
                    self.gpu_result.shape, self.gpu_result.dtype, self.copy_stream
                )
                drv.memcpy_dtoh_async(
                    self.host_dest, self.gpu_result.gpudata, self.copy_stream
                )
                self.copy_finished_evt = drv.Event()
                self.copy_finished_evt.record()
        else:
            if self.copy_finished_evt.query():
                # Copy done: recycle the stream and hand back the result.
                STREAM_POOL.append(self.copy_stream)
                return self.host_dest
def _at_exit():
    """Drop pooled streams so they are not released after CUDA shuts down."""
    del STREAM_POOL[:]


atexit.register(_at_exit)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
import os

# NOTE(review): computed but never used below — dead-code candidate; confirm
# nothing external relies on importing this module for the side effect.
pathroot = os.path.split(os.path.realpath(__file__))[0]

setup(
    name='sqrt_std',
    version='0.1.0',
    packages=find_packages(),
    # Installs a 'sqrt_std' console command mapped to lib.sqrt_std:main.
    entry_points={
        'console_scripts': ['sqrt_std=lib.sqrt_std:main'],
    }
)
|
nilq/baby-python
|
python
|
from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
from tests import pyunit_utils
import h2o


def h2ocluster_status():
    """
    Python API test: h2o.cluster_status()
    Deprecated, use h2o.cluster().show_status(True)
    """
    ret = h2o.cluster_status()  # no return type
    assert ret is None


if __name__ == "__main__":
    # Run standalone under the pyunit harness...
    pyunit_utils.standalone_test(h2ocluster_status)
else:
    # ...or directly when imported by the test runner.
    h2ocluster_status()
|
nilq/baby-python
|
python
|
# Main 'run through' of the program. This is run periodically
# to update the state of the file to match what has happened on
# Qualtrics, as well as to send out new surveys in accordance
# with how many have expired or been completed
import urllib
import config
import quapi
import parsers
import helpers
import filemanager
import os
from datetime import datetime, timedelta

# Load config.
config.init()
curDir = os.path.dirname(__file__)

# Read old validation file, exit if it's bad.
valPath = os.path.join(curDir, 'validation.txt')
validation = filemanager.readcsv(valPath)
if validation[0][0] != 'GOOD':
    print(validation[0][0])
    print('Something went wrong at ' + str(datetime.now()) + ', stopping')
    exit()
# Mark the run as in-progress; restored to 'GOOD' on successful completion.
validation[0][0] = 'BAD'
filemanager.writecsv(validation, valPath)

# Read old csv to get data.
csvPath = os.path.join(curDir, config.fileName)
arr = filemanager.readcsv(csvPath)
users = filemanager.arrayToUsers(arr)

# Count how many completed surveys there were at the end of the last run.
# BUG FIX: this counter was initialised as 'precount' but incremented and
# read as 'preCount' (NameError); one spelling is now used throughout.
preCount = 0
for i in range(0, len(users)):
    if users[i].state == 'c':
        preCount = preCount + 1

# Query Qualtrics for responses.
# TODO This part needs to be changed to accomodate more than two surveys
# It's silly to have essentially the same line twice, let's generalize it.
xmlResp1 = quapi.makeRequest(quapi.getLegacyResponseData(
    config.survey1ID, config.survey1Questions))
xmlResp2 = quapi.makeRequest(quapi.getLegacyResponseData(
    config.survey2ID, config.survey2Questions))
arr1 = parsers.parseForEmails(xmlResp1, config.survey1Refs)
arr2 = parsers.parseForEmails(xmlResp2, config.survey2Refs)

# Integrate new responses with old data, set repeats and invalids to D,
# set completed surveys to C, send them thank yous.
# TODO Generalize this to more than two surveys
# BUG FIX: "for(d in range(0,2):" and "if(d == 1)" were not valid Python.
for d in range(0, 2):
    currSurv = arr1
    if d == 1:
        currSurv = arr2
    for i in range(0, len(currSurv)):
        for j in range(0, len(users)):
            if currSurv[i][0] == users[j].email:
                users[j].state = 'c'
                # Columns 1..n of a response row are the referred children.
                for k in range(0, len(users[j].childrenID)):
                    users[j].childrenID[k] = currSurv[i][k + 1]
                    if users[j].childrenID[k] is None:
                        users[j].childrenID[k] = ''
                    users[j].childrenID[k] = users[j].childrenID[k].replace(' ', '')

# Count number of completed surveys in integrated list.
postCount = 0
for i in range(len(users)):
    if users[i].state == 'c':
        postCount = postCount + 1

# Calculate number of 'credits', i.e. the maximum possible number of
# new surveys to be sent this round.
credits = (postCount - preCount) * config.creditsForCompletions

# Kill all nonvalid surveys.
if config.allowOnlySuffixes == 1:
    for i in range(0, len(users)):
        if config.suffix not in users[i].email:
            # BUG FIX: '==' (comparison) was used where '=' (assignment)
            # was intended, so off-suffix addresses were never disabled.
            users[i].state = 'd'
for i in range(0, len(users)):
    invalid = '~`!#$%^&*()_-+={}[]:>;\',</?*-+'
    for j in users[i].email:
        for k in invalid:
            if j == k:
                users[i].state = 'd'

# Kill expired surveys.
for i in range(len(users)):
    if ((users[i].sendTime + config.expiry) < datetime.now()
            and users[i].state == 's'):
        # BUG FIX: concatenating a datetime to a str raised TypeError;
        # convert explicitly (and add the missing spaces around 'at').
        print('killed expired survey belonging to ' + users[i].email + ' at '
              + str(users[i].sendTime))
        users[i].state = 'd'
        credits = credits + 1

# Add children to the list of users.
for i in range(0, len(users)):
    if users[i].state == 'c':
        for j in range(0, len(users[i].childrenID)):
            found = 0
            for k in range(0, len(users)):
                if users[i].childrenID[j] == users[k].email:
                    found = 1
            if found == 0:
                if users[i].childrenID[j]:
                    new = filemanager.User()
                    new.email = users[i].childrenID[j]
                    new.email = new.email.replace(' ', '')
                    new.parentID = users[i].email
                    new.state = 'n'
                    users.append(new)

# Check for Qs more than 1 day old, send surveys, set state to S.
for i in range(0, len(users)):
    if (users[i].state == 'q'
            and datetime.now() > (users[i].selectTime + config.delay)):
        surv = helpers.chooseSurvey()
        subj = ''
        if users[i].parentID == '' or config.altSubject == 1:
            subj = config.subject
        else:
            subj = config.subject2 + users[i].parentID
        quapi.sendSurveySubjectExpiry(users[i].email, surv, subj)
        users[i].state = 's'
        users[i].sendTime = datetime.now()
        users[i].survey = surv

# Calculate ave distance of each N to Qs and Ss and Cs.
listofListOfParents = [None] * len(users)
dists = [0] * len(users)
for i in range(0, len(users)):
    listofListOfParents[i] = helpers.getParentList(users, i)
for i in range(0, len(users)):
    if users[i].state == 'n':
        if not users[i].parentID:
            # No parent: give a large sentinel distance so orphans are
            # preferred when choosing who to invite next.
            dists[i] = 10000
        else:
            for j in range(0, len(users)):
                if (users[j].state == 's' or users[j].state == 'c'
                        or users[j].state == 'q'):
                    dists[i] = (dists[i] + helpers.calcDist(
                        listofListOfParents[i], listofListOfParents[j]))

# Count the total number of eligible surveys for denominator.
running = 0
for i in range(0, len(users)):
    if users[i].state == 's' or users[i].state == 'c' or users[i].state == 'q':
        running = running + 1

# Set Ns to Qs until either credits, or total coupons are exceeded.
# BUG FIX: a stray 'credits = 0' immediately before this loop zeroed the
# budget computed above, so the loop never executed and no new surveys
# were ever queued; it has been removed.
while credits > 0 and running < config.total:
    index = dists.index(max(dists))
    if users[index].state == 'n':
        users[index].state = 'q'
        users[index].selectTime = datetime.now()
        credits = credits - 1
        running = running + 1
        if running == config.total:
            print('ran out of coupons at ' + str(datetime.now()))
            exit()
    # Never pick the same candidate twice.
    dists[index] = 0
    # Stop early when no N-state users remain.
    found = 0
    for i in range(0, len(users)):
        if users[i].state == 'n':
            found = 1
    if not found:
        credits = 0

# Write to csv.
# BUG FIX: 'csvpath' / 'valpath' did not match the names defined above
# ('csvPath' / 'valPath') and raised NameError at the end of every run.
filemanager.writecsv(filemanager.usersToArray(users), csvPath)
print(str(datetime.now()))
validation[0][0] = 'GOOD'
filemanager.writecsv(validation, valPath)
|
nilq/baby-python
|
python
|
"""HTML Template Generator
Intended to parse HTML code and emit Python code compatible with Python
Templates.
TODO
- Change tag calls to pass void=True
Open Questions
1. How to merge the generic structure of parsed HTML with existing classes like
HTMLTemplate? For example: the <head> and <body> tags should be merged.
2. How to identify common patterns between templates and refactor those common
patterns into parent classes?
3. How to identify common patterns within a template and refactor to template
class methods (possibly with different parameters)?
"""
import argparse
import bs4
import requests
from urllib.parse import urljoin
class HTMLTemplateGenerator:
    """Fetch a URL and print Python Template code mirroring its HTML tree."""

    def __init__(self, url, name):
        self.url = url
        self.name = name
        self.indent = 0

    def put(self, value):
        """Print *value* prefixed by the current indentation."""
        print(f"{' ' * self.indent}{value}")

    def run(self):
        """Emit the class header, then walk the fetched document tree."""
        self.put(f'class {self.name}(HTMLTemplate):')
        self.indent += 4
        self.put('def run(self):')
        self.indent += 4
        response = requests.get(self.url)
        document = bs4.BeautifulSoup(response.content, 'lxml')
        for node in document.contents:
            self.visit(node)

    def visit(self, element):
        """Dispatch to visit_<lowercased type name> for *element*."""
        handler = getattr(self, 'visit_' + type(element).__name__.lower())
        handler(element)

    def visit_navigablestring(self, element):
        stripped = str(element).strip()
        if stripped:
            self.put(f'self.add({stripped!r})')

    def visit_comment(self, element):
        wrapped = f'<!--{str(element)}-->'
        self.put(f'self.add({wrapped!r})')

    def visit_tag(self, element):
        attrs = getattr(element, 'attrs', {})
        for key, value in attrs.items():
            # Unwrap single-element lists (bs4 stores some attrs as lists).
            if isinstance(value, list) and len(value) == 1:
                value = value[0]
            # Make links absolute relative to the source page.
            if key in ('href', 'src'):
                value = urljoin(self.url, value)
            attrs[key] = value
        attrs_arg = f', attrs={attrs!r}' if attrs else ''
        if element.contents:
            self.put(f'with self.tag({element.name!r}{attrs_arg}):')
            self.indent += 4
            for child in element.contents:
                self.visit(child)
            self.indent -= 4
        else:
            self.put(f'self.tag({element.name!r}{attrs_arg})')

    def visit_doctype(self, element):
        self.put(f"self.add('<!doctype {element}>')")
def main():
    """Command-line entry point: generate a template class from a URL."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('url')
    arg_parser.add_argument('name')
    options = arg_parser.parse_args()
    HTMLTemplateGenerator(options.url, options.name).run()


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import dash
import dash_bootstrap_components as dbc
from apps.monitor import Monitor
from apps.controller import Controller
from apps.navbar import navbar
# NOTE(review): external_stylesheets is defined but never passed to dash.Dash
# below (which uses BOOTSTRAP + assets/app.css instead) — confirm whether the
# codepen stylesheet should be included or this line removed.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=[
    dbc.themes.BOOTSTRAP, 'assets/app.css'])
def start():
    """Wire up callbacks, build the two-panel layout and run the server."""
    monitor = Monitor(app)
    controller = Controller(app)
    # Register Dash callbacks before the layout is assigned.
    monitor.callbacks()
    controller.callbacks()
    app.layout = dbc.Container(
        [
            navbar,
            dbc.Row(
                [
                    dbc.Col(monitor.layout(), id="id-left-panel", width=6),
                    dbc.Col(controller.layout(), id="id-right-panel", width=6),
                ]
            )
        ],
        fluid=True,
        style={"padding": 0}
    )
    # NOTE(review): binds on all interfaces — confirm that is intended.
    app.run_server(debug=False, host='0.0.0.0')


if __name__ == "__main__":
    start()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PosDishGroupModel import PosDishGroupModel
class KoubeiCateringPosDishgroupSyncModel(object):
    """Request model for the koubei.catering.pos.dishgroup.sync API."""

    def __init__(self):
        self._pos_dish_group_model = None

    @property
    def pos_dish_group_model(self):
        return self._pos_dish_group_model

    @pos_dish_group_model.setter
    def pos_dish_group_model(self, value):
        # Accept either a ready model instance or a plain alipay dict.
        if isinstance(value, PosDishGroupModel):
            self._pos_dish_group_model = value
        else:
            self._pos_dish_group_model = PosDishGroupModel.from_alipay_dict(value)

    def to_alipay_dict(self):
        """Serialize to an alipay-style dict, skipping unset fields."""
        params = dict()
        model = self.pos_dish_group_model
        if model:
            if hasattr(model, 'to_alipay_dict'):
                params['pos_dish_group_model'] = model.to_alipay_dict()
            else:
                params['pos_dish_group_model'] = model
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from an alipay dict; None for empty input."""
        if not d:
            return None
        instance = KoubeiCateringPosDishgroupSyncModel()
        if 'pos_dish_group_model' in d:
            instance.pos_dish_group_model = d['pos_dish_group_model']
        return instance
|
nilq/baby-python
|
python
|
with open("file.txt") as f:
something(f)
|
nilq/baby-python
|
python
|
#!/app/ansible2/bin/python
# -* coding: utf-8 -*-
DOCUMENTATION = '''
---
author: Arthur Reyes
module: pyvim_facts
description:
- This module gathers and correlates a larger number of useful facts
from a specified guest on VMWare vSphere.
version_added: "0.1"
requirements:
- pyVim
notes:
- This module disables SSL Security and warnings for invalid certificates.
- Tested with Ansible 2.0.1.0
options:
host:
description:
- The vSphere server that manages the cluster where the guest is
located on.
required: true
aliases: ['vsphere']
login:
description:
- A login name which can authenticate to the vSphere cluster.
required: true
aliases: ['admin']
password:
description:
- The password used to authenticate to the vSphere cluster.
required: true
aliases: ['secret']
port:
description:
- The port the vSphere listens on.
required: false
default: 443
guest:
description:
- The name of the guest to gather facts from the vSphere cluster.
Apparently the same guest name can exist in multiple datacenters, so
this value is ignored if uuid is defined.
required: true
uuid:
description:
- the instanceUuid of the guest. Useful to identify a unique guest
when multiple virtual machines with the same name exist across
clusters. If not defined and multiple guests are returned by a query
then this module will fail. If defined, guest name is ignored.
required: false
default: null
'''
import atexit
import sys
import requests
try:
from pyVim import connect
from pyVmomi import vmodl
from pyVmomi import vim
except ImportError:
print "failed=True msg='pyvmoni python module unavailable'"
sys.exit(1)
def main():
module = AnsibleModule(
argument_spec = dict(
host = dict(required=True, aliases=['vsphere']),
port = dict(required=False, default=443),
login = dict(required=True, aliases=['admin']),
password = dict(required=True, aliases=['secret']),
guest = dict(required=True),
uuid = dict(required=False, default=None),
)
)
host = module.params.get('host')
port = module.params.get('port')
login = module.params.get('login')
password = module.params.get('password')
guest = module.params.get('guest')
uuid = module.params.get('uuid')
context = connect.ssl.SSLContext(connect.ssl.PROTOCOL_TLSv1)
context.verify_mode = connect.ssl.CERT_NONE
requests.packages.urllib3.disable_warnings()
try:
service_instance = connect.SmartConnect(host=host,
port=int(port),
user=login,
pwd=password,
sslContext=context)
except Exception, e:
module.fail_json(msg='Failed to connect to %s: %s' % (host, e))
atexit.register(connect.Disconnect, service_instance)
content = service_instance.RetrieveContent()
VMView = content.viewManager.CreateContainerView(
content.rootFolder, [vim.VirtualMachine], True)
vms = []
children = VMView.view
VMView.Destroy()
for child in children:
if uuid and child.summary.config.instanceUuid == uuid:
# defining a uuid in the module params overrides guest name
vms.append(child)
break
elif not uuid and child.summary.config.name == guest:
vms.append(child)
if len(vms) == 1:
vm = vms[0]
sane_disk = vm.summary.config.vmPathName.replace('[', '').replace('] ', '/')
sane_path = "/".join(sane_disk.split('/')[0:-1])
#sanitize the datastore name so we can use it as search criteria
datastore = sane_path.split('/')[0]
# corrolate datacenter facts
DCView = content.viewManager.CreateContainerView( content.rootFolder, [vim.Datacenter],
True )
for dc in DCView.view:
DSView = content.viewManager.CreateContainerView( dc, [vim.Datastore], True )
for ds in DSView.view:
if ds.info.name == datastore:
vm_host_datacenter = dc.name
break
DCView.Destroy()
DSView.Destroy()
# corrolate datastore facts
HSView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.HostSystem],
True)
esxhosts = HSView.view
HSView.Destroy()
for esxhost in esxhosts:
if esxhost.name == vm.summary.runtime.host.summary.config.name:
vm_host = esxhost
host_storage = vm_host.configManager.storageSystem
host_storage_info = host_storage.fileSystemVolumeInfo.mountInfo
for mount in host_storage_info:
if str(mount.volume.name) == str(datastore):
vm_host_datastore = mount.volume.name
vm_host_datastore_capacity = mount.volume.capacity
vm_host_datastore_max_blocks = mount.volume.maxBlocks
break
break
facts = {
'general' : {
'name': vm.summary.config.name,
'full_name': vm.summary.config.guestFullName,
'id': vm.summary.config.guestId,
'instance_uuid': vm.summary.config.instanceUuid,
'bios_uuid': vm.summary.config.uuid,
'processor_count': vm.summary.config.numCpu,
'memtotal_mb': vm.summary.config.memorySizeMB,
'datacenter': vm_host_datacenter,
}
}
facts['vm_state'] = {
'host': vm.summary.runtime.host.summary.config.name,
'power': vm.summary.runtime.powerState,
'status': vm.summary.overallStatus,
}
facts['hm_datastore'] = {
'name': vm_host_datastore,
'capacity': vm_host_datastore_capacity,
'max_block_size': vm_host_datastore_max_blocks,
'guest_disk': vm.summary.config.vmPathName,
'guest_path_sane': sane_path,
'guest_path': "/".join((vm.summary.config.vmPathName).split('/')[0:-1]),
'guest_disk_sane': sane_disk,
}
facts['vm_bios'] = {
'bootOrder': vm.config.bootOptions.bootOrder,
}
# enumerate network
ints = {}
intidx = 0
for entry in vm.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
int_name = 'eth' + str(intidx)
ints[int_name] = {
'address_type' : entry.addressType,
'mac' : entry.macAddress,
'mac_upper' : entry.macAddress.upper(),
'mac_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
intidx += 1
facts['vm_network'] = ints
# enumerate virtual medial
virtual_devices = {}
virtual_media_types = ['CD/DVD drive', 'USB controller', 'Floppy drive' ]
for entry in vm.config.hardware.device:
if hasattr(entry, 'macAddress'):
continue
if not any(device in entry.deviceInfo.label for device in virtual_media_types):
continue
virtual_devices[entry.deviceInfo.label] = {
'summary': entry.deviceInfo.summary,
'unitNumber': entry.unitNumber,
}
facts['vm_removeable_media'] = virtual_devices
elif len(vms) == 0:
module.fail_json(msg='no virtual machines found')
else:
# we only want a single unique host.
module.fail_json(msg='guest lookup returned multiple virtual machines: %s'(vms))
module.exit_json(ansible_facts=facts)
# Ansible replaces this marker with the module-common boilerplate
# (which provides AnsibleModule) before the script is executed.
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|
nilq/baby-python
|
python
|
from bk_db_tools.xlsx_data_replace import XlsxDataReplace
class SOther(XlsxDataReplace):
    """Xlsx data-replace config for the 's_player_unit' sheet.

    The query returns every tracked player unit joined to its monster name,
    plus (via the UNION ALL branch) all level-35+ units that have no
    's_player_unit' row yet, with their usage columns left NULL. Rows are
    ordered by unit level (desc) then name, and written starting at
    row 2 / column 1 of the sheet.
    """
    dataSets = [
        {
            'sheetName': 's_player_unit',
            'firstColumn': 1,
            'firstRow': 2,
            'sql':
            """
            select *
            from (
                select pu.unit_id
                    , mn.full_name
                    , ul.unit_level
                    , pu.use_for_gb12
                    , pu.use_for_db12
                    , pu.use_for_nb12
                    , pu.use_for_sf10
                    , pu.use_for_pc10
                    , pu.use_for_toa
                    , pu.use_for_dhole_griffon
                    , pu.use_for_dhole_inugami
                    , pu.use_for_dhole_warbear
                    , pu.use_for_dhole_fairy
                    , pu.use_for_dhole_pixie
                    , pu.use_for_dhole_werewolf
                    , pu.use_for_dhole_cat
                    , pu.use_for_dhole_howl
                    , pu.use_for_dhole_grim
                    , pu.use_for_dhole_karzhan
                    , pu.use_for_dhole_ellunia
                    , pu.use_for_dhole_lumel
                    , pu.use_for_dhole_khalderun
                    , pu.use_for_d_predator
                    , pu.use_for_rift_beast_fire
                    , pu.use_for_rift_beast_ice
                    , pu.use_for_rift_beast_wind
                    , pu.use_for_rift_beast_light
                    , pu.use_for_rift_beast_dark
                    , pu.use_for_r5
                    , pu.use_for_lab
                    , pu.use_for_arena
                    , pu.use_for_gwo
                    , pu.use_for_gwd
                    , pu.use_for_rta
                    , pu.spd_tune_max
                    , pu.spd_tune_atk_bar
                    , pu.spd_tune_buffer
                    , pu.buffs
                    , pu.spd_tune_strip
                    , pu.spd_tune_debuff
                    , pu.debuffs
                    , pu.spd_tune_dmg
                    , pu.cleanser
                    , pu.reviver
                    , pu.runed_as_healer
                    , pu.runed_as_tank
                    , pu.runed_as_bruiser
                    , mn.full_name || ' lvl ' || ul.unit_level || ' | ' || ul.unit_id as unit_select
                from s_player_unit pu
                join swex_unit_list ul on pu.unit_id = ul.unit_id
                left join swarfarm_monster_names mn on mn.com2us_id = ul.com2us_id
                union all
                select ul2.unit_id
                    , mn2.full_name
                    , ul2.unit_level
                    , null as use_for_gb12
                    , null as use_for_db12
                    , null as use_for_nb12
                    , null as use_for_sf10
                    , null as use_for_pc10
                    , null as use_for_toa
                    , null as use_for_dhole_griffon
                    , null as use_for_dhole_inugami
                    , null as use_for_dhole_warbear
                    , null as use_for_dhole_fairy
                    , null as use_for_dhole_pixie
                    , null as use_for_dhole_werewolf
                    , null as use_for_dhole_cat
                    , null as use_for_dhole_howl
                    , null as use_for_dhole_grim
                    , null as use_for_dhole_karzhan
                    , null as use_for_dhole_ellunia
                    , null as use_for_dhole_lumel
                    , null as use_for_dhole_khalderun
                    , null as use_for_d_predator
                    , null as use_for_rift_beast_fire
                    , null as use_for_rift_beast_ice
                    , null as use_for_rift_beast_wind
                    , null as use_for_rift_beast_light
                    , null as use_for_rift_beast_dark
                    , null as use_for_r5
                    , null as use_for_lab
                    , null as use_for_arena
                    , null as use_for_gwo
                    , null as use_for_gwd
                    , null as use_for_rta
                    , null as spd_tune_max
                    , null as spd_tune_atk_bar
                    , null as spd_tune_buffer
                    , null as buffs
                    , null as spd_tune_strip
                    , null as spd_tune_debuff
                    , null as debuffs
                    , null as spd_tune_dmg
                    , null as cleanser
                    , null as reviver
                    , null as runed_as_healer
                    , null as runed_as_tank
                    , null as runed_as_bruiser
                    , mn2.full_name || ' lvl ' || ul2.unit_level || ' | ' || ul2.unit_id as unit_select
                from swex_unit_list ul2
                left join swarfarm_monster_names mn2 on mn2.com2us_id = ul2.com2us_id
                left join s_player_unit pu2 on ul2.unit_id = pu2.unit_id
                where pu2.unit_id is null
                and ul2.unit_level >= 35
            ) as r
            order by r.unit_level desc
                , r.full_name;
            """
        },
    ]
|
nilq/baby-python
|
python
|
import os
import h5py
import numpy as np

# One-hot task ids: 'worse' -> [0,0,1], 'okay' -> [0,1,0], 'better' -> [1,0,0].
type_to_id = {'worse': [0, 0, 1], 'okay': [0, 1, 0], 'better': [1, 0, 0]}
dirs = ['can',
        'lift',
        'square',
        'transport']

# Copy each environment's low_dim dataset so the original stays untouched.
for env in dirs:
    path = "datasets/" + env + "/mh"
    old = path + "/low_dim.hdf5"
    new = path + "/low_dim_fewer_better.hdf5"
    os.system('cp {0} {1}'.format(old, new))
def _add_task_ids(data, keys, task_id, obs_key):
    """Attach a constant per-timestep "task_id" dataset under *obs_key*.

    For each demo named in *keys*, tiles *task_id* (a one-hot list) across
    the demo's timestep axis (length taken from the "object" observation)
    and stores it as a float64 dataset named "task_id".
    """
    for k in keys:
        obs = data[k][obs_key]
        t = obs['object'].shape[0]
        task_indices_arr = np.tile(np.array(task_id), (t, 1))
        obs.create_dataset("task_id", shape=(t, len(task_id)), dtype='f8',
                           data=task_indices_arr)


def add_task_ids_for_obs(data, keys, task_id):
    """Add "task_id" to each demo's current observations ("obs")."""
    _add_task_ids(data, keys, task_id, 'obs')


def add_task_ids_for_next_obs(data, keys, task_id):
    """Add "task_id" to each demo's next observations ("next_obs")."""
    # Consolidated with add_task_ids_for_obs: both were identical except
    # for the observation group name.
    _add_task_ids(data, keys, task_id, 'next_obs')
def remove_eighty_percent_better(f):
    """Keep only the first 20% (sorted) of "better" demos; delete the rest.

    Removes the deleted demo names from every mask dataset and drops their
    groups from f['data'].
    """
    # Get demos to be deleted
    sorted_better_demos = sorted(f['mask']['better'][:])
    num_remaining_demos = int(len(sorted_better_demos) / 5)
    demos_to_be_deleted = sorted_better_demos[num_remaining_demos:]
    print("demos to be deleted", demos_to_be_deleted)
    # Typo fix in the log message ("nuber" -> "number").
    print("number of remaining better demos", num_remaining_demos)
    # Rebuild every mask without the deleted demos. Iterate a snapshot of
    # the keys: deleting/recreating members while iterating the live h5py
    # group is unsafe.
    for k in list(f['mask'].keys()):
        original_arr = f['mask'][k][:]
        new_arr = np.array([item for item in original_arr
                            if item not in demos_to_be_deleted])
        del f['mask'][k]
        f['mask'].create_dataset(k, data=new_arr)
    # Drop the demo groups themselves; again iterate a snapshot because we
    # mutate f['data'] inside the loop.
    demos_to_be_deleted_strings = [demo.decode("utf-8") for demo in demos_to_be_deleted]
    for demo in list(f['data'].keys()):
        if demo in demos_to_be_deleted_strings:
            del f['data'][demo]
def remove_demos_without_task_id(f):
    """Delete demos lacking an obs-level "task_id" from data and all masks."""
    # Collect first, delete after: removing keys while iterating the live
    # h5py group is unsafe.
    demos_without_task_id = [
        demo for demo in f['data'].keys()
        if 'task_id' not in f['data'][demo]['obs'].keys()
    ]
    for demo in demos_without_task_id:
        del f['data'][demo]
    # Rebuild each mask without the removed demos; iterate a snapshot of the
    # keys because the group is mutated inside the loop.
    for k in list(f['mask'].keys()):
        original_arr = f['mask'][k][:]
        new_arr = np.array([item for item in original_arr
                            if item.decode("utf-8") not in demos_without_task_id])
        del f['mask'][k]
        f['mask'].create_dataset(k, data=new_arr)
# Annotate every demo with its quality one-hot, then prune the dataset.
for dir in dirs:
    print('modifying ' + dir)
    path = 'datasets/{}/mh/low_dim_fewer_better.hdf5'.format(dir)
    f = h5py.File(path, "r+")
    data = f['data']
    mask = f['mask']
    for k, v in type_to_id.items():
        # Mask entries are bytes; decode to match the data group keys.
        demos = [demo.decode("utf-8") for demo in mask[k]]
        add_task_ids_for_obs(data, demos, v)
        add_task_ids_for_next_obs(data, demos, v)
    remove_demos_without_task_id(f)
    remove_eighty_percent_better(f)
    f.close()
|
nilq/baby-python
|
python
|
import sys
from pathlib import Path
path = str(Path(__file__).parents[1].resolve())
sys.path.append(path)
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import soundfile as sf
import torch
from jiwer import wer
import numpy as np
from sonorus.speech.lm import (
FairseqTokenDictionary,
W2lKenLMDecoder,
W2lViterbiDecoder,
W2lFairseqLMDecoder,
)
import optuna
from optuna.integration import BoTorchSampler
import joblib
def map_to_array(batch):
    """Load the waveform for *batch* and attach it under "speech"."""
    audio, _sample_rate = sf.read(batch["file"])
    batch["speech"] = audio
    return batch
def map_to_pred(batch):
    """Greedy (argmax) CTC decoding for one batch; adds "transcription".

    Uses the module-level `processor` and `model`; inference runs on CUDA.
    """
    input_values = processor(
        batch["speech"], return_tensors="pt", padding="longest"
    ).input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)
    batch["transcription"] = transcription
    return batch
def map_to_pred_lm(batch, decoder):
    """Beam-search decoding with *decoder* for one batch; adds "transcription".

    Logits are moved to CPU (float, contiguous) before decoding, as the
    wav2letter decoders operate on host memory.
    """
    input_values = processor(
        batch["speech"], return_tensors="pt", padding="longest"
    ).input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits
    logits = logits.float().cpu().contiguous()
    decoded = decoder.decode(logits)
    # 1st sample, 1st best transcription
    transcription = decoder.post_process(decoded)
    batch["transcription"] = transcription
    return batch
def get_wer(result, batch_size=-1, lm=False):
    """Mean word-error-rate over *result*.

    When *lm* is true, each "transcription" entry is a list of hypotheses
    and the first (best) one is scored. With batch_size > 0 the WER is
    computed per batch and the batch WERs are averaged; otherwise a single
    WER over the whole set is returned.
    """
    # Hoisted out of the loop: the original rebuilt the full transcript
    # list once per batch via a nested transcripts() call.
    transcripts = (
        [x[0] for x in result["transcription"]] if lm else result["transcription"]
    )
    references = result["text"]
    errors = []
    if batch_size > 0:
        for i in range(0, len(result), batch_size):
            errors.append(
                wer(
                    references[i : i + batch_size],
                    transcripts[i : i + batch_size],
                )
            )
    else:
        errors.append(wer(references, transcripts))
    return np.mean(errors)
# Evaluation corpus: LibriSpeech "clean" validation split.
librispeech_eval = load_dataset(
    "librispeech_asr",
    "clean",
    split="validation",
    # split="test",
    ignore_verifications=True,
)  # ,
# download_mode="force_redownload")

model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
librispeech_eval = librispeech_eval.map(map_to_array)

# Baseline: greedy decoding without a language model.
result = librispeech_eval.map(
    map_to_pred, batched=True, batch_size=1, remove_columns=["speech"]
)
print("Acoustic WER:", get_wer(result, batch_size=1000, lm=False))

token_dict = FairseqTokenDictionary(indexed_symbols=processor.get_vocab())
# NOTE(review): hard-coded local paths — parameterize before reuse.
lexicon_path = "/home/harold/Documents/IISc-work/imperio/data/speech/fairseq/librispeech_lexicon.lst"
lm_path = "/home/harold/Documents/IISc-work/imperio/data/speech/fairseq/lm_librispeech_kenlm_word_4g_200kvocab.bin"
# decoder = W2lKenLMDecoder(
#     token_dict=token_dict,
#     lexicon=lexicon_path,
#     lang_model=lm_path,
#     beam=1500,
#     beam_size_token=100,
#     beam_threshold=25,
#     lm_weight=1.5,
#     word_weight=-1,
#     unk_weight=float("-inf"),
#     sil_weight=0,
# )
# result = librispeech_eval.map(lambda batch: map_to_pred_lm(batch, decoder), batched=True, batch_size=1, remove_columns=["speech"])
# print("KenLM WER:", get_wer(result, batch_size=1000, lm=True))

# Bayesian optimisation (BoTorch) over the decoder weights.
n_startup_trials = 10
bayes_opt_sampler = BoTorchSampler(n_startup_trials=n_startup_trials)
study = optuna.create_study(sampler=bayes_opt_sampler)
def objective(trial):
    """Optuna objective: WER of KenLM beam-search decoding on the eval set.

    Searches the lm / word / silence weights; beam parameters are fixed.
    Lower is better (Optuna minimizes by default).
    """
    lm_weight = trial.suggest_float("lm_weight", 0, 5)
    word_weight = trial.suggest_float("word_weight", -5, 5)
    sil_weight = trial.suggest_float("sil_weight", -5, 5)
    decoder = W2lKenLMDecoder(
        token_dict=token_dict,
        lexicon=lexicon_path,
        lang_model=lm_path,
        beam=500,
        beam_size_token=100,
        beam_threshold=25,
        lm_weight=lm_weight,
        word_weight=word_weight,
        unk_weight=float("-inf"),
        sil_weight=sil_weight,
    )
    result = librispeech_eval.map(
        lambda batch: map_to_pred_lm(batch, decoder),
        batched=True,
        batch_size=1,
        remove_columns=["speech"],
    )
    return get_wer(result, batch_size=1000, lm=True)
# Run the search and persist the study for later inspection.
n_trials = 150
study.optimize(objective, n_trials=n_trials, show_progress_bar=True)
print("Best KenLM WER: ", study.best_value)
print("Best params: ", study.best_params)
joblib.dump(study, "speech-lm-hyperparams-opt-study.jb")
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
import logging
from django import template
from ..utils import get_seo_model
from ..models import SeoUrl
logger = logging.getLogger(__name__)
register = template.Library()
class SeoDataNode(template.Node):
    """Template node that injects a dict of SEO fields into the context.

    Resolution order: first any context object whose get_absolute_url()
    matches the request path, then a SeoUrl row for the path; otherwise
    the context is left untouched. Always renders as an empty string.
    """

    def __init__(self, variable_name):
        # Context key under which the SEO dict is stored.
        self.variable_name = variable_name

    def render(self, context):
        seo_model = get_seo_model()
        flat_context = context.flatten()
        path = flat_context['request'].path
        logger.debug('Looking for SEO object')
        for obj in flat_context.values():
            if (hasattr(obj, 'get_absolute_url') and
                    obj.get_absolute_url() == path):
                logger.debug('Found object: `{}`'.format(obj))
                # Collect the non-empty SEO fields from the matched object.
                seo = {}
                for field in seo_model._meta.fields:
                    if getattr(obj, field.name, '') != '':
                        logger.debug('Adding field `{}` to SEO dict'
                                     .format(field.name))
                        seo[field.name] = getattr(obj, field.name)
                if seo:
                    context[self.variable_name] = seo
                logger.debug('Returning with object data')
                return ''
        logger.debug('Looking for SEO URL')
        try:
            seo_url = SeoUrl.objects.get(url=path)
        except SeoUrl.DoesNotExist:
            logger.debug('No SEO URL found')
            return ''
        logger.debug('SEO URL found')
        # Collect the non-empty SEO fields from the SeoUrl row.
        seo = {}
        for field in seo_model._meta.fields:
            if getattr(seo_url, field.name, '') != '':
                seo[field.name] = getattr(seo_url, field.name)
                logger.debug('Adding field `{}` to SEO dict'
                             .format(field.name))
        context[self.variable_name] = seo
        logger.debug('Returning with URL data')
        return ''
def do_get_seo_data(parser, token):
    """Parse the {% get_seo_data [as variable] %} template tag.

    The optional "as <name>" clause selects the context variable to
    populate; it defaults to "seo".
    """
    bits = token.split_contents()
    well_formed = len(bits) == 1 or (len(bits) <= 3 and bits[1] == 'as')
    if not well_formed:
        raise template.TemplateSyntaxError(('Format is {} [as variable] '
                                            .format(bits[0])))
    variable_name = bits[2] if len(bits) > 2 else 'seo'
    return SeoDataNode(variable_name)


register.tag('get_seo_data', do_get_seo_data)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages

packages = find_packages()

setup(name='cddm_experiment',
      version="1.0.0",
      description='Tools for cross-differential dynamic microscopy experiment',
      author='Andrej Petelin',
      author_email='andrej.petelin@gmail.com',
      # NOTE(review): placeholder URL from the pypa sample project — update.
      url="https://github.com/pypa/sampleproject",
      packages=packages,
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",],
      python_requires='>=3.7',
      )
|
nilq/baby-python
|
python
|
class MyClass:
    """Minimal example class with no attributes or methods."""
    pass


# Instantiate MyClass and show its default repr.
obj = MyClass()
print(obj)
|
nilq/baby-python
|
python
|
def is_palindrome(input_string):
    """Check if a string is a palindrome, ignoring capitalisation and spaces.

    Returns:
        True if the cleaned string reads the same forwards and backwards,
        False if it does not, and the literal string "String is empty"
        when *input_string* is empty (kept for backward compatibility).

    e.g.
    >>> is_palindrome("kayak")
    True
    >>> is_palindrome("Boat")
    False
    """
    if input_string == '':
        # Preserve the original API: empty input yields a sentinel string.
        return "String is empty"
    # Lowercase and strip spaces once, then compare against the reverse.
    # (The original rebuilt the reversed string on every loop iteration,
    # which was O(n^2); this is a single O(n) pass.)
    cleaned = input_string.lower().replace(" ", "")
    return cleaned == cleaned[::-1]
# Tests (executed on import)
print(is_palindrome("kayak"))  # Return True
print(is_palindrome("Hold Your fire"))  # Return False
print(is_palindrome("Never Odd or Even"))  # Return True
print(is_palindrome("abc"))  # Return False
print(is_palindrome(""))  # Return "String is empty"
|
nilq/baby-python
|
python
|
from izihawa_utils.text import camel_to_snake
def test_camel_to_snake():
    """Verify camel_to_snake on single words, mixed case, digits, and
    multi-token / newline-separated input."""
    cases = [
        ('CamelCase', 'camel_case'),
        ('camelCase', 'camel_case'),
        ('camelCase camel123Case', 'camel_case camel123_case'),
        ('camelCase\ncamelCase', 'camel_case\ncamel_case'),
    ]
    for source, expected in cases:
        assert camel_to_snake(source) == expected
|
nilq/baby-python
|
python
|
from flask import Blueprint, request
from kaos_backend.controllers.notebook import NotebookController
from kaos_backend.util.flask import jsonify
from kaos_model.api import Response
def build_notebook_blueprint(controller: NotebookController):
    """Build the Flask blueprint exposing the notebook endpoints.

    Args:
        controller: NotebookController that performs the actual work; the
            routes below are thin wrappers delegating to it.

    Returns:
        flask.Blueprint: blueprint with list / create / build-logs / remove
        routes. Endpoint names are derived from the inner function names.
    """
    blueprint = Blueprint('notebook', __name__)
    @blueprint.route("/notebook/<workspace>", methods=["GET"])
    @jsonify
    def notebook_list(workspace):
        # List all notebooks in the given workspace.
        return Response(
            response=controller.list_notebooks(workspace)
        )
    @blueprint.route("/notebook/<workspace>", methods=["POST"])
    @jsonify
    def notebook_create(workspace):
        # NOTE(review): dots are stripped from the user name — presumably to
        # keep it a valid resource identifier; confirm against the controller.
        user = request.args.get('user', 'default').replace('.', '')
        cpu = request.args.get('cpu', None)
        memory = request.args.get('memory', None)
        gpu = int(request.args.get('gpu', 0))
        # request.data carries the notebook payload posted by the client.
        return controller.submit_notebook(workspace, user, request.data, cpu=cpu, memory=memory, gpu=gpu)
    @blueprint.route("/notebook/<workspace>/build/<job_id>/logs", methods=["GET"])
    @jsonify
    def build_notebook_logs(workspace, job_id):
        # Fetch build logs for a specific notebook build job.
        return controller.get_build_logs(workspace, job_id)
    @blueprint.route("/notebook/<notebook_name>", methods=["DELETE"])
    @jsonify
    def notebook_remove(notebook_name):
        # Remove a notebook by name (not scoped to a workspace).
        return controller.remove_notebook(notebook_name)
    return blueprint
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# setup.py
from setuptools import setup
# Short and long descriptions are intentionally identical; see README.md.
DESCRIPTION = "See ./README.md"
LONG_DESCRIPTION = DESCRIPTION

setup(
    author="Dan'",
    author_email="dan@home",
    name="mfm",
    version="0.0.0",
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    url="http://mfm.dan.net",
    # Fix: "Independant" -> "Independent".
    platforms=['OS Independent'],
    license='See ./LICENSE',
    classifiers=[
        # Fix: trove classifier segments must be separated by " :: ";
        # "Programming Language::Python::3.5" is not a valid classifier.
        "Programming Language :: Python :: 3.5",
    ],
    packages=['src']
)
|
nilq/baby-python
|
python
|
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import SimpleITK as sitk
def convert_mask_to_distance_map(mask, squaredDistance=False, normalise=False):
    """
    Generate a signed distance map from a binary mask.

    Args
        mask (sitk.Image) : the input (binary) mask
        squaredDistance (bool) : return squared distances instead of distances
        normalise (bool) : divide the map by its maximum value

    Returns
        (sitk.Image) : signed Maurer distance map, positive inside the mask,
        computed in physical (image spacing) units
    """
    raw_map = sitk.SignedMaurerDistanceMap(
        mask,
        insideIsPositive=True,
        squaredDistance=squaredDistance,
        useImageSpacing=True,
    )
    if normalise:
        # Use the array *view* to take the maximum without copying the image
        # buffer (consistent with convert_mask_to_reg_structure).
        return raw_map / (sitk.GetArrayViewFromImage(raw_map).max())
    else:
        return raw_map
def convert_mask_to_reg_structure(mask, expansion=1, scale=lambda x: x):
    """Convert a binary mask into a smooth structure for registration.

    The mask becomes a signed distance map; voxels within `expansion` of the
    surface are shifted up by `expansion`, negative values are clipped away,
    and the result is normalised to a maximum of 1 before the optional
    `scale` function is applied.

    Args:
        mask (sitk.Image): binary input mask.
        expansion: outward shift applied near/outside the surface, in the
            distance map's units.
        scale (callable): final element-wise transformation of the map.

    Returns:
        sitk.Image: the scaled registration structure.
    """
    distance_map = sitk.Cast(
        convert_mask_to_distance_map(mask, squaredDistance=False), sitk.sitkFloat64
    )
    # Add `expansion` wherever the signed distance is below `expansion`,
    # then clip to [0, 1000] so values outside that band are removed.
    inverted_distance_map = sitk.Threshold(
        distance_map
        + expansion * sitk.Cast(distance_map < (expansion), sitk.sitkFloat64),
        lower=0,
        upper=1000,
    )
    # Normalise to a maximum of 1 (the array view avoids a buffer copy).
    scaled_distance_map = inverted_distance_map / (
        sitk.GetArrayViewFromImage(inverted_distance_map).max()
    )
    return scale(scaled_distance_map)
def initial_registration_command_iteration(method):
    """Print the current iteration number and metric value.

    Intended to be attached to a sitk.ImageRegistrationMethod as a
    sitkIterationEvent observer during initial (rigid, similarity, affine,
    translation) registration.
    """
    iteration = method.GetOptimizerIteration()
    metric = method.GetMetricValue()
    print(f"{iteration:3} = {metric:10.5f}")
def deformable_registration_command_iteration(method):
    """Print the elapsed iteration count and metric value.

    Intended to be attached to a demons registration filter as a
    sitkIterationEvent observer.
    """
    elapsed = method.GetElapsedIterations()
    metric = method.GetMetric()
    print(f"{elapsed:3} = {metric:10.5f}")
def stage_iteration(method):
    """Print the transform's parameter count when a registration stage changes.

    Intended to be attached as a sitkMultiResolutionIterationEvent observer.
    """
    n_parameters = method.GetInitialTransform().GetNumberOfParameters()
    print("Number of parameters = {}".format(n_parameters))
def control_point_spacing_distance_to_number(image, grid_spacing):
    """Convert a control-point spacing given in physical distance into a
    per-axis number of control points.

    Args:
        image: object exposing GetSpacing()/GetSize() (e.g. sitk.Image).
        grid_spacing: desired spacing between control points, in the same
            physical units as the image spacing; scalar or per-axis.

    Returns:
        np.ndarray of int: rounded control-point count per axis.
    """
    spacing = np.asarray(image.GetSpacing())
    size = np.asarray(image.GetSize())
    # Physical extent divided by the desired spacing gives the point count.
    point_counts = size * spacing / np.asarray(grid_spacing)
    # Round half up by adding 0.5 before truncation.
    return (point_counts + 0.5).astype(int)
def alignment_registration(fixed_image, moving_image, default_value=0, moments=True):
    """Align the moving image to the fixed image with a centred rigid transform.

    Args
        fixed_image (sitk.Image) : the fixed image
        moving_image (sitk.Image) : the moving image to align
        default_value : fill value for voxels mapped from outside moving_image
        moments (bool) : use moments-based (True) or geometry-based (False)
            centred transform initialisation

    Returns
        aligned_image (sitk.Image) : the resampled moving image
        initial_transform : the VersorRigid3D alignment transform
    """
    moving_image_type = moving_image.GetPixelIDValue()
    fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
    moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
    initial_transform = sitk.CenteredTransformInitializer(
        fixed_image, moving_image, sitk.VersorRigid3DTransform(), moments
    )
    # Fix: honour the default_value parameter (previously accepted but
    # ignored). sitk.Resample's defaults are sitkLinear / 0, so behaviour is
    # unchanged for the default default_value=0.
    aligned_image = sitk.Resample(
        moving_image, fixed_image, initial_transform, sitk.sitkLinear, default_value
    )
    aligned_image = sitk.Cast(aligned_image, moving_image_type)
    return aligned_image, initial_transform
def initial_registration(
    fixed_image,
    moving_image,
    moving_structure=False,
    fixed_structure=False,
    options=None,
    default_value=-1024,
    trace=False,
    reg_method="Similarity",
):
    """
    Rigid image registration using ITK
    Args
        fixed_image (sitk.Image) : the fixed image
        moving_image (sitk.Image): the moving image, transformed to match fixed_image
        moving_structure (sitk.Image|bool) : optional metric mask on the moving image
        fixed_structure (sitk.Image|bool) : optional metric mask on the fixed image
        options (dict|None) : registration options; None selects the defaults below
        default_value : fill value for voxels mapped from outside moving_image
        trace (bool) : print per-iteration registration information
        reg_method (str) : one of Translation, Similarity, Affine, Rigid,
            ScaleVersor, ScaleSkewVersor (case-insensitive)
    Returns
        registered_image (sitk.Image): the rigidly registered moving image
        transform (transform : the transform, can be used directly with
                              sitk.ResampleImageFilter
    """
    # Fix: the options dict used to be a mutable default argument, which is
    # shared across calls; build the defaults per call instead.
    if options is None:
        options = {
            "shrink_factors": [8, 2, 1],
            "smooth_sigmas": [4, 2, 0],
            "sampling_rate": 0.1,
            "final_interp": 3,
            "metric": "mean_squares",
            "optimiser": "gradient_descent",
            "number_of_iterations": 50,
        }
    # Re-cast
    fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
    moving_image_type = moving_image.GetPixelIDValue()
    moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
    # Get the options
    shrink_factors = options["shrink_factors"]
    smooth_sigmas = options["smooth_sigmas"]
    sampling_rate = options["sampling_rate"]
    final_interp = options["final_interp"]
    metric = options["metric"]
    optimiser = options["optimiser"]
    number_of_iterations = options["number_of_iterations"]
    # Initialise with a geometry-centred Euler3DTransform
    initial_transform = sitk.CenteredTransformInitializer(
        fixed_image, moving_image, sitk.Euler3DTransform(), False
    )
    # Set up image registration method
    registration = sitk.ImageRegistrationMethod()
    registration.SetShrinkFactorsPerLevel(shrink_factors)
    registration.SetSmoothingSigmasPerLevel(smooth_sigmas)
    registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
    registration.SetMovingInitialTransform(initial_transform)
    if metric == "correlation":
        registration.SetMetricAsCorrelation()
    elif metric == "mean_squares":
        registration.SetMetricAsMeanSquares()
    elif metric == "mattes_mi":
        registration.SetMetricAsMattesMutualInformation()
    elif metric == "joint_hist_mi":
        registration.SetMetricAsJointHistogramMutualInformation()
    elif metric == "ants":
        # Fix: narrow the previous bare except to the error actually expected
        # when the key is absent from the options dict.
        try:
            ants_radius = options["ants_radius"]
        except KeyError:
            ants_radius = 3
        registration.SetMetricAsANTSNeighborhoodCorrelation(ants_radius)
    # to do: add the rest
    registration.SetInterpolator(sitk.sitkLinear)  # Perhaps a small gain in improvement
    registration.SetMetricSamplingPercentage(sampling_rate)
    registration.SetMetricSamplingStrategy(sitk.ImageRegistrationMethod.REGULAR)
    # This is only necessary if using a transform comprising changes with different units
    # e.g. rigid (rotation: radians, translation: mm)
    # It can safely be left on
    registration.SetOptimizerScalesFromPhysicalShift()
    if moving_structure:
        registration.SetMetricMovingMask(moving_structure)
    if fixed_structure:
        registration.SetMetricFixedMask(fixed_structure)
    if reg_method.lower() == "translation":
        registration.SetInitialTransform(sitk.TranslationTransform(3))
    elif reg_method.lower() == "similarity":
        registration.SetInitialTransform(sitk.Similarity3DTransform())
    elif reg_method.lower() == "affine":
        registration.SetInitialTransform(sitk.AffineTransform(3))
    elif reg_method.lower() == "rigid":
        registration.SetInitialTransform(sitk.VersorRigid3DTransform())
    elif reg_method.lower() == "scaleversor":
        registration.SetInitialTransform(sitk.ScaleVersor3DTransform())
    elif reg_method.lower() == "scaleskewversor":
        registration.SetInitialTransform(sitk.ScaleSkewVersor3DTransform())
    else:
        raise ValueError(
            "You have selected a registration method that does not exist.\n Please select from Translation, Similarity, Affine, Rigid"
        )
    if optimiser.lower() == "lbfgsb":
        registration.SetOptimizerAsLBFGSB(
            gradientConvergenceTolerance=1e-5,
            numberOfIterations=number_of_iterations,
            maximumNumberOfCorrections=50,
            maximumNumberOfFunctionEvaluations=1024,
            costFunctionConvergenceFactor=1e7,
            trace=trace,
        )
    elif optimiser.lower() == "exhaustive":
        """
        This isn't well implemented
        Needs some work to give options for sampling rates
        Use is not currently recommended
        """
        samples = [10, 10, 10, 10, 10, 10]
        registration.SetOptimizerAsExhaustive(samples)
    elif optimiser.lower() == "gradient_descent_line_search":
        registration.SetOptimizerAsGradientDescentLineSearch(
            learningRate=1.0, numberOfIterations=number_of_iterations
        )
    elif optimiser.lower() == "gradient_descent":
        registration.SetOptimizerAsGradientDescent(
            learningRate=1.0, numberOfIterations=number_of_iterations
        )
    if trace:
        registration.AddCommand(
            sitk.sitkIterationEvent,
            lambda: initial_registration_command_iteration(registration),
        )
    output_transform = registration.Execute(fixed=fixed_image, moving=moving_image)
    # Combine initial and optimised transform
    combined_transform = sitk.CompositeTransform([initial_transform, output_transform])
    registered_image = transform_propagation(
        fixed_image,
        moving_image,
        combined_transform,
        default_value=default_value,
        interp=final_interp,
    )
    registered_image = sitk.Cast(registered_image, moving_image_type)
    return registered_image, combined_transform
def transform_propagation(
    fixed_image,
    moving_image,
    transform,
    structure=False,
    default_value=-1024,
    interp=sitk.sitkNearestNeighbor,
    debug=False,
):
    """
    Transform propagation using ITK
    Args
        fixed_image (sitk.Image) : the reference image defining the output grid
        moving_image (sitk.Image) : the moving image, to be propagated
        transform (sitk.transform) : the transformation; e.g. VersorRigid3DTransform,
                                     AffineTransform
        structure (bool) : True if the image is a structure (binary) image
        default_value : fill value for voxels mapped from outside moving_image
        interp (int) : the interpolation
                            sitk.sitkNearestNeighbor
                            sitk.sitkLinear
                            sitk.sitkBSpline
        debug (bool) : print extra information
    Returns
        output_image (sitk.Image) : the transformed moving image
    """
    resample_filter = sitk.ResampleImageFilter()
    resample_filter.SetReferenceImage(fixed_image)
    resample_filter.SetTransform(transform)
    resample_filter.SetInterpolator(interp)
    # Structures fill empty space with 0; intensity images with default_value.
    resample_filter.SetDefaultPixelValue(0 if structure else default_value)
    output_image = resample_filter.Execute(moving_image)
    if structure and interp > 1:
        if debug:
            print(
                "Note: Higher order interpolation on binary mask - using 32-bit floating point output"
            )
        output_image = sitk.Cast(output_image, sitk.sitkFloat32)
        # Safe way to remove dodgy values that can cause issues later
        output_image = sitk.Threshold(output_image, lower=1e-5, upper=100.0)
    else:
        output_image = sitk.Cast(output_image, moving_image.GetPixelID())
    return output_image
def smooth_and_resample(
    image,
    shrink_factor,
    smoothing_sigma,
    isotropic_resample=False,
    resampler=sitk.sitkLinear,
):
    """
    Gaussian-smooth an image and resample it onto a coarser grid.

    Args:
        image: The image we want to resample.
        shrink_factor: A number greater than one, such that the new image's
            size is original_size/shrink_factor. If isotropic_resample is
            True, this instead defines the voxel size (mm). A per-axis list
            is also accepted when isotropic_resample is False.
        smoothing_sigma: Sigma for Gaussian smoothing, in physical
            (image spacing) units, not pixels.
        isotropic_resample: A flag that changes the behaviour to resample the
            image to isotropic voxels of size (shrink_factor).
        resampler: The SimpleITK interpolator used for resampling.
    Return:
        Image which is a result of smoothing the input and then resampling it
        using the given sigma and shrink factor.
    """
    if smoothing_sigma > 0:
        # Cap the Gaussian kernel width so large sigmas remain tractable.
        kernel_width_limit = int(
            max(8 * smoothing_sigma * spacing for spacing in image.GetSpacing())
        )
        smoothed_image = sitk.DiscreteGaussian(
            image, smoothing_sigma ** 2, kernel_width_limit
        )
    else:
        smoothed_image = image

    original_spacing = image.GetSpacing()
    original_size = image.GetSize()

    if isotropic_resample:
        # Interpret shrink_factor as a target voxel size (mm): derive a
        # per-axis scale factor from the existing spacing.
        per_axis_scale = (
            shrink_factor * np.ones_like(image.GetSize()) / np.array(image.GetSpacing())
        )
        new_size = [
            int(dim / float(scale) + 0.5)
            for dim, scale in zip(original_size, per_axis_scale)
        ]
    elif type(shrink_factor) == list:
        new_size = [
            int(dim / float(factor) + 0.5)
            for dim, factor in zip(original_size, shrink_factor)
        ]
    else:
        new_size = [int(dim / float(shrink_factor) + 0.5) for dim in original_size]

    # Choose the output spacing so the physical extent between the first and
    # last voxel centres is preserved.
    new_spacing = [
        ((size_in - 1) * spacing_in) / (size_out - 1)
        for size_in, spacing_in, size_out in zip(
            original_size, original_spacing, new_size
        )
    ]
    return sitk.Resample(
        smoothed_image,
        new_size,
        sitk.Transform(),
        resampler,
        image.GetOrigin(),
        new_spacing,
        image.GetDirection(),
        0.0,
        image.GetPixelID(),
    )
def multiscale_demons(
    registration_algorithm,
    fixed_image,
    moving_image,
    initial_transform=None,
    initial_displacement_field=None,
    shrink_factors=None,
    smoothing_sigmas=None,
    iteration_staging=None,
    isotropic_resample=False,
    return_field=False,
):
    """
    Run the given registration algorithm in a multiscale fashion. The original scale should not be
    given as input as the original images are implicitly incorporated as the base of the pyramid.
    Args:
        registration_algorithm: Any registration algorithm that has an Execute(fixed_image,
                                moving_image, displacement_field_image) method.
        fixed_image: Resulting transformation maps points from this image's spatial domain to the
                     moving image spatial domain.
        moving_image: Resulting transformation maps points from the fixed_image's spatial domain to
                      this image's spatial domain.
        initial_transform: Any SimpleITK transform, used to initialize the displacement field.
        initial_displacement_field: Initial displacement field, if this is provided
                                    initial_transform will be ignored
        shrink_factors: Shrink factors relative to the original image's size.
        smoothing_sigmas: Amount of smoothing which is done prior to resampling the image using the
                          given shrink factor. These are in physical (image spacing) units.
        iteration_staging: Number of iterations to run at each pyramid level, coarsest first.
        isotropic_resample: If True, shrink_factors define isotropic voxel sizes (mm) per level.
        return_field: If True, also return the displacement (vector) field image.
    Returns:
        SimpleITK.DisplacementFieldTransform
        [Optional] Displacement (vector) field
    """
    # Create image pyramid.
    # Levels are appended in reversed (factor, sigma) order, so index 0 is
    # the finest level and index -1 the coarsest.
    fixed_images = []
    moving_images = []
    for shrink_factor, smoothing_sigma in reversed(
        list(zip(shrink_factors, smoothing_sigmas))
    ):
        fixed_images.append(
            smooth_and_resample(
                fixed_image,
                shrink_factor,
                smoothing_sigma,
                isotropic_resample=isotropic_resample,
            )
        )
        moving_images.append(
            smooth_and_resample(
                moving_image,
                shrink_factor,
                smoothing_sigma,
                isotropic_resample=isotropic_resample,
            )
        )
    # Create initial displacement field at lowest resolution.
    # Currently, the pixel type is required to be sitkVectorFloat64 because of a constraint imposed
    # by the Demons filters.
    if not initial_displacement_field:
        if initial_transform:
            initial_displacement_field = sitk.TransformToDisplacementField(
                initial_transform,
                sitk.sitkVectorFloat64,
                fixed_images[-1].GetSize(),
                fixed_images[-1].GetOrigin(),
                fixed_images[-1].GetSpacing(),
                fixed_images[-1].GetDirection(),
            )
        else:
            # No initialisation given: start from a zero field matching the
            # coarsest fixed image (2D or 3D).
            if len(moving_image.GetSize()) == 2:
                initial_displacement_field = sitk.Image(
                    fixed_images[-1].GetWidth(),
                    fixed_images[-1].GetHeight(),
                    sitk.sitkVectorFloat64,
                )
            elif len(moving_image.GetSize()) == 3:
                initial_displacement_field = sitk.Image(
                    fixed_images[-1].GetWidth(),
                    fixed_images[-1].GetHeight(),
                    fixed_images[-1].GetDepth(),
                    sitk.sitkVectorFloat64,
                )
            initial_displacement_field.CopyInformation(fixed_images[-1])
    else:
        initial_displacement_field = sitk.Resample(
            initial_displacement_field, fixed_images[-1]
        )
    # Run the registration.
    # Coarsest level first; iteration_staging[0] iterations.
    iters = iteration_staging[0]
    registration_algorithm.SetNumberOfIterations(iters)
    initial_displacement_field = registration_algorithm.Execute(
        fixed_images[-1], moving_images[-1], initial_displacement_field
    )
    # Start at the top of the pyramid and work our way down.
    # Each step upsamples the current field to the next (finer) level before
    # continuing the registration there.
    for i, (f_image, m_image) in enumerate(
        reversed(list(zip(fixed_images[0:-1], moving_images[0:-1])))
    ):
        initial_displacement_field = sitk.Resample(initial_displacement_field, f_image)
        iters = iteration_staging[i + 1]
        registration_algorithm.SetNumberOfIterations(iters)
        initial_displacement_field = registration_algorithm.Execute(
            f_image, m_image, initial_displacement_field
        )
    # NOTE(review): resampling the field onto its own grid looks like a
    # no-op; presumably this creates a copy before DisplacementFieldTransform
    # takes ownership of the field image below — confirm intent.
    output_displacement_field = sitk.Resample(
        initial_displacement_field, initial_displacement_field
    )
    if return_field:
        return (
            sitk.DisplacementFieldTransform(initial_displacement_field),
            output_displacement_field,
        )
    else:
        return sitk.DisplacementFieldTransform(initial_displacement_field)
def fast_symmetric_forces_demons_registration(
    fixed_image,
    moving_image,
    resolution_staging=[8, 4, 1],
    iteration_staging=[10, 10, 10],
    isotropic_resample=False,
    initial_displacement_field=None,
    smoothing_sigma_factor=1,
    smoothing_sigmas=False,
    default_value=-1024,
    ncores=1,
    structure=False,
    interp_order=2,
    trace=False,
    return_field=False,
):
    """
    Deformable image propagation using Fast Symmetric-Forces Demons
    Args
        fixed_image (sitk.Image) : the fixed image
        moving_image (sitk.Image) : the moving image, to be deformable registered (must be in
                                    the same image space)
        resolution_staging (list[int]) : down-sampling factor for each resolution level
        iteration_staging (list[int]) : number of iterations for each resolution level
        isotropic_resample (bool) : flag to request isotropic resampling of images, in which
                                    case resolution_staging is used to define voxel size (mm) per level
        initial_displacement_field (sitk.Image) : Initial displacement field to use
        smoothing_sigmas (list|bool) : explicit per-level smoothing sigmas; when False,
                                       they are derived from resolution_staging
        default_value : fill value for voxels mapped from outside moving_image
        ncores (int) : number of processing cores to use
        structure (bool) : True if the image is a structure image
        smoothing_sigma_factor (float) : the relative width of the Gaussian smoothing kernel
        interp_order (int) : the interpolation order
                            1 = Nearest neighbour
                            2 = Bi-linear splines
                            3 = B-Spline (cubic)
        trace (bool) : print per-iteration registration information
        return_field (bool) : also return the resampled deformation field
    Returns
        registered_image (sitk.Image) : the registered moving image
        output_transform : the displacement field transform
        [optional] deformation_field
    NOTE(review): resolution_staging and iteration_staging are mutable list
    defaults shared across calls — safe only while they are never mutated.
    """
    # Cast to floating point representation, if necessary
    # NOTE(review): 6 appears to be the sitkFloat32 pixel ID; consider the
    # named constant for clarity.
    moving_image_type = moving_image.GetPixelID()
    if fixed_image.GetPixelID() != 6:
        fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
    if moving_image.GetPixelID() != 6:
        moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
    # Set up the appropriate image filter
    registration_method = sitk.FastSymmetricForcesDemonsRegistrationFilter()
    # Multi-resolution framework
    registration_method.SetNumberOfThreads(ncores)
    registration_method.SetSmoothUpdateField(True)
    registration_method.SetSmoothDisplacementField(True)
    registration_method.SetStandardDeviations(1.5)
    # This allows monitoring of the progress
    if trace:
        registration_method.AddCommand(
            sitk.sitkIterationEvent,
            lambda: deformable_registration_command_iteration(registration_method),
        )
    # Derive per-level sigmas from the resolution staging unless given.
    if not smoothing_sigmas:
        smoothing_sigmas = [i * smoothing_sigma_factor for i in resolution_staging]
    output = multiscale_demons(
        registration_algorithm=registration_method,
        fixed_image=fixed_image,
        moving_image=moving_image,
        shrink_factors=resolution_staging,
        smoothing_sigmas=smoothing_sigmas,
        iteration_staging=iteration_staging,
        isotropic_resample=isotropic_resample,
        initial_displacement_field=initial_displacement_field,
        return_field=return_field,
    )
    if return_field:
        output_transform, deformation_field = output
    else:
        output_transform = output
    # Resample the moving image through the computed deformation.
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed_image)
    resampler.SetInterpolator(interp_order)
    if structure:
        resampler.SetDefaultPixelValue(0)
    else:
        resampler.SetDefaultPixelValue(default_value)
    resampler.SetTransform(output_transform)
    registered_image = resampler.Execute(moving_image)
    if structure:
        # Re-binarise structures that were interpolated during resampling.
        registered_image = sitk.Cast(registered_image, sitk.sitkFloat32)
        registered_image = sitk.BinaryThreshold(
            registered_image, lowerThreshold=1e-5, upperThreshold=100
        )
    registered_image.CopyInformation(fixed_image)
    registered_image = sitk.Cast(registered_image, moving_image_type)
    if return_field:
        # Bring the field onto the fixed image grid before returning.
        resampled_field = sitk.Resample(deformation_field, fixed_image)
        return registered_image, output_transform, resampled_field
    else:
        return registered_image, output_transform
def apply_field(
    input_image,
    transform,
    structure=False,
    default_value=-1024,
    interp=sitk.sitkNearestNeighbor,
):
    """
    Transform a volume or structure with the given deformation field.
    Args
        input_image (sitk.Image) : the image to transform (also used as the
                                   reference grid for resampling)
        transform (sitk.Transform) : the transform to apply to the structure or mask
        structure (bool) : if true, the input will be treated as a structure, as a volume otherwise
        default_value : fill value for voxels mapped from outside input_image
        interp (int) : the type of interpolation to use, eg. sitk.sitkNearestNeighbor
    Returns
        (sitk.Image) : the transformed image, cast back to the input pixel type
    """
    input_image_type = input_image.GetPixelIDValue()
    resample_filter = sitk.ResampleImageFilter()
    resample_filter.SetReferenceImage(input_image)
    # Structures fill empty space with 0; intensity images with default_value.
    resample_filter.SetDefaultPixelValue(0 if structure else default_value)
    resample_filter.SetTransform(transform)
    resample_filter.SetInterpolator(interp)
    resampled_image = resample_filter.Execute(sitk.Cast(input_image, sitk.sitkFloat32))
    return sitk.Cast(resampled_image, input_image_type)
def bspline_registration(
    fixed_image,
    moving_image,
    moving_structure=False,
    fixed_structure=False,
    options={
        "resolution_staging": [8, 4, 2],
        "smooth_sigmas": [4, 2, 1],
        "sampling_rate": 0.1,
        "optimiser": "LBFGS",
        "metric": "correlation",
        "initial_grid_spacing": 64,
        "grid_scale_factors": [1, 2, 4],
        "interp_order": 3,
        "default_value": -1024,
        "number_of_iterations": 20,
    },
    isotropic_resample=False,
    initial_isotropic_size=1,
    initial_isotropic_smooth_scale=0,
    trace=False,
    ncores=8,
    debug=False,
):
    """
    B-Spline image registration using ITK
    IMPORTANT - THIS IS UNDER ACTIVE DEVELOPMENT
    Args
        fixed_image (sitk.Image) : the fixed image
        moving_image (sitk.Image): the moving image, transformed to match fixed_image
        options (dict) : registration options
        structure (bool) : True if the image is a structure image
    Returns
        registered_image (sitk.Image): the rigidly registered moving image
        transform (transform : the transform, can be used directly with
                              sitk.ResampleImageFilter
    Notes:
     - smooth_sigmas are relative to resolution staging
        e.g. for image spacing of 1x1x1 mm^3, with smooth sigma=2 and resolution_staging=4, the scale of the Gaussian filter would be 2x4 = 8mm (i.e. 8x8x8 mm^3)
    NOTE(review): `options` is a mutable dict default shared across calls —
    safe only while callers never mutate it; consider a None default.
    """
    # Get the settings
    resolution_staging = options["resolution_staging"]
    smooth_sigmas = options["smooth_sigmas"]
    sampling_rate = options["sampling_rate"]
    optimiser = options["optimiser"]
    metric = options["metric"]
    initial_grid_spacing = options["initial_grid_spacing"]
    grid_scale_factors = options["grid_scale_factors"]
    number_of_iterations = options["number_of_iterations"]
    interp_order = options["interp_order"]
    default_value = options["default_value"]
    # Re-cast input images
    fixed_image = sitk.Cast(fixed_image, sitk.sitkFloat32)
    moving_image_type = moving_image.GetPixelID()
    moving_image = sitk.Cast(moving_image, sitk.sitkFloat32)
    # (Optional) isotropic resample
    # This changes the behaviour, so care should be taken
    # For highly anisotropic images may be preferable
    if isotropic_resample:
        # First, copy the fixed image so we can resample back into this space at the end
        fixed_image_original = fixed_image
        fixed_image_original.MakeUnique()
        fixed_image = smooth_and_resample(
            fixed_image,
            initial_isotropic_size,
            initial_isotropic_smooth_scale,
            isotropic_resample=True,
        )
        moving_image = smooth_and_resample(
            moving_image,
            initial_isotropic_size,
            initial_isotropic_smooth_scale,
            isotropic_resample=True,
        )
    else:
        fixed_image_original = fixed_image
    # Set up image registration method
    registration = sitk.ImageRegistrationMethod()
    registration.SetNumberOfThreads(ncores)
    registration.SetShrinkFactorsPerLevel(resolution_staging)
    registration.SetSmoothingSigmasPerLevel(smooth_sigmas)
    registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
    # Choose optimiser
    if optimiser == "LBFGSB":
        registration.SetOptimizerAsLBFGSB(
            gradientConvergenceTolerance=1e-5,
            numberOfIterations=number_of_iterations,
            maximumNumberOfCorrections=5,
            maximumNumberOfFunctionEvaluations=1024,
            costFunctionConvergenceFactor=1e7,
            trace=trace,
        )
    elif optimiser == "LBFGS":
        registration.SetOptimizerAsLBFGS2(
            numberOfIterations=number_of_iterations,
            solutionAccuracy=1e-2,
            hessianApproximateAccuracy=6,
            deltaConvergenceDistance=0,
            deltaConvergenceTolerance=0.01,
            lineSearchMaximumEvaluations=40,
            lineSearchMinimumStep=1e-20,
            lineSearchMaximumStep=1e20,
            lineSearchAccuracy=0.01,
        )
    elif optimiser == "CGLS":
        registration.SetOptimizerAsConjugateGradientLineSearch(
            learningRate=0.05, numberOfIterations=number_of_iterations
        )
        registration.SetOptimizerScalesFromPhysicalShift()
    elif optimiser == "GradientDescent":
        registration.SetOptimizerAsGradientDescent(
            learningRate=5.0,
            numberOfIterations=number_of_iterations,
            convergenceMinimumValue=1e-6,
            convergenceWindowSize=10,
        )
        registration.SetOptimizerScalesFromPhysicalShift()
    elif optimiser == "GradientDescentLineSearch":
        registration.SetOptimizerAsGradientDescentLineSearch(
            learningRate=1.0, numberOfIterations=number_of_iterations
        )
        registration.SetOptimizerScalesFromPhysicalShift()
    # Set metric
    if metric == "correlation":
        registration.SetMetricAsCorrelation()
    elif metric == "mean_squares":
        registration.SetMetricAsMeanSquares()
    elif metric == "demons":
        registration.SetMetricAsDemons()
    elif metric == "mutual_information":
        # NOTE(review): bare except — narrows poorly; a missing key is the
        # case intended here (KeyError).
        try:
            number_of_histogram_bins = options["number_of_histogram_bins"]
        except:
            number_of_histogram_bins = 30
        registration.SetMetricAsMattesMutualInformation(
            numberOfHistogramBins=number_of_histogram_bins
        )
    registration.SetInterpolator(sitk.sitkLinear)
    # Set sampling
    # A scalar applies one rate overall; a list sets a rate per level.
    if type(sampling_rate) == float:
        registration.SetMetricSamplingPercentage(sampling_rate)
    elif type(sampling_rate) in [np.ndarray, list]:
        registration.SetMetricSamplingPercentagePerLevel(sampling_rate)
    registration.SetMetricSamplingStrategy(sitk.ImageRegistrationMethod.REGULAR)
    # Set masks
    if moving_structure is not False:
        registration.SetMetricMovingMask(moving_structure)
    if fixed_structure is not False:
        registration.SetMetricFixedMask(fixed_structure)
    # Set control point spacing
    transform_domain_mesh_size = control_point_spacing_distance_to_number(
        fixed_image, initial_grid_spacing
    )
    if debug:
        print(f"Initial grid size: {transform_domain_mesh_size}")
    # Initialise transform
    initial_transform = sitk.BSplineTransformInitializer(
        fixed_image,
        transformDomainMeshSize=[int(i) for i in transform_domain_mesh_size],
    )
    registration.SetInitialTransformAsBSpline(
        initial_transform, inPlace=True, scaleFactors=grid_scale_factors
    )
    # (Optionally) add iteration commands
    if trace:
        registration.AddCommand(
            sitk.sitkIterationEvent,
            lambda: initial_registration_command_iteration(registration),
        )
        registration.AddCommand(
            sitk.sitkMultiResolutionIterationEvent,
            lambda: stage_iteration(registration),
        )
    # Run the registration
    output_transform = registration.Execute(fixed=fixed_image, moving=moving_image)
    # Resample moving image
    # Resample back onto the (possibly pre-isotropic-resample) original grid.
    registered_image = transform_propagation(
        fixed_image_original,
        moving_image,
        output_transform,
        default_value=default_value,
        interp=interp_order,
    )
    registered_image = sitk.Cast(registered_image, moving_image_type)
    # Return outputs
    return registered_image, output_transform
|
nilq/baby-python
|
python
|
"""Overview plots of transcet"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from os.path import join
from src import sonic_layer_depth
plt.ion()
bbox = dict(boxstyle='round', fc='w')
savedir = 'reports/jasa/figures'
c_fields = np.load('data/processed/inputed_decomp.npz')
z_a = c_fields['z_a']
x_a = c_fields['x_a']
c_bg = c_fields['c_bg']
c_tilt = c_fields['c_tilt']
c_spice = c_fields['c_spice']
c_total = c_fields['c_total']
prof_i = 150
plt_i = z_a <= 150.
c_field = c_total[plt_i, :]
prop_i = z_a <= 150.
sld_z, _ = sonic_layer_depth(z_a[plt_i], c_field)
fig, ax = plt.subplots(figsize=(6.5, 3))
ax.plot(x_a / 1e3, sld_z, 'k')
reg = linregress(x_a, sld_z)
ax.plot(x_a / 1e3, x_a * reg.slope + reg.intercept, 'C0')
#ax[0].text(120, 20, f'm={reg.slope * 1e3:0.3f} m'+' km$^{-1}$',
#bbox=bbox)
ax.set_xlabel('Range (km)')
ax.set_ylabel('Sonic layer depth (m)')
ax.grid()
ax.set_ylim(150, 0)
ax.set_xlim(0, 970)
pos = ax.get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 += 0.04
pos.y1 += 0.08
ax.set_position(pos)
fig.savefig(join(savedir, 'sld_linregress.png'), dpi=300)
"""
fig, ax = plt.subplots(3, 1, sharex=True, figsize=(6.5, 6))
ax[0].plot(sec4.x_a / 1e3, sld_z, 'k')
reg = linregress(sec4.x_a, sld_z)
ax[0].plot(sec4.x_a / 1e3, sec4.x_a * reg.slope + reg.intercept, 'C0')
#ax[0].text(120, 20, f'm={reg.slope * 1e3:0.3f} m'+' km$^{-1}$',
#bbox=bbox)
i_40 = np.argmin(np.abs(sec4.z_a - 40))
tau = sec4.spice[i_40, :]
reg = linregress(sec4.x_a, tau)
ax[1].plot(sec4.x_a / 1e3, tau, 'k')
ax[1].plot(sec4.x_a / 1e3, sec4.x_a * reg.slope + reg.intercept, 'C0')
#ax[1].text(20, 2.3, f'm={reg.slope * 1e3:0.3e}'+' kg/m$^3$ km$^{-1}$',
#bbox=bbox)
sig = sec4.sigma0[i_40, :]
reg = linregress(sec4.x_a, sig)
ax[2].plot(sec4.x_a / 1e3, sig, 'k')
ax[2].plot(sec4.x_a / 1e3, sec4.x_a * reg.slope + reg.intercept, 'C0')
#ax[2].text(20, 24.95, f'm={reg.slope * 1e3:0.3e}' +' kg /m$^3$ km$^{-1}$',
#bbox=bbox)
ax[2].set_xlabel('Range (km)')
ax[0].set_ylabel('Sonic layer depth (m)')
ax[1].set_ylabel(r'$\tau$ (kg / m$^3$)')
ax[2].set_ylabel(r'$\sigma_0$ (kg / m$^3$)')
ax[0].grid()
ax[1].grid()
ax[2].grid()
ax[0].set_xlim(0, 970)
ax[0].set_ylim(150, 0)
pos = ax[0].get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 += 0.04
pos.y1 += 0.10
ax[0].set_position(pos)
pos = ax[1].get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 += 0.005
pos.y1 += 0.065
ax[1].set_position(pos)
pos = ax[2].get_position()
pos.x0 -= 0.01
pos.x1 += 0.08
pos.y0 -= 0.03
pos.y1 += 0.03
ax[2].set_position(pos)
fig.savefig(join(savedir, 'sld_dens_linregress.png'), dpi=300)
"""
|
nilq/baby-python
|
python
|
import time
from datetime import datetime, timedelta
import funcy
from unittest import mock
import asyncio
from octoprint.events import EventManager
async def wait_untill(
    condition,
    poll_period=timedelta(seconds=1),
    timeout=timedelta(seconds=10),
    condition_name="no_name",
    time=time.time,
    *condition_args,
    **condition_kwargs,
):
    """
    Wait until the given condition function returns true.

    Args:
        condition: Callable evaluated with *condition_args / **condition_kwargs
        poll_period: How often to re-evaluate the condition
        timeout: Total time to wait for the condition
        condition_name: A human friendly name for the condition that will be
            mentioned in the Timeout error
        time: Callable that returns the current time in seconds since epoch
    Raises:
        TimeoutError: if the condition is still false after `timeout`
    """
    # Fix: the docstring previously documented a nonexistent `sleep`
    # parameter; sleeping is done via asyncio.sleep below.
    # Stdlib replacement for the third-party funcy.partial used before.
    import functools

    # holds the starting time in seconds since the epoch
    # NOTE: times are truncated to whole seconds, so the effective timeout
    # resolution is one second.
    start_time = int(time())
    cond_callable = functools.partial(condition, *condition_args, **condition_kwargs)
    condition_is_true = cond_callable()
    while (
        int(time()) < start_time + timeout.total_seconds()
        and not condition_is_true
    ):
        await asyncio.sleep(poll_period.total_seconds())
        condition_is_true = cond_callable()
    if not condition_is_true:
        raise TimeoutError(
            f"Waited {timeout} time for condition '{condition_name}' to be True"
        )
async def wait_untill_event(
    event_manager: EventManager,
    event,
    payload=None,
    poll_period=timedelta(seconds=1),
    timeout=timedelta(seconds=10),
):
    """Wait until `event` with `payload` is published on the event manager.

    A mock subscriber records published calls; the subscription is always
    removed again, even when the wait times out.
    """
    try:
        subscriber = mock.Mock()

        def event_was_published():
            # The most recent call must match the expected event/payload pair.
            return subscriber.call_args == mock.call(event, payload)

        event_manager.subscribe(event=event, callback=subscriber)
        await wait_untill(
            condition=event_was_published,
            poll_period=poll_period,
            timeout=timeout,
            condition_name=f"Event {event} was published",
        )
    finally:
        event_manager.unsubscribe(event=event, callback=subscriber)
|
nilq/baby-python
|
python
|
from subsystems.drivesubsystem import DriveSubsystem
import commands2
import wpilib
#time in seconds
ticksDistance = 0
class rotateArm(commands2.CommandBase):
    """Command that runs an arm motor at a fixed power until the encoder has
    travelled ``distance`` in either direction.

    NOTE(review): this class reads ``self.rotateArm`` and
    ``self.rotateArmEncoder`` but never assigns them; presumably they are
    meant to come from a subsystem passed in or set elsewhere -- confirm
    before using this command.
    """

    def __init__(self, power: float, distance: float) -> None:
        super().__init__()
        # Motor output to apply while the command runs.
        self.power = power
        # Encoder travel (in encoder units) at which the command finishes.
        self.distance = distance

    def initialize(self) -> None:
        # Nothing to reset when the command is scheduled.
        pass

    def execute(self) -> None:
        # Called every scheduler iteration: keep driving the motor.
        self.rotateArm.set(self.power)

    def end(self):
        # Stop the motor when the command ends.
        # NOTE(review): commands2.CommandBase.end normally receives an
        # ``interrupted: bool`` argument -- confirm this signature is intended.
        self.rotateArm.set(0)

    def isFinished(self) -> bool:
        # Finished once the encoder has moved beyond +/- distance.
        return self.rotateArmEncoder.getPosition() > self.distance or self.rotateArmEncoder.getPosition() < -self.distance
|
nilq/baby-python
|
python
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import structlog
import arrow
from voltha.protos.events_pb2 import AlarmEventType, \
AlarmEventSeverity, AlarmEventState, AlarmEventCategory
# TODO: In the device adapter, the following alarms are still TBD
# (Taken from microsemi, so mileage may vary)
# ON_ALARM_SOFTWARE_ERROR = 0
# PON_ALARM_LOS = 1
# PON_ALARM_LOSI = 2
# PON_ALARM_DOWI = 3
# PON_ALARM_LOFI = 4
# PON_ALARM_RDII = 5
# PON_ALARM_LOAMI = 6
# PON_ALARM_LCDGI = 7
# PON_ALARM_LOAI = 8
# PON_ALARM_SDI = 9
# PON_ALARM_SFI = 10
# PON_ALARM_PEE = 11
# PON_ALARM_DGI = 12
# PON_ALARM_LOKI = 13
# PON_ALARM_TIWI = 14
# PON_ALARM_TIA = 15
# PON_ALARM_VIRTUAL_SCOPE_ONU_LASER_ALWAYS_ON = 16
# PON_ALARM_VIRTUAL_SCOPE_ONU_SIGNAL_DEGRADATION = 17
# PON_ALARM_VIRTUAL_SCOPE_ONU_EOL = 18
# PON_ALARM_VIRTUAL_SCOPE_ONU_EOL_DATABASE_IS_FULL = 19
# PON_ALARM_AUTH_FAILED_IN_REGISTRATION_ID_MODE = 20
# PON_ALARM_SUFI = 21
class AdapterAlarms:
    """Formats and submits alarm events on behalf of a device adapter.

    Holds the adapter/device identity so that individual alarms only need to
    supply their alarm-specific fields.
    """

    def __init__(self, adapter, device_id):
        self.log = structlog.get_logger(device_id=device_id)
        self.adapter = adapter
        self.device_id = device_id
        self.lc = None

    def format_id(self, alarm):
        """Return the canonical alarm id: voltha.<adapter>.<device>.<alarm>."""
        return 'voltha.{}.{}.{}'.format(self.adapter.name,
                                        self.device_id,
                                        alarm)

    def format_description(self, _object, alarm, status):
        """Return a human readable description; ``status`` True means raised."""
        return '{} Alarm - {} - {}'.format(_object.upper(),
                                           alarm.upper(),
                                           'Raised' if status else 'Cleared')

    def send_alarm(self, context_data, alarm_data):
        """Create an alarm event from ``alarm_data`` and submit it.

        ``context_data`` (a dict, or ignored otherwise) is stringified into
        the event context. Failures are logged, never propagated.
        """
        try:
            current_context = {}
            if isinstance(context_data, dict):
                # items() instead of the Python-2-only iteritems(), so this
                # also runs under Python 3 (identical behavior on Python 2).
                for key, value in context_data.items():
                    current_context[key] = str(value)
            alarm_event = self.adapter.adapter_agent.create_alarm(
                id=alarm_data.get('id', 'voltha.{}.{}.olt'.format(self.adapter.name,
                                                                  self.device_id)),
                resource_id=str(alarm_data.get('resource_id', self.device_id)),
                description="{}.{} - {}".format(self.adapter.name, self.device_id,
                                                alarm_data.get('description')),
                type=alarm_data.get('type'),
                category=alarm_data.get('category'),
                severity=alarm_data.get('severity'),
                state=alarm_data.get('state'),
                raised_ts=alarm_data.get('ts', 0),
                context=current_context
            )
            self.adapter.adapter_agent.submit_alarm(self.device_id, alarm_event)
        except Exception as e:
            self.log.exception('failed-to-send-alarm', e=e)
class AlarmBase(object):
    """Base class for a single alarm condition.

    Subclasses describe one alarm and may override get_context_data() to add
    alarm-specific context; raise_alarm()/clear_alarm() build the payload and
    hand it to the handler's alarms helper.
    """

    def __init__(self, handler, object_type, alarm,
                 alarm_category,
                 resource_id=None,
                 alarm_type=AlarmEventType.EQUIPMENT,
                 alarm_severity=AlarmEventSeverity.CRITICAL):
        self._handler = handler
        self._object_type = object_type
        self._alarm = alarm
        self._alarm_category = alarm_category
        self._alarm_type = alarm_type
        self._alarm_severity = alarm_severity
        self._resource_id = resource_id

    def get_alarm_data(self, status):
        """Build the alarm-event fields; ``status`` True = raise, False = clear."""
        alarm_data = {
            'ts': arrow.utcnow().timestamp,
            'description': self._handler.alarms.format_description(self._object_type,
                                                                   self._alarm,
                                                                   status),
            'id': self._handler.alarms.format_id(self._alarm),
            'type': self._alarm_type,
            'category': self._alarm_category,
            'severity': self._alarm_severity,
            'state': AlarmEventState.RAISED if status else AlarmEventState.CLEARED,
        }
        # Only attach a resource id when one was explicitly provided.
        if self._resource_id is not None:
            alarm_data['resource_id'] = self._resource_id
        return alarm_data

    def get_context_data(self):
        # Subclasses override this to supply alarm-specific context.
        return {}

    def raise_alarm(self):
        """Report this alarm as active."""
        data = self.get_alarm_data(True)
        context = self.get_context_data()
        self._handler.alarms.send_alarm(context, data)

    def clear_alarm(self):
        """Report this alarm as cleared."""
        data = self.get_alarm_data(False)
        context = self.get_context_data()
        self._handler.alarms.send_alarm(context, data)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
import numpy as np
from PIL import Image
def load_raw(f, w=3280, h=3280):
    """Decode a packed 12-bit Bayer RAW stream into a half-resolution RGB image.

    Args:
        f: binary file object positioned at the start of the pixel data.
        w, h: sensor dimensions in pixels (from the JSON metadata).

    Returns:
        A PIL ``Image`` of size (w//2, h//2): each 2x2 Bayer cell collapses to
        one RGB pixel, so no real demosaicing is performed.
    """
    # Metadata: width, height
    # Metadata: pixelPacking
    # Two 12-bit pixels are packed into 3 bytes; widen to uint16 ('H') so the
    # shift arithmetic below cannot overflow 8 bits.
    arr = np.fromfile(f, np.uint8, w*h*3//2).reshape((h,w//2,3)).astype('H')
    # Image is big endian 12 bit 2d array, bayered.
    # This is detailed in the TXT (JSON metadata).
    # Bayer pattern is r,gr:gb,b but upper left pixel is blue
    # Metadata: mosaic
    # Each row of 3 bytes holds two 12-bit samples; even sensor rows carry
    # blue/green0, odd rows carry green1/red.
    b = (arr[0::2,:,0]<<4 | arr[0::2,:,1]>>4) & 0xfff
    g0 = (arr[0::2,:,1]<<8 | arr[0::2,:,2] ) & 0xfff
    g1 = (arr[1::2,:,0]<<4 | arr[1::2,:,1]>>4) & 0xfff
    r = (arr[1::2,:,1]<<8 | arr[1::2,:,2] ) & 0xfff
    # Subsampled RGB image for now. Just a proof of concept.
    a = np.zeros((h//2,w//2,3))
    a[:,:,:]=168 # black level
    a[:,:,0] = r
    a[:,:,1] = (g0+g1)/2 # Average since we have more green photosites
    a[:,:,2] = b
    # Rescale a to 0..1 levels (Metadata: pixelFormat)
    a = (a-168)/(4095-168)
    a = np.maximum(a, 0)
    # White balance (Metadata: color)
    a[:,:,0] *= 1.015625
    a[:,:,2] *= 1.2578125
    # Gamma (Metadata: color)
    a **= 0.416660010814666748046875
    a = np.minimum(a, 1.0) # Gain may have pushed values out of range
    #print(a.max(), a.min())
    img = Image.frombytes('RGB', (w//2,h//2), (a*255).astype('B').tobytes())
    return img
# Rendering thoughts: use scipy or ndsplines for interpolation?
# Can both trace rays to specific depth for focusing, or do full 4D
# interpolation for lightfield conversion.
if __name__=='__main__':
    import sys
    # Use the file named on the command line, or fall back to a sample RAW.
    #'sha1-d004cadb9917237bde5145d77d970a4b252de1e9.RAW'
    name = sys.argv[1] if len(sys.argv) > 1 else "../0001.RAW"
    load_raw(open(name, 'rb')).show()
|
nilq/baby-python
|
python
|
# The eight base ANSI foreground colours, in escape-code order (codes 30-37).
ANSI_COLOURS = [
    'grey',
    'red',
    'green',
    'yellow',
    'blue',
    'magenta',
    'cyan',
    'white'
]

# Publish each colour (and its bold/"intense" variant) as a module-level
# name, e.g. ``red == '31'`` and ``intense_red == '31;1'``.
for i, name in enumerate(ANSI_COLOURS):
    code = str(30 + i)
    globals()[name] = code
    globals()['intense_' + name] = code + ';1'


def get_colours():
    """Yield an endless, repeating sequence of distinct colour codes."""
    names = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
             'intense_cyan', 'intense_yellow', 'intense_green',
             'intense_magenta', 'intense_red', 'intense_blue']
    palette = [globals()[n] for n in names]
    position = 0
    while True:
        yield palette[position % len(palette)]
        position += 1
|
nilq/baby-python
|
python
|
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from ..network import *
from ..component import *
from .BaseAgent import *
from copy import deepcopy
import numpy as np
class PPOAgent(BaseAgent):
    """Proximal Policy Optimization agent (clipped surrogate objective).

    Collects ``rollout_length`` steps from a vectorised task with
    ``num_workers`` parallel environments, computes returns/advantages, then
    runs several epochs of minibatch updates on the clipped PPO loss.
    Reference: https://arxiv.org/abs/1707.06347
    """

    def __init__(self, config):
        BaseAgent.__init__(self, config)
        self.config = config
        self.task = config.task_fn()
        # Actor-critic network; predict() returns (actions, log_probs, entropy, values).
        self.network = config.network_fn(self.task.state_dim, self.task.action_dim)
        self.opt = config.optimizer_fn(self.network.parameters())
        self.total_steps = 0
        # Per-worker running episode return; last_episode_rewards records the
        # return of each worker's most recently finished episode.
        self.episode_rewards = np.zeros(config.num_workers)
        self.last_episode_rewards = np.zeros(config.num_workers)
        self.states = self.task.reset()
        self.states = config.state_normalizer(self.states)

    def iteration(self):
        """Run one PPO iteration: rollout, advantage estimation, updates."""
        config = self.config
        rollout = []
        states = self.states
        # ---- rollout collection ------------------------------------------
        for _ in range(config.rollout_length):
            actions, log_probs, _, values = self.network.predict(states)
            next_states, rewards, terminals, _ = self.task.step(actions.cpu().detach().numpy())
            self.episode_rewards += rewards
            # Raw rewards feed the episode statistics; normalized rewards
            # feed the learning update.
            rewards = config.reward_normalizer(rewards)
            for i, terminal in enumerate(terminals):
                if terminals[i]:
                    self.last_episode_rewards[i] = self.episode_rewards[i]
                    self.episode_rewards[i] = 0
            next_states = config.state_normalizer(next_states)
            # Store 1 - terminals so a terminal step zeroes the bootstrap below.
            rollout.append([states, values.detach(), actions.detach(), log_probs.detach(), rewards, 1 - terminals])
            states = next_states

        self.states = states
        # Value of the final state, used to bootstrap the return recursion.
        pending_value = self.network.predict(states)[-1]
        rollout.append([states, pending_value, None, None, None, None])

        # ---- returns / advantages (plain or GAE), computed backwards -----
        processed_rollout = [None] * (len(rollout) - 1)
        advantages = tensor(np.zeros((config.num_workers, 1)))
        returns = pending_value.detach()
        for i in reversed(range(len(rollout) - 1)):
            states, value, actions, log_probs, rewards, terminals = rollout[i]
            terminals = tensor(terminals).unsqueeze(1)
            rewards = tensor(rewards).unsqueeze(1)
            actions = tensor(actions)
            states = tensor(states)
            next_value = rollout[i + 1][1]
            returns = rewards + config.discount * terminals * returns
            if not config.use_gae:
                advantages = returns - value.detach()
            else:
                # Generalized Advantage Estimation (arxiv.org/abs/1506.02438).
                td_error = rewards + config.discount * terminals * next_value.detach() - value.detach()
                advantages = advantages * config.gae_tau * config.discount * terminals + td_error
            processed_rollout[i] = [states, actions, log_probs, returns, advantages]

        states, actions, log_probs_old, returns, advantages = map(lambda x: torch.cat(x, dim=0), zip(*processed_rollout))
        # Normalise advantages over the whole batch.
        advantages = (advantages - advantages.mean()) / advantages.std()

        # ---- optimisation epochs over shuffled minibatches ---------------
        batcher = Batcher(states.size(0) // config.num_mini_batches, [np.arange(states.size(0))])
        for _ in range(config.optimization_epochs):
            batcher.shuffle()
            while not batcher.end():
                batch_indices = batcher.next_batch()[0]
                batch_indices = tensor(batch_indices).long()
                sampled_states = states[batch_indices]
                sampled_actions = actions[batch_indices]
                sampled_log_probs_old = log_probs_old[batch_indices]
                sampled_returns = returns[batch_indices]
                sampled_advantages = advantages[batch_indices]
                _, log_probs, entropy_loss, values = self.network.predict(sampled_states, sampled_actions)
                # Importance ratio between current and behaviour policy.
                ratio = (log_probs - sampled_log_probs_old).exp()
                obj = ratio * sampled_advantages
                obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,
                                          1.0 + self.config.ppo_ratio_clip) * sampled_advantages
                # Clipped surrogate objective plus entropy bonus.
                policy_loss = -torch.min(obj, obj_clipped).mean(0) - config.entropy_weight * entropy_loss.mean()
                value_loss = 0.5 * (sampled_returns - values).pow(2).mean()
                self.opt.zero_grad()
                (policy_loss + value_loss).backward()
                nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
                self.opt.step()

        steps = config.rollout_length * config.num_workers
        self.total_steps += steps
class PPOContinualLearnerAgent(BaseContinualLearnerAgent):
    """PPO agent for continual learning across a sequence of tasks.

    Like PPOAgent, but additionally conditions the network on a task-label
    vector, feeds transitions to a replay buffer (for a task-detect module),
    and returns the mean gradient norm from each iteration.
    """

    def __init__(self, config):
        BaseContinualLearnerAgent.__init__(self, config)
        self.config = config
        self.task = None if config.task_fn is None else config.task_fn()
        if config.eval_task_fn is None:
            self.evaluation_env = None
        else:
            self.evaluation_env = config.eval_task_fn(config.log_dir)
        # Fall back to the evaluation environment when no train task was given.
        self.task = self.evaluation_env if self.task is None else self.task
        tasks_ = self.task.get_all_tasks(config.cl_requires_task_label)
        tasks = [tasks_[task_id] for task_id in config.task_ids]
        del tasks_
        self.config.cl_tasks_info = tasks
        # Dimensionality of the one-hot/embedding task label fed to the network.
        label_dim = 0 if tasks[0]['task_label'] is None else len(tasks[0]['task_label'])

        # set seed before creating network to ensure network parameters are
        # same across all shell agents
        torch.manual_seed(config.seed)
        self.network = config.network_fn(self.task.state_dim, self.task.action_dim, label_dim)
        _params = list(self.network.parameters())
        self.opt = config.optimizer_fn(_params, config.lr)
        self.total_steps = 0
        self.episode_rewards = np.zeros(config.num_workers)
        self.last_episode_rewards = np.zeros(config.num_workers)
        self.states = self.task.reset()
        self.states = config.state_normalizer(self.states)
        # Layer activations from the most recent update (set in iteration()).
        self.layers_output = None
        # Buffer of raw transitions consumed by the task-detect module.
        self.data_buffer = Replay(memory_size=int(1e4), batch_size=256)
        self.curr_train_task_label = None
        self.curr_eval_task_label = None

    def iteration(self):
        """One PPO iteration conditioned on the current task label.

        Returns the mean clipped-gradient norm across all minibatch updates.
        """
        config = self.config
        rollout = []
        states = self.states
        if self.curr_train_task_label is not None:
            task_label = self.curr_train_task_label
        else:
            task_label = self.task.get_task()['task_label']
            # Training is expected to always set curr_train_task_label first.
            assert False, 'manually set (temporary) breakpoint. code should not get here.'
        task_label = tensor(task_label)
        batch_dim = config.num_workers
        # Broadcast the task label across the worker batch dimension.
        if batch_dim == 1:
            batch_task_label = task_label.reshape(1, -1)
        else:
            batch_task_label = torch.repeat_interleave(task_label.reshape(1, -1), batch_dim, dim=0)

        # ---- rollout collection ------------------------------------------
        for _ in range(config.rollout_length):
            _, actions, log_probs, _, values, _ = self.network.predict(states, \
                task_label=batch_task_label)
            next_states, rewards, terminals, _ = self.task.step(actions.cpu().detach().numpy())
            self.episode_rewards += rewards
            rewards = config.reward_normalizer(rewards)
            for i, terminal in enumerate(terminals):
                if terminals[i]:
                    self.last_episode_rewards[i] = self.episode_rewards[i]
                    self.episode_rewards[i] = 0
            next_states = config.state_normalizer(next_states)
            # save data to buffer for the detect module
            self.data_buffer.feed_batch([states, actions, rewards, terminals, next_states])
            rollout.append([states, values.detach(), actions.detach(), log_probs.detach(), \
                rewards, 1 - terminals])
            states = next_states

        self.states = states
        # Bootstrap value of the final state (network returns values at -2).
        pending_value = self.network.predict(states, task_label=batch_task_label)[-2]
        rollout.append([states, pending_value, None, None, None, None])

        # ---- returns / advantages (plain or GAE), computed backwards -----
        processed_rollout = [None] * (len(rollout) - 1)
        advantages = tensor(np.zeros((config.num_workers, 1)))
        returns = pending_value.detach()
        for i in reversed(range(len(rollout) - 1)):
            states, value, actions, log_probs, rewards, terminals = rollout[i]
            terminals = tensor(terminals).unsqueeze(1)
            rewards = tensor(rewards).unsqueeze(1)
            actions = tensor(actions)
            states = tensor(states)
            next_value = rollout[i + 1][1]
            returns = rewards + config.discount * terminals * returns
            if not config.use_gae:
                advantages = returns - value.detach()
            else:
                td_error = rewards + config.discount*terminals*next_value.detach() - value.detach()
                advantages = advantages * config.gae_tau * config.discount * terminals + td_error
            processed_rollout[i] = [states, actions, log_probs, returns, advantages]

        states, actions, log_probs_old, returns, advantages = map(lambda x: torch.cat(x, dim=0), \
            zip(*processed_rollout))
        advantages = (advantages - advantages.mean()) / advantages.std()

        # ---- optimisation epochs over shuffled minibatches ---------------
        grad_norms_ = []
        batcher = Batcher(states.size(0) // config.num_mini_batches, [np.arange(states.size(0))])
        for _ in range(config.optimization_epochs):
            batcher.shuffle()
            while not batcher.end():
                batch_indices = batcher.next_batch()[0]
                batch_indices = tensor(batch_indices).long()
                sampled_states = states[batch_indices]
                sampled_actions = actions[batch_indices]
                sampled_log_probs_old = log_probs_old[batch_indices]
                sampled_returns = returns[batch_indices]
                sampled_advantages = advantages[batch_indices]
                # Re-broadcast the task label to the minibatch size.
                batch_dim = sampled_states.shape[0]
                batch_task_label = torch.repeat_interleave(task_label.reshape(1, -1), batch_dim, \
                    dim=0)
                _, _, log_probs, entropy_loss, values, outs = self.network.predict(sampled_states, \
                    sampled_actions, task_label=batch_task_label, return_layer_output=True)
                # Clipped PPO surrogate objective.
                ratio = (log_probs - sampled_log_probs_old).exp()
                obj = ratio * sampled_advantages
                obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,
                                          1.0 + self.config.ppo_ratio_clip) * sampled_advantages
                policy_loss = -torch.min(obj, obj_clipped).mean(0) \
                    - config.entropy_weight * entropy_loss.mean()
                value_loss = 0.5 * (sampled_returns - values).pow(2).mean()
                self.opt.zero_grad()
                (policy_loss + value_loss).backward()
                norm_ = nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)
                grad_norms_.append(norm_.detach().cpu().numpy())
                self.opt.step()

        steps = config.rollout_length * config.num_workers
        self.total_steps += steps
        # Keep the layer activations of the last processed minibatch.
        self.layers_output = outs
        return np.mean(grad_norms_)
class BaselineAgent(PPOContinualLearnerAgent):
    '''
    Vanilla continual-learning PPO baseline. No mechanism protects earlier
    tasks, so the agent is expected to exhibit catastrophic forgetting; it
    merely records which task label is currently being trained/evaluated.
    '''

    def __init__(self, config):
        PPOContinualLearnerAgent.__init__(self, config)

    def task_train_start(self, task_label):
        # Remember the label so iteration() conditions the network on it.
        self.curr_train_task_label = task_label

    def task_train_end(self):
        self.curr_train_task_label = None

    def task_eval_start(self, task_label):
        self.curr_eval_task_label = task_label

    def task_eval_end(self):
        self.curr_eval_task_label = None
class LLAgent(PPOContinualLearnerAgent):
    '''
    PPO continual learning agent using supermask superposition algorithm
    task oracle available: agent informed about task boundaries (i.e., when
    one task ends and the other begins)
    supermask lifelong learning algorithm: https://arxiv.org/abs/2006.14769
    '''
    def __init__(self, config):
        PPOContinualLearnerAgent.__init__(self, config)
        self.seen_tasks = {} # contains task labels that agent has experienced so far.
        self.new_task = False
        self.curr_train_task_label = None

    def _label_to_idx(self, task_label):
        # Map a task-label vector to the agent's internal task index using an
        # L2-distance match within tolerance eps. Returns None if unseen.
        eps = 1e-5
        found_task_idx = None
        for task_idx, seen_task_label in self.seen_tasks.items():
            if np.linalg.norm((task_label - seen_task_label), ord=2) < eps:
                found_task_idx = task_idx
                break
        return found_task_idx

    def _select_mask(self, agents, masks, ensemble=False):
        # Pick the first non-None mask offered by peer agents; combining
        # several masks into an ensemble is not implemented.
        found_mask = None
        if ensemble:
            raise NotImplementedError
        else:
            for agent, mask in zip(agents, masks):
                if mask is not None:
                    found_mask = mask
                    break
        return found_mask

    def task_train_start(self, task_label):
        # Select (or register) the supermask for the task about to be trained.
        task_idx = self._label_to_idx(task_label)
        if task_idx is None:
            # new task. add it to the agent's seen_tasks dictionary
            task_idx = len(self.seen_tasks) # generate an internal task index for new task
            self.seen_tasks[task_idx] = task_label
            self.new_task = True
        set_model_task(self.network, task_idx)
        self.curr_train_task_label = task_label
        return

    def task_train_end(self):
        # Consolidate: cache the trained mask and, for a brand-new task,
        # update the number of learned tasks used by the superposition.
        self.curr_train_task_label = None
        cache_masks(self.network)
        if self.new_task:
            set_num_tasks_learned(self.network, len(self.seen_tasks))
        self.new_task = False # reset flag
        return

    def task_eval_start(self, task_label):
        self.network.eval()
        task_idx = self._label_to_idx(task_label)
        if task_idx is None:
            # agent has not been trained on current task
            # being evaluated. therefore use a random mask
            # TODO: random task hardcoded to the first learnt
            # task/mask. update this later to use a random
            # previous task, or implementing a way for
            # agent to use an ensemble of different mask
            # internally for the task not yet seen.
            task_idx = 0
        set_model_task(self.network, task_idx)
        self.curr_eval_task_label = task_label
        return

    def task_eval_end(self):
        self.curr_eval_task_label = None
        self.network.train()
        # resume training the model on train task label if training
        # was on before running evaluations.
        if self.curr_train_task_label is not None:
            task_idx = self._label_to_idx(self.curr_train_task_label)
            set_model_task(self.network, task_idx)
        return
class ShellAgent_SP(LLAgent):
    '''
    Lifelong learning (ppo continual learning with supermask) agent in ShELL
    settings. All agents executing in a single/uni process (SP) setting.
    '''

    def __init__(self, config):
        LLAgent.__init__(self, config)

    def ping_agents(self, agents):
        # Ask every peer for a mask matching the current task; install the
        # first usable one. Returns True when a mask was obtained.
        current_label = self.task.get_task()['task_label']
        current_idx = self._label_to_idx(current_label)
        responses = [agent.ping_response(current_label) for agent in agents]
        chosen_mask = self._select_mask(agents, responses)
        if chosen_mask is None:
            return False
        # function from deep_rl/network/ssmask_utils.py
        set_mask(self.network, chosen_mask, current_idx)
        return True

    def ping_response(self, task_label):
        # Return this agent's mask for the given label, or None if the task
        # has not been seen/trained.
        known_idx = self._label_to_idx(task_label)
        if known_idx is None:
            return None
        return get_mask(self.network, known_idx)
class ShellAgent_DP(LLAgent):
    '''
    Lifelong learning (ppo continual learning with supermask) agent in ShELL
    settings. All agents executing in a distributed (multi-) process (DP) setting.
    '''

    def __init__(self, config):
        LLAgent.__init__(self, config)
        # Record the shape of every mask tensor (keyed by parameter name) so
        # masks can be flattened/rebuilt for inter-process exchange.
        reference_mask = get_mask(self.network, task=0)
        self.mask_info = {key: tuple(value.shape) for key, value in reference_mask.items()}
        # Total number of scalar entries across all mask tensors.
        self.model_mask_dim = sum(np.prod(shape) for shape in self.mask_info.values())

    def infuse_masks(self, masks):
        print('to be implemented')
        return False

    def ping_response(self, task_label):
        # Return this agent's mask for the given label, or None if the task
        # has not been seen/trained.
        known_idx = self._label_to_idx(task_label)
        if known_idx is None:
            return None
        return get_mask(self.network, known_idx)
class LLAgent_NoOracle(PPOContinualLearnerAgent):
    '''
    PPO continual learning agent using supermask superposition algorithm
    with *no task oracle*: agent is not informed about task boundaries
    (i.e., when one task ends and the other begins) and has to detect task
    change by itself.
    supermask lifelong learning algorithm: https://arxiv.org/abs/2006.14769
    '''
    def __init__(self, config):
        PPOContinualLearnerAgent.__init__(self, config)
        # Maps internal task index -> [task_label, task_name].
        self.seen_tasks = {} # contains task labels that agent has experienced so far.
        self.new_task = False
        self.curr_train_task_label = None

    def _name_to_idx(self, name):
        # Look up a task's internal index by its (string) name.
        found_task_idx = None
        for task_idx, value in self.seen_tasks.items():
            seen_task_label, task_name = value
            if name == task_name:
                found_task_idx = task_idx
                break
        return found_task_idx

    def _label_to_idx(self, task_label):
        # Look up a task's internal index by label vector, matched via
        # L2 distance within tolerance eps. Returns None if unseen.
        eps = 1e-5
        found_task_idx = None
        for task_idx, value in self.seen_tasks.items():
            seen_task_label, task_name = value
            if np.linalg.norm((task_label - seen_task_label), ord=2) < eps:
                found_task_idx = task_idx
                break
        return found_task_idx

    def _select_mask(self, agents, masks, ensemble=False):
        # Pick the first non-None mask offered by peer agents; combining
        # several masks into an ensemble is not implemented.
        found_mask = None
        if ensemble:
            raise NotImplementedError
        else:
            for agent, mask in zip(agents, masks):
                if mask is not None:
                    found_mask = mask
                    break
        return found_mask

    def update_task_label(self, task_label):
        # TODO: consider other ways to update the label as detect module
        # alters it. Maybe moving average?
        task_idx = self._label_to_idx(self.curr_train_task_label)
        self.seen_tasks[task_idx][0] = task_label
        self.curr_train_task_label = task_label

    def set_first_task(self, task_label, task_name):
        # start first task
        task_idx = 0 # first task idx is 0
        self.seen_tasks[task_idx] = [task_label, task_name]
        self.new_task = True
        set_model_task(self.network, task_idx)
        self.curr_train_task_label = task_label
        return

    def task_change_detected(self, task_label, task_name):
        # Called by the detect module when the environment task has changed:
        # consolidate the mask of the finished task, then switch to (or
        # register) the new one.
        # end current task (if any)
        if self.curr_train_task_label is not None:
            cache_masks(self.network)
            if self.new_task:
                set_num_tasks_learned(self.network, len(self.seen_tasks))
            self.new_task = False # reset flag
            self.curr_train_task_label = None

        # start next task
        # use task label or task name to check if task already exist in model
        task_idx = self._label_to_idx(task_label)
        if task_idx is None:
            # new task. add it to the agent's seen_tasks dictionary
            task_idx = len(self.seen_tasks) # generate an internal task index for new task
            self.seen_tasks[task_idx] = [task_label, task_name]
            self.new_task = True
        set_model_task(self.network, task_idx)
        self.curr_train_task_label = task_label
        return

    def task_eval_start(self, task_name):
        self.network.eval()
        task_idx = self._name_to_idx(task_name)
        if task_idx is None:
            # agent has not been trained on current task
            # being evaluated. therefore use a random mask
            # TODO: random task hardcoded to the first learnt
            # task/mask. update this later to use a random
            # previous task, or implementing a way for
            # agent to use an ensemble of different mask
            # internally for the task not yet seen.
            task_idx = 0
        set_model_task(self.network, task_idx)
        return

    def task_eval_end(self):
        self.network.train()
        # resume training the model on train task label if training
        # was on before running evaluations.
        if self.curr_train_task_label is not None:
            task_idx = self._label_to_idx(self.curr_train_task_label)
            set_model_task(self.network, task_idx)
        return
|
nilq/baby-python
|
python
|
import itertools
def flatten(*arg):
    """Concatenate any number of iterables into a single flat list."""
    flat = []
    for sublist in arg:
        flat.extend(sublist)
    return flat
def intersection(*args):
    """Return the set of elements common to every argument iterable."""
    common = set(args[0])
    for other in args[1:]:
        common &= set(other)
    return common
def is_unique(*item_lists):
all_items = flatten(*item_lists)
previous = None
for item in sorted(all_items):
if item == previous:
return False
previous = item
return True
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from torch.autograd import Function
class GdnFunction(Function):
    """Autograd function for Generalized Divisive Normalization (GDN).

    Normalises each channel by a learned pooling of the squared channel
    activations:  y_c = x_c / sqrt(sum_k x_k^2 * gamma[k, c] + beta_c),
    with a hand-derived backward pass for x, gamma and beta.
    """

    @staticmethod
    def forward(ctx, x, gamma, beta):
        # Keep the inputs for the manual gradient computation in backward().
        ctx.save_for_backward(x, gamma, beta)
        n, c, h, w = list(x.size())
        # Reshape NCHW -> (N*H*W, C) so the channel pooling becomes a matmul.
        tx = x.permute(0, 2, 3, 1).contiguous()
        tx = tx.view(-1, c)
        tx2 = tx * tx
        # Per-pixel, per-channel normalisation pool: x^2 @ gamma + beta.
        denominator = tx2.mm(gamma) + beta
        ty = tx / torch.sqrt(denominator)
        # Restore the original NCHW layout.
        y = ty.view(n, h, w, c)
        y = y.permute(0, 3, 1, 2).contiguous()
        return y

    @staticmethod
    def backward(ctx, grad_output):
        # NOTE(review): ctx.saved_variables is the legacy spelling of
        # ctx.saved_tensors and has been removed in modern PyTorch --
        # confirm which torch version this targets.
        x, gamma, beta = ctx.saved_variables
        n, c, h, w = list(grad_output.size())
        # Recompute the flattened activations and denominator from forward.
        tx = x.permute(0, 2, 3, 1).contiguous()
        tx = tx.view(-1, c)
        tx2 = tx * tx
        denominator = tx2.mm(gamma) + beta
        # Flatten the upstream gradient the same way.
        tdzdy = grad_output.permute(0, 2, 3, 1).contiguous()
        tdzdy = tdzdy.view(-1, c)
        # dL/dx: direct 1/sqrt(denom) term minus the coupling of every
        # channel through the shared denominator.
        gy = (tdzdy * torch.pow(denominator, -0.5) - (tdzdy * tx *
              torch.pow(denominator, -1.5)).mm(gamma.t()) * tx)
        gy = gy.view(n, h, w, c)
        grad_input = gy.permute(0, 3, 1, 2).contiguous()
        # Shared factor d(denom^-1/2)/d(denom) * x * upstream gradient;
        # summing it gives grad_beta, weighting by x^2 gives grad_gamma.
        tmp = -0.5 * torch.pow(denominator, -1.5) * tx * tdzdy
        grad_beta = torch.sum(tmp, 0)
        grad_gamma = tx2.t().mm(tmp)
        return grad_input, grad_gamma, grad_beta
class Gdn(nn.Module):
    """Generalized Divisive Normalization layer wrapping GdnFunction.

    Holds the learnable (C x C) ``gamma`` pooling matrix and the (C,)
    ``beta`` offset for a fixed number of input channels.
    """

    def __init__(self, input_channel):
        super(Gdn, self).__init__()
        self.input_channel = input_channel
        # NOTE(review): torch.Tensor(...) leaves these values uninitialized;
        # presumably they are initialized externally -- confirm.
        self.gamma = nn.Parameter(torch.Tensor(input_channel, input_channel))
        self.beta = nn.Parameter(torch.Tensor(input_channel))

    def forward(self, input):
        return GdnFunction.apply(input, self.gamma, self.beta)

    def __str__(self):
        gamma_rows, gamma_cols = self.gamma.size()
        shape_text = '(gamma_size=(%d, %d), beta_size=(%d))' % (
            gamma_rows, gamma_cols, self.beta.size()[0])
        return self.__class__.__name__ + shape_text

    __repr__ = __str__
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
**Callable creation utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.utilfunc.utilfuncmake` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from pytest import raises
# ....................{ GLOBALS }....................
# Arbitrary global referenced in functions created below.
AND_SEE_THE_GREAT_ACHILLES = 'whom we knew'
# ....................{ TESTS ~ make }....................
#FIXME: Consider excising. Although awesome, this is no longer needed.
# def test_copy_func_shallow_pass() -> None:
# '''
# Test successful usage of the
# :func:`beartype._util.func.utilfuncmake.copy_func_shallow` function.
# '''
#
# # Defer heavyweight imports.
# from beartype.roar import BeartypeDecorWrapperException
# from beartype._util.func.utilfuncmake import copy_func_shallow
#
# # Tuple of the names of all attributes expected to be shallowly copied.
# ATTRS_NAME_COPIED = (
# '__annotations__',
# '__closure__',
# '__code__',
# '__defaults__',
# '__doc__',
# '__globals__',
# # '__kwdefaults__',
# '__module__',
# '__name__',
# '__qualname__',
# )
#
# # String returned by the in_memoriam() function declared below when passed
# # an even integer.
# IN_MEMORIAM_RETURN_IF_PARAM_EVEN = 'And all we met was fair and good,'
#
# # String returned by the in_memoriam() function declared below when passed
# # an even integer.
# IN_MEMORIAM_RETURN_IF_PARAM_ODD = ' And all was good that Time could bring,'
#
# # String suffixing the string returned by that function.
# IN_MEMORIAM_RETURN_SUFFIX = 'I sing to him that rests below,'
#
# # Arbitrary closure to be shallowly copied.
# def in_memoriam(
# # Mandatory parameter.
# the_shadow: int,
#
# # Optional parameter.
# the_shroud: str = IN_MEMORIAM_RETURN_SUFFIX,
# ) -> str:
# '''
# The Shadow sits and waits for me.
# '''
#
# return (
# IN_MEMORIAM_RETURN_IF_PARAM_EVEN + the_shroud
# if the_shadow % 2 == 0 else
# IN_MEMORIAM_RETURN_IF_PARAM_ODD + the_shroud
# )
#
# # Set a custom attribute on this callable to be shallowly copied.
# in_memoriam.the_clock = '''
# And in the dusk of thee, the clock
# Beats out the little lives of men.'''
#
# # Function shallowly copied from this callable.
# captive_void = copy_func_shallow(func=in_memoriam)
#
# # Assert this copy returns the expected value.
# assert captive_void(27) == (
# f'{IN_MEMORIAM_RETURN_IF_PARAM_ODD}{IN_MEMORIAM_RETURN_SUFFIX}')
#
# # Assert this copy shares the same custom attribute as the original.
# assert captive_void.the_clock == in_memoriam.the_clock
#
# # Assert this copy contains the same dunder attributes.
# for attr_name_copied in ATTRS_NAME_COPIED:
# assert (
# getattr(captive_void, attr_name_copied) ==
# getattr(in_memoriam, attr_name_copied)
# )
#
# # Assert this function rejects C-based functions.
# with raises(BeartypeDecorWrapperException):
# copy_func_shallow(
# func=iter, exception_cls=BeartypeDecorWrapperException)
# ....................{ TESTS ~ make }....................
def test_make_func_pass() -> None:
    '''
    Test successful usage of the
    :func:`beartype._util.func.utilfuncmake.make_func` function.

    Exercises dynamic function creation with explicit global and local
    scopes, wrapper metadata propagation, and the no-scope fast path.
    '''

    # Defer heavyweight imports.
    from beartype._util.func.utilfuncmake import make_func
    from typing import Optional

    # Arbitrary local referenced in functions created below.
    THO_MUCH_IS_TAKEN = 'much abides; and tho’'

    # Arbitrary callable wrapped by wrappers created below.
    def we_are_not_now_that_strength_which_in_old_days() -> str:
        '''
        One equal temper of heroic hearts,
        '''

        return 'Moved earth and heaven, that which we are, we are;'

    # Arbitrary wrapper accessing both globally and locally scoped attributes,
    # exercising most optional parameters.
    ulysses = make_func(
        func_name='it_may_be_that_the_gulfs_will_wash_us_down',
        func_code='''
def it_may_be_that_the_gulfs_will_wash_us_down(
    it_may_be_we_shall_touch_the_happy_isles: Optional[str]) -> str:
    return (
        AND_SEE_THE_GREAT_ACHILLES +
        THO_MUCH_IS_TAKEN +
        we_are_not_now_that_strength_which_in_old_days() +
        (
            it_may_be_we_shall_touch_the_happy_isles or
            'Made weak by time and fate, but strong in will'
        )
    )
''',
        func_globals={
            'AND_SEE_THE_GREAT_ACHILLES': AND_SEE_THE_GREAT_ACHILLES,
            'THO_MUCH_IS_TAKEN': THO_MUCH_IS_TAKEN,
            'we_are_not_now_that_strength_which_in_old_days': (
                we_are_not_now_that_strength_which_in_old_days),
        },
        func_locals={
            'Optional': Optional,
        },
        func_wrapped=we_are_not_now_that_strength_which_in_old_days,
    )

    # Assert this wrapper wrapped this wrappee (i.e., copied its docstring).
    assert ulysses.__doc__ == (
        we_are_not_now_that_strength_which_in_old_days.__doc__)

    # Assert this wrapper returns an expected value.
    odyssey = ulysses('Made weak by time and fate, but strong in will')
    assert 'Made weak by time and fate, but strong in will' in odyssey

    # Arbitrary callable accessing no scoped attributes (fast path with no
    # func_globals/func_locals).
    to_strive_to_seek_to_find = make_func(
        func_name='to_strive_to_seek_to_find',
        func_code='''
def to_strive_to_seek_to_find(and_not_to_yield: str) -> str:
    return and_not_to_yield
''',
    )

    # Assert this wrapper returns an expected value.
    assert (
        to_strive_to_seek_to_find('Tis not too late to seek a newer world.') ==
        'Tis not too late to seek a newer world.'
    )
def test_make_func_fail() -> None:
    '''
    Test unsuccessful usage of the
    :func:`beartype._util.func.utilfuncmake.make_func` function.

    Covers the three expected failure modes: a name collision with a
    caller-defined local, a syntax error in the snippet, and a snippet that
    compiles but never defines the requested function.
    '''

    # Defer heavyweight imports.
    from beartype.roar import BeartypeDecorWrapperException
    from beartype.roar._roarexc import _BeartypeUtilCallableException
    from beartype._util.func.utilfuncmake import make_func

    # Assert that attempting to create a function whose name collides with that
    # of a caller-defined local variable raises the expected exception.
    with raises(_BeartypeUtilCallableException):
        make_func(
            func_name='come_my_friends',
            func_code='''
def come_my_friends(T: str) -> str:
    return T + 'is not too late to seek a newer world'
''',
            func_label='Magnanimous come_my_friends() function',
            func_locals={
                'come_my_friends': 'Push off, and sitting well in order smite',
            },
        )

    # Assert that attempting to execute a syntactically invalid snippet raises
    # the expected exception.
    with raises(BeartypeDecorWrapperException):
        make_func(
            func_name='to_sail_beyond_the_sunset',
            func_code='''
def to_sail_beyond_the_sunset(and_the_baths: str) -> str:
    Of all the western stars, until I die.
''',
            func_label='Heroic to_sail_beyond_the_sunset() function',
            exception_cls=BeartypeDecorWrapperException,
        )

    # Assert that attempting to execute a syntactically valid snippet failing
    # to declare this function raises the expected exception.
    with raises(BeartypeDecorWrapperException):
        make_func(
            func_name='you_and_i_are_old',
            func_code='''
def old_age_hath_yet_his_honour_and_his_toil() -> str:
    return 'Death closes all: but something ere the end'
''',
            func_label='Geriatric you_and_i_are_old() function',
            exception_cls=BeartypeDecorWrapperException,
        )
|
nilq/baby-python
|
python
|
import os
import sys
import traceback

# Minimal `cat`: copy one file to stdout using low-level os.read/os.write.
# Fix: the original `from os import *` silently shadowed the builtin open()
# with os.open() -- explicit `os.` calls make the low-level intent visible.

if len(sys.argv) != 2:
    print("Utilisation: %s fichier" % (sys.argv[0]))
    sys.exit(0)

SIZE = 256  # read-chunk size in bytes

try:
    fd = os.open(sys.argv[1], os.O_RDONLY)
    data = os.read(fd, SIZE)
    while len(data) > 0:
        os.write(sys.stdout.fileno(), data)
        data = os.read(fd, SIZE)
    os.close(fd)
except OSError as e:
    traceback.print_exc()
    print(e.strerror)
    sys.exit(1)
|
nilq/baby-python
|
python
|
from falcor import *
def render_graph_DefaultRenderGraph():
    """Build the default Falcor render graph: an image loader feeding a blit pass."""
    g = RenderGraph('DefaultRenderGraph')
    # Load every available render-pass library; only ImageLoader and
    # ExampleBlitPass are actually instantiated below.
    loadRenderPassLibrary('BSDFViewer.dll')
    loadRenderPassLibrary('AccumulatePass.dll')
    loadRenderPassLibrary('TemporalDelayPass.dll')
    loadRenderPassLibrary('Antialiasing.dll')
    loadRenderPassLibrary('BlitPass.dll')
    loadRenderPassLibrary('CSM.dll')
    loadRenderPassLibrary('DebugPasses.dll')
    loadRenderPassLibrary('DepthPass.dll')
    loadRenderPassLibrary('ErrorMeasurePass.dll')
    loadRenderPassLibrary('ExampleBlitPass.dll')
    loadRenderPassLibrary('FLIPPass.dll')
    loadRenderPassLibrary('WhittedRayTracer.dll')
    loadRenderPassLibrary('PixelInspectorPass.dll')
    loadRenderPassLibrary('ForwardLightingPass.dll')
    loadRenderPassLibrary('GBuffer.dll')
    loadRenderPassLibrary('SkyBox.dll')
    loadRenderPassLibrary('ImageLoader.dll')
    loadRenderPassLibrary('MegakernelPathTracer.dll')
    loadRenderPassLibrary('MinimalPathTracer.dll')
    loadRenderPassLibrary('OptixDenoiser.dll')
    loadRenderPassLibrary('PassLibraryTemplate.dll')
    loadRenderPassLibrary('SceneDebugger.dll')
    loadRenderPassLibrary('SimplePostFX.dll')
    loadRenderPassLibrary('SSAO.dll')
    loadRenderPassLibrary('TestPasses.dll')
    loadRenderPassLibrary('SVGFPass.dll')
    loadRenderPassLibrary('ToneMapper.dll')
    loadRenderPassLibrary('Utils.dll')
    # Source pass: load a fixed image from disk (note the hard-coded local path).
    ImageLoader = createPass('ImageLoader', {'outputFormat': ResourceFormat.BGRA8UnormSrgb, 'filename': 'C:\\Users\\Song\\Desktop\\pictures\\65578040_p0.jpg', 'mips': False, 'srgb': True, 'arrayIndex': 0, 'mipLevel': 0})
    g.addPass(ImageLoader, 'ImageLoader')
    ExampleBlitPass = createPass('ExampleBlitPass')
    g.addPass(ExampleBlitPass, 'ExampleBlitPass')
    # Wire loader output into the blit pass and expose its output.
    g.addEdge('ImageLoader.dst', 'ExampleBlitPass.input')
    g.markOutput('ExampleBlitPass.output')
    return g

DefaultRenderGraph = render_graph_DefaultRenderGraph()
# Register with the active Mogwai instance when one exists (`m` is injected
# by the host application; standalone execution has no `m`).
try: m.addGraph(DefaultRenderGraph)
except NameError: None
|
nilq/baby-python
|
python
|
import histogram_dct as hd
import pandas as pd

# Demo script: draw histograms for several sample distributions and then use
# the dict returned by histogram_dct to find the most populated bin.

# load data
data = pd.read_excel(r'Example\distributions.xlsx')

# uniform distribution:
hd.histogram_dct(data['unf'], bin=12.5, name='uniform distribution')
# normal distribution:
hd.histogram_dct(data['nrm'], bin=4, name='normal distribution')
# exp distribution:
hd.histogram_dct(data['exp'], bin=5, name='exp distribution')
# mix distribution:
hd.histogram_dct(data['mix'], bin=5, name='mixed distribution')

# use the output dictionary (find the maximum key):
# dct maps bin -> count; scan for the bin with the largest count.
max_key, max_vlu, dct = None, 0, hd.histogram_dct(data['nrm'], bin=5)
for key, value in dct.items():
    if max_vlu < value:
        max_vlu = value
        max_key = key
print(f'maximum key: {max_key}\nmaximum value: {max_vlu}'
      f'\nprecent: {round(max_vlu/sum(dct.values())*100,2)}%')
|
nilq/baby-python
|
python
|
""" Python Character Mapping Codec generated from '8859-2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless charmap codec backed by the module-level ISO 8859-2 maps
    # defined below (decoding_map / encoding_map).

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_map)

class StreamWriter(Codec, codecs.StreamWriter):
    pass

class StreamReader(Codec, codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    # Legacy 4-tuple registry entry consumed by the `encodings` package.
    return (Codec().encode, Codec().decode, StreamReader, StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a2: 0x02d8, # BREVE
0x00a3: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00a5: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x00a6: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x00a9: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00aa: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00ab: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x00ac: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x00ae: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00af: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00b1: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00b2: 0x02db, # OGONEK
0x00b3: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00b5: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x00b6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x00b7: 0x02c7, # CARON
0x00b9: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ba: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00bb: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x00bc: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00bd: 0x02dd, # DOUBLE ACUTE ACCENT
0x00be: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00bf: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00c0: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00c3: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c5: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x00c6: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00cc: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00cf: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d0: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00d2: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d5: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x00d8: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00d9: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00db: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00de: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00e0: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00e3: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00e5: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x00e6: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ec: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00ef: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00f0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00f1: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00f2: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00f5: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x00f8: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00f9: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fe: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flip(nn.Module):
    """
    Flip indices transformation.

    Example:
        >>> f = stribor.Flip()
        >>> x = torch.tensor([[1, 2], [3, 4]])
        >>> f(x)[0]
        tensor([[2, 1],
                [4, 3]])
        >>> f = stribor.Flip([0, 1])
        >>> f(x)[0]
        tensor([[4, 3],
                [2, 1]])

    Args:
        dims (List[int]): Dimensions along which to flip the order of values.
            Default: [-1]
    """
    def __init__(self, dims=None):
        super().__init__()
        # Fix: the original used a mutable default argument (dims=[-1]),
        # which is shared across all instances created without `dims`.
        self.dims = [-1] if dims is None else dims

    def forward(self, x, **kwargs):
        # Flip is volume-preserving, so the log-det-Jacobian is zero.
        y = torch.flip(x, self.dims)
        return y, torch.zeros_like(y)

    def inverse(self, y, **kwargs):
        # Flipping twice is the identity, so inverse == forward.
        x = torch.flip(y, self.dims)
        return x, torch.zeros_like(x)
class Permute(nn.Module):
    """
    Permute indices along the last dimension.

    Example:
        >>> torch.manual_seed(123)
        >>> f = stribor.Permute(3)
        >>> f(torch.tensor([1, 2, 3]))
        (tensor([2, 3, 1]), tensor([0, 0, 0]))
        >>> f.inverse(torch.tensor([2, 3, 1]))
        (tensor([1, 2, 3]), tensor([0, 0, 0]))

    Args:
        dim (int): Dimension of data
    """
    def __init__(self, dim):
        super().__init__()
        # Random fixed permutation and its inverse (inverse[perm[i]] = i).
        self.permutation = torch.randperm(dim)
        self.inverse_permutation = torch.empty(dim).long()
        self.inverse_permutation[self.permutation] = torch.arange(dim)

    def forward(self, x, **kwargs):
        # Accept **kwargs for interface consistency with the sibling Flip
        # transform (callers pass shared keyword context to all transforms).
        y = x[..., self.permutation]
        # Permutation is volume-preserving: zero log-det-Jacobian.
        return y, torch.zeros_like(y)

    def inverse(self, y, **kwargs):
        x = y[..., self.inverse_permutation]
        return x, torch.zeros_like(x)
|
nilq/baby-python
|
python
|
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
# Copyright (c) 2018, S.J.M. Steffann. This software is licensed under the BSD
# 3-Clause License. Please see the LICENSE file in the project root directory.
# ••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
# Generated by Django 2.0.7 on 2018-07-15 15:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Renames Marvin.alive -> Marvin.is_alive, refreshes the field definition,
    # and updates the model's default ordering to match the new field name.

    dependencies = [
        ('instances', '0002_zaphod'),
    ]

    operations = [
        migrations.RenameField(
            model_name='marvin',
            old_name='alive',
            new_name='is_alive',
        ),
        migrations.AlterField(
            model_name='marvin',
            name='is_alive',
            field=models.BooleanField(default=True, verbose_name='is alive'),
        ),
        migrations.AlterModelOptions(
            name='marvin',
            options={'ordering': ('-is_alive', 'instance_type', '-last_seen')},
        ),
    ]
|
nilq/baby-python
|
python
|
from typing import Dict, Optional
from fastapi import APIRouter, Depends, HTTPException
from src.api.serializers import UserIn, UserOut, UsersOut
from src.services.auth import get_current_user, get_user_login_from_token
from src.services.users import (
create_user,
get_user_by_id,
get_user_by_login,
search_users,
)
from starlette import status
router = APIRouter()
@router.post("/users", response_model=UserOut, status_code=status.HTTP_201_CREATED)
async def create_client_handler(user: UserIn):
    """Create a user, then re-read it so the response carries DB-generated fields."""
    await create_user(
        login=user.login,
        password=user.password,
        first_name=user.first_name,
        last_name=user.last_name,
        age=user.age,
        sex=user.sex,
        interests=user.interests,
        city_id=user.city_id,
    )
    user_data = await get_user_by_login(user.login)
    return user_data

@router.get("/users/me", response_model=UserOut, status_code=status.HTTP_200_OK)
async def get_my_user_info(current_user: Dict = Depends(get_current_user)):
    """Return the profile of the authenticated user (resolved from the token)."""
    return current_user

@router.get(
    "/users/{user_id}",
    dependencies=[Depends(get_user_login_from_token)],
    response_model=UserOut,
    status_code=status.HTTP_200_OK,
)
async def get_user_info_by_id(user_id: int):
    """Return a user by id; 404 when no such user exists. Requires a valid token."""
    user_data = await get_user_by_id(user_id)
    if not user_data:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="User not found",
        )
    return user_data

@router.get(
    "/users",
    dependencies=[Depends(get_user_login_from_token)],
    response_model=UsersOut,
    status_code=status.HTTP_200_OK,
)
async def get_users(first_name: Optional[str] = None, last_name: Optional[str] = None):
    """Search users by optional first/last name filters. Requires a valid token."""
    users_data = await search_users(first_name, last_name)
    return {"data": users_data}
|
nilq/baby-python
|
python
|
import maestro
import time
import numpy as np
import scipy as sp
import scipy.interpolate
import sys
# Link lengths of the 2-link planar arm, in meters.
a1 = 0.1
a2 = 0.26

# In degrees from straight config
# Waypoints for the end effector (x, y) in meters; the trajectory cycles
# through these and back to the start.
ee_pos_list = [
    np.array([0.12, 0.17]),
    #np.array([0.12, 0]),
    #np.array([0.14, 0]),
    np.array([0.325, 0.12]),
    np.array([0.12, 0.17]),
]
# Arrival time (seconds) for each waypoint above.
time_list = [0., 1., 2.]

# Automatically cycle smoothly
# Do interpolation
times = np.arange(min(time_list), max(time_list), 0.1)
# Linear interpolator mapping time -> end-effector position; clamps to the
# first/last waypoint outside the time range instead of raising.
ee_pos_generator = sp.interpolate.interp1d(
    time_list, np.stack(ee_pos_list, axis=0), axis=0,
    kind="linear",
    bounds_error=False,
    fill_value=(ee_pos_list[0], ee_pos_list[-1])
)
def do_ik(pos):
# https://robotacademy.net.au/lesson/inverse-kinematics-for-a-2-joint-robot-arm-using-geometry/
q2 = np.arccos( (pos[0]**2 + pos[1]**2 - a1**2 - a2**2) / (2 * a1 * a2) )
q1 = np.arctan2(pos[1], pos[0]) - np.arctan2((a2 * np.sin(q2)), (a1 + a2 * np.cos(q2)))
# 0 for q2 is actually 90*
return np.array([0, q1, q2 - np.pi/2.])*180./np.pi
# Servo calibration: pulse-width limits/centers in microseconds and the
# microseconds-per-degree conversion for each of the three channels.
n_servos = 3
servo_min = np.array([500, 500, 500])
servo_centers = np.array([1500, 1500, 1500])
servo_ms_per_deg = np.array([1000/90., 1000/90., 1000/90.])

if __name__ == "__main__":
    servo = maestro.Controller(ttyStr='COM6')
    # Replay the interpolated trajectory forever, restarting the clock each
    # cycle.
    while (1):
        start_time = time.time()
        t = time.time() - start_time
        while t < max(time_list):
            t = time.time() - start_time
            ee_pos = ee_pos_generator(t)
            pos = do_ik(ee_pos)
            print("%f: %s -> %s" % (t, ee_pos, pos))
            # Convert joint angles (degrees) to pulse widths (microseconds).
            pos_in_ms = servo_centers + pos * servo_ms_per_deg
            for k in range(n_servos):
                # Commands in quarter-ms
                servo.setTarget(k, int(pos_in_ms[k]*4))
    # NOTE(review): unreachable -- the outer while(1) never exits, so the
    # controller is never closed cleanly.
    servo.close()
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
#####################################
#adjust as per ratio required
widthImg = 640
heightImg = 480
#####################################
cap = cv2.VideoCapture(0)
cap.set(3, widthImg)
cap.set(4, heightImg)
cap.set(10, 150) #id: 10, represents brigthness => adjust as required based on setting
#Preprocessing Function
def preProcessing(img):
    """Return a binary edge mask of `img` suitable for contour detection."""
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, 200, 200)  # modify threshold as needed
    kernel = np.ones((5, 5))
    # Dilate then erode to close small gaps in the detected edges.
    imgDil = cv2.dilate(imgCanny, kernel, iterations=2)
    imgThres = cv2.erode(imgDil, kernel, iterations=1)
    return imgThres
#Contours function
def getContours(img):
    """Find the largest 4-point contour in a binary image.

    Returns the approximated corner array of the biggest quadrilateral, or an
    empty array when none is found. Also draws the result onto the global
    `imgContour` (set each frame by the main loop below).
    """
    biggest = np.array([])
    maxArea = 0
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 5000:  # modify based on size
            # cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
            perimeter = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02*perimeter, True)
            # Keep only quadrilaterals (4 corners) and track the largest one.
            if area > maxArea and len(approx) == 4:
                biggest = approx
                maxArea = area
    cv2.drawContours(imgContour, biggest, -1, (255, 0, 0), 20)  # show edges
    return biggest
#Reorder function
def reorder(myPoints):
    """Order 4 quad corners as top-left, top-right, bottom-left, bottom-right.

    The corner with the smallest x+y sum is top-left, the largest is
    bottom-right; the x-y difference separates top-right from bottom-left.
    """
    corners = myPoints.reshape((4, 2))
    ordered = np.zeros((4, 1, 2), np.int32)
    sums = corners.sum(axis=1)  # use axis 1
    diffs = np.diff(corners, axis=1)
    ordered[0] = corners[np.argmin(sums)]   # smallest sum -> top-left
    ordered[3] = corners[np.argmax(sums)]   # largest sum -> bottom-right
    ordered[1] = corners[np.argmin(diffs)]  # smallest y-x -> top-right
    ordered[2] = corners[np.argmax(diffs)]  # largest y-x -> bottom-left
    return ordered
#Warping function
def getWarp(img, biggest):
    """Perspective-warp the quadrilateral `biggest` in `img` to a flat page."""
    biggest = reorder(biggest)
    pts1 = np.float32(biggest)
    # Destination corners: full output rectangle, same TL/TR/BL/BR order.
    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOutput = cv2.warpPerspective(img, matrix, (widthImg, heightImg))
    # minor crop to adjust, can adjust if needed -- trims 20px of warp-edge noise
    imgCropped = imgOutput[20:imgOutput.shape[0]-20, 20:imgOutput.shape[1]-20]
    imgCropped = cv2.resize(imgCropped, (widthImg, heightImg))
    return imgCropped
#Stack function
def stackImages(scale, imgArray):
    """Tile a list (or list-of-lists) of images into one scaled mosaic.

    All images are resized to the size of imgArray[0][0] (times `scale`), and
    grayscale images are converted to BGR so they can be stacked with color
    frames.
    """
    rows = len(imgArray)
    cols = len(imgArray[0])
    # 2-D grid if the first element is itself a list, otherwise a single row.
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range(0, rows):
            for y in range(0, cols):
                # Match every image to the reference size before stacking.
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        # Stack each row horizontally, then all rows vertically.
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor = np.hstack(imgArray)
        ver = hor
    return ver
#Body
# Capture frames, detect the largest document quadrilateral, and show the
# processing stages plus the warped ("scanned") result side by side.
while True:
    success, img = cap.read()
    img = cv2.resize(img, (widthImg, heightImg))
    imgContour = img.copy()
    imgThres = preProcessing(img)
    biggest = getContours(imgThres)
    if biggest.size != 0:
        # A 4-corner contour was found: warp it to a flat page.
        imgWarped = getWarp(img, biggest)
        imgArray = ([img, imgThres], [imgContour, imgWarped])
    else:
        # Fix: without this guard (previously commented out), reorder()
        # crashed on the empty corner array whenever no quad was detected.
        imgArray = ([img, imgThres], [img, img])
    stackedImages = stackImages(0.6, imgArray)
    cv2.imshow("Steps", stackedImages)
    # Fix: mask the key code with 0xFF (low byte), not 0xFFF.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
|
nilq/baby-python
|
python
|
import django_tables2 as tables
from net.models import Connection
from utils.tables import BaseTable, ButtonsColumn, SelectColumn
class ConnectionTable(BaseTable):
    """django-tables2 table listing IXP connections with linkified columns."""
    pk = SelectColumn()
    ipv6_address = tables.Column(linkify=True, verbose_name="IPv6")
    ipv4_address = tables.Column(linkify=True, verbose_name="IPv4")
    internet_exchange_point = tables.LinkColumn()
    router = tables.LinkColumn()
    # Per-row action buttons (edit/delete) for Connection objects.
    buttons = ButtonsColumn(Connection)

    class Meta(BaseTable.Meta):
        model = Connection
        fields = (
            "pk",
            "state",
            "vlan",
            "ipv6_address",
            "ipv4_address",
            "internet_exchange_point",
            "router",
            "interface",
            "buttons",
        )
        # Columns shown when the user has not customized the table.
        default_columns = (
            "pk",
            "state",
            "vlan",
            "ipv6_address",
            "ipv4_address",
            "router",
            "buttons",
        )
        empty_text = "None"
|
nilq/baby-python
|
python
|
import os
import json
from .constants import CUST_ATTR_GROUP
def default_custom_attributes_definition():
    """Load the bundled ``custom_attributes.json`` shipped next to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    json_file_path = os.path.join(module_dir, "custom_attributes.json")
    with open(json_file_path, "r") as json_stream:
        return json.load(json_stream)
def app_definitions_from_app_manager(app_manager):
    """Collect enabled host applications as [{name: label}, ...], sorted by label.

    Falls back to a single ``{"empty": "< Empty >"}`` entry when no enabled
    host application exists.
    """
    enabled_hosts = [
        (app_name, app.full_label)
        for app_name, app in app_manager.applications.items()
        if app.enabled and app.is_host
    ]
    # Sort items by label
    app_definitions = [
        {name: label}
        for name, label in sorted(enabled_hosts, key=lambda item: item[1])
    ]
    if not app_definitions:
        app_definitions.append({"empty": "< Empty >"})
    return app_definitions
def tool_definitions_from_app_manager(app_manager):
    """Collect available tools as [{name: label}, ...], sorted by label.

    Returns a single ``{"empty": "< Empty >"}`` entry when no tool exists so
    downstream enumerations never receive an empty list.
    """
    labeled_tools = [
        (tool_name, tool.label)
        for tool_name, tool in app_manager.tools.items()
    ]
    # Sort items by label
    tools_data = [
        {name: label}
        for name, label in sorted(labeled_tools, key=lambda item: item[1])
    ]
    # Make sure there is at least one item
    if not tools_data:
        tools_data.append({"empty": "< Empty >"})
    return tools_data
def get_openpype_attr(session, split_hierarchical=True, query_keys=None):
    """Query OpenPype custom attribute configurations from ftrack.

    Args:
        session: Connected ftrack session used to execute the query.
        split_hierarchical (bool): When True, hierarchical attributes are
            collected into a separate list.
        query_keys (list): Attribute keys to select; a standard default set
            is used when empty/None.

    Returns:
        list: Attribute configurations, or a tuple
        ``(custom_attributes, hier_custom_attributes)`` when
        ``split_hierarchical`` is True.
    """
    custom_attributes = []
    hier_custom_attributes = []
    if not query_keys:
        query_keys = [
            "id",
            "entity_type",
            "object_type_id",
            "is_hierarchical",
            "default"
        ]
    # TODO remove deprecated "pype" group from query
    cust_attrs_query = (
        "select {}"
        " from CustomAttributeConfiguration"
        # Kept `pype` for Backwards Compatiblity
        " where group.name in (\"pype\", \"{}\")"
    ).format(", ".join(query_keys), CUST_ATTR_GROUP)
    all_avalon_attr = session.query(cust_attrs_query).all()
    for cust_attr in all_avalon_attr:
        # Route hierarchical attributes to their own bucket when requested.
        if split_hierarchical and cust_attr["is_hierarchical"]:
            hier_custom_attributes.append(cust_attr)
            continue
        custom_attributes.append(cust_attr)
    if split_hierarchical:
        # return tuple
        return custom_attributes, hier_custom_attributes
    return custom_attributes
def join_query_keys(keys):
    """Join keys into a comma-separated list of double-quoted query tokens."""
    return ",".join('"{}"'.format(key) for key in keys)
def query_custom_attributes(session, conf_ids, entity_ids, table_name=None):
    """Query custom attribute values from ftrack database.

    Using ftrack call method result may differ based on used table name and
    version of ftrack server.

    Args:
        session(ftrack_api.Session): Connected ftrack session.
        conf_id(list, set, tuple): Configuration(attribute) ids which are
            queried.
        entity_ids(list, set, tuple): Entity ids for which are values queried.
        table_name(str): Table nam from which values are queried. Not
            recommended to change until you know what it means.

    Returns:
        list: Raw value rows (dicts with "value" and "entity_id") as returned
        by the server call.
    """
    output = []
    # Just skip
    if not conf_ids or not entity_ids:
        return output

    if table_name is None:
        table_name = "ContextCustomAttributeValue"

    # Prepare values to query
    attributes_joined = join_query_keys(conf_ids)
    attributes_len = len(conf_ids)

    # Query values in chunks
    # Chunk size shrinks as the number of attributes grows so each request
    # stays around 5000 (entity, attribute) pairs.
    chunk_size = int(5000 / attributes_len)
    # Make sure entity_ids is `list` for chunk selection
    entity_ids = list(entity_ids)
    for idx in range(0, len(entity_ids), chunk_size):
        entity_ids_joined = join_query_keys(
            entity_ids[idx:idx + chunk_size]
        )
        call_expr = [{
            "action": "query",
            "expression": (
                "select value, entity_id from {}"
                " where entity_id in ({}) and configuration_id in ({})"
            ).format(table_name, entity_ids_joined, attributes_joined)
        }]
        # Older ftrack_api versions expose the private `_call` only.
        if hasattr(session, "call"):
            [result] = session.call(call_expr)
        else:
            [result] = session._call(call_expr)

        for item in result["data"]:
            output.append(item)
    return output
|
nilq/baby-python
|
python
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tests for naming of modules when lowering JAX into MLIR.
# RUN: %PYTHON %s | FileCheck %s
from absl import app
import jax
from jax import lax
import numpy as np
from jax.tests.filecheck.jax_filecheck_helpers import print_ir
jax.config.update("jax_enable_mlir", True)
jax.config.update("jax_enable_x64", True)
def main(_):
    """FileCheck-style test: verify MLIR module naming for jitted functions."""
    # CHECK-LABEL: TEST: neg int32[7]
    # CHECK: module @jit_neg
    # CHECK: func public @main
    print_ir(np.empty([7], np.int32))(lax.neg)

    # CHECK-LABEL: TEST: foo int32[7]
    # CHECK: module @jit_foo
    # CHECK: func public @main
    @print_ir(np.empty([7], np.int32))
    @jax.jit
    def foo(x): return x + 2

if __name__ == "__main__":
    app.run(main)
|
nilq/baby-python
|
python
|
from flask_restplus import Api
from jsonschema import FormatChecker
import logging
log = logging.getLogger(__name__)
# Instantiate a Flask-RESTPlus API
api = Api(version='1.0', title='iter8 analytics REST API',
description='API to perform analytics to support canary releases '
'and A/B tests',
format_checker=FormatChecker(formats=("date-time",)))
def build_http_error(msg, http_code):
    """Return an ``({'message': msg}, http_code)`` pair for REST API error responses."""
    payload = {'message': msg}
    return payload, http_code
@api.errorhandler
def default_error_handler(e):
    '''Error handler for uncaught exceptions'''
    # Log the full traceback server-side but return a generic message to the
    # client (avoid leaking internals), with HTTP 500.
    message = 'An unexpected error occurred'
    log.exception(message)
    return {'message': message}, 500
|
nilq/baby-python
|
python
|
import sys
def subst(s, ls):
    """Recursively substitute patterns in `s`.

    `ls` is a flat list of [pattern, replacement, pattern, replacement, ...].
    The earliest pair whose pattern occurs in `s` is applied at its first
    occurrence, then both remaining halves are processed recursively.

    Fix: `xrange` is Python-2-only; `range` behaves identically here and
    works on both Python 2 and 3.
    """
    if s == "":
        return ""
    for j in range(0, len(ls), 2):
        i = s.find(ls[j])
        if i != -1:
            return subst(s[:i], ls) + ls[j + 1] + subst(s[i + len(ls[j]):], ls)
    return s
# Driver: each input line is "<string>;<comma-separated pattern,replacement pairs>".
# NOTE(review): `print` statement syntax below makes this file Python 2 only.
test_cases = open(sys.argv[1], "r")
for test in test_cases:
    s, sub = test.strip().split(";")
    print subst(s, sub.split(","))
test_cases.close()
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from daiquiri.metadata.models import Column
class ColumnSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the metadata fields of a table Column."""

    class Meta:
        model = Column
        fields = (
            'id',
            'order',
            'name',
            'description',
            'unit',
            'ucd',
            'datatype',
            'arraysize',
            'principal',
            'indexed',
            'std'
        )
|
nilq/baby-python
|
python
|
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.views.generic import ListView, DetailView, CreateView
from django.views.generic.edit import FormView, UpdateView
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from .models import Adventure, Room, Choice
from .forms import AdventureForm, RoomForm, ChoiceForm
# Create your views here.
class AdventureDetailView(DetailView):
    model = Adventure
    template_name = "adventure/adventure_detail.html"
    context_object_name = 'adventure'

    #adds start_room to context data
    def get_context_data(self, **kwargs):
        """Expose the pk of this adventure's designated start room (if any)."""
        context = super().get_context_data(**kwargs)
        thisslug = self.kwargs.get("slug")
        # `startroom` is a custom manager on Room filtering start_room=True.
        room = Room.startroom.filter(adventure__slug=thisslug).first()
        if room:
            context['start_room'] = room.pk
        return context
class AdventureListView(ListView):
    """List all adventures."""
    model = Adventure
    template_name = "adventure/adventure_list.html"
    context_object_name = 'adventure_list'

class AdventureCreateView(CreateView):
    """Create an adventure, assigning the requesting user as its owner."""
    model = Adventure
    template_name = "adventure/adventure_update.html"
    form_class = AdventureForm

    def form_valid(self, form):
        # Set the owner before the parent class saves the object.
        self.object = form.save(commit=False)
        self.object.owner = self.request.user
        return super().form_valid(form)

class AdventureUpdateView(UpdateView):
    """Edit an existing adventure."""
    model = Adventure
    template_name = "adventure/adventure_update.html"
    form_class = AdventureForm
class AdventurePlayView(DetailView):
    """Play an adventure starting from its start room.

    NOTE(review): this view looks unfinished -- get_object never returns,
    the nested helpers are never called, and get_first_room references an
    undefined `thisslug`. Confirm whether RoomPlayView supersedes it.
    """
    model = Adventure
    current_room = None
    template_name = "room/room_play.html"
    context_object_name = 'room'

    def get_object(self, queryset=None):
        # NOTE(review): missing `return obj` -- as written this returns None.
        obj = super().get_object(queryset=queryset)

        def set_current_room():
            pass

        def get_first_room():
            # NOTE(review): `thisslug` is undefined in this scope.
            query = Room.objects.filter(adventure__slug=thisslug).filter(start_room=True).first()
            room = get_object_or_404(query)
            return room
class RoomCreateView(CreateView):
    """Create a room within an adventure.

    NOTE(review): this class is shadowed by the later RoomCreateView
    definition below and is therefore dead code; it also uses AdventureForm
    and a filter (`Adventure.objects.filter(adventure__slug=...)`) that does
    not match the Adventure model -- confirm and remove.
    """
    model = Room
    template_name = "adventure/adventure_update.html"
    form_class = AdventureForm

    def form_valid(self, form):
        slug = self.kwargs.get(self.slug_url_kwarg)
        queryset = Adventure.objects.filter(adventure__slug=slug).filter()
        ## TODO: finish this
        self.object = form.save(commit=False)
        self.object.adventure = self.request.user
        return super().form_valid(form)
class RoomPlayView(DetailView):
    """Render a playable room, scoped to the adventure slug in the URL."""
    model = Room
    template_name = "room/room_play.html"
    context_object_name = 'room'
    slug_url_kwarg = 'adv_slug'

    def get_object(self, queryset=None, **kwargs):
        # Restrict lookup to rooms of the adventure named in the URL so a
        # room pk from another adventure 404s instead of leaking.
        slug = self.kwargs.get(self.slug_url_kwarg)
        r_pk = self.kwargs.get('pk')
        queryset = Room.objects.filter(adventure__slug=slug).filter(pk=r_pk)
        obj = super().get_object(queryset=queryset)
        return obj

class RoomDetailView(DetailView):
    model = Room
    template_name = "room/room_detail.html"
    context_object_name = 'room'

class RoomListView(ListView):
    # NOTE(review): model is Adventure while the context name says rooms --
    # confirm whether this should be `model = Room`.
    model = Adventure
    template_name = "room/room_list.html"
    context_object_name = 'room_list'

class RoomCreateView(CreateView):
    model = Room
    template_name = "room/room_update.html"
    form_class = RoomForm

class RoomUpdateView(UpdateView):
    model = Room
    template_name = "room/room_update.html"
    form_class = RoomForm
def register_view(request):
    """Register a new user and log them in on success.

    Fix: the form was previously bound to ``request.POST`` unconditionally,
    so a plain GET rendered a bound, error-laden form. GET now receives an
    unbound form.
    """
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            # Authenticate with the raw password to obtain a login-able user.
            user = authenticate(username=username, password=password)
            login(request, user)
            return redirect('adventure-list')
    else:
        form = UserCreationForm()
    return render(request, 'user/register.html', {'form': form})
|
nilq/baby-python
|
python
|
"""
Unit tests for PDF class
"""
import numpy as np
import unittest
import qp
class EvalFuncsTestCase(unittest.TestCase):
    """ Tests of evaluations and interpolation functions """

    def setUp(self):
        """
        Make any objects that are used in multiple tests.
        """
        # Evaluation grid, random histogram PDFs, and shared/per-PDF bin edges.
        self.xpts = np.linspace(0, 3, 7)
        self.hpdfs = np.random.random((10, 50)) #pylint: disable=no-member
        self.hbins = np.linspace(0, 5, 51)
        # Per-PDF shifted bin edges: one row of edges per PDF.
        self.hbins2 = np.linspace(0, 5, 51) + np.expand_dims(np.linspace(0.1, 1., 10), -1)
        self.xvals = np.linspace(0, 5, 50)
        self.xvals2 = np.linspace(0, 5, 50) + np.expand_dims(np.linspace(0.1, 1., 10), -1)
        self.yvals1d = self.hpdfs[0]
        self.rows = np.expand_dims(np.arange(10), -1)
        self.grid = self.xpts * np.ones((10, 7))
        self.range_grid = (self.rows * np.ones((10), int)).astype(int)

    def tearDown(self):
        "Clean up any mock data files created by the tests."

    def _check_interface_function(self, ifunc, xvals, yvals, **kwargs):
        # The same (x, row) request expressed three equivalent ways must
        # yield identical results.
        v0 = ifunc(self.xpts, self.rows, xvals, yvals, **kwargs)
        v1 = ifunc(self.grid.flatten(), self.rows.flatten(), xvals, yvals, **kwargs)
        v2 = ifunc(self.grid, self.rows, xvals, yvals, **kwargs)
        # Smoke test with a plain 1-D row index array (result not compared).
        _ = ifunc(self.xpts, np.arange(7), xvals, yvals, **kwargs)
        assert np.allclose(v0, v1)
        assert np.allclose(v0, v2)

    def test_evaluate_hist_x_multi_y(self):
        """ Test the evaluate_hist_x_multi_y function """
        self._check_interface_function(qp.utils.evaluate_hist_x_multi_y,
                                       self.hbins, self.hpdfs)

    def test_evaluate_hist_multi_x_multi_y(self):
        """ Test the evaluate_hist_multi_x_multi_y function """
        self._check_interface_function(qp.utils.evaluate_hist_multi_x_multi_y,
                                       self.hbins2, self.hpdfs)

    def test_interpolate_x_multi_y(self):
        """ Test the interpolate_x_multi_y """
        self._check_interface_function(qp.utils.interpolate_x_multi_y,
                                       self.xvals, self.hpdfs, bounds_error=False, fill_value=0)

    def test_interpolate_multi_x_multi_y(self):
        """ Test the interpolate_multi_x_multi_y """
        self._check_interface_function(qp.utils.interpolate_multi_x_multi_y,
                                       self.xvals2, self.hpdfs, bounds_error=False, fill_value=0)

    def test_interpolate_multi_x_y(self):
        """ Test the interpolate_multi_x_y """
        self._check_interface_function(qp.utils.interpolate_multi_x_y,
                                       self.xvals2, self.yvals1d, bounds_error=False, fill_value=0)

if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import itsdangerous, json, atexit, traceback, logging
from flask import redirect, render_template, url_for, abort, \
flash, request
from flask_login import login_user, logout_user, current_user, login_required
from flask_mail import Message
from flask_admin import Admin, AdminIndexView, expose
from flask_admin.contrib.sqla import ModelView
from flask_admin.form import SecureForm
from app_manager import app, db, ts, mail, DAY
from forms import SignupForm, LoginForm, UsernameForm, ResetPasswordForm, \
ChangePasswordForm, NominationForm, VoteForm, BanForm, AdminForm, \
NomIDForm, PhaseNomForm, PhaseVoteForm, PhaseStaticForm, SetPhaseForm, \
ClearForm, RemoveNomForm
from models import User, Award, Nomination, State
from login_manager import login_manager
from dbutils import clear_noms, clear_votes
from werkzeug.exceptions import default_exceptions
from urllib.parse import urlparse, urljoin
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.base import JobLookupError
from logging.handlers import SMTPHandler
from io import StringIO
scheduler = BackgroundScheduler(timezone="US/Eastern")
@app.route("/", methods=["GET"])
def index():
    # `phase()` is presumably defined elsewhere in this module and reports
    # the current nomination/voting phase -- confirm.
    return render_template("index.html", phase=phase())
@app.route("/signup", methods=["GET", "POST"])
def signup():
    """Create an account and email a confirmation link."""
    if current_user.is_authenticated:
        return redirect(url_for("index"))
    form = SignupForm()
    if form.validate_on_submit():
        # Usernames are stored lowercase to make logins case-insensitive.
        user = User(username=form.username.data.lower(),
                    password=form.password.data)
        db.session.add(user)
        # Flush (not commit) so user.id exists; only commit once the
        # confirmation email was actually sent.
        db.session.flush()
        if send_confirm_link(user.id, user.email):
            db.session.commit()
            flash("Account created! Please click the confirmation link sent "
                  "to %s" % user.email, "success")
            return redirect(url_for("index"))
    return render_template("signup.html", form=form)
@app.route("/confirm/<token>", methods=["GET"])
def confirm_email(token):
    """Confirm a user's email address from a signed token link."""
    try:
        userID, email = ts.loads(token, salt="email-confirm-key", max_age=DAY)
    except itsdangerous.SignatureExpired:
        # Valid token, but older than a day: offer a fresh link.
        return render_template("activate_expired.html", token=token)
    except itsdangerous.BadData:
        # Fix: narrowed from a bare `except:` -- any tampered/malformed
        # token (all itsdangerous errors derive from BadData) is a 404.
        abort(404)
    user = User.query.filter_by(id=userID).first_or_404()
    if user.email != email:
        abort(404)  # this shouldn't ever happen
    if user.email_confirmed:
        return render_template("already_confirmed.html")
    user.email_confirmed = True
    db.session.commit()
    flash("Email confirmed! Sign in!", "success")
    return redirect(url_for("signin"))
@app.route("/newlink/<token>", methods=["GET"])
def new_link(token):
    """Resend a confirmation link for a (possibly expired) token."""
    try:
        userID, email = ts.loads(token, salt="email-confirm-key")  # ignore age
    except itsdangerous.BadData:
        # Fix: narrowed from a bare `except:` -- keep behavior (404 on any
        # bad token) but stop swallowing unrelated exceptions.
        abort(404)
    user = User.query.filter_by(id=userID).first_or_404()
    if user.email != email:
        abort(404)  # this shouldn't ever happen
    if send_confirm_link(userID, email):
        flash("New confirmation link sent, check your email!", "success")
        return redirect(url_for("index"))
    else:
        # send them back to the expired confirm page
        return redirect(url_for("confirm_email", token=token))
@app.route("/resend", methods=["GET", "POST"])
def resend():
    """Let a user request a fresh confirmation link by username."""
    form = UsernameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(
            username=form.username.data.lower()).first_or_404()
        if user.email_confirmed == True:
            flash("Your email is already confirmed!", "error")
        elif send_confirm_link(user.id, user.email):
            flash("New confirmation link sent, check your email!", "success")
        return redirect(url_for("index"))
    return render_template("resend.html", form=form)
@app.route("/signin", methods=["GET", "POST"])
def signin():
    """Authenticate a user; honours a same-host-validated ?next= redirect."""
    if current_user.is_authenticated:
        next_url = request.args.get("next")
        if not is_safe_url(next_url):
            abort(400)  # reject open-redirect attempts
        return redirect(next_url or url_for("index"))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(
            username=form.username.data.lower()).first_or_404()
        if not user.email_confirmed:
            flash("Please click the confirmation link sent to your email first",
                  "error")
        elif user.is_correct_password(form.password.data):
            if user.banned:
                flash("Your account has been banned", "error")
            elif login_user(user, remember=True):
                flash("Logged in successfully", "success")
                next_url = request.args.get("next")
                if not is_safe_url(next_url):
                    abort(400)
                return redirect(next_url or url_for("index"))
            else:
                flash("Account inactive", "error")
        else:
            flash("Password incorrect, try again", "error")
    return render_template("signin.html", form=form)
@app.route("/signout", methods=["GET"])
def signout():
    """Log the current user out (no-op for anonymous visitors), then go home."""
    if current_user.is_authenticated:
        logout_user()
        flash("Logged out", "success")
    return redirect(url_for("index"))
@app.route("/reset", methods=["GET", "POST"])
def reset():
    """Email a signed password-reset link to the account's address."""
    if current_user.is_authenticated:
        return redirect(url_for("index"))
    form = UsernameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(
            username=form.username.data.lower()).first_or_404()
        subject = "Password reset requested"
        token = ts.dumps(user.username, salt="recover-key")
        recover_url = url_for("reset_with_token", token=token, _external=True)
        html = render_template("email/recover.html", recover_url=recover_url)
        if send_email(user.email, subject, html):
            flash("A password reset link has sent to your email address", "success")
        return redirect(url_for("index"))
    return render_template("reset.html", form=form)
@app.route("/reset/<token>", methods=["GET", "POST"])
def reset_with_token(token):
    """Set a new password from a signed recovery token (one-day lifetime)."""
    if current_user.is_authenticated:
        return redirect(url_for("index"))
    try:
        username = ts.loads(token, salt="recover-key", max_age=DAY)
    except itsdangerous.SignatureExpired:
        return render_template("recover_expired.html")
    except:
        abort(404)  # tampered or garbage token
    form = ResetPasswordForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=username).first_or_404()
        user.password = form.password.data
        db.session.commit()
        flash("Password reset successfully! Sign in!", "success")
        return redirect(url_for("signin"))
    return render_template("reset_with_token.html", form=form)
@app.route("/changepass", methods=["GET", "POST"])
@login_required
def change_password():
    """Change the logged-in user's password after re-checking the old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.is_correct_password(form.currentpass.data):
            current_user.password = form.password.data
            db.session.commit()
            flash("Password changed!", "success")
            # re-login so the session stays valid after the credential change
            login_user(current_user, remember=True)
            return redirect(url_for("index"))
        else:
            flash("Current password incorrect, try again", "error")
    return render_template("change_password.html", form=form)
@app.route("/awards", methods=["GET", "POST"])
@login_required
def awards():
    """Phase-dependent awards page: 0 = static results, 2 = voting,
    otherwise the nomination form (phase 1)."""
    p = phase()
    if p == 0:
        return render_template("nominees.html", awards=list_awards())
    if p == 2:
        return render_template("voting.html", form=VoteForm(),
                               awards=list_awards())
    # else: nominations
    form = NominationForm()
    if form.validate_on_submit():
        award = Award.query.filter_by(id=form.award_id.data).first_or_404()
        award.nominations.append(Nomination(name=form.entry.data,
                                            creator=current_user))
        db.session.commit()
        flash("Nomination successful!", "success")
        return redirect(url_for("awards"))
    return render_template("nominations.html", form=form, awards=list_awards())
@app.route("/submit_vote", methods=["POST"])
def submit_vote():
    """AJAX endpoint toggling the current user's vote for a nomination.

    Always answers HTTP 200 with a JSON body so the client script can show
    the "message" field even for rejected requests.
    """
    result = { "success" : 0,
               "message" : "An error occurred" }
    if phase() != 2:
        result["message"] = "Not voting phase!"
        return json.dumps(result), 200 # return 200 so message displays
    if not current_user.is_authenticated:
        # rather than login_required, this allows returning a json
        result["message"] = "Not logged in"
        return json.dumps(result), 200
    form = VoteForm()
    # NOTE(review): the "or True" short-circuits validation entirely; looks
    # like a debugging leftover (or a deliberate CSRF bypass for this AJAX
    # call) -- confirm intent before removing it.
    if form.validate() or True:
        try:
            nom_id = int(form.nomid.data)
        except:
            return json.dumps(result), 200
        nom = Nomination.query.filter_by(id=nom_id).first()
        if nom is None:
            return json.dumps(result), 200
        for sel in current_user.selections:
            if sel in nom.award.nominations:
                # take away vote from other nom in this category
                # clicking same button will simply remove the vote
                current_user.selections.remove(sel)
                result["no_vote"] = str(sel.id)
                if sel == nom:
                    # we removed the vote, so we are done
                    result["success"] = 1
                    result["message"] = "Vote removed"
                    db.session.commit()
                    return json.dumps(result), 200
                break
        # only add vote if it was a different nomination's button
        nom.voters.append(current_user)
        result["success"] = 2
        result["message"] = "Vote submitted"
        result["vote"] = str(nom.id)
        db.session.commit()
    return json.dumps(result), 200
# Admin Interface
class MyAdminIndexView(AdminIndexView):
    """Flask-Admin index view restricted to admins, hosting the custom
    control panel: phase switching/scheduling, bans, admin rights,
    nomination removal and bulk clearing."""

    def is_accessible(self):
        """Only active, authenticated admin users may enter."""
        if not current_user.is_active or not current_user.is_authenticated:
            return False
        return current_user.is_admin

    def _handle_view(self, name, **kwds):
        """403 for logged-in non-admins; login redirect for anonymous users."""
        if not self.is_accessible():
            if current_user.is_authenticated:
                abort(403)
            else:
                return login_manager.unauthorized()

    @expose("/", methods=["GET", "POST"])
    def index(self):
        """Render the control panel and dispatch whichever form was submitted.

        Each form is recognised by its submit button carrying data; every
        successful action redirects (POST/redirect/GET) via check_full_index().
        """
        spform = SetPhaseForm()
        pnform = PhaseNomForm()
        pvform = PhaseVoteForm()
        psform = PhaseStaticForm()
        bform = BanForm()
        aform = AdminForm()
        nform = NomIDForm()
        cform = ClearForm()
        if ((spform.static.data or spform.nom.data or spform.vote.data) and
                spform.validate_on_submit()):
            self.set_phase(spform)
            return self.check_full_index()
        if ((pnform.pnon.data and pnform.validate_on_submit()) or
                pnform.pnoff.data):
            self.phase_sched(pnform, 1)
            return self.check_full_index()
        if ((pvform.pvon.data and pvform.validate_on_submit()) or
                pvform.pvoff.data):
            self.phase_sched(pvform, 2)
            return self.check_full_index()
        if ((psform.pson.data and psform.validate_on_submit()) or
                psform.psoff.data):
            self.phase_sched(psform, 0)
            return self.check_full_index()
        if (bform.ban.data or bform.unban.data) and bform.validate_on_submit():
            if self.ban(bform):
                return self.check_full_index()
        if (aform.give.data or aform.take.data) and aform.validate_on_submit():
            self.change_admin(aform)
            return self.check_full_index()
        if ((nform.rem.data or nform.rwarn.data or nform.rban.data) and
                nform.validate_on_submit()):
            self.remove_nom(nform.nomid.data, nform.rwarn.data, nform.rban.data)
            return self.check_full_index()
        if ((cform.cnoms.data or cform.cvotes.data) and
                cform.validate_on_submit()):
            self.clear(cform)
            return self.check_full_index()
        full = self.get_full()
        # pre-populate the schedule pickers with any stored run datetimes
        s = State.query.first()
        if s.dtnom is not None:
            pnform.dtnom.data = s.dtnom
        if s.dtvote is not None:
            pvform.dtvote.data = s.dtvote
        if s.dtstatic is not None:
            psform.dtstatic.data = s.dtstatic
        return self.render("admin/index.html", spform=spform, pnform=pnform,
            pvform=pvform, psform=psform, aform=aform, bform=bform, nform=nform,
            cform=cform, awards=list_awards(), full=full, phase=phase())

    @expose("/noms/<awd>", methods=["GET", "POST"])
    def list_noms(self, awd):
        """List one award's nominations with per-row remove/warn/ban actions."""
        form = RemoveNomForm()
        if form.validate_on_submit():
            self.remove_nom(form.nomid.data, form.warn.data, form.ban.data)
            return redirect(url_for("admin.list_noms", awd=awd))
        award = Award.query.filter_by(id=awd).first_or_404()
        return self.render("admin/list_noms.html", form=form, award=award)

    @expose("/guide", methods=["GET"])
    def guide(self):
        """Static admin how-to page."""
        return self.render("admin/guide.html")

    def set_phase(self, form):
        """Immediately switch the site phase per the pressed button."""
        p = 0 if form.static.data else 1 if form.nom.data else 2
        assign_phase(p)
        flash("Phase changed to %s" %
              ("static", "nominating", "voting")[p], "success")

    def clear(self, form):
        """Bulk-delete all nominations (votes first) or just all votes."""
        if form.cnoms.data:
            clear_votes() # must be done first
            clear_noms()
            flash("Cleared all nominations", "success")
        elif form.cvotes.data:
            clear_votes()
            flash("Cleared all votes", "success")
        else:
            abort(400)

    def phase_sched(self, form, p):
        """Schedule (or cancel) an automatic switch to phase *p* and persist
        the chosen datetime on the State row."""
        if p == 1:
            kwds = pndict
            cancel = form.pnoff.data
            dt = form.dtnom.data
            pname = "Nominating"
        elif p == 2:
            kwds = pvdict
            cancel = form.pvoff.data
            dt = form.dtvote.data
            pname = "Voting"
        else:
            kwds = psdict
            cancel = form.psoff.data
            dt = form.dtstatic.data
            pname = "Static"
        if cancel:
            try:
                scheduler.remove_job(kwds["id"])
                flash("Canceled %s Phase" % pname, "success")
            except JobLookupError:
                flash("%s Phase schedule not found or "
                      "already passed" % pname, "warning")
            dt = None  # also clears the stored datetime below
        else:
            scheduler.add_job(replace_existing=True,
                              run_date=dt, **kwds)
            flash("Scheduled %s Phase for %s Eastern" %
                  (pname, dt.strftime("%A %B %d %Y at %I:%M %p")), "success")
        s = State.query.first()
        if p == 1:
            s.dtnom = dt
        elif p == 2:
            s.dtvote = dt
        else:
            s.dtstatic = dt
        db.session.commit()

    def ban(self, bform):
        """Ban or unban a user, optionally notifying by email.

        Commits (and returns True) only when no email was requested or the
        notification was sent successfully.
        """
        user = User.query.filter_by(
            username=bform.banuser.data.lower()).first_or_404()
        if bform.ban.data:
            user.ban()
            msg = "Banned "
            if bform.email.data:
                subject = "Your account has been banned"
                html = render_template("email/ban.html", award_name=None)
                msg += "and notified "
        elif bform.unban.data:
            user.unban()
            msg = "Unbanned "
            if bform.email.data:
                subject = "Your account is no longer banned"
                html = render_template("email/unban.html")
                msg += "and notified "
        db.session.flush()
        if not bform.email.data or send_email(user.email, subject, html):
            db.session.commit()
            flash(msg + user.username, "success") # flash once commit passes
            return True
        return False

    def change_admin(self, aform):
        """Grant or revoke admin rights for the named user."""
        user = User.query.filter_by(
            username=aform.adminuser.data.lower()).first_or_404()
        if aform.give.data:
            user.give_admin()
            msg = "Made %s an admin" % user.username
        elif aform.take.data:
            user.take_admin()
            msg = "Removed %s as admin" % user.username
        db.session.commit()
        flash(msg, "success") # flash once commit passes

    def remove_nom(self, nomid, warn, ban):
        """Delete a nomination; optionally warn or ban+notify its creator.

        Commits only when no email is needed or the email was sent.
        """
        nom = Nomination.query.filter_by(id=nomid).first_or_404()
        awd = nom.award
        user = nom.creator
        db.session.delete(nom) # any of the buttons will remove the nom
        msgs = ["Removed %r ('%s' for '%s')" % (nom, nom.name, awd.name)]
        if warn:
            subject = "Inappropriate Content Warning"
            html = render_template("email/warning.html", award_name=awd.name)
            msgs.append("Warning sent to %s" % user.username)
        elif ban:
            user.ban()
            subject = "Your account has been banned"
            html = render_template("email/ban.html", award_name=awd.name)
            msgs.append("Banned and notified %s" % user.username)
        db.session.flush()
        if not (warn or ban) or send_email(user.email, subject, html):
            db.session.commit()
            for msg in msgs: # flash once commit passes
                flash(msg, "success")
            return True
        return False

    def check_full_index(self):
        """Redirect back to the panel, preserving the ?full view flag."""
        full = self.get_full()
        if full:
            return redirect("/admin/?full")
        else:
            return redirect("/admin/")

    def get_full(self):
        """True when the request carried a ?full query argument."""
        full = request.args.get("full")
        # if full appears as anything in request, render the full page
        return full is not None
class MyModelView(ModelView):
    """Base model admin: CSRF-protected forms, admin-only access (reusing
    the index view's checks), primary keys shown in list views."""
    form_base_class = SecureForm
    is_accessible = MyAdminIndexView.is_accessible
    _handle_view = MyAdminIndexView._handle_view
    column_display_pk = True
class UserView(MyModelView):
    """User admin; hides the password hash and session-token timestamp."""
    column_exclude_list = ("_password", "sessTokenTime")
# Mount the admin site and register CRUD views for every model.
admin = Admin(app, name="Kudos Admin", template_mode="bootstrap3",
              index_view=MyAdminIndexView())
admin.add_view(UserView(User, db.session))
admin.add_view(MyModelView(Award, db.session))
admin.add_view(MyModelView(Nomination, db.session))
admin.add_view(MyModelView(State, db.session))
def handle_error(e):
    """Render the shared error page; exceptions without an HTTP code map to 500."""
    status = getattr(e, "code", 500)
    return render_template("error.html", error=e), status
def init_error_mail():
    """Attach an SMTP logging handler that emails error tracebacks to the
    admin mailbox, tagging each report with the acting username."""
    class MySMTPHandler(SMTPHandler):
        def emit(self, record):
            # annotate the record with who triggered the error (if anyone)
            if current_user and current_user.is_authenticated:
                record.username = current_user.username
            else:
                record.username = None
            return super().emit(record)
        def getSubject(self, record):
            return f"{self.subject} ({record.levelname}) - {record.asctime}"
    fromaddr = app.config["MAIL_USERNAME"]
    tls = app.config.get("MAIL_USE_TLS", False)
    ssl = app.config.get("MAIL_USE_SSL", False)
    secure = () if tls or ssl else None
    # NOTE(review): MAIL_PORT_TLS is selected when SSL is enabled -- confirm
    # the config key naming is intentional.
    port = app.config["MAIL_PORT"] if not ssl else app.config["MAIL_PORT_TLS"]
    mail_handler = MySMTPHandler(
        mailhost=(app.config["MAIL_SERVER"], port),
        fromaddr=f"Kudos <{fromaddr}>",
        toaddrs=[fromaddr], # send it back to admin account
        subject="Kudos Failure",
        credentials=(fromaddr, app.config["MAIL_PASSWORD"]),
        secure=secure)
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(logging.Formatter(
        '[%(asctime)s] %(levelname)s in %(module)s by User <%(username)s>:\n'
        '%(message)s'
    ))
    app.logger.addHandler(mail_handler)
# Route every HTTP error code through the shared handler; only email
# tracebacks when not running in debug mode.
for code in default_exceptions:
    app.register_error_handler(code, handle_error)
if not app.debug:
    init_error_mail()
def send_confirm_link(userID, email):
    """Email a signed confirmation link for (userID, email); True on success."""
    subject = "Confirm your email"
    token = ts.dumps([userID, email], salt="email-confirm-key")
    confirm_url = url_for("confirm_email", token=token, _external=True)
    html = render_template("email/activate.html", confirm_url=confirm_url)
    return send_email(email, subject, html)
def try_send_msg(msg):
    """Send *msg*; on failure log the calling stack, flash an error,
    roll back any pending flush, and return False."""
    st = StringIO()
    traceback.print_stack(limit=50, file=st)
    try:
        mail.send(msg)
    except Exception as e:
        msg = str(e) + "\n\nCalling stack:\n" + st.getvalue() + "\n"
        app.logger.exception(msg)
        flash("Email send error, try again", "error")
        db.session.rollback() # assume we always want to undo flush
        return False
    return True
def send_email(email, subject, html, **kwds):
    """Build a "[KUDOS]"-prefixed message and hand it to try_send_msg."""
    message = Message("[KUDOS] " + subject, recipients=[email], html=html, **kwds)
    return try_send_msg(message)
def is_safe_url(target):
    """True iff *target* resolves to an http(s) URL on our own host."""
    ref = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    if candidate.scheme not in ("http", "https"):
        return False
    return ref.netloc == candidate.netloc
def phase():
    """Current site phase (0 static, 1 nominating, 2 voting) from State."""
    state = State.query.first()
    return state.phase
def list_awards():
    """All awards, sorted by their display order column."""
    ordered = Award.query.order_by(Award.order)
    return ordered.all()
def assign_phase(p):
    """Persist phase *p* on the singleton State row.

    Also the target callable of the scheduler jobs below.
    """
    s = State.query.first()
    s.phase = p
    db.session.commit()
# Keyword presets for the three phase-switch scheduler jobs; the fixed ids
# let phase_sched() cancel or replace them.
pndict = dict(
    func=assign_phase,
    args=[1],
    id="nom",
    name="Change phase to nominating")
pvdict = dict(
    func=assign_phase,
    args=[2],
    id="vote",
    name="Change phase to voting")
psdict = dict(
    func=assign_phase,
    args=[0],
    id="static",
    name="Change phase to static")
@app.before_first_request
def initScheduler():
    """Re-arm any persisted phase switches and start the scheduler."""
    # this implementation assumes there is only one dyno on heroku
    s = State.query.first()
    if s.dtnom is not None:
        scheduler.add_job(run_date=s.dtnom, **pndict)
    if s.dtvote is not None:
        scheduler.add_job(run_date=s.dtvote, **pvdict)
    if s.dtstatic is not None:
        scheduler.add_job(run_date=s.dtstatic, **psdict)
    scheduler.start()
    atexit.register(lambda: scheduler.shutdown())
# Local development entry point; production runs under a WSGI server.
if __name__ == "__main__":
    app.run(debug=True) # should only be on debug when run locally
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# Updates by Pablo Palafox 2021
import torch.nn as nn
import torch
import torch.nn.functional as F
import kornia
from utils import embedder
from utils import geometry_utils
class PoseDecoder(nn.Module):
    """MLP that regresses a per-point 3D translation.

    forward() consumes rows of [latent_code | xyz] and returns
    (xyz + delta, delta): the warped point plus the raw offset.
    Fix: removed a leftover debug ``print`` in the layer-construction loop
    (PoseDecoderSE3 builds its layers silently; this one should too).
    """

    def __init__(
        self,
        latent_size,
        dims,
        dropout=None,
        dropout_prob=0.0,
        norm_layers=(),
        latent_in=(),
        weight_norm=False,
        xyz_in_all=None,
        latent_dropout=False,
        positional_enc=False,
        n_positional_freqs=8,
        n_alpha_epochs=80,
    ):
        super(PoseDecoder, self).__init__()
        input_dim = 3   # raw xyz
        output_dim = 3  # predicted translation
        if positional_enc:
            # NeRF-style positional encoding with an annealed alpha window
            self.n_positional_freqs = n_positional_freqs
            self.pos_embedder, pos_embedder_out_dim = embedder.get_embedder_nerf(
                n_positional_freqs, input_dims=input_dim
            )
            input_dim = pos_embedder_out_dim
            self.n_alpha_epochs = n_alpha_epochs
            self.alpha_const = n_positional_freqs / n_alpha_epochs if n_alpha_epochs > 0 else self.n_positional_freqs
        dims = [latent_size + input_dim] + dims + [output_dim]
        self.num_layers = len(dims)
        self.norm_layers = norm_layers
        self.latent_in = latent_in
        self.latent_dropout = latent_dropout
        self.xyz_in_all = xyz_in_all
        self.weight_norm = weight_norm
        for l in range(0, self.num_layers - 1):
            if l + 1 in latent_in:
                # the next layer re-concatenates the input, so shrink this output
                out_dim = dims[l + 1] - dims[0]
            else:
                out_dim = dims[l + 1]
            if self.xyz_in_all and l != self.num_layers - 2:
                out_dim -= 3
            if weight_norm and l in self.norm_layers:
                setattr(self, "lin" + str(l), nn.utils.weight_norm(nn.Linear(dims[l], out_dim)))
            else:
                setattr(self, "lin" + str(l), nn.Linear(dims[l], out_dim))
            if (not weight_norm) and self.norm_layers is not None and l in self.norm_layers:
                setattr(self, "bn" + str(l), nn.LayerNorm(out_dim))
        self.relu = nn.ReLU()
        self.dropout_prob = dropout_prob
        self.dropout = dropout

    # input: N x (L+3)
    def forward(self, input, epoch=None):
        """Return (xyz + delta, delta) for rows of [latent | xyz]."""
        xyz = input[:, -3:]
        if hasattr(self, "pos_embedder"):
            # anneal the encoding window while n_alpha_epochs is active
            alpha = self.alpha_const * epoch if self.n_alpha_epochs > 0 else self.alpha_const
            input_pos_embed = self.pos_embedder(xyz, alpha)
            x = torch.cat([input[:, :-3], input_pos_embed], 1)
            input_embed = x.clone()
        else:
            if input.shape[1] > 3 and self.latent_dropout:
                latent_vecs = input[:, :-3]
                latent_vecs = F.dropout(latent_vecs, p=0.2, training=self.training)
                x = torch.cat([latent_vecs, xyz], 1)
            else:
                x = input
        for l in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(l))
            if l in self.latent_in:
                # skip connection: re-feed the (embedded) network input
                if hasattr(self, "pos_embedder"):
                    x = torch.cat([x, input_embed], 1)
                else:
                    x = torch.cat([x, input], 1)
            elif l != 0 and self.xyz_in_all:
                x = torch.cat([x, xyz], 1)
            x = lin(x)
            if l < self.num_layers - 2:
                if self.norm_layers is not None and l in self.norm_layers and not self.weight_norm:
                    bn = getattr(self, "bn" + str(l))
                    x = bn(x)
                x = self.relu(x)
                if self.dropout is not None and l in self.dropout:
                    x = F.dropout(x, p=self.dropout_prob, training=self.training)
        # Apply predicted translation
        xyz_warped = xyz + x
        return xyz_warped, x
class PoseDecoderSE3(nn.Module):
    """MLP that regresses a per-point SE(3) warp (nerfie-style).

    forward() consumes rows of [latent_code | xyz] and outputs 9 values:
    a log-quaternion rotation v, a pivot point s and a translation t; the
    returned pair is (warped_xyz, raw_9dof_output).
    """

    def __init__(
        self,
        latent_size,
        dims,
        dropout=None,
        dropout_prob=0.0,
        norm_layers=(),
        latent_in=(),
        weight_norm=False,
        xyz_in_all=None,
        latent_dropout=False,
        positional_enc=False,
        n_positional_freqs=8,
        n_alpha_epochs=80,
    ):
        super(PoseDecoderSE3, self).__init__()
        input_dim = 3   # raw xyz
        output_dim = 9  # v (3) + s (3) + t (3)
        if positional_enc:
            # NeRF-style positional encoding with an annealed alpha window
            self.n_positional_freqs = n_positional_freqs
            self.pos_embedder, pos_embedder_out_dim = embedder.get_embedder_nerf(
                n_positional_freqs, input_dims=input_dim
            )
            input_dim = pos_embedder_out_dim
            self.n_alpha_epochs = n_alpha_epochs
            self.alpha_const = n_positional_freqs / n_alpha_epochs if n_alpha_epochs > 0 else self.n_positional_freqs
        dims = [latent_size + input_dim] + dims + [output_dim]
        self.num_layers = len(dims)
        self.norm_layers = norm_layers
        self.latent_in = latent_in
        self.latent_dropout = latent_dropout
        self.xyz_in_all = xyz_in_all
        self.weight_norm = weight_norm
        for l in range(0, self.num_layers - 1):
            if l + 1 in latent_in:
                # the next layer re-concatenates the input, so shrink this output
                out_dim = dims[l + 1] - dims[0]
            else:
                out_dim = dims[l + 1]
            if self.xyz_in_all and l != self.num_layers - 2:
                out_dim -= 3
            if weight_norm and l in self.norm_layers:
                setattr(self, "lin" + str(l), nn.utils.weight_norm(nn.Linear(dims[l], out_dim)))
            else:
                setattr(self, "lin" + str(l), nn.Linear(dims[l], out_dim))
            if (not weight_norm) and self.norm_layers is not None and l in self.norm_layers:
                setattr(self, "bn" + str(l), nn.LayerNorm(out_dim))
        # Initialize last layer from a uniform distribution U(-1e-5, 1e-5) to initialize the deformation near the identity (nerfie)
        lin_last = getattr(self, "lin" + str(self.num_layers - 2))
        torch.nn.init.uniform_(lin_last.weight, a=-1e-5, b=1e-5)
        self.relu = nn.ReLU()
        self.dropout_prob = dropout_prob
        self.dropout = dropout

    # input: N x (L+3)
    def forward(self, input, epoch=None):
        """Return (warped_xyz, raw 9-dof output) for rows of [latent | xyz]."""
        xyz = input[:, -3:]
        if hasattr(self, "pos_embedder"):
            # anneal the encoding window while n_alpha_epochs is active
            alpha = self.alpha_const * epoch if self.n_alpha_epochs > 0 else self.alpha_const
            input_pos_embed = self.pos_embedder(xyz, alpha)
            x = torch.cat([input[:, :-3], input_pos_embed], 1)
            input_embed = x.clone()
        else:
            if input.shape[1] > 3 and self.latent_dropout:
                latent_vecs = input[:, :-3]
                latent_vecs = F.dropout(latent_vecs, p=0.2, training=self.training)
                x = torch.cat([latent_vecs, xyz], 1)
            else:
                x = input
        for l in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(l))
            if l in self.latent_in:
                # skip connection: re-feed the (embedded) network input
                if hasattr(self, "pos_embedder"):
                    x = torch.cat([x, input_embed], 1)
                else:
                    x = torch.cat([x, input], 1)
            elif l != 0 and self.xyz_in_all:
                x = torch.cat([x, xyz], 1)
            x = lin(x)
            if l < self.num_layers - 2:
                if self.norm_layers is not None and l in self.norm_layers and not self.weight_norm:
                    bn = getattr(self, "bn" + str(l))
                    x = bn(x)
                x = self.relu(x)
                if self.dropout is not None and l in self.dropout:
                    x = F.dropout(x, p=self.dropout_prob, training=self.training)
        #######################################################
        # Apply SE(3) transformation to input point xyz
        #######################################################
        # Extract v (rotation), s (pivot point), t (translation)
        v, s, t = x[:, :3], x[:, 3:-3], x[:, -3:]
        # Convert log-quaternion to unit quaternion
        # NOTE(review): this API moved to kornia.geometry.conversions in
        # newer kornia releases -- verify the pinned kornia version.
        q = kornia.quaternion_log_to_exp(v)
        # Points centered around pivot points s
        xyz_pivot = xyz - s
        # Apply rotation
        xyz_rotated = geometry_utils.rotate_points_with_quaternions(p=xyz_pivot, q=q)
        # Transform back to world space by adding s and also add the additional translation
        xyz_warped = xyz_rotated + s + t
        return xyz_warped, x
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-08-03 06:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the required 'major' field to AcademicData.

    default='None' backfills existing rows; preserve_default=False means the
    default is only used for this migration, not kept on the model.
    """

    dependencies = [
        ('hacker', '0017_auto_20180803_0633'),
    ]

    operations = [
        migrations.AddField(
            model_name='academicdata',
            name='major',
            field=models.CharField(default='None', help_text='What is your current major?', max_length=255),
            preserve_default=False,
        ),
    ]
|
nilq/baby-python
|
python
|
import re
from bottle import route, run, template
# shamelessly stolen from https://www.w3schools.com/howto/howto_js_sort_table.asp
js = """
<script>
function sortTable(n) {
var table, rows, switching, i, x, y, shouldSwitch, dir, switchcount = 0;
table = document.getElementById("tbl");
switching = true;
dir = "asc";
while (switching) {
switching = false;
rows = table.rows;
for (i = 1; i < (rows.length - 1); i++) {
shouldSwitch = false;
x = rows[i].getElementsByTagName("TD")[n].innerHTML.toLowerCase();
y = rows[i + 1].getElementsByTagName("TD")[n].innerHTML.toLowerCase();
if (y.includes("->")) {
xNum = x.match( /\d+/ )
x = Number(xNum[0]);
yNum = y.match( /\d+/ )
y = Number(yNum[0]);
}
if (dir == "asc") {
if (x > y) {
shouldSwitch = true;
break;
}
} else if (dir == "desc") {
if (x < y) {
shouldSwitch = true;
break;
}
}
}
if (shouldSwitch) {
rows[i].parentNode.insertBefore(rows[i + 1], rows[i]);
switching = true;
switchcount ++;
} else {
if (switchcount == 0 && dir == "asc") {
dir = "desc";
switching = true;
}
}
}
}
</script>
"""
css = """
<style>
table {
border-collapse: collapse;
}
th {
text-align: left;
}
th:hover {
cursor: pointer;
}
table, th, td {
border: 1px solid;
}
th, td {
padding-left: 10px;
padding-right: 10px;
padding-top: 5px;
}
tr:hover {
background-color: coral;
}
</style>
"""
head = f"<head>{js}{css}</head>"
def _strip_host_prefix(word):
    """Remove a leading '0.0.0.0:' bind-address prefix from *word*.

    BUG FIX: the original used word.lstrip('0.0.0.0:'), which strips any
    leading run of the CHARACTERS {'0', '.', ':'} -- it would also eat the
    leading zeros of a port (e.g. '0.0.0.0:0880->...'). Strip the exact
    prefix instead.
    """
    prefix = "0.0.0.0:"
    return word[len(prefix):] if word.startswith(prefix) else word


@route("/")
def index():
    """Render a sortable HTML table of published container ports.

    Reads `docker ps`-style output from /code/ports.txt; IPv6 (':::') words
    are dropped, the last word per line is the container name, and rows are
    pre-sorted by the first number found in each (the host port).
    """
    html = f"<html>{head}<body><table id=\"tbl\"><tr><th onclick=\"sortTable(0)\">Ports</th><th onclick=\"sortTable(1)\">Container Name</th></tr>"
    output = []
    with open('/code/ports.txt') as f:
        raw = f.readlines()
    for l in raw:
        words = [_strip_host_prefix(w.strip(',')) for w in l.split() if ":::" not in w]
        container = words[-1]
        ports = [w for w in words[:-1] if "->" in w]
        for port in ports:
            row = f"<tr><td>{port}</td><td>{container}</td></tr>"
            output.append(row)
    key = lambda s: int(re.search(r'\d+', s)[0])
    html += "".join(sorted(output, key=key)) + "</table></body></html>"
    return html
# Serve on all interfaces; debug=True is only appropriate behind a firewall.
run(host='0.0.0.0', port=80, debug=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
import alley_oop
|
nilq/baby-python
|
python
|
import glob
import json
import os
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from sklearn.preprocessing import MinMaxScaler
# Limb pairs of the OpenPose BODY_25 skeleton: consecutive entries
# (joint_a, joint_b) flattened into one list.
POSE_BODY_25_PAIRS_RENDER_GPU = \
    [1, 8, 1, 2, 1, 5, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11,
     8, 12, 12, 13, 13, 14, 1, 0, 0, 15, 15, 17, 0, 16, 16, 18, 14,
     19, 19, 20, 14, 21, 11, 22, 22, 23, 11, 24]
# Per-joint RGB triplets (flattened), indexed as joint_id * 3.
POSE_BODY_25_COLORS_RENDER_GPU = \
    [255, 0, 85,
     255, 0, 0,
     255, 85, 0,
     255, 170, 0,
     255, 255, 0,
     170, 255, 0,
     85, 255, 0,
     0, 255, 0,
     255, 0, 0,
     0, 255, 85,
     0, 255, 170,
     0, 255, 255,
     0, 170, 255,
     0, 85, 255,
     0, 0, 255,
     255, 0, 170,
     170, 0, 255,
     255, 0, 255,
     85, 0, 255,
     0, 0, 255,
     0, 0, 255,
     0, 0, 255,
     0, 255, 255,
     0, 255, 255,
     0, 255, 255]
def main():
    """CLI entry point: parse options and run the conversion."""
    parser = argparse.ArgumentParser(
        description="Convert poses to Parameter Space to Human Action Recognition"
    )
    parser.add_argument("--poses_base_dir", type=str,
                        default='/home/murilo/dataset/KTH',
                        help="Name of directory where input points are located.")
    parser.add_argument("--input_dir", type=str,
                        default='2DPoses',
                        help="Name of directory to output computed features.")
    parser.add_argument("--output_images_dir", type=str,
                        default='2DPoses_SpaceParam_Images',
                        help="Name of directory to output Parameter Space images.")
    parser.add_argument("--image_height", type=int,
                        default='240',
                        help="(Frame Size)Image height to compute max distance in Parameter Space.")
    parser.add_argument("--image_width", type=int,
                        default='320',
                        help="(Frame Size)Image width to compute max distance in Parameter Space.")
    parser.add_argument("--draw_body_ids", type=int,
                        default='1',
                        help="Whether draw body joint ids in image with points in Parameter Space.")
    parser.add_argument("--number_frames", type=int,
                        default=20,
                        help="Number of frames to extract features.")
    parser.add_argument("--stride", type=int,
                        default=1,
                        help="Stride to compute features from the frames.")
    args = parser.parse_args()
    convert_parameter_space(args)
def convert_parameter_space(args):
    """Walk the pose directories and, per sliding window of frames, draw one
    trajectory image per limb (14 limbs) in (theta, rho) parameter space."""
    # here compute image diagonal = max distance in Parameter Space
    max_distance = int(((args.image_height ** 2) + (args.image_width ** 2)) ** (1 / 2))
    print(max_distance)
    thetas = np.linspace(-np.pi / 2, np.pi / 2, 180)
    #poses_dir = os.path.join(args.poses_base_dir, args.input_dir)
    points = 14  # number of tracked limbs (unused below; range(14) is hard-coded)
    for root, directories, filenames in os.walk(os.path.join(args.poses_base_dir, args.input_dir)):
        for directory in directories:
            video_dir = os.path.join(root, directory)
            print(video_dir)
            frames = sorted(glob.glob(video_dir + '/*.json'))
            if len(frames) > 0:
                # slide a window of number_frames frames with the given stride
                for x in range(0, len(frames), args.stride):
                    if x + args.number_frames < len(frames):
                        img_parameter_traj = {}
                        draw = {}
                        for u in range(14):
                            img_parameter_traj[u] = Image.new('RGB', (180 + 20, int(max_distance)), color='black')
                            draw[u] = ImageDraw.Draw(img_parameter_traj[u])
                        prev_points_parameter_space = None
                        for y in range(x, x + args.number_frames + 1):
                            body_parts = read_body_parts_file(frames[y])
                            if len(body_parts) > 0:
                                # compute parameter space points and draw image with points
                                points_parameter_space = \
                                    compute_parameter_space(body_parts, max_distance, thetas)
                                if prev_points_parameter_space is None:
                                    prev_points_parameter_space = points_parameter_space
                                else:
                                    # NOTE(review): points_parameter_space only holds keys for
                                    # limbs with valid coordinates; range(len(...)) assumes the
                                    # keys 0..len-1 are all present and can raise KeyError for
                                    # partially detected bodies -- verify with real data.
                                    for a in range(len(points_parameter_space)):
                                        #for a in [2,3,4,5]:
                                        #if 1 == 1:
                                        #a = 4
                                        x1 = prev_points_parameter_space[a][0]
                                        y1 = prev_points_parameter_space[a][1]
                                        x2 = points_parameter_space[a][0]
                                        y2 = points_parameter_space[a][1]
                                        color_id = points_parameter_space[a][2]
                                        shape = (x1, y1, x2, y2)
                                        draw[a].line(shape, fill=get_color(color_id))
                                        e_size = 2
                                        draw[a].ellipse((x1 - e_size, abs(y1) - e_size, x1 + e_size, abs(y1) + e_size),
                                                        fill=get_color(color_id))
                                        draw[a].ellipse((x2 - e_size, abs(y2) - e_size, x2 + e_size, abs(y2) + e_size),
                                                        fill=get_color(color_id))
                                    prev_points_parameter_space = points_parameter_space
                        images_dir = video_dir.replace(args.input_dir, args.output_images_dir)
                        #images_dir, video_name = os.path.split(images_dir)
                        if not os.path.exists(images_dir):
                            os.makedirs(images_dir)
                        for i in range(14):
                            file = os.path.join(images_dir, str(i) + '_'+ str(x) + '_trajectories.png')
                            img_parameter_traj[i].save(file)
def read_body_parts_file(key_points_file):
    """Load OpenPose 'part_candidates' from a JSON file.

    Returns a dict mapping int joint id -> flat [x, y, conf, ...] list;
    empty dict when the file holds no candidates.
    """
    with open(key_points_file) as fp:
        candidates = json.load(fp)['part_candidates'][0]
    return {int(part_id): list(values) for part_id, values in candidates.items()}
def compute_parameter_space(body_parts, max_distance, thetas, draw_body_ids=True):
    """Map each of the 14 limbs to a Hough-like (theta_bin, rho, color) point.

    Only limbs whose two endpoints have positive coordinates are included,
    so the returned dict's keys may be a subset of 0..13.
    (max_distance and draw_body_ids are currently unused here.)
    """
    # Create image degrees x max_distance
    points_parameter_space = {}
    for i in range(0, 14, 1):
        degree = degree_disc = theta = rho1 = rho2 = 0
        x1, y1, x2, y2, color_id, id1, id2 = return_body_points_coord(i, body_parts)
        if x1 > 0 and y1 > 0 and x2 > 0 and y2 > 0:
            #print(i)
            # print('x1:\t%i\ty1:\t%i\t\tx2:\t%i\ty2:\t%i' % (x1, y1, x2, y2))
            if y1 - y2 != 0:
                theta = np.arctan((x2 - x1) / (y1 - y2))
            else:
                theta = 0  # vertical distance zero -> treat the limb as axis-aligned
            # here convert theta from radians to degrees
            degree = round(theta * (180 / np.pi))
            # here find theta in thetas discrete list (only for image plot)
            degree_disc = min(range(len(thetas)), key=lambda x: abs(thetas[x] - theta))
            # position_min_degree = min(thetas, key=lambda x: abs(x - theta))
            # compute rho from theta (rho2 is computed but not stored)
            rho1 = x1 * np.cos(theta) + y1 * np.sin(theta)
            rho2 = x2 * np.cos(theta) + y2 * np.sin(theta)
            #print(rho1, rho2)
            #print(int(rho1), int(degree), x1, y1)
            points_parameter_space[i] = (degree_disc, rho1, color_id)
            # points_hough[i] = (degree, degree_disc, theta, int(rho))
    return points_parameter_space
# Logical limb index (0..13) -> pair index into POSE_BODY_25_PAIRS_RENDER_GPU
# (multiplied by 2 to get the flat offset of the joint pair).
_LIMB_TO_PAIR = {
    0: 13,   # 1 => 0  Neck
    1: 0,    # 1 => 8  Upper body
    2: 3,    # 2 => 3  Right Arm
    3: 4,    # 3 => 4  Right Forearm
    4: 5,    # 5 => 6  Left Arm
    5: 6,    # 6 => 7  Left Forearm
    6: 8,    # 9 => 10 Right Thigh
    7: 9,    # 10 => 11 Right Leg
    8: 11,   # 12 => 13 Left Thigh
    9: 12,   # 13 => 14 Left Leg
    10: 7,   # 8 => 9  Right Hip
    11: 10,  # 8 => 12 Left Hip
    12: 1,   # 1 => 2  Right Shoulder
    13: 2,   # 1 => 5  Left Shoulder
}


def return_body_points_coord(i, body_parts):
    """Resolve limb *i* to its two best joint detections.

    Returns (x1, y1, x2, y2, color_id, id1, id2); all zeros when either
    endpoint has no candidates.
    """
    x1 = y1 = x2 = y2 = color_id = id1 = id2 = 0
    base = _LIMB_TO_PAIR.get(i, 0) * 2
    joint_a = POSE_BODY_25_PAIRS_RENDER_GPU[base]
    joint_b = POSE_BODY_25_PAIRS_RENDER_GPU[base + 1]
    if len(body_parts[joint_a]) > 0 and len(body_parts[joint_b]) > 0:
        x1, y1 = get_max_prob(body_parts[joint_a])
        x2, y2 = get_max_prob(body_parts[joint_b])
        color_id = joint_b * 3
        id1 = joint_a
        id2 = joint_b
    return x1, y1, x2, y2, color_id, id1, id2
def draw_body(body_parts, height, width):
    """Debug helper: render the detected skeleton onto a black canvas,
    show it and save it as 'pil_red.png'."""
    img = Image.new('RGB', (width, height), color='black')
    draw = ImageDraw.Draw(img)
    # plot each joint's best candidate as a colored point
    for k in sorted(body_parts):
        if len(body_parts[k]) > 0:
            x, y = get_max_prob(body_parts[k])
            draw.point((x, y), fill=get_color(k * 3))
    ctd = 0
    # connect every BODY_25 joint pair whose endpoints were both detected
    for x in range(0, len(POSE_BODY_25_PAIRS_RENDER_GPU), 2):
        print(x, x + 1)
        print(POSE_BODY_25_PAIRS_RENDER_GPU[x], POSE_BODY_25_PAIRS_RENDER_GPU[x + 1])
        print(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]], body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
        print('\n')
        if (len(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]]) > 0 and len(
                body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]]) > 0):
            x1, y1 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x]])
            x2, y2 = get_max_prob(body_parts[POSE_BODY_25_PAIRS_RENDER_GPU[x + 1]])
            draw.line((x1, y1, x2, y2), fill=get_color(POSE_BODY_25_PAIRS_RENDER_GPU[x + 1] * 3), width=1)
            ctd = ctd + 1
    print(ctd)
    img.show()
    img.save('pil_red.png')
def get_max_prob(body_part):
    """Return the (x, y) coordinates with the highest confidence.

    `body_part` is a flat list of (x, y, confidence) triples; the integer
    coordinates of the most confident triple are returned, or (0, 0) when
    the list is empty or no confidence exceeds zero.
    """
    best_x, best_y, best_prob = 0, 0, 0
    for i in range(0, len(body_part), 3):
        candidate = body_part[i + 2]
        if candidate > best_prob:
            best_prob = float(candidate)
            best_x = int(body_part[i])
            best_y = int(body_part[i + 1])
    return best_x, best_y
def get_color(k):
    """Return the (R, G, B) triple starting at index `k` of the OpenPose
    colour table (callers pass keypoint_index * 3)."""
    return POSE_BODY_25_COLORS_RENDER_GPU[k], \
           POSE_BODY_25_COLORS_RENDER_GPU[k + 1], \
           POSE_BODY_25_COLORS_RENDER_GPU[k + 2]
if __name__ == "__main__":
    # Script entry point; main() is expected to be defined earlier in this file.
    main()
|
nilq/baby-python
|
python
|
from flask import render_template,redirect,session,request, flash
from flask_app import app
from ..models import user, bag
@app.route("/bag/create")
def new_bag():
    """Render the add-bag form for the logged-in user."""
    # Anonymous visitors are sent back to the landing page.
    if 'user_id' not in session:
        return redirect('/')
    data = {
        "id": session['user_id']
    }
    return render_template("add_bag.html", user=user.User.get_by_id(data))
@app.route('/bag/new', methods=['POST'])
def create_bag():
    """Validate the submitted form and persist a new bag for the session user."""
    if 'user_id' not in session:
        return redirect('/logout')
    # Send the user back to the form on invalid input.
    if not bag.Bag.validate_bag(request.form):
        return redirect('/bag/create')
    data = {
        "name": request.form["name"],
        "driver": request.form["driver"],
        "woods": request.form["woods"],
        "hybrids": request.form["hybrids"],
        "irons": request.form["irons"],
        "wedges": request.form["wedges"],
        "putter": request.form["putter"],
        "user_id": session["user_id"]
    }
    bag.Bag.create_bag(data)
    return redirect('/dashboard')
# show specific bag
@app.route('/bag/<int:id>')
def show(id):
    """Display a single bag alongside the logged-in user's details."""
    if 'user_id' not in session:
        return redirect('/')
    data = {
        "id": id
    }
    user_data = {
        "id": session['user_id']
    }
    return render_template("show_bag.html", bag=bag.Bag.get_by_id(data), user=user.User.get_by_id(user_data))
@app.route('/edit/<int:id>')
def edit_bag(id):
    """Render the edit form pre-filled with the bag identified by `id`."""
    if 'user_id' not in session:
        return redirect('/logout')
    data = {
        "id": id
    }
    user_data = {
        "id": session['user_id']
    }
    return render_template("edit_bag.html", edit=bag.Bag.get_by_id(data), user=user.User.get_by_id(user_data))
@app.route("/bag/update", methods=['POST'])
def update_bag():
    """Apply the submitted form values to an existing bag (id from the form)."""
    if 'user_id' not in session:
        return redirect('/logout')
    # NOTE(review): validation failure redirects to the *create* form, which
    # loses the edit context (the bag id) — confirm whether '/edit/<id>' was
    # intended here.
    if not bag.Bag.validate_bag(request.form):
        return redirect('/bag/create')
    data = {
        "name": request.form["name"],
        "driver": request.form["driver"],
        "woods": request.form["woods"],
        "hybrids": request.form["hybrids"],
        "irons": request.form["irons"],
        "wedges": request.form["wedges"],
        "putter": request.form["putter"],
        "id": request.form['id']
    }
    bag.Bag.update(data)
    return redirect('/dashboard')
@app.route("/like", methods=["POST"])
def like():
    """Record the logged-in user's like for a bag, then return to the dashboard."""
    # BUG FIX: every other route guards against unauthenticated access, but this
    # one did not — session['user_id'] below raised KeyError for anonymous users.
    if 'user_id' not in session:
        return redirect('/logout')
    data = {
        'bag_id': request.form['bag_id'],
        'user_id': session['user_id']
    }
    user.User.like(data)
    return redirect("/dashboard")
@app.route("/bag/delete/<int:id>")
def delete(id):
    """Delete the bag identified by `id` and return to the dashboard.

    NOTE(review): ownership of the bag is not checked — any logged-in user
    can delete any bag id; confirm whether that is intended.
    """
    if 'user_id' not in session:
        return redirect('/logout')
    data = {
        "id": id
    }
    bag.Bag.delete(data)
    return redirect('/dashboard')
|
nilq/baby-python
|
python
|
import os
import zipfile
from arelle.CntlrCmdLine import parseAndRun
# from https://specifications.xbrl.org/work-product-index-registries-units-registry-1.0.html
# The registry suite lives inside a zip archive; arelle addresses members
# of the archive directly through the extended path syntax below.
REGISTRY_CONFORMANCE_SUITE = 'tests/resources/conformance_suites/utr/registry/utr-conf-cr-2013-05-17.zip/utr-conf-cr-2013-05-17/2013-05-17'
STRUCTURE_CONFORMANCE_SUITE_ZIP = 'tests/resources/conformance_suites/utr/structure/utr-structure-conf-cr-2013-11-18.zip'
STRUCTURE_CONFORMANCE_SUITE = os.path.join(STRUCTURE_CONFORMANCE_SUITE_ZIP, 'conf/utr-structure')
# Arguments shared by every arelle invocation below.
BASE_ARGS = [
    '--testcaseResultsCaptureWarnings',
    '--utr',
    '--validate',
]
REGISTRY_ARGS = BASE_ARGS + [
    '--file', os.path.join(REGISTRY_CONFORMANCE_SUITE, 'index.xml'),
    '--utrUrl', 'tests/resources/conformance_suites/utr/registry/utr.xml',
    '--csvTestReport', 'UTRunit-report.csv',
    '--logFile', 'UTRunit-log.txt',
]
STRUCTURE_ARGS = BASE_ARGS + [
    '--file', os.path.join(STRUCTURE_CONFORMANCE_SUITE, 'index.xml'),
    '--utrUrl', os.path.join(STRUCTURE_CONFORMANCE_SUITE, 'utr-for-structure-conformance-tests.xml'),
    '--csvTestReport', 'UTRstr-report.csv',
    '--logFile', 'UTRstr-log.txt',
]
if __name__ == "__main__":
    # Run the three parts of the UTR conformance suite in sequence.
    print('Running registry tests...')
    parseAndRun(REGISTRY_ARGS)
    print('Running structure tests...')
    parseAndRun(STRUCTURE_ARGS)
    print('Running malformed UTRs tests...')
    # Collect the malformed UTR files shipped inside the structure suite zip.
    malformed_utr_files = []
    with zipfile.ZipFile(STRUCTURE_CONFORMANCE_SUITE_ZIP, 'r') as zipf:
        for f in zipfile.Path(zipf, 'conf/utr-structure/malformed-utrs/').iterdir():
            if f.is_file() and f.name.endswith('.xml'):
                malformed_utr_files.append((f.at, f.name))
    # Validate a known-good instance against each malformed UTR in turn,
    # writing one report/log pair per UTR file.
    for path_in_zip, name in malformed_utr_files:
        basename = name.removesuffix('.xml')
        args = BASE_ARGS + [
            '--file', os.path.join(STRUCTURE_CONFORMANCE_SUITE, 'tests', '01-simple', 'simpleValid.xml'),
            '--utrUrl', os.path.join(STRUCTURE_CONFORMANCE_SUITE_ZIP, path_in_zip),
            '--csvTestReport', f'UTRstr-report-{basename}.csv',
            '--logFile', f'UTRstr-log-{basename}.txt',
        ]
        parseAndRun(args)
|
nilq/baby-python
|
python
|
import os
import torch as torch
import numpy as np
from io import BytesIO
import scipy.misc
#import tensorflow as tf
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torch.autograd import Variable
from matplotlib import pyplot as plt
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from skimage import io
TILE_SIZE = 256
BASE_DIR = '/raid.dell1/world/'
def pil_loader(path):
    """Load the image at `path` and return it as an RGB numpy array.

    Opens via an explicit file handle to avoid ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as f:
        img = Image.open(f)
        return np.array(img.convert('RGB'))
def xy_from_geodetic(lat, lon):
    """Convert latitude/longitude in degrees to Web-Mercator [0, 1) map space.

    Inputs are clipped to the Mercator-valid range before projection;
    numpy arrays are supported as well as scalars.
    """
    lat = np.clip(lat, -85.05, 85.05)            # Mercator diverges at the poles
    lon = np.clip(lon, -179.9999999, 179.9999999)
    x = 0.5 + lon / 360.0
    sin_lat = np.sin(np.radians(lat))
    y = 0.5 - np.log((1 + sin_lat) / (1 - sin_lat)) / (4 * np.pi)
    return x, y
def get_tile_idx(x, y, zoom):
    """Return the quadtree path string for normalized (x, y) at `zoom` levels.

    Coordinates outside [0, 1) yield 'NULL'; zoom 0 yields the empty string.
    Quadrant digits: 0 top-left, 1 top-right, 2 bottom-left, 3 bottom-right.
    """
    if not (0 <= x < 1.0 and 0 <= y < 1.0):
        return 'NULL'
    if zoom == 0:
        return ''
    quadrant = '0123'[(2 if y >= .5 else 0) + (1 if x >= .5 else 0)]
    # Rescale into the chosen quadrant and recurse one level deeper.
    return quadrant + get_tile_idx(x * 2 % 1, y * 2 % 1, zoom - 1)
def get_tile_pos(x, y, zoom):
    """Recursively rescale (x, y) by a factor of 2 per zoom level.

    NOTE(review): the `% 2` (rather than `% 1`) and the unconditional print
    look like debug leftovers — the only call site in get_custom_tile is
    commented out.  Confirm intent before reusing this helper.
    """
    print(x, y)
    if zoom == 0:
        return x, y
    return get_tile_pos(x*2 % 2, y*2 % 2, zoom - 1)
def get_tile(idx):
    """Load the TILE_SIZE x TILE_SIZE RGB tile stored under quadtree path `idx`.

    The sentinel 'NULL' (out-of-range coordinates) yields an all-black tile.
    """
    if idx == 'NULL':
        return np.zeros((TILE_SIZE, TILE_SIZE, 3), dtype=np.uint8)
    # Tiles live at BASE_DIR/<q>/<q>/.../img.jpeg, one directory per quadrant digit.
    sub_path = ''.join([a + '/' for a in idx])
    path = BASE_DIR + sub_path + 'img.jpeg'
    return pil_loader(path)
def get_custom_tile(x, y, zoom):
    """Return a TILE_SIZE x TILE_SIZE crop centred on normalized (x, y) at `zoom`.

    Builds a 2x2 mosaic of the four tiles surrounding the point, then cuts
    the centred window out of the mosaic so the requested point sits in the
    middle of the returned image.
    """
    assert 0 <= x < 1
    assert 0 <= y < 1
    assert zoom >= 1
    zoom = int(zoom)
    tiles = []
    delta = (.5)**(zoom + 1)  # half of one tile's extent at this zoom level
    for x_off in [-delta, delta]:
        tiles.append([])
        for y_off in [-delta, delta]:
            tiles[-1].append(get_tile(get_tile_idx(x + x_off, y + y_off, zoom)))
        tiles[-1] = np.vstack(tiles[-1])
    tiles = np.hstack(tiles)
    #x_pos, y_pos = get_tile_pos(x,y,zoom)
    # Map (x, y) into the pixel frame of the 2x2 mosaic, one zoom level at a time.
    for i in range(zoom):
        x = (x - .25) * 2 % 1.0 + .5
        y = (y - .25) * 2 % 1.0 + .5
    x_pix = int((x * TILE_SIZE))
    y_pix = int((y * TILE_SIZE))
    return tiles[y_pix - TILE_SIZE//2: y_pix + TILE_SIZE//2,
                 x_pix - TILE_SIZE//2: x_pix + TILE_SIZE//2]
def get_custom_tile_geodetic(lat, lon, zoom):
    """Return a centred tile crop for a latitude/longitude point (degrees)."""
    x, y = xy_from_geodetic(lat, lon)
    return get_custom_tile(x, y, zoom)
class SatImageDataset(Dataset):
    """Dataset of satellite tiles sampled uniformly over the unit square.

    Each sample is a dict {'image': (transformed) tile, 'meta': FloatTensor
    of (x, y, zoom)}.  Zoom is currently pinned to 8 for every sample.
    """
    def __init__(self, transform=None, size=1000000, max_zoom=8):
        self.size = size
        self.transform = transform
        # Columns: x, y, zoom.
        self.samples = np.random.uniform(0.0, 1.0, size=(self.size, 3))
        self.samples[:, 2] = 8.0  # np.floor(max_zoom*self.samples[:,2]) + 1.0

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample = self.samples[idx]
        # Render the tile for this (x, y, zoom) triple from disk.
        image = Image.fromarray(get_custom_tile(*sample))
        if self.transform:
            image = self.transform(image)
        meta = torch.FloatTensor(sample)
        sample = {'image': image, 'meta': meta}
        return sample
class dataloader:
    """Progressive-growing data loader wrapper.

    Holds a SatImageDataset/DataLoader pair whose image size and batch size
    depend on the current resolution level; call `renew(resl)` before
    iterating and whenever the resolution changes.
    """
    def __init__(self, config):
        # Batch size per image resolution; change according to available GPU memory.
        self.batch_table = {4:32, 8:32, 16:32, 32:16, 64:8, 128:8, 256:12, 512:3, 1024:1}
        self.batchsize = int(self.batch_table[pow(2, 2)])  # we start from 2^2 = 4
        self.imsize = int(pow(2, 2))
        self.num_workers = 0
        self._iterator = None  # cached iterator over self.dataloader

    def renew(self, resl):
        """Rebuild dataset and loader for resolution 2**resl."""
        print('[*] Renew dataloader configuration, load data from {}.'.format('raid.dell1'))
        self.batchsize = int(self.batch_table[pow(2, resl)])
        self.imsize = int(pow(2, resl))
        self.dataset = SatImageDataset(
            transform=transforms.Compose([
                transforms.Resize(size=(self.imsize, self.imsize), interpolation=Image.NEAREST),
                transforms.ToTensor(),
            ]))
        self.dataloader = DataLoader(
            dataset=self.dataset,
            batch_size=self.batchsize,
            shuffle=True,
            num_workers=self.num_workers
        )
        self._iterator = None  # invalidate any iterator over the old loader

    def __iter__(self):
        self._iterator = iter(self.dataloader)
        return self._iterator

    def __next__(self):
        # BUG FIX: torch's DataLoader is iterable but not an iterator, so the
        # original `next(self.dataloader)` always raised TypeError.  Advance a
        # cached iterator instead, creating one on first use.
        if self._iterator is None:
            self._iterator = iter(self.dataloader)
        return next(self._iterator)

    def __len__(self):
        return len(self.dataloader.dataset)

    def get_batch(self):
        """Return a single batch from a fresh iterator."""
        dataIter = iter(self.dataloader)
        return next(dataIter)
|
nilq/baby-python
|
python
|
import logging
from datetime import datetime, timezone
from brownie import chain
from yearn.historical_helper import export_historical, time_tracking
from yearn.networks import Network
from yearn.treasury.treasury import StrategistMultisig
from yearn.utils import closest_block_after_timestamp
logger = logging.getLogger('yearn.historical_sms_exporter')
def main():
    """Export historical SMS (strategist multisig) metrics from now back to
    the chain-specific start of SMS activity."""
    start = datetime.now(tz=timezone.utc)
    # Earliest meaningful timestamp per network.
    end = {
        Network.Mainnet: datetime(2021, 1, 28, 9, 10, tzinfo=timezone.utc),  # first inbound sms tx
        Network.Fantom: datetime(2021, 6, 17, tzinfo=timezone.utc),  # Fantom SMS deployed
    }[chain.id]
    # Prometheus query used to decide which snapshots are already exported.
    data_query = {
        Network.Mainnet: 'sms_assets{network="ETH"}',
        Network.Fantom: 'sms_assets{network="FTM"}',
    }[chain.id]
    export_historical(
        start,
        end,
        export_chunk,
        export_snapshot,
        data_query
    )
def export_chunk(chunk, export_snapshot_func):
    """Export every snapshot in `chunk`, sharing one StrategistMultisig instance."""
    treasury = StrategistMultisig()
    for snap in chunk:
        export_snapshot_func({
            'treasury': treasury,
            'snapshot': snap,
            'ts': snap.timestamp(),
            'exporter_name': 'historical_sms',
        })
@time_tracking
def export_snapshot(sms, snapshot, ts, exporter_name):
    """Export one SMS snapshot at the first block after timestamp `ts`."""
    block = closest_block_after_timestamp(ts)
    assert block is not None, "no block after timestamp found"
    sms.export(block, ts)
    logger.info("exported SMS snapshot %s", snapshot)
|
nilq/baby-python
|
python
|
def write_fbk(file_name, feat_path):
    """Append a filterbank-feature path to every line of a script file, in place.

    Each line of `file_name` must contain an audio path of the form
    ".../wav/<name>.<ext>"; the line is rewritten as
    "<original line> <feat_path><name>.fbk".
    """
    with open(file_name, 'r') as f:
        lines = f.readlines()
    for lin_num, x in enumerate(lines):
        # <name> is the path component between "/wav/" and the first dot.
        audio_name = x.split("/wav/")[1].split(".")[0]
        feat_name = ''.join([feat_path, audio_name, '.fbk'])
        lines[lin_num] = ''.join([x.strip(), ' ', feat_name, '\n'])
    with open(file_name, 'w') as f:
        # Write all rewritten lines in one call instead of one write per line.
        f.writelines(lines)
def main():
    # Hard-coded experiment paths; adjust when rerunning for another model.
    file_name = '/data/mifs_scratch/mjfg/zs323/yr4project/speechchain/egs/babel/asrtts/exp/tts_/outputs_snapshot.ep.200_denorm/convert/lib/coding/segmented_test.dev.fbk'
    feat_path = '/data/mifs_scratch/mjfg/zs323/yr4project/speechchain/egs/babel/asrtts/exp/tts_/outputs_snapshot.ep.200_denorm/fbk/'
    write_fbk(file_name, feat_path)

if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import numpy as np
from skimage.morphology import label
class Dice(nn.Module):
    """The Dice score.
    """
    def __init__(self):
        super().__init__()

    def forward(self, output, target):
        """
        Args:
            output (torch.Tensor) (N, C, *): The model output.
            target (torch.LongTensor) (N, 1, *): The data target.
        Returns:
            metric (torch.Tensor) (C): The dice scores for each class.
        """
        # Get the one-hot encoding of the prediction and the ground truth label.
        pred = output.argmax(dim=1, keepdim=True)
        pred = torch.zeros_like(output).scatter_(1, pred, 1)
        target = torch.zeros_like(output).scatter_(1, target, 1)
        # Calculate the dice score: 2|A∩B| / (|A| + |B|) per (sample, class).
        reduced_dims = list(range(2, output.dim()))  # (N, C, *) --> (N, C)
        intersection = 2.0 * (pred * target).sum(reduced_dims)
        union = pred.sum(reduced_dims) + target.sum(reduced_dims)
        # Epsilon guards against division by zero when a class is absent from
        # both prediction and target.
        score = intersection / (union + 1e-10)
        return score.mean(dim=0)
class Accuracy(nn.Module):
    """The accuracy for the classification task.
    """
    def __init__(self):
        super().__init__()

    def forward(self, output, target):
        """
        Args:
            output (torch.Tensor) (N, C): The model output.
            target (torch.LongTensor) (N): The data target.
        Returns:
            metric (torch.Tensor) (0): The accuracy.
        """
        predictions = output.argmax(dim=1)
        correct = (predictions == target).float()
        return correct.mean()
class FalseNegativeSize(nn.Module):
    """The false negative target size.
    """
    def __init__(self):
        super().__init__()

    def forward(self, output, target):
        """
        Args:
            output (torch.Tensor) (N, C, *): The model output.
            target (torch.LongTensor) (N, 1, *): The data target.
        Returns:
            metric (torch.Tensor) (C): The average false negative size for each class.
        """
        scores = []
        # Get the one-hot encoding of the prediction and the ground truth label.
        pred = output.argmax(dim=1, keepdim=True)
        pred = torch.zeros_like(output).scatter_(1, pred, 1)
        target = torch.zeros_like(output).scatter_(1, target, 1)
        # Calculate the score for each foreground class (class 0 = background).
        for i in range(1, output.shape[1]):
            # Label connected components of the ground-truth mask.
            # NOTE(review): squeeze(dim=0) assumes batch size N == 1 — confirm.
            label_target = label(target[:, i].squeeze(dim=0).cpu().numpy(), connectivity=output.dim()-2)
            label_target_list = np.unique(label_target)[1:]
            _pred = pred[:, i].squeeze(dim=0).cpu().numpy()
            score = []
            for target_id in label_target_list:
                # A component is a false negative if the prediction misses it entirely.
                if (np.sum((_pred == 1) * (label_target == target_id)) == 0):
                    # Component size, scaled down by 1000 (units unconfirmed).
                    score.append(np.sum(label_target == target_id) / 1000.0)
            scores.append(score)
        return scores
|
nilq/baby-python
|
python
|
import sort_for_vexflow
import pretty_midi
def notation(orchestra, inst, tech, dyn, note, tgt, onoff, microtone, masking_order_idx):
    """Build a VexFlow-ready description of one orchestration slice.

    Filters the per-instrument inputs down to combinations present in
    `orchestra`, colours the top three masking contributors, applies
    microtonal offsets, sorts for display, and returns a dict of note names,
    note numbers, annotations, target indices and highlight colours.
    """
    annotations = []
    orchestration_slice = []
    tgts = []
    for i in range(len(inst)):
        # Check that you input proper values:
        if tech[i] in list(orchestra[inst[i]].keys()):
            if dyn[i] in list(orchestra[inst[i]][tech[i]].keys()):
                if int(note[i]) in list(orchestra[inst[i]][tech[i]][dyn[i]].keys()):
                    orchestration_slice.append(
                        [inst[i], tech[i], dyn[i], int(note[i]), tgt[i], onoff[i]])  # Note comes as string, convert to int
                    # Do annotations
                    annotations.append(inst[i] + " " + dyn[i] + " " + tech[i])
                    # If marked as target, add to target list
                    if tgt[i]:
                        tgts.append(i)
    highlights = []
    for i in range(len(orchestration_slice)):
        highlights.append('')
    # Colour the three strongest maskers; the bare except silently skips
    # out-of-range indices in masking_order_idx.
    for i in range(len(masking_order_idx)):
        try:
            if i == 0:
                highlights[masking_order_idx[i]] = 'red'
                # outer_style[masking_order_idx[i]]['backgroundColor'] = 'red'
            if i == 1:
                highlights[masking_order_idx[i]] = 'magenta'
                # outer_style[masking_order_idx[i]]['backgroundColor'] = 'magenta'
            if i == 2:
                highlights[masking_order_idx[i]] = 'yellow'
                # outer_style[masking_order_idx[i]]['backgroundColor'] = 'yellow'
        except:
            pass
    # Apply microtonal offsets before sorting.
    for i in range(len(note)):
        note[i] = int(note[i]) + microtone[i]
    note, annotations, tgts, highlights, srt_idx = sort_for_vexflow.sort_notes(note, annotations, tgts, highlights)
    notes = [pretty_midi.note_number_to_name(int(round(i))) for i in note]  # Change to note names
    notes = [i.lower() for i in notes]
    return {"notes": notes, "notenumbers": note, "instruments": annotations, "target": tgts, "highlights": highlights}
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-06-18 07:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the M2M activity/region relations from both proposal group models.

    dependencies = [
        ('disturbance', '0005_auto_20180618_1123'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='proposalapprovergroup',
            name='activities',
        ),
        migrations.RemoveField(
            model_name='proposalapprovergroup',
            name='regions',
        ),
        migrations.RemoveField(
            model_name='proposalassessorgroup',
            name='activities',
        ),
        migrations.RemoveField(
            model_name='proposalassessorgroup',
            name='regions',
        ),
    ]
|
nilq/baby-python
|
python
|
def shared_function(x, sep=':'):
    """Return the literal 'got' joined to *x* with *sep* in between."""
    return 'got' + sep + x
|
nilq/baby-python
|
python
|
from cleaner_console import Console
if __name__ == '__main__':
    # Launch the interactive cleaner console.
    Console()
|
nilq/baby-python
|
python
|
from node import constants
def shout(data):
    """Tag the given message dict as a 'shout' (mutates `data` in place and
    returns the same dict)."""
    data['type'] = 'shout'
    return data
def proto_page(uri, pubkey, guid, text, signature, nickname, PGPPubKey, email,
               bitmessage, arbiter, notary, notary_description, notary_fee,
               arbiter_description, sin, homepage, avatar_url):
    """Build a 'page' protocol message describing a peer's public profile.

    NOTE(review): the `signature` parameter is accepted but never placed in
    the payload — confirm whether that is intentional.
    """
    data = {
        'type': 'page',
        'uri': uri,
        'pubkey': pubkey,
        'senderGUID': guid,
        'text': text,
        'nickname': nickname,
        'PGPPubKey': PGPPubKey,
        'email': email,
        'bitmessage': bitmessage,
        'arbiter': arbiter,
        'notary': notary,
        'notary_description': notary_description,
        'notary_fee': notary_fee,
        'arbiter_description': arbiter_description,
        'sin': sin,
        'homepage': homepage,
        'avatar_url': avatar_url,
        'v': constants.VERSION
    }
    return data
def query_page(guid):
    """Build a 'query_page' message requesting the page of the peer `guid`."""
    data = {
        'type': 'query_page',
        'findGUID': guid,
        'v': constants.VERSION
    }
    return data
def proto_store(key, value, originalPublisherID, age):
    """Build a DHT 'store' message for (key, value) with provenance and age."""
    data = {
        'type': 'store',
        'key': key,
        'value': value,
        'originalPublisherID': originalPublisherID,
        'age': age,
        'v': constants.VERSION
    }
    return data
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class representing a source container repository or directory."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import enum
from googlecloudsdk.core import exceptions
import six
class UnknownSourceError(exceptions.Error):
    """The provided source could not be identified."""
    # Raised by SourceRef.MakeDirRef when the argument is not a directory.
    pass
class SourceRef(object):
    """Reference to image or local directory."""

    class SourceType(enum.Enum):
        # Kinds of source a deployment can be built from.
        DIRECTORY = 1
        IMAGE = 2

    def __str__(self):
        return 'SourceRef({}, {})'.format(self.source_type, self.source_path)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 — confirm none are used as dict/set keys.
        if not isinstance(other, SourceRef):
            return False
        return (other.source_type == self.source_type and
                other.source_path == self.source_path)

    def __init__(self, source_type, source_path):
        # source_type: a SourceType member; source_path: image name or directory.
        self.source_type = source_type
        self.source_path = source_path

    @classmethod
    def MakeImageRef(cls, image_arg):
        """Create a SourceRef from provided image name."""
        return cls(cls.SourceType.IMAGE, six.text_type(image_arg))

    @classmethod
    def MakeDirRef(cls, source_arg):
        """Create a SourceRef from the provided directory name.

        Raises:
            UnknownSourceError: if `source_arg` is not an existing directory.
        """
        if os.path.isdir(source_arg):
            return cls(cls.SourceType.DIRECTORY, source_arg)
        raise UnknownSourceError(
            'Could not identify source [{}]'.format(source_arg))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
controlbeast.utils.loader
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2013 by the ControlBeast team, see AUTHORS.
:license: ISC, see LICENSE for details.
"""
import importlib
import os
import re
def __filter_members(item):
    """
    Filter function to detect classes within a module or package

    Returns True for names that are not standard module dunder attributes,
    i.e. members worth inspecting as class candidates.

    :param str item: the item to be tested with this filter
    """
    exclude = (
        re.escape('__builtins__'),
        re.escape('__cached__'),
        re.escape('__doc__'),
        re.escape('__file__'),
        re.escape('__loader__'),
        re.escape('__name__'),
        re.escape('__package__'),
        re.escape('__path__')
    )
    pattern = re.compile('|'.join(exclude))
    return not pattern.search(item)
def __filter_modules(item):
    """
    Filter function to detect processor modules and packages

    Returns True for filenames other than the package initialiser and the
    abstract base module, which must not be scanned for concrete classes.

    :param str item: the item to be tested with this filter
    """
    exclude = (
        re.escape('__init__.py'),
        re.escape('base.py')
    )
    pattern = re.compile('|'.join(exclude))
    return not pattern.search(item)
def detect_class_modules(module, parent=object):
    """
    Detect available class modules or packages and return a dictionary of valid class names, referring to
    the module they are contained within.

    Returns an empty dict when `module` cannot be imported.

    :param str module: the module or package to be scanned for classes
    :param parent: the class potential candidates must be derived off
    """
    # initialise result dictionary
    result = {}
    # get a list of all files and directories inside the module
    try:
        package_instance = importlib.import_module(module)
    except ImportError:
        return result
    # Packages are identified by their __init__.py; plain modules are scanned
    # as a single file.
    if package_instance.__file__[-11:] == '__init__.py':
        gen_dir = os.listdir(os.path.dirname(os.path.realpath(package_instance.__file__)))
    else:
        gen_dir = [os.path.realpath(package_instance.__file__)]
    # only consider modules and packages, and exclude the base module
    for file_candidate in filter(__filter_modules, gen_dir):
        # Python files are modules; the name needs to be without file ending
        if file_candidate[-3:] == '.py':
            file_candidate = file_candidate[:-3]
        # try if the detected package or module can be imported
        try:
            class_module_candidate = importlib.import_module('.'.join([module, file_candidate]))
        except ImportError:
            class_module_candidate = None
        # if the module or module could be imported, test if it contains classes derived from the parent class
        if class_module_candidate:
            for member_candidate in filter(__filter_members, dir(class_module_candidate)):
                try:
                    # Exclude the parent class itself; only true subclasses count.
                    if issubclass(getattr(class_module_candidate, member_candidate), parent) \
                            and getattr(class_module_candidate, member_candidate).__name__ != parent.__name__:
                        result[member_candidate] = class_module_candidate.__name__
                except TypeError:
                    # Non-class members make issubclass raise; skip them.
                    pass
    # return the dictionary
    return result
def load_member(module, member):
    """
    Load a member (function, class, ...) from a module and return it

    Returns None when either the module cannot be imported or the member
    does not exist on it.

    :param str module: the module or package name where the class should be loaded from
    :param str member: the name of the member to be loaded
    """
    try:
        loaded = importlib.import_module(module)
    except ImportError:
        return None
    return getattr(loaded, member, None)
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.4 on 2021-06-14 12:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds a nullable user FK to Tag; existing tags keep user = NULL, and the
    # tag survives deletion of its user (SET_NULL).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('main_app', '0004_auto_20210614_0157'),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
nilq/baby-python
|
python
|
from __future__ import annotations
from typing import (
Any,
Dict,
Iterator,
Mapping,
MutableMapping,
Optional,
Tuple,
TypeVar,
)
from apiwrappers import utils
VT = TypeVar("VT")
class NoValue:
    """Sentinel type used to signal the absence of a value."""

    __slots__: Tuple[str, ...] = ()

    def __repr__(self):
        return f"{type(self).__name__}()"
class CaseInsensitiveDict(MutableMapping[str, VT]):
    """Mapping whose string keys compare case-insensitively.

    Each entry is stored under its lower-cased key together with the key as
    originally supplied, so iteration and repr preserve the casing last used
    to set the item.
    """

    __slots__ = ("_data",)

    def __init__(self, data: Optional[Mapping[str, VT]] = None, **kwargs: VT):
        self._data: Dict[str, Tuple[str, VT]] = {}
        if data is not None:
            self.update(data)
        self.update(kwargs)

    def __getitem__(self, key: str) -> VT:
        original_key, value = self._data[key.lower()]
        return value

    def __setitem__(self, key: str, value: VT) -> None:
        self._data[key.lower()] = (key, value)

    def __delitem__(self, key: str) -> None:
        del self._data[key.lower()]

    def __iter__(self) -> Iterator[str]:
        for original_key, _ in self._data.values():
            yield original_key

    def __len__(self) -> int:
        return len(self._data)

    def __repr__(self) -> str:
        name = self.__class__.__name__
        if not self._data:
            return f"{name}()"
        return f"{name}({dict(self)})"
class Url:
    """
    Class to work with formatted string URLs and joining urls and path.
    Sometimes it useful to keep original format string in place, for example,
    for logging or metrics. This class stores original format string and its
    replacements fields, substituting it when needed.
    Args:
        template: a URL as format string, e.g. "https://example.org/users/{id}".
        replacements: values to format template with.
    Usage::
        >>> from apiwrappers import Url
        >>> url = Url("https://example.org")
        >>> url("/users/{id}", id=1)
        Url('https://example.org/users/{id}', id=1)
        >>> str(url("/users/{id}", id=1))
        'https://example.org/users/1'
    """

    def __init__(self, template: str, **replacements: Any):
        self.template = template
        self.replacements = replacements

    def __str__(self) -> str:
        # Substitution happens lazily, only when the string form is requested.
        return self.template.format_map(self.replacements)

    def __repr__(self) -> str:
        params = ", ".join(f"{k}={repr(v)}" for k, v in self.replacements.items())
        if self.replacements:
            return f"{self.__class__.__name__}({repr(self.template)}, {params})"
        return f"{self.__class__.__name__}({repr(self.template)})"

    def __call__(self, path: str, **replacements: Any) -> Url:
        """
        Joins path with current URL and return a new instance.
        Args:
            path: a path as format string, e.g. "/users/{id}".
            replacements: values to path with.
        Returns: New instance with a url joined with path.
        """
        url = utils.build_url(self.template, path)
        # Replacements passed here take precedence over the stored ones.
        return Url(url, **{**self.replacements, **replacements})

    def __eq__(self, other: Any) -> bool:
        # A plain string is compared against the *formatted* URL; another Url
        # is compared field-by-field (equal formatted forms may still differ).
        if isinstance(other, str):
            return str(self) == other
        if isinstance(other, self.__class__):
            return (
                self.template == other.template
                and self.replacements == other.replacements
            )
        return NotImplemented
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-from setuptools import setup, find_packages
from baseapp import get_version
# Package metadata for the feincms_baseapp distribution.
setup(
    name='feincms_baseapp',
    version=get_version(),
    description='This is a base app and contenttype for Feincms.',
    author='',
    author_email='',
    url='https://github.com/',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities',
    ]
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/6/24 20:55
# @Author : ganliang
# @File : doctest_test.py
# @Desc : doctest测试 执行模块测试
import doctest
import src.deco
if __name__ == "__main__":
    # Run the doctests embedded in src.deco.
    doctest.testmod(src.deco)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumby/flask-thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2015 thumby.io dev@thumby.io
class FlaskThumbor:
    """Flask extension exposing a `thumbor(...)` Jinja helper that builds
    signed thumbor image URLs."""
    __name__ = "FlaskThumbor"

    def __init__(self, app=None):
        # Support both direct construction and deferred init_app() wiring.
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Attach the crypto helper and server URL to `app` and register the
        `thumbor` template context function.

        Raises:
            RuntimeError: if THUMBOR_SERVER or THUMBOR_KEY is missing from
                the Flask configuration.
        """
        from libthumbor import CryptoURL
        from flask import current_app
        thumbor_server = app.config.get('THUMBOR_SERVER', None)
        thumbor_key = app.config.get('THUMBOR_KEY', None)
        if thumbor_server is None or thumbor_key is None:
            raise RuntimeError(
                'Make sure both THUMBOR_SERVER (URL for the thumbor server that will serve your images) and '
                'THUMBOR_KEY (security key for the thumbor server you\'re connecting to) are set in your '
                'Flask configuration.'
            )
        app.thumbor_crypto = CryptoURL(key=thumbor_key)
        # Strip trailing slash so concatenation below yields a clean URL.
        app.thumbor_server = thumbor_server.rstrip('/')

        @app.context_processor
        def utility_processor():
            def thumbor(**kw):
                # generate() produces the signed path for the requested options.
                return '%s%s' % (
                    current_app.thumbor_server,
                    current_app.thumbor_crypto.generate(**kw)
                )
            return dict(thumbor=thumbor)
|
nilq/baby-python
|
python
|
import os
import sys
'''
3个空瓶换一瓶
input: n个空瓶
outpu: 最终可换瓶数
'''
def demo1():
    """Read integers from stdin until EOF; for each non-zero n print n // 2.

    (Classic "3 empty bottles for 1 new bottle" puzzle: with borrowing,
    n empty bottles yield n // 2 drinks in total.)
    """
    while True:
        try:
            a = int(input())
            if a != 0:
                print(a//2)
        except:
            # EOF or non-integer input ends the loop.
            break
######################################
'''
input: n以及n个随机数组成的数组
output: 去重排序后的数组
'''
def demo2():
    """Read n, then n integers; print them deduplicated and sorted, one per line."""
    while True:
        try:
            n, m = int(input()), set()
            for i in range(n):
                m.add(int(input()))
            for i in sorted(m):
                print(i)
        except:
            # EOF or malformed input ends the loop.
            break
########################################
'''
input: 十六进制
ouput: 十进制
'''
def hex2dec():
    """Read hexadecimal strings from stdin until EOF; print each as decimal."""
    while True:
        try:
            print(int(input(), 16))
        except:
            # EOF or invalid hex ends the loop.
            break
#########################################
'''
input: n
ouput: 数组a[n],隔二删一,输出最后一个下标
'''
def shangshu():
    """For each n read from stdin, print the surviving index of a[0..n-1]
    when every third element is removed (Josephus recurrence with step 3)."""
    while True:
        try:
            n = int(input())
        except:
            # EOF ends the program.
            exit()
        r = 0
        i = 2
        # Josephus recurrence: survivor index for size i from size i-1.
        while i <= n:
            r = (r+3) % i
            i += 1
        print(r)
#########################################
'''
输入一个字符串,求出该字符串包含的字符集合
'''
def char():
    """Read lines from stdin until EOF; print each line's characters with
    duplicates removed, keeping first-occurrence order."""
    while True:
        try:
            res, a = "", input()
            for i in a:
                if i not in res:
                    res += i
            print(res)
        except:
            # EOF ends the loop.
            break
##########################################
'''
sudoku
思路:深搜+剪枝
'''
def isok(mat, i, j, num):
    """Return True if digit `num` may legally be placed at mat[i][j]
    under standard sudoku rules (row, column and 3x3 box uniqueness)."""
    # Column and row checks.
    for r in range(9):
        if mat[r][j] == num:
            return False
    for c in range(9):
        if mat[i][c] == num:
            return False
    # 3x3 box check.
    box_r, box_c = (i // 3) * 3, (j // 3) * 3
    for r in range(box_r, box_r + 3):
        for c in range(box_c, box_c + 3):
            if mat[r][c] == num:
                return False
    return True
def dfs(mat, i, j):
    """Solve the sudoku grid in place via depth-first search with backtracking.

    Scans row `i` from column `j` for empty cells (value 0), tries each legal
    digit, and undoes choices that lead to a dead end.  Returns the solved
    grid, or None if no solution exists from this state.
    """
    if i == 9:                      # all rows processed: solved
        return mat
    if j == 9:                      # row finished: continue with the next row
        return dfs(mat, i + 1, 0)
    flag = False                    # does this row contain a cell to fill?
    for col in range(j, 9):
        if mat[i][col] == 0:
            flag = True
            isChange = False        # has this cell been filled successfully?
            for num in range(1, 10):
                # BUG FIX: the original called the undefined name `isOk`;
                # the helper is defined as `isok`, so every run raised NameError.
                if isok(mat, i, col, num):
                    isChange = True
                    mat[i][col] = num
                    tpp = dfs(mat, i, col + 1)  # is the rest solvable?
                    if tpp is None:             # dead end: undo and try next digit
                        isChange = False
                        mat[i][col] = 0
                        continue
                    else:
                        return tpp
            if isChange == False:   # no digit fits here: backtrack
                return None
    if flag == False:               # row already full: advance to the next row
        return dfs(mat, i + 1, 0)
def sudoku():
    """Read 9x9 sudoku grids (9 space-separated digits per line, 0 = blank)
    from stdin until EOF; solve each with dfs() and print the solution."""
    while True:
        isCon = True
        mat = []
        for i in range(9):
            line = sys.stdin.readline().strip()
            if not line:
                # Blank line / EOF: stop reading grids.
                isCon = False
                break
            line = [int(i) for i in line.split(' ')]
            mat.append(line)
        if isCon == False:
            break
        mat = dfs(mat, 0, 0)
        for line in mat:
            print(' '.join(str(j) for j in line))
#####################################################
if __name__ == '__main__':
    # Currently wired to the character de-duplication exercise.
    char()
|
nilq/baby-python
|
python
|
class SETTING:
    """Static connection configuration for the supported query engines.

    `server_list` maps an engine name to its connector class name, the URL
    parts used to build the connection (plus a separate hive metastore URL),
    and optional table white/black lists.
    """
    server_list = {
        "presto": {
            "connect_type": "PrestoConnector",
            "url": {
                "username": "hive"
                ,"host": ""
                ,"port": 3600
                ,"param" : "hive"
                ,"schema": "default"
                ,"metastore": "mysql+pymysql://hive:hive@/hive"
            },
            "table_whitelist": [],
            "table_blacklist": [],
        },
        "hive": {
            "connect_type": "HiveSqlaConnector",
            "url": {
                "username": "yarn"
                ,"host": ""
                ,"port": 10000
                ,"schema": "default"
                ,"param" : "auth=NONE"
                ,"metastore": "mysql+pymysql://hive:hive@/hive"
            },
            "table_whitelist": [],
            "table_blacklist": [],
        },
    }
|
nilq/baby-python
|
python
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import subprocess
import sys
import tempfile
import time
import logging
LOG = logging.getLogger(__name__)
class SSHError(Exception):
    """Raised when a remote ssh command exits with a non-zero status."""
    pass
class AsyncSSH:
    """Minimal asyncio wrapper around the system ``ssh``/``scp`` binaries."""
    # Flipped to True once a TCP connection to the ssh port has succeeded.
    _ready = False

    def __init__(self, username=None, hostname=None, key=None, port=22, cb=None):
        # cb, if given, overrides the default per-line output callback below.
        if cb:
            self.cb = cb
        self.key = key
        self.username = username if username else "root"
        self.hostname = hostname
        self.port = str(port)

    def cb(self, line):
        # Default output callback: log each raw line from the child process.
        LOG.debug(repr(line))

    @asyncio.coroutine
    def run(self, command, stdin=None, return_output=False,
            strip_output=True, raise_on_error=True, user=None):
        """Run `command` on the remote host via the ssh binary.

        Returns the command output (when return_output) or the exit code;
        raises SSHError on a non-zero exit when raise_on_error is set.
        """
        # Lazily wait for the ssh port to become reachable on first use.
        if not self._ready:
            try:
                yield from self.wait()
            except Exception:
                if raise_on_error:
                    raise
                else:
                    return -1
        if not user:
            user = self.username
        output = b""
        if isinstance(stdin, str):
            # The child process needs a real file for stdin; spool the string.
            f = tempfile.TemporaryFile()
            f.write(stdin.encode())
            f.flush()
            f.seek(0)
            stdin = f
        cmd = ["ssh", "-T", "-o", "StrictHostKeyChecking=no",
               "%s@%s" % (user, self.hostname), "-p", self.port]
        if self.key:
            cmd += ["-i", self.key]
        if isinstance(command, str):
            cmd += command.split(" ")
        else:
            cmd += command
        LOG.debug("Running '%s'" % cmd)
        process = asyncio.create_subprocess_exec(*cmd,
                                                 stdin=stdin,
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.STDOUT)
        process = yield from process
        try:
            # Stream the child's combined stdout/stderr line by line.
            while not process.stdout.at_eof():
                line = yield from process.stdout.readline()
                self.cb(line)
                if return_output:
                    output += line
        except asyncio.CancelledError:
            LOG.debug("Terminated.")# Killing child process.")
            raise
            # NOTE(review): the block below is unreachable after the `raise`
            # above — it looks like intentionally disabled kill logic.
            # Confirm whether the child should be terminated on cancellation.
            try:
                process.terminate()
                yield from process.wait()
            except:
                LOG.exception("Error waiting for child")
                raise
        yield from process.wait()
        if process.returncode and raise_on_error:
            LOG.error("Command failed: %s" % line)
            msg = "Cmd '%s' failed. Exit code: %d" % (" ".join(cmd),
                                                      process.returncode)
            raise SSHError(msg)
        if return_output:
            output = output.decode()
            if strip_output:
                return output.strip()
            return output
        LOG.debug("Returning %s" % process.returncode)
        return process.returncode

    @asyncio.coroutine
    def wait(self, timeout=300):
        """Poll until the remote ssh port accepts TCP connections.

        Raises a plain Exception after `timeout` seconds without success.
        """
        start = time.time()
        while 1:
            try:
                r, w = yield from asyncio.open_connection(self.hostname,
                                                          int(self.port))
                self._ready = True
                w.close()
                return
            except ConnectionError:
                pass
            if time.time() - start > timeout:
                raise Exception("Timeout waiting for "
                                "%s:%s" % (self.hostname, self.port))
            LOG.debug("Waiting for ssh %s:%s" % (self.hostname, self.port))
            yield from asyncio.sleep(1)

    @asyncio.coroutine
    def scp_get(self, src, dst):
        """Recursively copy remote `src` to local `dst` via scp; return the
        scp exit code."""
        cmd = ["scp", "-B", "-o", "StrictHostKeyChecking no"]
        if self.key:
            cmd += ["-i", self.key]
        cmd += ["-P", self.port]
        cmd += ["-r", "%s@%s:%s" % (self.username, self.hostname, src), dst]
        LOG.debug("Runnung %s" % " ".join(cmd))
        process = asyncio.create_subprocess_exec(*cmd,
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.STDOUT)
        process = yield from process
        try:
            while not process.stdout.at_eof():
                line = yield from process.stdout.read()
                LOG.debug("scp: %s" % line)
        except asyncio.CancelledError:
            # On cancellation, kill scp and reap it in the background.
            process.terminate()
            asyncio.async(process.wait(), loop=asyncio.get_event_loop())
            raise
        return process.returncode
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2017/8/23 下午12:54
# @Author : chenyuelong
# @Mail : yuelong_chen@yahoo.com
# @File : read.py
# @Software: PyCharm
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
class read():
    """A single FASTQ record (ID line, sequence, separator/info line, quality)."""

    def __init__(self, *args):
        # The four FASTQ record lines, in file order.
        record = args
        self._readID = record[0]    # '@'-prefixed identifier line
        self._readseq = record[1]   # base sequence
        self._readinfo = record[2]  # '+' separator / optional description
        self._readq = record[3]     # per-base quality string
def main():
    # Intentionally empty: this module currently only provides the `read`
    # class; main() is kept as the script entry point.
    pass


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
awsscripter.cli
This module implements awsscripter's CLI, and should not be directly imported.
"""
import os
import warnings
import click
import colorama
import yaml
from awsscripter.cli.init.init import init_group
from awsscripter.cli.audit.audit import audit_group
from awsscripter.cli.stack.stack import stack_group
from awsscripter.cli.monitoring.monitor import monitoring_group
from awsscripter.cli.testcommand.testcommand import testcommand_group
from awsscripter.cli.list.list import list_group
from awsscripter.stack.helpers import setup_logging, catch_exceptions
from awsscripter import __version__
from awsscripter.cli.security.security import security_group
from awsscripter.cli.testcommand.testcommand import testcommand_group
from awsscripter.cli.awsbilling.awsbilling import billing_group
@click.group()
@click.version_option(version=__version__, prog_name="awsscripter")
@click.option("--debug", is_flag=True, help="Turn on debug logging.")
@click.option("--dir", "directory", help="Specify awsscripter directory.")
@click.option(
    "--output", type=click.Choice(["yaml", "json"]), default="yaml",
    help="The formatting style for command output.")
@click.option("--no-colour", is_flag=True, help="Turn off output colouring.")
@click.option(
    "--var", multiple=True, help="A variable to template into config files.")
@click.option(
    "--var-file", multiple=True, type=click.File("rb"),
    help="A YAML file of variables to template into config files.")
@click.pass_context
@catch_exceptions
def cli(
        ctx, debug, directory, no_colour, output, var, var_file
):
    """
    awsscripter is a tool to manage your cloud native infrastructure deployments.

    Collects the global options into ``ctx.obj`` for the sub-commands:
    user variables (merged from --var-file then --var, later sources
    winning), output format, colour preference and the working directory.
    """
    logger = setup_logging(debug, no_colour)
    colorama.init()
    # Enable deprecation warnings
    warnings.simplefilter("always", DeprecationWarning)
    ctx.obj = {
        "user_variables": {},
        "output_format": output,
        "no_colour": no_colour,
        "awsscripter_dir": directory if directory else os.getcwd()
    }
    if var_file:
        for fh in var_file:
            # An empty YAML document parses to None; treat it as "no vars".
            parsed = yaml.safe_load(fh.read()) or {}
            # Compute the overlap BEFORE merging, otherwise every key of the
            # file would be reported as a duplicate.  Debug aid only.
            overloaded_keys = set(ctx.obj["user_variables"]) & set(parsed)
            ctx.obj["user_variables"].update(parsed)
            if overloaded_keys:
                logger.debug(
                    "Duplicate variables encountered: {0}. "
                    "Using values from: {1}."
                    .format(", ".join(overloaded_keys), fh.name)
                )
    if var:
        # --var options overwrite --var-file options
        for variable in var:
            # Split only on the first '=' so values may contain '=' too.
            variable_key, variable_value = variable.split("=", 1)
            if variable_key in ctx.obj["user_variables"]:
                logger.debug(
                    "Duplicate variable encountered: {0}. "
                    "Using value from --var option."
                    .format(variable_key)
                )
            ctx.obj["user_variables"].update({variable_key: variable_value})
# Register every sub-command group on the top-level CLI group
# (order matches the original registration order).
for _command_group in (
        init_group,
        audit_group,
        stack_group,
        monitoring_group,
        testcommand_group,
        security_group,
        list_group,
        billing_group,
):
    cli.add_command(_command_group)
|
nilq/baby-python
|
python
|
import tfchain.polyfill.encoding.object as jsobj
import tfchain.polyfill.array as jsarr
import tfchain.polyfill.asynchronous as jsasync
import tfchain.polyfill.crypto as jscrypto
import tfchain.client as tfclient
import tfchain.errors as tferrors
from tfchain.chain import NetworkType, Type
from tfchain.balance import WalletBalance, SingleSigWalletBalance, MultiSigWalletBalance
from tfchain.encoding.siabin import SiaBinaryEncoder
from tfchain.types import ConditionTypes, transactions, FulfillmentTypes
from tfchain.types.transactions.Base import TransactionBaseClass
from tfchain.types.transactions.Minting import TransactionV128, TransactionV129, TransactionV130
from tfchain.types.IO import CoinInput
from tfchain.types.CryptoTypes import PublicKey, PublicKeySpecifier
from tfchain.types.PrimitiveTypes import Hash, Currency
from tfchain.types.ConditionTypes import UnlockHash, UnlockHashType, ConditionUnlockHash, ConditionMultiSignature, ConditionCustodyFee
from tfchain.types.FulfillmentTypes import FulfillmentMultiSignature, PublicKeySignaturePair
def assymetric_key_pair_generate(entropy, index):
    """Deterministically derive a signing key pair from entropy and an index.

    The entropy and index are binary (sia) encoded together and hashed with
    blake2b; the digest seeds the new key pair.
    """
    entropy_ok = isinstance(entropy, (bytes, bytearray)) or jsarr.is_uint8_array(entropy)
    if not entropy_ok:
        raise TypeError("entropy is of an invalid type {}".format(type(entropy)))
    if not isinstance(index, int):
        raise TypeError("index is of an invalid type {}".format(type(index)))
    enc = SiaBinaryEncoder()
    enc.add_array(entropy)
    enc.add_int(index)
    seed = jscrypto.blake2b(enc.data)
    return jscrypto.AssymetricSignKeyPair(seed)
def public_key_from_assymetric_key_pair(pair):
    """Return the ED25519 PublicKey wrapping the pair's public part."""
    if isinstance(pair, jscrypto.AssymetricSignKeyPair):
        return PublicKey(specifier=PublicKeySpecifier.ED25519, hash=pair.key_public)
    raise TypeError("pair is of an invalid type {}".format(type(pair)))
def unlockhash_from_assymetric_key_pair(pair):
    """Return the wallet address (unlock hash) of a signing key pair."""
    return public_key_from_assymetric_key_pair(pair).unlockhash
class TFChainWallet:
    """
    Tfchain Wallet object

    Wraps a set of signing key pairs together with an explorer client and
    exposes balance lookups, transaction listing/signing and the minter
    sub-API for those keys.  Asynchronous results are returned as
    jsasync promises/chains (this module targets Transcrypt).
    """

    def __init__(self, network_type, pairs, client):
        """
        :param network_type: tfchain.chain.NetworkType of the chain this wallet operates on
        :param pairs: non-empty list/array of signing key pairs owned by this wallet
        :param client: tfchain.client.TFChainClient used for explorer interaction
        """
        if not isinstance(network_type, NetworkType):
            raise TypeError("network_type is expected to be a tfchain.chain.NetworkType, invalid: {} ({})".format(network_type, type(network_type)))
        self._network_type = network_type
        if not jsobj.is_js_arr(pairs) or jsarr.is_empty(pairs):
            raise TypeError("pairs is expected to be a non-empty list/array of SigningKey pairs, not be of type {}".format(type(pairs)))
        self._pairs = pairs
        if not isinstance(client, tfclient.TFChainClient):
            raise TypeError("client is expected to be a TFChainClient, not be of type {}".format(type(client)))
        self._client = client
        # store all addresses as well
        self._addresses = []
        for pair in self._pairs:
            uh = unlockhash_from_assymetric_key_pair(pair)
            address = uh.__str__()
            self._addresses.append(address)
        # add sub-apis
        self._minter = TFChainMinter(wallet=self)
        # self._atomicswap = TFChainAtomicSwap(wallet=self)
        # self._threebot = TFChainThreeBot(wallet=self)
        # self._erc20 = TFChainERC20(wallet=self)

    @property
    def addresses(self):
        """
        :returns: the addresses owned by this wallet
        :rtype: list/array
        """
        return self._addresses

    @property
    def pairs(self):
        """
        :returns: the signing key pairs owned by this wallet
        :rtype: list/array
        """
        return self._pairs

    @property
    def client(self):
        """
        :returns: the (explorer) tfchain client used by this wallet
        :rtype: tfchain.client.TFChainClent
        """
        return self._client

    @property
    def network_type(self):
        """
        :returns: the type of the (tfchain) network
        :rtype: tfchain.chain.NetworkType
        """
        return self._network_type

    @property
    def address(self):
        """
        :returns: the primary (=first) address owned by this wallet
        :rtype: str
        """
        return self.addresses[0]

    @property
    def address_count(self):
        """
        :returns: the amount of addresses owned by this wallet
        :rtype: int
        """
        return len(self.addresses)

    @property
    def minter(self):
        """
        Minter used to update the (Coin) Minter Definition
        as well as to mint new coins, only useable if this wallet
        has (co-)ownership over the current (coin) minter definition.
        """
        return self._minter

    # @property
    # def atomicswap(self):
    #     """
    #     Atomic Swap API used to create atomic swap contracts as initiator or participator,
    #     as well as to redeem and refund existing unredeemed atomic swap contrats.
    #     """
    #     return self._atomicswap

    # @property
    # def threebot(self):
    #     """
    #     ThreeBot API used to register new 3Bots and
    #     manage existing 3Bot records.
    #     """
    #     return self._threebot

    # @property
    # def erc20(self):
    #     """
    #     ERC20 API used to send coins to ERC20 Addresses,
    #     and register TFT addresses that can than be used as ERC20 Withdraw addresses.
    #     """
    #     return self._erc20

    @property
    def addresses_multisig(self):
        """
        The multi signature wallet addresses co-owned and linked to this wallet,
        as reported by the internal balance reporter.
        """
        balance = self.balance
        return balance.addresses_multisig

    @property
    def balance(self):
        """
        The balance "sheet" of the wallet.
        """
        return self.balance_get()

    def balance_get(self, chain_info=None):
        """
        The balance "sheet" of the wallet.

        :param chain_info: optional pre-fetched chain info, to avoid an
            extra explorer round trip
        """
        aggregator = SingleSigWalletBalanceAggregator(self, chain_info=chain_info)
        return aggregator.fetch_and_aggregate()

    @property
    def transactions(self):
        """
        Get all transactions linked to a personal wallet address.

        :returns: a promise chain resolving to the transactions, sorted on
            (height, transaction order), most recent first
        """
        # for each address get all transactions
        def generator():
            for address in self.addresses:
                yield self._unlockhash_get(address)
        transactions = set()
        def gatherer(result):
            if result.transactions:
                transactions.update(result.transactions)
        p = jsasync.promise_pool_new(generator, cb=gatherer)
        # define sort cb that will sort it prior to the final return
        def cb():
            # sort all transactions
            def txn_arr_sort(a, b):
                # map unconfirmed entries (height/order < 0) to a huge value,
                # so that after the reversed sort they come first
                height_a = pow(2, 64) if a.height < 0 else a.height
                height_b = pow(2, 64) if b.height < 0 else b.height
                if height_a < height_b:
                    return -1
                if height_a > height_b:
                    return 1
                tx_order_a = pow(2, 64) if a.transaction_order < 0 else a.transaction_order
                tx_order_b = pow(2, 64) if b.transaction_order < 0 else b.transaction_order
                if tx_order_a < tx_order_b:
                    return -1
                if tx_order_a > tx_order_b:
                    return 1
                return 0
            return jsarr.sort(transactions, txn_arr_sort, reverse=True)
        # return promise chain
        return jsasync.chain(p, cb)

    # def coins_send(self, recipient, amount, source=None, refund=None, lock=None, data=None):
    #     """
    #     Send the specified amount of coins to the given recipient,
    #     optionally locked. Arbitrary data can be attached as well if desired.
    #     If the given recipient is a valid ERC20 address, than this will send
    #     the specified amount to that ERC20 address and no lock or data is allowed to be defined.
    #     The recipient is one of:
    #         - None: recipient is the Free-For-All wallet
    #         - str (or unlockhash): recipient is a personal wallet
    #         - list: recipient is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
    #         - tuple (addresses, sigcount): recipient is a sigcount-of-addresscount MultiSig wallet
    #         - an ERC20 address (str/ERC20Address), amount will be send to this ERC20 address
    #     The amount can be a str or an int:
    #         - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
    #         - when defining as a str you can use the following space-stripped and case-insentive formats:
    #             - '123456789': same as when defining the amount as an int
    #             - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
    #             - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
    #             - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
    #     The lock can be a str, or int:
    #         - when it is an int it represents either a block height or an epoch timestamp (in seconds)
    #         - when a str it can be a Jumpscale Datetime (e.g. '12:00:10', '31/10/2012 12:30', ...) or a Jumpscale Duration (e.g. '+ 2h', '+7d12h', ...)
    #     Returns a TransactionSendResult.
    #     @param recipient: see explanation above
    #     @param amount: int or str that defines the amount of TFT to set, see explanation above
    #     @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
    #     @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
    #     @param lock: optional lock that can be used to lock the sent amount to a specific time or block height, see explation above
    #     @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
    #     """
    #     if ERC20Address.is_valid_value(recipient):
    #         if lock != None:
    #             raise ValueError("a lock cannot be applied when sending coins to an ERC20 Address")
    #         if data != None:
    #             raise ValueError("data cannot be added to the transaction when sending coins to an ERC20 Address")
    #         # all good, try to send to the ERC20 address
    #         return self.erc20.coins_send(address=recipient, amount=amount, source=source, refund=refund)
    #     amount = Currency(value=amount)
    #     if amount <= 0:
    #         raise ValueError("no amount is defined to be sent")
    #     # define recipient
    #     recipient = ConditionTypes.from_recipient(recipient, lock=lock)
    #     # fund amount
    #     balance = self.balance
    #     miner_fee = self.network_type.minimum_miner_fee()
    #     inputs, remainder, suggested_refund = balance.fund(amount+miner_fee, source=source)
    #     # define the refund condition
    #     if refund == None: # automatically choose a refund condition if none is given
    #         if suggested_refund == None:
    #             refund = ConditionTypes.unlockhash_new(unlockhash=self.address)
    #         else:
    #             refund = suggested_refund
    #     else:
    #         # use the given refund condition (defined as a recipient)
    #         refund = ConditionTypes.from_recipient(refund)
    #     # create transaction
    #     txn = tftransactions.new()
    #     # add main coin output
    #     txn.coin_output_add(value=amount, condition=recipient)
    #     # add refund coin output if needed
    #     if remainder > 0:
    #         txn.coin_output_add(value=remainder, condition=refund)
    #     # add the miner fee
    #     txn.miner_fee_add(miner_fee)
    #     # add the coin inputs
    #     txn.coin_inputs = inputs
    #     # if there is data to be added, add it as well
    #     if data:
    #         txn.data = data
    #     # generate the signature requests
    #     sig_requests = txn.signature_requests_new()
    #     if len(sig_requests) == 0:
    #         raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
    #     # fulfill the signature requests that we can fulfill
    #     for request in sig_requests:
    #         try:
    #             key_pair = self.key_pair_get(request.wallet_address)
    #             input_hash = request.input_hash_new(public_key=key_pair.public_key)
    #             signature = key_pair.sign(input_hash)
    #             request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
    #         except KeyError:
    #             pass # this is acceptable due to how we directly try the key_pair_get method
    #     # txn should be fulfilled now
    #     submit = txn.is_fulfilled()
    #     if submit:
    #         # submit the transaction
    #         txn.id = self._transaction_put(transaction=txn)
    #         # update balance
    #         for ci in txn.coin_inputs:
    #             balance.output_add(ci.parent_output, confirmed=False, spent=True)
    #         addresses = self.addresses + balance.multisig_addresses
    #         for idx, co in enumerate(txn.coin_outputs):
    #             if str(co.condition.unlockhash) in addresses:
    #                 # add the id to the coin_output, so we can track it has been spent
    #                 co.id = txn.coin_outputid_new(idx)
    #                 balance.output_add(co, confirmed=False, spent=False)
    #     # and return the created/submitted transaction for optional user consumption
    #     return TransactionSendResult(txn, submit)

    def coin_transaction_builder_new(self):
        """
        Create a transaction builder that can be used to
        add multiple outputs, in a chained manner, and send them all at once.

        ERC20 coin outputs are not supported in the Coin Transaction Builder.
        """
        return CoinTransactionBuilder(self)

    def transaction_sign(self, txn, submit=None, balance=None):
        """
        Sign in all places of the transaction where it is still possible,
        and on which the wallet has authority to do so.

        Returns (a promise resolving to) a TransactionSignResult.

        @param txn: transaction to sign, a JSON-encoded txn or already loaded in-memory as a valid Transaction type
        @param submit: when truthy, submit the txn once it is fully fulfilled
        @param balance: optional cached WalletBalance; when given it is used
            directly (and updated on submit) instead of being fetched
        """
        # validate and/or normalize txn parameter
        if isinstance(txn, (str, dict)):
            txn = transactions.from_json(txn)
        elif not isinstance(txn, TransactionBaseClass):
            raise TypeError("txn value has invalid type {} and cannot be signed".format(type(txn)))
        to_submit = submit
        balance_is_cached = (balance != None)
        def cb(balance):
            # check all parentids from the specified coin inputs,
            # and set the coin outputs for the ones this wallet knows about
            # and that are still unspent
            if len(txn.coin_inputs) > 0:
                # collect all known outputs
                known_outputs = {}
                for co in balance.outputs_available:
                    known_outputs[co.id.__str__()] = co
                for co in balance.outputs_unconfirmed_available:
                    known_outputs[co.id.__str__()] = co
                # mark the coin inputs that are known as available outputs by this wallet
                for ci in txn.coin_inputs:
                    parentid = ci.parentid.__str__()
                    if parentid in known_outputs:
                        ci.parent_output = known_outputs[parentid]
            p = None
            # check for specific transaction types, as to
            # be able to add whatever content we know we can add
            if isinstance(txn, (TransactionV128, TransactionV129)):
                def cb(condition):
                    txn.parent_mint_condition = condition
                    if not txn.mint_fulfillment_defined():
                        txn.mint_fulfillment = FulfillmentTypes.from_condition(txn.parent_mint_condition)
                # set the parent mint condition
                # and define the current fulfillment if it is not defined
                p = jsasync.chain(self.client.minter.condition_get(), cb)
            def sign_and_such():
                # generate the signature requests
                sig_requests = txn.signature_requests_new()
                if len(sig_requests) == 0:
                    # possible if the wallet does not own any of the still required signatures,
                    # or for example because the wallet does not know about the parent outputs of
                    # the inputs still to be signed
                    def nop_cb(resolve, reject):
                        resolve(TransactionSignResult(txn, False, False))
                    return jsasync.promise_new(nop_cb)
                # fulfill the signature requests that we can fulfill
                signature_count = 0
                for request in sig_requests:
                    try:
                        key_pair = self.key_pair_get(request.wallet_address)
                        pk = public_key_from_assymetric_key_pair(key_pair)
                        input_hash = request.input_hash_new(public_key=pk)
                        signature = key_pair.sign(input_hash.value)
                        request.signature_fulfill(public_key=pk, signature=signature)
                        signature_count += 1
                    except KeyError:
                        pass # this is acceptable due to how we directly try the key_pair_get method
                # check if fulfilled, and if so, we'll submit unless the callee does not want that
                is_fulfilled = txn.is_fulfilled()
                submit = (to_submit and is_fulfilled)
                if not submit: # return as-is
                    def stub_cb(resolve, reject):
                        resolve(TransactionSignResult(
                            transaction=txn,
                            signed=(signature_count>0),
                            submitted=submit,
                        ))
                    return jsasync.promise_new(stub_cb)
                # submit, and only then return
                def id_cb(id):
                    txn.id = id
                    if balance_is_cached:
                        addresses = balance.addresses
                        # if the balance is cached, also update the balance
                        for idx, ci in enumerate(txn.coin_inputs):
                            if ci.parent_output.condition.unlockhash.__str__() in addresses:
                                balance.output_add(txn, idx, confirmed=False, spent=True)
                        for idx, co in enumerate(txn.coin_outputs):
                            if co.condition.unlockhash.__str__() in addresses:
                                # add the id to the coin_output, so we can track it has been spent
                                co.id = txn.coin_outputid_new(idx)
                                balance.output_add(txn, idx, confirmed=False, spent=False)
                    # return the signed result
                    return TransactionSignResult(
                        transaction=txn,
                        signed=(signature_count>0),
                        submitted=submit,
                    )
                return jsasync.chain(self._transaction_put(transaction=txn), id_cb)
            # sign now, or chain it and sign when possible
            if p == None:
                return sign_and_such()
            return jsasync.chain(p, sign_and_such)
        if balance_is_cached:
            if not isinstance(balance, WalletBalance):
                raise TypeError("balance is of unexpected type: {} ({})".format(balance, type(balance)))
            # execute the balance cb directly if a cached balance is given
            return cb(balance)
        # chain the balance object first
        return jsasync.chain(self.balance, cb)

    def key_pair_get(self, unlockhash):
        """
        Get the private/public key pair for the given unlock hash.
        If the unlock hash is not owned by this wallet a KeyError exception is raised.
        """
        if isinstance(unlockhash, UnlockHash):
            unlockhash = unlockhash.__str__()
        if not isinstance(unlockhash, str):
            raise TypeError("unlockhash cannot be of type {}".format(type(unlockhash)))
        # NOTE(review): the '00' prefix presumably denotes the NIL unlock hash
        # type, for which any of our keys may sign -- confirm
        if unlockhash[:2] == '00':
            return self._pairs[0]
        for index, address in enumerate(self.addresses):
            if address == unlockhash:
                return self._pairs[index]
        raise KeyError("wallet does not own unlock hash {}".format(unlockhash))

    def _unlockhash_get(self, address):
        # thin delegate to the explorer client
        return self._client.unlockhash_get(address)

    def _transaction_put(self, transaction):
        # thin delegate to the explorer client
        return self._client.transaction_put(transaction)
class TFChainMinter():
    """
    TFChainMinter contains all Coin Minting logic.
    """

    def __init__(self, wallet):
        """
        :param wallet: the TFChainWallet this minting sub-API operates on
        """
        if not isinstance(wallet, TFChainWallet):
            raise TypeError("wallet is expected to be a TFChainWallet")
        self._wallet = wallet

    # def definition_set(self, minter, data=None):
    #     """
    #     Redefine the current minter definition.
    #     Arbitrary data can be attached as well if desired.
    #     The minter is one of:
    #         - str (or unlockhash): minter is a personal wallet
    #         - list: minter is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
    #         - tuple (addresses, sigcount): minter is a sigcount-of-addresscount MultiSig wallet
    #     Returns a TransactionSendResult.
    #     @param minter: see explanation above
    #     @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
    #     """
    #     # create empty Mint Definition Txn, with a newly generated Nonce set already
    #     txn = tftransactions.mint_definition_new()
    #     # add the minimum miner fee
    #     txn.miner_fee_add(self._minium_miner_fee)
    #     # set the new mint condition
    #     txn.mint_condition = ConditionTypes.from_recipient(minter)
    #     # minter definition must be of unlock type 1 or 3
    #     ut = txn.mint_condition.unlockhash.type
    #     if ut not in (UnlockHashType.PUBLIC_KEY, UnlockHashType.MULTI_SIG):
    #         raise ValueError("{} is an invalid unlock hash type and cannot be used for a minter definition".format(ut))
    #     # optionally set the data
    #     if data != None:
    #         txn.data = data
    #     # get and set the current mint condition
    #     txn.parent_mint_condition = self._current_mint_condition_get()
    #     # create a raw fulfillment based on the current mint condition
    #     txn.mint_fulfillment = FulfillmentTypes.from_condition(txn.parent_mint_condition)
    #     # get all signature requests
    #     sig_requests = txn.signature_requests_new()
    #     if len(sig_requests) == 0:
    #         raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
    #     # fulfill the signature requests that we can fulfill
    #     for request in sig_requests:
    #         try:
    #             key_pair = self._wallet.key_pair_get(request.wallet_address)
    #             input_hash = request.input_hash_new(public_key=key_pair.public_key)
    #             signature = key_pair.sign(input_hash)
    #             request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
    #         except KeyError:
    #             pass # this is acceptable due to how we directly try the key_pair_get method
    #     submit = txn.is_fulfilled()
    #     if submit:
    #         txn.id = self._transaction_put(transaction=txn)
    #     # return the txn, as well as the submit status as a boolean
    #     return TransactionSendResult(txn, submit)

    # def coins_new(self, recipient, amount, lock=None, data=None):
    #     """
    #     Create new (amount of) coins and give them to the defined recipient.
    #     Arbitrary data can be attached as well if desired.
    #     The recipient is one of:
    #         - None: recipient is the Free-For-All wallet
    #         - str (or unlockhash/bytes/bytearray): recipient is a personal wallet
    #         - list: recipient is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
    #         - tuple (addresses, sigcount): recipient is a sigcount-of-addresscount MultiSig wallet
    #     The amount can be a str or an int:
    #         - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
    #         - when defining as a str you can use the following space-stripped and case-insentive formats:
    #             - '123456789': same as when defining the amount as an int
    #             - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
    #             - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
    #             - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
    #     The lock can be a str, or int:
    #         - when it is an int it represents either a block height or an epoch timestamp (in seconds)
    #         - when a str it can be a Jumpscale Datetime (e.g. '12:00:10', '31/10/2012 12:30', ...) or a Jumpscale Duration (e.g. '+ 2h', '+7d12h', ...)
    #     Returns a TransactionSendResult.
    #     @param recipient: see explanation above
    #     @param amount: int or str that defines the amount of TFT to set, see explanation above
    #     @param lock: optional lock that can be used to lock the sent amount to a specific time or block height, see explation above
    #     @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
    #     """
    #     # create empty Mint Definition Txn, with a newly generated Nonce set already
    #     txn = tftransactions.mint_coin_creation_new()
    #     # add the minimum miner fee
    #     txn.miner_fee_add(self._minium_miner_fee)
    #     balance = self._wallet.balance
    #     # parse the output
    #     amount = Currency(value=amount)
    #     if amount <= 0:
    #         raise ValueError("no amount is defined to be sent")
    #     # define recipient
    #     recipient = ConditionTypes.from_recipient(recipient, lock=lock)
    #     # and add it is the output
    #     txn.coin_output_add(value=amount, condition=recipient)
    #     # optionally set the data
    #     if data != None:
    #         txn.data = data
    #     # get and set the current mint condition
    #     txn.parent_mint_condition = self._current_mint_condition_get()
    #     # create a raw fulfillment based on the current mint condition
    #     txn.mint_fulfillment = FulfillmentTypes.from_condition(txn.parent_mint_condition)
    #     # get all signature requests
    #     sig_requests = txn.signature_requests_new()
    #     if len(sig_requests) == 0:
    #         raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
    #     # fulfill the signature requests that we can fulfill
    #     for request in sig_requests:
    #         try:
    #             key_pair = self._wallet.key_pair_get(request.wallet_address)
    #             input_hash = request.input_hash_new(public_key=key_pair.public_key)
    #             signature = key_pair.sign(input_hash)
    #             request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
    #         except KeyError:
    #             pass # this is acceptable due to how we directly try the key_pair_get method
    #     submit = txn.is_fulfilled()
    #     if submit:
    #         txn.id = self._transaction_put(transaction=txn)
    #         # update balance of wallet
    #         addresses = self._wallet.addresses + balance.multisig_addresses
    #         for idx, co in enumerate(txn.coin_outputs):
    #             if str(co.condition.unlockhash) in addresses:
    #                 # add the id to the coin_output, so we can track it has been spent
    #                 co.id = txn.coin_outputid_new(idx)
    #                 balance.output_add(co, confirmed=False, spent=False)
    #     # return the txn, as well as the submit status as a boolean
    #     return TransactionSendResult(txn, submit)

    def coins_burn(self, amount, source=None, refund=None, data=None, balance=None):
        """
        Burn the specified amount of coins,
        paying miner fees on top of it.

        :param amount: amount of coins to burn (Currency-compatible value, > 0)
        :param source: optional address(es) to fund the burn from
        :param refund: optional refund recipient; auto-chosen when None
        :param data: optional arbitrary data to attach to the transaction
        :param balance: optional cached WalletBalance; when given it is used
            directly (and updated on submit) instead of being fetched
        :returns: (a promise resolving to) a TransactionSendResult
        """
        txn = TransactionV130()
        miner_fee = self._minium_miner_fee
        amount = Currency(amount)
        balance_is_cached = (balance != None)
        if amount.less_than_or_equal_to(0):
            raise ValueError("a strict positive amount is required to be burned")
        def balance_cb(balance):
            # compute the amount of coin inputs we can accept, and ensure we do not have more
            # > 16e3 is the maximum size allowed by rivine-chains
            # > 307 is the size in bytes of a txn without arb. data, one miner fee, and no inputs/outputs
            # > 169 bytes is required per (coin) input
            max_input_count = (16e3 - 307 - len(txn.data)) // 169
            # fund amount
            inputs, remainder, suggested_refund = balance.fund(amount.plus(miner_fee), source=source, max_input_count=max_input_count)
            # define the refund condition
            # NOTE(review): rebinding `refund` below makes it local to this
            # nested function under CPython scoping rules (the read above would
            # raise UnboundLocalError); presumably fine under Transcrypt -- confirm
            if refund == None: # automatically choose a refund condition if none is given
                if suggested_refund == None:
                    refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
                else:
                    refund = suggested_refund
            else:
                # use the given refund condition (defined as a recipient)
                refund = ConditionTypes.from_recipient(refund)
            # add refund coin output if needed
            if remainder.greater_than(0):
                txn.coin_output_add(value=remainder, condition=refund)
            # add the miner fee
            txn.miner_fee_add(miner_fee)
            # add the coin inputs
            txn.coin_inputs = inputs
            # add custody fees if the wallet is linked to a goldchain network
            if self._wallet._network_type.chain_type() == Type.GOLDCHAIN:
                total_custody_fee = Currency()
                for ci in txn.coin_inputs:
                    if not ci.parent_output:
                        raise Exception("BUG: cannot define the required custody fee if no parent output is linked to coin input {}".format(ci.parentid.__str__()))
                    total_custody_fee = total_custody_fee.plus(ci.parent_output.custody_fee)
                txn.coin_output_add(value=total_custody_fee, condition=ConditionCustodyFee(balance.chain_time))
            # if there is data to be added, add it as well
            if data != None:
                txn.data = data
            # generate the signature requests
            sig_requests = txn.signature_requests_new()
            if len(sig_requests) == 0:
                raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
            # fulfill the signature requests that we can fulfill
            for request in sig_requests:
                try:
                    key_pair = self._wallet.key_pair_get(request.wallet_address)
                    pk = public_key_from_assymetric_key_pair(key_pair)
                    input_hash = request.input_hash_new(public_key=pk)
                    signature = key_pair.sign(input_hash.value)
                    request.signature_fulfill(public_key=pk, signature=signature)
                except KeyError:
                    pass # this is acceptable due to how we directly try the key_pair_get method
            # txn should be fulfilled now
            submit = txn.is_fulfilled()
            if not submit: # return as-is
                def stub_cb(resolve, reject):
                    resolve(TransactionSendResult(txn, submit))
                return jsasync.promise_new(stub_cb)
            # submit, and only then return
            def id_cb(id):
                txn.id = id
                if balance_is_cached:
                    addresses = balance.addresses
                    for idx, ci in enumerate(txn.coin_inputs):
                        if ci.parent_output.condition.unlockhash.__str__() in addresses:
                            balance.output_add(txn, idx, confirmed=False, spent=True)
                # return the send result
                return TransactionSendResult(txn, submit)
            return jsasync.chain(self._wallet._transaction_put(transaction=txn), id_cb)
        if balance != None:
            if not isinstance(balance, WalletBalance):
                raise TypeError("balance is of unexpected type: {} ({})".format(balance, type(balance)))
            # if balance is given, execute the balance cb directly
            return balance_cb(balance)
        # else fetch the balance first and get it than
        return jsasync.chain(self._wallet.balance, balance_cb)

    @property
    def _minium_miner_fee(self):
        # minimum miner fee as defined by the wallet's network type
        return self._wallet.network_type.minimum_miner_fee()
# from tfchain.types.ConditionTypes import ConditionAtomicSwap, OutputLock, AtomicSwapSecret, AtomicSwapSecretHash
# from tfchain.types.FulfillmentTypes import FulfillmentAtomicSwap
# class TFChainAtomicSwap():
# """
# TFChainAtomicSwap contains all Atomic Swap logic.
# """
# def __init__(self, wallet):
# if not isinstance(wallet, TFChainWallet):
# raise TypeError("wallet is expected to be a TFChainWallet")
# self._wallet = wallet
# def initiate(self, participator, amount, refund_time='+48h', source=None, refund=None, data=None, submit=True):
# """
# Initiate an atomic swap contract, targeted at the specified address,
# with the given amount. By default a 48 hours duration (starting from last block time)
# is used as time until contract can be refunded, but this can be changed.
# The participator is one of:
# - None: participator is the Free-For-All wallet
# - str (or unlockhash): participator is a personal wallet
# - list: participator is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
# - tuple (addresses, sigcount): participator is a sigcount-of-addresscount MultiSig wallet
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
#     - when defining as a str you can use the following space-stripped and case-insensitive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# Returns the AtomicSwapInitiationResult.
# @param participator: see explanation above
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param duration: the duration until the atomic swap contract becomes refundable
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# @param submit: True by default, if False the transaction will not be sent even if possible (e.g. if you want to double check)
# """
# # create a random secret
# secret = AtomicSwapSecret.random()
# secret_hash = AtomicSwapSecretHash.from_secret(secret)
# # create the contract
# result = self._create_contract(
# recipient=participator, amount=amount, refund_time=refund_time,
# source=source, refund=refund, data=data, secret_hash=secret_hash,
# submit=submit)
# # return the contract, transaction, submission status as well as secret
# return AtomicSwapInitiationResult(
# AtomicSwapContract(coinoutput=result.transaction.coin_outputs[0], unspent=True,
# current_timestamp=self._chain_time),
# secret, result.transaction, result.submitted)
# def participate(self, initiator, amount, secret_hash, refund_time='+24h', source=None, refund=None, data=None, submit=True):
# """
# Initiate an atomic swap contract, targeted at the specified address,
# with the given amount. By default a 24 hours duration (starting from last block time)
# is used as time until contract can be refunded, but this can be changed.
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
# - when defining as a str you can use the following space-stripped and case-insentive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# Returns the AtomicSwapParticipationResult.
# @param initiator: str (or unlockhash) of a personal wallet
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param secret_hash: the secret hash to be use, the same secret hash as used for the initiation contract
# @param duration: the duration until the atomic swap contract becomes refundable
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (can only be a personal wallet address)
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# @param submit: True by default, if False the transaction will not be sent even if possible (e.g. if you want to double check)
# """
# # normalize secret hash
# secret_hash = AtomicSwapSecretHash(value=secret_hash)
# # create the contract and return the contract, transaction and submission status
# result = self._create_contract(
# recipient=initiator, amount=amount, refund_time=refund_time, source=source,
# refund=refund, data=data, secret_hash=secret_hash, submit=submit)
# return AtomicSwapParticipationResult(
# AtomicSwapContract(coinoutput=result.transaction.coin_outputs[0], unspent=True, current_timestamp=self._chain_time),
# result.transaction, result.submitted)
# def verify(self, outputid, amount=None, secret_hash=None, min_refund_time=None, sender=False, receiver=False, contract=None):
# """
# Verify the status and content of the Atomic Swap Contract linked to the given outputid.
# An exception is returned if the contract does not exist, has already been spent
# or is not valid according to this validation
# Returns the verified contract.
#     @param outputid: str or Hash that identifies the coin output to which this contract is linked
# @param amount: validate amount if defined, int or str that defines the amount of TFT to set, see explanation above
# @param secret_hash: validate secret hash if defined, str or BinaryData
# @param min_refund_time: validate contract's refund time if defined, 0 if expected to be refundable, else the minimun time expected until it becomes refundable
# @param sender: if True it is expected that this wallet is registered as the sender of this contract
# @param receiver: if True it is expected that this wallet is registered as the receiver of this contract
# @param contract: if contract fetched in a previous call already, one can verify it also by directly passing it to this method
# """
# if contract == None:
# co = None
# spend_txn = None
# # try to fetch the contract
# try:
# # try to fetch the coin output that is expected to contain the secret
# co, _, spend_txn = self._wallet.client.coin_output_get(outputid)
# except tfchain.errors.ExplorerNoContent as exc:
# raise tfchain.errors.AtomicSwapContractNotFound(outputid=outputid) from exc
# # check if the contract hasn't been spent already
# if spend_txn != None:
# # if a spend transaction exists,
# # it means the contract was already spend, and can therefore no longer be redeemed
# raise tfchain.errors.AtomicSwapContractSpent(contract=AtomicSwapContract(
# coinoutput=co, unspent=False, current_timestamp=self._chain_time), transaction=spend_txn)
# # create the unspent contract
# contract = AtomicSwapContract(coinoutput=co, unspent=True, current_timestamp=self._chain_time)
# elif not isinstance(contract, AtomicSwapContract):
# raise TypeError("contract was expected to be an AtomicSwapContract, not to be of type {}".format(type(contract)))
# else:
# # verify the outputid is the same
# if contract.outputid != outputid:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="output identifier is expected to be {}, not {}".format(str(outputid), str(contract.outputid)),
# contract=contract)
# # if amount is given verify it
# if amount != None:
# amount = Currency(value=amount)
# if amount != contract.amount:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="amount is expected to be {}, not {}".format(amount.str(with_unit=True), contract.amount.str(with_unit=True)),
# contract=contract)
# # if secret hash is given verify it
# if secret_hash != None:
# # normalize secret hash
# secret_hash = AtomicSwapSecretHash(value=secret_hash)
# if secret_hash != contract.secret_hash:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="secret_hash is expected to be {}, not {}".format(str(secret_hash), str(contract.secret_hash)),
# contract=contract)
# # if min_refund_time is given verify it
# if min_refund_time != None:
# chain_time = self._chain_time
# if isinstance(min_refund_time, str):
# min_refund_time = OutputLock(value=min_refund_time, current_timestamp=chain_time).value
# elif not isinstance(min_refund_time, int):
# raise TypeError("expected minimum refund time to be an integer or string, not to be of type {}".format(type(min_refund_time)))
# min_duration = max(0, min_refund_time-chain_time)
# chain_time = self._chain_time
# if chain_time >= contract.refund_timestamp:
# contract_duration = 0
# else:
# contract_duration = contract.refund_timestamp - chain_time
# if min_duration <= 0:
# if contract_duration != 0:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="contract cannot be refunded yet while it was expected to be possible already",
# contract=contract)
# elif contract_duration < min_duration:
# if contract_duration == 0:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="contract was expected to be non-refundable for at least {} more, but it can be refunded already since {}".format(
# duration.toString(min_duration), epoch2HRDateTime(contract.refund_timestamp)),
# contract=contract)
# elif contract_duration < min_duration:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="contract was expected to be available for redemption for at least {}, but it is only available for {}".format(
# duration.toString(min_duration), duration.toString(contract_duration)),
# contract=contract)
# # if expected to be authorized to be the sender, verify this
# if sender and contract.sender not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="wallet not registered as sender of this contract", contract=contract)
# # if expected to be authorized to be the receiver, verify this
# if receiver and contract.receiver not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapContractInvalid(
# message="wallet not registered as receiver of this contract", contract=contract)
# # return the contract for further optional consumption,
# # according to our validations it is valid
# return contract
# def redeem(self, outputid, secret, data=None):
# """
# Redeem an unspent Atomic Swap contract.
# Returns the sent transaction.
# @param outputid: the identifier of the coin output that contains the atomic swap contract
# @param secret: secret, matching the contract's secret hash, used to redeem the contract
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# """
# co = None
# spend_txn = None
# # try to fetch the contract
# try:
# # try to fetch the coin output that is expected to contain the secret
# co, _, spend_txn = self._wallet.client.coin_output_get(outputid)
# except tfchain.errors.ExplorerNoContent as exc:
# raise tfchain.errors.AtomicSwapContractNotFound(outputid=outputid) from exc
# # generate the contract
# contract = AtomicSwapContract(coinoutput=co, unspent=False, current_timestamp=self._chain_time) # either it is spent already or we'll spend it
# # check if the contract hasn't been spent already
# if spend_txn != None:
# # if a spend transaction exists,
# # it means the contract was already spend, and can therefore no longer be redeemed
# raise tfchain.errors.AtomicSwapContractSpent(contract=contract, transaction=spend_txn)
# # verify the defined secret
# if not contract.verify_secret(secret):
# raise tfchain.errors.AtomicSwapInvalidSecret(contract=contract)
# # ensure this wallet is authorized to be the receiver
# if contract.receiver not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapForbidden(message="unauthorized to redeem: wallet does not contain receiver address {}".format(contract.receiver), contract=contract)
# # create the fulfillment
# fulfillment = FulfillmentTypes.atomic_swap_new(secret=secret)
# # create, sign and submit the transaction
# return self._claim_contract(contract=contract, as_sender=False, fulfillment=fulfillment, data=data)
# def refund(self, outputid, data=None):
# """
# Refund an unspent Atomic Swap contract.
# Returns the sent transaction.
# @param outputid: the identifier of the coin output that contains the atomic swap contract
# @param data: optional data that can be attached ot the sent transaction (str or bytes), with a max length of 83
# """
# co = None
# spend_txn = None
# # try to fetch the contract
# try:
# # try to fetch the coin output that is expected to contain the secret
# co, _, spend_txn = self._wallet.client.coin_output_get(outputid)
# except tfchain.errors.ExplorerNoContent as exc:
# raise tfchain.errors.AtomicSwapContractNotFound(outputid=outputid) from exc
# # generate the contract
# contract = AtomicSwapContract(coinoutput=co, unspent=False, current_timestamp=self._chain_time) # either it is spent already or we'll spend it
# # check if the contract hasn't been spent already
# if spend_txn != None:
# # if a spend transaction exists,
# # it means the contract was already spend, and can therefore no longer be redeemed
# raise tfchain.errors.AtomicSwapContractSpent(contract=contract, transaction=spend_txn)
# # verify the contract can be refunded already
# time = self._chain_time
# if time < contract.refund_timestamp:
# raise tfchain.errors.AtomicSwapForbidden(
# message="unauthorized to refund: contract can only be refunded since {}".format(epoch2HRDateTime(contract.refund_timestamp)),
# contract=contract)
# # ensure this wallet is authorized to be the sender (refunder)
# if contract.sender not in self._wallet.addresses:
# raise tfchain.errors.AtomicSwapForbidden(message="unauthorized to refund: wallet does not contain sender address {}".format(contract.sender), contract=contract)
# # create the fulfillment
# fulfillment = FulfillmentTypes.atomic_swap_new()
# # create, sign and submit the transaction
# return self._claim_contract(contract=contract, as_sender=True, fulfillment=fulfillment, data=data)
# def _create_contract(self, recipient, amount, refund_time, source, refund, data, secret_hash, submit):
# """
# Create a new atomic swap contract,
# the logic for both the initiate as well as participate phase.
# """
# # define the amount
# amount = Currency(value=amount)
# if amount <= 0:
# raise ValueError("no amount is defined to be swapped")
# # define the miner fee
# miner_fee = self._minium_miner_fee
# # ensure the amount is bigger than the miner fee,
# # otherwise the contract cannot be redeemed/refunded
# if amount <= miner_fee:
# raise tfchain.errors.AtomicSwapInsufficientAmountError(amount=amount, minimum_miner_fee=miner_fee)
# # define the coin inputs
# balance = self._wallet.balance
# inputs, remainder, suggested_refund = balance.fund(amount+miner_fee, source=source)
# # define the refund
# if refund != None:
# refund = ConditionTypes.from_recipient(refund)
# elif suggested_refund != None:
# refund = ConditionTypes.from_recipient(suggested_refund)
# else:
# refund = ConditionTypes.from_recipient(self._wallet.address)
# # define the sender
# if isinstance(refund, ConditionUnlockHash):
# sender = refund.unlockhash
# else:
# sender = self._wallet.address
# # create and populate the transaction
# txn = tftransactions.new()
# txn.coin_inputs = inputs
# txn.miner_fee_add(self._minium_miner_fee)
# txn.data = data
# # define refund time already, so we can use the chain time as the current time
# if isinstance(refund_time, str):
# chain_time = self._chain_time
# refund_time = OutputLock(value=refund_time, current_timestamp=chain_time).value
# elif not isinstance(refund_time, int):
# raise TypeError("expected refund time to be an integer or string, not to be of type {}".format(type(refund_time)))
# # define the atomic swap contract and add it as a coin output
# asc = ConditionTypes.atomic_swap_new(
# sender=sender, receiver=recipient, hashed_secret=secret_hash, lock_time=refund_time)
# txn.coin_output_add(condition=asc, value=amount)
# # optionally add a refund coin output
# if remainder > 0:
# txn.coin_output_add(condition=refund, value=remainder)
# # get all signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # assign all coin output ID's for atomic swap contracts,
# # as we always care about the contract's output ID and
# # the refund coin output has to be our coin output
# for idx, co in enumerate(txn.coin_outputs):
# co.id = txn.coin_outputid_new(idx)
# # submit if possible
# submit = submit and txn.is_fulfilled()
# if submit:
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# for ci in txn.coin_inputs:
# balance.output_add(ci.parent_output, confirmed=False, spent=True)
# addresses = self._wallet.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# balance.output_add(co, confirmed=False, spent=False)
# # return the txn, as well as the submit status as a boolean
# return TransactionSendResult(txn, submit)
# def _claim_contract(self, contract, as_sender, fulfillment, data):
# """
# claim an unspent atomic swap contract
# """
# # create the contract and fill in the easy content
# txn = tftransactions.new()
# miner_fee = self._minium_miner_fee
# txn.miner_fee_add(miner_fee)
# txn.data = data
# # define the coin input
# txn.coin_input_add(parentid=contract.outputid, fulfillment=fulfillment, parent_output=contract.coin_output)
# # and the coin output
# txn.coin_output_add(
# condition=ConditionTypes.unlockhash_new(contract.sender if as_sender else contract.receiver),
# value=contract.amount-miner_fee)
# # get all signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # submit if possible
# submit = txn.is_fulfilled()
# if not submit:
#         raise Exception("BUG: transaction should be fulfilled at this point, please fix or report as an issue")
# # assign transactionid
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# balance = self._wallet.balance
# addresses = self._wallet.addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # return the txn
# return txn
# @property
# def _chain_time(self):
# """
# Returns the time according to the chain's network.
# """
# info = self._wallet.client.blockchain_info_get()
# return info.timestamp
# @property
# def _minium_miner_fee(self):
# """
# Returns the minimum miner fee
# """
# return self._wallet.network_type.minimum_miner_fee()
# def _output_get(self, outputid):
# """
# Get the transactions linked to the given outputID.
#     @param outputid: id of the coin output whose linked transactions are fetched
# """
# return self._wallet.client.output_get(outputid)
# def _transaction_put(self, transaction):
# """
# Submit the transaction to the network using the parent's wallet client.
# Returns the transaction ID.
# """
# return self._wallet.client.transaction_put(transaction=transaction)
# class TFChainThreeBot():
# """
# TFChainThreeBot contains all ThreeBot logic.
# """
# def __init__(self, wallet):
# if not isinstance(wallet, TFChainWallet):
# raise TypeError("wallet is expected to be a TFChainWallet")
# self._wallet = wallet
# def record_new(self, months=1, names=None, addresses=None, key_index=None, source=None, refund=None):
# """
# Create a new 3Bot by creating a new record on the BlockChain,
# by default 1 month rent is already paid for the 3Bot, but up to 24 months can immediately be pre-paid
# against a discount if desired.
# At least one name or one address is required, and up to 5 names and 10 addresses can
# exists for a single 3Bot.
# If no key_index is given a new key pair is generated for the wallet,
# otherwise the key pair on the given index of the wallet is used.
# Returns a TransactionSendResult.
# @param months: amount of months to be prepaid, at least 1 month is required, maximum 24 months is allowed
# @param names: 3Bot Names to add to the 3Bot as aliases (minumum 0, maximum 5)
# @param addresses: Network Addresses used to reach the 3Bot (minimum 0, maximum 10)
# @param key_index: if None is given a new key pair is generated, otherwise the key pair on the defined index is used.
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# # create the txn and fill the easiest properties already
# txn = tftransactions.threebot_registration_new()
# txn.number_of_months = months
# if names == None and addresses == None:
# raise ValueError("at least one name or one address is to be given, none is defined")
# txn.names = names
# txn.addresses = addresses
# # get the fees, and fund the transaction
# balance = self._fund_txn(txn, source, refund)
# # if the key_index is not defined, generate a new public key,
# # otherwise use the key_index given
# if key_index == None:
# txn.public_key = self._wallet.public_key_new()
# else:
# if not isinstance(key_index, int):
# raise TypeError("key index is to be of type int, not type {}".format(type(key_index)))
# addresses = self._wallet.addresses
# if key_index < 0 or key_index >= len(addresses):
# raise ValueError("key index {} is OOB, index cannot be negative, and can be maximum {}".format(key_index, len(addresses)-1))
# txn.public_key = self._wallet.key_pair_get(unlockhash=addresses[key_index]).public_key
# # sign, submit, update Balance and return result
# return self._sign_and_submit_txn(txn, balance)
# def record_update(self, identifier, months=0, names_to_add=None, names_to_remove=None, addresses_to_add=None, addresses_to_remove=None, source=None, refund=None):
# """
# Update the record of an existing 3Bot, for which this Wallet is authorized to make such changes.
# Names and addresses can be added and removed. Removal of data is always for free, adding data costs money.
# Extra months can also be paid (up to 24 months in total), as to extend the expiration time further in the future.
# One of months, names_to_add, names_to_remove, addresses_to_add, addresses_to_remove has to be a value other than 0/None.
# Returns a TransactionSendResult.
# @param months: amount of months to be paid and added to the current months, if the 3Bot was inactive, the starting time will be now
# @param names_to_add: 3Bot Names to add to the 3Bot as aliases (minumum 0, maximum 5)
# @param names_to_remove: 3Bot Names to add to the 3Bot as aliases (minumum 0, maximum 5)
# @param addresses_to_add: Network Addresses to add and used to reach the 3Bot (minimum 0, maximum 10)
# @param addresses_to_remove: Network Addresses to remove and used to reach the 3Bot (minimum 0, maximum 10)
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# if months < 1 and not reduce((lambda r, v: r or (v != None)), [names_to_add, names_to_remove, addresses_to_add, addresses_to_remove], False):
# raise ValueError("extra months is to be given or one name/address is to be added/removed, none is defined")
# # create the txn and fill the easiest properties already
# txn = tftransactions.threebot_record_update_new()
# txn.botid = identifier
# txn.number_of_months = months
# txn.names_to_add = names_to_add
# txn.names_to_remove = names_to_remove
# txn.addresses_to_add = addresses_to_add
# txn.addresses_to_remove = addresses_to_remove
# # get the 3Bot Public Key
# record = self._wallet.client.threebot.record_get(identifier)
# # set the parent public key
# txn.parent_public_key = record.public_key
# # ensure the 3Bot is either active, or will be come active
# if record.expiration <= self._chain_time and months == 0:
# raise tfchain.errors.ThreeBotInactive(identifier, record.expiration)
# # get the fees, and fund the transaction
# balance = self._fund_txn(txn, source, refund)
# # sign, submit, update Balance and return result
# return self._sign_and_submit_txn(txn, balance)
# def name_transfer(self, sender, receiver, names, source=None, refund=None):
# """
# Transfer one or multiple 3Bot names from the sender 3Bot to the receiver 3Bot.
# Both the Sender and Receiver 3Bots have to be active at the time of transfer.
# Returns a TransactionSendResult.
# @param sender: identifier of the existing and active 3Bot sender bot
# @param receiver: identifier of the existing and active 3Bot receiver bot
# @param names: 3Bot Names to transfer (minumum 0, maximum 5)
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# # create the txn and fill the easiest properties already
# txn = tftransactions.threebot_name_transfer_new()
# txn.sender_botid = sender
# txn.receiver_botid = receiver
# txn.names = names
# if len(txn.names) == 0:
# raise ValueError("at least one (3Bot) name has to be transfered, but none were defined")
# # keep track of chain time
# chain_time = self._chain_time
# # get and assign the 3Bot's public key for the sender
# record = self._wallet.client.threebot.record_get(sender)
# txn.sender_parent_public_key = record.public_key
# # ensure sender bot is active
# if record.expiration <= chain_time:
# raise tfchain.errors.ThreeBotInactive(sender, record.expiration)
# # get and assign the 3Bot's public key for the receiver
# record = self._wallet.client.threebot.record_get(receiver)
# txn.receiver_parent_public_key = record.public_key
# # ensure receiver bot is active
# if record.expiration <= chain_time:
# raise tfchain.errors.ThreeBotInactive(receiver, record.expiration)
# # get the fees, and fund the transaction
# balance = self._fund_txn(txn, source, refund)
# # sign and update Balance and return result,
# # only if the 3Bot owns both public keys, the Txn will be already,
# # submitted as well
# return self._sign_and_submit_txn(txn, balance)
# def _fund_txn(self, txn, source, refund):
# """
# common fund/refund/inputs/fees logic for all 3Bot Transactions
# """
# # get the fees, and fund the transaction
# miner_fee = self._minium_miner_fee
# bot_fee = txn.required_bot_fees
# balance = self._wallet.balance
# inputs, remainder, suggested_refund = balance.fund(miner_fee+bot_fee, source=source)
# # add the coin inputs
# txn.coin_inputs = inputs
# # add refund coin output if needed
# if remainder > 0:
# # define the refund condition
# if refund == None: # automatically choose a refund condition if none is given
# if suggested_refund == None:
# refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
# else:
# refund = suggested_refund
# else:
# # use the given refund condition (defined as a recipient)
# refund = ConditionTypes.from_recipient(refund)
# txn.refund_coin_output_set(value=remainder, condition=refund)
# # add the miner fee
# txn.transaction_fee = miner_fee
# # return balance object
# return balance
# def _sign_and_submit_txn(self, txn, balance):
# """
# common sign and submit logic for all 3Bot Transactions
# """
# # generate the signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # txn should be fulfilled now
# submit = txn.is_fulfilled()
# if submit:
# # submit the transaction
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# for ci in txn.coin_inputs:
# balance.output_add(ci.parent_output, confirmed=False, spent=True)
# addresses = self._wallet.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# # add the id to the coin_output, so we can track it has been spent
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # and return the created/submitted transaction for optional user consumption
# return TransactionSendResult(txn, submit)
# @property
# def _minium_miner_fee(self):
# """
# Returns the minimum miner fee
# """
# return self._wallet.network_type.minimum_miner_fee()
# def _transaction_put(self, transaction):
# """
# Submit the transaction to the network using the parent's wallet client.
# Returns the transaction ID.
# """
# return self._wallet.client.transaction_put(transaction=transaction)
# @property
# def _chain_time(self):
# """
# Returns the time according to the chain's network.
# """
# info = self._wallet.client.blockchain_info_get()
# return info.timestamp
# class TFChainERC20():
# """
# TFChainERC20 contains all ERC20 (wallet) logic.
# """
# def __init__(self, wallet):
# if not isinstance(wallet, TFChainWallet):
# raise TypeError("wallet is expected to be a TFChainWallet")
# self._wallet = wallet
# def coins_send(self, address, amount, source=None, refund=None):
# """
# Send the specified amount of coins to the given ERC20 address.
# The amount can be a str or an int:
# - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
# - when defining as a str you can use the following space-stripped and case-insentive formats:
# - '123456789': same as when defining the amount as an int
# - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
# - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
# - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)
# Returns a TransactionSendResult.
# @param address: str or ERC20Address value to which the money is to be send
# @param amount: int or str that defines the amount of TFT to set, see explanation above
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# amount = Currency(value=amount)
# if amount <= 0:
# raise ValueError("no amount is defined to be sent")
# # create transaction
# txn = tftransactions.erc20_convert_new()
# # define the amount and recipient
# txn.address = ERC20Address(value=address)
# txn.value = amount
# # fund the transaction
# balance = self._fund_txn(txn, source, refund, txn.value)
# # sign, submit and return the transaction
# return self._sign_and_submit_txn(txn, balance)
# def address_register(self, value=None, source=None, refund=None):
# """
# Register an existing TFT address of this wallet as an ERC20 Withdraw Address,
# either by specifying the address itself or by specifying the index of the address.
# If no value is defined a new key pair will be defined.
# Returns a TransactionSendResult.
# @param value: index of the TFT address or address itself, the address has to be owned by this wallet
# @param source: one or multiple addresses/unlockhashes from which to fund this coin send transaction, by default all personal wallet addresses are used, only known addresses can be used
# @param refund: optional refund address, by default is uses the source if it specifies a single address otherwise it uses the default wallet address (recipient type, with None being the exception in its interpretation)
# """
# if value == None:
# public_key = self._wallet.public_key_new()
# elif isinstance(value, (str, UnlockHash)):
# try:
# public_key = self._wallet.key_pair_get(unlockhash=value).public_key
# except KeyError as exc:
# if isinstance(value, str):
# value = UnlockHash.from_json(value)
# raise tfchain.errors.ERC20RegistrationForbidden(address=value) from exc
# elif isinstance(value, int) and not isinstance(value, bool):
# addresses = self._wallet.addresses
# if value < 0 or value >= len(addresses):
# raise ValueError("address index {} is not a valid index for this wallet, has to be in the inclusive range of [0, {}]".format(
# value, len(addresses)-1))
# public_key = self._wallet.key_pair_get(unlockhash=addresses[value]).public_key
# else:
# raise ValueError("value has to be a str, UnlockHash or int, cannot identify an address using value {} (type: {})".format(value, type(value)))
# # create transaction
# txn = tftransactions.erc20_address_registration_new()
# # define the public key
# txn.public_key = public_key
# # fund the transaction
# balance = self._fund_txn(txn, source, refund, txn.registration_fee)
# # sign, submit and return the transaction
# return self._sign_and_submit_txn(txn, balance)
# def address_get(self, value=None):
# """
# Get the registration info of an existing TFT address of this wallet as an ERC20 Withdraw Address,
# either by specifying the address itself or by specifying the index of the address.
# If no value is defined the first wallet address will be used.
# Returns an ERC20AddressInfo named tuple.
# @param value: index of the TFT address or address itself, the address has to be owned by this wallet
# """
# if value == None:
# public_key = self._wallet.key_pair_get(unlockhash=self._wallet.address).public_key
# elif isinstance(value, (str, UnlockHash)):
# try:
# public_key = self._wallet.key_pair_get(unlockhash=value).public_key
# except KeyError as exc:
# if isinstance(value, str):
# value = UnlockHash.from_json(value)
# raise tfchain.errors.AddressNotInWallet(address=value) from exc
# elif isinstance(value, int) and not isinstance(value, bool):
# addresses = self._wallet.addresses
# if value < 0 or value >= len(addresses):
# raise ValueError("address index {} is not a valid index for this wallet, has to be in the inclusive range of [0, {}]".format(
# value, len(addresses)-1))
# public_key = self._wallet.key_pair_get(unlockhash=addresses[value]).public_key
# else:
# raise ValueError("value has to be a str, UnlockHash or int, cannot identify an address using value {} (type: {})".format(value, type(value)))
# # look up the wallet address and return it
# return self._wallet.client.erc20.address_get(unlockhash=public_key.unlockhash)
# def addresses_get(self):
# """
# Get the information for all registered ERC20 withdraw addresses.
# Can return a empty list if no addresses of this wallet were registered as an ERC20 withdraw address.
# Returns a list of ERC20AddressInfo named tuples.
# """
# results = []
# # scan for some new keys first, to ensure we get all addresses
# self._wallet._key_scan()
# # get the ERC20 info for all addresses that are registered as ERC20 withdraw addresses, if any
# for address in self._wallet.addresses:
# try:
# info = self._wallet.client.erc20.address_get(address)
# results.append(info)
# except tfchain.errors.ExplorerNoContent:
# pass
# # return all found info, if anything
# return results
# def _fund_txn(self, txn, source, refund, amount):
# """
# common fund/refund/inputs/fees logic for all ERC20 Transactions
# """
# # get the fees, and fund the transaction
# miner_fee = self._minium_miner_fee
# balance = self._wallet.balance
# inputs, remainder, suggested_refund = balance.fund(miner_fee+amount, source=source)
# # add the coin inputs
# txn.coin_inputs = inputs
# # add refund coin output if needed
# if remainder > 0:
# # define the refund condition
# if refund == None: # automatically choose a refund condition if none is given
# if suggested_refund == None:
# refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
# else:
# refund = suggested_refund
# else:
# # use the given refund condition (defined as a recipient)
# refund = ConditionTypes.from_recipient(refund)
# txn.refund_coin_output_set(value=remainder, condition=refund)
# # add the miner fee
# txn.transaction_fee = miner_fee
# # return balance object
# return balance
# def _sign_and_submit_txn(self, txn, balance):
# """
# common sign and submit logic for all ERC20 Transactions
# """
# # generate the signature requests
# sig_requests = txn.signature_requests_new()
# if len(sig_requests) == 0:
# raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
# # fulfill the signature requests that we can fulfill
# for request in sig_requests:
# try:
# key_pair = self._wallet.key_pair_get(request.wallet_address)
# input_hash = request.input_hash_new(public_key=key_pair.public_key)
# signature = key_pair.sign(input_hash)
# request.signature_fulfill(public_key=key_pair.public_key, signature=signature)
# except KeyError:
# pass # this is acceptable due to how we directly try the key_pair_get method
# # txn should be fulfilled now
# submit = txn.is_fulfilled()
# if submit:
# # submit the transaction
# txn.id = self._transaction_put(transaction=txn)
# # update balance
# for ci in txn.coin_inputs:
# balance.output_add(ci.parent_output, confirmed=False, spent=True)
# addresses = self._wallet.addresses + balance.multisig_addresses
# for idx, co in enumerate(txn.coin_outputs):
# if str(co.condition.unlockhash) in addresses:
# # add the id to the coin_output, so we can track it has been spent
# co.id = txn.coin_outputid_new(idx)
# balance.output_add(co, confirmed=False, spent=False)
# # and return the created/submitted transaction for optional user consumption
# return TransactionSendResult(txn, submit)
# @property
# def _minium_miner_fee(self):
# """
# Returns the minimum miner fee
# """
# return self._wallet.network_type.minimum_miner_fee()
# def _transaction_put(self, transaction):
# """
# Submit the transaction to the network using the parent's wallet client.
# Returns the transaction ID.
# """
# return self._wallet.client.transaction_put(transaction=transaction)
class TransactionSendResult():
    """
    Immutable result value for a generic transaction send call.

    Exposes the transaction that was built and a flag telling
    whether it was actually submitted to the network.
    """

    def __init__(self, transaction, submitted):
        # keep the values private; access goes through read-only properties
        self._txn = transaction
        self._was_submitted = submitted

    @property
    def transaction(self):
        """The transaction that was created (and possibly submitted)."""
        return self._txn

    @property
    def submitted(self):
        """True if the transaction was submitted to the network."""
        return self._was_submitted
class TransactionSignResult:
    """
    Immutable result value for a transaction sign call.

    Exposes the transaction, whether this wallet added a signature,
    and whether the (now fulfilled) transaction was submitted.
    """

    def __init__(self, transaction, signed, submitted):
        # private storage, exposed via read-only properties below
        self._txn = transaction
        self._was_signed = signed
        self._was_submitted = submitted

    @property
    def transaction(self):
        """The transaction that was (possibly) signed."""
        return self._txn

    @property
    def signed(self):
        """True if this wallet contributed at least one signature."""
        return self._was_signed

    @property
    def submitted(self):
        """True if the transaction was submitted to the network."""
        return self._was_submitted
# class AtomicSwapInitiationResult():
# """
# AtomicSwapInitiationResult is a named tuple,
# used as the result for an atomic swap initiation call.
# """
# def __init__(self, contract, secret, transaction, submitted):
# self._contract = contract
# self._secret = secret
# self._transaction = transaction
# self._submitted = submitted
# @property
# def contract(self):
# return self._contract
# @property
# def secret(self):
# return self._secret
# @property
# def transaction(self):
# return self._transaction
# @property
# def submitted(self):
# return self._submitted
# class AtomicSwapParticipationResult():
# """
# AtomicSwapParticipationResult is a named tuple,
# used as the result for an atomic swap participation call.
# """
# def __init__(self, contract, transaction, submitted):
# self._contract = contract
# self._transaction = transaction
# self._submitted = submitted
# @property
# def contract(self):
# return self._contract
# @property
# def transaction(self):
# return self._transaction
# @property
# def submitted(self):
# return self._submitted
class SingleSigWalletBalanceAggregator:
    """
    State class to serve as the red line throughout
    the chained promise-based balance gathering for a (regular/personal) wallet,
    which can involve the merging of results of multiple (single-sig) addresses.

    Usage: construct with a TFChainWallet (and optionally pre-fetched chain
    info), then call fetch_and_aggregate() once; it returns a promise chain
    that resolves to the merged SingleSigWalletBalance.
    """

    def __init__(self, wallet, chain_info=None):
        # the wallet whose addresses are aggregated; must be a TFChainWallet
        if not isinstance(wallet, TFChainWallet):
            raise TypeError("expected wallet to be of type TFChainWallet, not: {} ({})".format(wallet, type(wallet)))
        self._wallet = wallet
        # running aggregate, merged into as per-address balances arrive
        self._balance = SingleSigWalletBalance()
        # optional pre-fetched chain info; fetched lazily when None
        self._info = chain_info
        if self._info != None and not isinstance(self._info, tfclient.ExplorerBlockchainInfo):
            raise TypeError("info has to be an ExplorerBlockchainInfo object, invalid: {} ({})".format(self._info, type(self._info)))

    def fetch_and_aggregate(self):
        """Return a promise chain resolving to the aggregated balance.

        Skips the blockchain-info fetch step when chain info was given
        at construction time.
        """
        if self._info != None:
            return jsasync.chain(
                self._personal_pool_chain_get(),
                self._balance_get,
            )
        return jsasync.chain(
            self._wallet._client.blockchain_info_get(),
            self._collect_chain_info,
            self._personal_pool_chain_get,
            self._balance_get,
        )

    def _collect_chain_info(self, info):
        # chain step: store the fetched blockchain info for later use
        self._info = info

    def _personal_pool_chain_get(self):
        # fan out one explorer lookup per wallet address; each result is
        # folded into the aggregate via the callback
        return jsasync.promise_pool_new(
            self._personal_address_generator,
            cb=self._collect_personal_balance,
        )

    def _personal_address_generator(self):
        # yields one explorer-lookup promise per personal wallet address
        for address in self._wallet.addresses:
            yield self._wallet._unlockhash_get(address)

    def _collect_personal_balance(self, result):
        # merge the balance of a single address into the running aggregate
        balance = result.balance(info=self._info)
        self._balance = self._balance.balance_add(balance)

    def _balance_get(self):
        # final chain step: expose the aggregated balance
        return self._balance
class CoinTransactionBuilder():
    """
    Builder that accumulates coin outputs for a single transaction and then
    funds, signs and (optionally) submits it in one `send` call.

    A builder instance can be consumed only once: after `send` is called,
    further `output_add`/`send` calls raise RuntimeError.
    """

    def __init__(self, wallet):
        # the transaction under construction
        self._txn = transactions.new()
        # set to True once `send` is called, guarding against reuse
        self._txn_send = False
        self._wallet = wallet

    @property
    def transaction(self):
        """The transaction being built (read-only access)."""
        return self._txn

    def output_add(self, recipient, amount, lock=None):
        """
        Add an output to the transaction, returning the transaction
        itself to allow for chaining.
        The recipient is one of:
            - None: recipient is the Free-For-All wallet
            - str (or unlockhash): recipient is a personal wallet
            - list: recipient is a MultiSig wallet where all owners (specified as a list of addresses) have to sign
            - tuple (addresses, sigcount): recipient is a sigcount-of-addresscount MultiSig wallet
            - an ERC20 address (str/ERC20Address), amount will be send to this ERC20 address
        The amount can be a str or an int:
            - when it is an int, you are defining the amount in the smallest unit (that is 1 == 0.000000001 TFT)
            - when defining as a str you can use the following space-stripped and case-insentive formats:
                - '123456789': same as when defining the amount as an int
                - '123.456': define the amount in TFT (that is '123.456' == 123.456 TFT == 123456000000)
                - '123456 TFT': define the amount in TFT (that is '123456 TFT' == 123456 TFT == 123456000000000)
                - '123.456 TFT': define the amount in TFT (that is '123.456 TFT' == 123.456 TFT == 123456000000)

        @param recipient: see explanation above
        @param amount: int or str that defines the amount of TFT to set, see explanation above
        @param lock: optional lock that can be used to lock the sent amount to a specific time or block height, see explation above
        """
        if self._txn_send:
            raise RuntimeError("coin transaction builder is already consumed")
        amount = Currency(value=amount)
        if amount.less_than_or_equal_to(0):
            raise ValueError("no amount is defined to be sent")
        recipient = ConditionTypes.from_recipient(recipient, lock=lock)
        self._txn.coin_output_add(value=amount, condition=recipient)
        return self

    def send(self, source=None, refund=None, data=None, balance=None, merge=False, merge_min_co_count=None):
        """
        Fund, sign and (when fulfilled) submit the built transaction.

        @param source: one or multiple addresses/unlockhashes to fund from
        @param refund: optional refund recipient (see output_add for formats)
        @param data: optional arbitrary data to attach to the transaction
        @param balance: optional pre-fetched WalletBalance (skips a fetch)
        @param merge: when True, merge small outputs instead of regular funding
        @param merge_min_co_count: minimum number of outputs required to merge
        Returns a promise resolving to a TransactionSendResult.
        """
        if self._txn_send:
            raise RuntimeError("coin transaction builder is already consumed")
        txn = self._txn
        self._txn_send = True
        balance_is_cached = (balance != None)

        def balance_cb(balance):
            if not merge:  # regular fund logic
                # total amount to fund = sum of all outputs
                amount = Currency.sum(*[co.value for co in txn.coin_outputs])
                miner_fee = self._wallet.network_type.minimum_miner_fee()
                # compute the amount of coin inputs we can accept, and ensure we do not have more
                # > 16e3 is the maximum size allowed by rivine-chains
                # > 307 is the size in bytes of a txn without arb. data, one miner fee, and no inputs/outputs
                # > 51 bytes is required per (coin) output
                # > 169 bytes is required per (coin) input
                extra_bytes_count = 0
                if len(txn.coin_outputs) > 0 and txn.coin_outputs[0].condition.ctype == 3:
                    extra_bytes_count = 17  # add 17 bytes for lock time condition
                max_input_count = (16e3 - 307 - (51 * len(txn.coin_outputs)) - len(txn.data) - extra_bytes_count) // 169
                # fund the txn
                inputs, remainder, suggested_refund = balance.fund(amount.plus(miner_fee), source=source, max_input_count=max_input_count)
                # if there is data to be added, add it as well
                if data != None:
                    txn.data = data
                # check that we do not have more than the max amount of coin inputs
                if len(inputs) > max_input_count:
                    # BUG FIX: error message contained a doubled word ("funds funds")
                    raise tferrors.InsufficientFunds(
                        "insufficient big funds in this wallet: {} coin inputs overflow the allowed {} inputs".format(
                            len(inputs), max_input_count))
            else:  # merge logic
                # gather all available outputs (unconfirmed only when needed)
                all_outputs = []
                for co in balance.outputs_available:
                    all_outputs.append(co)
                if len(all_outputs) < 92:
                    for co in balance.outputs_unconfirmed_available:
                        all_outputs.append(co)
                # BUG FIX: list.sort() takes `key` as a keyword-only argument;
                # passing the lambda positionally raised a TypeError here
                all_outputs.sort(key=lambda co: float(co.value.str()))
                # select outputs to use (low ones)
                output_count = min(len(all_outputs), 92)  # 92 is a hardcoded constant of allowed coin outputs
                if not output_count or (merge_min_co_count and output_count < min(92, merge_min_co_count)):
                    # early return in case not enough outputs to merge
                    def stub_cb(resolve, reject):
                        resolve(TransactionSendResult(txn, False))
                    return jsasync.promise_new(stub_cb)
                used_outputs = all_outputs[:output_count]
                # select the inputs from these inputs
                inputs = [CoinInput.from_coin_output(co) for co in used_outputs]
                remainder = Currency()
                suggested_refund = None
                # select and create the output for these inputs
                miner_fee = self._wallet.network_type.minimum_miner_fee()
                txn.coin_output_add(
                    Currency.sum(*[co.value for co in used_outputs]).minus(miner_fee),
                    used_outputs[output_count-1].condition,  # use the address with the highest value
                )
            # add refund coin output if needed
            if remainder.greater_than(0):
                # define the refund condition
                if refund == None:  # automatically choose a refund condition if none is given
                    if suggested_refund == None:
                        refund = ConditionTypes.unlockhash_new(unlockhash=self._wallet.address)
                    else:
                        refund = suggested_refund
                else:
                    # use the given refund condition (defined as a recipient)
                    refund = ConditionTypes.from_recipient(refund)
                txn.coin_output_add(value=remainder, condition=refund)
            # add the miner fee
            txn.miner_fee_add(miner_fee)
            # add the coin inputs
            txn.coin_inputs = inputs
            # add custody fees if the wallet is linked to a goldchain network
            if self._wallet.network_type.chain_type() == Type.GOLDCHAIN:
                total_custody_fee = Currency()
                for ci in txn.coin_inputs:
                    if not ci.parent_output:
                        raise Exception("BUG: cannot define the required custody fee if no parent output is linked to coin input {}".format(ci.parentid.__str__()))
                    total_custody_fee = total_custody_fee.plus(ci.parent_output.custody_fee)
                txn.coin_output_add(value=total_custody_fee, condition=ConditionCustodyFee(balance.chain_time))
            # generate the signature requests
            sig_requests = txn.signature_requests_new()
            if len(sig_requests) == 0:
                raise Exception("BUG: sig requests should not be empty at this point, please fix or report as an issue")
            # fulfill the signature requests that we can fulfill
            for request in sig_requests:
                try:
                    key_pair = self._wallet.key_pair_get(request.wallet_address)
                    pk = public_key_from_assymetric_key_pair(key_pair)
                    input_hash = request.input_hash_new(public_key=pk)
                    signature = key_pair.sign(input_hash.value)
                    request.signature_fulfill(public_key=pk, signature=signature)
                except KeyError:
                    pass  # this is acceptable due to how we directly try the key_pair_get method
            # txn should be fulfilled now
            submit = txn.is_fulfilled()
            if not submit:  # return as-is
                def stub_cb(resolve, reject):
                    resolve(TransactionSendResult(txn, submit))
                return jsasync.promise_new(stub_cb)

            # submit, and only then return
            def id_cb(id):
                txn.id = id
                if balance_is_cached:
                    # keep the caller-supplied balance in sync with what we just spent/created
                    addresses = balance.addresses
                    for idx, ci in enumerate(txn.coin_inputs):
                        if ci.parent_output.condition.unlockhash.__str__() in addresses:
                            balance.output_add(txn, idx, confirmed=False, spent=True)
                    for idx, co in enumerate(txn.coin_outputs):
                        if co.condition.unlockhash.__str__() in addresses:
                            # add the id to the coin_output, so we can track it has been spent
                            co.id = txn.coin_outputid_new(idx)
                            balance.output_add(txn, idx, confirmed=False, spent=False)
                # return the send result
                return TransactionSendResult(txn, submit)
            return jsasync.chain(self._wallet._transaction_put(transaction=txn), id_cb)

        if balance != None:
            if not isinstance(balance, WalletBalance):
                raise TypeError("balance is of unexpected type: {} ({})".format(balance, type(balance)))
            # if balance is given, execute the balance cb directly
            return balance_cb(balance)
        # else fetch the balance first and get it than
        return jsasync.chain(self._wallet.balance, balance_cb)
|
nilq/baby-python
|
python
|
# Demo script: authenticate against Robinhood via fast_arrow, fetch option
# orders and "unroll" each order into one row per option leg.
import configparser
from fast_arrow import Client, OptionOrder

print("----- running {}".format(__file__))

# read credentials from the local debug config file
# NOTE(review): assumes config.debug.ini has an [account] section with
# username/password keys — verify the file exists before running
config = configparser.ConfigParser()
config.read('config.debug.ini')

#
# initialize fast_arrow client and authenticate
#
client = Client(
    username = config['account']['username'],
    password = config['account']['password'])
client.authenticate()

#
# fetch option orders
#
option_orders_all = OptionOrder.all(client)

#
# in case you have lots, only use first 25
# (unroll process fetches contract data for each leg)
#
option_orders = option_orders_all[0:25]

#
# unroll option orders ... ie, break each option leg into its own row
# this is helpful when doing detailed P/L reporting
#
option_orders_unrolled = OptionOrder.unroll_option_legs(client, option_orders)

#
# let's print out the results
#
print(option_orders_unrolled[0].keys())
nilq/baby-python
|
python
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from datetime import time
from operator import itemgetter
import jsonschema
from flask import request
from wtforms.fields import BooleanField, FloatField, HiddenField, IntegerField, SelectField, StringField, TextAreaField
from wtforms.fields.html5 import DecimalField, EmailField
from wtforms.validators import DataRequired, Email, InputRequired, NumberRange, Optional, ValidationError
from wtforms.widgets.html5 import NumberInput
from indico.core import signals
from indico.core.config import config
from indico.modules.designer import PageLayout, PageOrientation, PageSize, TemplateType
from indico.modules.designer.util import get_default_template_on_category, get_inherited_templates
from indico.modules.events.features.util import is_feature_enabled
from indico.modules.events.payment import payment_settings
from indico.modules.events.registration.models.forms import ModificationMode
from indico.modules.events.registration.models.invitations import RegistrationInvitation
from indico.modules.events.registration.models.registrations import Registration
from indico.util.i18n import _
from indico.util.placeholders import get_missing_placeholders, render_placeholder_info
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.fields import EmailListField, FileField, IndicoDateTimeField, IndicoEnumSelectField, JSONField
from indico.web.forms.fields.principals import PrincipalListField
from indico.web.forms.fields.simple import HiddenFieldList, IndicoEmailRecipientsField
from indico.web.forms.validators import HiddenUnless, IndicoEmail, LinkedDateTime
from indico.web.forms.widgets import CKEditorWidget, SwitchWidget
def _check_if_payment_required(form, field):
    """Field validator: a non-zero registration fee requires the payment feature.

    A falsy fee (``None``/``0``) is always accepted.
    """
    if field.data and not is_feature_enabled(form.event, 'payment'):
        raise ValidationError(_('You have to enable the payment feature in order to set a registration fee.'))
class RegistrationFormForm(IndicoForm):
    """Form to create/edit the general settings of a registration form."""

    # field groups used by the management views to render the form in sections
    _price_fields = ('currency', 'base_price')
    _registrant_notification_fields = ('notification_sender_address',
                                       'message_pending', 'message_unpaid', 'message_complete')
    _manager_notification_fields = ('manager_notifications_enabled', 'manager_notification_recipients')
    _special_fields = _price_fields + _registrant_notification_fields + _manager_notification_fields

    title = StringField(_("Title"), [DataRequired()], description=_("The title of the registration form"))
    introduction = TextAreaField(_("Introduction"),
                                 description=_("Introduction to be displayed when filling out the registration form"))
    contact_info = StringField(_("Contact info"),
                               description=_("How registrants can get in touch with somebody for extra information"))
    moderation_enabled = BooleanField(_("Moderated"), widget=SwitchWidget(),
                                      description=_("If enabled, registrations require manager approval"))
    require_login = BooleanField(_("Only logged-in users"), widget=SwitchWidget(),
                                 description=_("Users must be logged in to register"))
    require_user = BooleanField(_("Registrant must have account"), widget=SwitchWidget(),
                                description=_("Registrations emails must be associated with an Indico account"))
    limit_registrations = BooleanField(_("Limit registrations"), widget=SwitchWidget(),
                                       description=_("Whether there is a limit of registrations"))
    # only shown/validated when limit_registrations is enabled
    registration_limit = IntegerField(_("Capacity"), [HiddenUnless('limit_registrations'), DataRequired(),
                                                      NumberRange(min=1)],
                                      description=_("Maximum number of registrations"))
    modification_mode = IndicoEnumSelectField(_("Modification allowed"), enum=ModificationMode,
                                              description=_("Will users be able to modify their data? When?"))
    publish_registrations_enabled = BooleanField(_('Publish registrations'), widget=SwitchWidget(),
                                                 description=_("Registrations from this form will be displayed in the "
                                                               "event page"))
    publish_registration_count = BooleanField(_("Publish number of registrations"), widget=SwitchWidget(),
                                              description=_("Number of registered participants will be displayed in "
                                                            "the event page"))
    publish_checkin_enabled = BooleanField(_('Publish check-in status'), widget=SwitchWidget(),
                                           description=_("Check-in status will be shown publicly on the event page"))
    # a fee may only be set when the payment feature is enabled (custom validator);
    # the filter normalizes an empty value to 0
    base_price = DecimalField(_('Registration fee'), [NumberRange(min=0, max=999999.99), Optional(),
                                                      _check_if_payment_required], filters=[lambda x: x if x is not None else 0],
                              widget=NumberInput(step='0.01'),
                              description=_("A fixed fee all users have to pay when registering."))
    currency = SelectField(_('Currency'), [DataRequired()], description=_('The currency for new registrations'))
    # empty sender address falls back to the global NO_REPLY_EMAIL (see __init__)
    notification_sender_address = StringField(_('Notification sender address'), [IndicoEmail()],
                                              filters=[lambda x: (x or None)])
    message_pending = TextAreaField(_("Message for pending registrations"),
                                    description=_("Text included in emails sent to pending registrations"))
    message_unpaid = TextAreaField(_("Message for unpaid registrations"),
                                   description=_("Text included in emails sent to unpaid registrations"))
    message_complete = TextAreaField(_("Message for complete registrations"),
                                     description=_("Text included in emails sent to complete registrations"))
    manager_notifications_enabled = BooleanField(_('Enabled'), widget=SwitchWidget(),
                                                 description=_("Enable notifications to managers about registrations"))
    manager_notification_recipients = EmailListField(_('List of recipients'),
                                                     [HiddenUnless('manager_notifications_enabled',
                                                                   preserve_data=True), DataRequired()],
                                                     description=_("Email addresses that will receive notifications"))

    def __init__(self, *args, **kwargs):
        # `event` is passed by the caller and popped before the base init
        self.event = kwargs.pop('event')
        super(RegistrationFormForm, self).__init__(*args, **kwargs)
        self._set_currencies()
        self.notification_sender_address.description = _('Email address set as the sender of all '
                                                         'notifications sent to users. If empty, '
                                                         'then {0} is used.'.format(config.NO_REPLY_EMAIL))

    def _set_currencies(self):
        # populate the currency choices from the payment plugin settings,
        # sorted case-insensitively by their display label
        currencies = [(c['code'], '{0[code]} ({0[name]})'.format(c)) for c in payment_settings.get('currencies')]
        self.currency.choices = sorted(currencies, key=lambda x: x[1].lower())
class RegistrationFormScheduleForm(IndicoForm):
    """Form to configure when a registration form opens/closes for registrations."""

    start_dt = IndicoDateTimeField(_("Start"), [Optional()], default_time=time(0, 0),
                                   description=_("Moment when registrations will be open"))
    # must be after start_dt (LinkedDateTime)
    end_dt = IndicoDateTimeField(_("End"), [Optional(), LinkedDateTime('start_dt')], default_time=time(23, 59),
                                 description=_("Moment when registrations will be closed"))
    # must be after end_dt; empty means "same as end date"
    modification_end_dt = IndicoDateTimeField(_("Modification deadline"), [Optional(), LinkedDateTime('end_dt')],
                                              default_time=time(23, 59),
                                              description=_("Deadline until which registration information can be "
                                                            "modified (defaults to the end date if empty)"))

    def __init__(self, *args, **kwargs):
        # use the event's timezone for all datetime fields of this form
        regform = kwargs.pop('regform')
        self.timezone = regform.event.timezone
        super(RegistrationFormScheduleForm, self).__init__(*args, **kwargs)
class InvitationFormBase(IndicoForm):
    """Base form for sending registration invitations (email + moderation settings)."""

    # field groups used by the views to render/process the form in sections
    _invitation_fields = ('skip_moderation',)
    _email_fields = ('email_from', 'email_subject', 'email_body')
    email_from = SelectField(_('From'), [DataRequired()])
    email_subject = StringField(_("Email subject"), [DataRequired()])
    email_body = TextAreaField(_("Email body"), [DataRequired()], widget=CKEditorWidget(simple=True))
    skip_moderation = BooleanField(_("Skip moderation"), widget=SwitchWidget(),
                                   description=_("If enabled, the user's registration will be approved automatically."))

    def __init__(self, *args, **kwargs):
        # `regform` is passed by the caller and popped before the base init
        self.regform = kwargs.pop('regform')
        event = self.regform.event
        super(InvitationFormBase, self).__init__(*args, **kwargs)
        # the skip-moderation switch is meaningless when the form is unmoderated
        if not self.regform.moderation_enabled:
            del self.skip_moderation
        self.email_from.choices = event.get_allowed_sender_emails().items()
        self.email_body.description = render_placeholder_info('registration-invitation-email', invitation=None)

    def validate_email_body(self, field):
        # reject the body if required email placeholders are missing
        missing = get_missing_placeholders('registration-invitation-email', field.data, invitation=None)
        if missing:
            raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))
class InvitationFormNew(InvitationFormBase):
    """Invitation form for a single, manually-entered person."""

    _invitation_fields = ('first_name', 'last_name', 'email', 'affiliation') + InvitationFormBase._invitation_fields
    first_name = StringField(_('First name'), [DataRequired()],
                             description=_("The first name of the user you are inviting."))
    last_name = StringField(_('Last name'), [DataRequired()],
                            description=_("The last name of the user you are inviting."))
    # email is lowercased by the filter before validation
    email = EmailField(_('Email'), [DataRequired(), Email()], filters=[lambda x: x.lower() if x else x],
                       description=_("The invitation will be sent to this address."))
    affiliation = StringField(_('Affiliation'),
                              description=_("The affiliation of the user you are inviting."))

    @generated_data
    def users(self):
        # normalize the single manual entry into the list-of-dicts shape
        # shared with InvitationFormExisting
        return [{'first_name': self.first_name.data,
                 'last_name': self.last_name.data,
                 'email': self.email.data,
                 'affiliation': self.affiliation.data}]

    def validate_email(self, field):
        # reject addresses that already have an invitation or an active registration
        if RegistrationInvitation.find(email=field.data).with_parent(self.regform).count():
            raise ValidationError(_("There is already an invitation with this email address."))
        if Registration.find(email=field.data, is_active=True).with_parent(self.regform).count():
            raise ValidationError(_("There is already a registration with this email address."))
class InvitationFormExisting(InvitationFormBase):
    """Invitation form for one or more users selected from the user database."""

    _invitation_fields = ('users_field',) + InvitationFormBase._invitation_fields
    users_field = PrincipalListField(_('Users'), [DataRequired()], allow_external_users=True,
                                     description=_("Select the users to invite."))

    @generated_data
    def users(self):
        # normalize the selected principals into the list-of-dicts shape
        # shared with InvitationFormNew (emails lowercased)
        return [{'first_name': x['firstName'],
                 'last_name': x['familyName'],
                 'email': x['email'].lower(),
                 'affiliation': x['affiliation']}
                for x in self.users_field.data]

    def validate_users_field(self, field):
        emails = {x['email'].lower() for x in field.data}
        # reject addresses that already have an invitation
        existing = {x.email for x in self.regform.invitations} & emails
        if existing:
            raise ValidationError(_("There are already invitations for the following email addresses: {emails}")
                                  .format(emails=', '.join(sorted(existing))))
        # reject addresses that already have an active registration
        existing = {x.email for x in self.regform.registrations if x.is_active} & emails
        if existing:
            raise ValidationError(_("There are already registrations with the following email addresses: {emails}")
                                  .format(emails=', '.join(sorted(existing))))
class EmailRegistrantsForm(IndicoForm):
    """Form to compose and send an email to selected registrants."""

    from_address = SelectField(_("From"), [DataRequired()])
    cc_addresses = EmailListField(_("CC"),
                                  description=_("Beware, addresses in this field will receive one mail per "
                                                "registrant."))
    subject = StringField(_("Subject"), [DataRequired()])
    body = TextAreaField(_("Email body"), [DataRequired()], widget=CKEditorWidget(simple=True))
    recipients = IndicoEmailRecipientsField(_('Recipients'))
    copy_for_sender = BooleanField(_('Send copy to me'), widget=SwitchWidget(),
                                   description=_('Send copy of each email to my mailbox'))
    attach_ticket = BooleanField(_('Attach ticket'), widget=SwitchWidget(),
                                 description=_('Attach tickets to emails'))
    # ids of the registrations to email, carried through the preview step
    registration_id = HiddenFieldList()
    # marker field used by is_submitted() to distinguish preview from submit
    submitted = HiddenField()

    def __init__(self, *args, **kwargs):
        # `regform` is passed by the caller and popped before the base init
        self.regform = kwargs.pop('regform')
        event = self.regform.event
        super(EmailRegistrantsForm, self).__init__(*args, **kwargs)
        self.from_address.choices = event.get_allowed_sender_emails().items()
        self.body.description = render_placeholder_info('registration-email', regform=self.regform, registration=None)

    def validate_body(self, field):
        # reject the body if required email placeholders are missing
        missing = get_missing_placeholders('registration-email', field.data, regform=self.regform, registration=None)
        if missing:
            raise ValidationError(_('Missing placeholders: {}').format(', '.join(missing)))

    def is_submitted(self):
        # only treat the form as submitted when the hidden marker is present,
        # so the preview request does not trigger validation/sending
        return super(EmailRegistrantsForm, self).is_submitted() and 'submitted' in request.form
class TicketsForm(IndicoForm):
    """Form configuring ticketing for a registration form."""

    tickets_enabled = BooleanField(_('Enable Tickets'), widget=SwitchWidget(),
                                   description=_('Create tickets for registrations using this registration form.'))
    # The remaining fields are only visible while `tickets_enabled` is set
    # (HiddenUnless), but their submitted data is preserved.
    ticket_on_email = BooleanField(_('Send with an e-mail'), [HiddenUnless('tickets_enabled',
                                                                           preserve_data=True)],
                                   widget=SwitchWidget(),
                                   description=_('Attach PDF ticket to the email sent to a user after completing '
                                                 'their registration.'))
    ticket_on_event_page = BooleanField(_('Download from event homepage'), [HiddenUnless('tickets_enabled',
                                                                                        preserve_data=True)],
                                        widget=SwitchWidget(),
                                        description=_('Allow users to download their ticket from the '
                                                      'conference homepage.'))
    ticket_on_summary_page = BooleanField(_('Download from summary page'), [HiddenUnless('tickets_enabled',
                                                                                        preserve_data=True)],
                                          widget=SwitchWidget(),
                                          description=_('Allow users to download their ticket from the registration '
                                                        'summary page.'))
    ticket_template_id = SelectField(_('Ticket template'), [HiddenUnless('tickets_enabled', preserve_data=True),
                                                            Optional()], coerce=int)

    def __init__(self, *args, **kwargs):
        event = kwargs.pop('event')
        super(TicketsForm, self).__init__(*args, **kwargs)
        # Offer all badge templates of the event (and inherited ones); the
        # category default is listed first with a marker suffix.
        default_tpl = get_default_template_on_category(event.category)
        all_templates = set(event.designer_templates) | get_inherited_templates(event)
        badge_templates = [(tpl.id, tpl.title) for tpl in all_templates
                           if tpl.type == TemplateType.badge and tpl != default_tpl]
        # Show the default template first
        badge_templates.insert(0, (default_tpl.id, '{} ({})'.format(default_tpl.title, _('Default category template'))))
        self.ticket_template_id.choices = badge_templates
class ParticipantsDisplayForm(IndicoForm):
    """Form to customize the display of the participant list."""
    json = JSONField()

    def validate_json(self, field):
        """Validate the submitted payload against the settings schema."""
        # merge_forms: show one merged table; participant_list_forms: regform
        # ids to include; participant_list_columns: column identifiers.
        schema = {
            'type': 'object',
            'properties': {
                'merge_forms': {'type': 'boolean'},
                'participant_list_forms': {
                    'type': 'array',
                    'items': {'type': 'integer'}
                },
                'participant_list_columns': {
                    'type': 'array',
                    'items': {'type': 'string'}
                }
            }
        }
        try:
            jsonschema.validate(field.data, schema)
        except jsonschema.ValidationError as exc:
            # Re-raise as a WTForms error so it shows up on the field.
            raise ValidationError(exc.message)
class ParticipantsDisplayFormColumnsForm(IndicoForm):
    """Form to customize the columns for a particular registration form on the participant list."""
    json = JSONField()

    def validate_json(self, field):
        """Validate the payload: `columns` must be a list of field ids."""
        schema = {
            'type': 'object',
            'properties': {
                'columns': {
                    'type': 'array',
                    'items': {'type': 'integer'}
                }
            }
        }
        try:
            jsonschema.validate(field.data, schema)
        except jsonschema.ValidationError as exc:
            # Re-raise as a WTForms error so it shows up on the field.
            raise ValidationError(exc.message)
class RegistrationManagersForm(IndicoForm):
    """Form to manage users with privileges to modify registration-related items"""

    managers = PrincipalListField(_('Registration managers'), allow_groups=True, allow_emails=True,
                                  allow_external_users=True,
                                  description=_('List of users allowed to modify registrations'),
                                  event=lambda form: form.event)

    def __init__(self, *args, **kwargs):
        # Set before delegating: the field's `event` callable above reads it.
        self.event = kwargs.pop('event')
        super(RegistrationManagersForm, self).__init__(*args, **kwargs)
class CreateMultipleRegistrationsForm(IndicoForm):
    """Form to create multiple registrations of Indico users at the same time."""

    user_principals = PrincipalListField(_("Indico users"), [DataRequired()])
    notify_users = BooleanField(_("Send e-mail notifications"),
                                default=True,
                                description=_("Notify the users about the registration."),
                                widget=SwitchWidget())

    def __init__(self, *args, **kwargs):
        self._regform = kwargs.pop('regform')
        open_add_user_dialog = kwargs.pop('open_add_user_dialog', False)
        super(CreateMultipleRegistrationsForm, self).__init__(*args, **kwargs)
        # Optionally open the user search dialog as soon as the form is shown.
        self.user_principals.open_immediately = open_add_user_dialog

    def validate_user_principals(self, field):
        """Reject users who already have a non-deleted registration."""
        for user in field.data:
            if user.registrations.filter_by(registration_form=self._regform, is_deleted=False).one_or_none():
                raise ValidationError(_("A registration for {} already exists.").format(user.full_name))
class BadgeSettingsForm(IndicoForm):
    """Form configuring how badges/tickets are laid out for printing."""

    template = SelectField(_('Template'))
    save_values = BooleanField(_("Save values for next time"), widget=SwitchWidget(),
                               description=_("Save these values in the event settings"))
    dashed_border = BooleanField(_("Dashed border around each badge"), widget=SwitchWidget(),
                                 description=_("Display a dashed border around each badge"))
    page_size = IndicoEnumSelectField(_('Page size'), enum=PageSize, sorted=True)
    page_orientation = IndicoEnumSelectField(_('Page orientation'), enum=PageOrientation)
    page_layout = IndicoEnumSelectField(_('Page layout'), enum=PageLayout,
                                        description=_('The single sided (foldable) option is only available if the '
                                                      'template orientation is the same as the page orientation and '
                                                      'its width is exactly half of the page width'))
    # All margins/spacings are page-layout measurements (required numbers).
    top_margin = FloatField(_('Top margin'), [InputRequired()])
    left_margin = FloatField(_('Left margin'), [InputRequired()])
    right_margin = FloatField(_('Right margin'), [InputRequired()])
    bottom_margin = FloatField(_('Bottom margin'), [InputRequired()])
    margin_columns = FloatField(_('Margin between columns'), [InputRequired()])
    margin_rows = FloatField(_('Margin between rows'), [InputRequired()])
    submitted = HiddenField()

    def __init__(self, event, **kwargs):
        # Offer only badge templates; plugins may filter the list via signal.
        all_templates = set(event.designer_templates) | get_inherited_templates(event)
        badge_templates = [tpl for tpl in all_templates if tpl.type.name == 'badge']
        signals.event.filter_selectable_badges.send(type(self), badge_templates=badge_templates)
        tickets = kwargs.pop('tickets')
        super(BadgeSettingsForm, self).__init__(**kwargs)
        # Choices limited to ticket (or non-ticket) templates, sorted by title.
        self.template.choices = sorted(((unicode(tpl.id), tpl.title)
                                        for tpl in badge_templates
                                        if tpl.is_ticket == tickets),
                                       key=itemgetter(1))

    def is_submitted(self):
        # Only consider the form submitted on the explicit final POST.
        return super(BadgeSettingsForm, self).is_submitted() and 'submitted' in request.form
class ImportRegistrationsForm(IndicoForm):
    """Form to bulk-import registrations from a CSV file."""

    source_file = FileField(_("Source File"), [DataRequired()], accepted_file_types='.csv')
    skip_moderation = BooleanField(_("Skip Moderation"), widget=SwitchWidget(), default=True,
                                   description=_("If enabled, the registration will be immediately accepted"))
    notify_users = BooleanField(_("E-mail users"), widget=SwitchWidget(),
                                description=_("Whether the imported users should receive an e-mail notification"))

    def __init__(self, *args, **kwargs):
        self.regform = kwargs.pop('regform')
        super(ImportRegistrationsForm, self).__init__(*args, **kwargs)
        # The moderation toggle is meaningless when moderation is disabled.
        if not self.regform.moderation_enabled:
            del self.skip_moderation
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Python 2 script: scrape the Trevecca online catalog and write one combined,
# print-friendly HTML document (all programs of study) to stdout.
# import necessay modules
from lxml import html
from lxml import etree
import requests

# top-level domain
parent_domain = 'http://trevecca.smartcatalogiq.com'

# parent page showing the porgrams of study
parent_page_url = parent_domain + '/en/2015-2016/University-Catalog/Programs-of-Study'
parent_page = requests.get(parent_page_url)
parent_tree = html.fromstring(parent_page.content.replace(' ',''))

# get list of program names and program URLs
program_names = parent_tree.xpath('//div[@id="sc-program-links"]//a//text()')
program_links = parent_tree.xpath('//div[@id="sc-program-links"]//a//@href')

# print opening of web page
print '<!DOCTYPE html>'
print '<html class="no-js" lang="en-US" itemscope itemtype="http://schema.org/CollegeOrUniversity">'

# loop over programs
for program_idx in range(len(program_names)):
    # skip display of Four-Year plans
    if program_names[program_idx].find("Four-Year")<0 and program_names[program_idx].find("Four Year")<0 and program_names[program_idx].find("Five-Year")<0 and program_names[program_idx].find("Program of Study")<0:
        program_page_url = parent_domain + program_links[program_idx]
        program_page = requests.get(program_page_url)
        program_tree = html.fromstring(program_page.content.replace(' ',''))
        program_div = program_tree.xpath('//div[@id="main"]')
        # the first program also emits the <head>: stylesheets are copied from
        # the source site (relative hrefs rewritten to absolute URLs)
        if program_idx==0:
            print '<head>'
            print '<title>' + parent_domain + '</title>'
            # css style links
            program_style_links = program_tree.xpath('//head/link')
            for program_style_link in program_style_links:
                print(etree.tostring(program_style_link, pretty_print=True).replace('href="/', 'href="' + parent_domain + '/'))
            # add page breaks before H1 (program names)
            print '<style type="text/css">'
            print '@media print {'
            print ' h1:not([name=first]){page-break-before: always;}'
            print '}'
            print '@media screen {'
            print ' h1:not([name=first]){page-break-before: always;}'
            print '}'
            print '</style>'
            print '</head>'
            print '<body>'
            # first program's <h1> is tagged name="first" so it gets no page break
            print(etree.tostring(program_div[0],pretty_print=True).replace('href="/', 'href="' + parent_domain + '/').replace('<h1>', '<h1 name="first">', 1))
        else:
            print(etree.tostring(program_div[0],pretty_print=True).replace('href="/', 'href="' + parent_domain + '/'))
    # uncomment for quicker debug
    #if program_idx>10:
    #    break

# print closing of body and page
print '</body>'
print '</html>'
|
nilq/baby-python
|
python
|
# Read the triangle's vertices from stdin: apex (x1, y1) and the two base
# endpoints (x2, y2) and (x3, y2) -- the base is horizontal, so vertex 3
# shares y2 and no y3 is read.
x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())
x3 = int(input())
# Base length is the horizontal distance between the base endpoints; the
# height is the vertical distance from the apex to the base line.
# (Fixed: removed the dead, never-used local `y3 = y2`.)
a = abs(x3 - x2)
h = abs(y1 - y2)
# Standard area formula: base * height / 2 (prints a float).
s = a * h / 2
print(s)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import serial
import sys
import time
import string
from serial import SerialException
import RPi.GPIO as gpio
class SerialExpander:
    """Driver for Atlas Scientific sensors behind an 8-port serial expander.

    Three GPIO pins (S3, S2, S1) select which expander port is routed to the
    single UART; commands and readings are then exchanged over that UART.
    """

    def __init__(self, port='/dev/ttyS0', baud=9600, timeout=0, **kwargs):
        self.__port = port
        self.__baud = baud
        self.ser = serial.Serial(self.__port, self.__baud, timeout=timeout)
        # GPIO levels (S3, S2, S1) that select each expander port.
        self.expanderAddr = {'P1': [0, 0, 0],
                             'P2': [0, 0, 1],
                             'P3': [0, 1, 0],
                             'P4': [0, 1, 1],
                             'P5': [1, 0, 0],
                             'P6': [1, 0, 1],
                             'P7': [1, 1, 0],
                             'P8': [1, 1, 1]
                             }
        # Configuration for Lutra Airboats
        self.config()

    def config(self):
        """
        Configuration specific for Atlas sensors in LSA's Lutra Airboats.
        Sets up the GPIO address pins, disables acknowledge (ACK/OK) messages
        and turns off continuous sensor readings on every expander port.
        """
        # Setup for GPIO
        gpio.setwarnings(False)
        gpio.setmode(gpio.BCM)
        self.gpioPins = [24, 23, 18]  # [S3, S2, S1]
        for pin in self.gpioPins:
            gpio.setup(pin, gpio.OUT)
        # Turn off ACK messages and continuous reading for all devices
        for port in self.expanderAddr:
            self.ser.flush()
            self.send_cmd("*OK,0", port)  # Disable OK messages
            time.sleep(0.01)  # Wait 10 ms before next instruction
            self.ser.flush()
            self.send_cmd("C,0", port)  # Disable continuous reading mode
            time.sleep(0.01)  # Wait 10 ms before next instruction
        # Return to default port "0,0,0" (or "P1")
        self.select_SE_port("P1")

    def select_SE_port(self, port):
        """
        Selects the Serial Expander port by driving the GPIO address pins
        according to the self.expanderAddr table.
        """
        for i, pin in enumerate(self.gpioPins):
            gpio.output(pin, self.expanderAddr[port][i])

    def change_port(self, port):
        """Switch to another serial device path and reconfigure the sensors."""
        self.__port = port
        # Fixed: apply the new path to the underlying Serial object; without
        # this, connect() silently reopened the old device.
        self.ser.port = port
        self.connect()
        self.config()

    def change_baud(self, baud):
        """Switch to another baud rate and reconfigure the sensors."""
        self.__baud = baud
        # Fixed: apply the new rate to the underlying Serial object; without
        # this, connect() silently reopened with the old baud rate.
        self.ser.baudrate = baud
        self.connect()
        self.config()

    def connect(self):
        """Close and reopen the serial connection; return True once open."""
        self.disconnect()
        while True:
            if self.ser.isOpen():
                return True
            self.ser.open()

    def disconnect(self):
        """Close the serial connection; return True once closed."""
        while True:
            if not self.ser.isOpen():
                return True
            self.ser.close()

    def read_line(self):
        """
        taken from the ftdi library and modified to
        use the ezo line separator "\r"
        """
        lsl = len('\r')
        line_buffer = []
        while True:
            next_char = self.ser.read(1)
            # Fixed: truthiness check instead of `== ''` so an empty read
            # (timeout) terminates for both str (py2) and bytes (py3) results.
            # NOTE(review): the terminator comparison below still assumes str
            # data from ser.read() -- confirm against the pyserial version used.
            if not next_char:
                break
            line_buffer.append(next_char)
            if (len(line_buffer) >= lsl and
                    line_buffer[-lsl:] == list('\r')):
                break
        return ''.join(line_buffer)

    def read_lines(self):
        """
        also taken from ftdi lib to work with modified readline function
        """
        lines = []
        try:
            while True:
                line = self.read_line()
                if not line:
                    break
                # Fixed: pyserial's Serial has no flush_input(); the correct
                # call is reset_input_buffer() (flushInput() on pyserial < 3).
                self.ser.reset_input_buffer()
                lines.append(line)
            return lines
        except SerialException as e:
            print("Error, ", e)
            return None

    def send_cmd(self, cmd, port):
        """
        Send command to the Atlas Sensor.
        Before sending, add Carriage Return at the end of the command.
        :param port: expander port name ('P1'..'P8')
        :param cmd: command string without terminator
        :return: True on success, None on serial failure
        """
        self.select_SE_port(port)
        buf = cmd + "\r"  # add carriage return
        try:
            self.ser.write(buf.encode('utf-8'))
            return True
        except SerialException as e:
            print("Error, ", e)
            return None

    def get_data(self, port):
        """
        Gets a single reading from the sensor on the selected port.
        Returns the first line read, or None when nothing was received.
        """
        # Clear previous data
        self.ser.flush()
        # Send request for data
        self.send_cmd("R", port)
        time.sleep(1)
        lines = self.read_lines()
        # Fixed: guard against an empty/failed read instead of IndexError.
        return lines[0] if lines else None
|
nilq/baby-python
|
python
|
# Packaging script for the Wechatbot project (setuptools).
from setuptools import setup, find_packages

# The long description shown on package indexes comes from the README.
with open('README.md') as f:
    readme = f.read()

# The LICENSE file content is embedded verbatim as the license string.
with open('LICENSE') as f:
    content = f.read()

setup(
    name='Wechatbot',
    version='0.0.1',
    description='Wechatbot project',
    long_description=readme,
    install_requires=['itchat==1.3.10', 'requests==2.19.1'],
    author='Kevin Zhao',
    author_email='zhaomy2000@sina.com',
    url='https://github.com/zhaomy2000/flyingshui/wechatbot',
    license=content,
    packages=find_packages()
)
|
nilq/baby-python
|
python
|
# Release metadata; keep the version string and its date in sync on release.
TLS_VERSIONING = "1.0.23"
TLS_DATE = "12 March 2019"
|
nilq/baby-python
|
python
|
"""
This module deals with the definition of all the database models needed for the application
"""
from app import db, app
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from math import cos, sin, atan2, sqrt, radians, degrees
class Lens(db.Model):
    """Represent a camera lens owned by a user."""
    __tablename__ = 'lenses'
    id = db.Column(db.Integer, primary_key=True)
    display_name = db.Column(db.String(256))   # human-readable lens name
    focal_range = db.Column(db.String(64))     # e.g. "24-70" as stored text
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    def __repr__(self):
        # Fixed: previously referenced the non-existent `self.displayName`,
        # which raised AttributeError whenever a Lens was repr()'d.
        return 'Lens: %r' % (self.display_name)
class BetaCode(db.Model):
    """ Table containing the beta codes """
    __tablename__ = 'betacodes'
    id = db.Column(db.Integer, primary_key=True)
    # The invite code string itself (up to 16 characters).
    code = db.Column(db.String(16))

    def __repr__(self):
        return 'Code: %r' % (self.code)
class User(db.Model):
    """ Represents a user of the service """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True)
    # Stores the passlib hash, never the plain-text password.
    password = db.Column(db.String(128))
    fullname = db.Column(db.String(256))
    email = db.Column(db.String(256))
    guides = db.relationship('Guide', backref='owner', lazy='dynamic')
    lenses = db.relationship('Lens', backref='owner', lazy='dynamic')

    def __repr__(self):
        return 'User: %r' % (self.username)

    def hash_password(self, password):
        """ Encrypts the given password before saving it in the entry """
        self.password = pwd_context.encrypt(password)

    def verify_password(self, password):
        """ Validate the given password against the DB one """
        return pwd_context.verify(password, self.password)

    def generate_auth_token(self):
        """ Generate a JWT token for this account

        The serializer signs {'id': ...} with the API secret key and the
        configured expiration time.
        """
        token = Serializer(
            app.config['API_SECRET_KEY'],
            expires_in=app.config['JWT_TOKEN_EXPIRATION']
        )
        return token.dumps({'id': self.id})

    @staticmethod
    def verify_auth_token(token):
        """ Check that the token received is still valid

        Returns the matching User on success; raises ExpiredToken or
        BadSignatureToken otherwise.
        """
        # In case the token so wrong that it's None
        if not token:
            raise BadSignatureToken
        gen_token = Serializer(app.config['API_SECRET_KEY'])
        try:
            data = gen_token.loads(token)
        except SignatureExpired:
            raise ExpiredToken()  # valid token, but expired
        except BadSignature:
            raise BadSignatureToken()  # invalid token
        user = User.query.get(data['id'])
        return user
class ExpiredToken(Exception):
    """Raised when a JWT auth token has passed its expiration time."""
class BadSignatureToken(Exception):
    """Raised when a JWT auth token is malformed or its signature is invalid."""
""" Link for many-to-many relationship between photos and guides """
photo_guide = db.Table(
'photo_guide',
db.Column('guide_id', db.Integer, db.ForeignKey('guides.id')),
db.Column('photo_id', db.Integer, db.ForeignKey('photos.id'))
)
class Guide(db.Model):
    """ Represents a travel guide """
    __tablename__ = 'guides'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(256))
    creation = db.Column(db.DateTime, default=db.func.now())
    last_edited = db.Column(db.DateTime, default=db.func.now())
    visibility = db.Column(db.SmallInteger, default=0)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    photos = db.relationship('Photo', backref='guides', lazy='dynamic', secondary=photo_guide)
    # Transient class attribute (not a DB column); kept for API compatibility.
    number_photo = 0

    def __repr__(self):
        return 'Guide: %r' % (self.title)

    @staticmethod
    def getFeaturedLocation(guide):
        """Return the geographic centroid of the guide's geotagged photos.

        Averages unit vectors on the sphere (correct across the antimeridian)
        and returns a {'latitude', 'longitude'} dict in degrees, or None when
        no photo has coordinates.
        (Fixed: docstring previously said "Return the featured image".)
        """
        photos = guide.photos.all()
        x = 0
        y = 0
        z = 0
        size = 0
        for photo in photos:
            # Photos without coordinates store a falsy latitude and are skipped.
            if photo.latitude:
                lat = radians(float(photo.latitude))
                lon = radians(float(photo.longitude))
                x += cos(lat) * cos(lon)
                y += cos(lat) * sin(lon)
                z += sin(lat)
                size += 1
        # Fixed: was `size is 0` -- identity comparison with an int literal.
        if size == 0:
            return None
        x = float(x / size)
        y = float(y / size)
        z = float(z / size)
        return {
            'latitude': degrees(atan2(z, sqrt(x * x + y * y))),
            'longitude': degrees(atan2(y, x))
        }

    @staticmethod
    def getFeaturedImage(guide):
        """Return the URL of the guide's first photo, or None when empty."""
        # Fixed: query the first photo once instead of twice.
        first_photo = guide.photos.first()
        if first_photo:
            return first_photo.url
        return None

    @staticmethod
    def getNumberPhoto(guide):
        """Return the number of photos attached to the guide.
        (Fixed: docstring previously said "Return the featured image".)
        """
        return len(guide.photos.all())
class Photo(db.Model):
    """
    Represent a photo stored in an external service (flickr/500px)
    Photo are linked in a many to many relationship to the guides
    """
    __tablename__ = 'photos'
    id = db.Column(db.Integer, primary_key=True)
    origin = db.Column(db.Enum('Flickr', '500px', name='service_origin'))
    title = db.Column(db.Text())
    author = db.Column(db.String(256))
    # External service identifier; presumably also used for 500px despite
    # the Flickr-specific name -- TODO confirm against callers.
    flickr_id = db.Column(db.String(16))
    url = db.Column(db.Text())
    # Coordinates are stored as strings; empty/None means "not geotagged".
    latitude = db.Column(db.String(16))
    longitude = db.Column(db.String(16))
    lensFocal = db.Column(db.String(16))
    flash_fired = db.Column(db.SmallInteger, default=0)
    exposure = db.Column(db.String(16))

    def __repr__(self):
        return 'Photo: %r' % (self.id)
nilq/baby-python
|
python
|
import ctypes
import math
from functools import reduce

from WConio2 import textcolor, clrscr, getch, setcursortype
ctypes.windll.kernel32.SetConsoleTitleW("n Numbers HCF")
def hcf(n):
    """Return the highest common factor (GCD) of a list of integers.

    Replaces the hand-rolled Euclidean loop with ``math.gcd`` folded over
    the list, and generalizes to single-element lists (returns the element).
    The result is always non-negative.

    Args:
        n: non-empty list of ints (callers pass absolute values).

    Returns:
        int: the GCD of all values in ``n``.
    """
    return reduce(math.gcd, n)
def main():
    """Interactively collect numbers until 0 is entered, then show their HCF.

    Decimal inputs are scaled up by powers of ten so the HCF is computed on
    integers; the values are echoed back as typed (whole numbers without .0).
    """
    textcolor(11)
    clrscr()
    print("\t\tAfter Entering Enough Numbers Enter last Value as 0 to Find HCF\n\n")
    # num: integer values fed to hcf(); p: display strings; ch: last keypress.
    num, p, ch = [], [], '\0'
    textcolor(10)
    while True:
        try:
            x = float(input("Enter a Number="))
        except (ValueError, EOFError):
            # Fixed: was a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit; non-numeric input or EOF ends entry.
            break
        if not x:
            break
        # Remember the value as typed (drop a trailing .0 for whole numbers).
        p.append(str(int(x)) if x == int(x) else str(x))
        # Scale decimals up until the value is integral.
        x = abs(x)
        while x != int(x):
            x *= 10
        num.append(int(x))
    # Need at least two numbers for a meaningful HCF.
    if len(p) < 2:
        return
    setcursortype(0)
    clrscr()
    textcolor(12)
    print("\n\nHCF of These Entered Numbers:")
    textcolor(15)
    print(' , '.join(p))
    textcolor(14)
    print("Is =", hcf(num))
    textcolor(15)
    print('\nPress Enter Key to Continue')
    # Wait for the Enter key before returning to the console.
    while ch != '\r':
        ch = getch()[1]
    return
main()
|
nilq/baby-python
|
python
|
# Copyright (c) 2019 AT&T Intellectual Property.
# Copyright (c) 2018-2019 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This source code is part of the near-RT RIC (RAN Intelligent Controller)
# platform project (RICP).
#
"""The module provides implementation of Shared Data Layer (SDL) database backend interface."""
import contextlib
import threading
from typing import (Callable, Dict, Set, List, Optional, Tuple, Union)
import zlib
import redis
from redis import Redis
from redis.sentinel import Sentinel
from redis.lock import Lock
from redis._compat import nativestr
from redis import exceptions as redis_exceptions
from ricsdl.configuration import _Configuration
from ricsdl.exceptions import (
RejectedByBackend,
NotConnected,
BackendError
)
from .dbbackend_abc import DbBackendAbc
from .dbbackend_abc import DbBackendLockAbc
@contextlib.contextmanager
def _map_to_sdl_exception():
    """Translates known redis exceptions into SDL exceptions.

    Order matters: ResponseError and Connection/TimeoutError are subclasses
    of RedisError, so the generic mapping must come last.
    """
    try:
        yield
    except(redis_exceptions.ResponseError) as exc:
        # Redis answered with an error reply -> the request was rejected.
        raise RejectedByBackend("SDL backend rejected the request: {}".
                                format(str(exc))) from exc
    except(redis_exceptions.ConnectionError, redis_exceptions.TimeoutError) as exc:
        # Transport-level failure -> report as "not connected".
        raise NotConnected("SDL not connected to backend: {}".
                           format(str(exc))) from exc
    except(redis_exceptions.RedisError) as exc:
        # Any other redis-py error -> generic backend failure.
        raise BackendError("SDL backend failed to process the request: {}".
                           format(str(exc))) from exc
class PubSub(redis.client.PubSub):
    """redis-py PubSub subclass aware of SDL multi-event payloads.

    Published data is decoded, split on `event_separator`, and handed to the
    channel handler as (channel-without-namespace-prefix, [event, ...]).
    """

    def __init__(self, event_separator, connection_pool, ignore_subscribe_messages=False):
        super().__init__(connection_pool, shard_hint=None, ignore_subscribe_messages=ignore_subscribe_messages)
        # Separator used to pack several event strings into one publish payload.
        self.event_separator = event_separator

    def handle_message(self, response, ignore_subscribe_messages=False):
        """
        Parses a pub/sub message. If the channel or pattern was subscribed to
        with a message handler, the handler is invoked instead of a parsed
        message being returned.
        Adapted from: https://github.com/andymccurdy/redis-py/blob/master/redis/client.py
        """
        message_type = nativestr(response[0])
        if message_type == 'pmessage':
            message = {
                'type': message_type,
                'pattern': response[1],
                'channel': response[2],
                'data': response[3]
            }
        elif message_type == 'pong':
            message = {
                'type': message_type,
                'pattern': None,
                'channel': None,
                'data': response[1]
            }
        else:
            message = {
                'type': message_type,
                'pattern': None,
                'channel': response[1],
                'data': response[2]
            }
        # if this is an unsubscribe message, remove it from memory
        if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
            if message_type == 'punsubscribe':
                pattern = response[1]
                if pattern in self.pending_unsubscribe_patterns:
                    self.pending_unsubscribe_patterns.remove(pattern)
                    self.patterns.pop(pattern, None)
            else:
                channel = response[1]
                if channel in self.pending_unsubscribe_channels:
                    self.pending_unsubscribe_channels.remove(channel)
                    self.channels.pop(channel, None)
        if message_type in self.PUBLISH_MESSAGE_TYPES:
            # if there's a message handler, invoke it
            if message_type == 'pmessage':
                handler = self.patterns.get(message['pattern'], None)
            else:
                handler = self.channels.get(message['channel'], None)
            if handler:
                # Need to send only channel and notification instead of raw
                # message
                message_channel = self._strip_ns_from_bin_key('', message['channel'])
                message_data = message['data'].decode('utf-8')
                messages = message_data.split(self.event_separator)
                handler(message_channel, messages)
                # NOTE: unlike upstream redis-py (which returns None after a
                # handled message), the (channel, events) tuple is returned.
                return message_channel, messages
        elif message_type != 'pong':
            # this is a subscribe/unsubscribe message. ignore if we don't
            # want them
            if ignore_subscribe_messages or self.ignore_subscribe_messages:
                return None
        return message

    @classmethod
    def _strip_ns_from_bin_key(cls, ns: str, nskey: bytes) -> str:
        """Decode a raw '{ns},key' channel name and return the bare key part.

        Raises RejectedByBackend when decoding fails or no prefix is present.
        """
        try:
            redis_key = nskey.decode('utf-8')
        except UnicodeDecodeError as exc:
            msg = u'Namespace %s key conversion to string failed: %s' % (ns, str(exc))
            raise RejectedByBackend(msg)
        nskey = redis_key.split(',', 1)
        if len(nskey) != 2:
            msg = u'Namespace %s key:%s has no namespace prefix' % (ns, redis_key)
            raise RejectedByBackend(msg)
        return nskey[1]
class RedisBackend(DbBackendAbc):
"""
A class providing an implementation of database backend of Shared Data Layer (SDL), when
backend database solution is Redis.
Args:
configuration (_Configuration): SDL configuration, containing credentials to connect to
Redis database backend.
"""
def __init__(self, configuration: _Configuration) -> None:
    """Create the Redis client(s) described by the SDL configuration."""
    super().__init__()
    # Round-robin cursor used by handle_events() across clients.
    self.next_client_event = 0
    self.event_separator = configuration.get_event_separator()
    self.clients = list()
    with _map_to_sdl_exception():
        self.clients = self.__create_redis_clients(configuration)
def __del__(self):
    # Best effort: release connections when the backend is garbage collected.
    self.close()
def __str__(self):
    """Human-readable summary of the backend and all of its Redis clients."""
    desc = {"DB type": "Redis"}
    for idx, client in enumerate(self.clients):
        desc["Redis client[" + str(idx) + "]"] = str(client)
    return str(desc)
def is_connected(self):
    """Return True only when every Redis client answers PING."""
    is_connected = True
    with _map_to_sdl_exception():
        for c in self.clients:
            if not c.redis_client.ping():
                is_connected = False
                break
    return is_connected
def close(self):
    """Close the connections of all Redis clients."""
    for c in self.clients:
        c.redis_client.close()
def set(self, ns: str, data_map: Dict[str, bytes]) -> None:
    """Write all key/value pairs of `data_map` into namespace `ns` (MSET)."""
    db_data_map = self.__add_data_map_ns_prefix(ns, data_map)
    with _map_to_sdl_exception():
        self.__getClient(ns).mset(db_data_map)
def set_if(self, ns: str, key: str, old_data: bytes, new_data: bytes) -> bool:
    """Replace `key` only if it still holds `old_data` (SETIE server extension);
    True when the value was replaced."""
    db_key = self.__add_key_ns_prefix(ns, key)
    with _map_to_sdl_exception():
        return self.__getClient(ns).execute_command('SETIE', db_key, new_data, old_data)
def set_if_not_exists(self, ns: str, key: str, data: bytes) -> bool:
    """Write `key` only when it does not exist yet (SETNX); True when written."""
    db_key = self.__add_key_ns_prefix(ns, key)
    with _map_to_sdl_exception():
        return self.__getClient(ns).setnx(db_key, data)
def get(self, ns: str, keys: List[str]) -> Dict[str, bytes]:
    """Read `keys` from namespace `ns` (MGET); keys without a value are omitted."""
    db_keys = self.__add_keys_ns_prefix(ns, keys)
    with _map_to_sdl_exception():
        values = self.__getClient(ns).mget(db_keys)
        # MGET returns one entry per requested key; drop the missing ones.
        return {key: value for key, value in zip(keys, values) if value is not None}
def find_keys(self, ns: str, key_pattern: str) -> List[str]:
    """Return all keys in namespace `ns` matching the glob `key_pattern`."""
    db_key_pattern = self.__add_key_ns_prefix(ns, key_pattern)
    with _map_to_sdl_exception():
        ret = self.__getClient(ns).keys(db_key_pattern)
        return self.__strip_ns_from_bin_keys(ns, ret)
def find_and_get(self, ns: str, key_pattern: str) -> Dict[str, bytes]:
    """Return a dict of every key matching `key_pattern` with its value."""
    # todo: replace below implementation with redis 'NGET' module
    ret = dict()  # type: Dict[str, bytes]
    with _map_to_sdl_exception():
        matched_keys = self.find_keys(ns, key_pattern)
        if matched_keys:
            ret = self.get(ns, matched_keys)
    return ret
def remove(self, ns: str, keys: List[str]) -> None:
    """Delete the given keys from namespace `ns`."""
    db_keys = self.__add_keys_ns_prefix(ns, keys)
    with _map_to_sdl_exception():
        self.__getClient(ns).delete(*db_keys)
def remove_if(self, ns: str, key: str, data: bytes) -> bool:
    """Delete `key` only if it still holds `data` (DELIE server extension);
    True when the key was deleted."""
    db_key = self.__add_key_ns_prefix(ns, key)
    with _map_to_sdl_exception():
        return self.__getClient(ns).execute_command('DELIE', db_key, data)
def add_member(self, ns: str, group: str, members: Set[bytes]) -> None:
    """Add `members` to the set `group` in namespace `ns` (SADD)."""
    db_key = self.__add_key_ns_prefix(ns, group)
    with _map_to_sdl_exception():
        self.__getClient(ns).sadd(db_key, *members)
def remove_member(self, ns: str, group: str, members: Set[bytes]) -> None:
    """Remove `members` from the set `group` in namespace `ns` (SREM)."""
    db_key = self.__add_key_ns_prefix(ns, group)
    with _map_to_sdl_exception():
        self.__getClient(ns).srem(db_key, *members)
def remove_group(self, ns: str, group: str) -> None:
    """Delete the whole set `group` from namespace `ns`."""
    db_key = self.__add_key_ns_prefix(ns, group)
    with _map_to_sdl_exception():
        self.__getClient(ns).delete(db_key)
def get_members(self, ns: str, group: str) -> Set[bytes]:
    """Return all members of the set `group` in namespace `ns` (SMEMBERS)."""
    db_key = self.__add_key_ns_prefix(ns, group)
    with _map_to_sdl_exception():
        return self.__getClient(ns).smembers(db_key)
def is_member(self, ns: str, group: str, member: bytes) -> bool:
    """Return True when `member` belongs to set `group` (SISMEMBER)."""
    db_key = self.__add_key_ns_prefix(ns, group)
    with _map_to_sdl_exception():
        return self.__getClient(ns).sismember(db_key, member)
def group_size(self, ns: str, group: str) -> int:
    """Return the number of members in set `group` (SCARD)."""
    db_key = self.__add_key_ns_prefix(ns, group)
    with _map_to_sdl_exception():
        return self.__getClient(ns).scard(db_key)
def set_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
                    data_map: Dict[str, bytes]) -> None:
    """Write `data_map` into namespace `ns` and publish the given events in
    one atomic server-side operation (MSETMPUB extension)."""
    db_data_map = self.__add_data_map_ns_prefix(ns, data_map)
    # Fixed: removed dead pre-initialisations of the two locals; they were
    # immediately overwritten by _prepare_channels().
    channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
    with _map_to_sdl_exception():
        return self.__getClient(ns).execute_command(
            "MSETMPUB",
            len(db_data_map),
            total_events,
            # Flatten the (key, value) pairs into a single argument list.
            *[val for data in db_data_map.items() for val in data],
            *channels_and_events_prepared,
        )
def set_if_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]], key: str,
                       old_data: bytes, new_data: bytes) -> bool:
    """Conditionally replace `key` and publish events atomically (SETIEMPUB).

    Returns True when the value was replaced (backend answered OK).
    """
    db_key = self.__add_key_ns_prefix(ns, key)
    # Fixed: removed the dead pre-initialisation of
    # channels_and_events_prepared; it was immediately overwritten.
    channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
    with _map_to_sdl_exception():
        ret = self.__getClient(ns).execute_command("SETIEMPUB", db_key, new_data, old_data,
                                                   *channels_and_events_prepared)
        return ret == b"OK"
def set_if_not_exists_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
                                  key: str, data: bytes) -> bool:
    """Write `key` only when absent and publish events atomically (SETNXMPUB);
    True when the key was written."""
    db_key = self.__add_key_ns_prefix(ns, key)
    channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
    with _map_to_sdl_exception():
        ret = self.__getClient(ns).execute_command("SETNXMPUB", db_key, data,
                                                   *channels_and_events_prepared)
        return ret == b"OK"
def remove_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]],
                       keys: List[str]) -> None:
    """Delete `keys` from namespace `ns` and publish the given events in one
    atomic server-side operation (DELMPUB extension)."""
    db_keys = self.__add_keys_ns_prefix(ns, keys)
    channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
    with _map_to_sdl_exception():
        return self.__getClient(ns).execute_command(
            "DELMPUB",
            len(db_keys),
            total_events,
            *db_keys,
            *channels_and_events_prepared,
        )
def remove_if_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]], key: str,
                          data: bytes) -> bool:
    """Delete `key` only if it still holds `data` and publish events atomically
    (DELIEMPUB); True when the key was deleted."""
    db_key = self.__add_key_ns_prefix(ns, key)
    channels_and_events_prepared, _ = self._prepare_channels(ns, channels_and_events)
    with _map_to_sdl_exception():
        ret = self.__getClient(ns).execute_command("DELIEMPUB", db_key, data,
                                                   *channels_and_events_prepared)
        return bool(ret)
def remove_all_and_publish(self, ns: str, channels_and_events: Dict[str, List[str]]) -> None:
    """Delete every key in namespace `ns` and publish the given events in one
    atomic server-side operation (DELMPUB extension)."""
    channels_and_events_prepared, total_events = self._prepare_channels(ns, channels_and_events)
    with _map_to_sdl_exception():
        # Fixed: the KEYS lookup is now inside the exception-mapping context
        # so backend errors surface as SDL exceptions, consistent with every
        # other method of this class.
        keys = self.__getClient(ns).keys(self.__add_key_ns_prefix(ns, "*"))
        return self.__getClient(ns).execute_command(
            "DELMPUB",
            len(keys),
            total_events,
            *keys,
            *channels_and_events_prepared,
        )
def subscribe_channel(self, ns: str, cb: Callable[[str, List[str]], None],
                      channels: List[str]) -> None:
    """Subscribe callback `cb` to the given channels in namespace `ns`."""
    channels = self.__add_keys_ns_prefix(ns, channels)
    for channel in channels:
        with _map_to_sdl_exception():
            redis_ctx = self.__getClientConn(ns)
            redis_ctx.redis_pubsub.subscribe(**{channel: cb})
            # Restart the dispatch thread if it was started earlier but died.
            if not redis_ctx.pubsub_thread.is_alive() and redis_ctx.run_in_thread:
                redis_ctx.pubsub_thread = redis_ctx.redis_pubsub.run_in_thread(sleep_time=0.001,
                                                                               daemon=True)
def unsubscribe_channel(self, ns: str, channels: List[str]) -> None:
    """Unsubscribe from the given channels in namespace `ns`."""
    channels = self.__add_keys_ns_prefix(ns, channels)
    for channel in channels:
        with _map_to_sdl_exception():
            self.__getClientConn(ns).redis_pubsub.unsubscribe(channel)
def start_event_listener(self) -> None:
    """Start a background dispatch thread per client that has subscriptions.

    Raises RejectedByBackend when a listener thread is already running.
    After this call handle_events() polling is no longer allowed
    (run_in_thread is set on every client).
    """
    redis_ctxs = self.__getClientConns()
    for redis_ctx in redis_ctxs:
        if redis_ctx.pubsub_thread.is_alive():
            raise RejectedByBackend("Event loop already started")
        if redis_ctx.redis_pubsub.subscribed and len(redis_ctx.redis_client.pubsub_channels()) > 0:
            redis_ctx.pubsub_thread = redis_ctx.redis_pubsub.run_in_thread(sleep_time=0.001, daemon=True)
        redis_ctx.run_in_thread = True
def handle_events(self) -> Optional[Tuple[str, List[str]]]:
    """Poll the next client (round-robin) for a single pub/sub event.

    Must not be mixed with the background listener thread; raises
    RejectedByBackend in that case. Returns None when no event is pending.
    """
    if self.next_client_event >= len(self.clients):
        self.next_client_event = 0
    redis_ctx = self.clients[self.next_client_event]
    self.next_client_event += 1
    if redis_ctx.pubsub_thread.is_alive() or redis_ctx.run_in_thread:
        raise RejectedByBackend("Event loop already started")
    try:
        return redis_ctx.redis_pubsub.get_message(ignore_subscribe_messages=True)
    except RuntimeError:
        # Raised by redis-py when polling without any subscription.
        return None
def __create_redis_clients(self, config):
    """Create one Redis client per configured cluster address, or a single
    legacy client when no cluster address list is configured."""
    clients = list()
    cfg_params = config.get_params()
    if cfg_params.db_cluster_addr_list is None:
        clients.append(self.__create_legacy_redis_client(cfg_params))
    else:
        for addr in cfg_params.db_cluster_addr_list.split(","):
            client = self.__create_redis_client(cfg_params, addr)
            clients.append(client)
    return clients
def __create_legacy_redis_client(self, cfg_params):
    """Create a client for a single-host (non-cluster) deployment."""
    return self.__create_redis_client(cfg_params, cfg_params.db_host)
    def __create_redis_client(self, cfg_params, addr):
        """Build a _RedisConn for *addr*, directly or via a Sentinel master.

        The SETIE/DELIE response callbacks convert the reply of those custom
        commands into booleans. The pub/sub dispatcher thread is created
        lazily, so a placeholder (never started) Thread is stored initially.
        """
        new_sentinel = None
        new_redis = None
        if cfg_params.db_sentinel_port is None:
            # Plain standalone Redis connection.
            new_redis = Redis(host=addr, port=cfg_params.db_port, db=0, max_connections=20)
        else:
            # Resolve the master through Sentinel.
            sentinel_node = (addr, cfg_params.db_sentinel_port)
            master_name = cfg_params.db_sentinel_master_name
            new_sentinel = Sentinel([sentinel_node])
            new_redis = new_sentinel.master_for(master_name)
        new_redis.set_response_callback('SETIE', lambda r: r and nativestr(r) == 'OK' or False)
        new_redis.set_response_callback('DELIE', lambda r: r and int(r) == 1 or False)
        redis_pubsub = PubSub(self.event_separator, new_redis.connection_pool, ignore_subscribe_messages=True)
        pubsub_thread = threading.Thread(target=None)
        run_in_thread = False
        return _RedisConn(new_redis, redis_pubsub, pubsub_thread, run_in_thread)
    def __getClientConns(self):
        """Return every _RedisConn managed by this backend."""
        return self.clients
def __getClientConn(self, ns):
clients_cnt = len(self.clients)
client_id = self.__get_hash(ns) % clients_cnt
return self.clients[client_id]
def __getClient(self, ns):
clients_cnt = len(self.clients)
client_id = 0
if clients_cnt > 1:
client_id = self.__get_hash(ns) % clients_cnt
return self.clients[client_id].redis_client
@classmethod
def __get_hash(cls, str):
return zlib.crc32(str.encode())
@classmethod
def __add_key_ns_prefix(cls, ns: str, key: str):
return '{' + ns + '},' + key
@classmethod
def __add_keys_ns_prefix(cls, ns: str, keylist: List[str]) -> List[str]:
ret_nskeys = []
for k in keylist:
ret_nskeys.append('{' + ns + '},' + k)
return ret_nskeys
@classmethod
def __add_data_map_ns_prefix(cls, ns: str, data_dict: Dict[str, bytes]) -> Dict[str, bytes]:
ret_nsdict = {}
for key, val in data_dict.items():
ret_nsdict['{' + ns + '},' + key] = val
return ret_nsdict
    @classmethod
    def __strip_ns_from_bin_keys(cls, ns: str, nskeylist: List[bytes]) -> List[str]:
        """Decode raw Redis keys and strip their ``{ns},`` namespace prefix.

        Raises RejectedByBackend when a key is not valid UTF-8 or lacks the
        expected namespace prefix.
        """
        ret_keys = []
        for k in nskeylist:
            try:
                redis_key = k.decode("utf-8")
            except UnicodeDecodeError as exc:
                msg = u'Namespace %s key conversion to string failed: %s' % (ns, str(exc))
                raise RejectedByBackend(msg)
            # Split only on the first comma: the key itself may contain commas.
            nskey = redis_key.split(',', 1)
            if len(nskey) != 2:
                msg = u'Namespace %s key:%s has no namespace prefix' % (ns, redis_key)
                raise RejectedByBackend(msg)
            ret_keys.append(nskey[1])
        return ret_keys
    def _prepare_channels(self, ns: str,
                          channels_and_events: Dict[str, List[str]]) -> Tuple[List, int]:
        """Flatten a channel->events mapping for the notify command.

        Returns a flat list alternating a namespace-prefixed channel name and
        that channel's events joined with ``self.event_separator``, plus the
        number of channel/event pairs.

        NOTE(review): an empty event list appends a bare None without a
        channel entry -- presumably callers never pass one; verify.
        """
        channels_and_events_prepared = []
        for channel, events in channels_and_events.items():
            one_channel_join_events = None
            for event in events:
                if one_channel_join_events is None:
                    # First event: emit the channel name before its events.
                    channels_and_events_prepared.append(self.__add_key_ns_prefix(ns, channel))
                    one_channel_join_events = event
                else:
                    one_channel_join_events = one_channel_join_events + self.event_separator + event
            channels_and_events_prepared.append(one_channel_join_events)
        pairs_cnt = int(len(channels_and_events_prepared) / 2)
        return channels_and_events_prepared, pairs_cnt
    def get_redis_connection(self, ns: str):
        """Return existing Redis database connection valid for the namespace.

        The connection is selected by the same hash-sharding scheme the rest
        of the backend uses, so operations through it land on the same client.
        """
        return self.__getClient(ns)
class _RedisConn:
    """
    Internal class container to hold redis client connection
    """
    def __init__(self, redis_client, pubsub, pubsub_thread, run_in_thread):
        self.redis_client = redis_client    # redis.Redis instance
        self.redis_pubsub = pubsub          # PubSub wrapper bound to the client
        self.pubsub_thread = pubsub_thread  # event dispatcher thread (may be unstarted)
        self.run_in_thread = run_in_thread  # whether threaded dispatching is enabled
    def __str__(self):
        # Debug representation; fixed the "Subscrions" typo in the key name.
        return str(
            {
                "Client": repr(self.redis_client),
                "Subscriptions": self.redis_pubsub.subscribed,
                "PubSub thread": repr(self.pubsub_thread),
                "Run in thread": self.run_in_thread,
            }
        )
class RedisBackendLock(DbBackendLockAbc):
    """
    A class providing an implementation of database backend lock of Shared Data Layer (SDL), when
    backend database solution is Redis.
    Args:
        ns (str): Namespace under which this lock is targeted.
        name (str): Lock name, identifies the lock key in a Redis database backend.
        expiration (int, float): Lock expiration time after which the lock is removed if it hasn't
            been released earlier by a 'release' method.
        redis_backend (RedisBackend): Database backend object containing connection to Redis
            database.
    """
    # Class-level cache of the registered Lua script; populated once by
    # _register_scripts and shared by all instances.
    lua_get_validity_time = None
    # KEYS[1] - lock name
    # ARGS[1] - token
    # return < 0 in case of failure, otherwise return lock validity time in milliseconds.
    LUA_GET_VALIDITY_TIME_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token then
            return -10
        end
        if token ~= ARGV[1] then
            return -11
        end
        return redis.call('pttl', KEYS[1])
    """
    def __init__(self, ns: str, name: str, expiration: Union[int, float],
                 redis_backend: RedisBackend) -> None:
        """Create the underlying redis-py Lock under the namespaced key."""
        super().__init__(ns, name)
        self.__redis = redis_backend.get_redis_connection(ns)
        with _map_to_sdl_exception():
            # Same '{ns},' prefix scheme as regular keys, so the lock hashes
            # to the same Redis client/slot as the namespace's data.
            redis_lockname = '{' + ns + '},' + self._lock_name
            self.__redis_lock = Lock(redis=self.__redis, name=redis_lockname, timeout=expiration)
            self._register_scripts()
    def __str__(self):
        """Debug representation of the lock and its current status."""
        return str(
            {
                "lock DB type": "Redis",
                "lock namespace": self._ns,
                "lock name": self._lock_name,
                "lock status": self._lock_status_to_string()
            }
        )
    def acquire(self, retry_interval: Union[int, float] = 0.1,
                retry_timeout: Union[int, float] = 10) -> bool:
        """Try to acquire the lock, retrying every *retry_interval* seconds
        for at most *retry_timeout* seconds. Returns True on success."""
        succeeded = False
        self.__redis_lock.sleep = retry_interval
        with _map_to_sdl_exception():
            succeeded = self.__redis_lock.acquire(blocking_timeout=retry_timeout)
        return succeeded
    def release(self) -> None:
        """Release a held lock."""
        with _map_to_sdl_exception():
            self.__redis_lock.release()
    def refresh(self) -> None:
        """Reset the lock's expiration back to the full timeout."""
        with _map_to_sdl_exception():
            self.__redis_lock.reacquire()
    def get_validity_time(self) -> Union[int, float]:
        """Return the remaining validity time of a held lock in seconds.

        Raises RejectedByBackend if the lock is not held by this instance or
        the backend reports a failure (negative Lua script return value).
        """
        validity = 0
        if self.__redis_lock.local.token is None:
            msg = u'Cannot get validity time of an unlocked lock %s' % self._lock_name
            raise RejectedByBackend(msg)
        with _map_to_sdl_exception():
            validity = self.lua_get_validity_time(keys=[self.__redis_lock.name],
                                                  args=[self.__redis_lock.local.token],
                                                  client=self.__redis)
        if validity < 0:
            msg = (u'Getting validity time of a lock %s failed with error code: %d'
                   % (self._lock_name, validity))
            raise RejectedByBackend(msg)
        # Redis PTTL is milliseconds; return whole seconds as int, else float.
        ftime = validity / 1000.0
        if ftime.is_integer():
            return int(ftime)
        return ftime
    def _register_scripts(self):
        """Register the validity-time Lua script once per class."""
        cls = self.__class__
        client = self.__redis
        if cls.lua_get_validity_time is None:
            cls.lua_get_validity_time = client.register_script(cls.LUA_GET_VALIDITY_TIME_SCRIPT)
    def _lock_status_to_string(self) -> str:
        """Describe the lock state for __str__; errors become a message."""
        try:
            if self.__redis_lock.locked():
                if self.__redis_lock.owned():
                    return 'locked'
                return 'locked by someone else'
            return 'unlocked'
        except(redis_exceptions.RedisError) as exc:
            return f'Error: {str(exc)}'
|
nilq/baby-python
|
python
|
# Copyright 2014 Facebook, Inc.
# Modified by Vivek Menon
#
# Creates a paused PAGE_LIKES campaign -> ad set -> creative -> ad and then
# prints a desktop-feed preview. Fill in the access details before running.
from facebookads.adobjects.adaccount import AdAccount
from facebookads.adobjects.campaign import Campaign
from facebookads.adobjects.adset import AdSet
from facebookads.adobjects.adcreative import AdCreative
from facebookads.adobjects.ad import Ad
from facebookads.adobjects.adpreview import AdPreview
from facebookads.api import FacebookAdsApi

# Access details.  NOTE(review): never commit real tokens/secrets to source
# control; load them from the environment or a config file instead.
access_token = ''
ad_account_id = ''
app_secret = ''
page_id = ''
FacebookAdsApi.init(access_token=access_token)

# --- Campaign --------------------------------------------------------------
campaign = AdAccount(ad_account_id).create_campaign(
    fields=[],  # fixed: original had a stray ']' here (syntax error)
    params={
        'name': 'My Campaign',
        'buying_type': 'AUCTION',
        'objective': 'PAGE_LIKES',
        'status': 'PAUSED',
    },
)
print('campaign', campaign)
campaign_id = campaign.get_id()
print('campaign_id:', campaign_id, '\n')

# --- Ad set ----------------------------------------------------------------
fields = [
]
params = {
    'name': 'My AdSet',
    'optimization_goal': 'PAGE_LIKES',
    'billing_event': 'IMPRESSIONS',
    'bid_amount': '20',
    'promoted_object': {'page_id': page_id},
    'daily_budget': '1000',
    'campaign_id': campaign_id,
    'targeting': {'geo_locations': {'countries': ['US']}},
    'status': 'PAUSED',
}
ad_set = AdAccount(ad_account_id).create_ad_set(
    fields=fields,
    params=params,
)
print('ad_set', ad_set)  # fixed: Python 2 print statement
ad_set_id = ad_set.get_id()
print('ad_set_id:', ad_set_id, '\n')

# --- Creative --------------------------------------------------------------
fields = [
]
params = {
    'name': 'My Creative',
    'object_id': page_id,
    'title': 'My Page Like Ad',
    'body': 'Like My Page',
    'image_url': 'http://www.facebookmarketingdevelopers.com/static/images/resource_1.jpg',
}
creative = AdAccount(ad_account_id).create_ad_creative(
    fields=fields,
    params=params,
)
print('creative', creative)
creative_id = creative.get_id()
print('creative_id:', creative_id, '\n')

# --- Ad --------------------------------------------------------------------
fields = [
]
params = {
    'name': 'My Ad',
    'adset_id': ad_set_id,
    'creative': {'creative_id': creative_id},
    'status': 'PAUSED',
}
ad = AdAccount(ad_account_id).create_ad(
    fields=fields,
    params=params,
)
print('ad', ad)
ad_id = ad.get_id()
print('ad_id:', ad_id, '\n')

# --- Preview ---------------------------------------------------------------
fields = [
]
params = {
    'ad_format': 'DESKTOP_FEED_STANDARD',
}
print(Ad(ad_id).get_previews(
    fields=fields,
    params=params,
))
|
nilq/baby-python
|
python
|
def solution(s):
    """Return True when 'p' and 'y' occur equally often in *s*, ignoring case."""
    lowered = s.lower()
    return lowered.count('p') == lowered.count('y')


if __name__ == '__main__':
    print(solution('pPoooyY'))
|
nilq/baby-python
|
python
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
import http.client
import mimetypes
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import get_url, cint
from frappe.utils.background_jobs import enqueue
from frappe import msgprint
from frappe.model.document import Document
import datetime
from frappe.utils import cint, flt, cstr, now
from datetime import date, datetime
from fuzzywuzzy import process
from fuzzywuzzy import fuzz
class SMSApi(Document):
    """Frappe DocType controller for 'SMS Api'; all behaviour lives in the
    whitelisted module-level functions below."""
    pass
@frappe.whitelist()
def send_message(payload_to_send):
    """Send a bulk SMS through the Onfon Media HTTP API.

    *payload_to_send* is the message-parameters structure (number/text pairs)
    forwarded verbatim inside the request body. The request payload and raw
    API response are handed back to the caller via ``frappe.response``.
    """
    msgprint(payload_to_send)
    msgparameters = [payload_to_send]
    conn = http.client.HTTPSConnection("api.onfonmedia.co.ke")
    # NOTE(review): API credentials are hard-coded; move them to site config
    # and rotate the exposed keys.
    payload = {
        "SenderId": "MTRH",
        "MessageParameters": msgparameters,
        "ApiKey": "69pJq6iTBSwfAaoL4BU7yHi361dGLkqQ1MJYHQF/lJI=",
        "ClientId": "8055c2c9-489b-4440-b761-a0cc27d1e119",
    }
    msgprint(payload)
    headers = {
        'Content-Type': 'application/json',
        'AccessKey': 'FKINNX9pwrBDzGHxgQ2EB97pXMz6vVgd',
        'Cookie': 'AWSALBTG=cWN78VX7OjvsWtCKpI8+ZTJuLfqNCOqRtmN6tRa4u47kdC/G4k7L3TdKrzftl6ni4LspFPErGdwg/iDlloajVm0LoGWChohiR07jljLMz/a8tduH+oHvptQVo1DgCplIyjCC+SyvnUjS2vrFiLN5E+OvP9KwWIjvmHjRiNJZSVJ4MageyKQ=; AWSALBTGCORS=cWN78VX7OjvsWtCKpI8+ZTJuLfqNCOqRtmN6tRa4u47kdC/G4k7L3TdKrzftl6ni4LspFPErGdwg/iDlloajVm0LoGWChohiR07jljLMz/a8tduH+oHvptQVo1DgCplIyjCC+SyvnUjS2vrFiLN5E+OvP9KwWIjvmHjRiNJZSVJ4MageyKQ=',
    }
    # Fixed: http.client requires a str/bytes body -- passing the dict
    # directly (as the original did) raises TypeError at runtime.
    conn.request("POST", "/v1/sms/SendBulkSMS", json.dumps(payload), headers)
    res = conn.getresponse()
    data = res.read()
    frappe.response["payload"] = payload
    frappe.response["response"] = data
@frappe.whitelist()
def duplicate_checker(item_code):
    """List enabled Items whose name fuzzy-matches *item_code*'s item name.

    Uses fuzzywuzzy token_sort_ratio; matches with ratio > 80 are reported.
    Results are placed in frappe.response and also returned.
    """
    item = frappe.db.get_value("Item",{"item_code":item_code},"item_name")
    items = frappe.db.get_list('Item',
        filters={
            'disabled': "0",
            'item_code':["NOT LIKE",item_code] #EXCLUDE THIS PARTICULAR ITEM
        },
        fields=['item_name','item_code','item_group'],
        as_list=False
    )
    itemsarray =[]
    itemdict={}
    for row in items:
        # Token-sort ratio is order-insensitive, so "Blue Pen" ~ "Pen Blue".
        ratio = fuzz.token_sort_ratio(item,str(row.item_name))
        itemcode = row.item_code
        itemname =row.item_name
        itemgroup =row.item_group
        if ratio > 80:
            itemdict["item_code"] = itemcode
            itemdict["item_name"] = itemname
            itemdict["item_category"] = itemgroup
            itemdict["ratio"] = ratio
            itemsarray.append(itemdict)
            # Re-bind a fresh dict so the next match doesn't mutate this one.
            itemdict ={}
    #payload = process.extract(item, itemsarray)
    frappe.response["potential_duplicates"]=itemsarray
    frappe.response["iteminquestion"] = item
    return itemsarray
@frappe.whitelist()
def canceldocuments(payload):
    """DESTRUCTIVE maintenance helper: delete all Item documents except a
    hard-coded exclusion list.

    NOTE(review): the *payload* argument is ignored (the commented-out
    json.loads suggests it once drove the list) and the function permanently
    deletes Items -- confirm this is still intended before exposing it.
    """
    #payload_to_use = json.loads(payload)
    items = frappe.db.get_list('Item',
        filters={
            'item_code':["NOT IN", ["ITM000299", "760000","ITM000173"]] #760000
        },
        fields=['name'],
        as_list=False
    )
    myarr=[]
    payload_to_use =[]
    for item in items:
        payload_to_use.append(str(item.name))
    for lisitem in payload_to_use:
        item_code = lisitem
        #myarr.append(lisitem)
        #frappe.db.set_value("Item",item_code,"disabled","1")
        frappe.delete_doc("Item",item_code)
        """awards = frappe.db.get_list('Tender Quotation Award',
            filters={
                'docstatus': "1",
                'item_code':item_code
            },
            fields=['name'],
            as_list=False
        )
        for award in awards:
            docname = award.name
            frappe.db.set_value("Tender Quotation Award",docname,"docstatus","2")
            frappe.delete_doc("Tender Quotation Award",docname)
        #frappe.delete_doc("Item",item_code)"""
    # myarr is never populated; response is always an empty list.
    frappe.response["items"]=myarr
|
nilq/baby-python
|
python
|
import warnings
from datetime import datetime
import pytest
import pandas as pd
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, custom_errors, create, conversion, conversion_rules
from mssql_dataframe.core.write import insert, _exceptions
pd.options.mode.chained_assignment = "raise"
class package:
    """Bundle of mssql_dataframe helpers sharing one connection for the tests.

    Exposes plain and metadata-timestamp variants of the create/insert
    helpers so individual tests can pick the behaviour they exercise.
    """
    def __init__(self, connection):
        self.connection = connection.connection
        self.create = create.create(self.connection)
        self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
        self.insert = insert.insert(self.connection, autoadjust_sql_objects=True)
        self.insert_meta = insert.insert(self.connection, include_metadata_timestamps=True, autoadjust_sql_objects=True)
@pytest.fixture(scope="module")
def sql():
    """Module-scoped connection to local tempdb; closed on teardown."""
    db = connect(database="tempdb", server="localhost")
    yield package(db)
    db.connection.close()
def test_insert_autoadjust_errors(sql):
    """Values that can't convert to each SQL type must raise, not autoadjust."""
    table_name = "##test_insert_autoadjust_errors"
    # create table with column for each conversion rule
    columns = conversion_rules.rules['sql_type'].to_numpy()
    columns = {'_'+x:x for x in columns}
    sql.create.table(table_name, columns=columns)
    # create dataframes for each conversion rule that should fail insert
    boolean = [3]
    exact_numeric = ['a', '2-1', 1.1, datetime.now()]
    approximate_numeric = ['a', '2-1',datetime.now()]
    date_time = ['a', 1, 1.1]
    character_string = [1, datetime.now()]
    dataframe = [
        pd.DataFrame({'_bit': boolean}),
        pd.DataFrame({'_tinyint': exact_numeric}),
        pd.DataFrame({'_smallint': exact_numeric}),
        pd.DataFrame({'_int': exact_numeric}),
        pd.DataFrame({'_bigint': exact_numeric}),
        pd.DataFrame({'_float': approximate_numeric}),
        pd.DataFrame({'_time': date_time}),
        pd.DataFrame({'_date': date_time}),
        pd.DataFrame({'_datetime2': date_time}),
        pd.DataFrame({'_varchar': character_string}),
        pd.DataFrame({'_nvarchar': character_string}),
    ]
    # insure all conversion rules are being tested
    assert pd.Series(columns.keys()).isin([x.columns[0] for x in dataframe]).all()
    for df in dataframe:
        # check each row to infer to base pandas type
        for row in df.index:
            with pytest.raises(custom_errors.DataframeColumnInvalidValue):
                sql.insert.insert(table_name, df.loc[[row]].infer_objects())
def test_insert_create_table(sql):
    """Insert into a missing table creates it (plus _time_insert metadata)."""
    table_name = "##test_insert_create_table"
    dataframe = pd.DataFrame(
        {"ColumnA": [1, 2, 3], "ColumnB": ["06/22/2021", "06-22-2021", "2021-06-22"]}
    )
    with warnings.catch_warnings(record=True) as warn:
        dataframe = sql.insert_meta.insert(
            table_name, dataframe=dataframe
        )
    # expect exactly: creating table, created table, creating metadata column
    assert len(warn) == 3
    assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
    assert "Creating table " + table_name in str(warn[0].message)
    assert "Created table: " + table_name in str(warn[1].message)
    assert "Creating column _time_insert in table " + table_name in str(
        warn[2].message
    )
    schema,_ = conversion.get_schema(sql.connection, table_name)
    result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
    # the three date spellings should all parse to the same timestamp
    expected = pd.DataFrame(
        {
            "ColumnA": pd.Series([1, 2, 3], dtype="UInt8"),
            "ColumnB": pd.Series(
                [pd.Timestamp(year=2021, month=6, day=22)] * 3,
                dtype="datetime64[ns]",
            ),
        }
    ).set_index(keys="ColumnA")
    assert result[expected.columns].equals(expected)
    assert all(result["_time_insert"].notna())
def test_insert_create_table_indexpk(sql):
    """A named dataframe index becomes the created table's primary key."""
    table_name = "##test_insert_create_table_indexpk"
    dataframe = pd.DataFrame(
        {"ColumnA": [1, 2, 3], "ColumnB": ["06/22/2021", "06-22-2021", "2021-06-22"]},
        index=pd.Series([1,2,3], name='indexpk')
    )
    with warnings.catch_warnings(record=True) as warn:
        dataframe = sql.insert.insert(
            table_name, dataframe=dataframe
        )
    assert len(warn) == 2
    assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
    assert "Creating table " + table_name in str(warn[0].message)
    assert "Created table: " + table_name in str(warn[1].message)
    schema,_ = conversion.get_schema(sql.connection, table_name)
    # only the index column should carry a primary-key sequence number
    assert schema.index[schema['pk_seq'].notna()].equals(pd.Index(['indexpk']))
def test_insert_add_column(sql):
    """Inserting unseen columns adds them to the table with inferred types."""
    table_name = "##test_insert_add_column"
    sql.create.table(table_name, columns={"ColumnA": "TINYINT"})
    dataframe = pd.DataFrame({"ColumnA": [1], "ColumnB": [2], "ColumnC": ["zzz"]})
    with warnings.catch_warnings(record=True) as warn:
        dataframe = sql.insert_meta.insert(table_name, dataframe=dataframe)
    assert len(warn) == 3
    assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
    assert (
        str(warn[0].message)
        == f"Creating column _time_insert in table {table_name} with data type DATETIME2."
    )
    assert (
        str(warn[1].message)
        == f"Creating column ColumnB in table {table_name} with data type tinyint."
    )
    assert (
        str(warn[2].message)
        == f"Creating column ColumnC in table {table_name} with data type varchar(3)."
    )
    schema,_ = conversion.get_schema(sql.connection, table_name)
    result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
    assert result[dataframe.columns].equals(dataframe)
    assert all(result["_time_insert"].notna())
def test_insert_alter_column_unchanged(sql):
    """If a recast would not change column specs, the handler must raise."""
    table_name = "##test_insert_alter_column_unchanged"
    sql.create.table(
        table_name,
        columns={"ColumnA": "TINYINT", "ColumnB": "VARCHAR(1)", "ColumnC": "TINYINT"},
    )
    dataframe = pd.DataFrame({"ColumnA": [1], "ColumnB": ["a"], "ColumnC": [1]})
    # simulate an insufficient-column-size failure for two columns whose
    # contents actually fit -- recasting would leave the specs unchanged
    failure = custom_errors.SQLInsufficientColumnSize(
        "manually testing expection for ColumnB, ColumnC", ["ColumnB", "ColumnC"]
    )
    with pytest.raises(custom_errors.SQLRecastColumnUnchanged):
        _exceptions.handle(
            failure,
            table_name,
            dataframe,
            updating_table=False,
            autoadjust_sql_objects=sql.insert.autoadjust_sql_objects,
            modifier=sql.insert._modify,
            creator=sql.insert._create,
        )
def test_insert_alter_column(sql):
    """Values too large for existing columns widen those columns on insert."""
    table_name = "##test_insert_alter_column"
    sql.create.table(
        table_name,
        columns={"ColumnA": "TINYINT", "ColumnB": "VARCHAR(1)", "ColumnC": "TINYINT"},
    )
    # 'aaa' exceeds VARCHAR(1); 100000 exceeds TINYINT
    dataframe = pd.DataFrame({"ColumnA": [1], "ColumnB": ["aaa"], "ColumnC": [100000]})
    with warnings.catch_warnings(record=True) as warn:
        dataframe = sql.insert_meta.insert(table_name, dataframe=dataframe)
    assert len(warn) == 3
    assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
    assert (
        str(warn[0].message)
        == f"Creating column _time_insert in table {table_name} with data type DATETIME2."
    )
    assert (
        str(warn[1].message)
        == f"Altering column ColumnB in table {table_name} to data type varchar(3) with is_nullable=True."
    )
    assert (
        str(warn[2].message)
        == f"Altering column ColumnC in table {table_name} to data type int with is_nullable=True."
    )
    schema,_ = conversion.get_schema(sql.connection, table_name)
    result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
    assert result[dataframe.columns].equals(dataframe)
    assert all(result["_time_insert"].notna())
    _, dtypes = conversion.sql_spec(schema, dataframe)
    assert dtypes == {
        "ColumnA": "tinyint",
        "ColumnB": "varchar(3)",
        "ColumnC": "int",
        "_time_insert": "datetime2",
    }
def test_insert_alter_primary_key(sql):
    """A primary-key column is widened (tinyint -> smallint) when new rows
    overflow it, and the primary key definition survives the alteration.

    Uses pd.concat instead of DataFrame.append, which was removed in
    pandas 2.0.
    """
    # inital insert
    table_name = "##test_insert_alter_primary_key"
    dataframe = pd.DataFrame(
        {
            "ColumnA": [0, 1, 2, 3],
            "ColumnB": [0, 1, 2, 3],
            "ColumnC": ["a", "b", "c", "d"],
        }
    ).set_index(keys=["ColumnA", "ColumnB"])
    with warnings.catch_warnings(record=True) as warn:
        dataframe = sql.create.table_from_dataframe(table_name, dataframe, primary_key="index")
    assert len(warn) == 1
    assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
    assert "Created table" in str(warn[0].message)
    schema,_ = conversion.get_schema(sql.connection, table_name)
    _, dtypes = conversion.sql_spec(schema, dataframe)
    assert dtypes == {
        "ColumnA": "tinyint",
        "ColumnB": "tinyint",
        "ColumnC": "varchar(1)",
    }
    assert schema.at["ColumnA", "pk_seq"] == 1
    assert schema.at["ColumnB", "pk_seq"] == 2
    assert pd.isna(schema.at["ColumnC", "pk_seq"])
    # insert that alters primary key: 256+ overflows tinyint
    new = pd.DataFrame(
        {
            "ColumnA": [256, 257, 258, 259],
            "ColumnB": [4, 5, 6, 7],
            "ColumnC": ["e", "f", "g", "h"],
        }
    ).set_index(keys=["ColumnA", "ColumnB"])
    with warnings.catch_warnings(record=True) as warn:
        new = sql.insert.insert(table_name, new)
    assert len(warn) == 1
    assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
    assert (
        str(warn[0].message)
        == "Altering column ColumnA in table ##test_insert_alter_primary_key to data type smallint with is_nullable=False."
    )
    schema,_ = conversion.get_schema(sql.connection, table_name)
    result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
    assert result.equals(pd.concat([dataframe, new]))
    _, dtypes = conversion.sql_spec(schema, new)
    assert dtypes == {
        "ColumnA": "smallint",
        "ColumnB": "tinyint",
        "ColumnC": "varchar(1)",
    }
    assert schema.at["ColumnA", "pk_seq"] == 1
    assert schema.at["ColumnB", "pk_seq"] == 2
    assert pd.isna(schema.at["ColumnC", "pk_seq"])
def test_insert_add_and_alter_column(sql):
    """A single insert may both add a new column and widen an existing one.

    Uses pd.concat instead of DataFrame.append, which was removed in
    pandas 2.0.
    """
    table_name = "##test_insert_add_and_alter_column"
    dataframe = pd.DataFrame({"ColumnA": [0, 1, 2, 3], "ColumnB": [0, 1, 2, 3]})
    with warnings.catch_warnings(record=True) as warn:
        dataframe = sql.create_meta.table_from_dataframe(table_name, dataframe, primary_key="index")
    assert len(warn) == 1
    assert isinstance(warn[0].message, custom_warnings.SQLObjectAdjustment)
    assert "Created table" in str(warn[0].message)
    # ColumnB overflows tinyint; ColumnC does not exist yet
    new = pd.DataFrame({
        'ColumnA': [4,5,6,7],
        'ColumnB': [256, 257, 258, 259],
        'ColumnC': [0, 1, 2, 3]
    }, index=[4,5,6,7])
    new.index.name = '_index'
    with warnings.catch_warnings(record=True) as warn:
        new = sql.insert_meta.insert(table_name, new)
    assert len(warn) == 2
    assert all([isinstance(x.message, custom_warnings.SQLObjectAdjustment) for x in warn])
    assert (
        str(warn[0].message)
        == f"Creating column ColumnC in table {table_name} with data type tinyint."
    )
    assert (
        str(warn[1].message)
        == f"Altering column ColumnB in table {table_name} to data type smallint with is_nullable=False."
    )
    schema,_ = conversion.get_schema(sql.connection, table_name)
    result = conversion.read_values(f"SELECT * FROM {table_name}", schema, sql.connection)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
    assert result[new.columns].equals(pd.concat([dataframe, new]))
    assert all(result["_time_insert"].notna())
    _, dtypes = conversion.sql_spec(schema, dataframe)
    assert dtypes == {
        "_index": "tinyint",
        "ColumnA": "tinyint",
        "ColumnB": "smallint",
        "_time_insert": "datetime2",
        "ColumnC": "tinyint",
    }
|
nilq/baby-python
|
python
|
# Server must be restarted after creating new tags file
from django import template
register = template.Library ()
@ register.inclusion_tag ('oauth/tags/user_avatar.html')
def get_user_avatar_tag (user):
    '''Render the user's avatar as an <img> tag via the inclusion template.

    The returned dict is the template context; *user* is presumably the
    authenticated user model instance -- confirm against the template.
    '''
    return {'user': user}
|
nilq/baby-python
|
python
|
# -*- coding:Utf-8 -*-
from gi.repository import Gtk, GObject, GdkPixbuf
from crudel import Crudel
import glob
class PicsouDiapo(Gtk.Window):
    """Display a scrollable grid of PNG images inside a Gtk.Window."""
    def __init__(self, crud, args):
        Gtk.Window.__init__(self, title=args)
        self.crud = crud
        self.args = args
        # Fall back to defaults when the optional settings are absent.
        self.width = self.args["width"] if self.args.get("width") is not None else 800
        self.heigth = self.args["height"] if self.args.get("height") is not None else 600
        self.nb_cols = self.args["nb_cols"] if self.args.get("nb_cols") is not None else 3
        self.set_size_request(self.width, self.heigth)
        self.directory = "{}/{}".format(self.crud.get_application_prop("data_directory"),self.args["directory"])
        self.set_title("Historique des cours")
        self.create_widget()
    def create_widget(self):
        """Build the image grid, self.nb_cols images per row.

        Fixed: a leftover debug override (``self.nb_cols = 2``) used to
        clobber the column count configured in __init__.
        """
        vbox = Gtk.VBox()
        hbox = None
        icol = 0
        files = sorted([f for f in glob.glob(self.directory + "/**/*.png", recursive=True)])
        for file_path in files:
            if icol == 0:
                hbox = Gtk.HBox()
            # Scale each image to the column width, preserving aspect ratio.
            pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(file_path, self.width//self.nb_cols - 4, -1, True)
            image = Gtk.Image.new_from_pixbuf(pixbuf)
            hbox.pack_start(image, False, False, 0)
            icol+=1
            if icol >= self.nb_cols:
                vbox.pack_start(hbox, False, False, 0)
                icol = 0
                hbox = None
        if hbox is not None:
            # Flush the final, partially-filled row.
            vbox.pack_start(hbox, False, False, 0)
        scroll_window = Gtk.ScrolledWindow()
        scroll_window.set_policy(Gtk.PolicyType.ALWAYS, Gtk.PolicyType.ALWAYS)
        scroll_window.add_with_viewport(vbox)
        self.add(scroll_window)
        self.show_all()
|
nilq/baby-python
|
python
|
"""
======================
Geographic Projections
======================
This shows 4 possible geographic projections. Cartopy_ supports more
projections.
.. _Cartopy: http://scitools.org.uk/cartopy
"""
import matplotlib.pyplot as plt
###############################################################################
plt.figure()
plt.subplot(projection="aitoff")
plt.title("Aitoff")
plt.grid(True)
###############################################################################
plt.figure()
plt.subplot(projection="hammer")
plt.title("Hammer")
plt.grid(True)
###############################################################################
plt.figure()
plt.subplot(projection="lambert")
plt.title("Lambert")
plt.grid(True)
###############################################################################
plt.figure()
plt.subplot(projection="mollweide")
plt.title("Mollweide")
plt.grid(True)
plt.show()
|
nilq/baby-python
|
python
|
"""
Wrappers around the Google API's.
"""
import os
import json
from datetime import (
datetime,
timedelta,
)
from collections import namedtuple
try:
# this is only an issue with Python 2.7 and if the
# Google-API packages were not installed with msl-io
from enum import Enum
except ImportError:
Enum = object
# having the Google-API packages are optional
try:
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials
from googleapiclient.http import (
MediaFileUpload,
MediaIoBaseDownload,
DEFAULT_CHUNK_SIZE,
)
HAS_GOOGLE_API = True
except ImportError:
DEFAULT_CHUNK_SIZE = 100 * 1024 * 1024
HAS_GOOGLE_API = False
from .constants import (
HOME_DIR,
IS_PYTHON2,
)
def _authenticate(token, client_secrets_file, scopes):
    """Authenticate with a Google API.
    Parameters
    ----------
    token : :class:`str`
        The path to a token file. If it does not exist then it will be created.
    client_secrets_file : :class:`str`
        The "client secrets" file to use to generate the OAuth credentials.
    scopes : :class:`list` of :class:`str`
        The list of scopes to enable.
    Returns
    -------
    :class:`google.oauth2.credentials.Credentials`
        The OAuth 2.0 credentials for the user.
    """
    if not HAS_GOOGLE_API:
        raise RuntimeError(
            'You must install the Google-API packages, run\n'
            '  pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib'
        )
    credentials = None
    # load the token from an environment variable if it exists
    # ignore the '.json' extension
    token_env_name = os.path.basename(token)[:-5].replace('-', '_').upper()
    if token_env_name in os.environ:
        info = json.loads(os.environ[token_env_name])
        credentials = Credentials.from_authorized_user_info(info, scopes=scopes)
    # load the cached token file if it exists
    if not credentials and os.path.isfile(token):
        credentials = Credentials.from_authorized_user_file(token, scopes=scopes)
    # if there are no (valid) credentials available then let the user log in
    if not credentials or not credentials.valid:
        if credentials and credentials.expired and credentials.refresh_token:
            try:
                credentials.refresh(Request())
            except RefreshError as err:
                # Expired/revoked refresh token: offer to delete the cached
                # token and recurse to run the full authentication flow again.
                if os.path.isfile(token) and not os.getenv('MSL_IO_RUNNING_TESTS'):
                    message = '{}: {}\nDo you want to delete the token file and re-authenticate ' \
                              '(y/[n])? '.format(err.__class__.__name__, err.args[0])
                    if IS_PYTHON2:
                        yes_no = raw_input(message)
                    else:
                        yes_no = input(message)
                    if yes_no.lower().startswith('y'):
                        os.remove(token)
                        return _authenticate(token, client_secrets_file, scopes)
                raise
        else:
            if not client_secrets_file:
                raise OSError('You must specify the path to a "client secrets" file as the credentials')
            flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, scopes)
            credentials = flow.run_local_server(port=0)
        # save the credentials for the next run
        if token_env_name in os.environ:
            os.environ[token_env_name] = credentials.to_json()
        else:
            # make sure that all parent directories exist before creating the file
            dirname = os.path.dirname(token)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            with open(token, mode='w') as fp:
                fp.write(credentials.to_json())
    return credentials
class GoogleAPI(object):
    def __init__(self, service, version, credentials, scopes, is_read_only, is_corporate_account):
        """Base class for all Google API's.

        The cached-token filename encodes the service name and the
        corporate/read-only flags (plus a 'testing-' prefix under tests) so
        different configurations never share cached credentials.
        """
        testing = 'testing-' if os.getenv('MSL_IO_RUNNING_TESTS') else ''
        corporate = '-corporate' if is_corporate_account else ''
        readonly = '-readonly' if is_read_only else ''
        filename = '{}token-{}{}{}.json'.format(testing, service, corporate, readonly)
        token = os.path.join(HOME_DIR, filename)
        oauth = _authenticate(token, credentials, scopes)
        self._service = build(service, version, credentials=oauth)
    @property
    def service(self):
        """The Resource object with methods for interacting with the API service."""
        return self._service
class GDrive(GoogleAPI):
MIME_TYPE_FOLDER = 'application/vnd.google-apps.folder'
ROOT_NAMES = ['Google Drive', 'My Drive', 'Shared drives']
    def __init__(self, credentials=None, is_read_only=True, is_corporate_account=True, scopes=None):
        """Interact with a user's Google Drive.
        .. attention::
           You must follow the instructions in the prerequisites section for setting up the
           `Drive API <https://developers.google.com/drive/api/v3/quickstart/python#prerequisites>`_
           before you can use this class. It is also useful to be aware of the
           `refresh token expiration <https://developers.google.com/identity/protocols/oauth2#expiration>`_
           policy.
        Parameters
        ----------
        credentials : :class:`str`, optional
            The path to the "client secrets" credential file. This file only
            needs to be specified the first time that you interact with a
            user's Google Drive or if you delete the token file that was
            created when you previously authenticated using the credentials.
        is_read_only : :class:`bool`, optional
            Whether to interact with a user's Google Drive in read-only mode.
        is_corporate_account : :class:`bool`, optional
            Whether you want to interact with a user's Google Drive via a
            corporate Google account or a personal Google account.
        scopes : :class:`list` of :class:`str`, optional
            The list of scopes to enable for the Google API. See
            `Drive scopes <https://developers.google.com/identity/protocols/oauth2/scopes#drive>`_
            for more details. If not specified then default scopes are chosen
            based on the value of `is_read_only`.
        """
        # Default scopes depend on read-only vs. read-write access.
        if not scopes:
            if is_read_only:
                scopes = [
                    'https://www.googleapis.com/auth/drive.readonly',
                    'https://www.googleapis.com/auth/drive.metadata.readonly'
                ]
            else:
                scopes = [
                    'https://www.googleapis.com/auth/drive',
                    'https://www.googleapis.com/auth/drive.metadata',
                ]
        super(GDrive, self).__init__(
            'drive', 'v3', credentials, scopes, is_read_only, is_corporate_account
        )
        # Cached API sub-resources used by the other methods.
        self._files = self._service.files()
        self._drives = self._service.drives()
@staticmethod
def _folder_hierarchy(folder):
# create a list of sub-folder names in the folder hierarchy
f = folder
names = []
while True:
f, name = os.path.split(f)
if not name or name in GDrive.ROOT_NAMES:
break
names.append(name)
return names[::-1]
def folder_id(self, folder, parent_id=None):
    """Get the ID of a Google Drive folder.

    Parameters
    ----------
    folder : :class:`str`
        The path to a Google Drive folder.
    parent_id : :class:`str`, optional
        The ID of the parent folder that `folder` is relative to.
        If not specified then `folder` is relative to the "root" folder.

    Returns
    -------
    :class:`str`
        The folder ID.

    Raises
    ------
    OSError
        If the folder does not exist or the name is ambiguous.
    """
    # Resolve the hierarchy one level at a time: each name is looked up
    # inside the folder resolved in the previous iteration.
    current_id = parent_id or 'root'
    for name in GDrive._folder_hierarchy(folder):
        query = '"{}" in parents and name="{}" and trashed=false and mimeType="{}"'.format(
            current_id, name, GDrive.MIME_TYPE_FOLDER
        )
        response = self._files.list(q=query, fields='files(id,name)').execute()
        matches = response['files']
        if not matches:
            raise OSError('Not a valid Google Drive folder {!r}'.format(folder))
        if len(matches) > 1:
            raise OSError('Multiple folder matches -- {}'.format(matches))
        match = matches[0]
        assert name == match['name'], '{!r} != {!r}'.format(name, match['name'])
        current_id = match['id']
    return current_id
def file_id(self, file, mime_type=None, folder_id=None):
    """Get the ID of a Google Drive file.

    Parameters
    ----------
    file : :class:`str`
        The path to a Google Drive file.
    mime_type : :class:`str`, optional
        The mime type to use to filter the results.
    folder_id : :class:`str`, optional
        The ID of the folder that `file` is relative to.
        If not specified then `file` is relative to the "root" folder.

    Returns
    -------
    :class:`str`
        The file ID.

    Raises
    ------
    OSError
        If the file does not exist or multiple files match.
    """
    folders, name = os.path.split(file)
    parent = self.folder_id(folders, parent_id=folder_id)
    query = '"{}" in parents and name="{}" and trashed=false'.format(parent, name)
    if mime_type:
        query += ' and mimeType="{}"'.format(mime_type)
    else:
        # No explicit mime type: exclude folders so only files match.
        query += ' and mimeType!="{}"'.format(GDrive.MIME_TYPE_FOLDER)
    response = self._files.list(q=query, fields='files(id,name,mimeType)').execute()
    matches = response['files']
    if not matches:
        raise OSError('Not a valid Google Drive file {!r}'.format(file))
    if len(matches) > 1:
        mime_types = '\n '.join(f['mimeType'] for f in matches)
        raise OSError('Multiple file matches. Filter by mime type:\n ' + mime_types)
    match = matches[0]
    assert name == match['name'], '{!r} != {!r}'.format(name, match['name'])
    return match['id']
def is_file(self, file, mime_type=None, folder_id=None):
    """Check if a file exists.

    Parameters
    ----------
    file : :class:`str`
        The path to a Google Drive file.
    mime_type : :class:`str`, optional
        The mime type to use to filter the results.
    folder_id : :class:`str`, optional
        The ID of the folder that `file` is relative to.
        If not specified then `file` is relative to the "root" folder.

    Returns
    -------
    :class:`bool`
        Whether the file exists.
    """
    try:
        self.file_id(file, mime_type=mime_type, folder_id=folder_id)
        return True
    except OSError as err:
        # Multiple matches still means the file exists (just ambiguously),
        # so only that specific error counts as "exists".
        return str(err).startswith('Multiple file matches')
def is_folder(self, folder, parent_id=None):
    """Check if a folder exists.

    Parameters
    ----------
    folder : :class:`str`
        The path to a Google Drive folder.
    parent_id : :class:`str`, optional
        The ID of the parent folder that `folder` is relative to.
        If not specified then `folder` is relative to the "root" folder.

    Returns
    -------
    :class:`bool`
        Whether the folder exists.
    """
    try:
        self.folder_id(folder, parent_id=parent_id)
        return True
    except OSError:
        return False
def create_folder(self, folder, parent_id=None):
    """Create a folder.

    Makes all intermediate-level folders needed to contain the leaf directory.

    Parameters
    ----------
    folder : :class:`str`
        The folder(s) to create, e.g. 'folder1' or 'folder1/folder2/folder3'.
    parent_id : :class:`str`, optional
        The ID of the parent folder that `folder` is relative to.
        If not specified then `folder` is relative to the "root" folder.

    Returns
    -------
    :class:`str`
        The ID of the last (right most) folder that was created.
    """
    last_id = parent_id or 'root'
    for name in GDrive._folder_hierarchy(folder):
        body = {
            'name': name,
            'mimeType': GDrive.MIME_TYPE_FOLDER,
            'parents': [last_id],
        }
        # supportsAllDrives=True enables creation inside shared drives.
        response = self._files.create(
            body=body, fields='id', supportsAllDrives=True
        ).execute()
        last_id = response['id']
    return last_id
def delete(self, file_or_folder_id):
    """Delete a file or a folder.

    Parameters
    ----------
    file_or_folder_id : :class:`str`
        The ID of the file or folder to delete.
    """
    request = self._files.delete(fileId=file_or_folder_id)
    request.execute()
def empty_trash(self):
    """Permanently delete all of the user's trashed files."""
    request = self._files.emptyTrash()
    request.execute()
def upload(self, file, folder_id=None, mime_type=None, resumable=False, chunk_size=DEFAULT_CHUNK_SIZE):
    """Upload a file.

    Parameters
    ----------
    file : :class:`str`
        The file to upload.
    folder_id : :class:`str`, optional
        The ID of the folder to upload the file to.
        If not specified then uploads to the "root" folder.
    mime_type : :class:`str`, optional
        The mime type to use for the file's metadata. If not specified
        then a value will be guessed from the file extension.
    resumable : :class:`bool`
        Whether the upload can be resumed.
    chunk_size : :class:`int`
        The file will be uploaded in chunks of this many bytes. Only used
        if `resumable` is :data:`True`. Pass in a value of -1 if the file
        is to be uploaded in a single chunk. Note that Google App Engine
        has a 5MB limit on request size, so never set `chunk_size` >5MB
        or to -1 (if the file size is >5MB).

    Returns
    -------
    :class:`str`
        The ID of the file that was uploaded.
    """
    metadata = {
        'name': os.path.basename(file),
        'parents': [folder_id or 'root'],
    }
    if mime_type:
        metadata['mimeType'] = mime_type
    media = MediaFileUpload(
        file,
        mimetype=mime_type,
        chunksize=chunk_size,
        resumable=resumable
    )
    # supportsAllDrives=True enables uploading into shared drives.
    response = self._files.create(
        body=metadata,
        media_body=media,
        fields='id',
        supportsAllDrives=True,
    ).execute()
    return response['id']
def download(self, file_id, save_as=None, num_retries=0, chunk_size=DEFAULT_CHUNK_SIZE, callback=None):
    """Download a file.

    Parameters
    ----------
    file_id : :class:`str`
        The ID of the file to download.
    save_as : :term:`path-like <path-like object>` or :term:`file-like <file object>`, optional
        The location to save the file to.
        Default is in the current working directory.
    num_retries : :class:`int`, optional
        The number of times to retry the download.
        If zero (default) then attempt the request only once.
    chunk_size : :class:`int`, optional
        The file will be downloaded in chunks of this many bytes.
    callback
        The callback to call after each chunk of the file is downloaded.
        The `callback` accepts one positional argument, for example::

            def handler(file):
                print(file.progress(), file.total_size, file.resumable_progress)

            drive.download('0Bwab3C2ejYSdM190b2psXy1C50P', callback=handler)
    """
    opened_here = False
    if hasattr(save_as, 'write'):
        # Already a writable file-like object; the caller owns it.
        fh = save_as
    else:
        if not save_as:
            # No destination given: use the file's name on Google Drive.
            meta = self._files.get(fileId=file_id, fields='name').execute()
            save_as = meta['name']
        fh = open(save_as, mode='wb')
        opened_here = True
    downloader = MediaIoBaseDownload(
        fh, self._files.get_media(fileId=file_id), chunksize=chunk_size
    )
    finished = False
    while not finished:
        status, finished = downloader.next_chunk(num_retries=num_retries)
        if callback:
            callback(status)
    if opened_here:  # only close file handles that this method opened
        fh.close()
def path(self, file_or_folder_id):
    """Convert an ID to a path.

    Parameters
    ----------
    file_or_folder_id : :class:`str`
        The ID of a file or folder.

    Returns
    -------
    :class:`str`
        The corresponding path of the ID.
    """
    # Walk up the parent chain, collecting names until a node with no
    # parents (the root) is reached.
    names = []
    current = file_or_folder_id
    while True:
        info = self._files.get(fileId=current, fields='name,parents').execute()
        names.append(info['name'])
        parents = info.get('parents', [])
        if not parents:
            break
        if len(parents) > 1:
            raise OSError('Multiple parents exist. This case has not been handled yet. Contact developers.')
        current = parents[0]
    return '/'.join(reversed(names))
class GValueOption(Enum):
    """Determines how values should be returned."""

    FORMATTED = 'FORMATTED_VALUE'
    """Calculate values and format them according to the cell's formatting.
    Formatting follows the spreadsheet's locale, not the requesting user's.
    E.g. if A1 is 1.23 and A2 is =A1 formatted as currency, A2 returns
    "$1.23"."""

    UNFORMATTED = 'UNFORMATTED_VALUE'
    """Calculate values but do not format them. E.g. if A1 is 1.23 and A2
    is =A1 formatted as currency, A2 returns the number 1.23."""

    FORMULA = 'FORMULA'
    """Do not calculate values; the reply includes the formulas. E.g. if A1
    is 1.23 and A2 is =A1 formatted as currency, A2 returns "=A1"."""
class GDateTimeOption(Enum):
    """Determines how dates should be returned."""

    SERIAL_NUMBER = 'SERIAL_NUMBER'
    """Output date, time, datetime and duration fields as doubles in
    "serial number" format, as popularized by Lotus 1-2-3. The integer part
    counts days since December 30th 1899 and the fractional part is the
    time as a fraction of the day. E.g. January 1st 1900 at noon is 2.5
    (2 days after December 30th 1899, plus half a day). February 1st 1900
    at 3pm is 33.625. This correctly treats 1900 as not a leap year."""

    FORMATTED_STRING = 'FORMATTED_STRING'
    """Output date, time, datetime and duration fields as strings in their
    given number format (which depends on the spreadsheet locale)."""
class GCellType(Enum):
    """The data type of a spreadsheet cell."""

    # Member order is preserved so iteration order is unchanged.
    BOOLEAN = 'BOOLEAN'        # TRUE/FALSE value
    CURRENCY = 'CURRENCY'      # number formatted as currency
    DATE = 'DATE'              # date value
    DATE_TIME = 'DATE_TIME'    # combined date and time value
    EMPTY = 'EMPTY'            # the cell has no value
    ERROR = 'ERROR'            # the cell contains an error
    NUMBER = 'NUMBER'          # plain numeric value
    PERCENT = 'PERCENT'        # number formatted as a percentage
    SCIENTIFIC = 'SCIENTIFIC'  # number in scientific notation
    STRING = 'STRING'          # text value
    TIME = 'TIME'              # time value
    UNKNOWN = 'UNKNOWN'        # value of an unrecognized kind
GCell = namedtuple('GCell', ['value', 'type', 'formatted'])
"""The information about a Google Sheets cell.

.. attribute:: value

   The value of the cell.

.. attribute:: type

   :class:`GCellType`: The data type of `value`.

.. attribute:: formatted

   :class:`str`: The formatted value (i.e., how the `value` is displayed in the cell).
"""
class GSheets(GoogleAPI):
    """Interact with a user's Google Sheets via the Sheets v4 API."""

    # Mime type of a Google Sheets spreadsheet file on Google Drive.
    MIME_TYPE = 'application/vnd.google-apps.spreadsheet'

    # Day zero of the "serial number" date format (Lotus 1-2-3 convention);
    # see to_datetime() below.
    SERIAL_NUMBER_ORIGIN = datetime(1899, 12, 30)

    def __init__(self, credentials=None, is_read_only=True, is_corporate_account=True, scopes=None):
        """Interact with a user's Google Sheets.

        .. attention::
           You must follow the instructions in the prerequisites section for
           setting up the `Sheets API
           <https://developers.google.com/sheets/api/quickstart/python#prerequisites>`_
           before you can use this class. It is also useful to be aware of the
           `refresh token expiration
           <https://developers.google.com/identity/protocols/oauth2#expiration>`_
           policy.

        Parameters
        ----------
        credentials : :class:`str`, optional
            The path to the "client secrets" credential file. This file only
            needs to be specified the first time that you interact with a
            user's Google Sheets or if you delete the token file that was
            created when you previously authenticated using the credentials.
        is_read_only : :class:`bool`, optional
            Whether to interact with a user's Google Sheets in read-only mode.
        is_corporate_account : :class:`bool`, optional
            Whether you want to interact with a user's Google Sheets via a
            corporate Google account or a personal Google account.
        scopes : :class:`list` of :class:`str`, optional
            The list of scopes to enable for the Google API. See
            `Sheets scopes <https://developers.google.com/identity/protocols/oauth2/scopes#sheets>`_
            for more details. If not specified then default scopes are chosen
            based on the value of `is_read_only`.
        """
        if not scopes:
            if is_read_only:
                scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly']
            else:
                scopes = ['https://www.googleapis.com/auth/spreadsheets']
        super(GSheets, self).__init__(
            'sheets', 'v4', credentials, scopes, is_read_only, is_corporate_account
        )
        # Resource object for the Sheets API "spreadsheets" endpoint.
        self._spreadsheets = self._service.spreadsheets()

    def sheet_names(self, spreadsheet_id):
        """Get the names of all sheets in a spreadsheet.

        Parameters
        ----------
        spreadsheet_id : :class:`str`
            The ID of a Google Sheets file.

        Returns
        -------
        :class:`tuple` of :class:`str`
            The names of all sheets.
        """
        request = self._spreadsheets.get(spreadsheetId=spreadsheet_id)
        response = request.execute()
        # Each entry in response['sheets'] describes one sheet (tab).
        return tuple(r['properties']['title'] for r in response['sheets'])

    def values(self,
               spreadsheet_id,
               sheet=None,
               cells=None,
               row_major=True,
               value_option=GValueOption.FORMATTED,
               datetime_option=GDateTimeOption.SERIAL_NUMBER
               ):
        """Return a range of values from a spreadsheet.

        Parameters
        ----------
        spreadsheet_id : :class:`str`
            The ID of a Google Sheets file.
        sheet : :class:`str`, optional
            The name of a sheet in the spreadsheet. If not specified and
            only one sheet exists in the spreadsheet then automatically
            determines the sheet name.
        cells : :class:`str`, optional
            The A1 notation or R1C1 notation of the range to retrieve values
            from. If not specified then returns all values that are in `sheet`.
        row_major : :class:`bool`, optional
            Whether to return the values in row-major or column-major order.
        value_option : :class:`str` or :class:`GValueOption`, optional
            How values should be represented in the output. If a string
            then it must be equal to one of the values in :class:`GValueOption`.
        datetime_option : :class:`str` or :class:`GDateTimeOption`, optional
            How dates, times, and durations should be represented in the
            output. If a string then it must be equal to one of the values
            in :class:`GDateTimeOption`. This argument is ignored if
            `value_option` is :attr:`GValueOption.FORMATTED`.

        Returns
        -------
        :class:`list`
            The values from the sheet.

        Raises
        ------
        ValueError
            If `sheet` is not specified and the spreadsheet has more than
            one sheet.
        """
        if not sheet:
            # No sheet given: it can only be inferred when exactly one exists.
            names = self.sheet_names(spreadsheet_id)
            if len(names) != 1:
                sheets = ', '.join(repr(n) for n in names)
                raise ValueError('You must specify a sheet name: ' + sheets)
            range_ = names[0]
        else:
            range_ = sheet
        if cells:
            # The API range syntax is "SheetName!A1:B2".
            range_ += '!{}'.format(cells)
        # Accept either the Enum members or their raw string values.
        if hasattr(value_option, 'value'):
            value_option = value_option.value
        if hasattr(datetime_option, 'value'):
            datetime_option = datetime_option.value
        request = self._spreadsheets.values().get(
            spreadsheetId=spreadsheet_id,
            range=range_,
            majorDimension='ROWS' if row_major else 'COLUMNS',
            valueRenderOption=value_option,
            dateTimeRenderOption=datetime_option
        )
        response = request.execute()
        # The API omits 'values' entirely for an empty range.
        return response.get('values', [])

    def cells(self, spreadsheet_id, ranges=None):
        """Return cells from a spreadsheet.

        Parameters
        ----------
        spreadsheet_id : :class:`str`
            The ID of a Google Sheets file.
        ranges : :class:`str` or :class:`list` of :class:`str`, optional
            The ranges to retrieve from the spreadsheet. Examples:

            * ``'Sheet1'`` :math:`\\rightarrow` return all cells from
              the sheet named Sheet1
            * ``'Sheet1!A1:H5'`` :math:`\\rightarrow` return cells A1:H5
              from the sheet named Sheet1
            * ``['Sheet1!A1:H5', 'Data', 'Devices!B4:B9']`` :math:`\\rightarrow`
              return cells A1:H5 from the sheet named Sheet1, all cells from the
              sheet named Data and cells B4:B9 from the sheet named Devices

            If not specified then return all cells from all sheets.

        Returns
        -------
        :class:`dict`
            The cells from the spreadsheet. The keys are the names of the
            sheets and the values are a :class:`list` of :class:`GCell`
            objects for the specified range of each sheet.
        """
        # includeGridData=True makes the API return the cell contents
        # (not just the spreadsheet metadata).
        request = self._spreadsheets.get(
            spreadsheetId=spreadsheet_id, includeGridData=True, ranges=ranges
        )
        response = request.execute()
        cells = {}
        for sheet in response['sheets']:
            data = []
            for item in sheet['data']:
                for row in item.get('rowData', []):
                    row_data = []
                    for col in row.get('values', []):
                        # 'effectiveValue' holds the computed value; it is
                        # absent for empty cells.
                        effective_value = col.get('effectiveValue', None)
                        formatted = col.get('formattedValue', '')
                        if effective_value is None:
                            value = None
                            typ = GCellType.EMPTY
                        elif 'numberValue' in effective_value:
                            value = effective_value['numberValue']
                            # The number format (DATE, CURRENCY, ...) refines
                            # the generic NUMBER type when present.
                            t = col.get('effectiveFormat', {}).get('numberFormat', {}).get('type', 'NUMBER')
                            typ = GCellType(t)
                        elif 'stringValue' in effective_value:
                            value = effective_value['stringValue']
                            typ = GCellType.STRING
                        elif 'boolValue' in effective_value:
                            value = effective_value['boolValue']
                            typ = GCellType.BOOLEAN
                        elif 'errorValue' in effective_value:
                            # Combine the displayed text with the error message.
                            msg = effective_value['errorValue']['message']
                            value = '{} ({})'.format(col['formattedValue'], msg)
                            typ = GCellType.ERROR
                        else:
                            value = formatted
                            typ = GCellType.UNKNOWN
                        row_data.append(GCell(value=value, type=typ, formatted=formatted))
                    data.append(row_data)
            cells[sheet['properties']['title']] = data
        return cells

    @staticmethod
    def to_datetime(value):
        """Convert a "serial number" date into a :class:`datetime.datetime`.

        Parameters
        ----------
        value : :class:`float`
            A date in the "serial number" format.

        Returns
        -------
        :class:`datetime.datetime`
            The date converted.
        """
        # Integer part counts days since SERIAL_NUMBER_ORIGIN; the
        # fractional part is the time as a fraction of one day.
        days = int(value)
        seconds = (value - days) * 86400  # 60 * 60 * 24
        return GSheets.SERIAL_NUMBER_ORIGIN + timedelta(days=days, seconds=seconds)
|
nilq/baby-python
|
python
|
#!/bin/python
#NOTE: modified from original to be more module friendly (PS)
#Original source: https://github.com/jczaplew/postgis2geojson/blob/master/postgis2geojson.py
import argparse
import datetime
import decimal
import json
import subprocess
import psycopg2
# Defaults for use as a module; module users may mutate this mapping.
# NOTE: the original ``global argsd`` statement was removed -- ``global``
# at module level is a no-op, so dropping it changes nothing at runtime.
argsd = {"geometry": "geometry", "pretty": False, "topojson": False}
# Coerce Decimal values to float for JSON serialization.
# http://stackoverflow.com/questions/16957275/python-to-json-serialization-fails-on-decimal
def check_for_decimals(obj):
    """``json.dumps`` *default* hook: convert a Decimal to float.

    Raises TypeError for any other type so json.dumps reports it normally.
    """
    if not isinstance(obj, decimal.Decimal):
        raise TypeError
    return float(obj)
#Main entry point for module use.
#NOTE: does not handle records with null geometry gracefully (exception in json.loads: TypeError: expected string or buffer)
def getData(conn, query, params=None):
    """Run *query* and return its rows as a GeoJSON FeatureCollection string.

    NOTE: expects one field in query to be "ST_AsGeoJSON(foo) as geometry".

    Parameters
    ----------
    conn : a DB-API connection (e.g. psycopg2)
    query : SQL string whose result set includes a ``geometry`` column
        containing GeoJSON text
    params : sequence or mapping, optional
        Bound parameters for the query.
    """
    # Create a cursor for executing queries (closed automatically by the
    # ``with`` block, so no explicit cur.close() is needed).
    with conn.cursor() as cur:
        # Execute the query
        try:
            if params:
                cur.execute(query, params)
            else:
                cur.execute(query)
        except Exception as exc:
            # Parenthesized single-argument print works under Python 2 and 3.
            print("Unable to execute query. Error was {0}".format(str(exc)))
            # Bare ``raise`` re-raises with the original traceback
            # (``raise exc`` would discard it on Python 2).
            raise
        # Retrieve the results of the query
        rows = cur.fetchall()
        # Get the column names returned
        colnames = [desc[0] for desc in cur.description]
        # Find the index of the column that holds the geometry
        geomIndex = colnames.index('geometry')
        feature_collection = {'type': 'FeatureCollection', 'features': []}
        # For each row returned...
        for row in rows:
            feature = {
                'type': 'Feature',
                'geometry': json.loads(row[geomIndex]),
                'properties': {},
            }
            for index, colname in enumerate(colnames):
                # BUG FIX: the original used ``colname not in ('geometry')``.
                # ``('geometry')`` is just the string 'geometry' (no tuple),
                # so that was a substring test which silently dropped any
                # column whose name is a substring of "geometry" (e.g. 'geo').
                if colname != 'geometry':
                    if isinstance(row[index], datetime.datetime):
                        # datetimes are not JSON.dumpable, manually stringify these.
                        value = str(row[index])
                    else:
                        value = row[index]
                    feature['properties'][colname] = value
            feature_collection['features'].append(feature)
        indent = 2 if argsd["pretty"] is True else None
        # ``default=check_for_decimals`` converts Decimal column values to float.
        return json.dumps(feature_collection, indent=indent, default=check_for_decimals)
|
nilq/baby-python
|
python
|
"""Package initialization procedures.
The cli package provides components to build and execute the CLI.
"""
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.