content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
# Copyright 2020 Louis Richard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import xarray as xr
from scipy import optimize
from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d
def calc_vph_current(b_xyz, j_xyz):
    """Estimate the phase speed of the oscillating current sheet from the
    linear relation between dBl/dt and the normal current density J_N.

    Parameters
    ----------
    b_xyz : xarray.DataArray
        Time series of the magnetic field (column 0 is taken as Bl).
    j_xyz : xarray.DataArray
        Time series of the current density (column 2 is taken as J_N).

    Returns
    -------
    disprel : xarray.Dataset
        Fit slope (``fit_db_dt_jn``), 2-D histogram (``hist``), Pearson
        correlation (``rho``), fit uncertainty (``sigma``), and the fitted
        line with +/-1.92-sigma bounds sampled on ``hires_dBdt``.
    """
    # Time derivative of Bl
    dbl_dt = gradient(b_xyz[:, 0])

    # 2-D histogram of dBl/dt vs J_N with an optimized bin count.
    n_bins = optimize_nbins_2d(dbl_dt, j_xyz[:, 2])
    hist_dbl_dt_jn = histogram2d(dbl_dt, j_xyz[:, 2], bins=n_bins)

    # Linear model for jn vs dBdt; the fitted slope is the phase speed.
    def model_jn(x, a):
        return a * x

    v_phase_j, pcov = optimize.curve_fit(model_jn, dbl_dt.data,
                                         j_xyz[:, 2].data)
    v_phase_j = v_phase_j[0]

    # Pearson correlation between dBl/dt and J_N.
    corr_coeffs = np.corrcoef(dbl_dt.data, j_xyz[:, 2].data)
    rho = corr_coeffs[0, 1]

    # One-sigma uncertainty of the slope. Index the 1x1 covariance matrix
    # explicitly instead of float(ndarray), which NumPy deprecates.
    sigma_dbl_dt_jn = np.sqrt(pcov[0, 0])

    # Symmetric plotting range; compute the high-resolution axis once
    # instead of four times (it was rebuilt for every dataset entry).
    dbl_dt_max = 1.2 * np.max(dbl_dt)
    dbl_dt_min = -dbl_dt_max
    hires_dbl_dt = np.linspace(dbl_dt_min, dbl_dt_max, 100)

    disprel = {"fit_db_dt_jn": v_phase_j, "hist": hist_dbl_dt_jn,
               "rho": rho, "sigma": sigma_dbl_dt_jn,
               "hires_dBdt": hires_dbl_dt,
               "pred_Jn": (["hires_dBdt"],
                           model_jn(hires_dbl_dt, v_phase_j)),
               "bound_upper": (["hires_dBdt"],
                               model_jn(hires_dbl_dt,
                                        v_phase_j + 1.92 * sigma_dbl_dt_jn)),
               "bound_lower": (["hires_dBdt"],
                               model_jn(hires_dbl_dt,
                                        v_phase_j - 1.92 * sigma_dbl_dt_jn))}
    disprel = xr.Dataset(disprel)
    return disprel
|
nilq/baby-python
|
python
|
# import dependencies
import os
import json
import struct
import time
import requests
import numpy as np
import binascii
import datetime
import datetime as dt
from datetime import date
from flask import Flask, Response, request, redirect, url_for, escape, jsonify, make_response
from flask_mongoengine import MongoEngine
from itertools import chain
app = Flask(__name__)

# Timestamp formats: TIME_FORMAT for start/end query-string args,
# TIME_FORMAT_DEL for the ISO-style 'delpoint' argument.
TIME_FORMAT = "%Y-%m-%d_%H:%M:%S"
TIME_FORMAT_DEL = "%Y-%m-%dT%H:%M:%S"
# LoRa device EUIs this service accepts uplinks from.
dev_euis = ['78AF580300000485','78AF580300000506', '78AF580300000512']
# check if running in the cloud and set MongoDB settings accordingly
# (Cloud Foundry exposes bound services through VCAP_SERVICES).
if 'VCAP_SERVICES' in os.environ:
    vcap_services = json.loads(os.environ['VCAP_SERVICES'])
    mongo_credentials = vcap_services['mongodb-2'][0]['credentials']
    mongo_uri = mongo_credentials['uri']
else:
    # Local development fallback.
    mongo_uri = 'mongodb://localhost/db'
app.config['MONGODB_SETTINGS'] = [
    {
        'host': mongo_uri,
        'alias': 'soil_params'
    }
]
# bootstrap our app
db = MongoEngine(app)
class Event():
    """Mutable value holder for one scheduled irrigation step."""

    def __init__(self):
        # Bootstrap defaults: a one-minute watering at a fixed date;
        # calculate_next_steps() overwrites these in place.
        self.next_time = "2019-04-01T10:00:00"
        self.watering_time = 60
        self.action = 1
# Module-level schedule slots: mutated in place by calculate_next_steps()
# and serialized for downlink by next_steps_string().
next_step = Event()
next_next_step = Event()
class DataPoint(db.Document):
    # One decoded sensor uplink persisted in MongoDB.
    devEUI = db.StringField(required=True)  # LoRa device EUI
    timestamp = db.DateTimeField()          # parsed uplink time (naive)
    time = db.StringField()                 # raw time string from ThingPark
    temperature = db.IntField()             # decoded in sc_lpn (value / 100)
    illuminance = db.IntField()
    humidity = db.IntField()
    counter = db.IntField()                 # pulse counter
    debit = db.FloatField()                 # flow, decoded as value / 100
    voltage = db.IntField()                 # mV
    #work in a specific mongoDB collection:
    meta = {'db_alias': 'soil_params'}
# set the port dynamically with a default of 3000 for local development
# (Cloud Foundry injects PORT into the environment at deploy time)
port = int(os.getenv('PORT', '3000'))
def time_date_to_unix_time(timedate1):
    """Convert an ISO-like 'YYYY-MM-DDTHH:MM:SS' string to a Unix epoch.

    Uses time.mktime, so the input is interpreted in the local timezone.
    """
    fmt = "%Y-%m-%dT%H:%M:%S"
    parsed = datetime.datetime.strptime(timedate1, fmt)
    epoch = time.mktime(parsed.timetuple())
    print(epoch)
    return epoch
def CHAR_to_HEX(ascii):
    """Return the lowercase hex string of a single character's code point."""
    return hex(ord(ascii))[2:]
def int32_to_hex_clean(number, bytes):
    """Encode *number* as a fixed-width big-endian hex string.

    Parameters:
      number  non-negative int; must fit in 32 bits to be packable.
      bytes   field width in bytes; the result is ``2 * bytes`` hex chars.
              (Name kept for interface compatibility despite shadowing the
              builtin.)

    Returns a str of ``2 * bytes`` hex digits, or an all-'F' sentinel of
    the same width when the value does not fit in the field.
    """
    if number > pow(2, 8 * bytes):
        # BUG FIX: was "F" * bytes, which produced half the field width
        # (one hex digit per byte instead of two).
        return "F" * (bytes * 2)
    else:
        # struct.pack(">I") yields 4 big-endian bytes; hexlify() returns
        # *bytes* on Python 3, so decode before any str concatenation.
        number_bytes = struct.pack(">I", number)
        string = binascii.hexlify(number_bytes).decode("ascii")
        if len(string) == bytes * 2:
            return string
        elif len(string) < (bytes * 2):
            # BUG FIX: pad with zeros up to the hex width (bytes * 2),
            # not the byte width.
            return "0" * (bytes * 2 - len(string)) + string
        else:
            # BUG FIX: keep the least-significant `bytes` bytes, i.e. the
            # last bytes * 2 hex digits (was slicing only `bytes` chars).
            return string[len(string) - bytes * 2:]
def next_steps_string(next_event, next_next_event):
    """Serialize two scheduled events into the 'n' downlink hex command.

    Layout: 'n' marker, then per event: 4-byte epoch of next_time,
    2-byte watering time, 1-byte action flag ("01"/"00").
    """
    parts = [CHAR_to_HEX('n')]
    for event in (next_event, next_next_event):
        epoch = (int)(time_date_to_unix_time(event.next_time))
        parts.append(int32_to_hex_clean(epoch, 4))
        parts.append(int32_to_hex_clean((int)(event.watering_time), 2))
        parts.append("01" if event.action == 1 else "00")
    return "".join(parts)
# functions for decoding payload
def bitshift(payload, lastbyte):
    """Bit offset of byte *lastbyte* within a *payload*-byte big-endian word."""
    remaining_bytes = payload - lastbyte - 1
    return remaining_bytes * 8
# our base route which just returns a string
@app.route('/')
def hello_world():
    """Landing page: a static greeting proving the service is up."""
    greeting = "<b>Congratulations! Welcome to Soil Parameter!</b>"
    return greeting
#some functions for the freeboard interface
@app.route('/devices',methods=['GET'])
def devices():
    """Return the latest values of the device given via ?dev=<EUI> as JSON;
    an empty JSON object when the device is unknown."""
    query = request.args
    if 'dev' in query:
        for i, dev in enumerate(dev_euis):
            if dev == query['dev']:
                # NOTE(review): `latest_values` is not defined anywhere in
                # this module, so this branch raises NameError -- confirm
                # where it was meant to come from.
                return json.dumps(latest_values[i],indent=4)
    return json.dumps({})
#output JSON
@app.route('/json', methods=['GET'])
def print_json():
    """Dump the entire DataPoint collection as a downloadable JSON file."""
    # (Removed an unused `query = request.args` local.)
    response = DataPoint.objects().to_json()
    return Response(response, mimetype='application/json',
                    headers={'Content-Disposition':'attachment;filename=database.json'})
#querying the database and giving back a JSON file
@app.route('/query', methods=['GET'])
def db_query():
    """Query (or delete) DataPoints in a time window given by URL args.

    Query-string parameters:
      start, end  window bounds in TIME_FORMAT
      delete      (disabled) bulk delete within [start, end]
      delpoint    delete points within +/-2 s of the given TIME_FORMAT_DEL time
    """
    # BUG FIX: `query` was never assigned, so every request raised a
    # NameError; fetch the request arguments like the other views do.
    query = request.args
    # Default window: the past year up to two hours in the future.
    start = dt.datetime.now() - dt.timedelta(days=365)
    end = dt.datetime.now() + dt.timedelta(hours=2)
    #enable for deleting objects. Attention, deletes parts of the database!
    if 'delete' in query and 'start' in query and 'end' in query:
        end = dt.datetime.strptime(query['end'], TIME_FORMAT)
        start = dt.datetime.strptime(query['start'], TIME_FORMAT)
        #DataPoint.objects(track_ID=query['delete'],timestamp__lt=end,timestamp__gt=start).delete()
        #return 'objects deleted'
        return 'delete feature disabled for security reasons'
    if 'delpoint' in query:
        print('query for deleting point received')
        deltime_start = dt.datetime.strptime(query['delpoint'], TIME_FORMAT_DEL) - dt.timedelta(seconds=2)
        deltime_end = dt.datetime.strptime(query['delpoint'], TIME_FORMAT_DEL) + dt.timedelta(seconds=2)
        n_points = DataPoint.objects(timestamp__lt=deltime_end, timestamp__gt=deltime_start).count()
        DataPoint.objects(timestamp__lt=deltime_end, timestamp__gt=deltime_start).delete()
        return '{} points deleted'.format(n_points)
    if 'start' in query:
        start = dt.datetime.strptime(query['start'], TIME_FORMAT)
    if 'end' in query:
        end = dt.datetime.strptime(query['end'], TIME_FORMAT)
    # BUG FIX: `datapoints` was never defined (unconditional NameError);
    # return the matching points as JSON, mirroring the /json endpoint.
    datapoints = DataPoint.objects(timestamp__lt=end, timestamp__gt=start).to_json()
    return Response(datapoints, mimetype='application/json')
def calculate_next_steps():
    """Fill the two module-level Event slots with today's schedule.

    Currently hard-coded: a 10 s data-acquisition-only run at 10:00,
    then a 20 s watering at 10:05. (The rain-aware logic described in the
    original comment is not implemented yet.)
    """
    #if it is raining that day then program the next two dates just to acquire data and if not just water it
    today_str = date.today().strftime("%Y-%m-%d")
    next_step.next_time = today_str + "T10:00:00"
    next_step.action = 0
    next_step.watering_time = 10
    next_next_step.next_time = today_str + "T10:05:00"
    next_next_step.action = 1
    next_next_step.watering_time = 20
    return [next_step, next_next_step]
# Swisscom LPN listener to POST from actility
@app.route('/sc_lpn', methods=['POST'])
def sc_lpn():
    """
    This method handles every message sent by the LORA sensors.

    Single-byte payloads are commands from the device ('t' time request,
    'U' unexpected flow, 'B' battery low, 'n' next-steps request); longer
    payloads are sensor readings that get decoded and stored as a DataPoint.
    :return: a short status string for ThingPark
    """
    print("Data received from ThingPark...")
    j = []
    try:
        j = request.json
    except:
        # NOTE(review): bare except leaves j == [] and the subscripts below
        # then fail -- consider rejecting the request here instead.
        print("Unable to read information or json from sensor...")
    print("JSON received:")
    print(j)
    # Known device EUIs (duplicates the module-level dev_euis list).
    tuino_list = ['78AF580300000485','78AF580300000506', '78AF580300000512']
    r_deveui = j['DevEUI_uplink']['DevEUI']
    #Parse JSON from ThingPark
    print("devEUI="+r_deveui)
    payload = j['DevEUI_uplink']['payload_hex']
    # NOTE(review): payload_int is computed but never used.
    payload_int = int(j['DevEUI_uplink']['payload_hex'],16)
    r_bytes = bytearray.fromhex(payload)
    print("payload=" + payload)
    r_time = j['DevEUI_uplink']['Time']
    # Strip the "+HH:MM" timezone suffix and fractional seconds before parsing.
    [r_timestamp1, timezone] = r_time.split("+")
    r_timestamp1 = r_timestamp1.split(".")[0]
    r_timestamp = dt.datetime.strptime(r_timestamp1,"%Y-%m-%dT%H:%M:%S")
    if len(r_bytes) == 1:
        # One byte -> a command from the device.
        print ('bytes length = ', len(r_bytes))
        if r_bytes[0]==ord('t'): ##send time when receives t
            command = CHAR_to_HEX('t')
            r_time=int(time.time())
            time_bytes = struct.pack(">I", r_time)
            # NOTE(review): hexlify returns bytes on Python 3, so this
            # str + bytes concatenation would raise TypeError there --
            # confirm the intended runtime.
            time_bytes_string = command + binascii.hexlify(bytearray(time_bytes))
            print('Sending Time')
            downlink_LoRa_data(time_bytes_string, r_deveui)
            return "Data Sent"
        elif r_bytes[0]==ord('U'): ##Unexpected Flow
            print('Unexpected flow')
            return "Unexpected Flow"
        elif r_bytes[0]==ord('B'): ##Battery Low
            print('Battery Low')
            return "Battery Low"
        elif r_bytes[0]==ord('n'):
            # Device asks for its next two scheduled steps.
            [next_step, next_next_step] = calculate_next_steps()
            next_step_command = next_steps_string(next_step, next_next_step)
            print("Sending on LoRa: " , next_step_command)
            downlink_LoRa_data(next_step_command, r_deveui)
            return "Next Steps Sent"
        else:
            print("bytes = ", r_bytes[0])
            return "something went wrong"
    else:
        # Multi-byte payload -> decoded sensor readings.
        if r_deveui in tuino_list:
            # Big-endian 16-bit fields; temperature and debit are scaled /100.
            r_temperature = ((r_bytes[0]<<8)+r_bytes[1])/100
            r_illuminance = r_bytes[2]
            r_humidity = r_bytes[3]
            r_counter = (r_bytes[4]<<8)+r_bytes[5]
            r_debit = ((r_bytes[6]<<8)+r_bytes[7])/100
            r_voltage = ((r_bytes[8]<<8)+r_bytes[9])
            print('Temperature = ' + str(r_temperature) + ' deg C')
            print('illuminance = ' + str(r_illuminance) + '%')
            print('Humidity = ' + str(r_humidity) + '%')
            print('Counter = ' + str(r_counter) + ' pulses')
            print('Debit = ' + str(r_debit) + ' liters')
            print('Voltage = ' + str(r_voltage) + ' mV')
        else:
            return "device type not recognised"
        datapoint = DataPoint(devEUI=r_deveui, time= r_time, timestamp = r_timestamp, temperature=r_temperature, illuminance=r_illuminance, humidity = r_humidity, counter=r_counter, debit=r_debit, voltage=r_voltage)
        print(datapoint)
        datapoint.save()
        print('Datapoint saved to database')
        return 'Datapoint DevEUI %s saved' %(r_deveui)
def downlink_LoRa_data(payload_hex, r_deveui):
    """POST a downlink payload to the Swisscom ThingPark LRC for a device.

    Parameters:
      payload_hex  hex-encoded payload string (parameter was named `str`,
                   which shadowed the builtin; all call sites in this file
                   pass it positionally).
      r_deveui     target device EUI.
    Returns the response body text from ThingPark.
    """
    # (Removed an unused `headers_post` local.)
    print('sending to LoRa payload : ', payload_hex)
    params = {'DevEUI': r_deveui,
              'FPORT': '1',
              'Payload': payload_hex}
    url = "https://proxy1.lpn.swisscom.ch/thingpark/lrc/rest/downlink/"
    r = requests.post(url, params=params)
    print("url", r.url)
    print(r.text)
    return r.text
# start the app
if __name__ == '__main__':
    # Bind to all interfaces so the platform router can reach the app.
    app.run(host='0.0.0.0', port=port)
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon import exceptions
from horizon.utils.memoized import memoized
import logging
from openstack_dashboard.api import base
from meteosclient.api import client as meteos_client
LOG = logging.getLogger(__name__)

# Whitelists of keyword arguments accepted by the corresponding *_create()
# helpers below; any other key raises horizon BadRequest.
TEMPLATE_CREATE_ATTRS = ['display_name', 'display_description', 'image_id',
                         'master_nodes_num', 'master_flavor_id',
                         'worker_nodes_num', 'worker_flavor_id',
                         'spark_version', 'floating_ip_pool']
EXPERIMENT_CREATE_ATTRS = ['display_name', 'display_description', 'key_name',
                           'neutron_management_network', 'template_id']
DATASET_CREATE_ATTRS = ['method', 'source_dataset_url', 'display_name',
                        'display_description', 'experiment_id', 'params',
                        'swift_tenant', 'swift_username', 'swift_password']
MODEL_CREATE_ATTRS = ['source_dataset_url', 'display_name',
                      'display_description', 'experiment_id',
                      'model_type', 'model_params', 'dataset_format',
                      'swift_tenant', 'swift_username', 'swift_password']
MODEL_EVA_CREATE_ATTRS = ['source_dataset_url', 'display_name',
                          'display_description', 'model_id', 'dataset_format',
                          'swift_tenant', 'swift_username', 'swift_password']
LEARNING_CREATE_ATTRS = ['display_name', 'display_description',
                         'model_id', 'method', 'args']
@memoized
def meteosclient(request):
    """Return a memoized Meteos client for *request*.

    Returns None when the machine-learning service is not present in the
    service catalog.
    """
    meteos_url = ""
    try:
        meteos_url = base.url_for(request, 'machine-learning')
    except exceptions.ServiceCatalogException:
        LOG.debug('No Machine Learning service is configured.')
        return None
    # SECURITY FIX: do not log the auth token -- it is a credential.
    # Also use lazy %-style logging args instead of eager formatting.
    LOG.debug('meteosclient connection created using url "%s"', meteos_url)
    c = meteos_client.Client(username=request.user.username,
                             project_id=request.user.tenant_id,
                             input_auth_token=request.user.token.id,
                             meteos_url=meteos_url)
    return c
def template_create(request, **kwargs):
    """Create a template; only TEMPLATE_CREATE_ATTRS keys are accepted."""
    args = {}
    for key, value in kwargs.items():
        if key not in TEMPLATE_CREATE_ATTRS:
            raise exceptions.BadRequest(
                "Key must be in %s" % ",".join(TEMPLATE_CREATE_ATTRS))
        args[str(key)] = str(value)
    return meteosclient(request).templates.create(**args)
def template_delete(request, id):
    """Delete the template with the given id."""
    client = meteosclient(request)
    return client.templates.delete(id)
def template_list(request, search_opts=None, limit=None, marker=None,
                  sort_key=None, sort_dir=None):
    """List templates with optional filtering, paging and sorting."""
    client = meteosclient(request)
    return client.templates.list(search_opts, limit, marker,
                                 sort_key, sort_dir)
def template_show(request, id):
    """Fetch a single template by id."""
    client = meteosclient(request)
    return client.templates.get(id)
def experiment_create(request, **kwargs):
    """Create an experiment; only EXPERIMENT_CREATE_ATTRS keys are accepted."""
    args = {}
    for key, value in kwargs.items():
        if key not in EXPERIMENT_CREATE_ATTRS:
            raise exceptions.BadRequest(
                "Key must be in %s" % ",".join(EXPERIMENT_CREATE_ATTRS))
        args[str(key)] = str(value)
    return meteosclient(request).experiments.create(**args)
def experiment_delete(request, id):
    """Delete the experiment with the given id."""
    client = meteosclient(request)
    return client.experiments.delete(id)
def experiment_list(request, search_opts=None, limit=None, marker=None,
                    sort_key=None, sort_dir=None):
    """List experiments with optional filtering, paging and sorting."""
    client = meteosclient(request)
    return client.experiments.list(search_opts, limit, marker,
                                   sort_key, sort_dir)
def experiment_show(request, id):
    """Fetch a single experiment by id."""
    client = meteosclient(request)
    return client.experiments.get(id)
def dataset_create(request, **kwargs):
    """Create a dataset; only DATASET_CREATE_ATTRS keys are accepted."""
    args = {}
    for key, value in kwargs.items():
        if key not in DATASET_CREATE_ATTRS:
            raise exceptions.BadRequest(
                "Key must be in %s" % ",".join(DATASET_CREATE_ATTRS))
        args[str(key)] = str(value)
    return meteosclient(request).datasets.create(**args)
def dataset_delete(request, id):
    """Delete the dataset with the given id."""
    client = meteosclient(request)
    return client.datasets.delete(id)
def dataset_list(request, search_opts=None, limit=None, marker=None,
                 sort_key=None, sort_dir=None):
    """List datasets with optional filtering, paging and sorting."""
    client = meteosclient(request)
    return client.datasets.list(search_opts, limit, marker,
                                sort_key, sort_dir)
def dataset_show(request, id):
    """Fetch a single dataset by id."""
    client = meteosclient(request)
    return client.datasets.get(id)
def model_create(request, **kwargs):
    """Create a model; only MODEL_CREATE_ATTRS keys are accepted."""
    args = {}
    for key, value in kwargs.items():
        if key not in MODEL_CREATE_ATTRS:
            raise exceptions.BadRequest(
                "Key must be in %s" % ",".join(MODEL_CREATE_ATTRS))
        args[str(key)] = str(value)
    return meteosclient(request).models.create(**args)
def model_delete(request, id):
    """Delete the model with the given id."""
    client = meteosclient(request)
    return client.models.delete(id)
def model_list(request, search_opts=None, limit=None, marker=None,
               sort_key=None, sort_dir=None):
    """List models with optional filtering, paging and sorting."""
    client = meteosclient(request)
    return client.models.list(search_opts, limit, marker,
                              sort_key, sort_dir)
def model_show(request, id):
    """Fetch a single model by id."""
    client = meteosclient(request)
    return client.models.get(id)
def model_evaluation_create(request, **kwargs):
    """Create a model evaluation; only MODEL_EVA_CREATE_ATTRS keys are accepted."""
    args = {}
    for key, value in kwargs.items():
        if key not in MODEL_EVA_CREATE_ATTRS:
            raise exceptions.BadRequest(
                "Key must be in %s" % ",".join(MODEL_EVA_CREATE_ATTRS))
        args[str(key)] = str(value)
    return meteosclient(request).model_evaluations.create(**args)
def model_evaluation_delete(request, id):
    """Delete the model evaluation with the given id."""
    client = meteosclient(request)
    return client.model_evaluations.delete(id)
def model_evaluation_list(request, search_opts=None, limit=None, marker=None,
                          sort_key=None, sort_dir=None):
    """List model evaluations with optional filtering, paging and sorting."""
    client = meteosclient(request)
    return client.model_evaluations.list(search_opts, limit, marker,
                                         sort_key, sort_dir)
def model_evaluation_show(request, id):
    """Fetch a single model evaluation by id."""
    client = meteosclient(request)
    return client.model_evaluations.get(id)
def learning_create(request, **kwargs):
    """Create a learning; only LEARNING_CREATE_ATTRS keys are accepted."""
    args = {}
    for key, value in kwargs.items():
        if key not in LEARNING_CREATE_ATTRS:
            raise exceptions.BadRequest(
                "Key must be in %s" % ",".join(LEARNING_CREATE_ATTRS))
        args[str(key)] = str(value)
    return meteosclient(request).learnings.create(**args)
def learning_delete(request, id):
    """Delete the learning with the given id."""
    client = meteosclient(request)
    return client.learnings.delete(id)
def learning_list(request, search_opts=None, limit=None, marker=None,
                  sort_key=None, sort_dir=None):
    """List learnings with optional filtering, paging and sorting."""
    client = meteosclient(request)
    return client.learnings.list(search_opts, limit, marker,
                                 sort_key, sort_dir)
def learning_show(request, id):
    """Fetch a single learning by id."""
    client = meteosclient(request)
    return client.learnings.get(id)
|
nilq/baby-python
|
python
|
# vim:set et ts=4 sw=4:
import logging
class FileLogger(object):
    """
    A file-like, write-only object that forwards every write to a logger
    object instead of an actual file.

    Example:
        #Redirect stdout and stderr to a logging object
        import sys
        from file_logger import FileLogger
        import logging
        import socket
        logger=logging.handlers.SysLogHandler(address='/dev/log',facility='user',socktype=socket.SOCK_DGRAM)
        sys.stderr.close()
        sys.stdout.close()
        sys.stderr=FileLogger(logger,logging.ERROR)
        sys.stdout=FileLogger(logger,logging.INFO)
        print("This message should now go to INFO logging level to syslog 'user' facility etc..")
    """

    def __init__(self, logger, level=logging.INFO, prepend_all_lines=True):
        """
        Args:
            logger              Logging handler/logger object to write to
                                (anything with a .log(level, msg) method).
            level               Valid logging level. Default: logging.INFO
            prepend_all_lines   Bool. When True, multiline messages are
                                logged line by line; when False each write()
                                becomes a single log record.
        """
        self.logger = logger
        self.level = level
        # Bind the appropriate write strategy once, at construction time.
        if prepend_all_lines:
            self.write = self.__write_prepend
        else:
            self.write = lambda m: self.logger.log(self.level, m)

    def __write_prepend(self, message):
        """
        Replacement "write" that logs each non-empty line of a multiline
        message as its own record.

        Args:
            message     String to be written
        """
        msgs = message.split('\n')
        for m in msgs:
            if m:  # skip the empty fragments produced by trailing newlines
                self.logger.log(self.level, m)

    def read(self, **kwargs):
        # BUG FIX: `NotImplemented` is a singleton value, not an exception
        # class; calling it raised TypeError. Raise the proper exception.
        raise NotImplementedError("FileLogger is write only")

    def close(self):
        """No-op: there is no underlying file to close."""
        return

    def flush(self):
        """No-op: records are emitted immediately on write()."""
        return
|
nilq/baby-python
|
python
|
# Copyright 2014 - Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add artifact_type to image table
Revision ID: 3a615b3b2eae
Revises: 450600086a09
Create Date: 2014-10-13 17:25:04.880051
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3a615b3b2eae'
down_revision = '450600086a09'
def upgrade():
    """Add the nullable ``artifact_type`` string column to ``image``."""
    artifact_type = sa.Column('artifact_type', sa.String(length=36))
    op.add_column('image', artifact_type)
def downgrade():
    """Drop the ``artifact_type`` column added by this revision."""
    op.drop_column('image', 'artifact_type')
|
nilq/baby-python
|
python
|
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
from tksugar import Generator
# Number of child windows opened so far; used for the window title text.
count = 1


def button(button, tag):
    """Command handler: open a new child window from the YAML template.

    Relies on the module-level `owner` manager created in the __main__
    block (for the "modal" checkbox) and on the global `count`.
    """
    global count

    def ordinal(n):
        # English ordinal suffix ("1st", "2nd", "3rd", "4th", ...).
        # The slice trick picks "st"/"nd"/"rd"/"th" out of "tsnrhtdd";
        # extracted from the original one-line lambda for readability.
        # https://stackoverflow.com/questions/9647202/ordinal-numbers-replacement
        return "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])

    child = Generator(r"samples\yml\multiwindow_child.yml").get_manager()
    child.widgets["label"].widget["text"] = f"This is {ordinal(count)} Child Window"
    if owner.vars["modal"].get():
        # Make the child modal: grab all input for its window.
        child.window.grab_set()
    count += 1
if __name__ == "__main__":
    # Build the owner window from YAML and route its button presses to
    # the button() handler above.
    gen = Generator(r"samples\yml\multiwindow_owner.yml")
    owner = gen.get_manager(commandhandler=button)
    owner.mainloop()
|
nilq/baby-python
|
python
|
import numpy
from scipy.ndimage import shift
from skimage.exposure import rescale_intensity
from aydin.features.groups.translations import TranslationFeatures
from aydin.io.datasets import camera
def n(image):
    """Normalize *image*: cast to float32 and rescale its own value range
    onto [0, 1]."""
    as_float = image.astype(numpy.float32)
    return rescale_intensity(as_float, in_range='image', out_range=(0, 1))
def test_translation_feature_group():
    """Each translation feature must equal the correspondingly shifted image."""
    # get image:
    image = n(camera().astype(numpy.float32))

    # Instantiates translation features (8 unit offsets around the origin):
    vectors = [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]
    translations = TranslationFeatures(translations=vectors)
    assert translations.num_features(image.ndim) == 8

    # Check receptive field radius:
    assert translations.receptive_field_radius == 1

    # Set image:
    translations.prepare(image)

    # compute features and check their validity:
    feature = numpy.empty_like(image)
    for index in range(translations.num_features(image.ndim)):
        translations.compute_feature(index=index, feature=feature)
        vector = vectors[index]
        # BUG FIX: the original passed output=feature to shift(), which
        # made `translated` an alias of `feature`, so the assert below
        # compared the array with itself and always passed. Compute the
        # expected shift into a fresh array instead.
        translated = shift(
            image,
            shift=list(vector),
            order=0,
            mode='constant',
            cval=0.0,
            prefilter=False,
        )
        assert (feature == translated).all()
|
nilq/baby-python
|
python
|
# Print a crude ordinal label for each number: 1-3 are irregular
# ("1st"/"2nd"/"3rd"), everything else gets a plain "th" suffix.
numberlist = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for number in numberlist:
    irregular = {1: "1st", 2: "2nd", 3: "3rd"}
    if number in irregular:
        print(irregular[number])
    else:
        print(str(number) + "th")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import inspect
import io
import os
import numpy as np
import pytest
import pytoshop
from pytoshop import enums
from pytoshop import layers
# Directory holding the PSD test fixtures, relative to this test module.
DATA_PATH = os.path.join(os.path.dirname(__file__), 'psd_files')


def test_futz_with_channel_image_data():
    """Writing a PSD validates the channel image dtype/shape first."""
    filename = os.path.join(DATA_PATH, 'group.psd')
    with open(filename, 'rb') as fd:
        psd = pytoshop.PsdFile.read(fd)
    first_layer = psd.layer_and_mask_info.layer_info.layer_records[0]
    # Wrong dtype (np.empty defaults to float64) -> rejected on write.
    first_layer.channels[0].image = np.empty((256, 256))
    with pytest.raises(ValueError):
        psd.write(io.BytesIO())
    # Wrong rank (1-D) -> rejected.
    first_layer.channels[0].image = np.empty((256,), np.uint8)
    with pytest.raises(ValueError):
        psd.write(io.BytesIO())
    # Correct dtype/rank but presumably the wrong size for this layer ->
    # rejected (TODO confirm the layer's actual bounds in group.psd).
    first_layer.channels[0].image = np.empty((256, 256), np.uint8)
    with pytest.raises(ValueError):
        psd.write(io.BytesIO())
    # A scalar constant fill is accepted.
    first_layer.channels[0].image = 0
    psd.write(io.BytesIO())
    # A (200, 100) uint8 image is accepted.
    first_layer.channels[0].image = np.empty((200, 100), np.uint8)
    psd.write(io.BytesIO())
def test_futz_with_layer_channels():
filename = os.path.join(DATA_PATH, 'group.psd')
with open(filename, 'rb') as fd:
psd = pytoshop.PsdFile.read(fd)
first_layer = psd.layer_and_mask_info.layer_info.layer_records[0]
with pytest.raises(TypeError):
first_layer.channels = [
layers.ChannelImageData(image=np.empty((200, 100), np.uint8))]
first_layer.channels = {
0: layers.ChannelImageData(image=np.empty((200, 100), np.uint8))}
with pytest.raises(ValueError):
channel = first_layer.get_channel(enums.ColorChannel.bitmap)
first_layer.channels = {
0:
layers.ChannelImageData(image=np.empty((200, 100), np.uint8))}
channel = first_layer.get_channel(enums.ColorChannel.red)
assert channel.image.shape == (200, 100)
first_layer.set_channel(
enums.ColorChannel.green,
first_layer.get_channel(enums.ColorChannel.red))
first_layer.mask = first_layer.mask
first_layer.blending_ranges = first_layer.blending_ranges
psd.write(io.BytesIO())
with pytest.raises(ValueError):
first_layer.channels = {
0: np.empty((200, 100), np.uint8)}
with pytest.raises(ValueError):
first_layer.channels = {
'zero': layers.ChannelImageData(
image=np.empty((200, 100), np.uint8))}
def test_layer_mask_invalid_values():
    """LayerMask setters reject out-of-range coordinates and negatives."""
    mask = layers.LayerMask()
    coord_props = ('top', 'left', 'right', 'bottom',
                   'real_top', 'real_left', 'real_right', 'real_bottom')
    for prop in coord_props:
        with pytest.raises(ValueError):
            setattr(mask, prop, (1 << 32))
    density_props = ('user_mask_density', 'user_mask_feather',
                     'vector_mask_density', 'vector_mask_feather')
    for prop in density_props:
        with pytest.raises(ValueError):
            setattr(mask, prop, -1)
    with pytest.raises(TypeError):
        mask.real_flags = None
def test_channel_image_data_invalid():
    """Every non-image constructor argument rejects the value 0."""
    # BUG FIX: inspect.getargspec was removed in Python 3.11;
    # getfullargspec is the drop-in replacement, and .args replaces
    # the positional args[0] indexing.
    argspec = inspect.getfullargspec(layers.ChannelImageData.__init__)
    for arg in argspec.args:
        if arg in ('self', 'image', 'compression'):
            continue
        with pytest.raises(ValueError):
            layers.ChannelImageData(
                image=np.empty((0, 0), dtype='u8'),
                **{arg: 0})
def test_invalid_compression_type():
    """Compression must be a valid enum value; 4 and 'zlib' are rejected."""
    for bad_value in (4, 'zlib'):
        with pytest.raises(ValueError):
            layers.ChannelImageData(compression=bad_value)
def test_layer_record_invalid_values():
    """LayerRecord setters validate coordinate range, enums and name length."""
    record = layers.LayerRecord()
    for prop in ('top', 'left', 'right', 'bottom'):
        with pytest.raises(ValueError):
            setattr(record, prop, (1 << 32))
    for prop in ('opacity', 'blend_mode'):
        with pytest.raises(ValueError):
            setattr(record, prop, -1)
    # Byte-string names are accepted; over-long names are not.
    record.name = b'ascii'
    with pytest.raises(ValueError):
        record.name = u'X' * 256
|
nilq/baby-python
|
python
|
from abc import ABC, abstractmethod
from typing import List
from evobench import Benchmark
from evobench.model import Solution
class HillClimber(ABC):
    """Abstract base for hill-climbing local search over Solutions.

    Subclasses implement __call__, which maps one Solution to an improved
    (or equal) Solution, typically scored against `self.benchmark`.
    """

    def __init__(self, benchmark: Benchmark):
        # Benchmark used by concrete climbers to evaluate candidates.
        self.benchmark = benchmark

    @abstractmethod
    def __call__(self, solution: Solution, **kwargs) -> Solution:
        """Return the climbed solution derived from *solution*."""
        pass

    def apply(self, solutions: List[Solution]) -> List[Solution]:
        """Run the climber on each solution, preserving input order."""
        # Idiom: list comprehension replaces the manual append loop;
        # self(solution) is equivalent to the former self.__call__(solution).
        return [self(solution) for solution in solutions]
|
nilq/baby-python
|
python
|
# coding=utf-8
from urllib2 import URLError, HTTPError
import urllib2
import json
import logging
from api_exception import APIError
from common.config import configs
from common.util import Util
# from appName.route.neutron_api import list_ports_by_nobind
# from appName.route.neutron_api import list_network
# from appName.route import neutron_api
from ..models import Opcache
import Queue
import threading
__author__ = 'aaron'
CACHE_SERVER_PARAMS = "/v2/servers"
CACHE_SERVER_CATEGORY = "list_all_server"
#NOVAL request url
NOVAL_URL = configs.get('nova')
TIME_OUT = configs.get('time_out')
#logger
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
flavor_list = []
# Reformat an uptime-like string ("4 days, HH:MM:SS") into
# "x year x day x hours x minutes x seconds".
def format_date(str_datetime):
    """Reformat an uptime string such as '4 days, 1:02:03' into a
    human-readable 'x year x day x hours x minutes x seconds' string.

    NOTE(review): parsing is position-based and fragile. `day` is a str
    slice when 'days' is present but the int 0 otherwise, so the
    `elif day > 0` below relies on Python 2 mixed-type comparison
    semantics -- confirm before porting to Python 3.
    """
    index_days = 0
    if 'days' in str_datetime:
        index_days = str_datetime.index('days')
    index = 0
    day=0
    if index_days > 0:
        # Text before 'days' is the day count; the clock part starts
        # 6 characters after the 'days' token.
        day = str_datetime[0:index_days]
        index = index_days+6
    days = 0
    years = 0
    if int(day) > 365:
        # Python 2 integer division: whole years plus leftover days.
        years = int(day)/365
        days = int(day)%365
    string =""
    if years > 0 :
        string +=(str(years)+' year ')
    if days > 0 :
        string +=(str(days)+' day ')
    elif day > 0:
        string +=(str(day)+' day ')
    # Fixed-width HH:MM:SS slices relative to `index`.
    h = str_datetime[index:index+2]
    m = str_datetime[index+3:index+5]
    s = str_datetime[index+6:index+8]
    if len(h) > 0 and int(h) > 0:
        string += (h+' hours ')
    if len(m) > 0 and int(m) > 0:
        string += (m+' minutes ')
    if len(s) > 0 and int(s) > 0:
        string += (s+' seconds ')
    return string
# Result queue shared between the per-server worker threads (myThread /
# launch_server) and launch_server_by_thread, which drains it after join().
q = Queue.Queue()
class myThread(threading.Thread):
    """Worker thread that submits one server-create request via launch_server."""

    def __init__(self, user, server_list, thread_id):
        threading.Thread.__init__(self)
        # Request context and the single server spec this worker handles.
        self.user = user
        self.server_list = server_list
        self.thread_id = thread_id

    def run(self):
        # Delegate the actual POST to the module-level helper; the result
        # is reported through the shared queue `q` inside launch_server.
        launch_server(self.user, self.server_list, self.thread_id)
def _start_thread(user, server_list):
    """Spawn and start one myThread per server spec; return the threads."""
    threads = []
    # Create a new thread for each server, numbering them from 1.
    for thread_id, server in enumerate(server_list, start=1):
        worker = myThread(user, server, thread_id)
        worker.start()
        threads.append(worker)
    return threads
def launch_server_by_thread(user,server_list):
threads = _start_thread(user,server_list)
for t in threads:
t.join()
result = list()
while not q.empty():
result.append(q.get())
print 'xxxxxxx',result
if len(server_list) == len(result):
return 202,"Success"
else:
return 203,"Create failed "+bytes(len(result)-len(server_list)) +" num"
def networkport_field_data(user):
    """Returns a list of tuples of all unbound ports.

    Queries Neutron for the user's networks, then collects the unbound
    ports of every non-'ext-net' network as (port_id, ip_address) tuples.

    :param user: dict with at least 'projectid'
    :return: list of (id, ip_address) tuples
    """
    # Imported here (not at module top) -- presumably to avoid a circular
    # import with the route module; confirm before hoisting.
    from appName.route import neutron_api as neutron_api
    networks = []
    ports = []
    try:
        networks,code = neutron_api.list_network(user,user.get('projectid'), False)
        print 'networks:----',networks
        if code == 200:
            networks_list = networks.get('networks')
            if len(networks_list) >0:
                # Reduce each network dict to an (id, name) pair.
                networks = [(n.get('id'),n.get('name')) for n in networks_list]
    except Exception as e:
        print e
    if not networks:
        # Fallback: hard-coded development fixture used when the Neutron
        # call fails or returns nothing.
        print "No networks available"
        networks=[
            (u'01db79e0-628f-4fb0-a483-c84f923b0728', u'zportal123111'),
            (u'147fbad0-485d-4ada-ba5d-cf7a25de67bf', u'huang'),
            (u'23969855-53d3-4300-b3a4-1aeb8981c182', u'tcxtcxtcx'),
            (u'2f0674bc-663f-45e3-bb98-ac3ea91f65b1', u'zsh-1'),
            (u'38c7b08b-73da-4561-aab6-f96c6be892ae', u'tempest_public'),
            (u'3fd6e9ee-6618-46e6-9471-ff12a605e04f', u'test11'),
            (u'4e4192af-ba9c-4908-8d88-b4b79a22277c', u'test-vpn-network'),
            (u'52ea0274-692b-4ef2-a37b-d56fc2561af4', u'cbnet'),
            (u'5c9aadc3-ed0e-42a5-8ba5-71b5c957199c', u'ext-net'),
            (u'753dd77b-7b77-42f1-9cfc-1d0273bfd670', u'wangwang'),
            (u'7d1a1cee-dd60-4546-971e-3db05fcdf371', u'suibiansuibian'),
            (u'90ee0dc5-38f9-4c5f-b206-8ea04966b7de', u'test-vpn-network-2'),
            (u'9809e635-1ace-4fa9-bbbc-f450df24ba59', u'net'),
            (u'b91179d1-33d9-414a-8956-34124dd09caa', u'nnzhang-network'),
            (u'bdbb7cfd-5603-4a0e-9216-34b3ea7e1e8d', u'wangxuguang'),
            (u'c6ddf9ea-6ccc-4e29-8829-a5c8161515cb', u'nnzhang-network1'),
            (u'ca6fa325-ed70-455d-a196-8cc02574f982', u'sisyphuswxg')
        ]
    for n in networks:
        #temply ignore ext-net
        if n[1] == 'ext-net':
            continue
        # Collect this network's unbound ports as (id, ip_address) pairs.
        tmpports = neutron_api.list_ports_by_nobind(network_id=n[0])
        tmpports = [(p.get('id'),p.get('ip_address')) for p in tmpports] #fetch the show of the multichose part
        if tmpports:
            ports.extend(tmpports)
            tmpports = []
    # NOTE(review): this unconditionally overwrites everything collected
    # above with a hard-coded fixture -- almost certainly leftover debug
    # code; confirm with the author before removing.
    ports= [(u'82f35082-5e37-4332-9902-0cd53415c26a', u'192.168.0.221'), (u'8b4d199c-b066-40f8-92f7-d417c031b281', u'192.168.0.220'), (u'6a89143c-27a6-420b-993c-3d419f9b429c', u'10.0.200.200')]
    return ports
def update_server(user, server_id, **kwargs):
    """Send a PUT .../servers/<id>/action request to Nova.

    :param user: dict with at least 'projectid' plus whatever auth data
        Util.createRequest consumes
    :param server_id: Nova server UUID
    :param kwargs: action document, JSON-encoded as the request body
        (e.g. {'migrage_value': ...} per the original call-site note)
    :return: (http_code, decoded_json) on success; implicitly None when
        the response body is empty
    :raises APIError: on HTTP or URL errors
    """
    method_name = 'update_server'
    params = json.dumps(kwargs)
    req = Util.createRequest(NOVAL_URL+'v2/%s/servers/%s/action' % (user['projectid'], server_id), 'PUT',user)
    try:
        response = urllib2.urlopen(req, params, timeout=TIME_OUT)
        obj = response.read()
        if obj and len(obj):
            content = json.loads(obj)
            code = response.code
            logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
            return code, content
    except HTTPError, e:
        logger.error(method_name+' Error code: '+str(e.code))
        if 404 == e.code:
            raise APIError('Not found the server')
        else:
            raise APIError('Reason: '+str(e.message))
    except URLError, e:
        logger.error(method_name+' URLError: '+str(e.reason))
        raise APIError(' URLError: ', e.reason)
def launch_server(user, kwargs, thread_id=None): #{'server':server,'tenant_id':tenant_id}
method_name = 'launch_server'
req = Util.createRequest(NOVAL_URL+'v2/%s/servers' % user.get('projectid'), 'POST',user)
try:
new_instance = json.dumps(kwargs)
print 'new_instance',new_instance
response = urllib2.urlopen(req, new_instance, timeout=TIME_OUT)
obj = response.read()
if obj and len(obj):
content = json.loads(obj)
code = response.code
instance_id = content.get('server')['id']
if thread_id is not None:
q.put(thread_id, code)
if code == 202:
try:
delete_cache(None, user)
except Exception, e:
print e
else:
delete_cache(instance_id, user)
logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
return code, content
except HTTPError, e:
logger.error(method_name+' Error code: '+str(e.code))
if 404 == e.code:
raise APIError('Reason: not found the server')
except URLError, e:
logger.debug(method_name+' URLError: '+str(e.reason))
raise APIError('URLError: ', e.reason)
def list_all_server(user):#123 #/v2.1/servers/detail ErrorCode 500
    """List all servers in the user's project, with DB-backed caching.

    Returns (200, cached content) on a cache hit, otherwise
    (HTTP status code, decoded body) from GET /v2/{project}/servers/detail.
    Raises APIError on HTTP/URL errors.
    """
    method_name = 'list_all_server'
    opcache = []
    # Cache lookup keyed on username + category.
    opcache = Opcache.objects.filter(user=user.get('username')).filter(category=method_name)
    if opcache is not None and len(opcache) > 0:
        content = json.loads(opcache[0].content)
        return 200, content
    req = Util.createRequest(NOVAL_URL+'v2/%s/servers/detail' % user['projectid'], 'GET',user) #500 error 48d8cd602577412c978f0184544e9e1c
    try:
        response = urllib2.urlopen(req, timeout=TIME_OUT)
        obj = response.read()
        if obj and len(obj):
            content = json.loads(obj)
            # Store the raw response body so later calls hit the cache above.
            o = Opcache()
            o.user = user.get('username')
            o.category = method_name
            o.content = obj
            o.params = "/v2/servers"
            o.save()
            code = response.code
            # logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
            return code, content
    except HTTPError, e:
        logger.error(method_name+' Error code: '+str(e.code))
        if 404 == e.code:
            raise APIError(method_name+' failed.Reason: not found the server')
        # else:
        #     raise APIError('Reason: '+str(e.message))
    except URLError, e:
        logger.error(method_name+' URLError: '+str(e.reason))
        raise APIError(' URLError: ', e.reason)
def list_flavor(user):#621 /v2/{tenant_id}/flavors ErrorCode 500
opcache = []
opcache = Opcache.objects.filter(user=user.get('username')).filter(category='list_flavor')
if opcache is not None and len(opcache) > 0:
flavor_list1 = json.loads(opcache[0].content)
return flavor_list1
method_name = 'list_flavor'
req = Util.createRequest(NOVAL_URL+'v2/%s/flavors' % user['projectid'], 'GET', user)
try:
response = urllib2.urlopen(req, timeout=TIME_OUT)
obj = response.read()
if obj and len(obj):
content = json.loads(obj)
code = response.code
flavor_list_temp = []
# logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
flavor_list_temp.append(code)
flavor_list_temp.append(content)
if code == 200:
for flavor in flavor_list_temp[1]['flavors']:
map = {}
map['id'] = flavor['id']
map['name'] = flavor['name']
flavor_list.append(map)
o = Opcache()
o.user = user
o.category = 'list_flavor'
o.params = 'v2/flavors'
o.content = json.dumps(flavor_list)
o.save()
return flavor_list
else:
return ["code",code]
except HTTPError, e:
logger.error(method_name+' Error code: '+str(e.code))
if 404 == e.code:
raise APIError(method_name+' failed.Reason: not found the flavors')
else:
raise APIError('Reason: '+str(e.message))
except URLError, e:
logger.error(method_name+' URLError: '+str(e.reason))
raise APIError(' URLError: ', e.reason)
return flavor_list
def migrate_server(user,**kwargs):#513 {'tenant_id':tenant_id,'server_id':server_id,'migrage_value':migrage_value}
    """Live-migrate a server by POSTing kwargs['migrage_value'] to the
    server's /action endpoint.

    kwargs must contain 'server_id' and 'migrage_value' (the action body).
    Returns (code, content); raises APIError on HTTP/URL errors.
    NOTE(review): action responses are often 202 with an empty body, in
    which case this returns None -- confirm callers handle that.
    """
    method_name = 'migrate_server'
    params = json.dumps(kwargs.get('migrage_value'))
    req = Util.createRequest(NOVAL_URL+'v2/%s/servers/%s/action' % (user['projectid'], kwargs.get('server_id')), 'POST',user) #500 error 48d8cd602577412c978f0184544e9e1c
    try:
        response = urllib2.urlopen(req, params, timeout=TIME_OUT)
        obj = response.read()
        if obj and len(obj):
            content = json.loads(obj)
            code = response.code
            logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
            return code, content
    except HTTPError, e:
        logger.error(method_name+' Error code: '+str(e.code))
        if 404 == e.code:
            raise APIError('Not found the server')
        else:
            raise APIError('Reason: '+str(e.message))
    except URLError, e:
        logger.error(method_name+' URLError: '+str(e.reason))
        raise APIError(' URLError: ', e.reason)
def operate_instance(user,**kwargs):#{tenant_id:'tenant_id','server_id':server_id,'reboot_type':reboot_type}
    """POST an arbitrary action (kwargs['operate_type']) to a server's
    /action endpoint and return the HTTP status code.

    kwargs must contain 'server_id' and 'operate_type' (JSON-encoded as the
    request body).  Raises APIError on HTTP/URL errors.
    """
    method_name = 'operate_instance'
    req = Util.createRequest(NOVAL_URL+'v2/%s/servers/%s/action' % (user['projectid'], kwargs.get('server_id')), 'POST',user) #500 error 48d8cd602577412c978f0184544e9e1c
    params = json.dumps(kwargs.get('operate_type'))
    try:
        response = urllib2.urlopen(req, params, timeout=TIME_OUT)
        # The action body is not read; only the status code matters here.
        code = response.code
        logger.debug(method_name+" code:" + str(code))
        # if code ==202:
        #     log = Loginfo()
        #     log.userid=user["userid"]
        #     log.operate=kwargs.get('operate_type')
        #     log.content = kwargs.get('operate_type')+" instance "
        #     log.save()
        return code
    except HTTPError, e:
        logger.error(method_name+' Error code: '+str(e.code))
        if 404 == e.code:
            raise APIError('Reason: not found the server')
        else:
            raise APIError('Reason: '+str(e.message))
    except URLError, e:
        logger.error(method_name+' URLError: '+str(e.reason))
        raise APIError(' URLError: ', e.reason)
def start_stop_server(user,**kwargs):#137 /v2.1/servers/{server_id}/action ErrorCode 500
#{'tenant_id':tenant_id,'server_id':server_id,'action':action}
method_name = 'start_stop_server'
parameter = ('server_id', 'action')
if Util.validate_parameter(*parameter, **kwargs):
req = Util.createRequest(NOVAL_URL+'v2/%s/servers/%s/action' % (user['projectid'], kwargs.get('server_id')), 'POST',user) #500 error 48d8cd602577412c978f0184544e9e1c
values = {
kwargs.get('action'): '' #"os-start"
}
params = json.dumps(values)
try:
response = urllib2.urlopen(req, params, timeout=TIME_OUT)
obj = response.read()
if obj and len(obj):
code = response.code
logger.debug(method_name+" code:" + str(code))
return code
except HTTPError, e:
logging.error(method_name+' Error code: '+str(e.code))
if 404 == e.code:
raise APIError('Reason: not found the server')
else:
raise APIError('Reason: '+str(e.message))
except URLError, e:
logging.error(method_name+' URLError: '+str(e.reason))
raise APIError(' URLError: ', e.reason)
return 'error', 'Input parameter error,can not start or stop server '
def delete_cache(instance_id, user):
    """Delete the user's cached server-list row.

    instance_id None -> the user's whole cached list row; otherwise only
    rows whose content mentions that instance id.
    """
    if instance_id is None:
        opcache = Opcache.objects.filter(user=user.get('username')).filter(params=CACHE_SERVER_PARAMS).filter(
            category=CACHE_SERVER_CATEGORY)
    else:
        opcache = Opcache.objects.filter(user=user.get('username')).filter(params=CACHE_SERVER_PARAMS).filter(
            category=CACHE_SERVER_CATEGORY).filter(content__icontains=instance_id)
    # Fix: guard the access -- `opcache[0]` raised IndexError whenever no
    # matching cache row existed (e.g. first call for a user).
    if opcache is not None and len(opcache) > 0:
        opcache[0].delete()
def update_cache(user, instance_id ,operate , new_instance = None):
    """Mutate the user's cached server list in place.

    operate: 'add'    -- append new_instance to the cached 'servers' list;
             'delete' -- remove the server whose id == instance_id;
             'update' -- replace the matching server entry with new_instance.
    Silently does nothing when no cache row matches.
    """
    opcache = []
    if operate == 'add':
        opcache = Opcache.objects.filter(user=user.get('username')).filter(params=CACHE_SERVER_PARAMS).filter(category=CACHE_SERVER_CATEGORY)
    else:
        opcache = Opcache.objects.filter(user=user.get('username')).filter(params=CACHE_SERVER_PARAMS).filter(category=CACHE_SERVER_CATEGORY).filter(content__icontains=instance_id)
    if opcache is not None and len(opcache) > 0 : # update the cache table data
        obj = opcache[0].content
        if obj and len(obj):
            content = json.loads(obj)
            i = 0
            if operate == 'add':
                content['servers'].append(new_instance)
            else:
                for server_obj in content['servers']:
                    if server_obj.get('id') == instance_id:
                        # NOTE(review): `i` is incremented *before* being used
                        # as the replacement index below, so 'update' appears
                        # to target the element AFTER the match (and the
                        # 'update' branch does not break out of the loop) --
                        # suspected off-by-one; confirm against callers.
                        i = i+1
                        if 'delete' == operate:
                            content['servers'].remove(server_obj)
                            break
                        elif 'update' == operate:
                            if new_instance is not None and len(new_instance) > 0:
                                for element in new_instance:
                                    element['visibility'] = 'xxx'
                                content['servers'][i] = new_instance
            opcache[0].content = json.dumps(content)
            opcache[0].save()
def delete_server(user,server_id):#163 {'tenant_id':tenant_id,'server_id':server_id} #/v2/{tenant_id}/servers/{server_id}/action
method_name = 'delete_server'
req = Util.createRequest(NOVAL_URL+'v2/%s/servers/%s/action' % (user['projectid'], server_id), 'POST',user) #500 error 48d8cd602577412c978f0184544e9e1c
values = {
"forceDelete": ''
}
params = json.dumps(values)
try:
response = urllib2.urlopen(req, params, timeout=TIME_OUT)
code = response.code
logger.debug(method_name+" code:" + str(code))
if code == 204 or code == 202:
try:
update_cache(user,server_id, 'delete',None)
except Exception, e:
print e.code
delete_cache(server_id, user)
return code
except HTTPError, e:
logger.error(method_name+' Error code: '+str(e.code))
if 404 == e.code:
raise APIError('Reason: not found the server')
else:
raise APIError('Reason: '+str(e.message))
except URLError, e:
logger.error(method_name+' URLError: '+str(e.reason))
raise APIError(' URLError: ', e.reason)
def list_server_snapshot(user):#{'tenant_id':tenant_id}
    """List volume snapshots (GET /v1.1/{project}/os-snapshots).

    Returns (code, content); raises APIError on HTTP/URL errors.
    """
    method_name = 'list_server_snapshot'
    req = Util.createRequest(NOVAL_URL+'v1.1/%s/os-snapshots' % user['projectid'], 'GET',user) #500 error 48d8cd602577412c978f0184544e9e1c
    try:
        response = urllib2.urlopen(req, timeout=TIME_OUT)
        obj = response.read()
        if obj and len(obj):
            content = json.loads(obj)
            code = response.code
            logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
            return code, content
    except HTTPError, e:
        logger.error(method_name+' Error code: '+str(e.code))
        if 404 == e.code:
            raise APIError('Reason: not found the volume')
        else:
            raise APIError('Reason: '+str(e.message))
    except URLError, e:
        logger.error(method_name+' URLError: '+str(e.reason))
        raise APIError(' URLError: ', e.reason)
def create_server_snapshot(user,**kwargs):#841{'tenant_id':tenant_id, 'snaphot_content':snaphot_content }
    """Create a volume snapshot (POST /v1.1/{project}/os-snapshots).

    kwargs['snaphot_content'] is sent as the JSON request body, e.g.
    {"snapshot": {"display_name": ..., "volume_id": ...}}.
    Returns (code, content); raises APIError on HTTP/URL errors.
    """
    method_name = 'create_server_snapshot'
    req = Util.createRequest(NOVAL_URL+'v1.1/%s/os-snapshots' % user['projectid'], 'POST',user) #500 error 48d8cd602577412c978f0184544e9e1c
    params = json.dumps(kwargs.get('snaphot_content'))
    try:
        response = urllib2.urlopen(req, params, timeout=TIME_OUT)
        obj = response.read()
        if obj and len(obj):
            content = json.loads(obj)
            code = response.code
            logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
            return code, content
    except HTTPError, e:
        logger.error(method_name+" code:" + str(e.code)+" content:" + str(e.message))
        if 404 == e.code:
            raise APIError('Reason: not found the snaphot')
        else:
            raise APIError('Reason: '+str(e.message))
    except URLError, e:
        logger.error(method_name+' URLError: '+str(e.reason))
        raise APIError(' URLError: ', e.reason)
def backup_server(user,**kwargs):#841{'tenant_id':tenant_id, 'snaphot_content':snaphot_content }
    """POST a backup action for a server; kwargs is the JSON request body.

    Returns (code, content); raises APIError on HTTP/URL errors.

    NOTE(review): the target server UUID is hard-coded
    ('3b160a94-2050-4a72-811e-2df143b9d874') -- this looks like debug
    leftovers; the server id should presumably come from the caller.
    Confirm before using this function in production paths.
    """
    method_name = 'backup_server'
    req = Util.createRequest(NOVAL_URL+'/v2/%s/servers/%s/action' %( user['projectid'], '3b160a94-2050-4a72-811e-2df143b9d874'), 'POST',user) #500 error 48d8cd602577412c978f0184544e9e1c
    params = json.dumps(kwargs)
    try:
        response = urllib2.urlopen(req, params, timeout=TIME_OUT)
        obj = response.read()
        if obj and len(obj):
            content = json.loads(obj)
            code = response.code
            logger.debug(method_name+" code:" + str(code)+" content:" + str(content))
            return code, content
    except HTTPError, e:
        logger.error(method_name+" code:" + str(e.code)+" content:" + str(e.message))
        if 404 == e.code:
            raise APIError('Reason: not found the snaphot')
        else:
            raise APIError('Reason: '+str(e.message))
    except URLError, e:
        logger.error(method_name+' URLError: '+str(e.reason))
        raise APIError(' URLError: ', e.reason)
if __name__ == "__main__":
    # Ad-hoc manual test driver: builds sample payloads and exercises the
    # API wrappers against a live endpoint.
    #launch_server()
    values = {
        "os-migrateLive": {
            "host": "aaron-55",
            "block_migration": False,
            "disk_over_commit": False
        }
    }
    sersers={"server": {"name": "zzz",
            "imageRef": "047ff440-2835-4129-b2a6-eaca1396695a",
            "flavorRef": "1",
            "return_reservation_id": "true",
            "max_count": 1,
            "available_zone": "nova",
            "min_count": 1,
            "networks": [{"uuid": "3891a9d6-9083-4bf3-8c99-034932c3e81c"}],
            "metadata": {"My Server Name": "Apache1"}
            }
        }
    sersers={"server": {"name": "portal12", "imageRef": "047ff440-2835-4129-b2a6-eaca1396695a", "flavorRef": "1", "return_reservation_id": "true", "max_count": 1, "available_zone": "nova", "min_count": 1, "networks": [{"uuid": "3891a9d6-9083-4bf3-8c99-034932c3e81c"}], "metadata": {"My Server Name": "Apache1"}}}
    sersers={'tenant_id': 'd04021d5a4144b4c9f579fdc1d1c2a9a', 'server': {'server': {'name': '1', 'imageRef': '047ff440-2835-4129-b2a6-eaca1396695a', 'flavorRef': '1', 'return_reservation_id': 'true', 'max_count': 1, 'available_zone': 'portal5', 'min_count': 1, 'networks': [{'uuid': '3891a9d6-9083-4bf3-8c99-034932c3e81c'}], 'metadata': {'My Server Name': 'Apache1'}}}}
    new_instance={"server": {"name": "nova", "imageRef": "fa2aea3a-109e-4790-9545-2b118721ec0d", "flavorRef": "1", "max_count": 1, "available_zone": "nova", "min_count": 1, "networks": [{"uuid": ["01db79e0-628f-4fb0-a483-c84f923b0728"]}]}}
    users={"username":"admin","password":"Abc12345","projectid":"d04021d5a4144b4c9f579fdc1d1c2a9a"}
    # launch_server(users,**sersers)
    delete_server(users,'4d9dceeb-f57d-4025-8f28-296b764c490b')
    server = {
        "server": {"name": "sddstest"}
    }
    # Fix: update_server's signature is (user, server_id, **kwargs); passing
    # the dict as a third positional argument raised TypeError.  Expand it
    # as keyword arguments instead.
    update_server(users,'5ac35631-aca8-45e0-a574-ced0cc92f73b',**server)
    user={'username': 'portal1', 'project_name': 'demo', 'projectid': '3cd084a024d64d0486920e28ec30a233', 'user_domain_name': 'Default', 'userid': 'd2370ed26c454733bb97abaec34db0d4', 'userroles': [{'id': '31fd1d2bb7e741218637e230c13df800', 'name': 'user'}], 'password': '123', 'project_domain_name': 'Default'}
    # user={'username': 'admin', 'project_name': 'admin', 'projectid': 'd04021d5a4144b4c9f579fdc1d1c2a9a', 'user_domain_name': 'Default', 'userid': '48d8cd602577412c978f0184544e9e1c', 'userroles': [{'id': '7461b89e471a41729769010cab38b71b', 'name': 'admin'}], 'password': 'Abc12345', 'project_domain_name': 'Default'}
|
nilq/baby-python
|
python
|
import os
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def load_dataset(data_root, dataset_name, trans):
    """Return a training split for a known dataset name, otherwise an
    ImageFolder rooted at data_root/dataset_name.

    data_root    -- directory where datasets live (and are downloaded to).
    dataset_name -- 'mnist', 'kmnist', 'cifar100', or a folder name.
    trans        -- torchvision transform applied to every image.
    """
    known = {
        'mnist': datasets.MNIST,
        'kmnist': datasets.KMNIST,
        'cifar100': datasets.CIFAR100,
    }
    factory = known.get(dataset_name)
    if factory is not None:
        return factory(root=data_root, train=True, transform=trans, download=True)
    # Unknown name: treat it as a class-per-subdirectory image folder.
    return datasets.ImageFolder(root=os.path.join(data_root, dataset_name),
                                transform=trans)
class DataLoader(object):
    """Builds a torchvision transform pipeline and wraps the requested
    dataset in a shuffling torch DataLoader."""

    def __init__(self, data_root, dataset_name, img_size, img_type, batch_size):
        self.data_root = data_root
        self.dataset_name = dataset_name
        self.img_size = img_size
        self.batch_size = batch_size
        grayscale = img_type == 'grayscale'
        steps = []
        if grayscale:
            steps.append(transforms.Grayscale())
        steps.append(transforms.Resize((self.img_size, self.img_size)))
        steps.append(transforms.ToTensor())
        # Normalize each channel to [-1, 1]; channel count depends on mode.
        if grayscale:
            steps.append(transforms.Normalize((0.5,), (0.5,)))
        else:
            steps.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
        self.transforms = transforms.Compose(steps)

    def get_loader(self):
        """Return (dataloader, number_of_classes) for the configured dataset."""
        dataset = load_dataset(self.data_root, self.dataset_name, self.transforms)
        loader = torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=4
        )
        class_names = dataset.classes
        print(f'Total number of images: {len(dataset)}')
        print(f'Total number of classes: {len(dataset.classes)}')
        return loader, len(class_names)
|
nilq/baby-python
|
python
|
from typing import Union, Optional, List, Dict
from torch_geometric.typing import OptPairTensor, Adj, OptTensor, Size, PairTensor
import torch
from torch import Tensor
from torch_geometric.nn import GINEConv as BaseGINEConv, GINConv as BaseGINConv, LEConv as BaseLEConv
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import reset
from torch_geometric.utils import degree
from torch_scatter import scatter
class GINConv(BaseGINConv):
    """GIN convolution extended with optional per-edge attention weights."""

    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_attr: OptTensor = None, edge_atten: OptTensor = None, size: Size = None) -> Tensor:
        """Aggregate neighbor features; edge_atten scales each message."""
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)
        # propagate_type: (x: OptPairTensor)
        aggregated = self.propagate(edge_index, x=x, edge_atten=edge_atten, size=size)
        root = x[1]
        if root is not None:
            aggregated += (1 + self.eps) * root
        return self.nn(aggregated)

    def message(self, x_j: Tensor, edge_atten: OptTensor = None) -> Tensor:
        """Per-edge message, scaled by edge_atten when provided."""
        return x_j if edge_atten is None else x_j * edge_atten
class GINEConv(BaseGINEConv):
    """GINE convolution (edge features) with optional per-edge attention."""

    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_attr: OptTensor = None, edge_atten: OptTensor = None, size: Size = None) -> Tensor:
        """Aggregate node+edge messages; edge_atten scales each message."""
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)
        # propagate_type: (x: OptPairTensor, edge_attr: OptTensor)
        aggregated = self.propagate(edge_index, x=x, edge_attr=edge_attr, edge_atten=edge_atten, size=size)
        root = x[1]
        if root is not None:
            aggregated += (1 + self.eps) * root
        return self.nn(aggregated)

    def message(self, x_j: Tensor, edge_attr: Tensor, edge_atten: OptTensor = None) -> Tensor:
        """Combine node and (optionally projected) edge features per edge."""
        if self.lin is None and x_j.size(-1) != edge_attr.size(-1):
            raise ValueError("Node and edge feature dimensionalities do not "
                             "match. Consider setting the 'edge_dim' "
                             "attribute of 'GINEConv'")
        if self.lin is not None:
            edge_attr = self.lin(edge_attr)
        msg = (x_j + edge_attr).relu()
        return msg if edge_atten is None else msg * edge_atten
class LEConv(BaseLEConv):
    """LEConv with optional per-edge attention applied to the messages."""

    def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj,
                edge_weight: OptTensor = None, edge_atten: OptTensor = None) -> Tensor:
        """Local-extremum convolution: propagate(lin1(x_src), lin2(x_dst)) + lin3(x_dst)."""
        if isinstance(x, Tensor):
            x = (x, x)
        src_proj = self.lin1(x[0])
        dst_proj = self.lin2(x[1])
        # propagate_type: (a: Tensor, b: Tensor, edge_weight: OptTensor)
        out = self.propagate(edge_index, a=src_proj, b=dst_proj, edge_weight=edge_weight, edge_atten=edge_atten, size=None)
        return out + self.lin3(x[1])

    def message(self, a_j: Tensor, b_i: Tensor, edge_weight: OptTensor, edge_atten: OptTensor = None) -> Tensor:
        """Per-edge difference message, optionally weighted and attended."""
        msg = a_j - b_i
        if edge_weight is not None:
            msg = msg * edge_weight.view(-1, 1)
        if edge_atten is not None:
            msg = msg * edge_atten
        return msg
# https://github.com/lukecavabarrett/pna/blob/master/models/pytorch_geometric/pna.py
class PNAConvSimple(MessagePassing):
    r"""The Principal Neighbourhood Aggregation graph convolution operator
    from the `"Principal Neighbourhood Aggregation for Graph Nets"
    <https://arxiv.org/abs/2004.05718>`_ paper
    .. math::
        \bigoplus = \underbrace{\begin{bmatrix}I \\ S(D, \alpha=1) \\
        S(D, \alpha=-1) \end{bmatrix} }_{\text{scalers}}
        \otimes \underbrace{\begin{bmatrix} \mu \\ \sigma \\ \max \\ \min
        \end{bmatrix}}_{\text{aggregators}},
    in:
    .. math::
        X_i^{(t+1)} = U \left( \underset{(j,i) \in E}{\bigoplus}
        M \left(X_j^{(t)} \right) \right)
    where :math:`U` denote the MLP referred to with posttrans.
    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        aggregators (list of str): Set of aggregation function identifiers,
            namely :obj:`"sum"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
            :obj:`"var"` and :obj:`"std"`.
        scalers: (list of str): Set of scaling function identifiers, namely
            :obj:`"identity"`, :obj:`"amplification"`,
            :obj:`"attenuation"`, :obj:`"linear"` and
            :obj:`"inverse_linear"`.
        deg (Tensor): Histogram of in-degrees of nodes in the training set,
            used by scalers to normalize.
        post_layers (int, optional): Number of transformation layers after
            aggregation (default: :obj:`1`).
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(self, in_channels: int, out_channels: int,
                 aggregators: List[str], scalers: List[str], deg: Tensor,
                 post_layers: int = 1, **kwargs):
        super(PNAConvSimple, self).__init__(aggr=None, node_dim=0, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Resolve aggregator / scaler names to callables once, up front.
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers = [SCALERS[scale] for scale in scalers]
        self.F_in = in_channels
        self.F_out = self.out_channels
        deg = deg.to(torch.float)
        # Average-degree statistics consumed by the degree scalers.
        self.avg_deg: Dict[str, float] = {
            'lin': deg.mean().item(),
            'log': (deg + 1).log().mean().item(),
            'exp': deg.exp().mean().item(),
        }
        # The aggregate step concatenates |aggregators| * |scalers| blocks.
        # NOTE(review): message() outputs cat(x_i, x_j[, edge_attr]), so the
        # effective per-block width may exceed F_in -- confirm the intended
        # in_features of the first Linear against the caller's usage.
        in_channels = (len(aggregators) * len(scalers)) * self.F_in
        modules = [Linear(in_channels, self.F_out)]
        for _ in range(post_layers - 1):
            modules += [ReLU()]
            modules += [Linear(self.F_out, self.F_out)]
        self.post_nn = Sequential(*modules)
        self.reset_parameters()

    def reset_parameters(self):
        reset(self.post_nn)

    def forward(self, x: Tensor, edge_index: Adj, edge_attr: OptTensor = None, edge_atten=None) -> Tensor:
        # propagate_type: (x: Tensor)
        out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=None, edge_atten=edge_atten)
        return self.post_nn(out)

    def message(self, x_i: Tensor, x_j: Tensor, edge_attr=None, edge_atten=None) -> Tensor:
        # Concatenate endpoint (and edge) features; scale by attention if given.
        if edge_attr is not None:
            m = torch.cat([x_i, x_j, edge_attr], dim=-1)
        else:
            m = torch.cat([x_i, x_j], dim=-1)
        if edge_atten is not None:
            return m * edge_atten
        else:
            return m

    def aggregate(self, inputs: Tensor, index: Tensor,
                  dim_size: Optional[int] = None) -> Tensor:
        # Apply every aggregator, then every degree scaler, concatenating all.
        outs = [aggr(inputs, index, dim_size) for aggr in self.aggregators]
        out = torch.cat(outs, dim=-1)
        deg = degree(index, dim_size, dtype=inputs.dtype).view(-1, 1)
        outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
        return torch.cat(outs, dim=-1)

    def __repr__(self):
        # Fix: the closing parenthesis was missing from the repr string, and
        # an unreachable `raise NotImplementedError` followed the return.
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels})')
def aggregate_sum(src: Tensor, index: Tensor, dim_size: Optional[int]):
    """Sum rows of *src* grouped by *index* along dim 0."""
    return scatter(src, index, dim=0, out=None, dim_size=dim_size, reduce='sum')
def aggregate_mean(src: Tensor, index: Tensor, dim_size: Optional[int]):
    """Average rows of *src* grouped by *index* along dim 0."""
    return scatter(src, index, dim=0, out=None, dim_size=dim_size, reduce='mean')
def aggregate_min(src: Tensor, index: Tensor, dim_size: Optional[int]):
    """Per-group minimum of *src* rows along dim 0."""
    return scatter(src, index, dim=0, out=None, dim_size=dim_size, reduce='min')
def aggregate_max(src: Tensor, index: Tensor, dim_size: Optional[int]):
    """Per-group maximum of *src* rows along dim 0."""
    return scatter(src, index, dim=0, out=None, dim_size=dim_size, reduce='max')
def aggregate_var(src, index, dim_size):
    """Per-group (biased) variance: E[x^2] - E[x]^2."""
    mu = aggregate_mean(src, index, dim_size)
    ex2 = aggregate_mean(src * src, index, dim_size)
    return ex2 - mu * mu
def aggregate_std(src, index, dim_size):
    """Per-group standard deviation, clamped at 0 with a 1e-5 floor for
    numerical stability."""
    variance = aggregate_var(src, index, dim_size)
    return torch.sqrt(torch.relu(variance) + 1e-5)
# Registry mapping aggregator identifiers (as passed to PNAConvSimple) to
# the aggregation functions defined above.
AGGREGATORS = {
    'sum': aggregate_sum,
    'mean': aggregate_mean,
    'min': aggregate_min,
    'max': aggregate_max,
    'var': aggregate_var,
    'std': aggregate_std,
}
def scale_identity(src: Tensor, deg: Tensor, avg_deg: Dict[str, float]):
    """No-op scaler: returns *src* unchanged regardless of degree."""
    return src
def scale_amplification(src: Tensor, deg: Tensor, avg_deg: Dict[str, float]):
    """Amplify messages of high-degree nodes: src * log(deg+1) / avg_log_deg."""
    factor = torch.log(deg + 1) / avg_deg['log']
    return src * factor
def scale_attenuation(src: Tensor, deg: Tensor, avg_deg: Dict[str, float]):
    """Attenuate messages of high-degree nodes: src * avg_log_deg / log(deg+1).
    Degree-0 rows pass through unscaled (factor forced to 1)."""
    raw = avg_deg['log'] / torch.log(deg + 1)
    factor = torch.where(deg == 0, torch.ones_like(raw), raw)
    return src * factor
def scale_linear(src: Tensor, deg: Tensor, avg_deg: Dict[str, float]):
    """Scale messages linearly by degree relative to the average degree."""
    factor = deg / avg_deg['lin']
    return src * factor
def scale_inverse_linear(src: Tensor, deg: Tensor, avg_deg: Dict[str, float]):
    """Scale messages by avg_degree / degree; degree-0 rows pass through
    unscaled (factor forced to 1)."""
    raw = avg_deg['lin'] / deg
    factor = torch.where(deg == 0, torch.ones_like(raw), raw)
    return src * factor
# Registry mapping scaler identifiers (as passed to PNAConvSimple) to the
# degree-scaling functions defined above.
SCALERS = {
    'identity': scale_identity,
    'amplification': scale_amplification,
    'attenuation': scale_attenuation,
    'linear': scale_linear,
    'inverse_linear': scale_inverse_linear
}
|
nilq/baby-python
|
python
|
import argparse, logging, math, filepattern, time, queue
from bfio import BioReader, BioWriter
import pathlib
from preadator import ProcessManager
# Root logging format shared by every process in this plugin.
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
                    datefmt='%d-%b-%y %H:%M:%S')
# length/width of the chunk each _merge_layers thread processes at once
chunk_size = 8192
# Units for conversion: multiplier that converts one of each unit into
# nanometres (e.g. 1 m = 10**9 nm, 1 Å = 0.1 nm).
UNITS = {'m': 10**9,
         'cm': 10**7,
         'mm': 10**6,
         'µm': 10**3,
         'nm': 1,
         'Å': 10**-1}
def _merge_layers(input_files,output_path):
    """Stack the z-layers of *input_files* into one volume at *output_path*.

    Runs inside a preadator ProcessManager process.  input_files is a list
    of dicts with a 'file' key (as produced by filepattern); layers are
    appended in list order.  Metadata is copied from the first input file.
    """
    with ProcessManager.process(output_path.name):
        # Get the number of layers to stack
        z_size = 0
        for f in input_files:
            with BioReader(f['file']) as br:
                z_size += br.z
        # Get some basic info about the files to stack
        with BioReader(input_files[0]['file']) as br:
            # Get the physical z-distance if available, set to physical x if not
            ps_z = br.ps_z
            # If the z-distances are undefined, average the x and y together
            if None in ps_z:
                # Get the size and units for x and y
                x_val,x_units = br.ps_x
                y_val,y_units = br.ps_y
                # Convert x and y values to the same units and average
                z_val = (x_val*UNITS[x_units] + y_val*UNITS[y_units])/2
                # Set z units to the smaller of the units between x and y
                z_units = x_units if UNITS[x_units] < UNITS[y_units] else y_units
                # Convert z to the proper unit scale
                z_val /= UNITS[z_units]
                ps_z = (z_val,z_units)
                ProcessManager.log('Could not find physical z-size. Using the average of x & y {}.'.format(ps_z))
            # Hold a reference to the metadata once the file gets closed
            metadata = br.metadata
        # Create the output file within a context manager
        with BioWriter(output_path,metadata=metadata,max_workers=ProcessManager._active_threads) as bw:
            # Adjust the dimensions before writing
            bw.z = z_size
            bw.ps_z = ps_z
            # ZIndex tracking for the output file
            zi = 0
            # Start stacking
            for file in input_files:
                # Open an image
                with BioReader(file['file'],max_workers=ProcessManager._active_threads) as br:
                    # Open z-layers one at a time
                    for z in range(br.z):
                        # Use tiled reading in x&y to conserve memory
                        # At most, [chunk_size, chunk_size] pixels are loaded
                        for xs in range(0,br.x,chunk_size):
                            xe = min([br.x,xs + chunk_size])
                            for ys in range(0,br.y,chunk_size):
                                ye = min([br.y,ys + chunk_size])
                                bw[ys:ye,xs:xe,zi:zi+1,...] = br[ys:ye,xs:xe,z:z+1,...]
                        zi += 1
                # update the BioWriter in case the ProcessManager found more threads
                bw.max_workers = ProcessManager._active_threads
def main(input_dir: pathlib.Path,
         file_pattern: str,
         output_dir: pathlib.Path
         ) -> None:
    """Group input files by their z variable and submit one stacking
    process per group, then wait for all of them to finish."""
    pattern = filepattern.FilePattern(input_dir, file_pattern)
    for group in pattern(group_by='z'):
        out_path = output_dir.joinpath(pattern.output_name(group))
        ProcessManager.submit_process(_merge_layers, group, out_path)
    ProcessManager.join_processes()
if __name__ == "__main__":
    # Logger for the main thread.
    logger = logging.getLogger('main')
    logger.setLevel(logging.INFO)

    # Command-line interface.
    logger.info('Parsing arguments...')
    cli = argparse.ArgumentParser(prog='main', description='Compile individual tiled tiff images into a single volumetric tiled tiff.')
    cli.add_argument('--inpDir', dest='input_dir', type=str,
                     help='Path to folder with tiled tiff files', required=True)
    cli.add_argument('--outDir', dest='output_dir', type=str,
                     help='The output directory for ome.tif files', required=True)
    cli.add_argument('--filePattern', dest='file_pattern', type=str,
                     help='A filename pattern specifying variables in filenames.', required=True)
    parsed = cli.parse_args()

    input_dir = pathlib.Path(parsed.input_dir)
    # WIPP-style inputs may nest the data under an 'images' subfolder.
    if input_dir.joinpath("images").is_dir():
        input_dir = input_dir.joinpath("images")
    output_dir = pathlib.Path(parsed.output_dir)
    file_pattern = parsed.file_pattern

    logger.info(f'input_dir = {input_dir}')
    logger.info(f'output_dir = {output_dir}')
    logger.info(f'file_pattern = {file_pattern}')
    logger.info(f'max_threads: {ProcessManager.num_processes()}')

    ProcessManager.init_processes('main','stack')
    main(input_dir,
         file_pattern,
         output_dir)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os, sys
import string
import argparse
import re
parser = argparse.ArgumentParser(description="""
creates mut file from snpEFF vcf
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--vcf', metavar = '<file.vcf>', required=True,
                    help="""VCF file. <REQUIRED>\n\n""")
parser.add_argument('--depth', metavar = '<file.vcf>', default = 10,
                    help="""Sample minimum DP (default:10)\n\n""")
parser.add_argument('--targets', metavar = '<targets.txt>',
                    help="""List of targets to create scoring for\n\n""")
parser._optionals.title = "Program Options"
args = parser.parse_args()
# Pre-compiled patterns for VCF parsing.
# Fix: regex patterns are now raw strings -- '\d' inside a plain string
# literal is an invalid escape sequence (DeprecationWarning on Python 3,
# a SyntaxError in future versions).
VHEADER = re.compile(r'^##', re.IGNORECASE)           # '##' meta-information lines
SHEADER = re.compile(r'^#', re.IGNORECASE)            # '#CHROM ...' column header
VAR_GT = re.compile(r'(0/1|1/1|1/0)', re.IGNORECASE)  # genotypes carrying the variant
DP = re.compile(r':(\d+):', re.IGNORECASE)            # per-sample read depth field
GT_CHECK = re.compile(r'GT:DP', re.IGNORECASE)
# Column header for the generated .mut file.
OUT_HEADER = "chr\tstart\tend\tsample\ttype\tgene\tallele\ttranscript\tHGVS.c\tcDNA position\tCDS position\tHGVS.p\tProtein position\tLOF\tNMD\n"
# snpEff impact class -> numeric score accumulated per sample / per gene.
SCORE_EFFECT = {'MODIFIER' : 0, 'LOW' : 1, 'MODERATE' : 2, 'HIGH' : 4}
def check_file(file):
    """Exit with status 1 (after an stderr message) if *file* is missing."""
    if os.path.isfile(file):
        return
    sys.stderr.write("could not locate {}\n".format(file))
    raise SystemExit(1)
def target_list(targets, score):
    """Seed *score* with one empty dict per gene listed in the *targets*
    file (one gene name per line, trailing whitespace stripped)."""
    with open(targets) as handle:
        for gene in handle:
            score[gene.rstrip()] = {}
def make_mut(vcf, d, score):
    """Parse a snpEff-annotated VCF and write ./mut_out.mut.

    For each variant genotype that passes the minimum depth *d*, writes one
    annotated row per carrying sample and accumulates the snpEff impact
    score into score['global'][sample] and, for targeted genes, into
    score[gene][sample].

    vcf   -- path to the snpEff-annotated VCF file.
    d     -- minimum per-sample DP (int or numeric string).
    score -- dict pre-seeded with target genes (see target_list); mutated
             in place.
    """
    samples = []
    score['global'] = {}
    mut_out = open('./mut_out.mut', 'w')
    with open(vcf) as vopen:
        for line in vopen:
            if VHEADER.match(line):
                continue  # skip '##' meta-information lines
            line = line.rstrip()
            fields = line.split("\t")
            if SHEADER.match(line):
                # '#CHROM ...' header: sample names start at column 10.
                mut_out.write(OUT_HEADER)
                for i in range(9, len(fields)):
                    samples.append(fields[i])
                    for r in score:
                        score[r][fields[i]] = 0
            else:
                chrom = fields[0]
                start = fields[1]
                # end = start + len(REF) - 1
                stop = int(fields[1]) + len(fields[3]) - 1
                info = {i[0]: i[1] for i in [f.split("=") for f in fields[7].split(";")]}
                ann = info['ANN']
                # snpEff lists the most impactful effect first
                eff1 = ann.split(",")[0].split("|")
                allele = eff1[0]
                effect_type = eff1[1]
                effect = eff1[2]
                gene = eff1[3]
                escore = SCORE_EFFECT[effect]
                transcript = eff1[6]
                hgvs_c = eff1[9]
                hgvs_p = eff1[10]
                cdna_pos = eff1[11]
                cds_pos = eff1[12]
                protein_pos = eff1[13]
                lof = info.get('LOF', "")
                nmd = info.get('NMD', "")
                for i in range(9, len(fields)):
                    if VAR_GT.search(fields[i]):
                        sample = samples[i - 9]
                        check = 1
                        dp_match = DP.search(fields[i])
                        if dp_match:
                            # Fix: the original compared the *string* DP value
                            # against the numeric threshold ('50' >= 10),
                            # which is a TypeError on Python 3 and a
                            # meaningless ordering on Python 2.  Compare ints.
                            check = 1 if int(dp_match.group(1)) >= int(d) else 0
                        if check:
                            score['global'][sample] += escore
                            if gene in score:
                                score[gene][sample] += escore
                            field = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
                                chrom,
                                start,
                                stop,
                                sample,
                                effect_type,
                                gene,
                                allele,
                                transcript,
                                hgvs_c,
                                cdna_pos,
                                cds_pos,
                                hgvs_p,
                                protein_pos,
                                lof,
                                nmd,
                            )
                            mut_out.write(field)
    mut_out.close()
if __name__ == "__main__":
    # Validate inputs, build the per-sample score tables, then emit
    # attributes.txt with one column per target gene plus a global score.
    vcf = args.vcf
    depth = args.depth
    targets = args.targets
    score = {}
    results = {}
    check_file(vcf)
    if targets:
        check_file(targets)
        target_list(targets, score)
    make_mut(vcf, depth, score)
    # NOTE(review): column alignment between `header` and each results row
    # relies on iterating `score` in the same order in both loops (dict
    # insertion order) and on every sample appearing in every target --
    # confirm this holds for sparse inputs.
    header = ['sample']
    for r in score:
        if r == 'global':
            continue
        header.append(str(r + "_score"))
        for s in score[r]:
            rscore = str(score[r][s])
            if s in results:
                results[s].append(rscore)
            else:
                results[s] = [s, rscore]
    header.append('global_score')
    for s in score['global']:
        if not s in results:
            rscore = str(score['global'][s])
            results[s] = [s, rscore]
        else:
            rscore = str(score['global'][s])
            results[s].append(rscore)
    file = "attributes.txt"
    r_out = open(file, 'w')
    r_out.write("\t".join(header) + "\n")
    for s in results:
        r_out.write("\t".join(results[s]) + "\n")
    r_out.close()
|
nilq/baby-python
|
python
|
"""
Django settings for Journal project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from dotenv import load_dotenv
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('APP_SECRET_KEY')
GOOGLE_RECAPTCHA_SECRET_KEY = os.environ.get('GOOGLE_SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
# os.environ.get() returns a *string*, so any non-empty value (even the
# literal "False") would otherwise enable DEBUG; parse it explicitly.
DEBUG = os.environ.get('DJANGO_DEBUG', 'False').lower() in ('1', 'true', 'yes')

ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '').split()

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'entries',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Journal.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Journal.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Connection parameters come from the environment (see .env / load_dotenv).
DATABASES = {
    'default': {
        'ENGINE': ('django.db.backends.postgresql'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASSWORD'),
        'HOST': os.environ.get('DB_HOST'),
        'PORT': os.environ.get('DB_PORT'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Prague'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]

# Email (SMTP via Gmail).
# SECURITY: the account password was previously hard-coded here; it is now
# read from the environment.  The exposed credential must be rotated.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = True
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os, sys, subprocess, argparse, re, logging, errno
import mymm
# Command-line interface: a .pqr input file and an output basename.
cli = argparse.ArgumentParser(description = "This program takes a .pqr file (MEAD format only for now, meaning no chain field!) and writes a CRG and PDB file from it.", prog = sys.argv[0])
cli.add_argument('--pqr', metavar = 'lysozyme.pqr')
cli.add_argument('--output', metavar = '<base for output filename>')
opts = cli.parse_args(sys.argv[1:])

# Load the PQR file and emit the matching .pdb / .crg pair.
molecule = mymm.Molecule()
molecule.read_pqr(opts.pqr)
molecule.write_pdb2(opts.output + ".pdb")
molecule.write_crg(opts.output + ".crg")
|
nilq/baby-python
|
python
|
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class Adjustment(object):
    """REST client for the order adjustment endpoints (price, shipping,
    handling).

    Every method formats a templated resource URL, executes the request
    through the configured Mozu client, and returns the parsed result.
    """

    def __init__(self, apiContext: ApiContext = None, mozuClient = None):
        self.client = mozuClient or default_client()
        # Fall back to a freshly constructed ApiContext when none is given.
        self.client.withApiContext(apiContext if apiContext is not None else ApiContext())

    def applyHandlingAdjustment(self, adjustment, orderId, updateMode = None, version = None, responseFields = None):
        """Updates the order handling adjustment.

        Args:
            adjustment: Properties of the ad-hoc adjustment to apply.
            orderId (string): Unique identifier of the order.
            updateMode (string): "ApplyToOriginal", "ApplyToDraft" or "ApplyAndCommit".
            version (string): Order version, used for concurrency checking.
            responseFields (string): Restricts which fields the response includes.
        Returns:
            Order
        Raises:
            ApiException
        """
        resource = MozuUrl("/api/commerce/orders/{orderId}/adjustment/handling?updatemode={updateMode}&version={version}&responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False)
        for name, value in (("orderId", orderId), ("responseFields", responseFields),
                            ("updateMode", updateMode), ("version", version)):
            resource.formatUrl(name, value)
        self.client.withResourceUrl(resource).withBody(adjustment).execute()
        return self.client.result()

    def applyShippingAdjustment(self, adjustment, orderId, updateMode = None, version = None, responseFields = None):
        """Applies a shipping adjustment to the specified order.

        Args:
            adjustment: Properties of the ad-hoc price adjustment.
            orderId (string): Unique identifier of the order.
            updateMode (string): "ApplyToOriginal", "ApplyToDraft" or "ApplyAndCommit".
            version (string): Order version, used for concurrency checking.
            responseFields (string): Restricts which fields the response includes.
        Returns:
            Order
        Raises:
            ApiException
        """
        resource = MozuUrl("/api/commerce/orders/{orderId}/adjustment/shipping?updatemode={updateMode}&version={version}&responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False)
        for name, value in (("orderId", orderId), ("responseFields", responseFields),
                            ("updateMode", updateMode), ("version", version)):
            resource.formatUrl(name, value)
        self.client.withResourceUrl(resource).withBody(adjustment).execute()
        return self.client.result()

    def applyAdjustment(self, adjustment, orderId, updateMode = None, version = None, responseFields = None):
        """Applies a price adjustment to the specified order.

        Args:
            adjustment: Properties of the ad-hoc price adjustment.
            orderId (string): Unique identifier of the order.
            updateMode (string): "ApplyToOriginal", "ApplyToDraft" or "ApplyAndCommit".
            version (string): Order version, used for concurrency checking.
            responseFields (string): Restricts which fields the response includes.
        Returns:
            Order
        Raises:
            ApiException
        """
        resource = MozuUrl("/api/commerce/orders/{orderId}/adjustment?updatemode={updateMode}&version={version}&responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False)
        for name, value in (("orderId", orderId), ("responseFields", responseFields),
                            ("updateMode", updateMode), ("version", version)):
            resource.formatUrl(name, value)
        self.client.withResourceUrl(resource).withBody(adjustment).execute()
        return self.client.result()

    def removeHandlingAdjustment(self, orderId, updateMode = None, version = None):
        """Removes an adjustment to the order handling fee.

        Args:
            orderId (string): Unique identifier of the order.
            updateMode (string): "ApplyToOriginal", "ApplyToDraft" or "ApplyAndCommit".
            version (string): Order version, used for concurrency checking.
        Returns:
            Order
        Raises:
            ApiException
        """
        resource = MozuUrl("/api/commerce/orders/{orderId}/adjustment/handling?updatemode={updateMode}&version={version}", "DELETE", UrlLocation.TenantPod, False)
        for name, value in (("orderId", orderId), ("updateMode", updateMode), ("version", version)):
            resource.formatUrl(name, value)
        self.client.withResourceUrl(resource).execute()
        return self.client.result()

    def removeShippingAdjustment(self, orderId, updateMode = None, version = None):
        """Removes a shipping adjustment previously applied to an order or draft.

        Args:
            orderId (string): Unique identifier of the order.
            updateMode (string): "ApplyToOriginal", "ApplyToDraft" or "ApplyAndCommit".
            version (string): Order version, used for concurrency checking.
        Returns:
            Order
        Raises:
            ApiException
        """
        resource = MozuUrl("/api/commerce/orders/{orderId}/adjustment/shipping?updatemode={updateMode}&version={version}", "DELETE", UrlLocation.TenantPod, False)
        for name, value in (("orderId", orderId), ("updateMode", updateMode), ("version", version)):
            resource.formatUrl(name, value)
        self.client.withResourceUrl(resource).execute()
        return self.client.result()

    def removeAdjustment(self, orderId, updateMode = None, version = None):
        """Removes a price adjustment from the specified order.

        Args:
            orderId (string): Unique identifier of the order.
            updateMode (string): "ApplyToOriginal", "ApplyToDraft" or "ApplyAndCommit".
            version (string): Order version, used for concurrency checking.
        Returns:
            Order
        Raises:
            ApiException
        """
        resource = MozuUrl("/api/commerce/orders/{orderId}/adjustment?updatemode={updateMode}&version={version}", "DELETE", UrlLocation.TenantPod, False)
        for name, value in (("orderId", orderId), ("updateMode", updateMode), ("version", version)):
            resource.formatUrl(name, value)
        self.client.withResourceUrl(resource).execute()
        return self.client.result()
|
nilq/baby-python
|
python
|
import os
import os.path as osp
import pickle
import sys
import time
# Make the repository root importable when this file is executed directly
# (two levels up from this file's directory).
project_root = os.path.abspath ( os.path.join ( os.path.dirname ( __file__ ), '..', '..' ) )
if __name__ == '__main__':
    if project_root not in sys.path:
        sys.path.append ( project_root )
import coloredlogs, logging
# Module-level logger with colored DEBUG-level console output.
logger = logging.getLogger ( __name__ )
coloredlogs.install ( level='DEBUG', logger=logger )
from src.models.model_config import model_cfg
from tqdm import tqdm
from torch.utils.data import DataLoader, Subset
from src.m_utils.base_dataset import BaseDataset, PreprocessedDataset
from src.models.estimate3d import MultiEstimator
from src.m_utils.evaluate import numpify
from src.m_utils.mem_dataset import MemDataset
def export(model, loader, is_info_dicts=False, show=False):
    """Run 3-D pose estimation over every frame yielded by *loader*.

    Relies on the module-global ``camera_parameter`` being set before the
    call (done in the ``__main__`` block of this script).

    Args:
        model: a MultiEstimator instance.
        loader: DataLoader yielding either preprocessed info dicts
            (``is_info_dicts=True``) or raw per-camera image batches.
        is_info_dicts: whether *loader* yields preprocessed 2-D pose dicts
            instead of images.
        show: forwarded to the estimator for visualization.

    Returns:
        A list with one 3-D pose estimation result per frame.
    """
    pose_list = []
    for img_id, imgs in enumerate(tqdm(loader)):
        if is_info_dicts:
            # Preprocessed path: wrap the info dicts in an in-memory dataset
            # and run the (internal) 3-D estimation step directly.
            info_dicts = numpify(imgs)
            model.dataset = MemDataset(info_dict=info_dicts,
                                       camera_parameter=camera_parameter,
                                       template_name='Unified')
            poses3d = model._estimate3d(0, show=show)
        else:
            # Raw-image path: strip the batch dimension from every camera
            # view and let the model run its full 2-D + 3-D pipeline.
            this_imgs = [img_batch.squeeze().numpy() for img_batch in imgs]
            poses3d = model.predict(imgs=this_imgs,
                                    camera_parameter=camera_parameter,
                                    template_name='Unified',
                                    show=show, plt_id=img_id)
        pose_list.append(poses3d)
    return pose_list
if __name__ == '__main__':
    import argparse

    cli = argparse.ArgumentParser ()
    cli.add_argument ( '-d', nargs='+', dest='datasets', required=True,
                       choices=['Shelf', 'Campus', 'ultimatum1', 'HD_ultimatum1', 'HD_pizza', 'ultimatumVga10',
                                'mafia2', '160224_mafia2'] )
    cli.add_argument ( '-dumped', nargs='+', dest='dumped_dir', default=None )
    args = cli.parse_args ()

    # model_cfg attribute names holding (dataset path, frame range) for
    # each supported dataset.
    DATASET_ATTRS = {
        'Shelf': ('shelf_path', 'shelf_range'),
        'Campus': ('campus_path', 'campus_range'),
        'ultimatum1': ('ultimatum1_path', 'ultimatum1_range'),
        'ultimatumVga10': ('ultimatum1Vga10_path', 'ultimatum1_range'),
        'HD_ultimatum1': ('HD_ultimatum1_path', 'HD_ultimatum1_range'),
        'HD_pizza': ('pizza_path', 'pizza_range'),
        'mafia2': ('mafia2_demo_path', 'mafia2_demo_range'),
        '160224_mafia2': ('mafia160244_path', 'mafia160244_range'),
    }

    test_model = MultiEstimator ( cfg=model_cfg )
    for dataset_idx, dataset_name in enumerate ( args.datasets ):
        model_cfg.testing_on = dataset_name
        if dataset_name not in DATASET_ATTRS:
            logger.error ( f"Unknown datasets name: {dataset_name}" )
            exit ( -1 )
        path_attr, range_attr = DATASET_ATTRS[dataset_name]
        dataset_path = getattr ( model_cfg, path_attr )
        test_range = getattr ( model_cfg, range_attr )
        if dataset_name in ('Shelf', 'Campus'):
            # Ground truth is stored alongside these two datasets.
            gt_path = dataset_path

        # read the camera parameter of this dataset
        with open ( osp.join ( dataset_path, 'camera_parameter.pickle' ), 'rb' ) as f:
            camera_parameter = pickle.load ( f )

        # using preprocessed 2D poses or using CPN to predict 2D pose
        if args.dumped_dir:
            test_dataset = PreprocessedDataset ( args.dumped_dir[dataset_idx] )
            logger.info ( f"Using pre-processed datasets {args.dumped_dir[dataset_idx]} for quicker evaluation" )
        else:
            test_dataset = BaseDataset ( dataset_path, test_range )
        test_loader = DataLoader ( test_dataset, batch_size=1, pin_memory=True, num_workers=6, shuffle=False )
        pose_in_range = export ( test_model, test_loader, is_info_dicts=bool ( args.dumped_dir ), show=True )

        # Dump the collected poses, stamped with dataset name and wall time.
        result_stamp = time.strftime ( str ( model_cfg.testing_on ) + "_%Y_%m_%d_%H_%M",
                                       time.localtime ( time.time () ) )
        with open ( osp.join ( model_cfg.root_dir, 'result', result_stamp + '.pkl' ), 'wb' ) as f:
            pickle.dump ( pose_in_range, f )
|
nilq/baby-python
|
python
|
# ----------------------------------------------------------------------
# AdministrativeDomain REST API
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from fastapi import APIRouter
# NOC modules
from noc.sa.models.administrativedomain import AdministrativeDomain
from noc.main.models.label import Label
from ..models.utils import LabelItem
from ..models.administrativedomain import (
DefaultAdministrativeDomainItem,
FormAdministrativeDomainItem,
)
from ..utils.ref import get_reference, get_reference_label
from ..utils.rest.model import ModelResourceAPI
from ..utils.rest.op import FilterExact, RefFilter, FuncFilter, FilterIn
router = APIRouter()
class AdministrativeDomainAPI(ModelResourceAPI[AdministrativeDomain]):
    """UI REST resource exposing AdministrativeDomain CRUD under
    /api/ui/administrativedomain.

    The three ``item_to_*`` converters render a model instance for the
    label, default (detail) and form representations respectively.
    """
    prefix = "/api/ui/administrativedomain"
    model = AdministrativeDomain
    # List-endpoint filters: exact id set, substring search on name,
    # exact name match, and parent reference.
    list_ops = [
        FilterIn("id"),
        FuncFilter("query", function=lambda qs, values: qs.filter(name__icontains=values[0])),
        FilterExact("name"),
        RefFilter("parent", model=AdministrativeDomain),
    ]
    sort_fields = ["name", "id", ("parent", "parent__name")]
    @classmethod
    def item_to_label(cls, item: AdministrativeDomain) -> LabelItem:
        """Render *item* as a compact label entry (for tree/lookup views)."""
        return LabelItem(
            id=str(item.id),
            label=str(item.name),
            parent=get_reference(item.parent),
            level=item.level,
            has_children=item.has_children,
        )
    @classmethod
    def item_to_default(cls, item: AdministrativeDomain) -> DefaultAdministrativeDomainItem:
        """Render *item* as the full read (detail) representation."""
        return DefaultAdministrativeDomainItem(
            id=str(item.id),
            name=str(item.name),
            parent=get_reference(item.parent),
            default_pool=get_reference(item.default_pool),
            bioseg_floating_name_template=get_reference(item.bioseg_floating_name_template),
            bioseg_floating_parent_segment=get_reference(item.bioseg_floating_parent_segment),
            labels=[get_reference_label(ii) for ii in Label.objects.filter(name__in=item.labels)],
            effective_labels=[
                get_reference_label(ii)
                for ii in Label.objects.filter(name__in=item.effective_labels)
            ],
            remote_system=get_reference(item.remote_system),
            remote_id=item.remote_id,
            bi_id=str(item.bi_id),
        )
    @classmethod
    def item_to_form(cls, item: AdministrativeDomain) -> FormAdministrativeDomainItem:
        """Render *item* as the editable (form) representation."""
        return FormAdministrativeDomainItem(
            name=str(item.name),
            parent=get_reference(item.parent),
            default_pool=get_reference(item.default_pool),
            bioseg_floating_name_template=get_reference(item.bioseg_floating_name_template),
            bioseg_floating_parent_segment=get_reference(item.bioseg_floating_parent_segment),
            labels=item.labels,
        )
# Install endpoints on the module-level router.
AdministrativeDomainAPI(router)
|
nilq/baby-python
|
python
|
import torchvision.transforms as transforms
import numpy as np
import h5py, os, random, math, torch
import os
import torch.utils.data as data
import cv2
from PIL import Image
import csv
def get_csv_content(path):
    """Parse a landmark CSV file.

    The first row is a header and is skipped.  Each data row has the form
    ``index, x1, y1, x2, y2, ...``.

    Args:
        path: Path to a file ending in ``.csv``.

    Returns:
        A list of dicts ``{'index': <str>, 'landmark': [[x, y], ...]}``
        with integer coordinates.  An empty/ header-only file yields ``[]``.

    Raises:
        ValueError: If *path* does not end in ``.csv``.
    """
    if not path.endswith('.csv'):
        raise ValueError(f"Wrong path, Got {path}")
    res_list = []
    with open(path) as f:
        f_csv = csv.reader(f)
        # Skip the header row; the default of None tolerates an empty file
        # (a bare next() would raise StopIteration).
        next(f_csv, None)
        for row in f_csv:
            # Coordinate pairs start at column 1: (row[c], row[c+1]).
            # NOTE: previously the pair index shadowed the row counter.
            landmarks = [[int(row[col]), int(row[col + 1])]
                         for col in range(1, len(row), 2)]
            res_list.append({'index': row[0], 'landmark': landmarks})
    return res_list
class AddGaussianNoise(object):
    """Tensor transform that adds i.i.d. Gaussian noise N(mean, std**2)."""

    def __init__(self, mean=0., std=1.):
        self.std = std
        self.mean = mean

    def __call__(self, tensor):
        # Sample noise matching the input's shape, scale, shift, add.
        noise = torch.randn(tensor.size()) * self.std
        return tensor + noise + self.mean

    def __repr__(self):
        return f"{self.__class__.__name__}(mean={self.mean}, std={self.std})"
class HandXray(data.Dataset):
    """Hand X-ray landmark dataset.

    Loads .jpg images from *pathDataset* and landmark annotations from the
    CSV at *label_path* (see ``get_csv_content``).  The first 100 sorted
    images form the test split; the remainder is the train split.  Each
    item provides the image, a per-landmark disc mask and x/y offset maps
    of radius ``R_ratio * max(size)`` pixels.
    """
    def __init__(self,
                 pathDataset='/home/quanquan/hand/hand/jpg/',
                 label_path='/home/quanquan/hand/hand/all.csv',
                 mode="Oneshot",
                 size=[384, 384],
                 R_ratio=0.05,
                 num_landmark=37,
                 datanum=0):
        # NOTE(review): `size` is a mutable default argument; safe only as
        # long as it is never mutated in place — confirm.
        self.num_landmark = num_landmark
        self.size = size
        self.pth_Image = os.path.join(pathDataset)
        self.datanum = datanum
        # Sorted list of all .jpg files; ordering fixes the train/test split.
        self.list = [x.path for x in os.scandir(self.pth_Image) if x.name.endswith(".jpg")]
        self.list.sort()
        # print(self.list)
        self.landmarks = get_csv_content(label_path)
        # First 100 samples are the test split, the rest is training.
        self.test_list = self.list[:100]
        self.landmarks_test = self.landmarks[:100]
        self.train_list = self.list[100:]
        self.landmarks_train = self.landmarks[100:]
        # Optionally cap the training split to `datanum` samples.
        if self.datanum > 0:
            self.train_list = self.train_list[:datanum]
            self.landmarks_train = self.landmarks_train[:datanum]
        if mode in ["Oneshot", "Train"]:
            self.istrain = True
        elif mode in ["Test1", "Test"]:
            self.istrain = False
        else:
            raise NotImplementedError
        # Base transform: resize -> tensor -> (identity) normalization.
        normalize = transforms.Normalize([0.], [1.])
        transformList = []
        transformList.append(transforms.Resize(self.size))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        self.transform = transforms.Compose(transformList)
        self.transform_resize = transforms.Resize(self.size)
        self.transform_tensor = transforms.ToTensor()
        # transforms.RandomChoice(transforms)
        # transforms.RandomApply(transforms, p=0.5)
        # transforms.RandomOrder()
        # Heavier augmentation pipeline (blur/jitter applied with p=0.5,
        # plus additive Gaussian noise).
        self.extra_aug_transform = transforms.Compose([
            transforms.Resize(self.size),
            transforms.RandomApply([
                transforms.GaussianBlur(3, sigma=(0.1, 2.0)),
                transforms.ColorJitter(brightness=0.15, contrast=0.25)], p=0.5),
            transforms.ToTensor(),
            AddGaussianNoise(0., 1.),
            transforms.Normalize([0], [1]),
        ])
        # Lighter augmentation: color jitter only.
        self.aug_transform = transforms.Compose([
            transforms.ColorJitter(brightness=0.15, contrast=0.25),
            transforms.ToTensor(),
            transforms.Normalize([0], [1])
        ]
        )
        self.mode = mode
        self.base = 16
        # gen mask: binary disc template of radius `Radius` inside a
        # (2*Radius, 2*Radius) window.
        self.Radius = int(max(size) * R_ratio)
        mask = torch.zeros(2*self.Radius, 2*self.Radius, dtype=torch.float)
        # NOTE(review): `guassian_mask` is created but never filled — it
        # stays all zeros and appears unused.
        guassian_mask = torch.zeros(2*self.Radius, 2*self.Radius, dtype=torch.float)
        for i in range(2*self.Radius):
            for j in range(2*self.Radius):
                distance = np.linalg.norm([i+1 - self.Radius, j+1 - self.Radius])
                if distance < self.Radius:
                    mask[i][j] = 1
        self.mask = mask
        self.guassian_mask = guassian_mask
        # gen offset: per-pixel displacement from the disc center,
        # normalized to [-1, 1] and masked to the disc.
        self.offset_x = torch.zeros(2*self.Radius, 2*self.Radius, dtype=torch.float)
        self.offset_y = torch.zeros(2*self.Radius, 2*self.Radius, dtype=torch.float)
        for i in range(2*self.Radius):
            self.offset_x[:, i] = self.Radius - i
            self.offset_y[i, :] = self.Radius - i
        self.offset_x = self.offset_x * self.mask / self.Radius
        self.offset_y = self.offset_y * self.mask / self.Radius
        assert len(self) > 0
    def return_img_name(self, index):
        """Return the bare file name of training sample *index*."""
        p, name = os.path.split(self.train_list[index])
        return name
    def return_name(self, index):
        """Alias for return_img_name."""
        return self.return_img_name(index)
    def __getitem__(self, index):
        """
        Return a dict with the transformed image, per-landmark mask and
        offset maps, the resized landmark list, original image shape and
        the sample index.

        landmark = {'index': index, 'landmark': [(x,y), (x,y), (x,y), ...]}
        """
        # NOTE(review): indexes `train_list` regardless of `istrain` —
        # presumably test-mode access goes elsewhere; confirm.
        item_path = self.train_list[index]
        ori_img = Image.open(item_path)
        img_shape = np.array(ori_img).shape[::-1] # shape: (y, x) or (long, width)
        # if self.transform is not None:
        item = self.transform(ori_img.convert('RGB'))
        # import ipdb; ipdb.set_trace()
        landmark_list = self.resize_landmark(self.landmarks_train[index]['landmark'],
                                             img_shape) # [1:] for discarding the index
        # return item, landmark, img_shape
        # return {'img': item, 'landmark': landmark, 'img_shape': img_shape}
        y, x = item.shape[-2], item.shape[-1]
        mask = torch.zeros((self.num_landmark, y, x), dtype=torch.float)
        offset_x = torch.zeros((self.num_landmark, y, x), dtype=torch.float)
        offset_y = torch.zeros((self.num_landmark, y, x), dtype=torch.float)
        # Stamp the disc template (and offset maps) around each landmark,
        # clipped to the image bounds.
        # NOTE(review): when clipping occurs at the left/top edge the
        # template slice is taken from its top-left corner rather than the
        # matching clipped region, so edge discs are shifted — confirm
        # whether this is intended.
        for i, landmark in enumerate(landmark_list):
            margin_x_left = max(0, landmark[0] - self.Radius)
            margin_x_right = min(x, landmark[0] + self.Radius)
            margin_y_bottom = max(0, landmark[1] - self.Radius)
            margin_y_top = min(y, landmark[1] + self.Radius)
            mask[i][margin_y_bottom:margin_y_top, margin_x_left:margin_x_right] = \
                self.mask[0:margin_y_top-margin_y_bottom, 0:margin_x_right-margin_x_left]
            offset_x[i][margin_y_bottom:margin_y_top, margin_x_left:margin_x_right] = \
                self.offset_x[0:margin_y_top-margin_y_bottom, 0:margin_x_right-margin_x_left]
            offset_y[i][margin_y_bottom:margin_y_top, margin_x_left:margin_x_right] = \
                self.offset_y[0:margin_y_top-margin_y_bottom, 0:margin_x_right-margin_x_left]
        return {'img':item, 'mask':mask, 'offset_x': offset_x, 'offset_y':offset_y,
                'landmark_list': landmark_list, "img_shape": img_shape, "index": index}
    def resize_landmark(self, landmark, img_shape):
        """
        Rescale landmark coordinates in place from the original image shape
        to ``self.size`` and return the list.

        landmark = ['index': index, 'landmark': [(x,y), (x,y), (x,y), ...]]
        """
        for i in range(len(landmark)):
            # print("[Trans Debug] ", landmark[i], img_shape)
            landmark[i][0] = int(landmark[i][0] * self.size[0] / float(img_shape[0]))
            landmark[i][1] = int(landmark[i][1] * self.size[1] / float(img_shape[1]))
        return landmark
    def __len__(self):
        # Split length depends on the mode chosen at construction time.
        if self.istrain:
            return len(self.train_list)
        else:
            return len(self.test_list)
def TestHandXray(*args, **kwargs):
    """Convenience constructor: a HandXray dataset in "Test" mode."""
    return HandXray(*args, mode="Test", **kwargs)
if __name__ == '__main__':
    # Manual smoke test: build the dataset from hard-coded local paths and
    # step through the loader under a debugger.
    from torch.utils.data import DataLoader
    dataset = HandXray(pathDataset='/home1/quanquan/datasets/hand/hand/jpg/', label_path='/home1/quanquan/datasets/hand/hand/all.csv')
    # dataset.landmarks
    # print()
    img = dataset.__getitem__(0)
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    for data in loader:
        # Drop into ipdb on every batch for interactive inspection.
        import ipdb; ipdb.set_trace()
|
nilq/baby-python
|
python
|
# Import the libraries.
import googlemaps
import json
from GoogleMapsAPIKey import get_my_key
# Define the API key (read from a local helper module, not hard-coded here).
API_KEY = get_my_key()
# Define the client.
gmaps = googlemaps.Client(key = API_KEY)
# Define parameters. Origin/destination each accept three formats:
# Method One Place ID: 'place_id:ChIJ7-bxRDmr3oARawtVV_lGLtw'
# Method Two Lat/Lng: '32.961951,-117.154038'
# Method Three Address: '7845 Highland Village Pl, San Diego CA, 92129'
place_origin = 'place_id:ChIJ7-bxRDmr3oARawtVV_lGLtw' # This is the Place ID for the San Diego Airport
place_destin = '32.961951,-117.154038' # This is the Geolocation of Peet's Coffee & Tea in San Diego
# Make a request for directions.
# NOTE(review): 'en-Au' looks like a typo for the 'en-AU' language code —
# confirm against the Directions API docs.
direction_results = gmaps.directions(origin = place_origin, # the origin point
                                     destination = place_destin, # the destination point
                                     mode = 'driving', # method of transportation
                                     alternatives = True, # Get more than one possible route
                                     avoid = 'tolls', # What do we want to avoid
                                     language = 'en-Au', # The language we want our response in
                                     units = 'metric') # System of unit measurement
# Optional parameters kept for reference (currently unused):
# traffic_model = 'optimistic'
# arrival_time = 1546301024
# departure_time = 1546301024
# This setting affects the value returned in the duration_in_traffic
# Can only be used with the departure time parameter.
print(json.dumps(direction_results, indent = 3))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <georg.brandl@frm2.tum.de>
#
# *****************************************************************************
"""NICOS GUI command input widgets."""
from nicos.clients.gui.utils import loadUi
from nicos.guisupport.qt import QAbstractSpinBox, QColor, Qt, QWidget, \
pyqtSignal
from nicos.guisupport.typedvalue import DeviceParamEdit
from nicos.guisupport.utils import DoubleValidator, setBackgroundColor
from nicos.utils import findResource, formatDuration
# Background color used to flag widgets that hold invalid input.
invalid = QColor('#ffcccc')
def isFloat(ctl, minval=None, maxval=None, conv=float):
    """Check that *ctl*'s text parses via *conv* and lies within the
    optional [minval, maxval] bounds."""
    try:
        value = conv(ctl.text())
    except ValueError:
        return False
    if minval is not None and value < minval:
        return False
    return not (maxval is not None and value > maxval)
def isInt(ctl, minval=None, maxval=None):
    """Validate that *ctl* holds an integer within the optional bounds."""
    return isFloat(ctl, minval=minval, maxval=maxval, conv=int)
class Cmdlet(QWidget):
    """Base class for command input "cmdlets".

    Each cmdlet is a small widget that edits the parameters of one command
    and can generate its command line.  Subclasses set ``name`` and
    ``category`` and override getValues/setValues, isValid and generate.
    """
    # Display name and category the cmdlet is listed under.
    name = ''
    category = ''
    # Emitted by the up/down/remove buttons loaded from buttons.ui.
    cmdletUp = pyqtSignal()
    cmdletDown = pyqtSignal()
    cmdletRemove = pyqtSignal()
    # Emitted whenever any value in the cmdlet changes (see changed()).
    valueModified = pyqtSignal()
    def __init__(self, parent, client, uifile):
        self.client = client
        QWidget.__init__(self, parent)
        # Load the cmdlet-specific UI, then the shared button strip.
        loadUi(self, uifile)
        loadUi(self.buttons, 'cmdlets/buttons.ui')
        self.buttons.upBtn.clicked.connect(self.cmdletUp)
        self.buttons.downBtn.clicked.connect(self.cmdletDown)
        self.buttons.delBtn.clicked.connect(self.cmdletRemove)
    def removeSelf(self):
        """Detach this cmdlet from its parent layout and hide it."""
        self.parent().layout().removeWidget(self)
        self.hide()
    def changed(self, *args):
        """Should be called whenever any value in the cmdlet changes."""
        self.valueModified.emit()
    def getValues(self):
        """Return a dict with the values of the cmdlet.
        Values should have a name that is the same for the same logical
        value in multiple cmdlets, e.g. "dev" for the device in a command.
        """
        return {}
    def _getDeviceList(self, special_clause=''):
        """Helper for getting a list of devices for manipulation."""
        exp = getattr(self.parent(), 'expertmode', False)
        if exp:
            clause = special_clause
        else:
            # Non-expert mode: restrict to explicitly created devices and
            # their AutoDevice sub-devices.
            clause = ('(dn in session.explicit_devices or '
                      '("nicos.core.mixins.AutoDevice" in d.classes and '
                      'dn.split(".")[0] in session.explicit_devices))')
            if special_clause:
                clause += ' and ' + special_clause
        # special construction to get AutoDevices like slit.centerx which is
        # useful to make scans over
        return self.client.getDeviceList('nicos.core.device.Moveable',
                                         only_explicit=False,
                                         special_clause=clause)
    def _getDeviceRepr(self, devname):
        """Return bare ``dev`` if the device is in the NICOS user namespace,
        else ``'dev'`` in quotes.
        """
        if self.client.eval(devname, None) is None:
            return repr(devname)
        return devname
    def _setDevice(self, values):
        """Helper for setValues for setting a device combo box."""
        if 'dev' in values:
            idx = self.device.findText(values['dev'])
            if idx > -1:
                self.device.setCurrentIndex(idx)
    def setValues(self, values):
        """Set values of the cmdlet with values from the argument.
        Unknown values must be ignored.
        """
    def markValid(self, ctl, condition):
        """Return boolean condition, and also mark the offending widget.
        For use in isValid().
        """
        if condition:
            setBackgroundColor(ctl, Qt.white)
        else:
            setBackgroundColor(ctl, invalid)
        if isinstance(ctl, QAbstractSpinBox):
            # also mark the inner line-edit
            return self.markValid(ctl.lineEdit(), condition)
        return condition
    def isValid(self):
        """Check if all entered data is valid.
        This method can change widget styles to indicate invalid data with
        the markValid() method if wanted.
        """
        return True
    def generate(self, mode):
        """Generate code for the commandlet.
        *mode* is 'python' or 'simple'.
        Should generate a string of lines, complete with newlines.
        """
        return ''
class Move(Cmdlet):
    """Cmdlet generating a move/maw command for one or more devices."""
    name = 'Move'
    category = 'Device'
    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, 'cmdlets/move.ui')
        # multiList holds one (device, target) entry row per device to move.
        self.multiList.entryAdded.connect(self.on_entryAdded)
        self.multiList.uifile = findResource('nicos/clients/gui/cmdlets/move_one.ui')
        self.waitBox.stateChanged.connect(self.changed)
    def on_entryAdded(self, entry):
        """Wire up a freshly added (device, target) entry row."""
        def on_device_change(text):
            # Keep the target editor bound to the selected device.
            entry.target.dev = text
            self.changed()
        entry.device.addItems(self._getDeviceList())
        on_device_change(entry.device.currentText())
        entry.device.currentIndexChanged['QString'].connect(on_device_change)
        entry.target.setClient(self.client)
        entry.target.valueModified.connect(self.changed)
    def _setDevice(self, values):
        """Helper for setValues for setting a device combo box."""
        if 'dev' in values:
            idx = self.multiList.entry(0).device.findText(values['dev'])
            if idx > -1:
                self.multiList.entry(0).device.setCurrentIndex(idx)
    def getValues(self):
        # Only the first entry is exported for sharing with other cmdlets.
        return {'dev': self.multiList.entry(0).device.currentText(),
                'moveto': self.multiList.entry(0).target.getValue()}
    def setValues(self, values):
        self._setDevice(values)
        if 'moveto' in values:
            self.multiList.entry(0).target.setValue(values['moveto'])
    def isValid(self):
        return True
    def generate(self, mode):
        # 'maw' waits for the move to finish; 'move' does not.
        cmd = 'maw' if self.waitBox.isChecked() else 'move'
        if mode == 'simple':
            return cmd + ''.join(' %s %r' % (frm.device.currentText(),
                                             frm.target.getValue())
                                 for frm in self.multiList.entries())
        return cmd + '(' + ', '.join('%s, %r' % (
            self._getDeviceRepr(frm.device.currentText()), frm.target.getValue())
            for frm in self.multiList.entries()) + ')'
class Count(Cmdlet):
    """Cmdlet generating a count() command."""

    name = 'Count'
    category = 'Scan'

    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, 'cmdlets/count.ui')
        self.seconds.valueChanged.connect(self.changed)

    def getValues(self):
        return {'counttime': self.seconds.value()}

    def setValues(self, values):
        if 'counttime' in values:
            self.seconds.setValue(values['counttime'])

    def isValid(self):
        # counting time must be strictly positive
        return self.markValid(self.seconds, self.seconds.value() > 0)

    def generate(self, mode):
        tmpl = 'count %(counttime)s' if mode == 'simple' \
            else 'count(%(counttime)s)'
        return tmpl % self.getValues()
class CommonScan(Cmdlet):
    """Shared behavior for step-scan cmdlets.

    Subclasses set *cmdname* and *uiName* and provide on_range_change()
    and _getContParams().
    """

    cmdname = ''
    uiName = ''

    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, self.uiName)
        self.device.addItems(self._getDeviceList())
        self.on_device_change(self.device.currentText())
        self.device.currentIndexChanged[str].connect(self.on_device_change)
        self.start.setValidator(DoubleValidator(self))
        self.step.setValidator(DoubleValidator(self))
        self.start.textChanged.connect(self.on_range_change)
        self.step.textChanged.connect(self.on_range_change)
        self.numpoints.valueChanged.connect(self.on_range_change)
        self.seconds.valueChanged.connect(self.changed)
        self.contBox.toggled.connect(self.changed)

    def on_device_change(self, text):
        # show the device's unit next to the input fields
        unit = self.client.getDeviceParam(text, 'unit') or ''
        self.unit1.setText(unit)
        self.unit2.setText(unit)
        self.changed()

    def getValues(self):
        return {'dev': self.device.currentText(),
                'scanstart': self.start.text(),
                'scanstep': self.step.text(),
                'scanpoints': self.numpoints.value(),
                'scancont': self.contBox.isChecked(),
                'counttime': self.seconds.value()}

    def setValues(self, values):
        self._setDevice(values)
        for key, widget in (('scanstart', self.start),
                            ('scanstep', self.step)):
            if key in values:
                widget.setText(values[key])
        if 'scanpoints' in values:
            self.numpoints.setValue(values['scanpoints'])
        if 'counttime' in values:
            self.seconds.setValue(int(values['counttime']))
        if 'scancont' in values:
            self.contBox.setChecked(values['scancont'])

    def isValid(self):
        # NOTE: cannot use "return markValid() and markValid() and ..."
        # since that short-circuits and would skip marking all but the
        # first invalid control
        checks = [
            self.markValid(self.start, isFloat(self.start)),
            self.markValid(self.step, isFloat(self.step)),
            self.markValid(self.seconds, self.seconds.value() > 0),
        ]
        return all(checks)

    def generate(self, mode):
        values = self.getValues()
        if self.contBox.isChecked():
            # continuous scan: emit a contscan command instead
            start, end, speed, delta = self._getContParams(values)
            if mode == 'simple':
                return 'contscan %s %s %s %s %s' % (values['dev'], start, end,
                                                    speed, delta)
            values['dev'] = self._getDeviceRepr(values['dev'])
            return 'contscan(%s, %s, %s, %s, %s)' % (values['dev'], start, end,
                                                     speed, delta)
        if mode == 'simple':
            return self.cmdname + ' %(dev)s %(scanstart)s %(scanstep)s ' \
                '%(scanpoints)s %(counttime)s' % values
        values['dev'] = self._getDeviceRepr(values['dev'])
        return self.cmdname + '(%(dev)s, %(scanstart)s, %(scanstep)s, ' \
            '%(scanpoints)s, %(counttime)s)' % values
class Scan(CommonScan):
    """Step scan starting at a given position."""

    name = 'Scan'
    category = 'Scan'
    cmdname = 'scan'
    uiName = 'cmdlets/scan.ui'

    def on_range_change(self, *args):
        # display the computed end position, if the inputs parse
        try:
            begin = float(self.start.text())
            width = float(self.step.text())
        except ValueError:
            label = ''
        else:
            n = self.numpoints.value()
            label = '%.3f %s' % (begin + (n - 1) * width, self.unit1.text())
        self.endPos.setText(label)
        self.changed()

    def _getContParams(self, values):
        """Map step-scan values onto (start, end, speed, delta) for contscan."""
        begin = float(values['scanstart'])
        width = float(values['scanstep'])
        n = float(values['scanpoints'])
        ctime = values['counttime']
        end = begin + (n - 1) * width
        # speed chosen so the whole range takes n * ctime seconds
        return begin, end, abs(end - begin) / n / ctime, ctime
class CScan(CommonScan):
    """Step scan centered around a given position."""

    name = 'Centered Scan'
    category = 'Scan'
    cmdname = 'cscan'
    uiName = 'cmdlets/cscan.ui'

    def on_range_change(self, *args):
        n = self.numpoints.value()
        # display the computed edge positions, if the inputs parse
        try:
            center = float(self.start.text())
            width = float(self.step.text())
        except ValueError:
            label = ''
        else:
            label = '%.3f - %.3f %s' % (center - n * width,
                                        center + n * width,
                                        self.unit1.text())
        self.edgePos.setText(label)
        # a cscan counts n points on either side of the center
        self.totalPoints.setText('Total: %d points' % (2 * n + 1))
        self.changed()

    def _getContParams(self, values):
        """Map cscan values onto (start, end, speed, delta) for contscan."""
        center = float(values['scanstart'])
        width = float(values['scanstep'])
        n = float(values['scanpoints'])
        ctime = values['counttime']
        begin = center - n * width
        end = center + n * width
        return begin, end, abs(end - begin) / (2 * n + 1) / ctime, ctime
class TimeScan(Cmdlet):
    """Cmdlet generating a timescan() command."""

    name = 'Time scan'
    category = 'Scan'

    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, 'cmdlets/timescan.ui')
        self.seconds.valueChanged.connect(self.changed)

    def on_infBox_toggled(self, on):
        # an open-ended scan has no fixed number of points
        self.numpoints.setEnabled(not on)
        self.changed()

    def getValues(self):
        return {'scanpoints': self.numpoints.value(),
                'counttime': self.seconds.value(),
                'countinf': self.infBox.isChecked()}

    def setValues(self, values):
        if 'scanpoints' in values:
            self.numpoints.setValue(values['scanpoints'])
        if 'counttime' in values:
            self.seconds.setValue(int(values['counttime']))
        if 'countinf' in values:
            self.infBox.setChecked(bool(values['countinf']))

    def isValid(self):
        return self.markValid(self.seconds, self.seconds.value() > 0)

    def generate(self, mode):
        # -1 points means "scan until stopped"
        npoints = -1 if self.infBox.isChecked() else self.numpoints.value()
        counttime = self.seconds.value()
        tmpl = 'timescan %s %s' if mode == 'simple' else 'timescan(%s, %s)'
        return tmpl % (npoints, counttime)
class ContScan(Cmdlet):
    """Cmdlet generating a contscan() command."""

    name = 'Continuous Scan'
    category = 'Scan'

    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, 'cmdlets/contscan.ui')
        # only devices with a speed parameter can be scanned continuously
        self.device.addItems(self._getDeviceList('hasattr(d, "speed")'))
        self.on_device_change(self.device.currentText())
        self.device.currentIndexChanged[str].connect(self.on_device_change)
        for box in (self.start, self.stop, self.speed, self.delta):
            box.setValidator(DoubleValidator(self))
            box.textChanged.connect(self.on_range_change)

    def on_range_change(self):
        # display number of points and duration, if the inputs parse
        try:
            distance = abs(float(self.start.text()) - float(self.stop.text()))
            seconds = distance / float(self.speed.text())
            points = int(seconds / float(self.delta.text()))
            self.totalLabel.setText('Total: %d points, %s' %
                                    (points, formatDuration(seconds)))
        except (ValueError, ArithmeticError):
            # unparseable input or zero speed/delta
            self.totalLabel.setText('Total:')
        self.changed()

    def on_device_change(self, text):
        unit = self.client.getDeviceParam(text, 'unit')
        value = self.client.getDeviceParam(text, 'value')
        fmtstr = self.client.getDeviceParam(text, 'fmtstr')
        try:
            # prefill the start field with the device's current position
            self.start.setText(fmtstr % value)
        except Exception:
            pass
        self.unit1.setText(unit or '')
        self.unit2.setText(unit or '')
        self.unit3.setText((unit or '') + '/second')
        self.changed()

    def getValues(self):
        return {'dev': self.device.currentText(),
                'scanstart': self.start.text(),
                'scanend': self.stop.text(),
                'devspeed': self.speed.text(),
                'counttime': float(self.delta.text())}

    def setValues(self, values):
        self._setDevice(values)
        for key, widget in (('scanstart', self.start),
                            ('scanend', self.stop),
                            ('devspeed', self.speed)):
            if key in values:
                widget.setText(values[key])
        if 'counttime' in values:
            self.delta.setText(str(values['counttime']))

    def isValid(self):
        # evaluate all checks so every offending widget gets marked
        checks = [
            self.markValid(self.start, isFloat(self.start)),
            self.markValid(self.stop, isFloat(self.stop)),
            self.markValid(self.speed, isFloat(self.speed, 0.00001)),
            self.markValid(self.delta, isFloat(self.delta, 0.05)),
        ]
        return all(checks)

    def generate(self, mode):
        values = self.getValues()
        if mode == 'simple':
            return 'contscan %(dev)s %(scanstart)s %(scanend)s %(devspeed)s ' \
                '%(counttime)s' % values
        values['dev'] = self._getDeviceRepr(values['dev'])
        return 'contscan(%(dev)s, %(scanstart)s, %(scanend)s, %(devspeed)s, ' \
            '%(counttime)s)' % values
class Sleep(Cmdlet):
    """Cmdlet generating a sleep() command."""

    name = 'Sleep'
    category = 'Other'

    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, 'cmdlets/sleep.ui')
        self.seconds.valueChanged.connect(self.changed)

    def getValues(self):
        return {'sleeptime': self.seconds.value()}

    def setValues(self, values):
        if 'sleeptime' in values:
            self.seconds.setValue(values['sleeptime'])

    def isValid(self):
        # a sleep of zero or negative duration makes no sense
        return self.markValid(self.seconds, self.seconds.value() > 0)

    def generate(self, mode):
        tmpl = 'sleep %(sleeptime)s' if mode == 'simple' \
            else 'sleep(%(sleeptime)s)'
        return tmpl % self.getValues()
class Configure(Cmdlet):
    """Cmdlet generating a set() command for a device parameter."""

    name = 'Configure'
    category = 'Device'

    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, 'cmdlets/configure.ui')
        self.paraminfo = {}
        self.paramvalues = {}
        self.target = DeviceParamEdit(self)
        self.target.setClient(self.client)
        self.target.valueModified.connect(self.changed)
        self.hlayout.insertWidget(5, self.target)
        self.device.addItems(self._getDeviceList())
        self.on_device_change(self.device.currentText())
        self.device.currentIndexChanged[str].connect(self.on_device_change)
        self.parameter.currentIndexChanged[str].connect(
            self.on_parameter_change)

    def on_device_change(self, text):
        self.paraminfo = self.client.getDeviceParamInfo(text)
        self.paramvalues = dict(self.client.getDeviceParams(text))
        # offer only parameters the user is allowed to change
        settable = [pname for pname, info in self.paraminfo.items()
                    if info['settable'] and info['userparam']]
        self.parameter.clear()
        self.parameter.addItems(sorted(settable))
        self.target.dev = text
        self.on_parameter_change(self.parameter.currentText())
        self.changed()

    def on_parameter_change(self, text):
        self.target.param = text
        self.changed()

    def getValues(self):
        return {'dev': self.device.currentText(),
                'param': self.parameter.currentText(),
                'paramvalue': self.target.getValue()}

    def setValues(self, values):
        self._setDevice(values)
        if 'param' in values:
            index = self.parameter.findText(values['param'])
            if index >= 0:
                self.parameter.setCurrentIndex(index)
        # DeviceValueEdit doesn't support setValue (yet), so a
        # 'paramvalue' entry is ignored here

    def isValid(self):
        return self.markValid(self.target, True)

    def generate(self, mode):
        values = self.getValues()
        if mode == 'simple':
            return 'set %(dev)s %(param)s %(paramvalue)r' % values
        values['dev'] = self._getDeviceRepr(values['dev'])
        return 'set(%(dev)s, %(param)r, %(paramvalue)r)' % values
class NewSample(Cmdlet):
    """Cmdlet generating a NewSample() command."""

    name = 'New sample'
    category = 'Other'

    def __init__(self, parent, client):
        Cmdlet.__init__(self, parent, client, 'cmdlets/sample.ui')
        self.samplename.textChanged.connect(self.changed)

    def getValues(self):
        return {'samplename': self.samplename.text()}

    def setValues(self, values):
        if 'samplename' in values:
            self.samplename.setText(values['samplename'])

    def generate(self, mode):
        tmpl = 'NewSample %(samplename)r' if mode == 'simple' \
            else 'NewSample(%(samplename)r)'
        return tmpl % self.getValues()
# registry of available cmdlet classes and their category names,
# kept in registration order
all_cmdlets = []
all_categories = []


def register(cmdlet):
    """Add *cmdlet* to the registry, replacing any registered base class.

    Registering a subclass of an already registered cmdlet overrides the
    original entry, which allows customizing standard cmdlets.
    """
    for index, registered in enumerate(all_cmdlets):
        if issubclass(cmdlet, registered):
            all_cmdlets[index] = cmdlet
            break
    else:
        all_cmdlets.append(cmdlet)
    if cmdlet.category not in all_categories:
        all_categories.append(cmdlet.category)
# make the standard cmdlets available by default
for cmdlet in (Move, Count, Scan, CScan, TimeScan, ContScan,
               Sleep, Configure, NewSample):
    register(cmdlet)
|
nilq/baby-python
|
python
|
from internos.taskapp.celery import app
import json
import pytz
import httplib
import datetime
from django.utils import timezone
from time import mktime
from internos.backends.utils import get_data
@app.task
def sync_partner_data():
    """Fetch the partner list from the eTools API and upsert local
    PartnerOrganization records, matched on etl_id.
    """
    from internos.etools.models import PartnerOrganization
    # NOTE(review): the API token is hard-coded here (and in the sibling
    # tasks); it should come from settings or the environment
    partners = json.loads(get_data(
        'etools.unicef.org', '/api/v2/partners/',
        'Token 36f06547a4b930c6608e503db49f1e45305351c2'))
    # API fields copied verbatim onto the model
    fields = ('rating', 'last_assessment_date', 'short_name', 'postal_code',
              'basis_for_risk_rating', 'city', 'reported_cy', 'total_ct_ytd',
              'vendor_number', 'hidden', 'cso_type', 'net_ct_cy',
              'phone_number', 'shared_with', 'partner_type', 'address',
              'total_ct_cy', 'name', 'total_ct_cp', 'country', 'email',
              'deleted_flag', 'street_address')
    for item in partners:
        partner, created = PartnerOrganization.objects.get_or_create(etl_id=item['id'])
        for field in fields:
            setattr(partner, field, item[field])
        partner.save()
@app.task
def sync_individual_partner_data():
    """Refresh every PartnerOrganization from its detail API endpoint.

    Errors for a single partner are printed and skipped so one bad record
    does not abort the whole sync.
    """
    from internos.etools.models import PartnerOrganization
    # API fields copied verbatim onto the model (duplicated assignments of
    # total_ct_cp/total_ct_cy/net_ct_cy/reported_cy/total_ct_ytd in the
    # original were redundant and have been dropped)
    fields = ('rating', 'last_assessment_date', 'short_name', 'postal_code',
              'basis_for_risk_rating', 'city', 'reported_cy', 'total_ct_ytd',
              'vendor_number', 'hidden', 'cso_type', 'net_ct_cy',
              'phone_number', 'shared_with', 'partner_type', 'address',
              'total_ct_cy', 'name', 'total_ct_cp', 'country', 'email',
              'deleted_flag', 'street_address', 'staff_members',
              'assessments', 'planned_engagement', 'hact_values',
              'hact_min_requirements', 'planned_visits',
              'core_values_assessments', 'flags', 'type_of_assessment')
    for partner in PartnerOrganization.objects.all():
        item = get_data('etools.unicef.org',
                        '/api/v2/partners/{}/'.format(partner.etl_id),
                        'Token 36f06547a4b930c6608e503db49f1e45305351c2')
        try:
            item = json.loads(item)
            for field in fields:
                setattr(partner, field, item[field])
            # date fields arrive as 'YYYY-MM-DD' strings; parse if present
            if item['last_assessment_date']:
                partner.last_assessment_date = datetime.datetime.strptime(
                    item['last_assessment_date'], "%Y-%m-%d")
            if item['core_values_assessment_date']:
                partner.core_values_assessment_date = datetime.datetime.strptime(
                    item['core_values_assessment_date'], "%Y-%m-%d")
            partner.save()
        except Exception as ex:
            # best effort: report and continue with the next partner.
            # BUG FIX: `ex.message` does not exist on Python 3 exceptions and
            # itself raised AttributeError, aborting the task; print the
            # exception object instead
            print(item)
            print(ex)
            continue
@app.task
def sync_agreement_data():
    """Fetch agreements from the eTools API and upsert local Agreement
    records, linking them to their PartnerOrganization.
    """
    from internos.etools.models import Agreement, PartnerOrganization
    payload = get_data('etools.unicef.org', '/api/v2/agreements/',
                       'Token 36f06547a4b930c6608e503db49f1e45305351c2')
    for item in json.loads(payload):
        instance, created = Agreement.objects.get_or_create(etl_id=item['id'])
        instance.partner = PartnerOrganization.objects.get(etl_id=item['partner'])
        for field in ('country_programme', 'agreement_number', 'partner_name',
                      'agreement_type', 'end', 'start',
                      'signed_by_unicef_date', 'signed_by_partner_date',
                      'status', 'agreement_number_status',
                      'special_conditions_pca'):
            setattr(instance, field, item[field])
        instance.save()
@app.task
def sync_intervention_data():
    """Fetch interventions (PDs) from the eTools API, upsert local PCA
    records and link known locations by p-code.
    """
    from internos.etools.models import PCA
    from internos.locations.models import Location
    payload = get_data('etools.unicef.org', '/api/v2/interventions/',
                       'Token 36f06547a4b930c6608e503db49f1e45305351c2')
    # API fields copied verbatim onto the model
    fields = ('number', 'document_type', 'partner_name', 'status', 'title',
              'start', 'end', 'frs_total_frs_amt', 'unicef_cash',
              'cso_contribution', 'country_programme',
              'frs_earliest_start_date', 'frs_latest_end_date', 'sections',
              'section_names', 'cp_outputs', 'unicef_focal_points',
              'frs_total_intervention_amt', 'frs_total_outstanding_amt',
              'offices', 'actual_amount', 'offices_names',
              'total_unicef_budget', 'total_budget', 'metadata',
              'flagged_sections', 'budget_currency',
              'fr_currencies_are_consistent', 'all_currencies_are_consistent',
              'fr_currency', 'multi_curr_flag', 'location_p_codes', 'donors',
              'donor_codes', 'grants')
    for item in json.loads(payload):
        instance, created = PCA.objects.get_or_create(etl_id=item['id'])
        for field in fields:
            setattr(instance, field, item[field])
        # the model keeps 'end' and 'end_date' in sync
        instance.end_date = item['end']
        for p_code in item['location_p_codes']:
            try:
                instance.locations.add(Location.objects.filter(p_code=p_code).first())
            except Exception:
                # unknown p-code: skip silently, as before
                continue
        instance.save()
@app.task
def sync_intervention_individual_data(instance=None):
    """Refresh detail data (partner/agreement links, FR details) for all
    PCAs that have donors.

    The *instance* parameter is unused but kept for task-signature
    compatibility; the task iterates all matching PCAs itself.
    """
    from internos.etools.models import PCA, PartnerOrganization, Agreement
    for instance in PCA.objects.filter(donors__len__gt=0):
        # BUG FIX: etl_id is stored as an integer, so the original
        # '...' + instance.etl_id + '/' concatenation raised TypeError
        # before the try block; build the URL with format() like every
        # sibling sync task does
        item = get_data('etools.unicef.org',
                        '/api/v2/interventions/{}/'.format(instance.etl_id),
                        'Token 36f06547a4b930c6608e503db49f1e45305351c2')
        try:
            item = json.loads(item)
            instance.partner = PartnerOrganization.objects.get(etl_id=item['partner_id'])
            instance.agreement = Agreement.objects.get(etl_id=item['agreement'])
            for field in ('number', 'document_type', 'status', 'title',
                          'start', 'end'):
                setattr(instance, field, item[field])
            instance.end_date = item['end']
            instance.frs_details = item['frs_details']
            # NOTE(review): only the LAST fr's line_item_details is kept,
            # exactly as in the original loop — confirm this is intended
            for fr in item['frs_details']['frs']:
                instance.donors_set = fr['line_item_details']
            instance.save()
        except Exception as ex:
            # BUG FIX: `ex.message` does not exist on Python 3; print the
            # exception object so the loop keeps going as intended
            print(item)
            print(ex)
            continue
@app.task
def sync_audit_data():
    """Fetch audit engagements from the eTools API, upsert Engagement
    records and pull each engagement's detail data.
    """
    from internos.etools.models import Engagement, PartnerOrganization
    payload = get_data('etools.unicef.org',
                       '/api/audit/engagements/?page_size=1000',
                       'Token 36f06547a4b930c6608e503db49f1e45305351c2')
    for item in json.loads(payload)['results']:
        instance, created = Engagement.objects.get_or_create(id=int(item['id']))
        for field in ('unique_id', 'agreement', 'engagement_type',
                      'total_value', 'status', 'status_date'):
            setattr(instance, field, item[field])
        instance.partner = PartnerOrganization.objects.get(etl_id=item['partner']['id'])
        instance.save()
        # fetch type-specific detail data for this engagement
        sync_audit_individual_data(instance)
@app.task
def sync_audit_individual_data(instance):
    """Fill *instance* (an Engagement) with detail data fetched from the
    engagement-type-specific eTools endpoint.
    """
    from internos.etools.models import PCA
    # choose the endpoint matching the engagement type; the generic path
    # is the fallback for unknown types (it contains no id placeholder)
    type_paths = {
        instance.TYPE_AUDIT: '/api/audit/audits/{}/',
        instance.TYPE_MICRO_ASSESSMENT: '/api/audit/micro-assessments/{}/',
        instance.TYPE_SPOT_CHECK: '/api/audit/spot-checks/{}/',
        instance.TYPE_SPECIAL_AUDIT: '/api/audit/special-audits/{}/',
    }
    template = type_paths.get(instance.engagement_type,
                              '/api/audit/engagement/')
    api_func = template.format(instance.id)
    data = json.loads(get_data(
        'etools.unicef.org', api_func,
        'Token 36f06547a4b930c6608e503db49f1e45305351c2'))
    # fields that only some engagement types return: copy when present
    for field in ('face_form_start_date', 'face_form_end_date',
                  'cancel_comment'):
        if field in data:
            setattr(instance, field, data[field])
    instance.agreement = data['agreement']
    instance.po_item = data['po_item']
    for field in ('related_agreement', 'exchange_rate',
                  'total_amount_tested',
                  'total_amount_of_ineligible_expenditure',
                  'internal_controls', 'amount_refunded',
                  'additional_supporting_documentation_provided',
                  'justification_provided_and_accepted', 'write_off_required',
                  'explanation_for_additional_information',
                  'audited_expenditure', 'financial_findings',
                  'audit_opinion', 'pending_unsupported_amount'):
        if field in data:
            setattr(instance, field, data[field])
    if 'findings' in data:
        # the model keeps the raw findings payload in both attributes
        instance.findings = data['findings']
        instance.findings_sets = data['findings']
    # fields present for every engagement type
    for field in ('partner_contacted_at', 'start_date', 'end_date',
                  'authorized_officers'):
        setattr(instance, field, data[field])
    for pd in data['active_pd']:
        instance.active_pd.add(PCA.objects.get(etl_id=pd['id']))
    for field in ('staff_members', 'date_of_cancel', 'date_of_final_report',
                  'date_of_report_submit', 'date_of_comments_by_ip',
                  'date_of_comments_by_unicef', 'date_of_draft_report_to_ip',
                  'date_of_draft_report_to_unicef', 'date_of_field_visit'):
        setattr(instance, field, data[field])
    for field in ('joint_audit', 'shared_ip_with'):
        if field in data:
            setattr(instance, field, data[field])
    instance.save()
@app.task
def sync_trip_data():
    """Fetch travel records page by page from the eTools t2f API and
    upsert local Travel records.

    NOTE(review): only pages 320-349 are fetched, as in the original
    code — confirm this hard-coded range is intentional.
    """
    from internos.etools.models import Travel
    for page in range(320, 350):
        try:
            api_func = '/api/t2f/travels/?page={}&page_size={}'.format(page, 100)
            instances = json.loads(get_data(
                'etools.unicef.org', api_func,
                'Token 36f06547a4b930c6608e503db49f1e45305351c2'))
            for item in instances['data']:
                instance, created = Travel.objects.get_or_create(id=item['id'])
                instance.reference_number = item['reference_number']
                instance.traveler_name = item['traveler']
                instance.purpose = item['purpose']
                instance.status = item['status'].lower()
                # dates arrive as 'YYYY-MM-DD' strings and may be empty
                instance.start_date = datetime.datetime.strptime(item['start_date'], "%Y-%m-%d") if item['start_date'] else ''
                instance.end_date = datetime.datetime.strptime(item['end_date'], "%Y-%m-%d") if item['end_date'] else ''
                instance.supervisor_name = item['supervisor_name']
                instance.section_id = item['section']
                instance.office_id = item['office']
                instance.save()
        except Exception as ex:
            # BUG FIX: `ex.message` does not exist on Python 3 and raised
            # AttributeError inside the handler, killing the page loop;
            # print the exception object instead
            print(ex)
            continue
@app.task
def sync_trip_individual_data(instance):
    """Fill a Travel *instance* and its TravelActivity children with
    detail data from the t2f detail endpoint.
    """
    from internos.etools.models import TravelActivity, PartnerOrganization, PCA
    from internos.locations.models import Location
    data = get_data('etools.unicef.org',
                    '/api/t2f/travels/{}/'.format(instance.id),
                    'Token 36f06547a4b930c6608e503db49f1e45305351c2')
    item = json.loads(data)
    instance.international_travel = item['international_travel']
    instance.ta_required = item['ta_required']
    instance.itinerary_set = item['itinerary']
    instance.activities_set = item['activities']
    for activity in item['activities']:
        # the travel keeps the (last) activity's type
        instance.travel_type = activity['travel_type'].title()
        act_instance, created = TravelActivity.objects.get_or_create(id=activity['id'])
        act_instance.travel_type = activity['travel_type'].title()
        if activity['date']:
            # BUG FIX: the condition checks the activity's own date but the
            # original parsed item['start_date'] — parse the activity date.
            # Assumes the same 'YYYY-MM-DD' format — TODO confirm with API
            act_instance.date = datetime.datetime.strptime(activity['date'], "%Y-%m-%d")
        else:
            act_instance.date = instance.start_date
        act_instance.is_primary_traveler = activity['is_primary_traveler']
        act_instance.travel = instance
        if activity['partner']:
            act_instance.partner = PartnerOrganization.objects.get(etl_id=activity['partner'])
        if activity['partnership']:
            act_instance.partnership = PCA.objects.get(etl_id=activity['partnership'])
        for location in activity['locations']:
            try:
                act_instance.locations.add(Location.objects.filter(id=location).first())
            except Exception:
                continue
        act_instance.save()
    for field in ('mode_of_travel', 'estimated_travel_cost', 'completed_at',
                  'canceled_at', 'rejection_note', 'cancellation_note'):
        setattr(instance, field, item[field])
    instance.attachments_set = item['attachments']
    instance.attachments_sets = item['attachments']
    # count HACT .docx reports among the attachments
    instance.have_hact = sum(
        1 for attache in item['attachments']
        if 'HACT' in attache['name'] and '.docx' in attache['name'])
    for field in ('certification_note', 'report', 'additional_note',
                  'misc_expenses', 'first_submission_date'):
        setattr(instance, field, item[field])
    instance.save()
@app.task
def sync_action_points_data():
    """Fetch action points for every finalized engagement and upsert
    local ActionPoint records.
    """
    from internos.etools.models import Engagement, PartnerOrganization, ActionPoint
    finalized = Engagement.objects.filter(status=Engagement.FINAL)
    for engagement in finalized.iterator():
        api_func = '/api/action-points/action-points/?engagement={}'.format(engagement.id)
        data = json.loads(get_data(
            'etools.unicef.org', api_func,
            'Token 36f06547a4b930c6608e503db49f1e45305351c2'))
        for item in data['results']:
            instance, created = ActionPoint.objects.get_or_create(id=int(item['id']))
            instance.reference_number = item['reference_number']
            # tag the module with the engagement type it came from
            instance.related_module = '{}_{}'.format(item['related_module'],
                                                     engagement.engagement_type)
            instance.category_id = int(item['category']['id'])
            instance.category_name = item['category']['description']
            instance.description = item['description']
            instance.due_date = item['due_date']
            instance.author_name = item['author']['name']
            instance.assigned_by_name = item['assigned_by']['name']
            instance.assigned_to_name = item['assigned_to']['name']
            instance.high_priority = item['high_priority']
            instance.section_id = int(item['section']['id'])
            instance.office_id = int(item['office']['id'])
            instance.engagement = engagement
            instance.status = item['status']
            instance.status_date = item['status_date']
            # NOTE(review): a model instance is assigned to `partner_id` —
            # confirm this field is a relation and not a raw id column
            instance.partner_id = PartnerOrganization.objects.get(etl_id=int(item['partner']['id']))
            instance.save()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import DomoticzAPI as dom
from test_all import (WIDTH_LABEL, FILE, TEST, H2, CRLF, SUFFIX)
def main():
    """Manual test for the DomoticzAPI Hardware class.

    Creates a dummy hardware entry, looks it up by idx, renames it,
    deletes it, re-adds it with changed connection settings and finally
    cleans up both objects.
    """
    print(FILE)
    print("{:{}<{}}: {}".format("Test script", SUFFIX, WIDTH_LABEL, __file__))
    print(FILE)
    server = dom.Server()
    print(CRLF)
    print(TEST)
    print("Create hw2 Class")
    print(TEST)
    hw2 = dom.Hardware(server,
                       type=dom.HTYPE_DUMMY,
                       port=1,
                       name="Test API hw2",
                       enabled="true")
    print("{:{}<{}}: {}".format("hw2", SUFFIX, WIDTH_LABEL, hw2))
    print(CRLF)
    print(TEST)
    print("Add hw2 to Domoticz")
    print(TEST)
    hw2.add()
    if hw2.exists():
        print("{:{}<{}}: {}".format(
            "hw2", SUFFIX, WIDTH_LABEL, hw2))
        print("{:{}<{}}: {}".format(
            "type_name", SUFFIX, WIDTH_LABEL, hw2.type_name))
    else:
        print("{:{}<{}}: {}".format(
            "hw2", SUFFIX, WIDTH_LABEL, "doesn't exists"))
    print(CRLF)
    print(TEST)
    print("Create class hw1 from hw2 in Domoticz")
    print(TEST)
    hw1 = dom.Hardware(server, idx=hw2.idx)
    # BUG FIX: this line was labeled "hw1" but printed hw2; show the
    # newly created hw1 object instead
    print("{:{}<{}}: {}".format("hw1", SUFFIX, WIDTH_LABEL, hw1))
    if not hw1.exists():
        print("{:{}<{}}: {}".format(
            "hw1", SUFFIX, WIDTH_LABEL, "doesn't exists"))
    print(CRLF)
    print(TEST)
    print("Rename hw2")
    print(TEST)
    hw2.name = "Test API hw2 renamed"
    print("{:{}<{}}: {}".format("hw2", SUFFIX, WIDTH_LABEL, hw2))
    print(CRLF)
    print(TEST)
    print("Delete hw2 to Domoticz")
    print(TEST)
    hw2.delete()
    print("{:{}<{}}: {}".format("hw2", SUFFIX, WIDTH_LABEL, hw2))
    print(CRLF)
    print(TEST)
    print("Modify object hw2 and add to Domoticz")
    print(TEST)
    print("{:{}<{}}: {}".format("hw2", SUFFIX, WIDTH_LABEL, hw2))
    hw2.address = "10.10.0.10"
    hw2.port = 9876
    hw2.serialport = "1234"
    hw2.add()
    print("{:{}<{}}: {}".format("hw2", SUFFIX, WIDTH_LABEL, hw2))
    print(CRLF)
    print(TEST)
    print("Clean up test data")
    print(TEST)
    hw1.delete()
    hw2.delete()
    print("{:{}<{}}: {}".format("hw1", SUFFIX, WIDTH_LABEL, hw1))
    print("{:{}<{}}: {}".format("hw2", SUFFIX, WIDTH_LABEL, hw2))
# run the manual hardware test only when executed as a script
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from unittest import TestCase
from algotrader.app.backtest_runner import BacktestRunner
from algotrader.trading.config import Config, load_from_yaml
from algotrader.trading.context import ApplicationContext
from tests import test_override
class StrategyPersistenceTest(TestCase):
    """Verify a persisted backtest resumed mid-way matches one continuous run."""

    start_date = 19930101
    intrim_date = 20080101
    end_date = 20170101

    stg_override = {
        "Strategy": {
            "down2%": {
                "qty": 1000
            }
        }
    }

    def create_app_context(self, override):
        """Build an ApplicationContext from the layered YAML/dict configs."""
        layers = [
            load_from_yaml("../config/backtest.yaml"),
            load_from_yaml("../config/down2%.yaml"),
            test_override,
            StrategyPersistenceTest.stg_override,
            override,
        ]
        return ApplicationContext(config=Config(*layers))

    def execute(self, conf):
        """Run one backtest; return (initial, final) total equity."""
        app_context = self.create_app_context(conf)
        backtest = BacktestRunner()
        backtest.start(app_context)
        equity_begin = backtest.initial_result['total_equity']
        equity_end = backtest.portfolio.get_result()['total_equity']
        return equity_begin, equity_end

    def test_result(self):
        """Split run at intrim_date; both halves must line up with the full run."""
        cls = StrategyPersistenceTest
        # Uninterrupted reference run, no persistence.
        total_begin, total_end = self.execute(conf={
            "Application": {
                "portfolioId": "test",
                "fromDate": cls.start_date,
                "toDate": cls.end_date,
                "deleteDBAtStop": True,
                "persistenceMode": "Disable"
            }
        })
        # First half, persisted so the second half can resume from it.
        part1_begin, part1_end = self.execute(conf={
            "Application": {
                "portfolioId": "test1",
                "fromDate": cls.start_date,
                "toDate": cls.intrim_date,
                "createDBAtStart": True,
                "deleteDBAtStop": False,
                "persistenceMode": "Batch"
            }
        })
        # Second half, resuming the persisted portfolio.
        part2_begin, part2_end = self.execute(conf={
            "Application": {
                "portfolioId": "test1",
                "fromDate": cls.intrim_date,
                "toDate": cls.end_date,
                "deleteDBAtStop": True,
                "persistenceMode": "Disable"
            }
        })
        for label, value in (("total begin", total_begin),
                             ("total end", total_end),
                             ("part1 begin", part1_begin),
                             ("part1 end", part1_end),
                             ("part2 begin", part2_begin),
                             ("part2 end", part2_end)):
            print("%s = %s" % (label, value))
        self.assertEqual(total_begin, part1_begin)
        self.assertEqual(part1_end, part2_begin)
        self.assertEqual(total_end, part2_end)
|
nilq/baby-python
|
python
|
def ajuda(com):
    """Show Python's interactive help page for *com* (a name, function, module...)."""
    help(com)
def titulo(msg, cor=0):
    """Print *msg* framed by two dashed rules (``cor`` is accepted but unused)."""
    regua = '-' * (len(msg) + 4)
    print(regua)
    print(f' {msg}')
    print(regua)
# Interactive loop: keep prompting for a name until the user types FIM.
comando = ''
while True:
    titulo('SISTEMA DE AJUDA PyHELP')
    comando = str(input('Funca ou Biblioteca > '))
    if comando.upper() == 'FIM':
        break
    ajuda(comando)
titulo('ATE LOGO')
|
nilq/baby-python
|
python
|
from src.core.models import TimestampedModel
from src.users.managers import UserManager
from src.users.validators import (
validate_unique_username,
validate_username_valid_characters_only,
)
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.utils.translation import gettext_lazy as _
class LmsUser(AbstractBaseUser, PermissionsMixin, TimestampedModel):
    """User model.

    Authentication is by e-mail (``USERNAME_FIELD``); ``username`` is an
    optional display handle validated by custom validators.
    """

    objects = UserManager()

    # Optional handle; uniqueness and allowed characters are enforced by the
    # two field validators rather than a database constraint.
    username = models.CharField(
        max_length=30,
        blank=True,
        null=True,
        validators=[validate_unique_username, validate_username_valid_characters_only],
    )
    # E-mail doubles as the login identifier (see USERNAME_FIELD below).
    email = models.EmailField(_("email address"), unique=True, blank=False, null=False)
    first_name = models.CharField(max_length=30, blank=True, null=True)
    last_name = models.CharField(max_length=255, blank=True, null=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)

    USERNAME_FIELD = "email"
    # No extra fields required by createsuperuser beyond the e-mail itself.
    REQUIRED_FIELDS: list[str] = []

    class Meta:
        verbose_name = _("user")
        verbose_name_plural = _("users")

    def __str__(self) -> str:
        return f"User(full_name={self.full_name}, email={self.email})"

    @property
    def full_name(self) -> str:
        # NOTE(review): first_name/last_name are nullable, so this can render
        # as "None None" — confirm whether callers expect that.
        return f"{self.first_name} {self.last_name}"
|
nilq/baby-python
|
python
|
from django import forms
from django.core.exceptions import ValidationError
from grandchallenge.publications.models import (
Publication,
identifier_validator,
)
from grandchallenge.publications.utils import get_identifier_csl
class PublicationForm(forms.ModelForm):
    """Form that accepts a DOI/arXiv identifier and resolves its CSL metadata."""

    def clean_identifier(self):
        # Normalise to lower case before validating so mixed-case DOIs match.
        identifier = self.cleaned_data["identifier"]
        identifier = identifier.lower()
        identifier_validator(identifier)
        return identifier

    def clean(self):
        self.cleaned_data = super().clean()
        # Field-level validation already failed; skip the remote lookup.
        if self.errors:
            return self.cleaned_data
        # Fall back to the instance value when the field was not submitted.
        identifier = self.cleaned_data.get(
            "identifier", self.instance.identifier
        )
        try:
            csl, new_identifier = get_identifier_csl(doi_or_arxiv=identifier)
        except ValueError:
            raise ValidationError("Identifier not recognised")
        # The lookup may canonicalise the identifier; keep the form data and
        # the model instance in sync with the resolved value.
        if new_identifier != identifier:
            self.cleaned_data["identifier"] = new_identifier
            self.instance.identifier = new_identifier
        self.cleaned_data["csl"] = csl
        self.instance.csl = csl
        return self.cleaned_data

    class Meta:
        model = Publication
        fields = ("identifier",)
|
nilq/baby-python
|
python
|
class MorseCode():
    """Translate text to and from International Morse code.

    Encoded output is space-separated Morse symbols in lower case; characters
    without a Morse mapping pass through unchanged.
    """

    def __init__(self):
        # Plain character -> Morse symbol. NOTE: the comma entry is keyed as
        # ', ' (comma followed by a space), exactly as in the original table.
        self.__dic_plain = {
            'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.',
            'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---',
            'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---',
            'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-',
            'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--',
            'Z': '--..', '1': '.----', '2': '..---', '3': '...--',
            '4': '....-', '5': '.....', '6': '-....', '7': '--...',
            '8': '---..', '9': '----.', '0': '-----', ', ': '--..--',
            '.': '.-.-.-', '?': '..--..', '/': '-..-.', '-': '-....-',
            '(': '-.--.', ')': '-.--.-',
        }
        # Reverse lookup: Morse symbol -> plain character.
        self.__dic_cipher = {code: char for char, code in self.__dic_plain.items()}

    def encrypt(self, string):
        """Encode *string*; each symbol (or unknown char) is followed by a space."""
        pieces = []
        for ch in string.upper():
            if ch == ' ':
                # A word gap is kept as a bare extra space.
                pieces.append(' ')
            else:
                # Known characters become their Morse code; unknown ones pass through.
                pieces.append(self.__dic_plain.get(ch, ch))
                pieces.append(' ')
        return ''.join(pieces).lower()

    def decrypt(self, string):
        """Decode a space-separated Morse *string* back to lower-case text."""
        pieces = []
        for token in string.split(' '):
            if token == '':
                # An empty token comes from a double space: a word gap.
                pieces.append(' ')
            else:
                pieces.append(self.__dic_cipher.get(token, token))
        return ''.join(pieces).lower()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2013, 2014 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
from __future__ import absolute_import, unicode_literals
from zope.cachedescriptors.property import Lazy
from zope.component import createObject
from AccessControl import getSecurityManager
from gs.group.base import GroupPage
from .topicssearch import TopicsSearch
class TopicsAjax(GroupPage):
    """AJAX view returning (optionally searched) topics for a group."""

    @Lazy
    def viewTopics(self):
        """Whether the current user may view the group's messages."""
        # TODO: Figure out I could do this better.
        msgs = self.context.messages
        user = getSecurityManager().getUser()
        retval = bool(user.has_permission('View', msgs))
        return retval

    @Lazy
    def searchTokens(self):
        """Tokenised search terms taken from the ``s`` request parameter."""
        s = self.request.get('s', '')
        return createObject('groupserver.SearchTextTokens', s)

    @Lazy
    def offset(self):
        """Result offset from the ``i`` request parameter (must be >= 0)."""
        retval = int(self.request.get('i', 0))
        assert retval >= 0
        return retval

    @Lazy
    def limit(self):
        """Page size from the ``l`` request parameter, taken modulo 48.

        NOTE(review): the ``% 48`` makes a request for exactly 48 items yield
        0 results — presumably a cap against oversized pages; confirm intent.
        """
        retval = int(self.request.get('l', 10)) % 48
        assert retval >= 0
        return retval

    def topics(self):
        '''Generator, which returns the topics'''
        ts = TopicsSearch(self.context, self.searchTokens, self.limit,
                          self.offset)
        return ts.topics()
|
nilq/baby-python
|
python
|
from .emojipedia import Emojipedia, Emoji
|
nilq/baby-python
|
python
|
import re
import ssl
from retrying import retry
import urllib.request
# Link to an individual word-list page on the site index.
wordlist_re = re.compile(
    r'<a href="/wordlist/(\w+.shtml)" target="_top">\w+</a><BR>')
# The page's title heading.
wordlist_name_re = re.compile(
    r'<h1 class=body-title__title>(.+?)</h1>')
# One word entry inside the list body.
words_re = re.compile(
    r'<div class=wordlist-item>([\w -\\\']+?)</div>')


def extract_wordlist(page):
    """Return ``(list name, words)`` parsed from one word-list HTML page."""
    title_match = wordlist_name_re.search(page)
    return title_match.group(1).strip(), words_re.findall(page)
@retry
def get_wordlist(url_name):
    """Fetch one word-list page and return ``(name, words)``.

    Retries (via ``@retry``) until the download and parse succeed.
    NOTE(review): certificate verification is disabled deliberately for this
    host — confirm that is still acceptable.
    """
    try:
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        response = urllib.request.urlopen(
            'https://www.enchantedlearning.com/wordlist/' + url_name, context=context)
        return extract_wordlist(str(response.read()))
    except Exception as error:
        # BUG FIX: the original swallowed the exception and returned None,
        # which made @retry a no-op and crashed the caller on tuple
        # unpacking. Log the error, then re-raise so @retry can retry.
        print(error)
        raise
def write_to_file(f, name, words):
    """Append one word list to *f* as ``name:word1,word2,...`` plus a newline.

    The first word is preceded by ``:`` and later words by ``,``; an empty
    *words* iterable writes just the name and the newline. The stream is
    flushed so partial progress survives an interrupted scrape.
    """
    f.write(name)
    first = True
    for word in words:
        # Idiomatic truthiness check (was `first == True`).
        f.write(':' if first else ',')
        first = False
        f.write(word)
    f.write('\n')
    f.flush()
def get_wordlists_k12():
    """Scrape every K-12 word list from enchantedlearning.com into a text file."""
    # NOTE(review): certificate verification is disabled here, mirroring
    # get_wordlist — confirm this is acceptable for this host.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    with urllib.request.urlopen('https://www.enchantedlearning.com/wordlist/', context=ctx) as response:
        with open('wordbook/category/categories-k12.txt', 'w') as f:
            for url_name in wordlist_re.findall(str(response.read())):
                name, words = get_wordlist(url_name)
                print(name)
                write_to_file(f, name, words)


get_wordlists_k12()
|
nilq/baby-python
|
python
|
"""This module contains the general information for AdaptorHostEthIf ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class AdaptorHostEthIfConsts:
    """Allowed constant values for AdaptorHostEthIf properties."""
    IF_TYPE_VIRTUAL = "virtual"
    MAC_AUTO = "AUTO"
    UPLINK_PORT_0 = "0"
    UPLINK_PORT_1 = "1"
class AdaptorHostEthIf(ManagedObject):
    """This is AdaptorHostEthIf class.

    Generated-style managed-object definition for an adapter host Ethernet
    interface (vNIC). The metadata tables below drive the generic
    ManagedObject machinery; they are data, not logic.
    """

    consts = AdaptorHostEthIfConsts()
    # Property/properties that form the relative name ("host-eth-[name]").
    naming_props = set([u'name'])

    # Per-platform class metadata: RN format, first supported version,
    # access mask, allowed parents, children, and supported verbs.
    mo_meta = {
        "classic": MoMeta("AdaptorHostEthIf", "adaptorHostEthIf", "host-eth-[name]", VersionMeta.Version151f, "InputOutput", 0x3fff, [], ["admin", "read-only", "user"], [u'adaptorUnit'], [], ["Add", "Get", "Remove", "Set"]),
        "modular": MoMeta("AdaptorHostEthIf", "adaptorHostEthIf", "host-eth-[name]", VersionMeta.Version2013e, "InputOutput", 0x3fff, [], ["admin", "read-only", "user"], [u'adaptorUnit'], [], ["Add", "Get", "Remove", "Set"])
    }

    # Per-platform property metadata keyed by the Python attribute name:
    # XML name, type, version, access, mask, min/max length, regex,
    # enumerated values, and numeric ranges.
    prop_meta = {
        "classic": {
            "advanced_filter": MoPropertyMeta("advanced_filter", "advancedFilter", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "cdn": MoPropertyMeta("cdn", "cdn", "string", VersionMeta.Version204c, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[a-zA-Z0-9\-\._:]{0,32}""", [], []),
            "channel_number": MoPropertyMeta("channel_number", "channelNumber", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["1-1000"]),
            "class_of_service": MoPropertyMeta("class_of_service", "classOfService", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[0-6]""", [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
            "mac": MoPropertyMeta("mac", "mac", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", ["AUTO"], []),
            "mtu": MoPropertyMeta("mtu", "mtu", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, [], ["1500-9000"]),
            "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version151f, MoPropertyMeta.NAMING, 0x100, None, None, r"""[a-zA-Z0-9\-\._:]{1,31}""", [], []),
            "port_profile": MoPropertyMeta("port_profile", "portProfile", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""[a-zA-Z0-9_\-]{0,80}""", [], []),
            "pxe_boot": MoPropertyMeta("pxe_boot", "pxeBoot", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x800, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x1000, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "uplink_port": MoPropertyMeta("uplink_port", "uplinkPort", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2000, None, None, None, ["0", "1"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["virtual"], []),
            "iscsi_boot": MoPropertyMeta("iscsi_boot", "iscsiBoot", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "usnic_count": MoPropertyMeta("usnic_count", "usnicCount", "uint", VersionMeta.Version151x, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-225"]),
        },
        "modular": {
            "advanced_filter": MoPropertyMeta("advanced_filter", "advancedFilter", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "cdn": MoPropertyMeta("cdn", "cdn", "string", VersionMeta.Version301c, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[a-zA-Z0-9\-\._:]{0,32}""", [], []),
            "channel_number": MoPropertyMeta("channel_number", "channelNumber", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["1-1000"]),
            "class_of_service": MoPropertyMeta("class_of_service", "classOfService", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""[0-6]""", [], []),
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
            "mac": MoPropertyMeta("mac", "mac", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", ["AUTO"], []),
            "mtu": MoPropertyMeta("mtu", "mtu", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, [], ["1500-9000"]),
            "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version2013e, MoPropertyMeta.NAMING, 0x100, None, None, r"""[a-zA-Z0-9\-\._:]{1,31}""", [], []),
            "port_profile": MoPropertyMeta("port_profile", "portProfile", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""[a-zA-Z0-9_\-]{0,80}""", [], []),
            "pxe_boot": MoPropertyMeta("pxe_boot", "pxeBoot", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x800, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x1000, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "uplink_port": MoPropertyMeta("uplink_port", "uplinkPort", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2000, None, None, None, ["0", "1"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "if_type": MoPropertyMeta("if_type", "ifType", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["virtual"], []),
            "iscsi_boot": MoPropertyMeta("iscsi_boot", "iscsiBoot", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
            "usnic_count": MoPropertyMeta("usnic_count", "usnicCount", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-225"]),
        },
    }

    # Mapping from XML attribute name to Python attribute name, per platform.
    prop_map = {
        "classic": {
            "advancedFilter": "advanced_filter",
            "cdn": "cdn",
            "channelNumber": "channel_number",
            "classOfService": "class_of_service",
            "dn": "dn",
            "mac": "mac",
            "mtu": "mtu",
            "name": "name",
            "portProfile": "port_profile",
            "pxeBoot": "pxe_boot",
            "rn": "rn",
            "status": "status",
            "uplinkPort": "uplink_port",
            "childAction": "child_action",
            "ifType": "if_type",
            "iscsiBoot": "iscsi_boot",
            "usnicCount": "usnic_count",
        },
        "modular": {
            "advancedFilter": "advanced_filter",
            "cdn": "cdn",
            "channelNumber": "channel_number",
            "classOfService": "class_of_service",
            "dn": "dn",
            "mac": "mac",
            "mtu": "mtu",
            "name": "name",
            "portProfile": "port_profile",
            "pxeBoot": "pxe_boot",
            "rn": "rn",
            "status": "status",
            "uplinkPort": "uplink_port",
            "childAction": "child_action",
            "ifType": "if_type",
            "iscsiBoot": "iscsi_boot",
            "usnicCount": "usnic_count",
        },
    }

    def __init__(self, parent_mo_or_dn, name, **kwargs):
        """Create the MO under *parent_mo_or_dn*; *name* is the naming property.

        All managed properties start unset (None); ManagedObject.__init__
        applies any values passed through **kwargs.
        """
        self._dirty_mask = 0
        self.name = name
        self.advanced_filter = None
        self.cdn = None
        self.channel_number = None
        self.class_of_service = None
        self.mac = None
        self.mtu = None
        self.port_profile = None
        self.pxe_boot = None
        self.status = None
        self.uplink_port = None
        self.child_action = None
        self.if_type = None
        self.iscsi_boot = None
        self.usnic_count = None

        ManagedObject.__init__(self, "AdaptorHostEthIf", parent_mo_or_dn, **kwargs)
|
nilq/baby-python
|
python
|
from PyQt5.QtGui import QImage
from PIL import Image
import numpy as np
def numpyQImage(image):
    """Convert a numpy ``uint8`` image array to a ``QImage``.

    Supports 2-D grayscale, HxWx3 RGB and HxWx4 ARGB arrays; any other
    input yields an empty (null) QImage. The QImage wraps ``image.data``
    without copying, so the array must outlive the returned QImage.
    """
    qImg = QImage()
    if image.dtype == np.uint8:
        if len(image.shape) == 2:
            # Grayscale: one byte per pixel plus a 256-entry gray palette.
            channels = 1
            height, width = image.shape
            bytesPerLine = channels * width
            qImg = QImage(
                image.data, width, height, bytesPerLine, QImage.Format_Indexed8
            )
            # BUG FIX: the original called qRgb() without importing it, which
            # raised NameError for every grayscale input. Build the
            # 0xAARRGGBB values directly (alpha 0xFF, R == G == B == i).
            qImg.setColorTable(
                [0xFF000000 | (i << 16) | (i << 8) | i for i in range(256)]
            )
        elif len(image.shape) == 3:
            if image.shape[2] == 3:
                height, width, channels = image.shape
                bytesPerLine = channels * width
                qImg = QImage(
                    image.data, width, height, bytesPerLine, QImage.Format_RGB888
                )
            elif image.shape[2] == 4:
                height, width, channels = image.shape
                bytesPerLine = channels * width
                qImg = QImage(
                    image.data, width, height, bytesPerLine, QImage.Format_ARGB32
                )
    return qImg
# generate data
im = Image.open('spritesheet.png').convert('RGB')
# Make into Numpy array of RGB and get dimensions
RGB = np.array(im)
h, w = RGB.shape[:2]
# Add an alpha channel, fully opaque (255)
RGBA = np.dstack((RGB, np.zeros((h,w),dtype=np.uint8)+255))
# Make mask of black pixels - mask is True where image is black
mBlack = (RGBA[:, :, 0:3] == [0,0,0]).all(2)
# Make all pixels matched by mask into transparent ones
RGBA[mBlack] = (0,0,0,0)
imarray = np.array(RGBA, dtype=np.uint8)
# NOTE(review): PIL's Image.size is (width, height), so these names look
# swapped — neither value is used below, but confirm the intent.
y,x = im.size
# Tile dimensions in pixels.
cropx = 174
cropy = 174
# Slice the sheet into a 3x3 grid of tiles.
# NOTE(review): the RIGHT column and BOTTOM row slices use a width/height of
# cropx*2 / cropy*2 instead of cropx / cropy. numpy clamps slices at the
# array edge, so this only yields tile-sized crops when the sheet is exactly
# 3 tiles wide and tall — confirm against the actual spritesheet.
# TOP
TOP_LEFT = numpyQImage(imarray[0:0+cropy,0:0+cropx,:].copy())
TOP = numpyQImage(imarray[0:0+cropy,cropx:cropx+cropx,:].copy())
TOP_RIGHT = numpyQImage(imarray[0:0+cropy,cropx*2:cropx*2+cropx*2,:].copy())
# MIDDLE
MIDDLE_LEFT = numpyQImage(imarray[cropy:cropy+cropy,0:0+cropx,:].copy())
MIDDLE = numpyQImage(imarray[cropy:cropy+cropy,cropx:cropx+cropx,:].copy())
MIDDLE_RIGHT = numpyQImage(imarray[cropy:cropy+cropy,cropx*2:cropx*2+cropx*2,:].copy())
# BOTTOM
BOTTOM_LEFT = numpyQImage(imarray[cropy*2:cropy*2+cropy*2,0:0+cropx,:].copy())
BOTTOM = numpyQImage(imarray[cropy*2:cropy*2+cropy*2,cropx:cropx+cropx,:].copy())
BOTTOM_RIGHT = numpyQImage(imarray[cropy*2:cropy*2+cropy*2,cropx*2:cropx*2+cropx*2,:].copy())
|
nilq/baby-python
|
python
|
#Copyright (c) 2014 Samsung Electronics Co., Ltd All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @file network_tests.py
# @author Jacek Pielaszkiewicz (j.pielaszkie@samsung.com)
#
'''! Module used to test network in zones
@author: Jacek Pielaszkiewicz (j.pielaszkie@samsung.com)
'''
import unittest
from vsm_integration_tests.common import vsm_test_utils
from network_common import *
class NetworkTestCase(unittest.TestCase):
    '''! Test case to check network configuration
    '''

    def setUp(self):
        # Function setup host machine to perform tests
        #
        # 1. Check user permisions
        if(test_run_user() == 1):
            self.assertTrue(False, "ROOT user is required to run the test")
            return

        # 2. Test zone path
        if(test_zone_path() == 1):
            self.assertTrue(False, "No test zone path :" + TEST_ZONE_PATH)
            return

        # 3. Ethernet device obtaning
        if(ETHERNET_DEVICE_DETECT and getActiveEthernetDevice() == 1):
            self.assertTrue(False, "Cannot obtain ethernet device")
            return

    def test_01twoNetworks(self):
        '''! Checks networks configuration
        '''
        # NOTE(review): this test body is empty — the docstring is its only
        # statement, so the test always passes. Confirm whether the actual
        # network checks were meant to be implemented here.
def main():
    # Run the test case with per-test verbosity.
    unittest.main(verbosity=2)

if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from src import Camera, Tracer, World, Stereo
|
nilq/baby-python
|
python
|
import pygame
import random
import time
import sys

pygame.init()

# colour definitions
blanco = pygame.Color(255,255,255)
negro = pygame.Color(0,0,0)
rojo = pygame.Color(255,0,0)
rojo_o = pygame.Color(100,0,0)
gris = pygame.Color(200,200,200)
verde = pygame.Color(0,250,100)
verde_o = pygame.Color(0,150,50)
morado = pygame.Color(126, 21, 175)

# window size
winSizeX = 1000
winSizeY = 600

# snake cell size (pixels) — also the movement step and apple grid size
tam_serp = 25

# window title
titulo_ventana = "Snake! Ver 2.2 By: moxwel"

# window properties
pantalla = pygame.display.set_mode((winSizeX,winSizeY))
pygame.display.set_caption(titulo_ventana)
icono_juego = pygame.image.load("resources/img/snake_icon.gif")
pygame.display.set_icon(icono_juego)
reloj = pygame.time.Clock()
def render_text(texto, color, tam, x=0, y=0, grosor=0, color_grosor=(255,255,255)):
    """Draw *texto* at (x, y), optionally outlined.

    ``grosor`` is the outline thickness in pixels (0 = no visible outline)
    and ``color_grosor`` its colour; the outline is produced by blitting the
    outline-coloured text eight times around the target position before the
    filled text is drawn on top.
    """
    fuente = pygame.font.Font("resources/determination.ttf", tam)
    contorno = fuente.render(texto, True, color_grosor, None)
    relleno = fuente.render(texto, True, color, None)
    for dx, dy in ((grosor, grosor), (grosor, -grosor),
                   (-grosor, grosor), (-grosor, -grosor),
                   (grosor, 0), (-grosor, 0),
                   (0, grosor), (0, -grosor)):
        pantalla.blit(contorno, (x + dx, y + dy))
    pantalla.blit(relleno, (x, y))
# Show a loading message while the heavy assets below are read from disk.
render_text("Cargando . . .",blanco,40,20,10)
pygame.display.update()

# images and sounds (loaded before main_game to avoid in-game load hitches)
go_image = pygame.transform.scale(pygame.image.load("resources/img/game_over.png"),(winSizeX,winSizeY))
pause_image = pygame.transform.scale(pygame.image.load("resources/img/pause.png"),(winSizeX,winSizeY))
musica = pygame.mixer.Sound("resources/music/music.ogg")
comer = pygame.mixer.Sound("resources/music/apple.ogg")
comer2 = pygame.mixer.Sound("resources/music/apple2.ogg")
comer3 = pygame.mixer.Sound("resources/music/apple3.ogg")
crash = pygame.mixer.Sound("resources/music/crash.ogg")
crash2 = pygame.mixer.Sound("resources/music/crash2.ogg")
keys = pygame.image.load("resources/img/keys.png")
gameover_music = pygame.mixer.Sound("resources/music/gameover.ogg")
# game start screen
def intro_juego():
    """Show the title/instructions screen until the player starts or quits.

    Blocks in its own loop; [Space] starts main_game(), [Q] or closing the
    window exits the process.
    """
    print("[Intro] Bienvenido a Snake!")
    intro_state = True

    # Static background and instructions, drawn once before the loop.
    pantalla.blit(pause_image,(0,0))
    render_text(titulo_ventana,negro,20,3,1,)
    render_text("El objetivo principal del juego es",blanco,30,50,220,2,negro)
    render_text("lograr que la serpiente se coma",blanco,30,50,250,2,negro)
    render_text("todas las manzanas posibles.",blanco,30,50,280,2,negro)
    render_text("Si la serpiente sale del escenario",blanco,30,50,370,2,negro)
    render_text("o se choca a si misma...",blanco,30,50,400,2,negro)
    pantalla.blit(keys,(650,40))
    render_text("Moverse",blanco,30,690,160,2,negro)
    render_text("[P] Pausa",blanco,30,685,200,2,negro)
    render_text("[Q] Salir",blanco,30,685,240,2,negro)
    # Legend: the three apple types and what they do.
    pygame.draw.circle(pantalla,rojo,(510,320),20)
    pygame.draw.circle(pantalla,verde_o,(510,390),20)
    pygame.draw.circle(pantalla,morado,(510,460),20)
    render_text("Manzana normal. Comelas para aumentar tu puntaje!",blanco,22,540,310,2,negro)
    render_text("Manzana verde. Si comes una, aumentaras tu rapidez.",blanco,22,540,380,2,negro)
    render_text("Manzana lila. Si comes una, seras mucho mas grande!",blanco,22,540,450,2,negro)

    while intro_state == True:
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                print("[Evento/inicio] Ventana cerrada. Cerrando...")
                sys.exit()
            if evento.type == pygame.KEYDOWN:
                if evento.key == pygame.K_SPACE:
                    print("[Evento/inicio] Espacio. Iniciando juego...")
                    pygame.mixer.stop()
                    main_game()
                if evento.key == pygame.K_q:
                    print("[Evento/inicio] Saliendo del juego / Tecla Q")
                    sys.exit()
        # Blinking headline: draw one frame in each colour scheme per pass.
        render_text("Snake!",verde,100,50,50,3,verde_o)
        render_text("P I E R D E S",rojo,30,320,400,2,negro)
        render_text("Presiona [Espacio] para comenzar",verde,40,240,530,2,negro)
        reloj.tick(5.45)
        pygame.display.update()
        render_text("Snake!",verde_o,100,50,50,3,verde)
        render_text("P I E R D E S",negro,30,320,400,2,rojo)
        render_text("Presiona [Espacio] para comenzar",blanco,40,240,530,2,negro)
        reloj.tick(5.45)
        pygame.display.update()
def pause_screen():
    """Draw the (static) pause screen: key help plus the apple legend."""
    pantalla.blit(pause_image,(0,0))
    pantalla.blit(keys,(650,40))
    render_text("Moverse",blanco,30,690,160,2,negro)
    render_text("[P] Reanudar",blanco,30,685,200,2,negro)
    render_text("[Q] Salir",blanco,30,685,240,2,negro)
    render_text("Pausa.",verde,100,50,50,3,verde_o)
    pygame.draw.circle(pantalla,rojo,(510,320),20)
    pygame.draw.circle(pantalla,verde_o,(510,390),20)
    pygame.draw.circle(pantalla,morado,(510,460),20)
    render_text("Manzana normal. Comelas para aumentar tu puntaje!",blanco,22,540,310,2,negro)
    render_text("Manzana verde. Si comes una, aumentaras tu rapidez.",blanco,22,540,380,2,negro)
    render_text("Manzana lila. Si comes una, seras mucho mas grande!",blanco,22,540,450,2,negro)
    pygame.display.update()
# main game function; structured so the game can be restarted by calling it again
def main_game():
    """Run one full game of Snake (setup, game loop, pause and game-over states)."""
    # Pick a random background each time a game starts.
    num_fondo = random.randint(1,4)
    print("[Fondo] Usando fondo " + str(num_fondo))
    if num_fondo == 1:
        bg_image = pygame.transform.scale(pygame.image.load("resources/img/fondo.png"),(winSizeX,winSizeY))
    elif num_fondo == 2:
        bg_image = pygame.transform.scale(pygame.image.load("resources/img/fondo2.png"),(winSizeX,winSizeY))
    elif num_fondo == 3:
        bg_image = pygame.transform.scale(pygame.image.load("resources/img/fondo3.png"),(winSizeX,winSizeY))
    elif num_fondo == 4:
        bg_image = pygame.transform.scale(pygame.image.load("resources/img/fondo4.png"),(winSizeX,winSizeY))

    # Initial snake position, snapped to the tam_serp grid (random).
    posX = random.randrange(0, winSizeX-tam_serp+1, tam_serp)
    posY = random.randrange(0, winSizeY-tam_serp+1, tam_serp)

    # Per-frame position deltas (initially motionless).
    cambioX,cambioY = 0,0

    # Snake state: length in cells, body-cell coordinates, score.
    largo_serp = 1
    coord_serp = []
    points = 0

    # Initial red apple position (random, grid-aligned).
    appleX = random.randrange(0, winSizeX-tam_serp+1, tam_serp)
    appleY = random.randrange(0, winSizeY-tam_serp+1, tam_serp)
    # Initial green apple position (random, grid-aligned).
    apple2X = random.randrange(0, winSizeX-tam_serp+1, tam_serp)
    apple2Y = random.randrange(0, winSizeY-tam_serp+1, tam_serp)
    # Initial purple apple position (random, grid-aligned).
    apple3X = random.randrange(0, winSizeX-tam_serp+1, tam_serp)
    apple3Y = random.randrange(0, winSizeY-tam_serp+1, tam_serp)

    # Previous direction (8=up 2=down 4=left 6=right 0=not moving yet);
    # used to forbid instant 180-degree turns.
    mov_ant = 0

    # Game-state flags and starting frame rate.
    gameOver = False
    pauseGame = False
    fps = 13

    # Start the background music on an infinite loop.
    musica.play(-1)

    # draw the snake
    def render_snake():
        # "cuerpo" takes the value of each [x, y] pair inside "coord_serp"...
        for cuerpo in coord_serp:
            # ...and draws one body cell at that coordinate.
            pygame.draw.rect(pantalla, negro, [(cuerpo[0],cuerpo[1]),(tam_serp,tam_serp)])
            # coord_serp = [[x1,y1],[x2,y2],[x3,y3]]
            # cuerpo = [x1,y1]  cuerpo = [x2,y2]  cuerpo = [x3,y3]
            # cuerpo[0],cuerpo[1] = x1,y1  ...

    # main game loop
    while True:
        pygame.mixer.unpause()

        # If the state is gameOver, stay in the "game over" section.
        while gameOver == True:
            # Stop all sound effects.
            musica.stop()
            comer.stop()
            # game-over screen
            pantalla.blit(go_image,(0,0))
            render_snake()
            render_text("Juego terminado",rojo,50,(winSizeX/2)-150,(winSizeY/2)-70,3,rojo_o)
            render_text("[Espacio] Volver a jugar",negro,30,(winSizeX/2)-150,(winSizeY/2)+10,2)
            render_text("[Q] Volver al inicio",negro,30,(winSizeX/2)-150,(winSizeY/2)+35,2)
            pygame.display.update()
            for evento in pygame.event.get():
                if evento.type == pygame.QUIT:
                    print("[Evento/GameOver] Ventana cerrada. Cerrando...")
                    sys.exit()
                # Space restarts the game; Q returns to the intro screen.
                if evento.type == pygame.KEYDOWN:
                    if evento.key == pygame.K_SPACE:
                        print("[Volviendo a iniciar snake]")
                        pygame.mixer.stop()
                        main_game()
                    if evento.key == pygame.K_q:
                        print("[Evento/GameOver] Saliendo del juego / Tecla Q")
                        pygame.mixer.stop()
                        intro_juego()

        # Pause mode.
        while pauseGame == True:
            pygame.mixer.pause()
            pause_screen()
            for evento in pygame.event.get():
                if evento.type == pygame.QUIT:
                    print("[Evento/pausa] Ventana cerrada. Cerrando...")
                    sys.exit()
                # Q quits the program; P resumes play.
                if evento.type == pygame.KEYDOWN:
                    if evento.key == pygame.K_q:
                        print("[Evento/pausa] Q. Saliendo")
                        sys.exit()
                    if evento.key == pygame.K_p:
                        print("[Evento/pausa] reanudando")
                        pauseGame = False

        # normal play
        pantalla.blit(bg_image,(0,0))
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                print("[Evento/Juego] Ventana cerrada. Cerrando...")
                sys.exit()
            if evento.type == pygame.KEYDOWN:
                # The mov_ant checks forbid reversing direction in place.
                if evento.key == pygame.K_LEFT and mov_ant != 6:
                    cambioX,cambioY = -tam_serp,0
                    mov_ant = 4
                elif evento.key == pygame.K_UP and mov_ant != 2:
                    cambioX,cambioY = 0,-tam_serp
                    mov_ant = 8
                elif evento.key == pygame.K_RIGHT and mov_ant != 4:
                    cambioX,cambioY = tam_serp,0
                    mov_ant = 6
                elif evento.key == pygame.K_DOWN and mov_ant != 8:
                    cambioX,cambioY = 0,tam_serp
                    mov_ant = 2
                elif evento.key == pygame.K_p:
                    print("[Tecla P] PAUSA")
                    pauseGame = True

        # draw the apples
        pygame.draw.circle(pantalla,rojo,(appleX+tam_serp//2,appleY+tam_serp//2),tam_serp//2)
        pygame.draw.circle(pantalla,verde_o,(apple2X+tam_serp//2,apple2Y+tam_serp//2),tam_serp//2)
        pygame.draw.circle(pantalla,morado,(apple3X+tam_serp//2,apple3Y+tam_serp//2),tam_serp//2)

        # advance the snake one cell
        posX += cambioX
        posY += cambioY

        # Store the current head coordinates in a list...
        cabeza_serp = []
        cabeza_serp.append(posX)
        cabeza_serp.append(posY)
        # ...and append it to the list holding every body coordinate.
        coord_serp.append(cabeza_serp)
        # If coord_serp is longer than the snake's length, drop the tail cell.
        if len(coord_serp) > largo_serp:
            del coord_serp[0]
        # DEBUG: print(coord_serp)

        render_snake()

        # show score and speed
        render_text("Puntos: " + str(points),blanco,40,10,0,2,negro)
        render_text("Rapidez: " + str(fps-12),blanco,40,200,0,2,negro)

        # At game start, prompt for a first key press.
        if mov_ant == 0:
            render_text("Toca una tecla direccional para comenzar",blanco,30,(winSizeX/2)-250,(winSizeY/2)-20,2,negro)

        # Red apple eaten: respawn it, grow the snake, add a point.
        if (posX,posY) == (appleX,appleY):
            comer.play()
            print("[Evento] Se toco la manzana en: " + str(appleX) + "," + str(appleY))
            appleX = random.randrange(0, winSizeX-tam_serp+1, tam_serp)
            appleY = random.randrange(0, winSizeY-tam_serp+1, tam_serp)
            largo_serp += 1
            points += 1
            render_text("Puntos: " + str(points),rojo,40,10,0,2,negro)
            # Every 3 points the speed goes up by one.
            if points > 0:
                if points % 3 == 0:
                    fps += 1
                    render_text("Rapidez: " + str(fps-12),rojo,40,200,0,2,negro)

        # GREEN apple eaten: respawn it, raise speed, 2 points.
        if (posX,posY) == (apple2X,apple2Y):
            comer2.play()
            print("[Evento] Se toco la manzana verde en: " + str(appleX) + "," + str(appleY))
            apple2X = random.randrange(0, winSizeX-tam_serp+1, tam_serp)
            apple2Y = random.randrange(0, winSizeY-tam_serp+1, tam_serp)
            fps += 1
            points += 2
            render_text("Puntos: " + str(points),verde,40,10,0,2,negro)
            render_text("Rapidez: " + str(fps-12),verde,40,200,0,2,negro)

        # PURPLE apple eaten: respawn it, grow a lot, 10 points.
        if (posX,posY) == (apple3X,apple3Y):
            comer3.play()
            print("[Evento] Se toco la manzana morada en: " + str(appleX) + "," + str(appleY))
            apple3X = random.randrange(0, winSizeX-tam_serp+1, tam_serp)
            apple3Y = random.randrange(0, winSizeY-tam_serp+1, tam_serp)
            largo_serp += 10
            points += 10
            render_text("Puntos: " + str(points),morado,40,10,0,2,negro)

        # If the snake runs into itself, the game is lost.
        if largo_serp > 1:
            for x in range(len(coord_serp)-1):
                if coord_serp[x] == cabeza_serp:
                    print("[Evento] Autochoque. Game over.")
                    crash2.play()
                    gameover_music.play()
                    gameOver = True

        # If the snake leaves the play field, the game is lost (gameOver set).
        if (posX < 0 or posX > winSizeX-tam_serp) or (posY < 0 or posY > winSizeY-tam_serp):
            print("[Evento] Fuera de escenario. Game over.")
            crash.play()
            gameover_music.play()
            gameOver = True

        reloj.tick(fps)
        pygame.display.update()
# Entry point: show the intro screen; intro_juego() itself starts main_game(),
# so the lines below only run if the intro loop ever returns.
intro_juego()
print("inicio_juego ---> main_game")
main_game()

# moxwel 2018
# Some resources are the property of third parties.
|
nilq/baby-python
|
python
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import numpy as np
from matplotlib import pyplot as plt
import os
from matplotlib import font_manager
import matplotlib as mpl
# Chinese-capable font for matplotlib labels (SimHei.ttf must be alongside the script).
zhfont1 = font_manager.FontProperties(fname='SimHei.ttf')
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
# %%
def loading_data(path):
    """Load every CSV file under *path*.

    Each file contributes one entry ``[range, rangerate, ax]`` of numpy
    arrays (follower gap, relative speed, acceleration), in ``os.listdir``
    order.
    """
    import os
    import pandas as pd
    loaded = []
    for fname in os.listdir(path):
        frame = pd.read_csv(os.path.join(path, fname))
        # one record per file: the three columns this analysis needs
        loaded.append([np.array(frame['range']),
                       np.array(frame['rangerate']),
                       np.array(frame['ax'])])
    return loaded
###############data is np.array
# --- script configuration: pick one driving record set and load it ---
dir='17103'  # NOTE(review): shadows the builtin `dir`; kept to avoid renaming downstream uses
datas_orig = loading_data(os.path.join('car-following',dir))
iter = 100  # NOTE(review): shadows the builtin `iter`; appears unused below — confirm
kappa_0=0.75
init=2.
# %%
####################
###normalize data###
####################
def initializing(datas):
    """Z-score normalise every record using statistics pooled over all records.

    Returns (normalised records, data_info) where data_info is
    [[mean, std], ...] for the d, v and a channels in that order.
    """
    pooled = [[], [], []]
    for record in datas:
        arr = np.array(record)
        for channel in range(3):
            pooled[channel] = pooled[channel] + list(arr[channel].T)
    data_info = [[np.mean(chan), np.std(chan)] for chan in pooled]
    normalised = []
    for record in datas:
        arr = np.array(record)
        cols = [(arr[c] - data_info[c][0]) / data_info[c][1] for c in range(3)]
        # stack channels as columns: resulting shape is (n_samples, 3)
        normalised.append(np.array([cols[0].T, cols[1].T, cols[2].T]).T)
    return normalised, data_info
# Normalise the loaded records once; keep the pooled stats for later de-normalisation.
datas,data_info = initializing(datas_orig)
# %%
def get_seg_d(data):
    """Split one normalised record into long/normal/close-distance point sets.

    Undoes the z-score normalisation with the module-level ``data_info``
    first, then bins each point by its physical gap (column 0).
    Returns ([LD, ND, CD] point lists, threshold table).
    """
    cols = np.array(data).T
    threshold = [[59.26,20.02,5.00],[-1.19,-0.2,0.25,1.23],[-0.20,-0.06,0.07,0.20]]
    # de-normalise each channel back to physical units
    real = np.array([cols[c]*data_info[c][1]+data_info[c][0]
                     for c in range(len(data_info))]).reshape(len(cols), len(cols[0]))
    LD_data, ND_data, CD_data = [], [], []
    for point in real.T:
        gap = point[0]
        if gap > threshold[0][0]:
            LD_data.append(point)
        elif gap >= threshold[0][1]:
            ND_data.append(point)
        elif gap >= threshold[0][2]:
            CD_data.append(point)
        else:
            print('Data has a wrong delta_d')
    return [LD_data,ND_data,CD_data],threshold
#range_datas=[LD_data,ND_data,CD_data]
def get_seg_v_a(range_datas,threshold):
    """Bin every point's relative speed and acceleration into classes -2..2.

    Column 1 of each output row is the speed class (cut points
    ``threshold[1]``); column 0 is the acceleration class
    (cut points ``threshold[2]``).
    """
    def _bin(value, edges):
        # four ascending cut points -> five classes -2, -1, 0, 1, 2
        for offset, edge in enumerate(edges):
            if value < edge:
                return offset - 2
        return 2

    analyze_data = []
    for range_data in range_datas:
        # initialise with a sentinel (20); every row is overwritten below
        group_data = np.ones(shape=(len(range_data), 2)) * 20
        for idx in range(len(range_data)):
            group_data[idx][1] = _bin(range_data[idx][1], threshold[1])
            group_data[idx][0] = _bin(range_data[idx][2], threshold[2])
        analyze_data.append(group_data)
    return analyze_data
# %%
def get_prob(analyze_data):
    """Turn each group's (a-class, v-class) labels into a 5x5 table.

    Classes -2..2 map to rows/cols 0..4. Each table is normalised to
    probabilities unless the group is empty (empty groups stay all-zero).
    """
    all_counts = []
    for group in analyze_data:
        counts = np.zeros((5, 5))
        for labels in group:
            # count only exact class pairs; sentinel rows would match nothing
            for i in range(5):
                for j in range(5):
                    if (labels == [i - 2, j - 2]).all():
                        counts[i][j] += 1
        if len(group):
            counts = counts / float(len(group))
        all_counts.append(counts)
    return all_counts
# Accumulate the 5x5 probability tables over every record (summed here, averaged later).
all_counts=np.zeros((3,5,5))
for data in datas:
    range_datas,thre = get_seg_d(data)
    anylyze_data=get_seg_v_a(range_datas,thre)  # NOTE(review): "anylyze" typo kept as-is
    all_counts = all_counts+np.array(get_prob(anylyze_data))
# %%
def plotting_style(all_counts):
    """Draw the three 5x5 probability heat maps (far/medium/short distance).

    Axis labels are Chinese (rendered with the module-level `zhfont1` font):
    x = relative-speed class, y = acceleration class. One colorbar per panel.
    """
    Y = ['急减','缓减','匀速','缓加','急加']
    X = ['快近','渐近','维持','渐远','快远']
    fig = plt.figure(figsize=(7,23))
    axes = []
    img = []
    titles = ['远距离','中距离','短距离']
    for idx in range(len(all_counts)):
        axes.append(fig.add_subplot(3,1,idx+1))
        axes[-1].set_ylabel('加速度'+r'$a_x$',size=14,fontproperties=zhfont1)
        # tick positions at cell centres of the 5x5 pcolormesh
        axes[-1].set_xticks(np.linspace(0.5,4.5,5,endpoint=True))
        axes[-1].set_xticklabels(X,fontproperties=zhfont1,size=13)
        axes[-1].set_xlabel('相对速度'+r'$\Delta$v',size=14,fontproperties=zhfont1)
        axes[-1].set_yticks(np.linspace(0.5,4.5,5,endpoint=True))
        axes[-1].set_yticklabels(Y,fontproperties=zhfont1,size=13)
        axes[-1].set_title(titles[idx],fontproperties=zhfont1,size=16)
        img.append(axes[-1].pcolormesh(all_counts[idx],cmap = mpl.cm.Spectral_r))
        # attach a colorbar axis to the right of each panel
        divider = make_axes_locatable(axes[-1])
        cax = divider.append_axes("right",size='5%',pad=0.05)
        #print(np.linspace(0.5,4.5,5,endpoint=True))
        norm = mpl.colors.Normalize(vmin=0,vmax=all_counts[idx].max())
        cmap = mpl.cm.Spectral_r
        cb = plt.colorbar(mpl.cm.ScalarMappable(norm=norm,cmap=cmap),cax=cax)
# Average the accumulated tables over the number of records, then plot.
all_counts=all_counts/len(datas)
plotting_style(all_counts)
#plt.xla
plt.show()
# %%
# Persist the three averaged tables (one CSV per distance regime) under no_seg/<dir>/.
if not os.path.isdir('no_seg'):
    os.makedirs('no_seg')
if not os.path.isdir(os.path.join('no_seg',dir)):
    os.makedirs(os.path.join('no_seg',dir))
for i in range(len(all_counts)):
    range_counts=all_counts[i]
    frame_counts=pd.DataFrame(range_counts)
    frame_counts.to_csv(os.path.join('no_seg',dir,str(i)+'.csv'))
# %%
|
nilq/baby-python
|
python
|
# Corrected version
# Exercise 010: convert an amount in BRL (reais) to USD at a fixed rate.
print('Exercício 010')
print()
# Input block: how much money is in the wallet
carteira = float(input('Informe quanto você tem na sua carteira: '))
print()
# Calculation block: hard-coded exchange rate of 5.47 BRL per USD
dólar = carteira / 5.47
# Output block
print('Você pode comprar $ {:.2f} dólares com R$ {} reais na carteira.'.format(
    dólar, carteira))
print()
|
nilq/baby-python
|
python
|
"""
Write an iterative function iterPower(base, exp) that calculates the exponential baseexp by simply using successive multiplication. For example, iterPower(base, exp) should compute baseexp by multiplying base times itself exp times. Write such a function below.
This function should take in two values - base can be a float or an integer; exp will be an integer ≥ 0. It should return one numerical value. Your code must be iterative - use of the ** operator is not allowed.
"""
def iterPower(base, exp):
    '''
    Compute base**exp by repeated multiplication (no ** operator).

    base: int or float.
    exp: int >= 0
    returns: int or float, base^exp
    '''
    total = 1
    for _ in range(exp):
        total *= base
    return total
|
nilq/baby-python
|
python
|
from SuperSafety.Utils.utils import limit_phi, load_conf
from SuperSafety.Supervisor.Dynamics import run_dynamics_update
import numpy as np
from matplotlib import pyplot as plt
import numpy as np
from numba import njit
class Modes:
    """Discrete (steering, speed) mode set used by the safety kernel.

    Steering angles are `nq_steer` evenly spaced values in
    [-max_steer, max_steer]; the speed component is the constant
    configured vehicle speed.
    """

    def __init__(self, conf) -> None:
        self.time_step = conf.kernel_time_step
        self.nq_steer = conf.nq_steer
        self.max_steer = conf.max_steer
        steer_grid = np.linspace(-self.max_steer, self.max_steer, self.nq_steer)
        speed_grid = conf.vehicle_speed * np.ones_like(steer_grid)
        # one row per mode: [steering angle, speed]
        self.qs = np.stack((steer_grid, speed_grid), axis=1)
        self.n_modes = len(self.qs)

    def get_mode_id(self, delta):
        """Index of the mode whose steering angle is closest to `delta`."""
        return int(np.argmin(np.abs(self.qs[:, 0] - delta)))

    def action2mode(self, action):
        """Snap a continuous action [steer, speed] onto the nearest mode row."""
        return self.qs[self.get_mode_id(action[0])]

    def __len__(self):
        return self.n_modes
def generate_dynamics_entry(state, action, m, time, resolution, phis):
    """Simulate one (state, action) pair for `time` seconds and discretise the result.

    Returns a 4-vector [dx_cells, dy_cells, heading-bin index, new mode id].
    `resolution` converts metres to grid cells; `phis` is the heading-bin grid.
    """
    dyns = np.zeros(4)
    new_state = run_dynamics_update(state, action, time)
    dx, dy, phi, vel, steer = new_state[0], new_state[1], new_state[2], new_state[3], new_state[4]
    new_q = m.get_mode_id(steer)
    phi = limit_phi(phi)  # wrap heading into the valid range before binning
    new_k = int(round((phi + np.pi) / (2*np.pi) * (len(phis)-1)))
    dyns[2] = min(max(0, new_k), len(phis)-1)  # clamp heading bin into table bounds
    dyns[0] = int(round(dx * resolution))
    dyns[1] = int(round(dy * resolution))
    dyns[3] = int(new_q)
    return dyns
# @njit(cache=True)
def build_viability_dynamics(m, conf):
    """Pre-compute the discretised transition table for the viability kernel.

    For every (heading bin, current mode, candidate mode, intermediate step)
    the table stores [dx_cells, dy_cells, new heading bin, new mode id].
    """
    phis = np.linspace(-np.pi, np.pi, conf.n_phi)
    ns = conf.n_intermediate_pts
    dt = conf.kernel_time_step / ns
    # Fix: `np.int` was a deprecated alias for the builtin `int` and was
    # removed in NumPy >= 1.24; use `int` directly (identical dtype).
    dynamics = np.zeros((len(phis), len(m), len(m), ns, 4), dtype=int)
    invalid_counter = 0  # NOTE(review): never incremented; the count printed below is always 0
    for i, p in enumerate(phis):
        for j, state_mode in enumerate(m.qs): # searches through old q's
            state = np.array([0, 0, p, state_mode[1], state_mode[0]])
            for k, action in enumerate(m.qs): # searches through actions
                for l in range(ns):
                    dynamics[i, j, k, l] = generate_dynamics_entry(state.copy(), action, m, dt*(l+1), conf.n_dx, phis)
    print(f"Invalid transitions: {invalid_counter}")
    print(f"Dynamics Table has been built: {dynamics.shape}")
    return dynamics
def build_dynamics_table(sim_conf):
    """Build the dynamics table for the configured kernel mode and save it to disk."""
    modes = Modes(sim_conf)
    if sim_conf.kernel_mode != "viab":
        # only the viability kernel is implemented
        raise ValueError(f"Unknown kernel mode: {sim_conf.kernel_mode}")
    dynamics = build_viability_dynamics(modes, sim_conf)
    np.save(f"{sim_conf.dynamics_path}{sim_conf.kernel_mode}_dyns.npy", dynamics)
if __name__ == "__main__":
    # Script entry point: load the configuration and build/save the dynamics table.
    conf = load_conf("config_file")
    build_dynamics_table(conf)
|
nilq/baby-python
|
python
|
import math
import re
from threading import Thread
import serial
from ...LightSkin import ForwardModel, LightSkin, EventHook
class ArduinoConnectorForwardModel(ForwardModel):
    """ Connects to an Arduino running the Arduino Connector Script on the given port with the given baudrate
        Parses the input in a new thread and updates its values accordingly.
        After each full received frame, the onUpdate is triggered. """
    # sampling distance used by the ForwardModel interface
    sampleDistance = 0.125
    # raw ADC full-scale value used to normalise readings into [0, 1]
    MAX_VALUE = 1024
    def __init__(self, ls: LightSkin, port: str, baudrate: int):
        super().__init__(ls)
        self.onUpdate: EventHook = EventHook()
        # one row of normalised sensor values per LED, initialised to 1.0
        self._sensorValues = []
        for i in range(len(self.ls.LEDs)):
            self._sensorValues.append([1.0] * len(self.ls.sensors))
        self._readerThread = Thread(target=self._readLoop, daemon=True)
        self._readerThreadRun = False
        self.ser = serial.Serial(
            port=port,
            baudrate=baudrate,
            parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE,
            bytesize=serial.EIGHTBITS,
            timeout=10)
        self._readerThreadRun = True
        self._readerThread.start()
    def __del__(self):
        # Stop the reader thread and close the serial port when garbage collected.
        if self._readerThreadRun:
            self._readerThreadRun = False
            self._readerThread.join()
        try:
            self.ser.close()
        except Exception:
            pass
    def _readLoop(self):
        """Continuously parse 'Snapshot: <leds>,<sensors>' frames from the serial port."""
        print('Read Loop started')
        while self._readerThreadRun:
            line = self.ser.readline()
            match = re.match(b'Snapshot: ([0-9]+),([0-9]+)', line)
            if match is not None:
                leds = int(match.group(1))
                sensors = int(match.group(2))
                if leds != len(self.ls.LEDs) or sensors != len(self.ls.sensors):
                    # Fix: error message said "wring" instead of "wrong".
                    print("Received wrong amount of sensor values: %i / %i; expected %i / %i" % (
                        leds, sensors, len(self.ls.LEDs), len(self.ls.sensors)))
                else:
                    try:
                        for l in range(leds):
                            line = self.ser.readline()
                            vals = line.split(b',')
                            for s in range(sensors):
                                # missing trailing values default to 0.0; clamp into [0, 1]
                                val = float(vals[s]) / self.MAX_VALUE if s < len(vals) else 0.0
                                self._sensorValues[l][s] = min(1.0, max(0.0, val))
                        print("received data")
                        self.onUpdate()
                    except Exception as e:
                        print(e)
        print('Read Loop finished')
    def measureLEDAtPoint(self, x: float, y: float, led: int = -1) -> float:
        # No measurement possible on live hardware; only whole frames are received.
        return 0.0
    def getSensorValue(self, sensor: int, led: int = -1) -> float:
        """Return the last received normalised value for `sensor`; led=-1 selects the current LED."""
        if led < 0:
            led = self.ls.selectedLED
        return self._sensorValues[led][sensor]
|
nilq/baby-python
|
python
|
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.display.display import *
from pixiedust.display import *
from .flightPredict import *
import pixiedust
import pixiedust.utils.dataFrameMisc as dataFrameMisc
from pixiedust.utils.shellAccess import ShellAccess
from pyspark.rdd import RDD
from pyspark.sql import DataFrame
from pyspark.mllib.regression import LabeledPoint
from six import iteritems, with_metaclass
myLogger = pixiedust.getLogger(__name__)
# Default departure airport shown by the UI; overridden via flightPredict(depAir=...).
initialAirport = "BOS"
@PixiedustDisplay()
class PixieDustFlightPredictPluginMeta(DisplayHandlerMeta):
    """PixieDust display-plugin registration for the flight-predict tooling.

    Declares the toolbar category, the context menus shown for recognised
    entities, and the factory that maps a menu id to its display handler.
    """
    def createCategories(self):
        # One toolbar category: "Flight Predictor".
        return [{"id":"FlightPredict","title":"Flight Predictor", "icon-path":"flightPredict.jpeg"}]
    @addId
    def getMenuInfo(self,entity, dataHandler):
        # Sentinel entities route straight to their dedicated handlers.
        if entity==self.__class__:
            return [{"id": "flightpredict"}]
        elif entity == "fp_configure_training":
            return [{"id": "fp_configure_training"}]
        elif entity == "fp_map_results":
            return [{"id": "fp_map_results"}]
        menus = []
        dataSetsValues = Configuration.getDataSets()
        # getDataSets() yields (name, value) pairs; keep only the values.
        dataSetsValues = dataSetsValues if len(dataSetsValues)==0 else list(zip(*dataSetsValues))[1]
        if entity in dataSetsValues:
            menus = menus + [
                {"categoryId": "FlightPredict", "title": "Visualize Features", "icon-path":"vizFeatures.png", "id":"fp_viz_features"},
                {"categoryId": "FlightPredict", "title": "Show Histogram", "icon-path":"vizFeatures.png", "id":"fp_histogram"}
            ]
            # Accuracy metrics need at least one trained model and labeled data.
            if len(Configuration.getModels())>0 and Configuration.getLabeledData(entity) is not None:
                menus.append(
                    {"categoryId": "FlightPredict", "title": "Measure Accuracy", "icon-path":"vizFeatures.png", "id":"fp_run_metrics"}
                )
        return menus
    def isLabeledRDD(self, entity):
        # True when `entity` is an RDD whose elements are LabeledPoint (checked by sampling one).
        if isinstance(entity,RDD):
            sample = entity.take(1)
            if sample is not None and len(sample)>0:
                return isinstance(sample[0], LabeledPoint)
        return False
    def newDisplayHandler(self,options,entity):
        """Factory: map a handler id to its display class (imported lazily)."""
        handlerId=options.get("handlerId")
        myLogger.debug("Creating a new Display Handler with id {0}".format(handlerId))
        if handlerId == "fp_viz_features":
            from . import vizFeatures
            return vizFeatures.VizualizeFeatures(options,entity)
        elif handlerId == "fp_configure_training":
            from . import configureTraining
            return configureTraining.ConfigureTraining(options,entity)
        elif handlerId == "fp_create_models":
            from . import createModels
            return createModels.CreateModels(options, entity)
        elif handlerId == "fp_histogram":
            from . import histogramDisplay
            return histogramDisplay.HistogramDisplay(options, entity)
        elif handlerId == "fp_run_metrics":
            from . import runMetrics
            return runMetrics.RunMetricsDisplay(options, entity)
        elif handlerId == "fp_map_results":
            from . import mapResults
            return mapResults.MapResultsDisplay(options, entity)
        else:
            # default: the main flight-predict UI
            options["initialAirport"] = initialAirport
            return PixieDustFlightPredict(options,entity)
def flightPredict(depAir="BOS"):
    """Show the flight-predict UI, starting from departure airport `depAir`."""
    global initialAirport
    initialAirport = depAir
    display(PixieDustFlightPredictPluginMeta)
def displayMapResults():
    """Show the map-results view (routed via the 'fp_map_results' sentinel entity)."""
    display("fp_map_results")
def configure():
    """Show the training-configuration view (routed via the sentinel entity)."""
    display("fp_configure_training")
# Class-level key/value store: the anonymous metaclass below makes both
# item access (Configuration["key"]) and attribute access (Configuration.key)
# read/write the shared `configDict`, so settings can be used without instantiation.
class Configuration(with_metaclass(
    type("",(type,),{
        "configDict":{},
        "__getitem__":lambda cls, key: cls.configDict.get(key),
        "__setitem__":lambda cls, key,val: cls.configDict.update({key:val}),
        "__getattr__":lambda cls, key: cls.configDict.get(key),
        "__setattr__":lambda cls, key, val: cls.configDict.update({key:val})
    }), object
)):
    @staticmethod
    def update(**kwargs):
        # Bulk-set configuration values.
        for key,val in iteritems(kwargs):
            Configuration[key]=val
    @staticmethod
    def getModels():
        # Any shell variable with a callable `predict` attribute counts as a model.
        return [(x,ShellAccess[x]) for x in ShellAccess if hasattr(ShellAccess[x], "predict") and callable(getattr(ShellAccess[x], "predict"))]
    @staticmethod
    def getDataSets():
        # Only the DataFrames named exactly "trainingData" or "testData" are recognised.
        return [(x,ShellAccess[x]) for x in ShellAccess if (x=="trainingData" or x=="testData") and isinstance(ShellAccess[x], DataFrame)]
    @staticmethod
    def getLabeledData(entity):
        # Return (labeled RDD, SQL table name) matching `entity`, or None.
        if ShellAccess[Configuration.DFTrainingVarName] == entity and ShellAccess[Configuration.LabeledRDDTrainingVarName] is not None:
            return (ShellAccess[Configuration.LabeledRDDTrainingVarName], Configuration.TrainingSQLTableName)
        elif ShellAccess[Configuration.DFTestVarName] == entity and ShellAccess[Configuration.LabeledRDDTestVarName] is not None:
            return (ShellAccess[Configuration.LabeledRDDTestVarName], Configuration.TestSQLTableName)
        return None
    @staticmethod
    def isReadyForRun():
        # Ready once at least one model exists and the weather service URL is set.
        return len(Configuration.getModels())>0 and Configuration.weatherUrl is not None
def loadDataSet(dbName,sqlTable):
    """Load a Cloudant database into a cached Spark DataFrame and register it as `sqlTable`.

    Requires cloudantHost/cloudantUserName/cloudantPassword in Configuration.
    Raises:
        Exception: when any Cloudant credential is missing.
    """
    if Configuration.cloudantHost is None or Configuration.cloudantUserName is None or Configuration.cloudantPassword is None:
        raise Exception("Missing credentials")
    # Reads via the IPython shell's sqlContext using the Cloudant Spark connector.
    cloudantdata = get_ipython().user_ns.get("sqlContext").read.format("com.cloudant.spark")\
        .option("cloudant.host",Configuration.cloudantHost)\
        .option("cloudant.username",Configuration.cloudantUserName)\
        .option("cloudant.password",Configuration.cloudantPassword)\
        .option("schemaSampleSize", "-1")\
        .load(dbName)
    cloudantdata.cache()
    print("Successfully cached dataframe")
    cloudantdata.registerTempTable(sqlTable)
    print("Successfully registered SQL table " + sqlTable);
    return cloudantdata
|
nilq/baby-python
|
python
|
import numpy as np
from numpy.testing import assert_equal
from terrapin.flow_direction import aread8, convert_d8_directions
# Each entry: [direction-encoding name, D8 flow-direction grid, expected flow-accumulation grid].
test_sets = [
    # source:
    # http://resources.arcgis.com/en/help/main/10.1/index.html#//009z00000051000000
    # lower right corner of flow accumulation array is 2 in url but it should be 1
    # confirmed in example in url -> http://www.nws.noaa.gov/ohd/hrl/gis/data.html
    ['esri',
     np.array([
         [  2,   2,   2,   4,   4,   8],
         [  2,   2,   2,   4,   4,   8],
         [  1,   1,   2,   4,   8,   4],
         [128, 128,   1,   2,   4,   8],
         [  2,   2,   1,   4,   4,   4],
         [  1,   1,   1,   1,   4,  16],
     ]),
     np.array([
         [0, 0, 0, 0, 0, 0],
         [0, 1, 1, 2, 2, 0],
         [0, 3, 7, 5, 4, 0],
         [0, 0, 0, 20, 0, 1],
         [0, 0, 0, 1, 24, 0],
         [0, 2, 4, 7, 35, 1] #
     ])
    ],
    # source: http://www.geo.uzh.ch/microsite/geo372/PDF/GEO372_W7_Hydrology_2013.pdf
    ['esri',
     np.array([
         [32, 16, 16, 16, 16, 16],
         [64, 32, 16, 32, 16, 16],
         [64, 64, 32, 64, 64, 32],
         [64, 32, 32, 32, 32, 32],
         [64, 32, 16, 32, 32, 32],
         [64, 16, 32, 32, 32, 16],
     ]),
     np.array([
         [35, 13, 12, 2, 1, 0],
         [10, 9, 0, 8, 4, 0],
         [ 9, 4, 2, 2, 1, 0],
         [ 7, 0, 3, 1, 1, 0],
         [ 2, 3, 1, 2, 0, 0],
         [ 1, 0, 0, 0, 1, 0],
     ])
    ],
    # source: http://www.geospatialworld.net/paper/application/ArticleView.aspx?aid=1356
    ['esri',
     np.array([
         [ 1, 64, 1, 64, 16, 16],
         [ 4, 64, 32, 64, 32, 4],
         [16, 16, 16, 1, 1, 1],
         [64, 32, 2, 4, 8, 4],
         [ 4, 4, 1, 4, 16, 4],
         [ 4, 4, 1, 4, 16, 16],
     ]),
     np.array([
         [0, 3, 0, 5, 1, 0], # the 5 is a 9 in the paper which is wrong.
         [0, 0, 0, 0, 0, 0],
         [5, 1, 0, 0, 1, 3],
         [0, 0, 0, 0, 0, 0],
         [0, 0, 0, 5, 0, 1],
         [1, 1, 0, 11, 3, 2], # the 11 is a 9 in the paper which is wrong.
     ])
    ],
]
def test_flow_accumulation():
    """Check aread8 flow accumulation against published reference grids."""
    for fmt, d8, area in test_sets:
        # convert from the named encoding into terrapin's internal D8 codes
        d8 = convert_d8_directions(d8, fmt, inverse=True)
        a = aread8(d8)
        a.accumulate()
        assert_equal(area, a.accumulation)
|
nilq/baby-python
|
python
|
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from typing import (
Dict,
ItemsView,
Iterable,
KeysView,
List,
Mapping,
NamedTuple,
OrderedDict,
ValuesView,
)
from braket.circuits.instruction import Instruction
from braket.circuits.qubit import Qubit
from braket.circuits.qubit_set import QubitSet
class MomentsKey(NamedTuple):
    """Key of the Moments mapping."""
    time: int  # time slice the instruction occupies
    qubits: QubitSet  # qubits targeted by the instruction
class Moments(Mapping[MomentsKey, Instruction]):
    """
    An ordered mapping of `MomentsKey` to `Instruction`. The core data structure that
    contains instructions, ordering they are inserted in, and time slices when they
    occur. `Moments` implements `Mapping` and functions the same as a read-only
    dictionary. It is mutable only through the `add()` method.
    This data structure is useful to determine a dependency of instructions, such as
    printing or optimizing circuit structure, before sending it to a quantum
    device. The original insertion order is preserved and can be retrieved via the `values()`
    method.
    Args:
        instructions (Iterable[Instruction], optional): Instructions to initialize self.
            Default = [].
    Examples:
        >>> moments = Moments()
        >>> moments.add([Instruction(Gate.H(), 0), Instruction(Gate.CNot(), [0, 1])])
        >>> moments.add([Instruction(Gate.H(), 0), Instruction(Gate.H(), 1)])
        >>> for i, item in enumerate(moments.items()):
        ...     print(f"Item {i}")
        ...     print(f"\\tKey: {item[0]}")
        ...     print(f"\\tValue: {item[1]}")
        ...
        Item 0
        Key: MomentsKey(time=0, qubits=QubitSet([Qubit(0)]))
        Value: Instruction('operator': H, 'target': QubitSet([Qubit(0)]))
        Item 1
        Key: MomentsKey(time=1, qubits=QubitSet([Qubit(0), Qubit(1)]))
        Value: Instruction('operator': CNOT, 'target': QubitSet([Qubit(0), Qubit(1)]))
        Item 2
        Key: MomentsKey(time=2, qubits=QubitSet([Qubit(0)]))
        Value: Instruction('operator': H, 'target': QubitSet([Qubit(0)]))
        Item 3
        Key: MomentsKey(time=2, qubits=QubitSet([Qubit(1)]))
        Value: Instruction('operator': H, 'target': QubitSet([Qubit(1)]))
    """
    def __init__(self, instructions: Iterable[Instruction] = []):
        # NOTE(review): mutable default [] is safe only because it is never mutated here.
        self._moments: OrderedDict[MomentsKey, Instruction] = OrderedDict()
        self._max_times: Dict[Qubit, int] = {}  # last occupied time slice per qubit
        self._qubits = QubitSet()
        self._depth = 0
        self.add(instructions)
    @property
    def depth(self) -> int:
        """int: Get the depth (number of slices) of self."""
        return self._depth
    @property
    def qubit_count(self) -> int:
        """int: Get the number of qubits used across all of the instructions."""
        return len(self._qubits)
    @property
    def qubits(self) -> QubitSet:
        """
        QubitSet: Get the qubits used across all of the instructions. The order of qubits is based
        on the order in which the instructions were added.
        Note:
            Don't mutate this object, any changes may impact the behavior of this class and / or
            consumers. If you need to mutate this, then copy it via `QubitSet(moments.qubits())`.
        """
        return self._qubits
    def time_slices(self) -> Dict[int, List[Instruction]]:
        """
        Get instructions keyed by time.
        Returns:
            Dict[int, List[Instruction]]: Key is the time and value is a list of instructions that
            occur at that moment in time. The order of instructions is in no particular order.
        Note:
            This is a computed result over self and can be freely mutated. This is re-computed with
            every call, with a computational runtime O(N) where N is the number
            of instructions in self.
        """
        time_slices = {}
        for key, instruction in self._moments.items():
            instructions = time_slices.get(key.time, [])
            instructions.append(instruction)
            time_slices[key.time] = instructions
        return time_slices
    def add(self, instructions: Iterable[Instruction]) -> None:
        """
        Add instructions to self.
        Args:
            instructions (Iterable[Instruction]): Instructions to add to self. The instruction
                is added to the max time slice in which the instruction fits.
        """
        for instruction in instructions:
            self._add(instruction)
    def _add(self, instruction: Instruction) -> None:
        qubit_range = instruction.target
        # earliest slice after every target qubit is free
        time = max([self._max_time_for_qubit(qubit) for qubit in qubit_range]) + 1
        # Mark all qubits in qubit_range with max_time
        for qubit in qubit_range:
            self._max_times[qubit] = max(time, self._max_time_for_qubit(qubit))
        self._moments[MomentsKey(time, instruction.target)] = instruction
        self._qubits.update(instruction.target)
        self._depth = max(self._depth, time + 1)
    def _max_time_for_qubit(self, qubit: Qubit) -> int:
        # -1 means the qubit has not been used yet, so the first slice is 0.
        return self._max_times.get(qubit, -1)
    #
    # Implement abstract methods, default to calling selfs underlying dictionary
    #
    def keys(self) -> KeysView[MomentsKey]:
        """Return a view of self's keys."""
        return self._moments.keys()
    def items(self) -> ItemsView[MomentsKey, Instruction]:
        """Return a view of self's (key, instruction)."""
        return self._moments.items()
    def values(self) -> ValuesView[Instruction]:
        """Return a view of self's instructions."""
        return self._moments.values()
    def get(self, key: MomentsKey, default=None) -> Instruction:
        """
        Get the instruction in self by key.
        Args:
            key (MomentsKey): Key of the instruction to fetch.
            default (Any, optional): Value to return if `key` is not in `moments`. Default = `None`.
        Returns:
            Instruction: `moments[key]` if `key` in `moments`, else `default` is returned.
        """
        return self._moments.get(key, default)
    def __getitem__(self, key):
        return self._moments.__getitem__(key)
    def __iter__(self):
        return self._moments.__iter__()
    def __len__(self):
        return self._moments.__len__()
    def __contains__(self, item):
        return self._moments.__contains__(item)
    def __eq__(self, other):
        if isinstance(other, Moments):
            return (self._moments) == (other._moments)
        return NotImplemented
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is not NotImplemented:
            return not result
        return NotImplemented
    def __repr__(self):
        return self._moments.__repr__()
    def __str__(self):
        return self._moments.__str__()
|
nilq/baby-python
|
python
|
# Copyright 2019 Adobe
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it. If you have received this file from a source other than Adobe,
# then your use, modification, or distribution of it requires the prior
# written permission of Adobe.
#
from result import Ok, Err
from protector.rules.rule import Rule
import time
class RuleChecker(Rule):
    """Throttle queries whose previous execution was too slow.

    Two modes:
      * adaptive: block for `adaptive` x the previous execution time;
      * static: if the previous run exceeded `limit` seconds, block for
        `throttle` seconds.
    """
    def __init__(self, conf):
        # adaptive multiplier. ie. 1 means query is throttled by an amount of time equal to previous execution time
        # preempts static throttling
        self.adaptive = conf.get('adaptive', 0)
        # static throttling
        # NOTE(review): assumes 'limit' and 'throttle' are present in conf when
        # adaptive is disabled; a missing key would make check() fail — confirm.
        if not self.adaptive:
            self.max_duration = conf.get('limit')
            self.throttle_duration = conf.get('throttle')
    @staticmethod
    def description():
        return "Throttle lengthy queries"
    @staticmethod
    def reason():
        return ["Such queries can bring down the time series database",
                "usually performing long and inefficient scans or aggregations"]
    def check(self, query):
        """
        :param query OpenTSDBQuery
        """
        stats = query.get_stats()
        current_time = int(round(time.time()))
        if stats:
            duration = float(stats.get('duration', 0))
            last_occurence = int(stats.get('timestamp', 0))
            elapsed = current_time - last_occurence
            if self.adaptive:
                # throttle window scales with how long the query took last time
                adaptive_throttle_duration = duration * self.adaptive
                if elapsed < adaptive_throttle_duration:
                    remaining = adaptive_throttle_duration - elapsed
                    return Err("Adaptive throttling: {}x Last duration: {}s Throttling ends in {}s".format(self.adaptive, duration, remaining))
            else:
                # static throttling: only queries that exceeded the limit are delayed
                if self.max_duration <= duration:
                    if elapsed < self.throttle_duration:
                        remaining = self.throttle_duration - elapsed
                        return Err("Query duration exceeded: {}s Limit: {}s Throttling ends in {}s".format(duration, self.max_duration, remaining))
        return Ok(True)
|
nilq/baby-python
|
python
|
import os
import sys
from time import sleep
# Program banner
print('-' * 41)
print(' Calculadora Simples Version 1.0')
print('-' * 41)
print(' Seja bem-vindo!')
print('-' * 41)
sleep(3)
os.system('clear')
# Read the two operands used by every menu option
n1 = int(input('1# Digite um valor: '))
n2 = int(input('2# Digite um valor: '))
os.system('clear')
# Cosmetic loading screen
print('-' * 41)
print(' Carregando Opções!')
print('-' * 41)
sleep(2)
os.system('clear')
# Menu option variable and the program's main loop (option 6 exits)
opt = 0
while opt != 6:
    print(''' # Menu de Opções #
    [ 1 ] + (somar)
    [ 2 ] x (multiplicar)
    [ 3 ] - (subtrair)
    [ 4 ] ÷ (dividir)
    [ 5 ] CE
    [ 6 ] ON/OFF
    ''')
    opt = float(input(' >>> Opção: '))
    if opt == 1:
        # addition
        somar = n1 + n2
        os.system('clear')
        print('\nResultado: {} + {} = {}'.format(n1, n2, somar))
        sleep(5)
        os.system('clear')
    elif opt == 2:
        # multiplication
        multiplicar = n1 * n2
        os.system('clear')
        print('\nResultado: {} x {} = {}'.format(n1, n2, multiplicar))
        sleep(5)
        os.system('clear')
    elif opt == 3:
        # subtraction
        subtrair = n1 - n2
        os.system('clear')
        print('\nResultado: {} - {} = {}'.format(n1, n2, subtrair))
        sleep(5)
        os.system('clear')
    elif opt == 4:
        # division — NOTE(review): n2 == 0 raises ZeroDivisionError; no guard here
        dividir = n1 / n2
        os.system('clear')
        print('\nResultado: {} ÷ {} = {:.2f}'.format(n1, n2, dividir))
        sleep(5)
        os.system('clear')
    elif opt == 5:
        # CE: re-read both operands
        os.system('clear')
        print('Informe os números novamente: ')
        n1 = float(input('\n1# Digite um valor: '))
        n2 = float(input('2# Digite um valor: '))
        os.system('clear')
        print('-' * 41)
        print(' Carregando Opções!')
        print('-' * 41)
        sleep(2)
        os.system('clear')
    elif opt == 6:
        # ON/OFF: farewell screen, then exit the process
        os.system('clear')
        print('-' * 41)
        print(' Calculadora Simples - 1.0')
        print(' Finalizando, Volte sempre')
        print('-' * 41)
        sleep(5)
        sys.exit(0)
    else:
        os.system('clear')
        print('Opção inválida, Tente novamente.')
        sleep(3)
        os.system('clear')
|
nilq/baby-python
|
python
|
from __future__ import division
import os, time, scipy.io
import tensorflow as tf
import numpy as np
from PIL import Image
tf.logging.set_verbosity(tf.logging.INFO)
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from dataset import SID_dataset as SID_dataset
from network import Netowrk as Netowrk
###############
## Data
##############
# Sony subset of the SID (See-in-the-Dark) data, edge-training stage; cache it in RAM.
set_name = 'Sony'
data = SID_dataset(set_name=set_name, stage='EDGE', debug=False, patch_size=512)
data.save_training_data_into_memory()
def train_input_fn():
    """An input function for training"""
    dataset = data.get_edge_training_dataset()
    return dataset
def test_input_fn():
    """An input function for evaluation (edge test split)."""
    dataset = data.get_edge_test_dataset()
    return dataset
###############
## Model
##############
# Checkpoints and summaries for this run go here.
model_dir = "../result/" + set_name + "_20190628_edge"
def model_fn(features, labels, mode):
    """Estimator model_fn for the edge-prediction network (TF1 Estimator API).

    features: dict with 'image' (packed raw input) and 'mask'; labels: ground-truth image.
    Builds deep-supervision edge losses over all side outputs plus the fused output.
    """
    input = features['image']  # NOTE(review): shadows the builtin `input`; kept as-is
    input = tf.space_to_depth(input, data.ratio_packed)
    gt_mask = features['mask']
    gt_mask = tf.space_to_depth(gt_mask, data.ratio_packed)
    gt_image = labels
    gt_edge_ = tf.space_to_depth(gt_image, data.ratio_packed)
    network = Netowrk(set_name=set_name)
    [s1_out, s2_out, s3_out, s4_out, s5_out, fuse_out] = network.edge(input)
    vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for v in vars: print(v)
    # Deep supervision: sum the edge loss over every side output and the fused output.
    loss = 0 #network.edge_loss(fuse_out, gt_edge_, gt_mask)
    for gen_edge in [s1_out, s2_out, s3_out, s4_out, s5_out, fuse_out]:
        loss += network.edge_loss(gen_edge, gt_edge_, gt_mask)
    gen_edge = tf.sigmoid(fuse_out)
    gen_edge = tf.depth_to_space(gen_edge, data.ratio_packed)
    gt_edge = gt_image
    # Clamp to [0, 255] uint8 for image summaries.
    out_image_cut = tf.cast(tf.minimum(tf.maximum(gen_edge * 255, 0), 255), tf.uint8)
    gt_image_cut = tf.cast(tf.minimum(tf.maximum(gt_edge * 255, 0), 255), tf.uint8)
    # Warm-start the edge branch from a previously trained checkpoint.
    tf.train.init_from_checkpoint('../result/Sony_20190311_edge/model.ckpt-785138', {'edge/': 'edge/'})
    if mode == tf.estimator.ModeKeys.EVAL:
        summary_hook = tf.train.SummarySaverHook(
            500,
            output_dir=model_dir + '/eval',
            summary_op=[
                tf.summary.image('result_gt_image', tf.concat((out_image_cut, gt_image_cut), axis=2), max_outputs=1)]
        )
        eval_metric_ops = {'SSIM': tf.metrics.mean(tf.image.ssim(gen_edge, gt_image, max_val=1.0)),
                           'PSN': tf.metrics.mean(tf.image.psnr(gen_edge, gt_image, max_val=1.0)),
                           }
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=eval_metric_ops, evaluation_hooks=[summary_hook])
    # if mode == tf.estimator.ModeKeys.PREDICT:
    #
    #     predictions = {"gen_image": gen_image
    #                    }
    #     return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Optimizer: step learning-rate drop at 160 * 2500 global steps.
        learning_rate = tf.train.piecewise_constant(tf.train.get_global_step(), [160 * 2500], [1e-4, 1e-5])
        train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, global_step=tf.train.get_global_step())
        # Summary
        tf.summary.scalar('learning_rate', learning_rate)
        tf.summary.image('result_gt_image', tf.concat((out_image_cut, gt_image_cut), axis=2), max_outputs=1)
        tf.summary.scalar('PSN', tf.reduce_mean(tf.image.psnr(gen_edge, gt_image, max_val=1.0)))
        tf.summary.scalar('SSIM', tf.reduce_mean(tf.image.ssim(gen_edge, gt_image, max_val=1.0)))
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
###############
## Estimator
##############
# Mirror the model across available GPUs; checkpoint/summary cadence configured below.
distribution = tf.contrib.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(
    train_distribute=distribution,
    model_dir=model_dir,
    save_checkpoints_secs=30 * 60,  # Save checkpoints every 30 minutes.
    keep_checkpoint_max=1,  # Retain only the most recent checkpoint.
    save_summary_steps=100000
)
estimator = tf.estimator.Estimator(model_fn=model_fn, config=config)
###############
## Train
###############
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=160 * 5000)#
eval_spec = tf.estimator.EvalSpec(input_fn=test_input_fn, throttle_secs=1000, steps=data.n_test)
st = time.time()
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
print("Time=%.3f" % (time.time() - st))
st = time.time()
# Final evaluation over the full test split.
eval = estimator.evaluate(input_fn=test_input_fn, steps=data.n_test)
print("Time=%.3f" % (time.time() - st))
print(eval)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
from hoppy.timing.timing import EventFits
__author__ = 'Teruaki Enoto'
__date__ = '2020 September 5'
__version__ = '0.01'
def get_parser():
    """Create the command-line argument parser for this script.

    Returns
    -------
    argparse.ArgumentParser
        Parser with three positional arguments (infits, outpha,
        filter_expression) and one option (--exposure_fraction).
    """
    usage = """
plot event fits file (PULSE_PHASE)
"""
    parser = argparse.ArgumentParser(
        prog='fextract_phase_spectrum.py',
        usage='fextract_phase_spectrum.py infits filter_expression',
        description=usage,
        epilog='',
        add_help=True)
    # NOTE: the previously-assigned `version` string was never used (no
    # --version action was registered), so it has been removed.
    parser.add_argument('infits', metavar='infits', type=str,
        help='Input event fits file.')
    parser.add_argument('outpha', metavar='outpha', type=str,
        help='Output phafile.')
    # `default` has no effect on a required positional; kept for compatibility.
    parser.add_argument('filter_expression', metavar='filter_expression', type=str, default=None,
        help='filter expression')
    parser.add_argument('--exposure_fraction', metavar='exposure_fraction', type=float, default=1.0,
        help='exposure fraction (default:1.0)')
    return parser
def fextract_phase_spectrum(args):
    """Extract a phase-filtered spectrum from an event FITS file.

    Opens ``args.infits`` and delegates to EventFits.extract_phase_spectrum
    with the output path, filter expression and exposure fraction.
    """
    event_file = EventFits(args.infits)
    event_file.extract_phase_spectrum(
        args.outpha,
        args.filter_expression,
        exposure_fraction=args.exposure_fraction)
    print("finished...\n")
def main(args=None):
    """Script entry point: parse arguments, then run the extraction."""
    parsed = get_parser().parse_args(args)
    fextract_phase_spectrum(parsed)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class MainConfig(AppConfig):
    """Django application configuration for the ``main`` app."""

    # App label used by Django's app registry.
    name = 'main'

    # External social-media links.
    # NOTE(review): presumably consumed by templates/views — confirm at call sites.
    SOCIALS = {
        'vk': 'https://vk.com/goto_msk',
        'instagram': 'https://www.instagram.com/goto_goto_goto/',
        'facebook': 'https://www.facebook.com/GoToCampPage/',
        'telegram': 'https://t.me/goto_channel'
    }
|
nilq/baby-python
|
python
|
'''
Based on https://stackoverflow.com/questions/44164749/how-does-keras-handle-multilabel-classification
'''
import warnings
try:
    from keras.models import Sequential
    from keras.layers import Dense, Dropout
    from keras.optimizers import SGD

    def keras_fit(T, Y, **kwargs):
        """Fit one single-output model per column of Y.

        Returns a list of prediction functions, one per label column.
        """
        if Y.ndim == 1:
            # Reshape a *local* view instead of assigning Y.shape, which
            # mutated the caller's array in place.
            Y = Y.reshape(-1, 1)
        fitfns = []
        for j in range(Y.shape[1]):
            fit_fn = keras_fit_multilabel(T, Y[:, j], **kwargs)[0]
            fitfns.append(fit_fn)
        return fitfns

    def keras_fit_multilabel(T, Y, sizes=None, epochs=50, activation='relu', dropout=0, **ignored):
        """Fit a single multi-label MLP with sigmoid outputs.

        Returns a list of prediction functions, one per output column.
        `sizes` lists the hidden-layer widths (default [500, 500]).
        """
        if sizes is None:
            sizes = [500, 500]  # avoid the mutable-default-argument pitfall
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)  # local reshape; caller's array untouched
        model = Sequential()
        for layer_index, s in enumerate(sizes):
            if layer_index == 0:
                # Only the first layer needs the input dimension.
                model.add(Dense(s, activation=activation, input_dim=T.shape[1]))
            else:
                model.add(Dense(s, activation=activation))
            if dropout > 0:
                model.add(Dropout(dropout))
        # the final layer
        model.add(Dense(Y.shape[1], activation='sigmoid'))
        sgd = SGD(lr=0.03, decay=1e-3, momentum=0.6, nesterov=True)
        model.compile(loss='binary_crossentropy',
                      optimizer=sgd)
        model.fit(T, Y, epochs=epochs)
        # BUG FIX: bind j as a default argument. The original closures
        # late-bound j, so every returned function predicted the LAST column.
        fitfns = [
            (lambda T_test, _j=j: model.predict(T_test)[:, _j])
            for j in range(Y.shape[1])
        ]
        return fitfns
except ImportError:
    warnings.warn('module `keras` not importable, `keras_fit` and `keras_fit_multilabel` will not be importable')
|
nilq/baby-python
|
python
|
import numpy as np
from z3 import And, Implies

from quavl.lib.expressions.qbit import Qbits
from quavl.lib.models.circuit import Circuit, Method
from quavl.lib.operations.gates import Rx, Rz, V, CNOT, V_dag
def repair_toffoli():
    """Prove a (deliberately incomplete) Toffoli decomposition and ask
    QuAVL to synthesize a repair.

    The circuit omits one CNOT of the standard V/V_dag decomposition, so the
    specification should fail and repair synthesis should kick in.
    """
    # Initialize circuit
    a, b, c = Qbits(['a', 'b', 'c'])
    n = 3
    circuit = Circuit([a, b, c],
                      [
                          V(a).controlled_by(c),
                          V(a).controlled_by(b),
                          # CNOT(b, c),
                          V_dag(a).controlled_by(b),
                          CNOT(b, c)
                      ])
    # Each qbit starts in a classical basis state.
    initial_values = [{(1, 0), (0, 1)} for _ in range(n)]
    circuit.initialize_qbits(initial_values)
    # Build specification.
    # BUG FIX: the premise must be a z3 conjunction. Python's `and` between
    # two symbolic z3 booleans does not build And(x, y) — it truth-tests the
    # first operand, producing the wrong (or an invalid) constraint.
    final_qbits = circuit.get_final_qbits()
    circuit.solver.add(Implies(And(b.alpha.r == 1, c.alpha.r == 1),
                               final_qbits[0].beta.r == a.alpha.r))
    # Prove and repair
    circuit.prove(method=Method.qbit_sequence_model,
                  dump_smt_encoding=True,
                  dump_solver_output=True,
                  synthesize_repair=True)
def prove_repaired_toffoli():
    """Re-prove the Toffoli specification with the synthesized repair
    rotations (Rx/Rz per qbit) appended to the circuit.

    The rep_theta_*/rep_phi_* intervals come from a previous repair run;
    their midpoints are used as the rotation angles.
    """
    rep_theta_0 = [-0.10000000000000001, -0.099999999999999992]
    rep_phi_0 = [0.099999999999999992, 0.10000000000000001]
    rep_theta_1 = [-0.10000000000000001, -0.099999999999999992]
    rep_phi_1 = [0.099999999999999992, 0.10000000000000001]
    rep_theta_2 = [-0.10000000000000001, -0.099999999999999992]
    rep_phi_2 = [0.099999999999999992, 0.10000000000000001]
    # Initialize circuit
    a, b, c = Qbits(['a', 'b', 'c'])
    n = 3
    circuit = Circuit([a, b, c],
                      [
                          V(a).controlled_by(c),
                          V(a).controlled_by(b),
                          # CNOT(b, c),
                          V_dag(a).controlled_by(b),
                          CNOT(b, c),
                          Rx(a, np.mean(rep_theta_0)),
                          Rz(a, np.mean(rep_phi_0)),
                          Rx(b, np.mean(rep_theta_1)),
                          Rz(b, np.mean(rep_phi_1)),
                          Rx(c, np.mean(rep_theta_2)),
                          Rz(c, np.mean(rep_phi_2))
                      ])
    initial_values = [{(1, 0), (0, 1)} for _ in range(n)]
    circuit.initialize_qbits(initial_values)
    # Build specification.
    # BUG FIX: use z3.And — Python's `and` does not conjoin symbolic booleans.
    final_qbits = circuit.get_final_qbits()
    circuit.solver.add(Implies(And(b.alpha.r == 1, c.alpha.r == 1),
                               final_qbits[0].beta.r == a.alpha.r))
    # Prove and repair
    circuit.prove(method=Method.qbit_sequence_model,
                  dump_smt_encoding=True,
                  dump_solver_output=True,
                  synthesize_repair=True)
if __name__ == "__main__":
    # Run the repair attempt, then verify the repaired circuit.
    repair_toffoli()
    prove_repaired_toffoli()
|
nilq/baby-python
|
python
|
from . import Base
from sqlalchemy import Column, String, Integer, Enum as EnumCol, Date, ForeignKey
from sqlalchemy.orm import relationship
from enum import Enum
class TaskType(Enum):
    """Kind of a Task row; stored as a plain string (native_enum=False)."""
    Note = "Note"
    Task = "Task"
class Task(Base):
    """A task/note with a deadline and optional file attachments."""

    __tablename__ = "task"

    id = Column(Integer, primary_key=True, autoincrement=True)
    # Unique human-readable title.
    name = Column(String, nullable=False, unique=True)
    # Date the task must be finished by — inferred from the name; confirm.
    finished_by = Column(Date, nullable=False)
    # Persisted as a bare VARCHAR: no DB enum type, no CHECK constraint.
    type = Column(EnumCol(TaskType, create_constraint=False, native_enum=False), nullable=False)
    # One-to-many link to TaskAttachment rows.
    attachments = relationship("TaskAttachment")
class TaskAttachment(Base):
    """A file attached to a Task; `location` is its storage path/URI."""

    __tablename__ = "task_attachment"

    id = Column(Integer, primary_key=True, autoincrement=True)
    task_id = Column(Integer, ForeignKey(Task.id), nullable=False)
    # Where the attachment lives (path or URL — confirm against writers).
    location = Column(String, nullable=False)
|
nilq/baby-python
|
python
|
"""Core Module
==============
This module includes the abstraction of a Simulation Model and the definition of a Path.
"""
from .base_model import BaseModel, ModelState
from .path import Path
|
nilq/baby-python
|
python
|
import pulp
from graphviz import Digraph
from discord import Embed, File
from tabulate import tabulate
import math
import ada.emoji
from ada.result_message import ResultMessage
from ada.breadcrumbs import Breadcrumbs
class HelpResult:
    """Static help text for the `help` query; rendered as one Discord embed."""

    def __str__(self):
        # This is a runtime string sent to Discord (markdown with ``` code
        # fences), not a docstring — its exact content matters.
        return """
ADA is a bot for the videogame Satisfactory.
ADA can be used to get information about items,
buildings, and recipes. ADA can also be used to
calculate an optimal production chain. Here are
some examples of queries that ADA supports:
```
ada iron rod
```
```
ada recipes for iron rod
```
```
ada recipes for refineries
```
```
ada produce 60 iron rods
```
```
ada produce 60 iron rod from ? iron ore
```
```
ada produce ? iron rods from 60 iron ore
```
```
ada produce ? power from 240 crude oil with only
fuel generators
```
```
ada produce 60 modular frames without refineries
```
For more information and examples, see [the GitHub page](https://github.com/ScottJDaley/ada).
"""

    def messages(self, breadcrumbs):
        # Wrap the help text in a single embed; content carries the breadcrumb trail.
        message = ResultMessage()
        message.embed = Embed(title="Help")
        message.embed.description = str(self)
        message.content = str(breadcrumbs)
        return [message]

    def handle_reaction(self, emoji, breadcrumbs):
        # Help has no reaction-driven follow-up query.
        return None
class ErrorResult:
    """Result wrapping an error message for display in Discord."""

    def __init__(self, msg):
        self.__msg = msg

    def __str__(self):
        return self.__msg

    def messages(self, breadcrumbs):
        """Return a single-message list with the error in an embed."""
        result = ResultMessage()
        embed = Embed(title="Error")
        embed.description = self.__msg
        result.embed = embed
        result.content = str(breadcrumbs)
        return [result]

    def handle_reaction(self, emoji, breadcrumbs):
        """Errors define no reaction behavior."""
        return None
class InfoResult:
    """Result listing one or more matched entities, paginated for Discord."""

    # Entries shown per page.
    num_on_page = 9

    def __init__(self, vars_, raw_query):
        # Sort by display name so pages are stable across renders.
        self._vars = sorted(vars_, key=lambda var_: var_.human_readable_name())
        self._raw_query = raw_query
        # When True, number-emoji selectors are rendered next to entries.
        self._add_reaction_selectors = False

    def __str__(self):
        if len(self._vars) == 1:
            return self._vars[0].details()
        var_names = [var.human_readable_name() for var in self._vars]
        var_names.sort()
        return "\n".join(var_names)

    def _num_pages(self):
        return math.ceil(len(self._vars) / InfoResult.num_on_page)

    def _footer(self, page):
        return "Page " + str(page) + " of " + str(self._num_pages())

    def _get_var_on_page(self, page, index):
        # Pages are 1-based; index is 0-based within the page.
        var_index = (page - 1) * InfoResult.num_on_page + index
        return self._vars[var_index]

    def _get_info_page(self, breadcrumbs):
        # Render one page of names plus navigation/selection reactions.
        var_names = [var.human_readable_name() for var in self._vars]
        start_index = (breadcrumbs.page() - 1) * InfoResult.num_on_page
        last_index = start_index + InfoResult.num_on_page
        vars_on_page = var_names[start_index:last_index]
        out = []
        message = ResultMessage()
        for i, var_ in enumerate(vars_on_page):
            prefix = ""
            if self._add_reaction_selectors:
                # Number emoji let the user pick an entry by reacting.
                prefix = ada.emoji.NUM_EMOJI[i+1]
                message.reactions.append(prefix)
            out.append("- " + prefix + var_)
        if not self._add_reaction_selectors:
            # Selector mode off: offer page navigation and the info toggle.
            message.reactions = []
            if breadcrumbs.page() > 1:
                message.reactions.append(ada.emoji.PREVIOUS_PAGE)
            message.reactions.append(ada.emoji.INFO)
            if breadcrumbs.page() < self._num_pages():
                message.reactions.append(ada.emoji.NEXT_PAGE)
        message.embed = Embed(
            title="Found " + str(len(self._vars)) + " matches:")
        message.embed.description = "\n".join(out)
        message.embed.set_footer(text=self._footer(breadcrumbs.page()))
        message.content = str(breadcrumbs)
        return [message]

    def messages(self, breadcrumbs):
        if len(self._vars) == 0:
            message = ResultMessage()
            message.embed = Embed(title="No matches found")
            message.content = str(breadcrumbs)
            return [message]
        if len(self._vars) > 1:
            return self._get_info_page(breadcrumbs)
        # Exactly one match: show its details embed directly.
        message = ResultMessage()
        message.embed = self._vars[0].embed()
        message.content = str(breadcrumbs)
        message.reactions = [ada.emoji.PREVIOUS_PAGE]
        return [message]

    def handle_reaction(self, emoji, breadcrumbs):
        # Returns a follow-up query string, or None to stay on this result.
        query = None
        if emoji == ada.emoji.INFO:
            self._add_reaction_selectors = True
        elif emoji == ada.emoji.PREVIOUS_PAGE and breadcrumbs.has_prev_query():
            breadcrumbs.goto_prev_query()
            query = breadcrumbs.primary_query()
        elif emoji == ada.emoji.NEXT_PAGE and breadcrumbs.page() < self._num_pages():
            breadcrumbs.goto_next_page()
        elif emoji == ada.emoji.PREVIOUS_PAGE and breadcrumbs.page() > 1:
            breadcrumbs.goto_prev_page()
        elif emoji in ada.emoji.NUM_EMOJI:
            # Convert the emoji back to a page-local index and jump to that entry.
            index = ada.emoji.NUM_EMOJI.index(emoji) - 1
            selected_var = self._get_var_on_page(breadcrumbs.page(), index)
            query = selected_var.human_readable_name()
            breadcrumbs.add_query(query)
        return query
class OptimizationResult:
    """Result of a production-chain LP optimization over the game database."""

    def __init__(self, db, vars_, prob, status, query):
        self.__db = db
        self.__prob = prob  # pulp problem, kept for its objective value
        self.__vars = vars_  # var name -> pulp variable
        self.__status = status  # pulp.LpStatus* code
        self.__query = query
        # Dictionaries from var -> (obj, value)
        # TODO: Use these in the functions below
        # Items consumed: negative LP values, stored positive here.
        self.__inputs = {
            item.var(): (item, -self.__get_value(item.var()))
            for item in self.__db.items().values()
            if self.__has_value(item.var()) and self.__get_value(item.var()) < 0
        }
        # Items produced: positive LP values.
        self.__outputs = {
            item.var(): (item, self.__get_value(item.var()))
            for item in self.__db.items().values()
            if self.__has_value(item.var()) and self.__get_value(item.var()) > 0
        }
        self.__recipes = {
            recipe.var(): (recipe, self.__get_value(recipe.var()))
            for recipe in self.__db.recipes().values()
            if self.__has_value(recipe.var())
        }
        self.__crafters = {
            crafter.var(): (crafter, self.__get_value(crafter.var()))
            for crafter in self.__db.crafters().values()
            if self.__has_value(crafter.var())
        }
        self.__generators = {
            generator.var(): (generator, self.__get_value(generator.var()))
            for generator in self.__db.generators().values()
            if self.__has_value(generator.var())
        }
        # Net power balance of the solution; 0 when the "power" var is unset.
        self.__net_power = self.__get_value(
            "power") if self.__has_value("power") else 0

    def inputs(self):
        return self.__inputs

    def outputs(self):
        return self.__outputs

    def recipes(self):
        return self.__recipes

    def crafters(self):
        return self.__crafters

    def generators(self):
        return self.__generators

    def net_power(self):
        return self.__net_power

    def __has_value(self, var):
        # Truthy only for variables solved to a non-zero (and non-None) value.
        return self.__vars[var].value() and self.__vars[var].value() != 0

    def __get_value(self, var):
        return self.__vars[var].value()

    def __get_vars(self, objs, check_value=lambda val: True, suffix=""):
        # Lines like "Iron Rod: 42/m" for each obj whose value passes check_value.
        out = []
        for obj in objs:
            var = obj.var()
            if self.__has_value(var) and check_value(self.__get_value(var)):
                out.append(obj.human_readable_name() +
                           ": " + str(round(abs(self.__get_value(var)), 2)) + suffix)
        return out

    def __get_section(self, title, objs, check_value=lambda val: True, suffix=""):
        # Titled text section, or [] when nothing matched.
        # NOTE(review): the first two statements are dead code — `out` is
        # rebuilt after the emptiness check below.
        out = []
        out.append(title)
        vars_ = self.__get_vars(objs, check_value=check_value, suffix=suffix)
        if len(vars_) == 0:
            return []
        out = []
        out.append(title)
        out.extend(vars_)
        out.append("")
        return out

    def __string_solution(self):
        # Plain-text rendering of the optimal solution, section by section.
        out = []
        out.append(str(self.__query))
        out.append("=== OPTIMAL SOLUTION FOUND ===\n")
        out.extend(self.__get_section(
            "INPUT", self.__db.items().values(), check_value=lambda val: val < 0, suffix="/m"))
        out.extend(self.__get_section(
            "OUTPUT", self.__db.items().values(), check_value=lambda val: val > 0, suffix="/m"))
        # out.extend(self.__get_section("INPUT", [item.input() for item in self.__db.items().values()]))
        # out.extend(self.__get_section("OUTPUT", [item.output() for item in self.__db.items().values()]))
        out.extend(self.__get_section("RECIPES", self.__db.recipes().values()))
        out.extend(self.__get_section(
            "CRAFTERS", self.__db.crafters().values()))
        out.extend(self.__get_section(
            "GENERATORS", self.__db.generators().values()))
        out.append("NET POWER")
        net_power = 0
        if self.__has_value("power"):
            net_power = self.__get_value("power")
        out.append(str(net_power) + " MW")
        out.append("")
        out.append("OBJECTIVE VALUE")
        out.append(str(self.__prob.objective.value()))
        return '\n'.join(out)

    def __str__(self):
        # NOTE(review): pulp status codes are ints; `is` works for CPython's
        # small-int cache but `==` would be the robust comparison.
        if self.__status is pulp.LpStatusNotSolved:
            return "No solution has been found."
        if self.__status is pulp.LpStatusUndefined:
            return "No solution has been found."
        if self.__status is pulp.LpStatusInfeasible:
            return "Solution is infeasible, try removing a constraint or allowing a byproduct (e.g. rubber >= 0)"
        if self.__status is pulp.LpStatusUnbounded:
            return "Solution is unbounded, try adding a constraint or replacing '?' with a concrete value (e.g. 1000)"
        return self.__string_solution()

    def __solution_messages(self, breadcrumbs):
        # Discord rendering: sections split across embeds (4096-char limit),
        # plus the rendered graphviz diagram as an attached image.
        message = ResultMessage()
        message.embed = Embed(title="Optimization Query")
        sections = [str(self.__query)]
        inputs = self.__get_vars(
            self.__db.items().values(), check_value=lambda val: val < 0, suffix="/m")
        if len(inputs) > 0:
            sections.append("**Inputs**\n" + "\n".join(inputs))
        outputs = self.__get_vars(
            self.__db.items().values(), check_value=lambda val: val > 0, suffix="/m")
        if len(outputs) > 0:
            sections.append("**Outputs**\n" + "\n".join(outputs))
        recipes = self.__get_vars(self.__db.recipes().values())
        if len(recipes) > 0:
            sections.append("**Recipes**\n" + "\n".join(recipes))
        buildings = self.__get_vars(self.__db.crafters().values())
        buildings.extend(self.__get_vars(self.__db.generators().values()))
        if len(buildings) > 0:
            sections.append("**Buildings**\n" + "\n".join(buildings))
        # Pack sections into descriptions below the embed size limit.
        descriptions = []
        curr_description = ""
        for section in sections:
            if len(curr_description) + len(section) >= 4096:
                descriptions.append(curr_description)
                curr_description = ""
            curr_description += section + "\n\n"
        descriptions.append(curr_description)
        message.embed.description = descriptions[0]
        filename = 'output.gv'
        filepath = 'output/' + filename
        self.generate_graph_viz(filepath)
        file = File(filepath + '.png')
        # The image already shows up from the attached file, so no need to place it in the embed as well.
        # message.embed.set_image(url="attachment://" + filename + ".png")
        message.file = file
        message.content = str(breadcrumbs)
        messages = [message]
        # Overflow descriptions become bare follow-up embeds.
        if len(descriptions) > 1:
            for i in range(1, len(descriptions)):
                next_message = ResultMessage()
                next_message.embed = Embed()
                next_message.embed.description = descriptions[i]
                messages.append(next_message)
        return messages

    def messages(self, breadcrumbs):
        if self.__status is pulp.LpStatusOptimal:
            return self.__solution_messages(breadcrumbs)
        # Non-optimal statuses: just show the status text as the title.
        message = ResultMessage()
        message.embed = Embed(title=str(self))
        message.content = str(breadcrumbs)
        return [message]

    def handle_reaction(self, emoji, breadcrumbs):
        return None

    def __add_nodes(self, s, objs):
        # Add a graphviz node per obj that has a solved non-zero value.
        for obj in objs:
            var = obj.var()
            if not self.__has_value(var):
                continue
            amount = self.__get_value(var)
            s.node(obj.viz_name(), obj.viz_label(amount), shape="plaintext")

    def __has_non_zero_var(self):
        for var in self.__vars:
            if self.__has_value(var):
                return True
        return False

    def has_solution(self):
        # Optimal AND non-trivial (at least one non-zero variable).
        return self.__status is pulp.LpStatusOptimal and self.__has_non_zero_var()

    def __power_viz_label(self, output, net):
        # HTML-like graphviz label for the power node; color marks deficit/surplus.
        color = "moccasin" if net < 0 else "lightblue"
        out = '<'
        out += '<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">'
        if output > 0:
            out += '<TR>'
            out += '<TD COLSPAN="2" BGCOLOR="' + color + '">Power Output</TD>'
            out += '<TD>' + str(round(output, 2)) + ' MW</TD>'
            out += '</TR>'
        out += '<TR>'
        out += '<TD COLSPAN="2" BGCOLOR="' + color + '">Net Power</TD>'
        out += '<TD>' + str(round(net, 2)) + ' MW</TD>'
        out += '</TR>'
        out += '</TABLE>>'
        return out

    def generate_graph_viz(self, filename):
        """Render the solution as a flow graph (PNG via graphviz)."""
        s = Digraph('structs', format='png', filename=filename,
                    node_attr={'shape': 'record'})

        sources = {}  # item => {source => amount}
        sinks = {}  # item => {sink => amount}

        def add_to_target(item_var, targets, target, amount):
            if item_var not in targets:
                targets[item_var] = {}
            targets[item_var][target] = amount

        # items
        self.__add_nodes(s, self.__db.items().values())
        for item in self.__db.items().values():
            if not self.__has_value(item.var()):
                continue
            amount = self.__get_value(item.var())
            # Negative amounts are external inputs (sources); positive are outputs (sinks).
            target = sources if amount < 0 else sinks
            add_to_target(item.var(), target, item.viz_name(),
                          self.__get_value(item.var()))
        # recipes
        self.__add_nodes(s, self.__db.recipes().values())
        for recipe in self.__db.recipes().values():
            if not self.__has_value(recipe.var()):
                continue
            recipe_amount = self.__get_value(recipe.var())
            for item_var, ingredient in recipe.ingredients().items():
                ingredient_amount = recipe_amount * ingredient.minute_rate()
                add_to_target(item_var, sinks, recipe.viz_name(),
                              ingredient_amount)
            for item_var, product in recipe.products().items():
                product_amount = recipe_amount * product.minute_rate()
                add_to_target(item_var, sources,
                              recipe.viz_name(), product_amount)
        # power
        power_output = 0
        net_power = 0
        if self.__has_value("power"):
            net_power = self.__get_value("power")

        def get_power_edge_label(power_production):
            return str(round(power_production, 2)) + ' MW'

        # power recipes
        self.__add_nodes(s, self.__db.power_recipes().values())
        for power_recipe in self.__db.power_recipes().values():
            if not self.__has_value(power_recipe.var()):
                continue
            fuel_item = power_recipe.fuel_item()
            fuel_amount = self.__get_value(
                power_recipe.var()) * power_recipe.fuel_minute_rate()
            add_to_target(fuel_item.var(), sinks,
                          power_recipe.viz_name(), fuel_amount)
            power_production = self.__get_value(
                power_recipe.var()) * power_recipe.power_production()
            power_output += power_production
            s.edge(power_recipe.viz_name(), "power",
                   label=get_power_edge_label(power_production))
        s.node("power", self.__power_viz_label(
            power_output, net_power), shape="plaintext")

        def get_edge_label(item, amount):
            return str(round(amount, 2)) + '/m\n' + item

        # Connect each source to all sinks of that item
        for item_var, item_sources in sources.items():
            item = self.__db.items()[item_var]
            if item_var not in sinks:
                print("Could not find", item_var, "in sinks")
                continue
            for source, source_amount in item_sources.items():
                # Split the source's flow proportionally across all sinks.
                total_sink_amount = 0
                for _, sink_amount in sinks[item_var].items():
                    total_sink_amount += sink_amount
                multiplier = source_amount / total_sink_amount
                for sink, sink_amount in sinks[item_var].items():
                    s.edge(source, sink, label=get_edge_label(
                        item.human_readable_name(), multiplier * sink_amount))
        s.render()
class RecipeCompareResult:
    """Tabular comparison of a base recipe against related recipes
    producing the same product (resources, power, complexity)."""

    def __init__(self, stats):
        self.__stats = stats

        def get_percentage_str(percentage):
            # Local helper: format a signed percentage delta ("+5%", "-50%"),
            # passing through non-numeric markers (e.g. "NEW") unchanged.
            if isinstance(percentage, str):
                return percentage
            percentage_string = str(int(round(percentage, 0)))
            if percentage > 0:
                percentage_string = "+" + percentage_string
            return percentage_string + "%"

        # Build the overall-stats table as parallel column lists; the first
        # row is the base recipe with blank comparison cells.
        recipes = []
        unweighted = []
        weighted = []
        power = []
        complexity = []
        recipes.append(stats.query.base_recipe.human_readable_name())
        unweighted.append("")
        weighted.append("")
        power.append("")
        complexity.append("")
        for related_stats in stats.related_recipe_stats:
            recipes.append(
                related_stats.recipe.human_readable_name())
            unweighted.append(
                get_percentage_str(related_stats.recipe_comp_stats.unweighted_comp_stats.resource_requirements))
            weighted.append(
                get_percentage_str(related_stats.recipe_comp_stats.weighted_comp_stats.resource_requirements))
            power.append(
                get_percentage_str(related_stats.recipe_comp_stats.unweighted_comp_stats.power_consumption))
            complexity.append(
                get_percentage_str(related_stats.recipe_comp_stats.unweighted_comp_stats.complexity))
        self.__overall_stats = {
            "Recipe": recipes,
            "Unweighted\nResources": unweighted,
            "Weighted\nResources": weighted,
            "Power\nConsumption": power,
            "Complexity": complexity,
        }
        # Find all possible inputs.
        # NOTE(review): `.inputs` appears to map input_var -> (item, value);
        # iterating .values() yields those pairs — confirm against OptimizationResult.
        input_vars = {}
        for (_input, value) in stats.base_stats_normalized.unweighted_stats.inputs.values():
            input_vars[_input.var()] = _input.human_readable_name()
        for related_stats in stats.related_recipe_stats:
            for (_input, value) in related_stats.recipe_stats.unweighted_stats.inputs.values():
                input_vars[_input.var()] = _input.human_readable_name()
        # One column per raw input: base value first, then each related recipe
        # with its percentage delta (blank where the recipe lacks that input).
        inputs = {}
        inputs["Recipe"] = recipes
        for input_var, input_name in input_vars.items():
            if input_var in stats.base_stats_normalized.unweighted_stats.inputs:
                _input, value = stats.base_stats_normalized.unweighted_stats.inputs[input_var]
                inputs[input_name] = [str(round(value, 2))]
            else:
                inputs[input_name] = [""]
            for related_stats in stats.related_recipe_stats:
                if input_var in related_stats.recipe_stats.unweighted_stats.inputs:
                    _input, value = related_stats.recipe_stats.unweighted_stats.inputs[
                        input_var]
                    resource, percentage = related_stats.recipe_comp_stats.unweighted_comp_stats.resources[
                        input_var]
                    percentage_str = get_percentage_str(percentage)
                    inputs[input_name].append(
                        "{}/m ({})".format(round(value, 2), percentage_str))
                else:
                    inputs[input_name].append("")
        # Power column for the raw-inputs table.
        raw_power = []
        power_value = stats.base_stats_normalized.unweighted_stats.power_consumption
        raw_power.append("{} MW".format(round(power_value, 1)))
        for related_stats in stats.related_recipe_stats:
            power_value = related_stats.recipe_stats.unweighted_stats.power_consumption
            power_percentage = related_stats.recipe_comp_stats.unweighted_comp_stats.power_consumption
            percentage_str = get_percentage_str(power_percentage)
            raw_power.append("{} MW ({})".format(
                round(power_value, 1), percentage_str))
        inputs["Power"] = raw_power
        self.__input_stats = inputs

    def __str__(self):
        # Example of the intended layout:
        # === OVERALL STATS ===
        #                              | Unweighted | Weighted  | Power       |            |
        # Recipe                       | Resources  | Resources | Consumption | Complexity |
        # -----------------------------|------------|-----------|-------------|------------|
        # Recipe: Iron Rod             |            |           |             |            |
        # -----------------------------|------------|-----------|-------------|------------|
        # Recipe: Alternate: Steel Rod | -50%       | -1.25%    | -56%        | +33%       |
        #
        # === RAW INPUTS ===
        #                              | Iron          |              |             |            |
        # Recipe                       | Ore           | Coal         |             | Power      |
        # -----------------------------|---------------|--------------|-------------|------------|
        # Recipe: Iron Rod             | 0.75/m        |              |             | 0.27 MW    |
        # -----------------------------|---------------|--------------|-------------|------------|
        # Recipe: Alternate: Steel Rod | 0.25/m (-75%) | 0.45/m (NEW) |             | 1.2 MW     |
        product_name = self.__stats.query.product_item.human_readable_name()
        out = []
        out.append("All recipes that produce " + product_name)
        out.append(tabulate(self.__overall_stats,
                            headers="keys", tablefmt="grid"))
        out.append("")
        out.append("Raw Inputs for 1/m " + product_name)
        out.append(tabulate(self.__input_stats,
                            headers="keys", tablefmt="grid"))
        return '\n'.join(out)
        # return str(self.__stats)

    def messages(self, breadcrumbs):
        # Discord rendering: code-fenced "simple" tables in the message content
        # (not an embed), truncated when over the 2000-char message limit.
        message = ResultMessage()
        # message.embed = Embed(title="Error")
        # message.embed.description = "hello" # "```{}```".format(str(self))
        product_name = self.__stats.query.product_item.human_readable_name()
        out = []
        out.append("All recipes that produce " + product_name)
        out.append("```\n{}```".format(
            tabulate(self.__overall_stats, headers="keys", tablefmt="simple")))
        out.append("Raw Inputs for 1/m " + product_name)
        out.append("```\n{}```".format(
            tabulate(self.__input_stats, headers="keys", tablefmt="simple")))
        message.content = "{}\n{}".format(
            str(breadcrumbs), '\n'.join(out))
        if len(message.content) > 2000:
            message.content = "Output was too long"
        return [message]

    def handle_reaction(self, emoji, breadcrumbs):
        return None
|
nilq/baby-python
|
python
|
import numpy
import dask.dataframe as dd
import operator
import collections
import re
import logging
#logging.basicConfig(format='%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s', level=logging.NONE)
# build successor relation and edges among concepts
def successor(concepts, c1, c2):
    """True when c2 covers c1: c1 is a proper subset of c2 and no concept
    lies strictly between them."""
    if not c1 < c2:
        return False
    return not any(c1 < middle < c2 for middle in concepts)
def edges(concepts):
    """All covering (successor) pairs among the given concepts."""
    pairs = set()
    for lower in concepts:
        for upper in concepts:
            if successor(concepts, lower, upper):
                pairs.add((lower, upper))
    return pairs
# helper functions to format concepts
def tupToIndices(tup):
    """Positions of the entries equal to 1 in *tup*, as a tuple."""
    return tuple(pos for pos, flag in enumerate(tup) if flag == 1)
def indicesToNames(indices_tup, names_list):
    """Map a tuple of positions to the corresponding names from *names_list*."""
    return tuple(map(lambda pos: names_list[pos], indices_tup))
# helper functions for filtering redundant implication rules
def impRuleSubsumes(rule1, rule2):
    """True when rule1 subsumes rule2: rule1's premise is a subset of
    rule2's and rule1's conclusion is a superset of rule2's."""
    weaker_premise = rule1[0] <= rule2[0]
    stronger_conclusion = rule2[1] <= rule1[1]
    return weaker_premise and stronger_conclusion
def impRuleRedundant(rules, rule):
    """True when some rule already in *rules* subsumes *rule*.

    (Idiom fix: dropped the redundant `== True` comparison on the
    boolean returned by impRuleSubsumes.)
    """
    return any(impRuleSubsumes(exrule, rule) for exrule in rules)
def updateImpRules(rules, r):
    """Insert rule *r*, dropping every existing rule that *r* subsumes."""
    survivors = {rule for rule in rules if not impRuleSubsumes(r, rule)}
    survivors.add(r)
    return survivors
def filterSubsumedImp(rules):
    """Drop implication rules subsumed by others, keeping each survivor's
    associated (confidence, lift) value from the input mapping."""
    kept = set()
    for candidate in rules.keys():
        if not impRuleRedundant(kept, candidate):
            kept = updateImpRules(kept, candidate)
    return {survivor: rules[survivor] for survivor in kept}
class ContextProcessing():
def __init__(self,input_file):
logging.debug('Initializing context with input file '+input_file+' ...')
self.inputfile = input_file
self.full_context = dd.read_csv(input_file,header=0,low_memory=False) #contains all the csv content including the header
self.header = list(self.full_context.head(1)) # csv header as a list
self.attributes = self.header[1:]
self.objects = self.full_context.values.compute()[:,0] # names of the data points
self.num_attributes = len(self.attributes)
self.num_objects = len(self.objects)
self.data = self.full_context.values.compute()[:,1:]
self.fca_stats = {'total':0,'closures':0, 'fail_canon':0, 'fail_fcbo':0, 'fail_support':0}
self.scores = {}
self.fca_properties = { 'min_support':0.5, 'min_confidence':0, 'num_rules':'*'}
self.concepts = ()
self.rules = {}
logging.debug('Context initialized...')
#--------------------------------------------------------------------------
# FCA-related methods
#--------------------------------------------------------------------------
def setFcaProperties(self,dict_prop):
for k in dict_prop.keys():
if k in self.fca_properties and dict_prop[k] != self.fca_properties[k]:
self.fca_properties[k] = dict_prop[k]
#--------------------------------------------------------------------------
# computeClosure, generateFrom and generateConcepts are a port
# from the original FCbO algorithm C code
#--------------------------------------------------------------------------
#-----------------------------------------------------------------------
# computeClosure: computes closures. Takes into account
# minimal support conditions
#-----------------------------------------------------------------------
def computeClosure(self,extent,intent,new_attribute):
rows = numpy.flatnonzero(self.data[:,new_attribute]==1)
C = numpy.zeros((self.num_objects,),dtype=int)
D = numpy.ones((self.num_attributes,),dtype=int)
intersect_extent_rows = rows[extent[rows]==1]
supp = len(intersect_extent_rows)/self.num_objects
for e in intersect_extent_rows:
C[e] = 1
for j in range(self.num_attributes):
if self.data[e,j] == 0:
D[j] = 0
self.fca_stats['closures']+=1
return C,D,supp
#-------------------------------------------------------------------------------
# generateFrom: main function of FCbO.
#
# Generates the concepts whose support is above a certain threshold.
#
# Currently a direct port of the original recursive algorithm.
# For scalability reasons, it might be good to make this function iterative
# in future versions of the code.
#-------------------------------------------------------------------------------
def generateFrom(self,extent,intent,new_attribute):
concepts = {(tuple(extent),tuple(intent)),}
if all(intent) != 1 and new_attribute <= self.num_attributes:
for j in range(new_attribute,self.num_attributes):
if intent[j] == 0:
C,D,supp = self.computeClosure(extent,intent,j)
skip = False
for k in range(j-1):
if D[k] != intent[k]:
skip = True
self.fca_stats['fail_canon'] += 1
break
if supp < self.fca_properties['min_support']:
skip = True
self.fca_stats['fail_support'] += 1
break
if skip == False:
concept = self.generateFrom(C,D,j+1)
concepts.update(concept)
return concepts
#-------------------------------------------------------------------------------
# format concepts so that extents and intents contain named objects/attributes
#-------------------------------------------------------------------------------
def formatConcepts(self,concepts):
transformed_concepts = {(tupToIndices(tup[0]),tupToIndices(tup[1])) for tup in concepts}
transformed_concepts = {(indicesToNames(tup[0],self.objects),indicesToNames(tup[1],self.attributes)) for tup in transformed_concepts}
return transformed_concepts
#-------------------------------------------------------------------------------
# Retrieve attribute names given an object name
#-------------------------------------------------------------------------------
def getAttributeNamesFromObjName(self,objname):
obj_index = numpy.flatnonzero(self.objects==objname)
if obj_index.size != 0:
data_line = self.data[obj_index].ravel()
if 1 in data_line:
attributes = operator.itemgetter(*numpy.flatnonzero(data_line==1).tolist())(self.attributes)
else:
attributes = ()
else:
attributes = ()
return attributes
#-------------------------------------------------------------------------------
# generateConcepts: generates all concepts whose support is above the
# 'support' parameter
#-------------------------------------------------------------------------------
def generateConcepts(self,support):
if support != self.fca_properties['min_support']:
self.setFcaProperties({'min_support':support})
start_extent = numpy.ones((self.num_objects,),dtype=int) #initialize the extent to an array of ones
start_intent = numpy.zeros((self.num_attributes,),dtype=int) #initialize the inttent to an array of zeros
new_attribute = 0
logging.debug('Generating FCA (FCbO) concepts for '+self.inputfile+'...')
concepts = self.generateFrom(start_extent,start_intent,new_attribute) #generate the concepts
#filter duplicate concepts
logging.debug('De-duplicating FCA (FCbO) concepts for '+self.inputfile+'...')
unfiltered_concepts = list(concepts)
dic = collections.defaultdict(list)
for i in range(len(unfiltered_concepts)):
dic[unfiltered_concepts[i][0]].append((i,unfiltered_concepts[i][1].count(1)))
concept_filter = [max(v,key=operator.itemgetter(1))[0] for v in dic.values()]
filtered_concepts = set([unfiltered_concepts[c] for c in concept_filter])
self.fca_stats['total'] = len(filtered_concepts)
self.concepts = self.formatConcepts(filtered_concepts)
logging.debug('FCA (FCbO) concepts generation for '+self.inputfile+' completed...')
#----------------------------------------------------------------------------
# Functions to compute the support of an itemset/concept intent (nominal
# support or support scaled by the number of objects)
#----------------------------------------------------------------------------
def getConceptSupport(self,concept_intent):
support = 0
indices = [self.attributes.index(e) for e in concept_intent]
for row in self.data:
vals = row[indices]
if all(vals == 1):
support += 1
return support
def getItemsetScaledSupport(self,concept_intent):
support = self.getConceptSupport(concept_intent)
return support/self.num_objects
#---------------------------------------------------------------------------------
#generating non-redundant implication rules from concepts
#---------------------------------------------------------------------------------
    def generateRules(self,support,confidence):
        """Generate non-redundant implication rules from the stored concepts.

        support    -- minimum support threshold (stored in FCA properties).
        confidence -- minimum confidence a rule must exceed to be kept.

        Populates ``self.rules`` with a mapping
        (antecedent, consequent) -> (confidence, lift).
        """
        logging.debug('Starting rule generation from FCA (FCbO) concepts for '+self.inputfile+'...')
        self.setFcaProperties({'min_support':support,'min_confidence':confidence})
        itemsets = {frozenset(c[1]) for c in self.concepts}
        edg = edges(itemsets)
        supports = {}
        logging.debug('Computation of required itemset supports'+self.inputfile+'...')
        for e in edg: #compute all the supports necessary for the rule computations
            # e[1]-e[0] below implies e is a (subset, superset) frozenset
            # pair -- presumably produced by edges(); verify against its
            # definition. Cache supports for superset, subset and difference.
            if e[1] not in supports.keys():
                supports[e[1]] = self.getItemsetScaledSupport(e[1])
            if e[0] not in supports.keys():
                supports[e[0]] = self.getItemsetScaledSupport(e[0])
            if e[1]-e[0] not in supports.keys():
                supports[e[1]-e[0]] = self.getItemsetScaledSupport(e[1]-e[0])
        # compute implication rules along with their confidence and lift
        logging.debug('Computation of implication rules'+self.inputfile+'...')
        rules = {(e[0],e[1]-e[0]): (supports[e[1]]/supports[e[0]],supports[e[1]]/(supports[e[0]]*supports[e[1]-e[0]]))
                 for e in edg if supports[e[0]] > 0 and supports[e[1]]/supports[e[0]] > self.fca_properties['min_confidence']}
        #filtering redundant rules
        logging.debug('Eliminating redundant rules'+self.inputfile+'...')
        reducedRules = filterSubsumedImp(rules)
        self.rules = reducedRules
        logging.debug('Rule generation for '+self.inputfile+' completed...')
def violations(self,att_list):
setS = (frozenset({att_list}) if type(att_list) == str else frozenset(att_list))
vios = [e for e in self.rules.keys() if e[0] <= setS and not(e[1] <= setS)]
ent = sum([-numpy.log2(1-self.rules[e][0]) for e in vios])
entLift = max([self.rules[e][1] for e in vios]+ [0])
count = len(vios)
return {'conf':ent,'lift':entLift,'num':count}
    def fca_anomaly_score(self):
        """Compute the rule-violation anomaly scores ('conf', 'lift', 'num')
        for every object in the context, keyed by object name."""
        logging.debug('Computing FCA-based anomaly scores for '+self.inputfile+'...')
        return {k:self.violations(self.getAttributeNamesFromObjName(k)) for k in self.objects}
#--------------------------------------------------------------------------
# More general methods
#--------------------------------------------------------------------------
def score(self,method='lift',score_file=''):
# score_file is the path to the output file. If not provided, defaults to input_file path where
# input_file extension is replaced by '.scored.csv'
logging.debug('Computing anomaly scores of context objects for '+self.inputfile+' with FCA...')
logging.debug('Computing anomaly scores for '+self.inputfile+'...')
scores = self.fca_anomaly_score()
method_header = 'FCA confidence score,FCA lift score'
if method == 'lift':
method_name = 'Lift_Score'
elif method == 'conf':
method_name = 'Confidence_Score'
elif method == 'num':
method_name = 'Score'
header = 'Objects,'+method_name+'\n'
logging.debug('Writing anomaly scores to file '+score_file+'...')
with(open(score_file,'w')) as f:
f.write(header)
for k in scores.keys():
f.write("%s, %f\n" % (k, scores[k][method]))
|
nilq/baby-python
|
python
|
import io, csv
from flask import (
Blueprint,
render_template,
request,
redirect,
url_for,
make_response,
session,
)
from helpers.hubspot import create_client
from helpers.session import SessionKey
from auth import auth_required
from hubspot.crm import ObjectType
from hubspot.crm.contacts import (
PublicObjectSearchRequest,
SimplePublicObject,
SimplePublicObjectInput,
Filter,
FilterGroup,
)
module = Blueprint("contacts", __name__)
@module.route("/")
@auth_required
def list():
    """Render the contact list sorted by creation date, newest first.

    NOTE(review): the name shadows the builtin ``list`` but must stay --
    Flask derives the endpoint from the function name and delete() redirects
    to 'contacts.list'.
    """
    hubspot = create_client()
    search_request = PublicObjectSearchRequest(
        sorts=[
            {
                "propertyName": "createdate",
                "direction": "DESCENDING",
            }
        ]
    )
    contacts_page = hubspot.crm.contacts.search_api.do_search(
        public_object_search_request=search_request
    )
    return render_template(
        "contacts/list.html",
        contacts=contacts_page.results,
        # One-shot flash of the last create/update/delete action.
        action_performed=session.pop(SessionKey.ACTION_PERFORMED, None),
    )
@module.route("/new")
@auth_required
def new():
    """Render an empty contact form (email field only) for creation."""
    blank_contact = SimplePublicObject(properties={"email": None})
    field_labels = {"email": {"label": "Email"}}
    return render_template(
        "contacts/show.html", contact=blank_contact, properties_dict=field_labels
    )
@module.route("/<contact_id>")
@auth_required
def show(contact_id):
    """Render the edit form for one contact, restricted to its editable
    string properties plus the contact owner."""
    hubspot = create_client()
    all_properties = hubspot.crm.properties.core_api.get_all(ObjectType.CONTACTS)
    # Keep only writable string properties -- the only ones the form edits.
    editable_properties = []
    for prop in all_properties.results:
        if (
            prop.type == "string"
            and prop.modification_metadata.read_only_value is False
        ):
            editable_properties.append(prop)
    editable_properties_names = [p.name for p in editable_properties]
    # Owner is not a string property but is editable via the owners dropdown.
    editable_properties_names.append("hubspot_owner_id")
    contact = hubspot.crm.contacts.basic_api.get_by_id(
        contact_id,
        properties=editable_properties_names,
    )
    editable_properties_dict = {p.name: p for p in editable_properties}
    editable_properties_dict["hubspot_owner_id"] = {"label": "Contact Owner"}
    # Drop any returned property the form does not know how to render.
    contact.properties = {
        name: prop
        for name, prop in contact.properties.items()
        if name in editable_properties_dict
    }
    return render_template(
        "contacts/show.html",
        contact=contact,
        properties_dict=editable_properties_dict,
        owners=hubspot.crm.owners.get_all(),
        action_performed=session.pop(SessionKey.ACTION_PERFORMED, None),
    )
@module.route("/new", methods=["POST"])
@auth_required
def create():
    """Create a contact from the posted form and redirect to its page."""
    # NOTE(review): request.form is passed positionally; verify the first
    # parameter of SimplePublicObjectInput is ``properties`` -- update()
    # below passes properties= explicitly to a different class.
    properties = SimplePublicObjectInput(request.form)
    hubspot = create_client()
    contact = hubspot.crm.contacts.basic_api.create(
        simple_public_object_input=properties
    )
    session[SessionKey.ACTION_PERFORMED] = "created"
    return redirect(url_for("contacts.show", contact_id=contact.id))
@module.route("/<contact_id>", methods=["POST"])
@auth_required
def update(contact_id):
    """Update a contact from the posted form and reload the edit page."""
    # NOTE(review): this wraps the form in SimplePublicObject while create()
    # uses SimplePublicObjectInput, yet both are passed as
    # simple_public_object_input -- confirm which type the client expects.
    properties = SimplePublicObject(properties=request.form)
    hubspot = create_client()
    hubspot.crm.contacts.basic_api.update(
        contact_id, simple_public_object_input=properties
    )
    session[SessionKey.ACTION_PERFORMED] = "updated"
    return redirect(request.url)
@module.route("/search")
@auth_required
def search():
    """Search contacts by exact email match and render the results."""
    query = request.args.get("search")
    email_filter = Filter(
        property_name="email",
        operator="EQ",
        value=query,
    )
    search_request = PublicObjectSearchRequest(
        filter_groups=[FilterGroup(filters=[email_filter])],
    )
    hubspot = create_client()
    page = hubspot.crm.contacts.search_api.do_search(
        public_object_search_request=search_request
    )
    return render_template(
        "contacts/list.html", contacts=page.results, search=query
    )
@module.route("/delete/<contact_id>")
@auth_required
def delete(contact_id):
    """Archive (soft-delete) the contact and return to the list view."""
    client = create_client()
    client.crm.contacts.basic_api.archive(contact_id)
    session[SessionKey.ACTION_PERFORMED] = "deleted"
    return redirect(url_for("contacts.list"))
@module.route("/export")
@auth_required
def export():
    """Export all contacts as a CSV attachment (email, first and last name)."""
    si = io.StringIO()
    writer = csv.writer(si)
    writer.writerow(["Email", "Firstname", "Lastname"])
    hubspot = create_client()
    for contact in hubspot.crm.contacts.get_all():
        props = contact.properties
        # .get avoids a KeyError for contacts missing any of the three
        # fields (previously a single incomplete contact broke the export).
        writer.writerow(
            [props.get("email"), props.get("firstname"), props.get("lastname")]
        )
    output = make_response(si.getvalue())
    output.headers["Content-Disposition"] = "attachment; filename=contacts.csv"
    output.headers["Content-type"] = "text/csv"
    return output
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import importlib
import os
import sys
from io import StringIO
from PySide2.QtCore import Qt
from PySide2.QtGui import QKeyEvent
from PySide2.QtWidgets import QWidget, QMessageBox
from tensorflow.keras.models import Model, load_model
from MnistClassifier.Ui_MainWidget import Ui_MainWidget
class MainWidget(QWidget):
    """Main window: lets the user draw a digit, runs the selected Keras
    model on the drawing and shows the prediction histogram."""

    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        self.ui = Ui_MainWidget()
        self.ui.setupUi(self)
        # Callable turning the drawn image into the model's input tensor;
        # imported from a module named after the selected .h5 file.
        self.convert_func = None
        # Populate the model combo box with every .h5 file under the cwd.
        for root, dirs, files in os.walk('.', ):
            for f in files:
                if f.endswith('.h5'):
                    self.ui.comboBoxModel.addItem(f)
        assert (self.ui.comboBoxModel.count() > 0)
        self.neuron_net: Model = load_model(self.ui.comboBoxModel.itemText(0))
        self.import_converter(self.ui.comboBoxModel.itemText(0))
        self.ui.widgetInputDraw.input_changed.connect(self.input_changed)
        self.ui.pushButtonPredict.clicked.connect(self.on_click_predict)
        self.ui.pushButtonClear.clicked.connect(self.ui.widgetInputDraw.clear)
        self.ui.pushButtonModelInfo.clicked.connect(self.show_model_info)
        self.ui.comboBoxModel.currentIndexChanged.connect(self.change_model)

    def import_converter(self, model_file_name):
        """Import the module named after the model file (minus '.h5') and
        take its ``convert`` function as the image converter."""
        convert_name = model_file_name[:-3]
        convert_module = importlib.import_module(convert_name)
        self.convert_func = convert_module.convert

    def show_model_info(self):
        """Show the Keras model summary in a message box."""
        summary_string = StringIO()
        self.neuron_net.summary(print_fn=lambda x: summary_string.write(x + '\n'))
        summary = summary_string.getvalue()
        QMessageBox.information(self, 'Model information', summary)

    def change_model(self, index):
        """Load the model selected in the combo box plus its converter."""
        self.neuron_net = load_model(self.ui.comboBoxModel.itemText(index))
        self.import_converter(self.ui.comboBoxModel.itemText(index))

    def keyPressEvent(self, event: QKeyEvent):
        # Escape closes the window; every other key propagates.
        if event.key() == Qt.Key_Escape:
            self.close()
        else:
            event.ignore()

    def on_click_predict(self):
        """Run a prediction on the current drawing (Predict button)."""
        image = self.ui.widgetInputDraw.get_image()
        self.predict(image)

    def input_changed(self, image):
        # Live prediction while drawing, when "interactive" is checked.
        if self.ui.checkBoxInteractive.isChecked():
            self.predict(image)

    def predict(self, image):
        """Convert the drawn image, run the network and plot the output.

        Failures are printed rather than raised so a bad model/converter
        does not crash the GUI; the bare ``except`` additionally catches
        non-Exception raises.
        """
        assert (self.convert_func is not None)
        input_ = self.convert_func(image)
        try:
            output_ = self.neuron_net.predict(input_)
        except Exception as e:
            print(e)
        except:
            print("Unexpected error:", sys.exc_info()[0])
        else:
            self.ui.widgetHistogram.set_data(output_)
|
nilq/baby-python
|
python
|
# DESCQA galaxy catalog interface. This defines the GalaxyCatalog base class
# and, on import, registers all of the available catalog readers. Convenience
# functions are defined that enable automatic detection of the appropriate
# catalog type.
# Note: right now we are working with galaxy properties as floats, with
# expected return units listed in GalaxyCatalog.__init__ below. In the future
# we might move to expecting values as Astropy Quantity objects.
__all__ = ['GalaxyCatalog']
import os
import numpy as np
import astropy.units as u
# Galaxy catalog base class.
class GalaxyCatalog(object):
    """
    Base class for galaxy catalog classes. Common internal data structures:

    type_ext   A string giving the file name extension, for catalogs that use
               the default method for determining file type.

    filters    A dictionary whose keys are strings giving the names of
               filters supported by the catalog class, and whose values are
               the methods used to apply these constraints (or True if they
               are supported but handled via a different mechanism; None
               marks a declared-but-unsupported filter). The default
               implementation sets this dictionary to include keys that
               should be supported by all catalogs.

    quantities A dictionary whose keys are strings giving the names of
               quantities that can be requested from the catalog, and whose
               values are the methods used to request these quantities. The
               methods should take two arguments: the name of the quantity
               and a dictionary containing the filters to be applied and the
               values for the filters.

    sky_area   The sky area covered by the catalog as an Astropy Quantity
               object.

    cosmology  Should be set by load routines to an Astropy.cosmology object
               encoding the cosmology used to generate the catalog. None by
               default.
    """
    type_ext = ''
    filters = {'zlo' : None, # min redshift
               'zhi' : None  # max redshift
              }
    quantities = {'stellar_mass' : None # stellar mass in M_sun
                 }
    sky_area = 4.*np.pi*u.sr # all sky by default
    cosmology = None

    def __init__(self, fn=None):
        """
        Default GalaxyCatalog constructor takes one optional filename
        argument. If present, the referenced catalog is checked for validity
        and loaded if possible; otherwise ValueError is raised. Subclass
        __init__ methods that override this one should call it just before
        they return.
        """
        if fn:
            if self.is_valid(fn):
                self.load(fn)
            else:
                raise ValueError('invalid catalog file')

    def is_valid(self, fn):
        """
        Given a catalog path, determine whether it is a valid catalog of
        this type. The default implementation merely checks the filename
        extension against the type_ext attribute of the class.
        """
        base = os.path.basename(fn)
        ext = base.split('.')[-1]
        return (ext == self.type_ext)

    def load(self, fn):
        """
        Given a catalog path, attempt to read the catalog and set up its
        internal data structures. Should return self if successful.
        """
        return self

    def get_cosmology(self):
        """
        Return as an Astropy.cosmology object the cosmological parameter
        values assumed in generating this catalog.
        """
        return self.cosmology

    def get_sky_area(self):
        """
        Return the sky area covered by the catalog as an Astropy Quantity.
        """
        return self.sky_area

    def get_quantities(self, ids, filters):
        """
        Given a list of string property names and a filter dictionary,
        return the selected values as a list of NumPy arrays (or a single
        array when a single name was passed). Raises TypeError for bad
        argument types and ValueError for unsupported quantities/filters.
        """
        if isinstance(ids, list):
            idList = ids
        elif isinstance(ids, str):
            # Fix: the original tested Python-2's ``basestring``, which is a
            # NameError on Python 3.
            idList = [ids]
        else:
            raise TypeError("get_quantities: ids must be list or str")
        if not isinstance(filters, dict):
            raise TypeError("get_quantities: filters must be dict")
        okQuantities = self.get_supp_quantities()
        for quantity in idList:
            if quantity not in okQuantities:
                raise ValueError("get_quantities: quantity '%s' not supported" % quantity)
        okFilters = self.get_supp_filters()
        for filt in filters.keys():
            if filt not in okFilters:
                raise ValueError("get_quantities: filter '%s' not supported" % filt)
        results = []
        for quantity in idList:
            quantityGetter = self.quantities[quantity]
            results.append(quantityGetter(quantity, filters))
        return results if isinstance(ids, list) else results[0]

    def get_supp_filters(self):
        """
        Return a list containing the supported filter keywords for this
        catalog. Filters whose dictionary value is falsy (None) are declared
        but not supported. (Fix: a stray ``return self.filters.keys()`` made
        the filtering logic below unreachable; it now mirrors
        get_supp_quantities as intended.)
        """
        filterListOK = []
        for filt in self.filters.keys():
            if self.filters[filt]:
                filterListOK.append(filt)
        return filterListOK

    def get_supp_quantities(self):
        """
        Return a list containing the supported quantities for this catalog.
        Quantities whose dictionary value is falsy (None) are declared but
        not supported.
        """
        quantityListOK = []
        for quantity in self.quantities.keys():
            if self.quantities[quantity]:
                quantityListOK.append(quantity)
        return quantityListOK
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import getopt
import sys
from _datetime import datetime
from fmc_rest_client import FMCRestClient
from fmc_rest_client.resources import *
logging.basicConfig(level=logging.INFO)
fmc_server_url = None
username = None
password = None
obj_types = []
obj_name_prefix = None
skip_read_only = False
types_map = {
'networks': [ NetworkGroup(), Host() , Network(), Range()],
'ports': [PortObjectGroup(), Port(), ICMPV4Object(), ICMPV6Object() ]
}
supported_types = ['AccessPolicy', 'FtdNatPolicy', 'NetworkGroup', 'Host' , 'Network', 'Range', 'SecurityZone', 'InterfaceGroup', 'PortObjectGroup', 'Port', 'ICMPV4Object', 'ICMPV6Object']
def usage():
    """Print the command-line usage string to stdout."""
    message = ('script -s <fmc server url> -u <username> -p <password> '
               '-t <comma separated types> [-x <object name prefix>]')
    print(message)
def parse_args(argv):
    """Parse command-line options into the module-level configuration
    globals; print usage and exit(2) on a parse error or when a required
    argument (server URL, username, password, types) is missing."""
    global fmc_server_url
    global username
    global password
    global obj_types
    global obj_name_prefix
    global skip_read_only
    try:
        # NOTE(review): getopt longopts should be bare names ('types='), but
        # this list contains '--types' (with dashes) and an unused 'file=' --
        # as written '--types <arg>' will not parse; verify intent.
        opts, args = getopt.getopt(argv,'hu:p:s:t:x:', ['file=' , 'skip-readonly', '--types'])
    except getopt.GetoptError as e:
        print(str(e))
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt == '-u':
            username = arg
        elif opt == '-p':
            password = arg
        elif opt == '-s':
            fmc_server_url = arg
        elif opt == '-t' or opt == '--types':
            obj_types = process_types_arg(arg)
        elif opt == '--skip-readonly':
            skip_read_only = True
        elif opt == '-x':
            obj_name_prefix = arg
        else:
            pass
    # All of server URL, username, password and at least one type are required.
    if password is None or username is None or fmc_server_url is None or len(obj_types)== 0:
        usage()
        sys.exit(2)
def process_types_arg(types):
    """Translate a comma-separated type string into a list of resource
    template objects.

    Accepts the group aliases from ``types_map`` ('networks', 'ports') as
    well as individual class names resolved via the module globals (classes
    star-imported from fmc_rest_client.resources). Prints an error and
    exits(2) when nothing valid was supplied.
    """
    type_list = types.split(',')
    msg = "Incorrect objects specified along with '-t' option. Pass the comma separated list of following - \n\t{}. " \
          "\nYou can also use - 'networks' for all network type objects, 'ports' for all port type objects.".format(supported_types)
    if len(type_list) == 0:
        print(msg)
        sys.exit(2)
    obj_list = []
    for t in type_list:
        if t in types_map:
            # Group alias: expand to the pre-built template instances.
            obj_list.extend(types_map[t])
        elif t in globals():
            # Individual class name: instantiate a fresh template object.
            obj_list.append(globals()[t]())
    if len(obj_list) == 0:
        print(msg)
        sys.exit(2)
    print('Types selected for cleanup: {}'.format(list(map(lambda x: x.__class__.__name__, obj_list))))
    return obj_list
def delete_objects(rest_client, obj_types, skip_read_only=False, obj_name_prefix=None):
    """Delete all resources of the given types via the FMC REST API.

    rest_client     -- connected FMCRestClient.
    obj_types       -- list of resource template objects to enumerate.
    skip_read_only  -- when True, resources flagged read-only in their
                       metadata are skipped instead of deleted.
    obj_name_prefix -- when set, only resources whose name CONTAINS this
                       string are deleted (note: a substring match, not a
                       prefix match, despite the parameter name).

    Returns (failed_obj_dict, deleted_obj_list) where failed_obj_dict maps
    a resource to the error string raised while deleting it.
    """
    failed_obj_dict = {}
    deleted_obj_list = []
    for obj_type in obj_types:
        resource_iterator = rest_client.list_iterator(obj_type)
        print('Found total {} objects at this point: {}'.format(type(obj_type).__name__, str(resource_iterator.total)))
        print('Deleting objects of type {}'.format(type(obj_type).__name__), end='')
        if obj_name_prefix:
            print(' starting name with {}'.format(obj_name_prefix), end='')
        print(' ...')
        for resource in resource_iterator:
            try:
                if not obj_name_prefix or obj_name_prefix in resource.name:
                    # Read-only state lives at metadata['readOnly']['state'].
                    if skip_read_only and hasattr(resource, 'metadata') and 'readOnly' in resource.metadata and \
                            'state' in resource.metadata['readOnly'] and resource.metadata['readOnly']["state"]:
                        #print (obj.__dict__)
                        print('\tSkipping delete for read only object {}'.format(resource.name))
                        continue
                    print('\tDeleting {} object {}'.format(type(resource).__name__, resource.name), end='')
                    rest_client.remove(resource)
                    print(' \t\tdone.')
                    deleted_obj_list.append(resource)
            except Exception as e:
                # Record the failure and keep going with the next resource.
                failed_obj_dict[resource] = str(e)
    return failed_obj_dict, deleted_obj_list
'''
print the std output both to terminal as well as to File
'''
def write_line_to_file(text, fh, do_print=True):
    """Write ``text`` plus a newline to ``fh``; also echo it to stdout
    unless ``do_print`` is False. Thin public wrapper over _write_to_file."""
    _write_to_file(text, fh, do_print)
def _write_to_file(text, fh,do_print=True):
if do_print:
print(text)
fh.write(text + '\n')
'''
create report
'''
def write_report(report_filename, failed_obj_dict, deleted_obj_list, dump_deleted_obj=True):
    """Write a human-readable cleanup report and, optionally, a timestamped
    JSON dump of the deleted objects.

    report_filename  -- path of the text report.
    failed_obj_dict  -- mapping resource -> failure reason string.
    deleted_obj_list -- resources successfully deleted.
    dump_deleted_obj -- when True, also dump each deleted object's JSON.

    NOTE(review): the dump writes ',' after every object including the last,
    so the produced file is not strictly valid JSON -- verify downstream
    consumers tolerate that.
    """
    deleted_dump = 'DeletedObjectsDump-{}.json'.format(datetime.now().strftime('%Y%m%d%H%M%S'))
    with open(report_filename, 'w') as fh, open(deleted_dump,'w') as dh:
        if len(deleted_obj_list) > 0:
            write_line_to_file('-' * 120, fh)
            msg='Total number of deleted objects: '+str(len(deleted_obj_list))
            write_line_to_file(msg, fh)
            write_line_to_file('List of deleted object names:', fh)
            write_line_to_file('-' * 120, fh)
            if dump_deleted_obj:
                write_line_to_file('{\n\"items\": [', dh, do_print=False)
            for obj in deleted_obj_list:
                write_line_to_file('\t' + obj.name, fh)
                if dump_deleted_obj:
                    write_line_to_file(obj.json() + ',', dh,do_print=False)
            if dump_deleted_obj:
                write_line_to_file('\n]}', dh, do_print=False)
            write_line_to_file('-' * 120, fh)
        if len(failed_obj_dict)>0:
            write_line_to_file('-' * 120, fh)
            msg='Total number of object failed to delete: ' + str(len(failed_obj_dict))
            write_line_to_file(msg, fh)
            write_line_to_file('Failed objects list:', fh)
            write_line_to_file('-' * 120, fh)
            for resource,reason in failed_obj_dict.items():
                resource = '{}: {}'.format(type(resource).__name__, resource.name)
                reason = '\tReason for failure: ' + reason
                write_line_to_file(resource, fh)
                write_line_to_file(reason, fh)
            write_line_to_file('-'*120, fh)
if __name__ == '__main__':
    # Script entry point: parse args, connect, delete the selected object
    # types, then write ObjectCleanupReport.txt plus a JSON dump of the
    # deleted objects; finally report the elapsed wall-clock time.
    start_time = datetime.now().replace(microsecond=0)
    parse_args(sys.argv[1:])
    print('Connecting to FMC {} ...'.format(fmc_server_url))
    rest_client = FMCRestClient(fmc_server_url, username, password)
    print('Connected Successfully')
    report_file = 'ObjectCleanupReport.txt'
    result = delete_objects(rest_client, obj_types, skip_read_only, obj_name_prefix)
    write_report(report_file, result[0], result[1])
    end_time = datetime.now().replace(microsecond=0)
    print("Script completed in {}s.".format(str(end_time - start_time)))
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from .models import Portfolio
def portfolio(request):
    """Render the portfolio page with every Portfolio record.

    Fix: ``Portfolio.objects`` is a Manager, which is not itself iterable in
    a template; the view must pass an actual queryset via ``.all()``.
    """
    portfolios = Portfolio.objects.all()
    return render(request, 'portfolio/portfolio.html', {'portfolios': portfolios})
|
nilq/baby-python
|
python
|
from ...config import LennyBotSourceConfig
from .isource import ISource
from ..github import GitHubService
import re
import requests
class GithubSource(ISource):
    """Version source that resolves an application's latest version from the
    most recent GitHub release of a configured repository."""

    def __init__(self, name, config: LennyBotSourceConfig, github: GitHubService) -> None:
        self._name = name
        self._github = github
        self._repository = config.repository
        self._version_regex = config.regex

    @property
    def application(self) -> str:
        """Name of the application this source tracks."""
        return self._name

    def latest_version(self):
        """Fetch the latest release tag and extract the version string via
        group 1 of a full match of the configured regex."""
        release = self._github.fetch_latest_release(self._repository)
        # TODO check if tag_name property exists
        tag_name = release["tag_name"]
        matched = re.fullmatch(self._version_regex, tag_name)
        # TODO check if matched
        if matched is None:
            raise Exception(f"Version pattern does not match, Pattern: {self._version_regex}, Tag: {tag_name}")
        if not matched.groups():
            raise Exception(f"Missing Group in regex pattern, Pattern: {self._version_regex}, Tag: {tag_name}")
        return matched.group(1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8; -*-
"""
Gui implementation for gui created via qt-designer.
"""
__author__ = "Christoph G. Keller"
__copyright__ = "Copyright 2017"
__credits__ = [""]
__license__ = "MIT"
__version__ = "2.0.0"
__maintainer__ = "Christoph G. Keller"
__email__ = "christoph.g.keller@gmail.com"
__status__ = "Production"
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtWidgets import QDialog
from .gui.jira_settings_gui_ui import Ui_JiraSettings
from my_name_is_wolf.parameter_wrapper import ParameterWrapper
class JiraSettings(QDialog, Ui_JiraSettings):
    """Settings dialog for the Jira connection: server URL plus optional
    HTTP/HTTPS proxy configuration, persisted via ParameterWrapper."""

    def __init__(self, params: ParameterWrapper):
        super(JiraSettings, self).__init__()
        self.setupUi(self)
        self.params = params
        # TODO improve the parameter stuff
        # if not (params.conf.has_section('jira') and params.conf.has_option('jira_url')):
        # self.conf.set('jira', 'jira_url', '')
        # Pre-fill the widgets from the stored parameter values.
        self.lineEdit.setText(self.params.get_jira_url)
        self.checkBox_use_proxy.setChecked(self.params.get_use_proxy)
        self.lineEdit_http_proxy.setText(self.params.get_http_proxy_url)
        self.lineEdit_https_proxy.setText(self.params.get_https_proxy_url)
        self.buttonBox.accepted.connect(self.on_save)
        self.checkBox_use_proxy.stateChanged.connect(self.update_proxy_view)
        self.update_proxy_view()

    @pyqtSlot()
    def on_save(self):
        """
        Store the selected gui values in the param class and save to file.
        Proxy URLs are only written when the proxy checkbox is checked.
        """
        self.params.set_jira_url(str(self.lineEdit.text()))
        self.params.set_use_proxy(self.checkBox_use_proxy.checkState() == Qt.Checked)
        if self.checkBox_use_proxy.checkState() == Qt.Checked:
            self.params.set_http_proxy_url(self.lineEdit_http_proxy.text())
            self.params.set_https_proxy_url(self.lineEdit_https_proxy.text())
        # NOTE(review): checkBox_p12 is not referenced anywhere else in this
        # class -- presumably created by Ui_JiraSettings; confirm it exists.
        self.params.set_use_cert(self.checkBox_p12.checkState() == Qt.Checked)
        self.params.save()

    @pyqtSlot()
    def update_proxy_view(self):
        """
        If the proxy checkbox is not set then hide the proxy gui elements,
        otherwise show them.
        """
        if self.checkBox_use_proxy.checkState() == Qt.Checked:
            self.lineEdit_http_proxy.show()
            self.lineEdit_https_proxy.show()
            self.label_http_proxy.show()
            self.label_https_proxy.show()
        else:
            self.lineEdit_http_proxy.hide()
            self.lineEdit_https_proxy.hide()
            self.label_http_proxy.hide()
            self.label_https_proxy.hide()
|
nilq/baby-python
|
python
|
from flask_restful import Resource, reqparse, abort
from flask import request
from lista.models.service_model import ServiceModel
from lista.schemas.service_schema import ServiceSchema
class ServiceResource(Resource):
    """REST resource for a single Service (GET/DELETE by id, POST/PUT by body).

    NOTE(review): most handlers return status 201 regardless of the verb and
    several swallow exceptions -- flagged inline below.
    """
    # Shared parser for POST/PUT bodies; every field is a required string.
    # NOTE(review): the help texts for details/theme/funcReq/notFuncReq/status
    # all say "O email do Service" -- likely copy/pasted; confirm intended
    # messages before changing user-facing strings.
    parser = reqparse.RequestParser()
    parser.add_argument('name',
                        type=str,
                        required=True,
                        help="O nome do Service não pode estar em branco."
                        )
    parser.add_argument('details',
                        type=str,
                        required=True,
                        help="O email do Service não pode estar em branco."
                        )
    parser.add_argument('theme',
                        type=str,
                        required=True,
                        help="O email do Service não pode estar em branco."
                        )
    parser.add_argument('funcReq',
                        type=str,
                        required=True,
                        help="O email do Service não pode estar em branco."
                        )
    parser.add_argument('notFuncReq',
                        type=str,
                        required=True,
                        help="O email do Service não pode estar em branco."
                        )
    parser.add_argument('status',
                        type=str,
                        required=True,
                        help="O email do Service não pode estar em branco."
                        )

    def get(self,nome):
        """Fetch one service.

        NOTE(review): despite the parameter name, lookup is by id
        (encontrar_pelo_id), and a successful GET returns 201 rather than 200.
        """
        json = ''
        try:
            nome = ServiceModel.encontrar_pelo_id(nome)
            if nome:
                schema = ServiceSchema()
                json = schema.dump(nome).data
            else:
                abort(404, message="Service {} não está na lista".format(nome))
        except Exception as e:
            print(e)
            abort(404, message="Service {} não está na lista".format(nome))
        return json,201

    def post(self):
        """Create a service if no service with the same name exists.

        Returns the serialized new service with 201; when the name is taken,
        returns only a message (no explicit status, so Flask uses 200).
        """
        json = ''
        try:
            data = ServiceResource.parser.parse_args()
            print(data)
            nome = data['name']
            details = data['details']
            theme = data['theme']
            funcReq = data['funcReq']
            notFuncReq = data['notFuncReq']
            status = data['status']
            service = ServiceModel.encontrar_pelo_nome(nome)
            if service :
                return {"message":"Service {} já está na lista".format(nome)}
            else:
                service = ServiceModel(name=nome, details=details, theme=theme, funcReq=funcReq, notFuncReq=notFuncReq, status=status)
                service.adicionar()
                # Re-read the stored row so the response reflects DB state.
                service = ServiceModel.encontrar_pelo_nome(nome)
                schema = ServiceSchema()
                json = schema.dump(service).data
        except Exception as e:
            print(e)
            abort(500, message="Erro no POST")
        return json, 201

    def delete(self,nome):
        """Remove a service by id and return the remaining service list.

        NOTE(review): errors are printed and swallowed, after which the
        (possibly empty) json is still returned with 201.
        """
        json = []
        try:
            nome = ServiceModel.encontrar_pelo_id(nome)
            if nome:
                nome.remover()
                lista = ServiceModel.listar()
                schema = ServiceSchema(many=True)
                json = schema.dump(lista).data
            else:
                return {"message":"Service {} não está na lista".format(nome)},404
        except Exception as e:
            print(e)
        return json, 201

    def put(self):
        """Upsert-style handler: creates the service when it does not exist.

        NOTE(review): a true update is never performed -- when the service
        exists only a message is returned; also a single object is dumped
        with a many=True schema, and exceptions are printed and swallowed.
        """
        json = ''
        try:
            data = ServiceResource.parser.parse_args()
            print(data)
            nome = data['name']
            details = data['details']
            theme = data['theme']
            funcReq = data['funcReq']
            notFuncReq = data['notFuncReq']
            status = data['status']
            service = ServiceModel.encontrar_pelo_nome(nome)
            if service :
                return {"message":"Service {} já está na lista".format(service)},200
            else:
                service = ServiceModel(name=nome, details=details, theme=theme, funcReq=funcReq, notFuncReq=notFuncReq, status=status)
                service.adicionar()
                schema = ServiceSchema(many=True)
                service = ServiceModel.encontrar_pelo_nome(nome)
                json = schema.dump(service).data
        except Exception as e:
            print(e)
        return json, 201
class ServicesResource(Resource):
    """Collection endpoint: GET returns every stored Service."""

    def get(self):
        """List all services (201 on success, 500 with a message on error)."""
        payload = []
        try:
            services = ServiceModel.listar()
            payload = ServiceSchema(many=True).dump(services).data
        except Exception as exc:
            print(exc)
            return {"message": "Aconteceu um erro tentando retornar a lista de compras."}, 500
        return payload, 201
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from PIL import Image, ImageDraw
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import seaborn as sns
import tqdm
class DetectionDataset(Dataset):
    """Torch dataset for object detection: yields (image, target) pairs
    where target holds bounding boxes, labels and per-instance binary masks.

    marks      -- list of dicts with keys 'file' (image filename) and 'nums'
                  (list of dicts whose 'box' is a polygon as point pairs) --
                  assumed from the indexing below; TODO confirm schema.
    img_folder -- prefix prepended verbatim to each filename.
    transforms -- optional callable applied to the PIL image only (targets
                  are not transformed).
    """
    def __init__(self, marks, img_folder, transforms=None):
        self.marks = marks
        self.img_folder = img_folder
        self.transforms = transforms

    def __getitem__(self, idx):
        item = self.marks[idx]
        img_path = f'{self.img_folder}{item["file"]}'
        img = Image.open(img_path).convert('RGB')
        w, h = img.size
        box_coords = item['nums']
        boxes = []
        labels = []
        masks = []
        for box in box_coords:
            # Polygon vertices of one instance; its axis-aligned extent
            # becomes the bounding box.
            points = np.array(box['box'])
            x0, y0 = np.min(points[:, 0]), np.min(points[:, 1])
            x2, y2 = np.max(points[:, 0]), np.max(points[:, 1])
            boxes.append([x0, y0, x2, y2])
            labels.append(1)  # single foreground class
            # Rasterize the polygon into a full-image binary mask by testing
            # every pixel coordinate against the polygon path.
            nx, ny = w, h
            poly_verts = points
            x, y = np.meshgrid(np.arange(nx), np.arange(ny))
            x, y = x.flatten(), y.flatten()
            points = np.vstack((x,y)).T  # NOTE: rebinds 'points' to the pixel grid
            path = Path(poly_verts)
            grid = path.contains_points(points)
            grid = grid.reshape((ny,nx)).astype(int)
            masks.append(grid)
        # NOTE(review): an item with no boxes produces empty tensors here --
        # confirm the downstream model accepts that shape.
        boxes = torch.as_tensor(boxes)
        labels = torch.as_tensor(labels)
        masks = torch.as_tensor(masks)
        target = {
            'boxes': boxes,
            'labels': labels,
            'masks': masks,
        }
        if self.transforms is not None:
            img = self.transforms(img)
        return img, target

    def __len__(self):
        return len(self.marks)
|
nilq/baby-python
|
python
|
def _repr(mat,notes,dFrame):
from shutil import get_terminal_size as gts
old = None
d0,d1 = mat.dim
feats = mat.features
ind_level = mat.index.level
col_place_holder = mat.DISPLAY_OPTIONS["col_place_holder"]
row_place_holder = mat.DISPLAY_OPTIONS["row_place_holder"]
left_seperator = mat.DISPLAY_OPTIONS["left_seperator"]
label_seperator = mat.DISPLAY_OPTIONS["label_seperator"]
available = gts().columns - 4
shuffled_col_inds = []
usedcols = []
used_col_amount = 0
upper = d1//2 + 1 if d1%2 else d1//2
for ind in range(upper):
shuffled_col_inds.append(ind)
shuffled_col_inds.append(d1-ind-1)
if d1%2:
shuffled_col_inds.append(d1//2 + 1)
rowlimit = min(d0,mat.ROW_LIMIT)
halfrow = rowlimit//2
if rowlimit%2:
halfrow += 1
#Get column tab lengths to decide which columns to print
old_dfMat,old_fMat,old_cMat = mat._dfMat,mat._fMat,mat._cMat
#Turn non-dataframe into dataframe
if mat.dtype != dFrame:
old = mat.copy
mat = old.copy
mat.dtype = dFrame
#All rows can be printed
if rowlimit==d0:
string_bounds = mat._stringfy(mat.coldtypes,True)
#Too many rows, use only the head and tail rows' tab lengths
else:
top_bounds = mat[:halfrow]._stringfy(mat.coldtypes,True)
bottom_bounds = mat[d0-(rowlimit//2):]._stringfy(mat.coldtypes,True)
string_bounds = [max(top_bounds[i],bottom_bounds[i]) for i in range(d1+ind_level)]
if string_bounds == "Empty matrix":
return string_bounds
total_col_size = sum(string_bounds[:ind_level])+(ind_level-1)*len(label_seperator)+len(left_seperator)
string_bounds = list(map(lambda a:a+2,string_bounds[ind_level:-1])) + [string_bounds[-1]]
if (not isinstance(string_bounds,list)) or (len(feats)==0):
return "Empty Matrix"
if sum(string_bounds)+total_col_size>available:
#Check how many columns will fit
for num,i in enumerate(shuffled_col_inds):
bound = string_bounds[i]
extra = len(col_place_holder)+2 if num!=d1-1 else 0
total_col_size += bound
if total_col_size + extra<= available:
used_col_amount += 1
usedcols.append(i)
else:
total_col_size+= extra
break
else:
used_col_amount = d1
if used_col_amount == 0 or (total_col_size>available and usedcols==[0]) :#Update this :')
return "\nWindow \ntoo \nsmall"
#Check limits
collimit = min(d1,used_col_amount)
for i in [rowlimit,collimit]:
if not isinstance(i,int):
raise TypeError("ROW/COL limit can't be non-integer values")
else:
if i<1:
return f"Can't display any rows/columns using limits for rows and columns : [{rowlimit},{collimit}]"
if not isinstance(notes,str):
raise TypeError(f"NOTES option can only be used with strings, not {type(notes).__name__}")
#Not too many rows or columns
if d0<=rowlimit and d1<=collimit:
if old != None:
return old._stringfy(coldtypes=mat.coldtypes[:]) + "\n\n" + notes
return mat._stringfy(coldtypes=mat.coldtypes[:]) + "\n\n" + notes
halfcol = collimit//2
if collimit%2:
halfcol += 1
srted = sorted(usedcols)
first = srted[:halfcol]
second = srted[halfcol:]
dec = mat.decimal if (old_dfMat or old_fMat or old_cMat) else 0
#Too many rows
if d0>rowlimit:
#Too many columns
if d1>collimit and collimit>1:
#Divide matrix into 4 parts
topLeft = mat[:halfrow,first].roundForm(mat.decimal,dec)
topRight = mat[:halfrow,second].roundForm(mat.decimal,dec)
bottomLeft = mat[d0-(rowlimit//2):,first].roundForm(mat.decimal,dec)
bottomRight = mat[d0-(rowlimit//2):,second].roundForm(mat.decimal,dec)
#Change dtypes to dFrames filled with strings
for i in [topLeft,topRight,bottomLeft,bottomRight]:
if i.dtype != dFrame:
i.dtype = dFrame
#Add col_place_holder to represent missing column's existence
topLeft.add([col_place_holder]*topLeft.d0,col=halfcol + 1,dtype=str,feature=col_place_holder)
bottomLeft.add([col_place_holder]*bottomLeft.d0,col=halfcol + 1,dtype=str,feature=col_place_holder)
#Concat left parts with rights, dots in the middle
topLeft.concat(topRight,axis=1)
bottomLeft.concat(bottomRight,axis=1)
#Fix indices
if mat._dfMat:
topLeft.index = mat.index[:halfrow]
bottomLeft.index = mat.index[d0-(rowlimit//2):]
else:
bottomLeft.index = list(range(d0-(rowlimit//2),d0))
#Concat bottom to top
topLeft.concat(bottomLeft,axis=0)
#Add dots as middle row
topLeft.add([row_place_holder]*topLeft.d1,row=halfrow+1,index=row_place_holder)
return topLeft._stringfy(coldtypes=topLeft.coldtypes) + "\n\n" + notes
#Just too many rows
else:
end = 1 if collimit==1 else d1
#Get needed parts
top = mat[:halfrow,:end].roundForm(mat.decimal,dec)
bottom = mat[d0-(rowlimit//2):,:end].roundForm(mat.decimal,dec)
if d1>1 and end == 1:
top.add([col_place_holder]*top.d0,col=2,dtype=str,feature=col_place_holder)
bottom.add([col_place_holder]*bottom.d0,col=2,dtype=str,feature=col_place_holder)
#Set new dtypes
for i in [top,bottom]:
if i.dtype != dFrame:
i.dtype = dFrame
#Fix indices
if mat._dfMat:
top.index = mat.index[:halfrow]
bottom.index = mat.index[d0-(rowlimit//2):]
else:
bottom.index = list(range(d0-(rowlimit//2),d0))
#Concat last items
top.concat(bottom,axis=0)
#Add middle part
top.add([row_place_holder]*top.d1,row=halfrow+1,index=row_place_holder)
return top._stringfy(coldtypes=top.coldtypes) + "\n\n" + notes
#Just too many columns
elif d1>collimit:
#Single column can fit
if first == second:
left = mat[:,0].roundForm(mat.decimal,dec)
if d1>1:
left.add([col_place_holder]*d0,col=2,dtype=str,feature=col_place_holder)
if not mat._dfMat:
left.dtype = dFrame
else:
#Get needed parts
left = mat[:,first].roundForm(mat.decimal,dec)
right = mat[:,second].roundForm(mat.decimal,dec)
#Set new dtypes
for i in [left,right]:
if i.dtype != dFrame:
i.dtype = dFrame
#Add and concat rest of the stuff
left.add([col_place_holder]*d0,col=halfcol + 1,dtype=str,feature=col_place_holder)
left.concat(right,axis=1)
return left._stringfy(coldtypes=left.coldtypes) + "\n\n" + notes
#Should't go here
else:
raise ValueError("Something is wrong with the matrix, check dimensions and values")
|
nilq/baby-python
|
python
|
import numpy as np
import tensorflow as tf
# Demo of NumPy 2-D indexing and TensorFlow 1.x graph execution.
tensor_2d = np.array([(1, 2, 3, 4), (4, 5, 6, 7), (8, 9, 10, 11), (12, 13, 14, 15)])
print(tensor_2d)
# Single element (row 2, col 3) and the 2x2 top-left slice.
print(tensor_2d[2][3])
print(tensor_2d[0:2, 0:2])
matrix1 = np.array([(2, 2, 2), (2, 2, 2), (2, 2, 2)], dtype='int32')
matrix2 = np.array([(1, 1, 1), (1, 1, 1), (1, 1, 1)], dtype='int32')
print("Matrix 1 = ")
print(matrix1)
print("Matrix 2 = ")
print(matrix2)
# Wrap the NumPy arrays as constant nodes of the TF1 graph.
matrix1 = tf.constant(matrix1)
matrix2 = tf.constant(matrix2)
# NOTE(review): multiplies matrix1 by itself; matrix2 is unused in the
# product — possibly tf.matmul(matrix1, matrix2) was intended. Confirm.
matrix_product = tf.matmul(matrix1, matrix1)
matrix_sum = tf.add(matrix1, matrix2)
matrix3 = np.array([(2, 7, 2), (1, 4, 2), (9, 0, 2)], dtype='float64')
print("Matrix3")
print(matrix3)
# tf.matrix_determinant and tf.Session are TF 1.x APIs (removed in TF2).
matrix_det = tf.matrix_determinant(matrix3)
with tf.Session() as sess:
    print(sess.run(matrix1))
    print(sess.run(matrix2))
    print(sess.run(matrix_product))
    print(sess.run(matrix_sum))
    print(sess.run(matrix_det))
|
nilq/baby-python
|
python
|
# Generated by Django 4.0.1 on 2022-01-06 14:01
from django.db import migrations
class Migration(migrations.Migration):
    """Schema migration: drop the obsolete ``app_label`` field from ``Modulo``."""
    dependencies = [
        ('verzelapp', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='modulo',
            name='app_label',
        ),
    ]
|
nilq/baby-python
|
python
|
from . HaarClassifierFunctions import faceFinder as fd
from . HaarClassifierFunctions import haarTraining as tr
from . HaarClassifierFunctions import boundingBoxes as bd
import cv2
import os
import numpy as np
def classifierceleb(path):
    """Run LBPH face recognition on the image at *path*.

    Detects faces with a Haar cascade, predicts each face's identity with a
    pre-trained LBPH recognizer ('trainingData.yml'), draws the bounding box
    and predicted name on the image, and overwrites *path* with a 500x500
    annotated copy.

    Args:
        path: filesystem path of the image to classify (also the output path).
    """
    test_img = cv2.imread(path)
    print(test_img)
    face_detected, gray_img = fd.faceDetection(test_img)
    print("faces_detected: ", face_detected)
    # -- One-off training (kept for reference; run once, then reuse the .yml) --
    # faces, faceID = tr.labels_for_training_data('HaarCascadeDataset')
    # face_recognizer = tr.train_classifier(faces, faceID)
    # face_recognizer.write('trainingData.yml')
    # Load the previously trained recognizer.
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')  # use this to load training data for subsquent ones
    # Label index -> person name.
    name = {0: "Dalai Lama", 1: "Arsene Wenger", 2: "Genelia Dsouza",
            3: "Luiz Suarez", 4: "Sergio Aguero"}
    for face in face_detected:
        (x, y, w, h) = face
        # BUG FIX: the horizontal slice previously used `x:x + h`; use the
        # box width `w` so non-square detections crop the correct region.
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)  # predicting the label
        print("Confidence:", confidence)
        print("label:", label)
        bd.draw_rect(test_img, face)
        predicted_name = name[label]
        bd.put_text(test_img, predicted_name, x, y)
    resized_img = cv2.resize(test_img, (500, 500))
    cv2.imwrite(path, resized_img)
    cv2.waitKey(0)
|
nilq/baby-python
|
python
|
"""Simple watcher that prints wikipedia changes as they occur."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import pprint
import argparse
import pywikibot
import requests
import sseclient
from googleapiclient import errors as google_api_errors
from antidox import clean
from antidox import perspective
# pylint: disable=fixme, too-many-locals
def log_event(apikey_data, toxicity, dlp, change):
    """Analyze one wiki change and log PII / toxicity findings to a wiki page.

    Fetches the revision diff from the Wikipedia compare API, cleans it, runs
    it through DLP (PII detection) and Perspective (toxicity), and appends any
    positive finding to the DoxDetective talk page via wiki_write.

    Args:
        apikey_data: API key payload for the DLP request.
        toxicity: Perspective API client.
        dlp: DLP API client.
        change: a json object with the wikimedia change record.
    """
    from_id = (str(change['revision']['old']))
    to_id = (str(change['revision']['new']))
    # Diff between the old and new revision of the changed page.
    page = ('https://en.wikipedia.org/w/api.php?action=compare&fromrev=' +
            from_id + '&torev=' + to_id + '&format=json')
    get_page = requests.get(page)
    response = json.loads(get_page.content.decode('utf-8'))
    revision = response['compare']['*']
    text = clean.content_clean(revision)
    print(text)
    if not text:
        return
    dlp_response = perspective.dlp_request(dlp, apikey_data, text)
    try:
        perspective_response = perspective.perspective_request(toxicity, text)
    # Perspective can't handle language errors at this time
    except google_api_errors.HttpError as err:
        print('Error:', err)
        return
    has_pii_bool, pii_type = perspective.contains_pii(dlp_response)
    if has_pii_bool:
        header = '==Possible Doxxing Detected: Waiting for review=='
        # NOTE(review): .format() binds only to the final literal, so the
        # {user}/{namespace}/... placeholders are emitted verbatim; kept as-is
        # to preserve the existing log format.
        result = (
            u'{'
            'user:{user}, namespace:{namespace}, bot:{bot}, comment:{comment}' +
            'title:{title},'.format(**change) + ', ' + 'comment_text:' + str(text) +
            ', ' + 'contains_pii:' + 'True' + ', ' + 'pii_type:' + str(pii_type) +
            ', '
            '}'
            '\n')
        wiki_write(result, header)
    if perspective.contains_toxicity(perspective_response):
        header = '==Possibly Toxic Detected: Waiting for review=='
        result = (
            u'{'
            'user:{user}, namespace:{namespace}, bot:{bot}, comment:{comment}' +
            'title:{title}'.format(**change) + ', ' + 'comment_text:' + str(text) +
            ', ' + 'contains_toxicity:' + 'True' + ', ' + 'toxic_score:' +
            # BUG FIX: was `['attributeScores'] + ['TOXICITY'][...]`, which
            # raised TypeError at runtime; index the nested response dict.
            str(perspective_response['attributeScores']
                ['TOXICITY']['summaryScore']['value']) + ', '
            '}'
            '\n')
        wiki_write(result, header)
def wiki_write(result, header):
    """Append *result* under *header* to the DoxDetective talk page.

    Args:
        result: pre-formatted record line to publish.
        header: wiki section heading (e.g. '==...==').
    """
    site = pywikibot.Site()
    # repo is unused below; kept because data_repository() may initialize
    # site state — TODO confirm and drop if not needed.
    repo = site.data_repository()
    page = pywikibot.Page(site, u'User_talk:DoxDetective')
    heading = (header)
    content = (result)
    # ~~~~ is expanded by MediaWiki into a signature + timestamp.
    message = '\n\n{}\n{} --~~~~'.format(heading, content)
    page.save(
        summary='Testing',
        watch=None,
        minor=False,
        botflag=True,
        force=False,
        # BUG FIX: `async=False` is a SyntaxError on Python 3.7+; pywikibot
        # renamed the keyword to `asynchronous`.
        asynchronous=False,
        callback=None,
        apply_cosmetic_changes=None,
        appendtext=message)
def watcher(event_source, wiki_filter, namespaces_filter, callback):
    """Consume a mediawiki SSE stream and invoke *callback* on relevant edits.

    Args:
        event_source: an iterable source of streaming sse events.
        wiki_filter: only changes whose 'wiki' field equals this string pass.
        namespaces_filter: a set() of namespace ints to keep.
        callback: invoked with the parsed JSON dict of each accepted change.
    """
    for sse_event in event_source:
        # Only non-empty 'message' events carry change payloads.
        if sse_event.event != 'message' or not sse_event.data:
            continue
        try:
            change = json.loads(sse_event.data)
        except json.decoder.JSONDecodeError as err:
            print('Error:', err)
            pprint.pprint(sse_event.data)
            continue
        # Drop bot edits, other wikis, and unwanted namespaces.
        if change['bot']:
            continue
        if change['wiki'] != wiki_filter:
            continue
        if change['namespace'] not in namespaces_filter:
            continue
        # Only keep changes that carry a proper old->new revision pair.
        if 'revision' not in change or 'old' not in change['revision']:
            continue
        callback(change)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--wiki_filter', default='enwiki', help='Wiki to scan, default enwiki.')
    parser.add_argument(
        '--namespaces',
        default='1,3',
        help='Namespaces defined in http://phabricator.wikimedia.' +
        'org/source/mediawiki/browse/master/includes/Defines.php separated by commas.'
    )
    parser.add_argument(
        '--url',
        default='https://stream.wikimedia.org/v2/stream/recentchange',
        help='SSE client url')
    args = parser.parse_args()
    # Parse the comma-separated namespace list into a set of ints
    # (set comprehension instead of set() around a list comprehension).
    namespaces = {int(ns) for ns in args.namespaces.split(',')}
    client = sseclient.SSEClient(args.url)
    apikey_data, toxicity, dlp = perspective.get_client()

    def log_change(change):
        # Bind the API clients so the watcher callback takes a single arg.
        return log_event(apikey_data, toxicity, dlp, change)

    watcher(client, args.wiki_filter, namespaces, log_change)
|
nilq/baby-python
|
python
|
# Read 5 values and keep the list sorted as each value is inserted.
n = []
for c in range(0, 5):
    valor = int(input('Digite um valor: '))
    # First value, or larger than the current maximum: append at the end.
    if c == 0 or valor > n[-1]:
        n.append(valor)
        print('Valor adicionado ao final da lista.')
    else:
        # Otherwise scan for the first element >= valor and insert before it.
        pos = 0
        while pos < len(n):
            if valor <= n[pos]:
                # BUG FIX: previously inserted the list itself (`n.insert(pos, n)`),
                # corrupting the list and breaking later comparisons (TypeError).
                n.insert(pos, valor)
                print(f'Valor adicionado na posição {pos}.')
                break
            pos += 1
print(n)
|
nilq/baby-python
|
python
|
from collections import defaultdict
from typing import List, Any, Set, Tuple, Iterable
import pandas as pd
def printUniqueTokens(series: pd.Series):
    """Print whitespace-token frequencies across the unique values of *series*.

    Each distinct string in the series is split on single spaces; token counts
    are printed one per line, sorted by ascending frequency.

    Args:
        series: pandas Series of strings.
    """
    # defaultdict(int) replaces the manual "if key not in dict" counting.
    token_count = defaultdict(int)
    for unique_value in series.unique():
        for token in unique_value.split(' '):
            token_count[token] += 1
    for key, value in sorted(token_count.items(), key=lambda item: item[1]):
        print("%s: %s" % (key, value))
# Source: https://www.geeksforgeeks.org/python-merge-list-with-common-elements-in-a-list-of-lists/
# merge function to merge all sublist having common elements.
def merge_common(lists: Iterable[Iterable[Any]]) -> List[List[Any]]:
    """Merge all sub-iterables that share at least one element.

    Builds an item-adjacency map, then walks its connected components with a
    depth-first search; each component is yielded as a sorted list.
    """
    adjacency = defaultdict(set)
    for group in lists:
        for member in group:
            adjacency[member].update(group)

    seen = set()

    def component(start):
        # DFS over the adjacency map, yielding each reachable item once.
        pending = [start]
        while pending:
            current = pending.pop()
            if current in seen:
                continue
            seen.add(current)
            pending.extend(adjacency[current] - seen)
            yield current

    for item in adjacency:
        if item not in seen:
            yield sorted(component(item))
def loadGoldStandard() -> Tuple[List[Set[int]], List[int]]:
    """Loads the Gold Standard from the file provided by the HPI 'restaurants_DPL.tsv'

    Returns:
        a tuple with two lists (dupeSets, dupeIds).
        dupeSets is a list of all rows in the Gold Standard as sets
        dupeIds is a list of all ids that are in Gold Standard
    """
    gold = pd.read_csv(PATH_PREFIX + '/restaurants_DPL.tsv', delimiter='\t')
    # Group the id pairs by their first id; every group becomes one dupe set.
    byFirstId = {}
    for _, row in gold.iterrows():
        pair = byFirstId.setdefault(row[0], set())
        pair.add(row[0])
        pair.add(row[1])
    dupeSets: List[Set[Any]] = list(byFirstId.values())
    dupeIds: List[int] = [member for group in dupeSets for member in group]
    return dupeSets, dupeIds
def compareDfToGold(df: pd.DataFrame, total=864) -> Tuple[Set, Set, Set, List]:
    """Compare the duplicate groups of *df* against the Gold Standard.

    Rows whose ``id`` set contains more than one member count as recognized
    duplicates; delegates the actual scoring to :func:`compareToGold`.
    """
    recognized = df[df.id.map(len) > 1].id
    return compareToGold(list(recognized), total)
def compareToGold(duplicates: List[Set[int]], total=864, printType="table") -> Tuple[Set, Set, Set, List]:
    """Compares the given list of duplicates to the Gold Standard and prints and returns the results

    Args:
        duplicates: to compare to the Gold Standard
        total: the total number of records
        printType: the type in which this method should print its results
            (possible values: "table", "csv", or anything else to not print at all)

    Returns:
        A Tuple (true_positive, false_negative, false_positive, listOfParams)
    """
    recognizedDuplicates = duplicates
    true_positive = set()
    false_negative = set()
    false_positive = set()
    # A gold pair is a hit (tp) if it was recognized exactly, else a miss (fn).
    for dupeSet in GOLD_DUPE_SETS:
        if dupeSet in recognizedDuplicates:
            true_positive.add(frozenset(dupeSet))
        else:
            false_negative.add(frozenset(dupeSet))
    # Any recognized pair absent from the gold standard is a false positive.
    for recognizedDupeSet in recognizedDuplicates:
        if recognizedDupeSet in GOLD_DUPE_SETS:
            true_positive.add(frozenset(recognizedDupeSet))
        else:
            false_positive.add(frozenset(recognizedDupeSet))
    # Count individual records inside the pair-sets.
    fn = len([e for s in false_negative for e in s])
    fp = len([e for s in false_positive for e in s])
    tp = len([e for s in true_positive for e in s])
    tn = total - tp - fn - fp
    # BUG FIX: guard against ZeroDivisionError when nothing was recognized
    # (tp + fp == 0), nothing was found (tp + fn == 0), or both metrics are 0.
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    fScore = (2 * precision * recall) / (precision + recall) if precision + recall else 0.0
    listOfParams = [tp, tn, fp, fn, precision, recall, fScore]
    if printType == "table":
        print("tp={:<5}, tn={:<5}, fp={:<5}, fn={:<5}, precision={:< .3f}, recall={:< .3f}, fScore={:< .3f}"
              .format(*listOfParams))
    elif printType == "csv":
        print(*listOfParams, sep=",")
    return true_positive, false_negative, false_positive, listOfParams
def prepareUploadJsons(df: pd.DataFrame) -> pd.DataFrame:
    """Prepares and saves two json files that can be imported into mongodb.

    Look in ../data/work/deduped_raw.json and ../data/work/deduped_clean.json

    Args:
        df: a pandas DataFrame

    Returns:
        a pandas DataFrame
    """
    def _first_of_set(value: Any) -> Any:
        # Collapse a set to one (arbitrary) member; pass anything else through.
        if isinstance(value, set):
            return value.pop()
        return value

    if "group" in df:
        df.drop("group", axis=1, inplace=True)
    # Raw dump first (sets intact), then collapse each merged column.
    df.to_json(PATH_PREFIX + '/deduped_raw.json', orient='records')
    for column in ("name", "address", "city", "cname", "caddress"):
        df[column] = df[column].apply(_first_of_set)
    df.to_json(PATH_PREFIX + '/deduped_clean.json', orient='records')
    print("Written two jsons 'deduped_raw.json' and 'deduped_clean.json' to directory '"
          + PATH_PREFIX + "'.")
    print("You can use those with mongoimport!")
    print("Example:")
    print(" mongoimport [...] --db dmdb --collection clean --type json --file deduped_raw.json -v --jsonArray --drop")
    return df
def difference(li1: List[Any], li2: List[Any]) -> Tuple[List[Any], List[Any]]:
    """Return the asymmetric differences between two lists.

    Returns:
        (left, right): left holds the elements of li2 that are missing from
        li1, right the elements of li1 missing from li2 — order and
        duplicates preserved.
    """
    # The old version scanned the concatenation li1 + li2; elements of li1 can
    # never pass the "not in li1" filter, so only the other list needs scanning.
    left = [item for item in li2 if item not in li1]
    right = [item for item in li1 if item not in li2]
    return left, right
# Base directory for all data artifacts read and written by this module.
PATH_PREFIX = "../data/work"
# NOTE: executed at import time — reads restaurants_DPL.tsv from PATH_PREFIX.
GOLD_DUPE_SETS, GOLD_DUPE_IDS = loadGoldStandard()
config = {
    # if True a comparison to the gold standard will be made
    "compareToGold": True,
    # if True then the results of the deduplication will be prepared and saved as json files
    # to enable an import into mongodb
    "prepareUploadJsons": True,
    # this is a feature flag to toggle the old way on, that found all but 12 duplicates
    # if this is set to false a slightly improved way is used, which found all but 8 duplicates
    # ONLY applies when the clean.py is used
    "useOldCalculation": False
}
|
nilq/baby-python
|
python
|
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``ObjectPermission`` model with its content-type, group,
    and user many-to-many links."""
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('auth', '0011_update_proxy_permissions'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0007_proxy_group_user'),
    ]
    operations = [
        migrations.CreateModel(
            name='ObjectPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('description', models.CharField(blank=True, max_length=200)),
                ('enabled', models.BooleanField(default=True)),
                # JSON constraints restricting which objects the permission covers.
                ('constraints', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
                ('actions', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=30), size=None)),
                # Limited to app models; internal Django/auth/users models excluded.
                ('object_types', models.ManyToManyField(limit_choices_to=models.Q(models.Q(models.Q(_negated=True, app_label__in=['admin', 'auth', 'contenttypes', 'sessions', 'taggit', 'users']), models.Q(('app_label', 'auth'), ('model__in', ['group', 'user'])), models.Q(('app_label', 'users'), ('model__in', ['objectpermission', 'token'])), _connector='OR')), related_name='object_permissions', to='contenttypes.ContentType')),
                ('groups', models.ManyToManyField(blank=True, related_name='object_permissions', to='auth.Group')),
                ('users', models.ManyToManyField(blank=True, related_name='object_permissions', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'permission',
            },
        ),
    ]
|
nilq/baby-python
|
python
|
from unittest import TestCase
from src.chunker import Chunker
class TestChunker(TestCase):
    """Unit tests for the Chunker iterator."""

    def test_chunker_yields_list_with_buffered_size(self):
        chunker = Chunker(range(5), 3)
        # A full chunk of the requested buffer size comes out first...
        first = next(chunker)
        self.assertEqual(len(first), 3)
        self.assertListEqual(first, [0, 1, 2])
        # ...followed by whatever remains.
        self.assertListEqual(next(chunker), [3, 4])

    def test_setting_neg1_on_buffer_yields_entire_list(self):
        chunker = Chunker(range(5), -1)
        self.assertListEqual(next(chunker), [0, 1, 2, 3, 4])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import sys
from setuptools import setup, find_packages
# Version comes from the CI tag ref (GITHUB_REF), falling back to 0.0.4.
VERSION = os.environ.get('GITHUB_REF', '0.0.4').replace('refs/tags/v', '')
is_wheel = 'bdist_wheel' in sys.argv


def _read_if_exists(path, reader, default):
    # Apply *reader* to the opened file at *path*, or return *default*.
    if os.path.exists(path):
        with open(path) as fh:
            return reader(fh)
    return default


# First LICENSE line, full README, and the pinned requirement lines.
_license = _read_if_exists('LICENSE', lambda fh: fh.readline().rstrip(), "")
description = _read_if_exists('README.md', lambda fh: fh.read(), "")
requirements = _read_if_exists('requirements.txt', lambda fh: fh.readlines(), [])

setup_info = dict(
    name='qab_core',
    version=VERSION,
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    py_modules=['qab_core'],
    license=_license,
    description="QAB framework, high performance, secure, easy to learn, fast to code, ready for production",
    long_description=description,
    long_description_content_type="text/markdown",
    url="https://github.com/MaJyxSoftware/qab_core",
    author="Benjamin Schwald",
    author_email="b.schwald@majyx.net",
    python_requires='>=3.7',
    classifiers=[
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities'
    ],
)

# Only wheels pin install_requires; sdists resolve requirements at install time.
if is_wheel:
    setup_info['install_requires'] = requirements

setup(**setup_info)
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import transforms
import numpy as np
import cv2
from models.single_track import SiamRPNPP as base_model
from dataset.util import generate_anchor
class SiamRPNPP(nn.Module):
    """Thin inference wrapper around the SiamRPN++ base model.

    Caches the template (exemplar) features once via :meth:`temple`; then
    :meth:`forward` correlates them with search-region features.
    """
    def __init__(self, tracker_name = ''):
        super(SiamRPNPP, self).__init__()
        # Tracking hyper-parameters, consumed by TrackerConfig.update().
        self.cfg = {'lr': 0.45, 'window_influence': 0.44, 'penalty_k': 0.04, 'instance_size': 255, 'adaptive': False} # 0.355
        self.tracker_name = tracker_name
        self.model = base_model()
    def temple(self, z):
        # Extract and cache exemplar features (the tracking "template").
        zf = self.model.features(z)
        zf = self.model.neck(zf)
        self.zf = zf
    def forward(self, x):
        # Extract search-region features and correlate against the cached template.
        xf = self.model.features(x)
        xf = self.model.neck(xf)
        cls, loc = self.model.head(self.zf, xf)
        return loc, cls
class TrackerConfig(object):
    """Default tracking hyper-parameters; overridable via :meth:`update`."""
    # These are the default hyper-params for DaSiamRPN 0.3827
    windowing = 'cosine' # to penalize large displacements [cosine/uniform]
    # Params from the network architecture, have to be consistent with the training
    exemplar_size = 127 # input z size
    instance_size = 255 # input x size (search region)
    total_stride = 8
    # score_size = (instance_size-exemplar_size)/total_stride+1 # for siamrpn
    score_size = 25 # for siamrpn++
    # print(score_size)
    context_amount = 0.5 # context amount for the exemplar
    # Anchor geometry: one scale, five aspect ratios.
    ratios = [0.33, 0.5, 1, 2, 3]
    scales = [8, ]
    anchor_num = len(ratios) * len(scales)
    anchor = []  # populated by SiamRPN_init via generate_anchor
    penalty_k = 0.055
    window_influence = 0.42
    lr = 0.295
    def update(self, cfg):
        """Override any of the defaults above with the tracker's cfg dict."""
        for k, v in cfg.items():
            setattr(self, k, v)
        # self.score_size = (self.instance_size - self.exemplar_size) / self.total_stride + 1 # for siamrpn
# self.score_size = (self.instance_size - self.exemplar_size) / self.total_stride + 1 # for siamrpn
def tracker_eval(net, x_crop, target_pos, target_sz, window, scale_z, p):
    """Run one inference step on the search crop and decode the best box.

    Args:
        net: model whose __call__ returns (delta, score) maps for x_crop.
        x_crop: search-region tensor (batched image crop).
        target_pos: previous target center (x, y) in image coordinates.
        target_sz: previous target size, pre-scaled by scale_z.
        window: flattened per-anchor cosine window penalizing displacement.
        scale_z: crop scale factor; results are divided by it to return
            to image coordinates.
        p: config object providing anchor, penalty_k, window_influence, lr.

    Returns:
        (target_pos, target_sz, best_score) in original image coordinates.
    """
    delta, score = net(x_crop)
    # Flatten the regression map to (4, num_anchors*H*W); take the foreground
    # softmax probability for each anchor position.
    delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1).data.cpu().numpy()
    score = F.softmax(score.permute(1, 2, 3, 0).contiguous().view(2, -1), dim=0).data[1, :].cpu().numpy()
    # Decode (dx, dy, dw, dh) offsets against the anchor boxes.
    delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
    delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
    delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
    delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]
    def change(r):
        # Symmetric ratio: penalizes r and 1/r equally.
        return np.maximum(r, 1./r)
    def sz(w, h):
        # Equivalent context-padded size of a w x h box.
        pad = (w + h) * 0.5
        sz2 = (w + pad) * (h + pad)
        return np.sqrt(sz2)
    def sz_wh(wh):
        # Same as sz() but for a (w, h) pair.
        pad = (wh[0] + wh[1]) * 0.5
        sz2 = (wh[0] + pad) * (wh[1] + pad)
        return np.sqrt(sz2)
    # size penalty
    s_c = change(sz(delta[2, :], delta[3, :]) / (sz_wh(target_sz))) # scale penalty
    r_c = change((target_sz[0] / target_sz[1]) / (delta[2, :] / delta[3, :])) # ratio penalty
    penalty = np.exp(-(r_c * s_c - 1.) * p.penalty_k)
    pscore = penalty * score
    # window float
    pscore = pscore * (1 - p.window_influence) + window * p.window_influence
    best_pscore_id = np.argmax(pscore)
    # print('###################### {}'.format(best_pscore_id))
    # Map the winning box back to image scale.
    target = delta[:, best_pscore_id] / scale_z
    target_sz = target_sz / scale_z
    # Adaptive EMA rate: confident, low-penalty detections update faster.
    lr = penalty[best_pscore_id] * score[best_pscore_id] * p.lr
    res_x = target[0] + target_pos[0]
    res_y = target[1] + target_pos[1]
    res_w = target_sz[0] * (1 - lr) + target[2] * lr
    res_h = target_sz[1] * (1 - lr) + target[3] * lr
    target_pos = np.array([res_x, res_y])
    target_sz = np.array([res_w, res_h])
    return target_pos, target_sz, score[best_pscore_id]
def get_subwindow_tracking(im, pos, model_sz, original_sz, avg_chans, out_mode='torch', new=False):
    """Crop a square original_sz window centered on *pos* from *im*, padding
    out-of-image areas with avg_chans, then resize to model_sz.

    NOTE(review): the default out_mode='torch' path calls im_to_torch, which
    is neither defined nor imported in this module — confirm it is provided
    elsewhere; callers in this file always pass out_mode='np'. Also note that
    `out_mode in 'torch'` is a substring test, not an equality check.
    """
    if isinstance(pos, float):
        pos = [pos, pos]
    sz = original_sz
    im_sz = im.shape
    c = (original_sz+1) / 2
    context_xmin = round(pos[0] - c) # floor(pos(2) - sz(2) / 2);
    context_xmax = context_xmin + sz - 1
    context_ymin = round(pos[1] - c) # floor(pos(1) - sz(1) / 2);
    context_ymax = context_ymin + sz - 1
    # Amount by which the window exceeds each image edge.
    left_pad = int(max(0., -context_xmin))
    top_pad = int(max(0., -context_ymin))
    right_pad = int(max(0., context_xmax - im_sz[1] + 1))
    bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
    # Shift the crop coordinates into the padded image frame.
    context_xmin = context_xmin + left_pad
    context_xmax = context_xmax + left_pad
    context_ymin = context_ymin + top_pad
    context_ymax = context_ymax + top_pad
    # zzp: a more easy speed version
    r, c, k = im.shape
    if any([top_pad, bottom_pad, left_pad, right_pad]):
        # Build a padded canvas, fill borders with the mean channel color.
        te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8) # 0 is better than 1 initialization
        te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
        if top_pad:
            te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
        if bottom_pad:
            te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
        if left_pad:
            te_im[:, 0:left_pad, :] = avg_chans
        if right_pad:
            te_im[:, c + left_pad:, :] = avg_chans
        im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
    else:
        im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
    if not np.array_equal(model_sz, original_sz):
        im_patch = cv2.resize(im_patch_original, (model_sz, model_sz)) # zzp: use cv to get a better speed
    else:
        im_patch = im_patch_original
    return im_to_torch(im_patch) if out_mode in 'torch' else im_patch
def SiamRPN_init(im, target_pos, target_sz, net):
    """Initialize tracker state from the first frame.

    Args:
        im: first frame (H x W x C array).
        target_pos: initial target center (x, y).
        target_sz: initial target size (w, h).
        net: SiamRPNPP wrapper; its cfg dict overrides TrackerConfig defaults.

    Returns:
        dict holding the config, net, padding color, cosine window, and the
        current target position/size.
    """
    state = dict()
    p = TrackerConfig()
    p.update(net.cfg)
    state['im_h'] = im.shape[0]
    state['im_w'] = im.shape[1]
    p.anchor = generate_anchor(p.total_stride, p.scales, p.ratios, int(p.score_size))
    # Mean color per channel, used to pad crops at the image border.
    avg_chans = np.mean(im, axis=(0, 1))
    # Context-padded exemplar size.
    wc_z = target_sz[0] + p.context_amount * sum(target_sz)
    hc_z = target_sz[1] + p.context_amount * sum(target_sz)
    s_z = round(np.sqrt(wc_z * hc_z))
    # initialize the exemplar
    z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans, out_mode='np')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    z = Variable(transform(z_crop).unsqueeze(0))
    # net.temple(z.cuda())
    net.temple(z)
    # Cosine window tiled per anchor, used later to penalize displacement.
    window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
    window = np.tile(window.flatten(), p.anchor_num)
    state['p'] = p
    state['net'] = net
    state['avg_chans'] = avg_chans
    state['window'] = window
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    return state
def SiamRPN_track(state, im):
    """Track the target into frame *im*, updating and returning *state*.

    Crops a search region around the previous position, runs the network via
    tracker_eval, and clamps the result to the image bounds.
    """
    p = state['p']
    net = state['net']
    avg_chans = state['avg_chans']
    window = state['window']
    target_pos = state['target_pos']
    target_sz = state['target_sz']
    # NOTE(review): indices [1]/[0] are swapped relative to SiamRPN_init;
    # harmless here because only the product wc_z * hc_z is used.
    wc_z = target_sz[1] + p.context_amount * sum(target_sz)
    hc_z = target_sz[0] + p.context_amount * sum(target_sz)
    s_z = np.sqrt(wc_z * hc_z)
    scale_z = p.exemplar_size / s_z
    # Grow the exemplar region out to the search-region size.
    d_search = (p.instance_size - p.exemplar_size) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad
    # extract scaled crops for search region x at previous target position
    x_crop = get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x), avg_chans, out_mode='np')
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    x_crop = Variable(transform(x_crop).unsqueeze(0))
    # target_pos, target_sz, score = tracker_eval(net, x_crop.cuda(), target_pos, target_sz * scale_z, window, scale_z, p)
    target_pos, target_sz, score = tracker_eval(net, x_crop, target_pos, target_sz * scale_z, window, scale_z, p)
    # Clamp the center into the image and enforce a 10px minimum size.
    target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
    target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
    target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
    target_sz[1] = max(10, min(state['im_h'], target_sz[1]))
    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    state['score'] = score
    return state
|
nilq/baby-python
|
python
|
import argparse
from utils import levenshtein
import pdb
import re
def clean(label):
    """Normalize *label* for WER/CER comparison.

    Maps '-' to '*', lowercases, and drops every character outside
    [0-9a-z* ] (the single space is kept as a word separator).

    Args:
        label: raw ground-truth or prediction string.

    Returns:
        The normalized string.
    """
    allowed = set('0123456789abcdefghijklmnopqrstuvwxyz* ')
    # str.join over a generator replaces the quadratic += string building.
    return ''.join(ch for ch in label.replace('-', '*').lower() if ch in allowed)
# --- CLI ---------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--preds', type=str, default='../misc/preds/temp.txt', help='path to preds file')
parser.add_argument('--vocab', type=str, required=True)
parser.add_argument('--mode', type=str, default='word', help='path to preds file')
parser.add_argument('--lower', action='store_true', help='convert strings to lowercase ebfore comparison')
parser.add_argument('--alnum', action='store_true', help='convert strings to alphanumeric before comparison')
opt = parser.parse_args()

# Words seen during training are skipped in word mode (OOV evaluation only).
train_vocab = []
with open(opt.vocab) as f:
    for line in f:
        train_vocab.append(line.strip())

f = open(opt.preds, 'r')
tw = 0  # total words
ww = 0  # wrong words
tc = 0  # total characters
wc = 0  # wrong characters (accumulated edit distance)
word_lens = []  # lengths of mis-recognized ground-truth words (word mode only)

if opt.mode == 'word':
    # The preds file alternates: prediction line, then ground-truth line.
    for i, line in enumerate(f):
        print(line)
        if i % 2 == 0:
            pred = line.strip()
        else:
            gt = line.strip()
            if gt in train_vocab:
                continue
            if opt.lower:
                gt = gt.lower()
                pred = pred.lower()
            if opt.alnum:
                # Raw string avoids the invalid-escape warning for \W.
                pattern = re.compile(r'[\W_]+')
                gt = pattern.sub('', gt)
                pred = pattern.sub('', pred)
            if gt != pred:
                ww += 1
                wc += levenshtein(gt, pred)
                word_lens.append(len(gt))
                print(gt, pred, wc)
            tc += len(gt)
            tw += 1
else:
    # Line/sentence mode: compare position-aligned words after clean().
    for i, line in enumerate(f):
        if i % 2 == 0:
            pred = line.strip()
        else:
            gt = line.strip()
            gt = clean(gt)
            pred = clean(pred)
            gt_w = gt.split()
            pred_w = pred.split()
            for j in range(len(gt_w)):
                try:
                    if gt_w[j] != pred_w[j]:
                        ww += 1
                except IndexError:
                    # Prediction is shorter than the ground truth.
                    ww += 1
            tw += len(gt.split())
            wc += levenshtein(gt, pred)
            tc += len(gt)

print(ww, tw)
# BUG FIX: guard every ratio/aggregate against empty inputs. Previously
# sum(word_lens)/len(word_lens) ALWAYS raised ZeroDivisionError in non-word
# mode (word_lens stays empty), and WER/CER crashed on an empty preds file.
if tw:
    print('WER: ', (ww / tw) * 100)
if tc:
    print('CER: ', (wc / tc) * 100)
if word_lens:
    print('Incorrect Avg: ', sum(word_lens) / len(word_lens))
    print('Incorrect Max Avg: ', max(word_lens))
    print('Incorrect Min Avg: ', min(word_lens))
|
nilq/baby-python
|
python
|
"""
Create a pedestal file from an event file using the target_calib Pedestal
class
"""
from targetpipe.io.camera import Config
Config('checs')
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, \
FuncFormatter, AutoMinorLocator
from traitlets import Dict, List
from ctapipe.core import Tool, Component
from ctapipe.io.eventfilereader import EventFileReaderFactory
from targetpipe.calib.camera.makers import PedestalMaker
from targetpipe.calib.camera.r1 import TargetioR1Calibrator
from targetpipe.calib.camera.tf import TFApplier
from targetpipe.io.eventfilereader import TargetioFileReader
from targetpipe.plots.official import ThesisPlotter
from tqdm import tqdm, trange
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import patches
from os.path import join, dirname
from IPython import embed
import pandas as pd
from scipy.stats import norm
from targetpipe.utils.dactov import checm_dac_to_volts
from glob import glob
import re
class WaveformPlotter(ThesisPlotter):
    """Accumulates waveform traces on one axis and saves a labelled figure."""
    name = 'WaveformPlotter'
    def __init__(self, config, tool, **kwargs):
        """
        Parameters
        ----------
        config : traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
            Set to None if no configuration to pass.
        tool : ctapipe.core.Tool
            Tool executable that is calling this component.
            Passes the correct logger to the component.
            Set to None if no Tool to pass.
        kwargs
        """
        super().__init__(config=config, tool=tool, **kwargs)
    def add(self, waveform):
        # Overlay one waveform trace on the shared axis.
        self.ax.plot(waveform)
    def save(self, output_path=None):
        # Label the axes before delegating the actual save to the base class.
        self.ax.set_title("Waveforms for one channel, incrementing VPED")
        self.ax.set_xlabel("Time (ns)")
        self.ax.set_ylabel("Amplitude (ADC Pedestal-Subtracted)")
        self.ax.xaxis.set_major_locator(MultipleLocator(16))
        super().save(output_path)
class TFInvestigator(Tool):
    """ctapipe Tool that overlays one r1-calibrated waveform per amplitude
    step of the AC transfer-function dataset."""
    name = "TFInvestigator"
    description = "Produce plots associated with the " \
                  "transfer function calibration"
    aliases = Dict(dict())
    classes = List([])
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.df_file = None     # DataFrame of (path, amplitude, reader) rows
        self.tf = None
        self.r1 = None          # pedestal-subtracting R1 calibrator
        self.n_pixels = None
        self.n_samples = None
        self.p_vi = None        # WaveformPlotter collecting the traces
    def setup(self):
        self.log_format = "%(levelname)s: %(message)s [%(name)s.%(funcName)s]"
        kwargs = dict(config=self.config, tool=self)
        # NOTE(review): hard-coded local data paths; adjust for other hosts.
        ped_path = "/Volumes/gct-jason/data_checs/tf/ac_tf_tmSN0074/Pedestal.tcal"
        self.r1 = TargetioR1Calibrator(pedestal_path=ped_path,
                                       **kwargs,
                                       )
        dfl = []
        file_list = glob("/Volumes/gct-jason/data_checs/tf/ac_tf_tmSN0074/Amplitude_*_r0.tio")
        # Extract the pulse amplitude encoded in each file name.
        pattern = 'Amplitude_(.+?)_r0.tio'
        for p in file_list:
            amplitude = int(re.search(pattern, p).group(1))
            print(amplitude)
            dfl.append(dict(path=p, amplitude=amplitude))
        for d in dfl:
            d['reader'] = TargetioFileReader(input_path=d['path'], **kwargs)
        self.df_file = pd.DataFrame(dfl)
        self.df_file = self.df_file.sort_values('amplitude')
        # Probe the first event to learn the camera geometry.
        first_event = dfl[0]['reader'].get_event(0)
        telid = list(first_event.r0.tels_with_data)[0]
        r1 = first_event.r1.tel[telid].pe_samples[0]
        self.n_pixels, self.n_samples = r1.shape
        # NOTE(review): p_kwargs aliases kwargs (same dict object), so these
        # keys are also added to kwargs — presumably intentional; confirm.
        p_kwargs = kwargs
        p_kwargs['script'] = "ac_transfer_function_wfs"
        p_kwargs['figure_name'] = "amplitude_increments"
        self.p_vi = WaveformPlotter(**kwargs)
    def start(self):
        desc1 = 'Looping through files'
        n_rows = len(self.df_file.index)
        t = tqdm(self.df_file.iterrows(), total=n_rows, desc=desc1)
        for index, row in t:
            path = row['path']
            reader = row['reader']
            amplitude = row['amplitude']
            source = reader.read()
            n_events = reader.num_events
            # Plot the first event's first-pixel waveform for each file.
            event = reader.get_event(0)
            self.r1.calibrate(event)
            wf = event.r1.tel[0].pe_samples[0, 0]
            # if amplitude < 2000:
            self.p_vi.add(wf)
    def finish(self):
        # Write the accumulated figure to disk.
        self.p_vi.save()
# Instantiate and run the tool at import/execution time.
exe = TFInvestigator()
exe.run()
|
nilq/baby-python
|
python
|
import django_filters
import htmlgenerator as hg
from django import forms
from django.utils.html import mark_safe
from django.utils.translation import gettext as _
from django_countries.widgets import LazySelect
from .button import Button
from .notification import InlineNotification
class Form(hg.FORM):
    """Carbon-styled form element wiring a Django form into htmlgenerator
    children, handling CSRF, error notifications, and rendering."""
    @staticmethod
    def from_django_form(form, **kwargs):
        # Render every field of the Django form.
        return Form.from_fieldnames(form, form.fields, **kwargs)
    @staticmethod
    def from_fieldnames(form, fieldnames, **kwargs):
        return Form.wrap_with_form(
            form, *[FormField(fieldname) for fieldname in fieldnames], **kwargs
        )
    @staticmethod
    def wrap_with_form(form, *elements, submit_label=None, **kwargs):
        # Standalone forms get a submit button appended after the fields.
        if kwargs.get("standalone", True) is True:
            elements += (
                hg.DIV(
                    Button(submit_label or _("Save"), type="submit"),
                    _class="bx--form-item",
                    style="margin-top: 2rem",
                ),
            )
        return Form(form, *elements, **kwargs)
    def __init__(self, form, *children, use_csrf=True, standalone=True, **attributes):
        """
        form: lazy evaluated value which should resolve to the form object
        children: any child elements, can be formfields or other
        use_csrf: add a CSRF input, but only for POST submission and standalone forms
        standalone: if true, will add a CSRF token and will render enclosing FORM-element
        """
        self.form = form
        self.standalone = standalone
        defaults = {"method": "POST", "autocomplete": "off"}
        defaults.update(attributes)
        # CSRF token only for standalone POST forms.
        if (
            defaults["method"].upper() == "POST"
            and use_csrf is not False
            and standalone is True
        ):
            children = (CsrfToken(),) + children
        super().__init__(*children, **defaults)
    def formfieldelements(self):
        # All FormChild descendants belonging to THIS form (not a nested Form).
        return self.filter(
            lambda elem, parents: isinstance(elem, FormChild)
            and not any((isinstance(p, Form) for p in parents[1:]))
        )
    def render(self, context):
        form = hg.resolve_lazy(self.form, context, self)
        # Hand the resolved form object to every child field before rendering.
        for formfield in self.formfieldelements():
            formfield.form = form
        # Surface form-level and hidden-field errors as inline notifications.
        for error in form.non_field_errors():
            self.insert(0, InlineNotification(_("Form error"), error, kind="error"))
        for hidden in form.hidden_fields():
            for error in hidden.errors:
                self.insert(
                    0,
                    InlineNotification(
                        _("Form error: "), hidden.name, error, kind="error"
                    ),
                )
        if self.standalone:
            # File uploads require a multipart form body.
            if form.is_multipart() and "enctype" not in self.attributes:
                self.attributes["enctype"] = "multipart/form-data"
            return super().render(context)
        return super().render_children(context)
class FormChild:
    """Marker base class: elements deriving from it get their ``form``
    attribute assigned by the enclosing Form before rendering."""
class FormField(FormChild, hg.BaseElement):
    """Dynamic element which will resolve the field with the given name
    and return the correct HTML, based on the widget of the form field or
    on the passed argument 'fieldtype'."""

    def __init__(
        self,
        fieldname,
        fieldtype=None,
        hidelabel=False,
        elementattributes=None,
        widgetattributes=None,
    ):
        # Fixed mutable default arguments: the former ``{}`` defaults were
        # shared between every FormField instance and would leak state if
        # ever mutated. ``None`` still accepts the old call signature.
        self.fieldname = fieldname
        self.fieldtype = fieldtype
        self.widgetattributes = {} if widgetattributes is None else widgetattributes
        self.elementattributes = {} if elementattributes is None else elementattributes
        self.form = None  # will be set by the render method of the parent form
        self.hidelabel = hidelabel

    def render(self, context):
        # Delegate to the widget-to-element mapping for the bound field.
        element = _mapwidget(
            self.form[self.fieldname],
            self.fieldtype,
            self.elementattributes,
            self.widgetattributes,
        )
        if self.hidelabel:
            # Strip all LABEL elements from the generated tree.
            element._replace(
                lambda e, ancestors: isinstance(e, hg.LABEL), None, all=True
            )
        return element.render(context)

    def __repr__(self):
        return f"FormField({self.fieldname})"
class FormsetField(FormChild, hg.BaseElement):
    """Renders a django formset: the management form, one sub-form per
    formset entry, plus a hidden empty-form template which client-side
    javascript (``init_formset``/``formset_add``) clones to add entries."""

    def __init__(
        self,
        fieldname,
        *children,
        containertag=hg.DIV,
        formsetinitial=None,
        **formsetfactory_kwargs,
    ):
        super().__init__(*children)
        self.fieldname = fieldname
        self.formsetfactory_kwargs = formsetfactory_kwargs
        self.formsetinitial = formsetinitial
        self.containertag = containertag

    def render(self, context):
        formset = self.form[self.fieldname].formset
        # Detect internal fields like the delete-checkbox, the order-widget, id fields, etc and add their
        # HTML representations. But we never show the "delete" checkbox, it should be manually added via InlineDeleteButton
        declared_fields = [
            f.fieldname
            for f in self.filter(lambda e, ancestors: isinstance(e, FormField))
        ]
        internal_fields = [
            field
            for field in formset.empty_form.fields
            if field not in declared_fields
            and field != forms.formsets.DELETION_FIELD_NAME
        ]
        # NOTE(review): appending to ``self`` inside render() means a second
        # render of the same instance would add the internal fields again —
        # confirm each FormsetField is rendered at most once.
        for field in internal_fields:
            self.append(FormField(field))
        skeleton = hg.DIV(
            Form.from_django_form(formset.management_form, standalone=False),
            self.containertag(
                hg.Iterator(
                    formset,
                    loopvariable="formset_form",
                    content=Form(hg.C("formset_form"), *self, standalone=False),
                ),
                id=f"formset_{formset.prefix}_container",
            ),
            hg.DIV(
                # Hidden template cloned by the client-side formset helpers.
                Form(formset.empty_form, *self, standalone=False),
                id=f"empty_{ formset.prefix }_form",
                _class="template-form",
                style="display:none;",
            ),
            hg.SCRIPT(
                mark_safe(
                    f"""document.addEventListener("DOMContentLoaded", e => init_formset("{ formset.prefix }"));"""
                )
            ),
        )
        yield from skeleton.render(context)

    def __repr__(self):
        return f"Formset({self.fieldname}, {self.formsetfactory_kwargs})"
class FormsetAddButton(FormChild, Button):
    """Button which appends a new, empty sub-form to a formset on click."""

    def __init__(self, fieldname, label=_("Add"), **kwargs):
        self.fieldname = fieldname
        config = dict(icon="add", notext=True, buttontype="tertiary")
        config.update(kwargs)
        super().__init__(label, **config)

    def render(self, context):
        formset = self.form[self.fieldname].formset
        # Wire the button up to the client-side formset helper functions.
        self.attributes["id"] = f"add_{formset.prefix}_button"
        self.attributes["onclick"] = (
            f"formset_add('{ formset.prefix }', '#formset_{ formset.prefix }_container');"
        )
        return super().render(context)
class InlineDeleteButton(FormChild, Button):
    def __init__(self, parentcontainerselector, label=_("Delete"), **kwargs):
        """Show a delete button for the current inline form. This element
        needs to be inside a FormsetField.

        parentcontainerselector: CSS-selector handed to ``element.closest``
            to find the parent container which is hidden on delete.
        """
        config = {
            "notext": True,
            "small": True,
            "icon": "trash-can",
            "buttontype": "ghost",
            "onclick": f"delete_inline_element(this.querySelector('input[type=checkbox]'), this.closest('{parentcontainerselector}'))",
        }
        config.update(kwargs)
        # The hidden DELETE checkbox is what django actually reads on submit;
        # the javascript above only ticks it and hides the container.
        super().__init__(
            label,
            FormField(
                forms.formsets.DELETION_FIELD_NAME,
                elementattributes={"style": "display: none"},
            ),
            **config,
        )
class HiddenInput(FormChild, hg.INPUT):
    """A plain ``<input type="hidden">`` bound to a django form field."""

    def __init__(self, fieldname, widgetattributes, **attributes):
        self.fieldname = fieldname
        # Explicitly passed attributes take precedence over widget attributes.
        super().__init__(type="hidden", **{**widgetattributes, **attributes})

    def render(self, context):
        # Bug fix: previously ``self.boundfield.auto_id`` was read BEFORE the
        # None-check below, raising AttributeError whenever no field was
        # bound; all boundfield accesses now live inside the guard.
        if self.boundfield is not None:
            self.attributes["id"] = self.boundfield.auto_id
            self.attributes["name"] = self.boundfield.html_name
            if self.boundfield.value() is not None:
                self.attributes["value"] = self.boundfield.value()
        return super().render(context)
class CsrfToken(FormChild, hg.INPUT):
    """Hidden input carrying django's CSRF token for POST submissions."""

    def __init__(self):
        super().__init__(type="hidden")

    def render(self, context):
        # The token value is only known at render time, via the context.
        self.attributes["name"] = "csrfmiddlewaretoken"
        self.attributes["value"] = context["csrf_token"]
        return super().render(context)
def _mapwidget(
    field, fieldtype, elementattributes=None, widgetattributes=None, only_initial=False
):
    """Build the carbon-design element tree for a single bound django field.

    :param field: ``django.forms.BoundField`` to render.
    :param fieldtype: optional element class overriding the automatic
        widget-based mapping below.
    :param elementattributes: extra keyword arguments for the created element.
    :param widgetattributes: extra HTML attributes merged over the widget's.
    :param only_initial: render the hidden "initial" companion of the field
        (django's ``show_hidden_initial`` mechanism).
    :return: an ``hg.BaseElement`` tree for the field.
    """
    # Fixed mutable default arguments: the former ``{}`` defaults were shared
    # between all calls. ``None`` keeps the call signature backward-compatible.
    if elementattributes is None:
        elementattributes = {}
    if widgetattributes is None:
        widgetattributes = {}

    # Imported locally to avoid circular imports between the element modules.
    from .checkbox import Checkbox
    from .date_picker import DatePicker
    from .file_uploader import FileUploader
    from .multiselect import MultiSelect
    from .select import Select
    from .text_area import TextArea
    from .text_input import PasswordInput, TextInput

    WIDGET_MAPPING = {
        forms.TextInput: TextInput,
        forms.NumberInput: TextInput,  # TODO HIGH
        forms.EmailInput: TextInput,  # TODO
        forms.URLInput: TextInput,  # TODO
        forms.PasswordInput: PasswordInput,
        forms.HiddenInput: HiddenInput,
        forms.DateInput: DatePicker,
        forms.DateTimeInput: TextInput,  # TODO
        forms.TimeInput: TextInput,  # TODO HIGH
        forms.Textarea: TextArea,
        forms.CheckboxInput: Checkbox,
        forms.Select: Select,
        forms.NullBooleanSelect: Select,
        forms.SelectMultiple: MultiSelect,  # TODO HIGH
        forms.RadioSelect: TextInput,  # TODO HIGH
        forms.CheckboxSelectMultiple: TextInput,  # TODO HIGH
        forms.FileInput: FileUploader,
        forms.ClearableFileInput: FileUploader,  # TODO HIGH
        forms.MultipleHiddenInput: TextInput,  # TODO
        forms.SplitDateTimeWidget: TextInput,  # TODO
        forms.SplitHiddenDateTimeWidget: TextInput,  # TODO
        forms.SelectDateWidget: TextInput,  # TODO
        # 3rd party widgets
        django_filters.widgets.DateRangeWidget: TextInput,  # TODO
        LazySelect: Select,
    }

    # The following is a bit of magic to play nicely with the django form
    # processing. TODO: This can be simplified, and improved
    if field.field.localize:
        field.field.widget.is_localized = True
    attrs = dict(field.field.widget.attrs)
    attrs.update(widgetattributes)
    attrs = field.build_widget_attrs(attrs)
    if getattr(field.field.widget, "allow_multiple_selected", False):
        attrs["multiple"] = True
        attrs["style"] = "height: 16rem"
    # id/name fall back to the "initial" variants when rendering the hidden
    # companion input of a field with show_hidden_initial.
    if field.auto_id and "id" not in field.field.widget.attrs:
        attrs.setdefault("id", field.html_initial_id if only_initial else field.auto_id)
    if "name" not in attrs:
        attrs["name"] = field.html_initial_name if only_initial else field.html_name
    value = field.field.widget.format_value(field.value())
    if value is not None and "value" not in attrs:
        attrs["value"] = value

    # Element kwargs declared on the form field take lower precedence than
    # the explicitly passed ones.
    elementattributes = {
        **getattr(field.field, "layout_kwargs", {}),
        **elementattributes,
    }

    if isinstance(field.field.widget, forms.CheckboxInput):
        attrs["checked"] = field.value()

    # Select widgets (single and multiple) are handled specially because they
    # need the resolved optgroups rather than a plain value attribute.
    if isinstance(field.field.widget, forms.Select):
        if isinstance(field.field.widget, forms.SelectMultiple):
            return hg.DIV(
                MultiSelect(
                    field.field.widget.optgroups(
                        field.name,
                        field.field.widget.get_context(field.name, field.value(), {})[
                            "widget"
                        ]["value"],
                    ),
                    label=field.label,
                    help_text=field.help_text,
                    errors=field.errors,
                    disabled=field.field.disabled,
                    required=field.field.required,
                    widgetattributes=attrs,
                    **elementattributes,
                ),
                _class="bx--form-item",
            )
        return hg.DIV(
            Select(
                field.field.widget.optgroups(
                    field.name,
                    field.field.widget.get_context(field.name, field.value(), {})[
                        "widget"
                    ]["value"],
                ),
                label=field.label,
                help_text=field.help_text,
                errors=field.errors,
                disabled=field.field.disabled,
                required=field.field.required,
                widgetattributes=attrs,
                **elementattributes,
            ),
            _class="bx--form-item",
        )

    # Resolution order: explicit fieldtype > per-field layout > widget mapping.
    fieldtype = (
        fieldtype
        or getattr(field.field, "layout", None)
        or WIDGET_MAPPING[type(field.field.widget)]
    )
    if isinstance(fieldtype, type) and issubclass(fieldtype, hg.BaseElement):
        ret = fieldtype(
            fieldname=field.name, widgetattributes=attrs, **elementattributes
        )
    else:
        ret = fieldtype
    ret.boundfield = field

    if (
        field.field.show_hidden_initial and fieldtype != HiddenInput
    ):  # special case, prevent infinite recursion
        return hg.BaseElement(
            ret,
            _mapwidget(field, HiddenInput, only_initial=True),
        )
    return ret
|
nilq/baby-python
|
python
|
import sys
import pytest
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Hologram.Network import Network
class TestNetwork(object):
    """Unit tests for the abstract Network base type."""

    def test_create_network(self):
        # Constructing the base Network type itself must succeed.
        network = Network()

    def test_get_invalid_connection_status(self):
        # The base class cannot report a connection status and must raise.
        # Bug fix: pytest.raises's ``message=`` argument never checked the
        # exception text (it was only a custom failure message) and was
        # removed in pytest 4.0, where it raises TypeError.
        network = Network()
        with pytest.raises(Exception):
            connectionStatus = network.getConnectionStatus()

    def test_get_invalid_signal_strength(self):
        # Same contract for signal strength on the abstract base type.
        network = Network()
        with pytest.raises(Exception):
            connectionStatus = network.getSignalStrength()
|
nilq/baby-python
|
python
|
from django.urls import path
from rest_framework_simplejwt import views as jwt_views
from . import views
from rest_framework_simplejwt.views import TokenRefreshView, TokenObtainPairView
# URL routes for authentication, staff management and customer listing.
urlpatterns = [
    path('login/', views.loginView.as_view(), name='obtain_token'),
    path('nlogin/', views.adminTokenObtainPairView.as_view(), name='obtain_token_new'),
    path('login/refresh/', TokenRefreshView.as_view(), name="refresh_token"),
    path('test/', views.testView.as_view(), name='test'),
    path('register/', views.registerView.as_view(), name='register'),
    path('change_password/<int:pk>/', views.changePasswordView.as_view(),
         name="change_password"),
    path('update_profile/<int:pk>/', views.updateProfileView.as_view(),
         name="update_profile"),
    path('logout/', views.logoutView.as_view(), name='logout'),
    path('logout_all/', views.logoutAllView.as_view(), name='logout_all'),
    path('adminlogin/', views.adminTokenObtainPairView.as_view(),
         name='admin_obtain_token'),
    path('adminlogin/refresh/', TokenRefreshView.as_view(),
         name="admin_refresh_token"),
    # Staff management:
    # NOTE(review): several routes below reuse the names "staff_profile_update"
    # and "staff_list"; ``reverse()`` resolves only the last registration of a
    # name — confirm whether these should be unique.
    path('staff/register/', views.staffRegisterView.as_view(), name="staff_register"),
    path('staff/update_profile/<int:pk>/', views.staffProfileUpdate.as_view(), name="staff_profile_update"),
    path('staff/modify_pay/<int:pk>/', views.staffPayView.as_view(), name="staff_profile_update"),
    path('staff/staff_delete/<int:pk>/', views.staffDeleteView.as_view(), name="staff_profile_update"),
    path('staff/stafflist/', views.staffListView.as_view(), name="staff_list"),
    path('staff/staff_payment_list/', views.staffPaymentListView.as_view(), name="staff_list"),
    path('staff/staffdetail/<int:pk>/', views.staffDetailView.as_view(), name="staff_detail"),
    path('staff/paystaff/', views.staffPayRecordView.as_view(), name="staff_pay_record"),
    path('staff/totalMoneyPaidToStaff/', views.totalMoneyPaidToStaff.as_view(), name="staff_payment_analysis"),
    # Customer:
    # Bug fix: ``customerListView().as_view()`` instantiated the view class and
    # then called ``as_view`` on the *instance*; ``as_view()`` is a classmethod
    # and must be called on the view class itself.
    path('customer/customerlist/', views.customerListView.as_view(), name="customer_list"),
]
|
nilq/baby-python
|
python
|
"""The managers for the models
"""
from django.contrib.auth.models import UserManager as BaseUserManager
class UserManager(BaseUserManager):
    """Custom user manager which tags created users with an ``is_admin``
    extra field (presumably declared on the project's user model —
    TODO confirm against the model definition)."""

    def create_user(self, username, email=None, password=None, **extra_fields):
        """Create a regular (non-admin) user.

        :param username: The user name.
        :param email: The user email.
        :param password: The user password.
        :param extra_fields: Extra model fields for the user.
        :return: The created user.
        """
        return super().create_user(username, email, password, is_admin=False, **extra_fields)

    def create_superuser(self, username, email=None, password=None, **extra_fields):
        """Create a superuser.

        :param username: The user name.
        :param email: The user email.
        :param password: The user password.
        :param extra_fields: Extra model fields for the user.
        :return: The created superuser.
        """
        # NOTE(review): this calls the private ``_create_user`` (bypassing
        # django's ``create_superuser``, which would also set
        # ``is_staff``/``is_superuser``) while ``create_user`` above goes
        # through the public API — confirm the asymmetry is intentional.
        return self._create_user(username, email, password, is_admin=True, **extra_fields)
|
nilq/baby-python
|
python
|
"""
Project: RadarBook
File: ecef_to_lla.py
Created by: Lee A. Harrison
On: 3/18/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (artech@artechhouse.com)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
from numpy import sqrt, sin, cos, arctan2, mod
from scipy.constants import pi
def convert(ecef_x, ecef_y, ecef_z):
    """
    Convert ECEF coordinates to geodetic LLA.

    :param ecef_x: The x coordinate of the point (m).
    :param ecef_y: The y coordinate of the point (m).
    :param ecef_z: The z coordinate of the point (m).
    :return: Tuple of (latitude (rad), longitude (rad), altitude (m)).
    """
    # WGS84 semi-major axis and first eccentricity
    semi_major = 6378137
    eccentricity = 8.1819190842622e-2

    # Semi-minor (polar) axis and second eccentricity
    semi_minor = sqrt(semi_major**2 * (1. - eccentricity**2))
    second_ecc = sqrt((semi_major**2 - semi_minor**2) / semi_minor**2)

    # Distance of the point from the rotation axis
    rho = sqrt(ecef_x * ecef_x + ecef_y * ecef_y)

    # Parametric (reduced) latitude angle
    beta = arctan2(semi_major * ecef_z, semi_minor * rho)

    # Closed-form (Bowring) geodetic latitude, longitude and altitude
    lat = arctan2((ecef_z + second_ecc**2 * semi_minor * sin(beta)**3),
                  (rho - eccentricity**2 * semi_major * cos(beta)**3))
    lon = mod(arctan2(ecef_y, ecef_x), 2. * pi)
    alt = rho / cos(lat) - semi_major / sqrt(1. - eccentricity**2 * sin(lat)**2)

    return lat, lon, alt
|
nilq/baby-python
|
python
|
x,y=map(int,input().split())
z=0
for j in range(x,y):
z=j
a=0
for i in range(len(str(j))):
r=j%10
a=a+r**3
j=j//10
if a==z:
print(a,end=" ")
print()
|
nilq/baby-python
|
python
|
import os
from .base import *
# Database connection string for the API; overridable via the environment.
API_DB_URL = os.getenv("API_DB_URL", "sqlite+aiosqlite:///db.sqlite")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, patterns
from django.contrib import admin
from .views import ChatRoomTokenView, ChatRoomView
# ``django.contrib.admin`` autodiscovers automatically since Django 1.7;
# the explicit call is kept for compatibility with older setups.
admin.autodiscover()

# Plain-list urlpatterns: ``patterns()`` was deprecated in Django 1.8 and
# removed in Django 1.10, so wrapping the url() entries broke on upgrade.
urlpatterns = [
    # Chat room addressed by a 32-character token.
    url(r'^room/(?P<token>\w{32})$', ChatRoomView.as_view(), name='chat-room'),
    # Create a fresh room token.
    url(r'^new-room/$', ChatRoomTokenView.as_view(), name='chat-room-token'),
]
|
nilq/baby-python
|
python
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class TranscodeInfo(object):
    """Configuration of a live-stream transcoding template (auto-generated
    JDCloud API model). All parameters are optional; unset values are
    stored as ``None``.

    :param videoCodec: video codec, ``h264`` (default) or ``h265``
    :param videoCodeRate: output video bitrate, range [128, 15000] kbps
    :param videoFrameRate: output frame rate, range [1, 30]
    :param width: output video width, range [128, 4096]; if only one of
        width/height is set the other scales proportionally, if neither is
        set the source aspect ratio is kept
    :param height: output video height, range [128, 4096]; see ``width``
    :param template: custom template identifier (digits, letters and
        hyphens; must not start or end with a hyphen and must not clash
        with standard template names)
    :param templateName: display name of the transcoding template
    :param audioCodec: output audio codec, ``aac`` or ``mp3``
        (case-insensitive)
    :param audioFormat: output audio format: aac_lc, aac_low, aac_he or
        aac_he_v2 (case-insensitive)
    :param audioSampleRate: audio sample rate, range [44100, 48000]
    :param audioChannel: audio channel count, 1 (mono) or 2 (stereo)
    :param audioCodeRate: output audio bitrate, range [16, 128] kbps
    :param jdchd: "JD ultra-HD" enhancement switch: ``jdchd-1.0`` or ``off``
    :param audioComfort: comfort-audio switch: ``on`` or ``off``
    """

    def __init__(self, videoCodec=None, videoCodeRate=None, videoFrameRate=None, width=None, height=None, template=None, templateName=None, audioCodec=None, audioFormat=None, audioSampleRate=None, audioChannel=None, audioCodeRate=None, jdchd=None, audioComfort=None):
        self.videoCodec = videoCodec
        self.videoCodeRate = videoCodeRate
        self.videoFrameRate = videoFrameRate
        self.width = width
        self.height = height
        self.template = template
        self.templateName = templateName
        self.audioCodec = audioCodec
        self.audioFormat = audioFormat
        self.audioSampleRate = audioSampleRate
        self.audioChannel = audioChannel
        self.audioCodeRate = audioCodeRate
        self.jdchd = jdchd
        self.audioComfort = audioComfort
|
nilq/baby-python
|
python
|
from panflute import run_filter, Header
def increase_header_level(elem, doc):
    """Panflute filter action: demote every Header by one level.

    Headers already at the deepest level (6) cannot be demoted further and
    are removed from the document (returning [] deletes the element).
    """
    if type(elem) is not Header:
        return None
    if elem.level >= 6:
        return []
    elem.level += 1
def main(doc=None):
    """Entry point: run the header-demotion filter over *doc*
    (reads the document from stdin when *doc* is None)."""
    return run_filter(increase_header_level, doc=doc)


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import time
import json
import requests
import logging
import pika
from config import PROXY
from twython import TwythonStreamer
class sampleStreamer(TwythonStreamer):
    """
    Retrieve data from the Twitter Streaming API and forward every received
    tweet to a RabbitMQ queue.

    The streaming API requires
    `OAuth 1.0 <http://en.wikipedia.org/wiki/OAuth>`_ authentication.
    """

    def __init__(self, rabbit_host, rabbit_port, app_key, app_secret, oauth_token, oauth_token_secret, tag):
        """Create a new instance of the sampleStreamer class that will connect to Twitter API and send tweets
        to rabbitmq queue using pika module.

        :param rabbit_host: hostname of the RabbitMQ broker
        :param rabbit_port: port of the RabbitMQ broker
        :param str app_key, app_secret, oauth_token, oauth_token_secret: credentials for Twitter API authentication
        :param str tag: a tag that will be added to the tweet body to indicate its collection method
        """
        self.rabbit_host = rabbit_host
        self.rabbit_port = rabbit_port
        self.rabbit_client = self.open_rabbit_connection()
        self.tweets_queue = self.open_rabbit_channel()
        # Route traffic through a proxy only when one is configured.
        if PROXY:
            client_args = {
                'proxies': PROXY
            }
        else:
            client_args = {}
        self.do_continue = True
        TwythonStreamer.__init__(self, app_key, app_secret, oauth_token,
                                 oauth_token_secret, timeout=100, chunk_size=200, client_args=client_args)
        self.tag = tag

    def open_rabbit_connection(self):
        """Open a blocking connection to RabbitMQ, retrying up to 10 times.

        :return: a connected ``pika.BlockingConnection``
        :raises ConnectionError: when no connection could be made after all
            retries (previously this fell through to ``return`` with the
            local name unbound, raising an opaque UnboundLocalError).
        """
        rabbit_client = None
        for i in range(10):
            try:
                rabbit_client = pika.BlockingConnection(
                    pika.ConnectionParameters(host=self.rabbit_host, port=self.rabbit_port,
                                              connection_attempts=100, retry_delay=2,
                                              ssl=False,
                                              credentials=pika.credentials.PlainCredentials(
                                                  username='user',
                                                  password='password')))
                break
            except pika.exceptions.AMQPConnectionError:
                time.sleep(2)
                logging.error("pika AMQPConnectionError, retrying")
            except Exception as error:
                time.sleep(2)
                logging.error("other error, retrying " + str(error))
        if rabbit_client is None:
            raise ConnectionError("could not connect to RabbitMQ after 10 attempts")
        return rabbit_client

    def open_rabbit_channel(self):
        """Open a channel on the current connection and declare the
        'tweets' queue (idempotent)."""
        tweets_queue = self.rabbit_client.channel()
        tweets_queue.queue_declare(queue='tweets')
        return tweets_queue

    def on_success(self, data):
        """Publish one tweet to the 'tweets' queue, reconnecting once on
        connection loss.

        :param data: response from Twitter API
        """
        data["tags"] = [self.tag]
        data["events"] = [""]
        try:
            self.tweets_queue.basic_publish(exchange='',
                                            routing_key='tweets',
                                            body=json.dumps(data))
        except pika.exceptions.AMQPConnectionError:
            # Re-establish connection and channel, then retry this tweet.
            logging.error("AMQPConnectionError, trying to reconnect")
            self.rabbit_client = self.open_rabbit_connection()
            self.tweets_queue = self.open_rabbit_channel()
            self.on_success(data)
        if self.do_continue == False:
            logging.info("disconnect")
            self.disconnect()

    def on_error(self, status_code, data, logs="logs"):
        """
        :param status_code: The status code returned by the Twitter API
        :param data: The response from Twitter API
        :param logs: this parameter does not match TwythonStreamer implementation but received from Twitter API.
        """
        if status_code == 401:
            logging.error(
                'Error 401: Unauthorized. Check if the Twitter API access token is correct in file config.py.')
            raise requests.exceptions.HTTPError
        else:
            logging.error("Error {}: {}".format(status_code, data))

    def sample(self, lang=None):
        """
        Wrapper for 'statuses / sample' API call
        """
        while self.do_continue:
            # Stream in an endless loop until limit is reached. See twython
            # issue 288: https://github.com/ryanmcgrath/twython/issues/288
            try:
                self.statuses.sample(language=lang)
            except requests.exceptions.ChunkedEncodingError as e:
                if e is not None:
                    logging.error("Encoding error (stream will continue): {}".format(e))

    def filter(self, track='', lang='fr'):
        """
        Wrapper for 'statuses / filter' API call
        """
        while self.do_continue:
            # Stream in an endless loop until limit is reached
            try:
                self.statuses.filter(track=track, language=lang)
            except requests.exceptions.ChunkedEncodingError as e:
                if e is not None:
                    logging.error("Encoding error (stream will continue): {}".format(e))
                continue
            except requests.exceptions.ConnectionError as error:
                logging.error(str(error) + " sleep 5 sec")
                time.sleep(5)
                continue
|
nilq/baby-python
|
python
|
""" Package for cookie auth modules. """
# Package metadata consumed by documentation/packaging tooling.
__author__ = "William Tucker"
__date__ = "2020-02-14"
__copyright__ = "Copyright 2020 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
|
nilq/baby-python
|
python
|
"""
Tests scikit-learn's KNeighbours Classifier and Regressor converters.
"""
import unittest
from distutils.version import StrictVersion
import numpy
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import onnxruntime
from onnxruntime import InferenceSession
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType, Int64TensorType
from skl2onnx.common.data_types import onnx_built_with_ml
from test_utils import dump_data_and_model, fit_classification_model
class TestNearestNeighbourConverter(unittest.TestCase):
    """ONNX conversion tests for KNeighborsRegressor / KNeighborsClassifier.

    Each test fits a scikit-learn model, converts it with ``convert_sklearn``
    and hands model + sample data to ``dump_data_and_model`` for comparison
    against onnxruntime. Runtime-dependent tests are skipped on
    onnxruntime < 0.5.0.
    """

    def _fit_model_binary_classification(self, model):
        # Collapse iris class 2 into class 1 to obtain a binary problem.
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        y[y == 2] = 1
        model.fit(X, y)
        return model, X

    def _fit_model_multiclass_classification(self, model, use_string=False):
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        if use_string:
            # Optionally use string labels ("cl0", "cl1", ...) instead of ints.
            y = numpy.array(["cl%d" % _ for _ in y])
        model.fit(X, y)
        return model, X

    def _fit_model(self, model, n_targets=1, label_int=False):
        # Synthetic regression data with a fixed seed for reproducibility.
        X, y = datasets.make_regression(n_features=4,
                                        random_state=0,
                                        n_targets=n_targets)
        if label_int:
            y = y.astype(numpy.int64)
        model.fit(X, y)
        return model, X

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor(self):
        model, X = self._fit_model(KNeighborsRegressor(n_neighbors=2))
        model_onnx = convert_sklearn(model, "KNN regressor",
                                     [("input", FloatTensorType([None, 4]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:7],
            model, model_onnx,
            basename="SklearnKNeighborsRegressor")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor_yint(self):
        # Integer regression targets.
        model, X = self._fit_model(
            KNeighborsRegressor(n_neighbors=2), label_int=True)
        model_onnx = convert_sklearn(model, "KNN regressor",
                                     [("input", FloatTensorType([None, 4]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:7],
            model, model_onnx,
            basename="SklearnKNeighborsRegressorYInt")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor2_1(self):
        # Multi-output regression (2 targets), single neighbour.
        model, X = self._fit_model(KNeighborsRegressor(n_neighbors=1),
                                   n_targets=2)
        model_onnx = convert_sklearn(model, "KNN regressor",
                                     [("input", FloatTensorType([None, 4]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:2],
            model, model_onnx,
            basename="SklearnKNeighborsRegressor2")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor2_2(self):
        # Multi-output regression (2 targets), two neighbours.
        model, X = self._fit_model(KNeighborsRegressor(n_neighbors=2),
                                   n_targets=2)
        model_onnx = convert_sklearn(model, "KNN regressor",
                                     [("input", FloatTensorType([None, 4]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:2],
            model, model_onnx,
            basename="SklearnKNeighborsRegressor2")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor_weights_distance(self):
        model, X = self._fit_model(
            KNeighborsRegressor(
                weights="distance", algorithm="brute", n_neighbors=1))
        model_onnx = convert_sklearn(model, "KNN regressor",
                                     [("input", FloatTensorType([None, 4]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:3],
            model, model_onnx,
            basename="SklearnKNeighborsRegressorWeightsDistance-Dec3")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor_metric_cityblock(self):
        model, X = self._fit_model(KNeighborsRegressor(metric="cityblock"))
        model_onnx = convert_sklearn(model, "KNN regressor",
                                     [("input", FloatTensorType([None, 4]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:7],
            model, model_onnx,
            basename="SklearnKNeighborsRegressorMetricCityblock")

    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_classifier_binary_class(self):
        model, X = self._fit_model_binary_classification(
            KNeighborsClassifier())
        model_onnx = convert_sklearn(
            model,
            "KNN classifier binary",
            [("input", FloatTensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32),
            model, model_onnx,
            basename="SklearnKNeighborsClassifierBinary")

    # Unconditionally skipped; kept for a later fix.
    @unittest.skipIf(True, reason="later")
    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_classifier_multi_class(self):
        model, X = self._fit_model_multiclass_classification(
            KNeighborsClassifier())
        model_onnx = convert_sklearn(
            model,
            "KNN classifier multi-class",
            [("input", FloatTensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32),
            model, model_onnx,
            basename="SklearnKNeighborsClassifierMulti")

    @unittest.skipIf(not onnx_built_with_ml(),
                     reason="Requires ONNX-ML extension.")
    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_classifier_multi_class_string(self):
        # String class labels exercise classlabels_strings in the ONNX graph.
        model, X = self._fit_model_multiclass_classification(
            KNeighborsClassifier(), use_string=True)
        model_onnx = convert_sklearn(
            model,
            "KNN classifier multi-class",
            [("input", FloatTensorType([None, 3]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32),
            model, model_onnx,
            basename="SklearnKNeighborsClassifierMulti")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_classifier_weights_distance(self):
        model, X = self._fit_model_multiclass_classification(
            KNeighborsClassifier(weights='distance'))
        model_onnx = convert_sklearn(
            model, 'KNN classifier', [('input', FloatTensorType([None, 3]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:7], model, model_onnx,
            basename="SklearnKNeighborsClassifierWeightsDistance")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_classifier_metric_cityblock(self):
        model, X = self._fit_model_multiclass_classification(
            KNeighborsClassifier(metric='cityblock'))
        model_onnx = convert_sklearn(
            model, 'KNN classifier', [('input', FloatTensorType([None, 3]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:7], model, model_onnx,
            basename="SklearnKNeighborsClassifierMetricCityblock")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor_int(self):
        model, X = self._fit_model(KNeighborsRegressor())
        X = X.astype(numpy.int64)
        model_onnx = convert_sklearn(
            model,
            "KNN regressor",
            [("input", Int64TensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            # NOTE(review): basename looks copy-pasted from a
            # GradientBoosting test — confirm whether it should read
            # "SklearnKNeighborsRegressorInt-Dec4".
            basename="SklearnGradientBoostingRegressionInt-Dec4"
        )

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor_equal(self):
        X, y = datasets.make_regression(
            n_samples=1000, n_features=100, random_state=42)
        X = X.astype(numpy.int64)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.5, random_state=42)
        model = KNeighborsRegressor(
            algorithm='brute', metric='manhattan').fit(X_train, y_train)
        model_onnx = convert_sklearn(
            model, 'knn',
            [('input', Int64TensorType([None, X_test.shape[1]]))])
        exp = model.predict(X_test)
        sess = InferenceSession(model_onnx.SerializeToString())
        res = sess.run(None, {'input': numpy.array(X_test)})[0]
        # The conversion has discrepencies when
        # neighbours are at the exact same distance.
        maxd = 1000
        accb = numpy.abs(exp - res) > maxd
        ind = [i for i, a in enumerate(accb) if a == 1]
        assert len(ind) == 0
        accp = numpy.abs(exp - res) < maxd
        acc = numpy.sum(accp)
        ratio = acc * 1.0 / res.shape[0]
        assert ratio >= 0.7
        # assert_almost_equal(exp, res)

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_multi_class_nocl(self):
        # 'nocl' option: class label strings must not appear in the graph.
        model, X = fit_classification_model(
            KNeighborsClassifier(),
            2, label_string=True)
        model_onnx = convert_sklearn(
            model,
            "multi-class nocl",
            [("input", FloatTensorType([None, X.shape[1]]))],
            options={id(model): {'nocl': True}})
        self.assertIsNotNone(model_onnx)
        sonx = str(model_onnx)
        assert 'classlabels_strings' not in sonx
        assert 'cl0' not in sonx
        dump_data_and_model(
            X, model, model_onnx, classes=model.classes_,
            basename="SklearnNaiveMultiNoCl", verbose=False,
            allow_failure="StrictVersion(onnx.__version__)"
            " < StrictVersion('1.2') or "
            "StrictVersion(onnxruntime.__version__)"
            " <= StrictVersion('0.2.1')")

    @unittest.skipIf(
        StrictVersion(onnxruntime.__version__) < StrictVersion("0.5.0"),
        reason="not available")
    def test_model_knn_regressor2_2_pipee(self):
        # KNN inside a StandardScaler pipeline.
        pipe = make_pipeline(StandardScaler(),
                             KNeighborsClassifier())
        model, X = self._fit_model_binary_classification(pipe)
        model_onnx = convert_sklearn(
            model, "KNN pipe",
            [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(numpy.float32)[:2],
            model, model_onnx,
            basename="SklearnKNeighborsRegressorPipe2")
# Run the whole suite when the file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
nilq/baby-python
|
python
|
7
8
9
10
|
nilq/baby-python
|
python
|
import sys
# mode is one of 'left', 'right', 'center'
def horizontal_align_print(s, width, mode='left', offsetChar=' ', end='\n',
                           os=sys.stdout):
    """Print *s* padded with *offsetChar* to *width* columns on stream *os*.

    *mode* is one of 'left', 'right' or 'center' (only the first letter is
    inspected; anything else centers).  Strings longer than *width* are
    printed unpadded.

    Fix: centering no longer widens the field by one extra column when the
    parities of ``len(s)`` and *width* differ; the odd padding character now
    goes to the right, matching ``str.center`` semantics.
    """
    p = _print_to_file_func(os)
    # range() in the old loops already ignored negative offsets, so clamp.
    pad = max(width - len(s), 0)
    if mode[0] == 'l':    # left-aligned: text first, padding after
        p(s + offsetChar * pad, end=end)
    elif mode[0] == 'r':  # right-aligned: padding first
        p(offsetChar * pad + s, end=end)
    else:                 # centered: split padding, extra char on the right
        left = pad // 2
        p(offsetChar * left + s + offsetChar * (pad - left), end=end)


def _print_to_file_func(file):
    """Return a print-like function permanently bound to *file*."""
    def f(*objects, sep=' ', end='\n', flush=False):
        print(*objects, sep=sep, end=end, file=file, flush=flush)
    return f
|
nilq/baby-python
|
python
|
from os.path import join, dirname
import datetime
# import pandas as pd
# from scipy.signal import savgol_filter
# from bokeh.io import curdoc
# from bokeh.layouts import row, column
# from bokeh.models import ColumnDataSource, DataRange1d, Select
# from bokeh.palettes import Blues4
# from bokeh.plotting import figure
import pandas as pd
from bokeh.plotting import figure, ColumnDataSource
from bokeh.io import output_file, show, output_notebook, curdoc
from bokeh.models import HoverTool, Slider, Select, Dropdown, Div, Button, Slider, Range1d, Title, NumeralTickFormatter, Circle, Square, Asterisk, Scatter, LassoSelectTool, BoxSelectTool
from bokeh.models.widgets import Panel, Tabs, MultiChoice, Spinner, MultiSelect
from bokeh.layouts import row, column, gridplot, widgetbox, layout
from bokeh.transform import factor_cmap
from bokeh.palettes import Category20, Spectral10, Turbo256, Turbo
# from bokeh.plotting.figure.Figure import sq
from bokeh.application.handlers import FunctionHandler
from bokeh.application import Application
from bokeh.embed import file_html, server_document
from bokeh.resources import CDN
from bokeh.themes import built_in_themes,Theme
# Categorical columns offered in the "color by" selectors; '' means "none".
cat_columns = ['','country_category','work_station_category','production_line_category','plant_category','division_category','record_day_name','record_month_name','record_year_month','record_year']
# Index columns always loaded alongside the selected measures.
idx_columns = ['tenant_id','record_date']
# Shared thousands-separator tick formatter for all numeric axes.
int_format = NumeralTickFormatter(format="#,##0")
# NOTE: removed the original module-level ``global circle1/2/3`` statements:
# ``global`` is a no-op at module scope.  select_on_change() declares them
# where it actually matters.
def get_cmap(df, fld: str):
    """Build a categorical color mapper for column ``fld`` of ``df``.

    Uses the discrete Turbo palettes for up to 11 categories and
    evenly-spaced samples of Turbo256 beyond that.

    Fix: Bokeh's ``Turbo`` dict has no entry below 3 colors, so one or two
    categories used to raise ``KeyError`` — the 3-color palette is now
    sliced down instead.
    """
    cat = sorted(df[fld].unique())
    cat_num = len(cat)
    if cat_num <= 11:
        palette = Turbo[max(cat_num, 3)][:cat_num]
        return factor_cmap(field_name=fld, palette=palette, factors=cat)
    # Sample Turbo256 at a regular step so colors stay well separated.
    color_step = int(256 / cat_num)
    palette_colors = [Turbo256[color] for color in range(0, 255, color_step)]
    return factor_cmap(field_name=fld, palette=palette_colors, factors=cat)
def get_source(selected_vars:list):
    """Load the demo dataset and return only the ``selected_vars`` columns.

    NOTE(review): the CSV is re-read from disk on every call — consider
    caching the frame if this becomes a bottleneck.
    """
    df_src = pd.read_csv('bokeh-app/data/main_dataframe_head.csv',parse_dates=['record_date'])
    # Years are treated as categorical labels, not numbers.
    df_src['record_year'] = df_src['record_year'].astype(str)
    return df_src[selected_vars]
def tab1_list_df_vars(var1, var2, var3, var_cat, var_size):
    """Build the list of columns to load for the tab-1 scatter plots.

    Always starts with the index columns.  Each of the three value
    selectors falls back to a positional default
    (``selectable_columns[1..3]``) when left empty; the category and size
    columns are appended only when actually chosen.
    """
    lst = idx_columns.copy()
    # The three copy-pasted if/else branches collapsed into one loop:
    # empty selector -> positional default from the global column list.
    for position, var in enumerate((var1, var2, var3), start=1):
        lst.append(var if var != '' else selectable_columns[position])
    if var_cat != '':
        lst.append(var_cat)
    if var_size != '':
        lst.append(var_size)
    return lst
def set_selectable_columns():
    """Return the sorted numeric column names usable in the selectors,
    with a leading ``''`` entry meaning "no selection"."""
    df = pd.read_csv('bokeh-app/data/main_dataframe_head.csv',parse_dates=['record_date'])
    df['record_year'] = df['record_year'].astype(str)
    selectable_columns = df.columns.tolist()
    # Drop index and categorical columns; only measures remain.
    selectable_columns = list(set(selectable_columns) - set(idx_columns) - set(cat_columns))
    selectable_columns.insert(0,'')
    # '' sorts first, so it remains the default "empty" choice after sort.
    selectable_columns.sort()
    return selectable_columns
def set_selectable_tenants():
    """Return the sorted tenant ids, with a leading ``''`` entry meaning
    "no selection"."""
    df = pd.read_csv('bokeh-app/data/main_dataframe_head.csv',parse_dates=['record_date'])
    df['record_year'] = df['record_year'].astype(str)
    selectable_tenants = sorted(df.tenant_id.unique())
    selectable_tenants.insert(0,'')
    # '' sorts first, so it remains the default "empty" choice after sort.
    selectable_tenants.sort()
    return selectable_tenants
def build_plot(p, df, var_x, var_y, transparency, var_cat, var_size):
    """Populate scatter figure ``p`` with ``df[var_x]`` vs ``df[var_y]``.

    Optionally colors points by the ``var_cat`` category column and sizes
    them by the ``var_size`` column (rescaled to 0-100).  Any renderers
    already on the figure are replaced and a matching hover tool is
    attached.  Returns the circle glyph renderer so the transparency
    slider can tweak it later.

    Refactor: the four nearly identical glyph/hover branches (cat x size)
    are collapsed into one call with conditionally built kwargs/tooltips;
    behavior is unchanged (the ``fill_color='blue'`` of the old else
    branch was never actually passed to ``circle``).
    """
    if var_size != '':
        # Rescale the size column to 0-100 so circle sizes stay readable.
        # NOTE(review): this mutates the caller's DataFrame in place.
        temp = ((df[var_size] - df[var_size].min()) / (df[var_size].max() - df[var_size].min())) * 100
        df[var_size] = temp.round(0).astype(int)
    src = ColumnDataSource(df)
    p.title.text = '''Variable '{0}' contre Variable '{1}' '''.format(var_x, var_y)
    # Drop the previous render (and its legend) before drawing the new one.
    p.renderers = []
    if hasattr(p.legend, 'items'):
        p.legend.items = []
    # Shared styling: highlight hovered/selected points, dim the rest.
    glyph_kwargs = {
        'hover_fill_color': 'black',
        'hover_line_color': 'black',
        'hover_alpha': 1,
        'selection_fill_alpha': 1,
        'selection_line_alpha': 1,
        'nonselection_fill_alpha': transparency,
        'nonselection_line_alpha': transparency,
    }
    tooltips = [
        ('locateur', '@tenant_id'),
        ('date', '@record_date{%Y-%m-%d}'),
        ('x-> {}'.format(var_x), '@{}'.format(var_x)),
        ('y-> {}'.format(var_y), '@{}'.format(var_y)),
    ]
    if var_cat != '':
        glyph_kwargs['fill_color'] = get_cmap(df, var_cat)
        glyph_kwargs['legend_field'] = var_cat
        tooltips.append(('catégorie-> {}'.format(var_cat), '@{}'.format(var_cat)))
    if var_size != '':
        glyph_kwargs['size'] = var_size
        tooltips.append(('taille-> {}'.format(var_size), '@{}'.format(var_size)))
    c = p.circle(var_x, var_y, source=src, alpha=transparency, **glyph_kwargs)
    hover = HoverTool(
        tooltips=tooltips,
        formatters={'@record_date': 'datetime'},
        renderers=[c],
        mode='mouse'
    )
    # lasso = LassoSelectTool(renderers = [c])
    # box = BoxSelectTool(renderers = [c])
    p.add_tools(hover)
    # p.add_tools(lasso)
    # p.add_tools(box)
    p.x_range = Range1d(0, df[var_x].max())
    p.y_range = Range1d(0, df[var_y].max())
    p.xaxis.axis_label = var_x
    p.xaxis[0].formatter = int_format
    p.yaxis.axis_label = var_y
    p.yaxis[0].formatter = int_format
    p.title.align = 'center'
    return c
def select_on_change(event):
    """'Charger les graphiques' button callback: rebuild the three scatter
    plots from the current widget values."""
    # The glyph handles stay global so change_transparency() can reach them.
    global circle1
    global circle2
    global circle3
    vars_lst = tab1_list_df_vars(select_val1.value,select_val2.value,select_val3.value,select_cat.value,select_size.value)
    df_selected = get_source(vars_lst)
    circle1 = build_plot(plot_1, df_selected, select_val1.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
    circle2 = build_plot(plot_2, df_selected, select_val3.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
    circle3 = build_plot(plot_3, df_selected, select_val1.value, select_val3.value, alpha_slide.value, select_cat.value, select_size.value)
def change_transparency(attr, old, new):
    """Slider callback: apply the new alpha to the three scatter glyphs.

    NOTE(review): only ``fill_alpha`` is updated; the line alpha and the
    nonselection alphas keep their creation-time values — confirm this is
    intended.
    """
    for glyph in [circle1.glyph, circle2.glyph, circle3.glyph]:
        glyph.fill_alpha = alpha_slide.value
def build_main_plot(event):
    """'Afficher le graphique' callback: redraw the tab-2 overview plot.

    Draws one faint black line per tenant of the selected category and a
    bold red line for the focused tenant, then refreshes the hover tool,
    axis labels and titles.

    Fix: removed a leftover debug ``print('>>>')`` that polluted the
    server log on every redraw.
    """
    main_plot.renderers = []
    src_col = idx_columns + [select_var_tab2.value,select_cat_tab2.value]
    df = get_source(src_col)
    df = df.loc[df[select_cat_tab2.value] == select_cat_val_tab2.value]
    l_list = []
    for tenant in select_tenant.options:
        df_src = df.loc[df['tenant_id'] == tenant].copy()
        src = ColumnDataSource(df_src)
        if tenant != select_tenant.value:
            l = main_plot.line('record_date',select_var_tab2.value,source=src,line_color='black',alpha=0.4,
                               hover_line_color='blue', hover_alpha=0.8)
            l_list.append(l)
    # The focused tenant is drawn last so its red line sits on top.
    df_src = df.loc[df['tenant_id'] == select_tenant.value].copy()
    src = ColumnDataSource(df_src)
    main_plot.line('record_date',select_var_tab2.value,source=src,line_color='red',alpha=0.8,line_width=3)
    hover = HoverTool(
        tooltips = [
            ('locateur','@tenant_id'),
            ('date','@record_date{%Y-%m-%d}'),
            ('y-> {}'.format(select_var_tab2.value),'@{}'.format(select_var_tab2.value))
        ],
        formatters={'@record_date' : 'datetime'},
        renderers = l_list,
        mode = 'mouse'
    )
    # Replace the previous hover (kept as the last tool) instead of piling
    # up one HoverTool per redraw.  NOTE(review): the ``> 5`` threshold
    # assumes the figure was created with exactly five base tools.
    if len(main_plot.tools) > 5:
        main_plot.tools[-1] = hover
    else:
        main_plot.add_tools(hover)
    main_plot.yaxis.axis_label = select_var_tab2.value
    main_plot.xaxis.axis_label = 'Dates'
    main_plot.yaxis[0].formatter = int_format
    title1_main.text = ''''{0}' des locateurs '{1}' de la catégorie '{2}' '''.format(select_var_tab2.value,select_cat_val_tab2.value,select_cat_tab2.value)
    title2_main.text = '''focussé sur le locateur {0}'''.format(select_tenant.value)
def get_tenants_in_category(cat, val):
    """Return the sorted tenant ids whose ``cat`` column equals ``val``."""
    frame = get_source(idx_columns + [cat])
    matching = frame.loc[frame[cat] == val, 'tenant_id'].unique().tolist()
    return sorted(matching)
def set_options_select_tenant(attr,old,new):
    """Category-value callback: repopulate the focus-tenant selector and
    preselect its first entry.

    NOTE(review): ``tenants[0]`` raises IndexError when the category has
    no tenants — confirm the data guarantees at least one.
    """
    tenants = get_tenants_in_category(select_cat_tab2.value,select_cat_val_tab2.value)
    select_tenant.options = tenants
    select_tenant.value = tenants[0]
def set_options_compare_tenants(attr, old, new):
    """Focus-tenant callback: offer every *other* tenant of the current
    category as a comparison candidate."""
    tenants = get_tenants_in_category(select_cat_tab2.value, select_cat_val_tab2.value)
    others = sorted(set(tenants) - {select_tenant.value})
    compare_tenants.options = others
def set_category_values(attr,old,new):
    """Categorisation callback: repopulate the category-value selector
    from the distinct values of the chosen column."""
    df = get_source([select_cat_tab2.value])
    vals = df[select_cat_tab2.value].unique().tolist()
    vals.sort()
    select_cat_val_tab2.options = vals
    # Assigning a value fires set_options_select_tenant in turn (wired on
    # select_cat_val_tab2 'value' below).
    select_cat_val_tab2.value = vals[0]
def get_tab2_line_graph(main_tenant,compare_tenant):
    """Build a small figure comparing ``main_tenant`` (red, bold) against
    ``compare_tenant`` (blue, faint) on the currently selected variable,
    restricted to the currently selected category value."""
    src_col = idx_columns + [select_var_tab2.value,select_cat_tab2.value]
    df = get_source(src_col)
    df = df.loc[df[select_cat_tab2.value] == select_cat_val_tab2.value]
    fig = figure(title='Comparé à {0}'.format(compare_tenant),tools="pan,wheel_zoom,box_zoom,reset",x_axis_type='datetime',height=100,width=200)
    df_src = df.loc[df['tenant_id'] == compare_tenant].copy()
    src = ColumnDataSource(df_src)
    l1 = fig.line('record_date',select_var_tab2.value,source=src,line_color='blue',alpha=0.4)
    df_src = df.loc[df['tenant_id'] == main_tenant].copy()
    src = ColumnDataSource(df_src)
    l2 = fig.line('record_date',select_var_tab2.value,source=src,line_color='red',alpha=0.8,line_width=3)
    hover = HoverTool(
        tooltips = [
            ('locateur','@tenant_id'),
            ('date','@record_date{%Y-%m-%d}'),
            ('y-> {}'.format(select_var_tab2.value),'@{}'.format(select_var_tab2.value))
        ],
        formatters={'@record_date' : 'datetime'},
        renderers = [l1, l2],
        # 'vline' shows tooltips for both lines at the cursor's x position.
        mode = 'vline'
    )
    fig.add_tools(hover)
    fig.yaxis.axis_label = select_var_tab2.value
    fig.yaxis[0].formatter = int_format
    fig.xaxis.axis_label = 'Dates'
    fig.title.align = 'center'
    return fig
def build_tab2_gridplot_graphs(event):
    """'Afficher les graphiques' callback: build one small comparison
    figure per tenant picked in the multi-select and install them in the
    layout's reserved slots."""
    compared_tenants = compare_tenants.value
    graphs = []
    for t in compared_tenants:
        g = get_tab2_line_graph(select_tenant.value,t)
        # Link every figure to the first one's ranges for aligned zooming.
        if len(graphs) > 0:
            g.x_range = graphs[0].x_range
            g.y_range = graphs[0].y_range
        graphs.append(g)
    # The last two layout children are placeholders reserved for this grid
    # and its title.
    layout2.children[-1] = gridplot(graphs,ncols=nb_cols.value,merge_tools=True,sizing_mode='scale_both')
    layout2.children[-2] = Div(text='''<h3>Comparaison des '{0}' du locateur {1}</h3>'''.format(select_var_tab2.value,select_tenant.value),align='center')
# Populate the widget choice lists once at startup.
selectable_columns = set_selectable_columns()
selectable_tenants = set_selectable_tenants()
# Presentation models
# Static help text (tab 0): context, usage of both tabs, and a glossary of
# the dataset's columns.  User-facing French HTML — left untouched.
intro_div = Div(text="""
<h1>Présentation du dashboard</h1>
<h3>Contexte</h3>
<p>Les données présentées dans ce dashboard proviennent de l'application Poka. Cette application aide la gestion manifacturière et ces données sont donc confidentielles.</p>
<h3>L'onglet 'Exploration des données'</h3>
<p>Cet onglet permet un premier coup d'oeil aux données. En choisissant 3 variables numérique, celles-ci seront présentées l'une contre l'autre dans 3 graphiques de type 'nuage de points'. Ces 3 variables sont nécessaires avant de cliquer sur 'Charger les graphiques'.</p>
<p>De plus, à ces 3 graphiques, vous pouvez aussi ajouter optionnellement une 4e variable qui sera présentée en jouant sur la taille des cercles de chaque graphique.</p>
<p>À ces 4 possibilités de variables numériques, vous pouvez aussi sélectionner optionnellement une catégorie, qui affectera la couleur des points présentés.</p>
<p>Finalement, la transparence des points peut aussi être changé, pour une meilleure visibilité de la dispersion des points.</p>
<h3>L'onglet 'Analyse temporelle'</h3>
<p>Cet onglet sert à comparer l'évolution dans le temps d'un locateur en comparant son évalotion à celles de d'autres locateurs d'une même catégorie.</p>
<p>Pour le graphique supérieur, affichant une vue générale, il faut d'un premier temps sélectionner la catégorisation, puis la catégorie souhaitée de celle-ci. Ensuite, il faut sélectionner le locateur dont on souhaite focussé l'analyse. Enfin, il faut choisir une variable numérique, comme base de l'évolution à afficher. Après la sélection de ces 4 choix, il suffit de cliquer sur 'Afficher le graphique' pour voir le résultat.</p>
<p>Pour la partie inférieur, il est possible de sélectionner les locateurs, dont on souhaiterait voir une comparaison isolée du locateur focus. D'abord, sélectionnez un ou plusieurs locateurs dans la liste affichée. Pour choisir plusieurs locateurs, cliquez et faites glisser pour sélectionner des locateurs côte-à-côte et/ou utilisez la touche 'control' en cliquant pour sélectionner des locateurs distancés. Une fois les locateurs choisis, déterminez sur combien de colonnes vous souhaitez voir les graphiques et cliquez finalement sur 'Afficher les graphiques'.</p>
<h3>Terminologie</h3>
<ul>
<li>Locateur: <em>Pour que le client puisse utiliser l'application, celui-ci se voit réserver un espace sur un instance et un identifiant locateur lui est attribué. Un client peut avoir un ou plusieurs identifiants locateur</em></li>
<li>Variables numériques
<ul>
<li>Préfixes
<ul>
<li>'created_': <em>Nombre de contenus créés de la charactéristique de l'application qui suit</em></li>
<li>'modified_': <em>Nombre de contenus modifiés de la caractéristique de l'application qui suit</em></li>
</ul>
</li>
<li>active_users: <em>Nombre d'utilisateurs uniques qui ont utilisé l'application</em></li>
<li>activities: <em>Nombre d'activités effectuées sur l'application</em></li>
<li>connected_once: <em>Nombre d'utilisateurs qui se sont connectés au moins une fois à l'application</em></li>
<li>'_forms': <em>Caractéristique formulaire de l'application</em></li>
<li>'_news': <em>Caractéristique de type publication de l'application</em></li>
<li>'_problems': <em>Caractéristique de type signalement de problèmes de l'application</em></li>
<li>'_skills': <em>Caractéristique des compétences de l'application, pouvant être associé à un utilisateur</em></li>
<li>'_skills_endorsement_requests': <em>Nombre de requêtes utilisateur pour recevoir l'approbation d'une compétence</em></li>
<li>skills_endorsements: <em>Nombre de compétences approuvés</em></li>
<li>divisions: <em>Nombre de regroupements d'usines, déterminé par le client</em></li>
<li>form_completions: <em>Nombre de formulaires remplis</em></li>
<li>plants: <em>Nombre d'usines associées au locateur</em></li>
<li>production_lines: <em>Nombre de lignes de production associées au locateur</em></li>
<li>views: <em>Nombre de contenus vu par les utilisateurs</em></li>
<li>work_stations: <em>Nombre de postes de travail associés au locateur</em></li>
<li>workinstructions: <em>Caractéristique des instructions de travail de l'application</em></li>
</ul>
</li>
<li>Variables catégorielles
<ul>
<li>Préfixe
<ul>
<li>'record_': <em>Catégories relatives à la date d'enregistrement des données [record_date]</em></li>
</ul>
</li>
<li>country_category: <em>Catégorisation selon le-s pays des usines du locateur</em></li>
<li>work_station_category: <em>Catégorisation selon le nombre de postes de travail</em></li>
<li>production_line_category: <em>Catégorisation selon le nombre de lignes de production</em></li>
<li>plant_category: <em>Catégorisation selon le nombre d'usines</em></li>
<li>division_category: <em>Catégorisation selon le nombre de divisions</em></li>
<li>'_day_name': <em>Regroupement par jour de la semaine</em></li>
<li>'_month_name': <em>Regroupement par mois</em></li>
<li>'_year_month': <em>Regroupement par année et mois</em></li>
<li>'_year': <em>Regroupement par année</em></li>
</ul>
</li>
</ul>
""")
pan0 = Panel(child=intro_div,title='Présentation')
# Data exploration
# Models
# Tab 1 widgets: three value selectors (with positional defaults), optional
# size/category selectors, a load button and a transparency slider.
select_val1 = Select(title="Variable de l'axe des X pour les Graphiques 1 & 3",options=selectable_columns,value=selectable_columns[1])
select_val2 = Select(title="Variable de l'axe des Y pour les Graphiques 1 & 2",options=selectable_columns,value=selectable_columns[2])
select_val3 = Select(title="Variable axe X Graphique 2 & axe Y Graphique 3",options=selectable_columns,value=selectable_columns[3])
select_size = Select(title='Taille des points par',options=selectable_columns,value=None)
select_cat = Select(title='Couleur des points par',options=cat_columns,value=None)
load_graph = Button(label='Charger les graphiques',button_type='success')
alpha_slide = Slider(start=0.1,end=1,value=0.3,step=0.05,title='Transparence des points')
plot_1 = figure(tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select") #lasso_select,
plot_2 = figure(tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select")
plot_3 = figure(tools="pan,wheel_zoom,box_zoom,reset,save,lasso_select,box_select")
# Creation & Dynamics
# Initial render with the default selections.
vars_lst = tab1_list_df_vars(select_val1.value,select_val2.value,select_val3.value,select_cat.value,select_size.value)
df_selected = get_source(vars_lst)
circle1 = build_plot(plot_1, df_selected, select_val1.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
circle2 = build_plot(plot_2, df_selected, select_val3.value, select_val2.value, alpha_slide.value, select_cat.value, select_size.value)
circle3 = build_plot(plot_3, df_selected, select_val1.value, select_val3.value, alpha_slide.value, select_cat.value, select_size.value)
df_selected = None
out_legend = None
# Link shared axes so panning/zooming stays consistent across the plots.
plot_1.x_range = plot_3.x_range
plot_1.y_range = plot_2.y_range
plot_2.x_range = plot_3.y_range
load_graph.on_click(select_on_change)
alpha_slide.on_change('value',change_transparency)
# Structure
page_title = Div(text='<h1>Exploration des données brutes</h1>')
widget_select_val = column(Div(),select_val1,select_val2,select_val3,select_size,select_cat,load_graph,Div(),Div(),alpha_slide)
plot_grid = gridplot([[Div(text='<h3>Graphique 1</h3>',align='center'),Div(text='<h3>Graphique 2</h3>',align='center')],
                      [plot_1,plot_2],
                      [Div(text='<h3>Graphique 3</h3>',align='center'),None],
                      [plot_3,out_legend]],
                      merge_tools=True) #,ncols=2
row_1 = row(widget_select_val,plot_grid)
layout1 = column(page_title,row_1)
pan1 = Panel(child=layout1,title='Exploration de données')
# Time analysis
# Models
# Tab 2 widgets: categorisation chain (categorisation -> value -> tenant),
# variable selector, comparison multi-select and grid-size spinners.
select_cat_tab2 = Select(title='Choisissez une catégorisation',options=cat_columns,value=None)
select_cat_val_tab2 = Select(title='Choisissez la catégorie',value=None)
select_tenant = Select(title='Choisissez quel locateur est le focus',options=selectable_tenants,value=None)
select_var_tab2 = Select(title='Choisissez la variable à afficher',options=selectable_columns,value=None)
compare_tenants = MultiSelect(title='Choisissez le-s locateur-s à comparer au locateur focus',options=[],value=[],width=500,height=200)
nb_rows = Spinner(title='Nombre de rangées',low=1,high= 20,value=2,step=1,sizing_mode='stretch_width',visible=False) #width=125,align=('start','center')
nb_cols = Spinner(title='Nombre de colonnes',low=1,high= 20,value=2,step=1,sizing_mode='stretch_width')
load_main_graph_tab2 = Button(label='Afficher le graphique',button_type='success',align='end')
load_graphs_tab2 = Button(label='Afficher les graphiques',button_type='success',align='start',height=80,sizing_mode='stretch_width')
main_plot = figure(tools="pan,wheel_zoom,box_zoom,reset,save",x_axis_type='datetime',sizing_mode='stretch_width') #,width=1200
title1_main = Title(text='',align='center')
title2_main = Title(text='',align='center')
# Creation & Dynamics
# Titles are added bottom-up, so title1 ends up above title2.
main_plot.add_layout(title2_main,'above')
main_plot.add_layout(title1_main,'above')
# Invisible seed line so the figure renders before the first callback.
main_plot.line([0,1],[0,1],alpha=0)
load_main_graph_tab2.on_click(build_main_plot)
# Cascade: categorisation -> values -> tenants -> comparison candidates.
select_cat_tab2.on_change('value',set_category_values)
select_cat_val_tab2.on_change('value',set_options_select_tenant)
select_tenant.on_change('value',set_options_compare_tenants)
load_graphs_tab2.on_click(build_tab2_gridplot_graphs)
# Structure
tab2_page_title = Div(text="<h1>Analyse temporelle d'un locateur comparé à d'autres de la même catégorie</h1>",sizing_mode='stretch_width')
tab2_select_vars_main_graph = row(select_cat_tab2,select_cat_val_tab2,select_tenant,select_var_tab2,load_main_graph_tab2) #,sizing_mode='stretch_both'
tab2_graphs_size = column(nb_rows,nb_cols,load_graphs_tab2,width=200)
tab2_select_vars_graphs = row(compare_tenants,tab2_graphs_size)
# The two trailing Div() placeholders are swapped out by
# build_tab2_gridplot_graphs() for the comparison title and grid.
layout2 = layout([
    [tab2_page_title],
    [tab2_select_vars_main_graph],
    [main_plot],
    [Div()],
    [tab2_select_vars_graphs],
    [Div()],
    [Div()]
])
pan2 = Panel(child=layout2,title='Analyse temporelle')
# city_select = Select(value=city, title='City', options=sorted(cities.keys()))
# distribution_select = Select(value=distribution, title='Distribution', options=['Discrete', 'Smoothed'])
# df = pd.read_csv(join(dirname(__file__), 'data/2015_weather.csv'))
# source = get_dataset(df, cities[city]['airport'], distribution)
# plot = make_plot(source, "Weather data for " + cities[city]['title'])
# city_select.on_change('value', update_plot)
# distribution_select.on_change('value', update_plot)
# controls = column(city_select, distribution_select)
tabs = Tabs(tabs=[pan0,pan1,pan2])
curdoc().add_root(tabs)
curdoc().title = "Poka"
curdoc().theme = 'light_minimal'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import requests
import os
import json
from requests.auth import HTTPBasicAuth
import requests_unixsocket
# --- Configuration pulled from the MetWork runtime environment ---
MFMODULE_RUNTIME_HOME = os.environ['MFMODULE_RUNTIME_HOME']
ADMIN_USERNAME = "admin"
ADMIN_PASSWORD = os.environ['MFADMIN_GRAFANA_ADMIN_PASSWORD']
GRAFANA_SOCKET = "%s/tmp/grafana.sock" % MFMODULE_RUNTIME_HOME
GRAFANA_HOST = "localhost"
HOME_DASHBOARD_UID = "lCmsjhHik"
# requests_unixsocket expects the socket path URL-encoded inside the netloc.
BASE_URL = "http+unix://%s" % GRAFANA_SOCKET.replace('/', '%2F')
requests_unixsocket.monkeypatch()
# Resolve the numeric dashboard id from its stable UID.
url = "%s/api/dashboards/uid/%s" % (BASE_URL, HOME_DASHBOARD_UID)
print(url)
dashboard = requests.get(url, auth=HTTPBasicAuth(ADMIN_USERNAME,
                                                 ADMIN_PASSWORD)).json()
print(json.dumps(dashboard, indent=4))
# Renamed from ``id`` to avoid shadowing the builtin.
dashboard_id = dashboard['dashboard']['id']
print(dashboard_id)
# Point the admin user's preferences at that dashboard, in UTC.
url = "%s/api/user/preferences" % (BASE_URL,)
print(url)
preferences = requests.get(url, auth=HTTPBasicAuth(ADMIN_USERNAME,
                                                   ADMIN_PASSWORD)).json()
print(json.dumps(preferences, indent=4))
preferences['timezone'] = 'utc'
preferences['homeDashboardId'] = dashboard_id
print(json.dumps(preferences, indent=4))
print(requests.put(url, auth=HTTPBasicAuth(ADMIN_USERNAME, ADMIN_PASSWORD),
                   json=preferences))
# Apply the same preferences organisation-wide.
url = "%s/api/org/preferences" % (BASE_URL,)
print(url)
print(requests.put(url, auth=HTTPBasicAuth(ADMIN_USERNAME, ADMIN_PASSWORD),
                   json=preferences))
|
nilq/baby-python
|
python
|
import os
from dotenv import load_dotenv
# Load variables from a local .env file into the process environment.
load_dotenv()
# API credentials; the defaults are explanatory placeholders, not secrets.
AV_API_KEY = os.getenv("AV_API_KEY", "value does not exist")
AV_API_KEY_2 = os.getenv("AV_API_KEY_2", "value does not exist")
BINANCE_KEY = os.getenv("BINANCE_KEY", "Binance key not found")
BINANCE_SECRET = os.getenv("BINANCE_SECRET", "Binance secret not found")
|
nilq/baby-python
|
python
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Composite Experiment data class.
"""
from typing import Optional, Union, List
from qiskit.result import marginal_counts
from qiskit.exceptions import QiskitError
from qiskit_experiments.framework.experiment_data import ExperimentData
class CompositeExperimentData(ExperimentData):
    """Experiment data for a composite experiment.

    Holds the parent data plus one child :class:`ExperimentData` per
    component experiment, and routes incoming results to the children.
    """
    def __init__(
        self,
        experiment,
        backend=None,
        job_ids=None,
    ):
        """Initialize experiment data.
        Args:
            experiment (CompositeExperiment): experiment object that generated the data.
            backend (Backend): Optional, Backend the experiment runs on. It can either be a
                :class:`~qiskit.providers.Backend` instance or just backend name.
            job_ids (list[str]): Optional, IDs of jobs submitted for the experiment.
        Raises:
            ExperimentError: If an input argument is invalid.
        """
        super().__init__(
            experiment,
            backend=backend,
            job_ids=job_ids,
        )
        # Initialize sub experiments
        # Each component chooses its own data container class via
        # ``__experiment_data__``; children share backend and job ids.
        self._components = [
            expr.__experiment_data__(expr, backend, job_ids) for expr in experiment._experiments
        ]
    def __str__(self):
        """Return a multi-line, human-readable summary of this data set."""
        line = 51 * "-"
        n_res = len(self._analysis_results)
        status = self.status()
        ret = line
        ret += f"\nExperiment: {self.experiment_type}"
        ret += f"\nExperiment ID: {self.experiment_id}"
        ret += f"\nStatus: {status}"
        if status == "ERROR":
            ret += "\n  "
            ret += "\n  ".join(self._errors)
        ret += f"\nComponent Experiments: {len(self._components)}"
        ret += f"\nCircuits: {len(self._data)}"
        ret += f"\nAnalysis Results: {n_res}"
        ret += "\n" + line
        if n_res:
            ret += "\nLast Analysis Result:"
            # NOTE(review): assumes ``_analysis_results.values()`` supports
            # indexing (a qiskit-experiments container rather than a plain
            # dict) — confirm against the base class.
            ret += f"\n{str(self._analysis_results.values()[-1])}"
        return ret
    def component_experiment_data(
        self, index: Optional[Union[int, slice]] = None
    ) -> Union[ExperimentData, List[ExperimentData]]:
        """Return component experiment data.

        Args:
            index: component index or slice; ``None`` returns all components.

        Raises:
            QiskitError: if ``index`` is neither ``None``, int nor slice.
        """
        if index is None:
            return self._components
        if isinstance(index, (int, slice)):
            return self._components[index]
        raise QiskitError(f"Invalid index type {type(index)}.")
    def _add_single_data(self, data):
        """Add data to the experiment"""
        # TODO: Handle optional marginalizing IQ data
        metadata = data.get("metadata", {})
        if metadata.get("experiment_type") == self._type:
            # Add parallel data
            self._data.append(data)
            # Add marginalized data to sub experiments
            if "composite_clbits" in metadata:
                composite_clbits = metadata["composite_clbits"]
            else:
                composite_clbits = None
            for i, index in enumerate(metadata["composite_index"]):
                sub_data = {"metadata": metadata["composite_metadata"][i]}
                if "counts" in data:
                    if composite_clbits is not None:
                        # Keep only this component's clbits in its counts.
                        sub_data["counts"] = marginal_counts(data["counts"], composite_clbits[i])
                    else:
                        sub_data["counts"] = data["counts"]
                self._components[index].add_data(sub_data)
|
nilq/baby-python
|
python
|
# -*- coding: utf8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import messages
from horizon import tables
from horizon.utils import memoized
from tuskar_ui import api
class DeleteNode(tables.BatchAction):
    """Table action deleting the selected Ironic nodes."""
    name = "delete"
    action_present = _("Delete")
    action_past = _("Deleting")
    data_type_singular = _("Node")
    data_type_plural = _("Nodes")
    classes = ('btn-danger',)
    def allowed(self, request, obj=None):
        """Permit deletion only for powered-off nodes not backing an
        instance."""
        if not obj:
            # this is necessary because table actions use this function
            # with obj=None
            return True
        return (getattr(obj, 'instance_uuid', None) is None and
                obj.power_state not in api.node.POWER_ON_STATES)
    def action(self, request, obj_id):
        """Delete one node; warn instead when no id was supplied."""
        if obj_id is None:
            messages.error(request, _("Select some nodes to delete."))
            return
        api.node.Node.delete(request, obj_id)
class ActivateNode(tables.BatchAction):
    """Table action activating the selected nodes."""
    name = "activate"
    action_present = _("Activate")
    action_past = _("Activated")
    data_type_singular = _("Node")
    data_type_plural = _("Nodes")
    def allowed(self, request, obj=None):
        """Permit activation only when all hardware facts are known."""
        if not obj:
            # this is necessary because table actions use this function
            # with obj=None
            return True
        return (obj.cpus and obj.memory_mb and obj.local_gb and
                obj.cpu_arch)
    def action(self, request, obj_id):
        """Clear maintenance and power the node off.

        NOTE(review): powering off after activation presumably leaves the
        node in a known ready state — confirm intended.
        """
        if obj_id is None:
            messages.error(request, _("Select some nodes to activate."))
            return
        api.node.Node.set_maintenance(request, obj_id, False)
        api.node.Node.set_power_state(request, obj_id, 'off')
class SetPowerStateOn(tables.BatchAction):
    """Table action powering the selected nodes on."""
    name = "set_power_state_on"
    action_present = _("Power On")
    action_past = _("Powering On")
    data_type_singular = _("Node")
    data_type_plural = _("Nodes")
    def allowed(self, request, obj=None):
        """Permit only for nodes that are not already powered on."""
        if not obj:
            # this is necessary because table actions use this function
            # with obj=None
            return True
        return obj.power_state not in api.node.POWER_ON_STATES
    def action(self, request, obj_id):
        """Power one node on; warn instead when no id was supplied."""
        if obj_id is None:
            messages.error(request, _("Select some nodes to power on."))
            return
        api.node.Node.set_power_state(request, obj_id, 'on')
class SetPowerStateOff(tables.BatchAction):
    """Table action powering the selected nodes off."""
    name = "set_power_state_off"
    action_present = _("Power Off")
    action_past = _("Powering Off")
    data_type_singular = _("Node")
    data_type_plural = _("Nodes")
    def allowed(self, request, obj=None):
        """Permit only for powered-on nodes not backing an instance."""
        if not obj:
            # this is necessary because table actions use this function
            # with obj=None
            return True
        return (
            obj.power_state in api.node.POWER_ON_STATES and
            getattr(obj, 'instance_uuid', None) is None
        )
    def action(self, request, obj_id):
        """Power one node off; warn instead when no id was supplied."""
        if obj_id is None:
            messages.error(request, _("Select some nodes to power off."))
            return
        api.node.Node.set_power_state(request, obj_id, 'off')
class NodeFilterAction(tables.FilterAction):
    """Substring filter over a few node fields."""
    def filter(self, table, nodes, filter_string):
        """Really naive case-insensitive search."""
        q = filter_string.lower()
        def comp(node):
            # ``unicode`` -> this module targets Python 2; under Python 2
            # ``filter`` below also returns a list, as tables expect.
            return any(q in unicode(value).lower() for value in (
                node.ip_address,
                node.cpus,
                node.memory_mb,
                node.local_gb,
            ))
        return filter(comp, nodes)
@memoized.memoized
def _get_role_link(role_id):
    """Resolve the role-detail URL for ``role_id``.

    Returns None implicitly for falsy ids; memoized because the same role
    id repeats across many table rows.
    """
    if role_id:
        return reverse('horizon:infrastructure:roles:detail',
                       kwargs={'role_id': role_id})
def get_role_link(datum):
    """Column link callback: role-detail URL for the row's node (or None)."""
    return _get_role_link(getattr(datum, 'role_id', None))
def get_power_state_with_transition(node):
    """Render a node's power state, shown as "current -> target" while a
    power-state transition is pending."""
    current = node.power_state
    target = node.target_power_state
    if not target or current == target:
        return current
    return "{0} -> {1}".format(current, target)
def get_state_string(node):
    """Map a node's raw state to a translated label.

    Unknown states are shown verbatim.
    """
    labels = {
        api.node.DISCOVERING_STATE: _('Discovering'),
        api.node.DISCOVERED_STATE: _('Discovered'),
        api.node.PROVISIONED_STATE: _('Provisioned'),
        api.node.PROVISIONING_FAILED_STATE: _('Provisioning Failed'),
        api.node.PROVISIONING_STATE: _('Provisioning'),
        api.node.FREE_STATE: _('Free'),
    }
    raw_state = node.state
    # Fall back to the raw state string when no translation exists.
    return labels.get(raw_state, raw_state)
class BaseNodesTable(tables.DataTable):
    """Shared column set and actions for the node listing tables.

    Subclasses narrow the visible columns via ``Meta.columns``; the
    declaration order below fixes the on-screen column order.
    """

    # The node UUID doubles as the display name and links to the detail view.
    node = tables.Column('uuid',
                         link="horizon:infrastructure:nodes:node_detail",
                         verbose_name=_("Node Name"))
    role_name = tables.Column('role_name',
                              link=get_role_link,
                              verbose_name=_("Deployment Role"))
    cpus = tables.Column('cpus',
                         verbose_name=_("CPU (cores)"))
    memory_mb = tables.Column('memory_mb',
                              verbose_name=_("Memory (MB)"))
    local_gb = tables.Column('local_gb',
                             verbose_name=_("Disk (GB)"))
    # Shows "current -> target" while a power transition is in flight.
    power_status = tables.Column(get_power_state_with_transition,
                                 verbose_name=_("Power Status"))
    state = tables.Column(get_state_string,
                          verbose_name=_("Status"))

    class Meta(object):
        name = "nodes_table"
        verbose_name = _("Nodes")
        table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
                         DeleteNode)
        row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode)
        template = "horizon/common/_enhanced_data_table.html"

    def get_object_id(self, datum):
        # Rows are keyed by node UUID.
        return datum.uuid

    def get_object_display(self, datum):
        # The UUID is also the human-visible identifier.
        return datum.uuid
class AllNodesTable(BaseNodesTable):
    """Tab listing every registered node regardless of state.

    Omits the role_name column from the base column set.
    """

    class Meta(object):
        name = "all_nodes_table"
        verbose_name = _("All")
        # hidden_title=False keeps the verbose_name visible above the table.
        hidden_title = False
        columns = ('node', 'cpus', 'memory_mb', 'local_gb', 'power_status',
                   'state')
        table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
                         DeleteNode)
        row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode)
        template = "horizon/common/_enhanced_data_table.html"
class ProvisionedNodesTable(BaseNodesTable):
    """Tab listing provisioned nodes; inherits the full base column set."""

    class Meta(object):
        name = "provisioned_nodes_table"
        verbose_name = _("Provisioned")
        # hidden_title=False keeps the verbose_name visible above the table.
        hidden_title = False
        table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
                         DeleteNode)
        row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode)
        template = "horizon/common/_enhanced_data_table.html"
class FreeNodesTable(BaseNodesTable):
    """Tab listing free (unassigned) nodes.

    Hides the role and state columns, which are meaningless here.
    """

    class Meta(object):
        name = "free_nodes_table"
        verbose_name = _("Free")
        # hidden_title=False keeps the verbose_name visible above the table.
        hidden_title = False
        columns = ('node', 'cpus', 'memory_mb', 'local_gb', 'power_status')
        table_actions = (NodeFilterAction, SetPowerStateOn, SetPowerStateOff,
                         DeleteNode)
        row_actions = (SetPowerStateOn, SetPowerStateOff, DeleteNode,)
        template = "horizon/common/_enhanced_data_table.html"
class MaintenanceNodesTable(BaseNodesTable):
    """Tab listing nodes in maintenance mode.

    Adds ActivateNode so nodes can be brought back into service.
    """

    class Meta(object):
        name = "maintenance_nodes_table"
        verbose_name = _("Maintenance")
        # hidden_title=False keeps the verbose_name visible above the table.
        hidden_title = False
        columns = ('node', 'cpus', 'memory_mb', 'local_gb', 'power_status',
                   'state')
        table_actions = (NodeFilterAction, ActivateNode, SetPowerStateOn,
                         SetPowerStateOff, DeleteNode)
        row_actions = (ActivateNode, SetPowerStateOn, SetPowerStateOff,
                       DeleteNode)
        template = "horizon/common/_enhanced_data_table.html"
|
nilq/baby-python
|
python
|
"""
Lab jack GUI
"""
import datetime
import lab_jack_lib as lj
import PySimpleGUI as sg
def logprint(message=''):
    """Print *message* prefixed with the current timestamp.

    Output format: ``[<datetime>, <message>]``.  Returns None.
    """
    stamped = '[{}, {}]'.format(datetime.datetime.now(), message)
    print(stamped)
def now_datetime(type=1):
    """Return the current time formatted according to *type*.

    type 1: "%Y-%m-%d %H:%M:%S"
    type 2: "%Y%m%d%H%M%S"
    type 3: "%Y%m%d_%H%M%S"
    type 4: "%Y%m%d%H%M"
    type 5: "%m%d_%H:%M:%S"
    type 6: "%Y%m%d"

    Any other value returns the raw ``datetime.datetime`` object (not a
    string), matching the original behaviour.

    NOTE: the parameter shadows the ``type`` builtin; the name is kept
    for backward compatibility with keyword callers.
    """
    formats = {
        1: "%Y-%m-%d %H:%M:%S",
        2: "%Y%m%d%H%M%S",
        3: "%Y%m%d_%H%M%S",
        4: "%Y%m%d%H%M",
        5: "%m%d_%H:%M:%S",
        6: "%Y%m%d",
    }
    now = datetime.datetime.now()
    fmt = formats.get(type)
    # Unknown type codes fall through to the datetime object itself.
    return now.strftime(fmt) if fmt else now
def create_window():
    """Build and return the PySimpleGUI window for the lab jack.

    The window shows current-position readouts (mm and device units) and
    buttons for absolute, relative, home, and preset up/down moves.
    Element keys ('-cp-', '-absmove-', ...) are dispatched in main().
    """
    # sg.theme('Light Blue 1')
    sg.theme('Dark Blue 3')
    # sg.theme('Black')
    layout = [
        [sg.Text('Current Position [mm]', size=(20, 1)),
         sg.Text('', font=('Helvetica', 20), size=(10, 1), key='-cpA-')],
        [sg.Text('Current Position [dp]', size=(20, 1)),
         sg.Text('', font=('Helvetica', 20), size=(10, 1), key='-cpD-')],
        # Fixed label typo: "Positon" -> "Position" (event key unchanged).
        [sg.Button(button_text='Current Position', size=(7, 3), key='-cp-')],
        [sg.Button(button_text='Move Abs', key='-absmove-')],
        [sg.Text('Abs Position [mm]', size=(20, 1)),
         sg.InputText('3', size=(5, 1), key='-abP-')],
        [sg.Button(button_text='Move Shift', key='-shiftmove-')],
        [sg.Text('Shift position [mm]', size=(20, 1)),
         sg.InputText('1', size=(5, 1), key='-abS-')],
        [sg.Button(button_text='Move Home', key='-homemove-')],
        # Reference readings from the hardware:
        # up:   6.886823333333333 mm, 8264188 device units
        # down: 3.2741558333333334 mm, 3928987 device units
        [sg.Button(button_text='Move Up', key='-upmove-')],
        [sg.Text('Abs set upper Position [mm]', size=(22, 1)),
         sg.InputText('6.88', size=(5, 1), key='-up-')],
        [sg.Button(button_text='Move down', key='-downmove-')],
        [sg.Text('Abs set lower Position [mm]', size=(22, 1)),
         sg.InputText('3.27', size=(5, 1), key='-low-')],
        [sg.Text('--Exit Close--', font=('Helvetica', 14))],
        [sg.Button(button_text='Exit', key='-cancel-')],
        [sg.Output(size=(50, 10))],
    ]
    # location=(horizontal, vertical); screen corners:
    # LT:(0,0), LB:(0,1079), RT:(1919,0), RB:(1919,1079)
    # NOTE(review): title says 'Tholabs' -- probably meant 'Thorlabs';
    # left unchanged in case something matches on the window title.
    return sg.Window('Lab jack Tholabs', layout, location=(900, 50))
def main():
    """Run the GUI event loop, dispatching button events to jack motions."""

    def refresh_position(window):
        # Read the jack and update both position readouts.
        # lj.jack_status() returns (device_units, millimetres).
        dp, ap = lj.jack_status()
        window['-cpA-'].update(ap)
        window['-cpD-'].update(dp)
        return dp, ap

    window = create_window()
    while True:
        event, values = window.read(timeout=100, timeout_key='-timeout-')
        # Window closed (None) or Exit button pressed.
        if event in (None, '-cancel-'):
            logprint('Exit')
            break
        # BUG FIX: the original compared with `event in '-cp-'` etc.,
        # which is a substring test on the key string, not equality.
        elif event == '-cp-':
            dp, ap = refresh_position(window)
            # BUG FIX: logprint() takes a single message argument; the
            # original logprint(ap, dp) raised TypeError.
            logprint('{} {}'.format(ap, dp))
        elif event == '-absmove-':
            lj.jack_move(float(values['-abP-']))
            refresh_position(window)
        elif event == '-shiftmove-':
            lj.jack_relative_move(float(values['-abS-']))
            refresh_position(window)
        elif event == '-homemove-':
            lj.jack_home()
            refresh_position(window)
        elif event == '-upmove-':
            lj.jack_move(float(values['-up-']))
            refresh_position(window)
        elif event == '-downmove-':
            lj.jack_move(float(values['-low-']))
            refresh_position(window)
        elif event == '-timeout-':
            # Periodic wakeup; automatic polling of the jack is
            # intentionally disabled (see commented-out original code).
            pass
    window.close()


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.