content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
import spdnn
# Smoke test: compare spdnn's sparse(transposed)-times-dense kernel against
# torch's dense mm reference on a small random matrix.
torch.manual_seed(7)
a = torch.rand(6, 6).cuda()
# Zero out entries below 0.6 so `a` is genuinely sparse (~60% zeros).
a[a<0.6] = 0.0
at = a.t()
print('at: ', at)
b = torch.rand(6, 6).cuda()
print('b: ', b)
#c = spdnn.spmm(a, b)
print('at shape: ', at.shape)
# Ensure all prior CUDA work has completed before invoking the kernel.
torch.cuda.synchronize()
# Kernel takes the *untransposed* sparse input; result is compared to at.mm(b).
c = spdnn.sparse_t_x_dense(a, b)
print('c=axb: ', c)
# Dense reference result.
c_true = at.mm(b)
print('c_true=axb: ', c_true)
# Frobenius norm of the difference; should be ~0 if the kernel is correct.
print('norm: ', float((c-c_true).norm()))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on 16 June 2021
Created by J Botha
This script attempts to join the file provided city-hex-polygons-8.geojson to the service request dataset.
When using the first 10 000 records from the service request dataset I seem to get no matches with
Latitude and Longitude variables.
I do however set the index value to 0 for any requests where the Latitude and Longitude fields are empty.
How to use:
Modules Needed:
-pip install boto3
-pip install pandas
Files needed:
Input file: "sr.csv" file that has been provided should be in the same directory.
Output file: "sr_updated.csv" file gets generated by this application.
"aws_credentials.py" file has been uploaded to the root directory.
Run: python initial_data_transformation.py
"""
from aws_credentials import access_key, secret_key, aws_region
from boto3.session import Session
from io import StringIO
from csv import reader
import boto3
import pandas as pd
import os
import itertools
import csv
import datetime
# Tracking Time taken for application to run
application_start_time = datetime.datetime.now()

# Read a sample of the service-request input file into memory.
list_of_rows = []
for each_file in sorted(os.listdir('.')):
    # open input file provided
    if each_file.endswith("sr.csv"):
        # read csv file as a list of lists
        with open(each_file, 'r') as read_obj:
            # pass the file object to reader() to get the reader object
            csv_reader = reader(read_obj)
            # reading in the first 10 000 records as a sample set
            for row in itertools.islice(csv_reader, 10000):
                list_of_rows.append(row)

# create boto session
# SECURITY FIX: the access key was hard-coded here even though it is imported
# from aws_credentials — use the imported value so no credential is leaked.
session = Session(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    region_name=aws_region
)
# make connection
client = session.client('s3')
# query and create response: S3 Select pulls only the feature properties
# out of the GeoJSON, so the whole file never has to be downloaded.
base_resp_standard = client.select_object_content(
    Bucket="cct-ds-code-challenge-input-data",
    Key="city-hex-polygons-8.geojson",
    Expression="SELECT d.properties FROM S3Object[*].features[*] d",
    ExpressionType="SQL",
    InputSerialization={"JSON": {"Type": "DOCUMENT"}},
    OutputSerialization={"JSON": {'RecordDelimiter': "\n"}}
)
# unpack query response
records = []
enhanced_list = []
for event in base_resp_standard["Payload"]:
    if "Records" in event:
        records.append(event["Records"]["Payload"])
# store unpacked data as a CSV format
file_str = ''.join(req.decode('utf-8') for req in records)
# read CSV to dataframe
df = pd.read_csv(StringIO(file_str))
# Each row is a fragment of JSON text; pull out the three property values.
for index, row in df.iterrows():
    tmp_list = []
    # h3_level8_index
    tmp_list.append(row[0].split(":")[2].strip('"'))
    # db_latitude
    tmp_list.append(row[1].split(":")[1])
    # db_longitude
    tmp_list.append(row[2].split(":")[1].split("}")[0])
    enhanced_list.append(tmp_list)

# open output file
with open('sr_updated.csv', 'w', encoding='UTF8', newline='') as f:
    writer = csv.writer(f)
    header = ['', 'NotificationNumber', 'NotificationType', 'CreationDate', 'CompletionDate', 'Duration', 'CodeGroup', 'Code', 'Open', 'Latitude', 'Longitude', 'SubCouncil2016', 'Wards2016', 'OfficialSuburbs', 'directorate', 'department', 'ModificationTimestamp', 'CompletionTimestamp', 'CreationTimestamp', 'h3_level8_index']
    # write the header to output file
    writer.writerow(header)
    # Loop through input data set and attach the matching hex index.
    for row1 in list_of_rows:
        if row1[10] == 'nan':
            # No coordinates: emit the row with index value 0.
            existing_row = row1
            existing_row.append(0)
            writer.writerow(existing_row)
        for row2 in enhanced_list:
            if row1[10] == row2[2] and row1[9] == row2[1]:
                # BUGFIX: list.append() returns None, so the original
                # `enhanced_row = row1.append(...); writer.writerow(enhanced_row)`
                # would have crashed on the first match. Append in place and
                # write the row itself.
                row1.append(row2[0])
                writer.writerow(row1)

application_end_time = datetime.datetime.now()
application_time_taken = application_end_time - application_start_time
# Process time stats
print("application_start_time = ", application_start_time)
print("application_end_time = ", application_end_time)
print("application_time_taken = ", application_time_taken)
|
nilq/baby-python
|
python
|
from collections import OrderedDict
from Jumpscale import j
JSBASE = j.baseclasses.object
class ModelBase(j.baseclasses.object):
    """
    Base class for capnp-backed model objects stored in a collection DB.

    Wraps a capnp message (``self.dbobj``) plus the key under which it is
    stored in ``collection._db``.  capnp lists cannot be resized in place,
    so list properties are temporarily copied into python lists stored as
    ``self.list_<name>`` (see :meth:`_listAddRemoveItem`) and written back
    into the capnp message on :meth:`save` via :meth:`reSerialize`.
    """

    def __init__(self, key="", new=False, collection=None):
        """
        @param key: key of an existing object (bytes keys are decoded);
            required when ``new`` is False
        @param new: when True a fresh capnp message is created
        @param collection: owning collection providing ``_db``,
            ``_capnp_schema`` and ``_index``
        """
        self._propnames = []
        self.collection = collection
        self._key = ""
        self.dbobj = None
        self.changed = False
        # names of list props currently materialized as python lists
        self._subobjects = {}
        if j.data.types.bytes.check(key):
            key = key.decode()
        if new:
            self.dbobj = self.collection._capnp_schema.new_message()
            self._post_init()
            if key != "":
                self._key = key
        elif key != "":
            # will get from db
            if self.collection._db.exists(key):
                self.load(key=key)
                self._key = key
            else:
                raise j.exceptions.Input(message="Cannot find object:%s!%s" % (self.collection.category, key))
        else:
            raise j.exceptions.Input(
                message="key cannot be empty when no new obj is asked for.", level=1, source="", tags="", msgpub=""
            )

    @property
    def key(self):
        """Storage key; lazily generated when not set yet."""
        if self._key is None or self._key == "":
            self._key = self._generate_key()
        return self._key

    @key.setter
    def key(self, value):
        if j.data.types.bytes.check(value):
            value = value.decode()
        self._key = value

    def _post_init(self, **kwargs):
        # hook for subclasses; called after a new capnp message is created
        pass

    def _pre_save(self):
        # needs to be implemented see e.g. ActorModel
        pass

    def _generate_key(self):
        # return a unique key to be used in db (std the key but can be overriden)
        return j.data.hash.md5_string(j.data.idgenerator.generateGUID())

    def index(self):
        # put indexes in db as specified
        if self.collection is not None:
            self.collection._index.index({self.dbobj.name: self.key})

    def load(self, key):
        """Load the capnp message for ``key`` from the collection DB."""
        if self.collection._db.inMem:
            # in-memory DB stores the live object, no deserialization needed
            self.dbobj = self.collection._db.get(key)
        else:
            buff = self.collection._db.get(key)
            self.dbobj = self.collection._capnp_schema.from_bytes(buff, builder=True)

    # NOTE: a __setattr__/__dir__ based property proxy was sketched here
    # historically but never worked (recursion issues); see VCS history.

    def reSerialize(self):
        """Write materialized ``list_<name>`` python lists back into dbobj."""
        for key in list(self._subobjects.keys()):
            prop = self.__dict__["list_%s" % key]
            dbobjprop = eval("self.dbobj.%s" % key)
            if len(dbobjprop) != 0:
                raise j.exceptions.Base("bug, dbobj prop should be empty, means we didn't reserialize properly")
            if prop is not None and len(prop) > 0:
                # init the subobj, iterate over all the items we have & insert them
                subobj = self.dbobj.init(key, len(prop))
                for x in range(0, len(prop)):
                    subobj[x] = prop[x]
            self._subobjects.pop(key)
            self.__dict__.pop("list_%s" % key)

    def save(self):
        """Serialize (if needed), persist to the DB and update indexes."""
        self.reSerialize()
        self._pre_save()
        if self.collection._db.inMem:
            self.collection._db.db[self.key] = self.dbobj
        else:
            # no need to store when in mem because we are the object which does not have to be serialized
            # so this one stores when not mem
            buff = self.dbobj.to_bytes()
            if hasattr(self.dbobj, "clear_write_flag"):
                self.dbobj.clear_write_flag()
            self.collection._db.set(self.key, buff)
        self.index()

    def to_dict(self):
        self.reSerialize()
        d = self.dbobj.to_dict()
        d["key"] = self.key
        return d

    @property
    def dictFiltered(self):
        """
        remove items from obj which cannot be serialized to json or not relevant in dict
        """
        # made to be overruled
        return self.to_dict()

    @dictFiltered.setter
    def dictFiltered(self, ddict):
        """Rebuild the capnp message from a dict; adopts ``ddict['key']`` if present."""
        if "key" in ddict:
            # BUGFIX: was ``ddict[key]`` which raised NameError (``key`` undefined)
            self.key = ddict["key"]
        self.dbobj = self.collection._capnp_schema.new_message(**ddict)

    @property
    def dictJson(self):
        ddict2 = OrderedDict(self.dictFiltered)
        return j.data.serializers.json.dumps(ddict2, sort_keys=True, indent=True)

    def raiseError(self, msg):
        msg = "Error in dbobj:%s (%s)\n%s" % (self._category, self.key, msg)
        raise j.exceptions.Input(message=msg)

    def updateSubItem(self, name, keys, data):
        """Replace the first sub item matching ``data`` on all ``keys``, then add ``data``."""
        keys = keys or []
        if not isinstance(keys, list):
            keys = [keys]
        self._listAddRemoveItem(name)
        existing = self.__dict__["list_%s" % name]
        for idx, item in enumerate(existing):
            match = True
            for key in keys:
                if item.to_dict()[key] != data.to_dict()[key]:
                    match = False
            if keys and match:
                # drop the matched item; data is appended below as replacement
                existing.pop(idx)
                break
        self.addSubItem(name, data)

    def addDistinctSubItem(self, name, data):
        """Add ``data`` to list prop ``name`` unless an equal item already exists."""
        self._listAddRemoveItem(name=name)
        for item in self.__dict__["list_%s" % name]:
            if item.to_dict() == data.to_dict():
                return
        self.__dict__["list_%s" % name].append(data)

    def addSubItem(self, name, data):
        """
        @param data is string or object first retrieved by self.collection.list_$name_constructor(**args)
        can also directly add them to self.list_$name.append(self.collection.list_$name_constructor(**args)) if it already exists
        """
        self._listAddRemoveItem(name=name)
        self.__dict__["list_%s" % name].append(data)

    def initSubItem(self, name):
        self._listAddRemoveItem(name=name)

    def deleteSubItem(self, name, pos):
        """
        @param pos is the position in the list
        """
        self._listAddRemoveItem(name=name)
        self.__dict__["list_%s" % name].pop(pos)
        self.reSerialize()

    def _listAddRemoveItem(self, name):
        """
        if you want to change size of a list on obj use this method
        capnp doesn't allow modification of lists, so when we want to change size of a list then we need to reSerialize
        and put content of a list in a python list of dicts
        we then re-serialize and leave the subobject empty untill we know that we are at point we need to save the object
        when we save we populate the subobject so we get a nicely created capnp message
        """
        if name in self._subobjects:
            # means we are already prepared
            return
        prop = eval("self.dbobj.%s" % name)
        if len(prop) == 0:
            self.__dict__["list_%s" % name] = []
        else:
            try:
                self.__dict__["list_%s" % name] = [item.copy() for item in prop]
            except BaseException:  # means is not an object can be e.g. a string
                self.__dict__["list_%s" % name] = [item for item in prop]
        # empty the dbobj list
        exec("self.dbobj.%s=[]" % name)
        self._subobjects[name] = True
        self.changed = True

    def __repr__(self):
        out = "key:%s\n" % self.key
        out += self.dictJson
        return out

    __str__ = __repr__
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sep 6, 2020
@author: eljeffe
Copyright 2020 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import urandom
from hashlib import sha256
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import String, Boolean, Integer
from models import dbsession
from models.BaseModels import DatabaseObject
from libs.StringCoding import encode
from datetime import datetime, timedelta
class PasswordToken(DatabaseObject):
    """Single-use password reset token tied to a user account."""

    user_id = Column(Integer, ForeignKey("user.id", ondelete="CASCADE"), nullable=False)
    value = Column(String(32), unique=True, nullable=False)
    used = Column(Boolean, nullable=False, default=False)

    @classmethod
    def all(cls):
        """Return every token stored in the database."""
        return dbsession.query(cls).all()

    @classmethod
    def by_id(cls, _id):
        """Return the token whose primary key equals *_id* (or None)."""
        return dbsession.query(cls).filter_by(id=_id).first()

    @classmethod
    def by_user_id(cls, user_id):
        """Return the first token belonging to *user_id* (or None)."""
        return dbsession.query(cls).filter_by(user_id=user_id).first()

    @classmethod
    def count(cls):
        """Return how many tokens exist in the database."""
        return dbsession.query(cls).count()

    @classmethod
    def by_value(cls, value):
        """Return the token carrying the given *value* (or None)."""
        return dbsession.query(cls).filter_by(value=value).first()

    def is_expired(self, hours=3):
        """Return True once more than *hours* have passed since creation."""
        cutoff = self.created + timedelta(hours=hours)
        return datetime.now() > cutoff
|
nilq/baby-python
|
python
|
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
from oslo_log import log as logging
from tacker.common import coordination
from tacker.sol_refactored.common import exceptions as sol_ex
LOG = logging.getLogger(__name__)
# NOTE: It is used to prevent operation for the same vnf instance
# from being processed at the same time. It can be applied between
# threads of a process and different processes (e.g. tacker-server
# and tacker-conductor) on a same host.
# Note that race condition of very short time is not considered.
def lock_vnf_instance(inst_arg, delay=False):
    """Decorator factory serializing operations on a single vnf instance.

    :param inst_arg: format string (e.g. ``"{inst_id}"``) rendered against the
        decorated function's bound arguments to derive the lock name
    :param delay: when True, wait up to 10 seconds for the lock instead of
        failing immediately
    :raises sol_ex.OtherOperationInProgress: when the lock cannot be acquired
    """
    # NOTE: tacker-server issues RPC call to tacker-conductor
    # (just) before the lock released. 'delay' is for tacker-conductor
    # to be able to wait if it receives RPC call before tacker-server
    # releases the lock.
    def operation_lock(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            coord = coordination.COORDINATOR
            # ensure coordination start
            # NOTE: it is noop if already started.
            coord.start()
            # Resolve the lock id from the actual call arguments of func.
            sig = inspect.signature(func)
            call_args = sig.bind(*args, **kwargs).arguments
            inst_id = inst_arg.format(**call_args)
            lock = coord.get_lock(inst_id)
            # False -> fail fast; 10 -> block for up to 10 seconds.
            blocking = False if not delay else 10
            # NOTE: 'with lock' is not used since it can't handle
            # lock failed exception well.
            if not lock.acquire(blocking=blocking):
                LOG.debug("Locking vnfInstance %s failed.", inst_id)
                raise sol_ex.OtherOperationInProgress(inst_id=inst_id)
            try:
                LOG.debug("vnfInstance %s locked.", inst_id)
                return func(*args, **kwargs)
            finally:
                # Always release, even when the wrapped operation raises.
                lock.release()
        return wrapper
    return operation_lock
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the Python example service without needing
to use the bazel build system. Usage:
$ python example_compiler_gym_service/demo_without_bazel.py
It is equivalent in behavior to the demo.py script in this directory.
"""
import logging
from pathlib import Path
from typing import Iterable
import gym
from compiler_gym.datasets import Benchmark, Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.spaces import Reward
from compiler_gym.util.logging import init_logging
from compiler_gym.util.registration import register
# Path of the example service script, relative to the working directory.
EXAMPLE_PY_SERVICE_BINARY: Path = Path(
    "example_compiler_gym_service/service_py/example_service.py"
)
# Fail fast at import time if the script is run from the wrong directory.
assert EXAMPLE_PY_SERVICE_BINARY.is_file(), "Service script not found"
class RuntimeReward(Reward):
    """An example reward that uses changes in the "runtime" observation value
    to compute incremental reward.
    """

    def __init__(self):
        super().__init__(
            id="runtime",
            observation_spaces=["runtime"],
            default_value=0,
            default_negates_returns=True,
            deterministic=False,
            platform_dependent=True,
        )
        # Runtime observed at the previous step; None until the first update.
        self.previous_runtime = None

    def reset(self, benchmark: str, observation_view):
        """Forget the previous runtime at the start of a new episode."""
        del benchmark  # unused
        self.previous_runtime = None

    def update(self, action, observations, observation_view):
        """Return the decrease in runtime since the previous step."""
        del action
        del observation_view
        if self.previous_runtime is None:
            # First step of the episode: no baseline yet, so reward is 0.
            self.previous_runtime = observations[0]
        reward = float(self.previous_runtime - observations[0])
        self.previous_runtime = observations[0]
        return reward
class ExampleDataset(Dataset):
    """Tiny in-memory dataset exposing two fixed benchmarks (/foo and /bar)."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but not forwarded to
        # super().__init__() — confirm whether they should be passed through.
        super().__init__(
            name="benchmark://example-v0",
            license="MIT",
            description="An example dataset",
        )
        # Benchmarks keyed by the path component of their URI.
        self._benchmarks = {
            "/foo": Benchmark.from_file_contents(
                "benchmark://example-v0/foo", "Ir data".encode("utf-8")
            ),
            "/bar": Benchmark.from_file_contents(
                "benchmark://example-v0/bar", "Ir data".encode("utf-8")
            ),
        }

    def benchmark_uris(self) -> Iterable[str]:
        """Yield the full URI of every benchmark in this dataset."""
        yield from (f"benchmark://example-v0{k}" for k in self._benchmarks.keys())

    def benchmark_from_parsed_uri(self, uri: BenchmarkUri) -> Benchmark:
        """Look up a benchmark by the path component of its parsed URI."""
        if uri.path in self._benchmarks:
            return self._benchmarks[uri.path]
        else:
            raise LookupError("Unknown program name")
# Register the environment for use with gym.make(...).
# The service script, reward and dataset defined above are wired in here.
register(
    id="example-v0",
    entry_point="compiler_gym.envs:CompilerEnv",
    kwargs={
        "service": EXAMPLE_PY_SERVICE_BINARY,
        "rewards": [RuntimeReward()],
        "datasets": [ExampleDataset()],
    },
)
def main():
    """Run 20 random steps against the example environment."""
    # Debug verbosity prints extra logging information.
    init_logging(level=logging.DEBUG)

    # The environment registered above is created through the normal gym API.
    with gym.make("example-v0") as env:
        env.reset()
        step = 0
        while step < 20:
            _observation, _reward, done, _info = env.step(env.action_space.sample())
            if done:
                env.reset()
            step += 1


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
__all__ = ['Mode', 'Format']
from dataclasses import dataclass
from enum import Enum
from typing import Tuple
class Mode(Enum):
    """A taco dimension storage mode.

    Each member carries the integer used by the compiled taco header and the
    single-character code used in format strings.
    """

    # Manually map these to the entries in .taco_compile.taco_type_header.taco_mode_t
    dense = (0, 'd')
    compressed = (1, 's')

    def __init__(self, c_int: int, character: str):
        # Enum unpacks the tuple value into these two attributes.
        self.c_int = c_int
        self.character = character

    @staticmethod
    def from_c_int(value: int) -> 'Mode':
        """Return the Mode whose C integer code is *value*.

        :raises ValueError: when no member has that integer code.
        """
        for member in Mode:
            # Use the named attribute rather than poking at value[0].
            if member.c_int == value:
                return member
        # BUGFIX: the message previously named a non-existent class
        # "DimensionalMode"; this class is called Mode.
        raise ValueError(f'No member of Mode has the integer value {value}')
@dataclass(frozen=True)
class Format:
    """A taco tensor format: one storage mode per dimension plus an ordering."""

    modes: Tuple[Mode, ...]
    ordering: Tuple[int, ...]

    def __post_init__(self):
        # Both tuples must describe the same number of dimensions.
        n_modes, n_ordering = len(self.modes), len(self.ordering)
        if n_modes != n_ordering:
            raise ValueError(f'Length of modes ({n_modes}) must be equal to length of ordering '
                             f'({n_ordering})')

    @property
    def order(self):
        """Number of dimensions described by this format."""
        return len(self.modes)

    def deparse(self):
        """Render the format as a compact string of mode characters."""
        natural = tuple(range(self.order))
        if self.ordering != natural:
            # Non-trivial ordering: annotate each mode with its dimension index.
            pieces = [m.character + str(o) for m, o in zip(self.modes, self.ordering)]
        else:
            pieces = [m.character for m in self.modes]
        return ''.join(pieces)
|
nilq/baby-python
|
python
|
from matplotlib import pyplot,gridspec,colors,patches
import numpy
import os
from diatom import Calculate
import warnings
from scipy import constants
h = constants.h
cwd = os.path.dirname(os.path.abspath(__file__))
def make_segments(x, y):
    '''Segment x and y points.

    Builds the list of consecutive line segments from x and y coordinates in
    the format LineCollection expects: an array of shape
    (num_segments, points_per_segment=2, 2).

    Args:
        x, y (numpy.ndarray-like): points on lines

    Returns:
        segments (numpy.ndarray): array of shape (len(x)-1, 2, 2)
    '''
    # (n, 1, 2) array of [x, y] points.
    pts = numpy.column_stack((x, y)).reshape(-1, 1, 2)
    # Pair each point with its successor along axis 1.
    return numpy.concatenate((pts[:-1], pts[1:]), axis=1)
def colorline(x, y, z=None, cmap=pyplot.get_cmap('copper'),
              norm=pyplot.Normalize(0.0, 1.0), linewidth=3, alpha=1.0,
              legend=False, ax=None):
    '''Plot a line shaded by an extra value.

    Plot a colored line with coordinates x and y.
    Optionally specify colors in the array z.
    Optionally specify a colormap, a norm function and a line width.

    Args:
        x,y (list-like): x and y coordinates to plot

    kwargs:
        z (list): Optional third parameter to colour lines by
        cmap (matplotlib.cmap): colour mapping for z
        norm (): Normalisation function for mapping z values to colours
        linewidth (float): width of plotted lines (default = 3)
        alpha (float): value of alpha channel (default = 1)
        legend (Bool): display a legend (default = False)
        ax (matplotlib.pyplot.axes): axis object to plot on

    Returns:
        lc (Collection): collection of lines
    '''
    # BUGFIX: LineCollection was referenced but never imported anywhere in
    # this module (only pyplot/gridspec/colors/patches are imported at the
    # top), so this function raised NameError. Import it locally so the
    # module-level import block is untouched.
    from matplotlib.collections import LineCollection

    if ax is None:
        ax = pyplot.gca()
    # Default colors equally spaced on [0,1]:
    if z is None:
        z = numpy.linspace(0.0, 1.0, len(x))
    # Special case if a single number:
    if not hasattr(z, "__iter__"):  # to check for numerical input -- this is a hack
        z = numpy.array([z])
    z = numpy.asarray(z)
    segments = make_segments(x, y)
    lc = LineCollection(segments, array=z, cmap=cmap, norm=norm,
                        linewidth=linewidth, zorder=1.25)
    ax.add_collection(lc)
    # NOTE(review): the `alpha` and `legend` parameters are currently unused.
    return lc
def TDM_plot(energies,States,gs,Nmax,I1,I2,TDMs=None,
             pm = +1, Offset=0,fig=pyplot.gcf(),
             log=False,minf=None,maxf=None,prefactor=1e-3,col=None):
    ''' Create a TDM plot

    this function plots a series of energy levels and their transition dipole
    moments from a given ground state. In this version a lot of the plotting style
    is fixed.

    Args:
        energies (numpy.ndarray) - array of energy levels
        states (numpy.ndarray) - array of states corresponding to energies such that E[i] -> States[:,i]
        gs (int) - index for ground state of interest
        Nmax (int) - maximum rotational quantum number to include
        I1, I2 (float) - nuclear spins of nuclei 1 and 2

    Kwargs:
        TDMs (list of numpy.ndarray) - optional precomputed transition dipole moments in [sigma-,pi,sigma+] order
        pm (float) - flag for if the transition increases or decreases N (default = 1)
        Offset (float) - yaxis offset (default = 0)
        fig (matplotlib.pyplot.figure) - figure object to draw on
        log (bool) - use logarithmic scaling for TDM plots
        minf (float) - minimum frequency to show
        maxf (float) - maximum frequency to show
        prefactor (float) - scaling factor for all energies
        col (list) - list of colours for lines (must be at least length 3 )
    '''
    # NOTE(review): the fig=pyplot.gcf() default is evaluated once at import
    # time, grabbing (or creating) whichever figure is current then — confirm
    # this is intended rather than resolving gcf() per call.
    gray ='xkcd:grey'
    if col == None:
        green ='xkcd:darkgreen'
        red ='xkcd:maroon'
        blue ='xkcd:azure'
        col=[red,blue,green]
    # NOTE(review): `blue`, `green` and `red` are referenced further down even
    # when a custom `col` is supplied, in which case they are undefined
    # (NameError) — verify callers always rely on the default colours.
    # NOTE(review): the `== None` comparisons below would misbehave if TDMs is
    # passed as a numpy array (elementwise comparison); `is None` would be safer.
    if TDMs == None and (Nmax == None or I1 == None or I2 == None):
        raise RuntimeError("TDMs or Quantum numbers must be supplied")
    elif (Nmax == None or I1 == None or I2 == None):
        # Precomputed TDMs supplied: unpack [sigma-, pi, sigma+] rows.
        TDMs = numpy.array(TDMs)
        dm = TDMs[0,:]
        dz = TDMs[1,:]
        dp = TDMs[2,:]
    elif TDMs == None:
        # Quantum numbers supplied: compute the three polarisation components.
        dm = numpy.round(Calculate.TDM(Nmax,I1,I2,+1,States,gs),6)
        dz = numpy.round(Calculate.TDM(Nmax,I1,I2,0,States,gs),6)
        dp = numpy.round(Calculate.TDM(Nmax,I1,I2,-1,States,gs),6)
    # Clamp pm to +/-1.
    if abs(pm)>1:
        pm = int(pm/abs(pm))
    # 4 columns: three polarisation panels plus a wider bar-plot column.
    widths = numpy.zeros(4)+1
    widths[-1] = 1.4
    fig.set_figheight(8)
    fig.set_figwidth(6)
    grid= gridspec.GridSpec(2,4,width_ratios=widths)
    N,MN = Calculate.LabelStates_N_MN(States,Nmax,I1,I2)
    #find the ground state that the user has put in
    N0 = N[gs]
    gs_E = energies[gs]
    lim =10
    # l1: states sharing the ground state's N; l2: states with N0 +/- 1.
    l1 = numpy.where(N==N0)[0]
    min_gs = prefactor*numpy.amin(energies[l1]-gs_E)/h
    max_gs = prefactor*numpy.amax(energies[l1]-gs_E)/h
    l2 = numpy.where(N==N0+pm)[0]
    # Derive frequency window from the excited manifold when not given.
    if minf ==None:
        emin = numpy.amin(energies[l2])
        minf = 10e4
        f = prefactor*(emin-gs_E)/h - Offset
        minf = min([minf,f])
    if maxf ==None:
        emax = numpy.amax(energies[l2])
        maxf = 0
        f = prefactor*(emax-gs_E)/h - Offset
        maxf = max([maxf,f])
    # Ground manifold goes in the lower (pm=+1) or upper (pm=-1) grid row;
    # the three excited panels share a y axis.
    if pm == 1:
        ax0 = fig.add_subplot(grid[1,:-1])
        ax = []
        for j in range(3):
            if j ==0:
                ax.append(fig.add_subplot(grid[0,j],zorder=1))
            else:
                ax.append(fig.add_subplot(grid[0,j],sharey=ax[0],zorder=1))
    elif pm == -1:
        ax0 = fig.add_subplot(grid[0,:-1])
        ax = []
        for j in range(3):
            if j ==0:
                ax.append(fig.add_subplot(grid[1,j],zorder=1))
            else:
                ax.append(fig.add_subplot(grid[1,j],sharey=ax[0],zorder=1))
    #plotting the energy levels for ground state
    for l in l1:
        f =prefactor*(energies[l]-gs_E)/h #- Offset
        if l ==gs:
            ax0.plot([-lim,lim],[f,f],color='k',zorder=1.2)
        else:
            ax0.plot([-lim,lim],[f,f],color=gray,zorder=0.8)
    lbl = ['$\sigma_-$',"$\pi$","$\sigma_+$"]
    for j,axis in enumerate(ax):
        #plotting for excited state
        for l in l2:
            f = prefactor*(energies[l]-gs_E)/h - Offset
            # Colour a level only in the panel matching its non-zero TDM.
            if dz[l]!=0 and j==1:
                axis.plot([-lim,lim],[f,f],color=blue,zorder=1.2)
            elif dp[l] !=0 and j ==2:
                axis.plot([-lim,lim],[f,f],color=green,zorder=1.2)
            elif dm[l] !=0 and j ==0:
                axis.plot([-lim,lim],[f,f],color=red,zorder=1.2)
            else:
                axis.plot([-lim,lim],[f,f],color=gray,zorder=0.8)
        if j ==0 :
            axis.tick_params(labelbottom=False,bottom=False,which='both')
        else:
            axis.tick_params(labelleft=False,left=False,labelbottom=False,
                             bottom=False,which='both')
        axis.set_xlim(-lim,lim)
        axis.set_title(lbl[j],color=col[j])
    # set the ticks so that only the left most has a frequency/energy axis
    # and none have an x axis
    ax0.tick_params(labelbottom=False,bottom=False,which='both')
    ax0.set_xlim(-lim,lim)
    #add the bar plot axis
    ax_bar = fig.add_subplot(grid[0,-1],sharey = ax[0])
    ax_bar.tick_params(labelleft=False,left=False, which='both')
    #fix the ROI to be 300 kHz around the state the user has chosen
    ax0.set_ylim(min_gs,max_gs)
    f = prefactor*(energies-gs_E)/h-Offset
    #normalise function, returns a number between 0 and 1
    Norm = colors.LogNorm(vmin=1e-3,vmax=1,clip=True)
    #how thick should a line be?
    max_width = 2
    #setting where and how far apart the lines should all be in data coords
    ax1 = ax[0]
    ax2 = ax[1]
    ax3 = ax[2]
    # For each polarisation panel, map its x extent into ax0's data coords so
    # the connection lines can be spread evenly across the panel width.
    disp = ax2.transData.transform((-lim,0))
    x1a = ax0.transData.inverted().transform(disp)[0]
    disp = ax2.transData.transform((lim,0))
    x1b = ax0.transData.inverted().transform(disp)[0]
    Nz = len(numpy.where(dz!=0)[0])
    iz = 0
    deltax = (x1b-x1a)/(Nz+1)
    x0 = x1a+deltax
    disp = ax3.transData.transform((-lim,0))
    y1a = ax0.transData.inverted().transform(disp)[0]
    disp = ax3.transData.transform((lim,0))
    y1b = ax0.transData.inverted().transform(disp)[0]
    Np = len(numpy.where(dp!=0)[0])
    ip =0
    deltay = (y1b-y1a)/(Np+1)
    y0 = y1a+deltay
    disp = ax1.transData.transform((-lim,0))
    z1a = ax0.transData.inverted().transform(disp)[0]
    disp = ax1.transData.transform((lim,0))
    z1b = ax0.transData.inverted().transform(disp)[0]
    Nm = len(numpy.where(dm!=0)[0])
    im = 0
    deltaz = (z1b-z1a)/(Nm+1)
    z0 = z1a+deltaz
    for j,d in enumerate(dz):
        #this block of code plots the dipole moments (or transition strengths)
        if abs(d)>0:
            width = max_width*Norm(3*numpy.abs(d)**2)
            x = x0 +iz*deltax
            # makes sure that the line is perfectly vertical in display coords
            disp = ax0.transData.transform((x,0))
            x2 = ax2.transData.inverted().transform(disp)[0]
            p = patches.ConnectionPatch((x,0),(x2,f[j]),coordsA='data',coordsB='data',
                                        axesA=ax0,axesB=ax2,zorder=5,color='k',
                                        lw=width) #line object
            ax2.add_artist(p) # add line to axes
            iz+=1
            #bar plot for transition strengths. Relative to spin-stretched TDM
            ax_bar.barh(f[j],numpy.abs(d),color=blue,height=5)
        # Repeat for the sigma+ component on the third panel.
        d=dp[j]
        if abs(d)>0:
            width = max_width*Norm(3*numpy.abs(d)**2)
            y= y0 +ip*deltay
            # makes sure that the line is perfectly vertical in display coords
            disp = ax0.transData.transform((y,0))
            y2 = ax3.transData.inverted().transform(disp)[0]
            p = patches.ConnectionPatch((y,0),(y2,f[j]),coordsA='data',coordsB='data',
                                        axesA=ax0,axesB=ax3,zorder=5,color='k',
                                        lw=width) #line object
            ax3.add_artist(p)
            ip+=1
            #bar plot for transition strengths. Relative to spin-stretched TDM
            ax_bar.barh(f[j],numpy.abs(d),color=green,height=5)
        # Repeat for the sigma- component on the first panel.
        d=dm[j]
        if abs(d)>0:
            width = max_width*Norm(3*numpy.abs(d)**2)
            z = z0 +im*deltaz
            # makes sure that the line is perfectly vertical in display coords
            disp = ax0.transData.transform((z,0))
            z2 = ax1.transData.inverted().transform(disp)[0]
            p = patches.ConnectionPatch((z,0),(z2,f[j]),coordsA='data',coordsB='data',
                                        axesA=ax0,axesB=ax1,zorder=5,color='k',
                                        lw=width)#line object
            ax1.add_artist(p)
            im +=1
            #bar plot for transition strengths. Relative to spin-stretched TDM
            ax_bar.barh(f[j],numpy.abs(d),color=red,height = 5)
    #setup log axes for axis 4 (bar plots)
    if log:
        ax_bar.set_xscale('log')
        ax_bar.set_xticks([1e-6,1e-3,1])
        ax_bar.set_xticks([1e-5,1e-4,1e-2,1e-1],minor=True)
        ax_bar.set_xticklabels(["10$^{-6}$","10$^{-3}$","1"])
        ax_bar.set_xticklabels(["","","",""],minor=True)
    # now to rescale the other axes so that they have the same y scale
    ax1.set_ylim(minf-20,maxf+20)
    grid.set_height_ratios([(maxf-minf)+40,300])
    pyplot.subplots_adjust(hspace=0.1)
    grid.update()
    #add some axis labels
    # NOTE(review): the label below on ax0 is immediately superseded for ax[0]
    # by one of the two branches that follow — confirm whether ax0 or ax[0]
    # was meant in each case (units also differ: kHz vs Hz).
    ax0.set_ylabel("Energy/$h$ (kHz)")
    if Offset != 0:
        ax[0].set_ylabel("Energy/$h$ (kHz) - {:.1f} MHz".format(Offset))
    else:
        ax[0].set_ylabel("Energy/$h$ (Hz)")
    ax_bar.set_xlabel("TDM ($d_0$)")
if __name__ == '__main__':
    # Demo: build the RbCs Hamiltonian up to Nmax=3 with a Zeeman term,
    # diagonalise it, and draw TDM plots two ways: computed internally,
    # then with precomputed TDM arrays passed in.
    from diatom import Hamiltonian,Calculate
    H0,Hz,HDC,HAC = Hamiltonian.Build_Hamiltonians(3,Hamiltonian.RbCs,zeeman=True)
    eigvals,eigstate = numpy.linalg.eigh(H0+181.5e-4*Hz)
    TDM_plot(eigvals,eigstate,1,
             Nmax = 3,I1 = Hamiltonian.RbCs['I1'], I2 = Hamiltonian.RbCs['I2'],
             Offset=980e3,prefactor=1e-3)
    fig = pyplot.figure(2)
    loc = 0
    # Precompute the three polarisation components for state `loc`.
    TDM_pi = Calculate.TDM(3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],0,eigstate,loc)
    TDM_Sigma_plus = Calculate.TDM(3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],-1,eigstate,loc)
    TDM_Sigma_minus = Calculate.TDM(3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],+1,eigstate,loc)
    TDMs =[TDM_Sigma_minus,TDM_pi,TDM_Sigma_plus]
    TDM_plot(eigvals,eigstate,loc,3,Hamiltonian.RbCs['I1'],Hamiltonian.RbCs['I2'],Offset=980e3,fig=fig)
    pyplot.show()
|
nilq/baby-python
|
python
|
from __future__ import print_function
import os, sys
from chainer.links.caffe import CaffeFunction
from chainer import serializers
# One-off conversion: parse the Caffe VGG16 reference weights and re-save
# them as a Chainer .npz snapshot for fast loading.
print('load VGG16 caffemodel')
# CaffeFunction parses the .caffemodel protobuf (slow, done once).
vgg = CaffeFunction('pretrained_model/VGG_ILSVRC_16_layers.caffemodel')
print('save "vgg16.npz"')
serializers.save_npz('pretrained_model/vgg16.npz', vgg)
|
nilq/baby-python
|
python
|
from flask import Blueprint, request, jsonify, make_response
from core import config
import requests
console = Blueprint('console', __name__)
@console.route('/jobs', methods=['GET', 'POST', 'DELETE'])
def jobs():
    """Proxy job management requests to the zmapd backend REST API.

    GET lists jobs; POST creates one; DELETE removes one by id.
    On a non-success backend status for POST/DELETE, control falls through
    to the final return, which reports the backend status code.
    """
    url = 'http://' + config['zmapd'] + '/api/jobs/'
    if request.method == 'GET':
        resp = requests.get(url)
        return jsonify({
            'code': 20000,
            'jobs': resp.json()
        })
    elif request.method == 'POST':
        job = request.json['job']
        resp = requests.post(url, data=job)
        # 201 Created is the backend's success status for creation.
        if resp.status_code == 201:
            return jsonify({
                'code': 20000
            })
    elif request.method == 'DELETE':
        id = request.json['id']
        resp = requests.delete(url+id+'/')
        # 204 No Content is the backend's success status for deletion.
        if resp.status_code == 204:
            return jsonify({
                'code': 20000
            })
    # NOTE(review): reached only for failed POST/DELETE (Flask rejects other
    # methods before this handler runs, so `resp` is always bound here).
    # The 'code' is still 20000 even though an error is reported — confirm
    # the console frontend expects that.
    return jsonify({
        'code': 20000,
        'error': resp.status_code
    })
|
nilq/baby-python
|
python
|
import hashlib
from requests import post
from observer_hub.util import logger
PRIORITY_MAPPING = {"Critical": 1, "High": 1, "Medium": 2, "Low": 3, "Info": 4}
class AdoClient(object):
    """Minimal Azure DevOps work-item client used to file threshold violations.

    Creates one work item per failed threshold result, de-duplicated by a
    SHA-256 hash embedded in the item description.
    """

    def __init__(self, organization, project, personal_access_token,
                 team=None, issue_type="issue", rules="false", notify="false"):
        """
        :param organization: Azure DevOps organization name
        :param project: project name inside the organization
        :param personal_access_token: PAT used for basic auth (empty username)
        :param team: optional team; items land in area path "<project>\\<team>"
        :param issue_type: work item type embedded in the create URL
        :param rules: 'true'/'false' string for the bypassRules query parameter
        :param notify: 'true'/'false' string for suppressNotifications
        """
        self.auth = ('', personal_access_token)
        self.team = f"{project}"
        if team:
            self.team = f"{project}\\{team}"
        self.url = f'https://dev.azure.com/{organization}/{project}/_apis/wit/workitems/' \
                   f'${issue_type}?bypassRules={rules}&suppressNotifications={notify}&api-version=5.1'
        self.query_url = f'https://dev.azure.com/{organization}/{project}/_apis/wit/wiql?api-version=5.1'

    def get_issues(self, issue_hash=None):
        """Return work items whose description contains *issue_hash* (WIQL query)."""
        q = f"SELECT [System.Id] From WorkItems Where [System.Description] Contains \"{issue_hash}\""
        data = post(self.query_url, auth=self.auth, json={"query": q},
                    headers={'content-type': 'application/json'}).json()
        return data["workItems"]

    def create_issues(self, test_name, data):
        """Create one work item per non-passed threshold entry in *data*.

        Entries whose fingerprint already exists in Azure DevOps are skipped.
        """
        for d in data:
            if d['status'] == 'passed':
                continue
            # Stable fingerprint of the violation, used for de-duplication.
            issue_hash = hashlib.sha256(
                f"{d['scope']} {d['name']} {d['aggregation']} {d['raw_result'].page_identifier}".encode(
                    'utf-8')).hexdigest()
            if len(self.get_issues(issue_hash)) > 0:
                continue
            # BUGFIX: log message typo "crate" -> "create".
            logger.info("=====> About to create Azure DevOps issues")
            # Render the recorded browser commands as numbered repro steps.
            steps = []
            for i, cmd in enumerate(d['raw_result'].commands, 1):
                command = cmd['command']
                value = cmd["value"]
                target = cmd['target']
                action = "to" if value != "" else "on"
                text = f"*{command}* {value} {action} *{target}*"
                if command == "open":
                    text = f"*{command}* {action} {target}"
                steps.append(f"{i}. {text}")
            steps = "\n".join(steps)
            summary = f"{d['scope'].capitalize()} [{d['name']}] {d['aggregation']} value violates threshold rule for {test_name}"
            # BUGFIX: removed a stray '"' that leaked into the description text
            # after {test_name}.
            description = f"""Value {d['actual']} violates threshold rule: {d['scope']} [{d['name']}] {d['aggregation']}
{d['rule']} {d['expected']} for {test_name}
Steps:\n {steps}
*Issue Hash:* {issue_hash}
"""
            fields_mapping = {
                "/fields/System.Title": summary,
                "/fields/Microsoft.VSTS.Common.Priority": PRIORITY_MAPPING['High'],
                "/fields/System.Description": description,
                "/fields/System.AreaPath": self.team,
                "/fields/System.IterationPath": self.team
            }
            # JSON-Patch body: one "add" operation per non-empty field.
            body = []
            for key, value in fields_mapping.items():
                if value:
                    _piece = {"op": "add", "path": key, "value": value}
                    body.append(_piece)
            res = post(self.url, auth=self.auth, json=body,
                       headers={'content-type': 'application/json-patch+json'})
            logger.info(f"Azure DevOps issue {res.json()['id']} has been created")
def notify_azure_devops(test_name, threshold_results, args):
    """File ADO work items for threshold violations when the desired
    capabilities carry Azure DevOps credentials; otherwise do nothing."""
    caps = args['desired_capabilities']
    organization = caps.get('ado_organization', '')
    project = caps.get('ado_project', '')
    token = caps.get('ado_token', '')
    team = caps.get('ado_team', '')
    # Silently skip when any required credential piece is missing.
    if not (organization and project and token):
        return
    try:
        AdoClient(organization, project, token, team).create_issues(
            test_name, threshold_results["details"])
    except Exception as e:
        logger.error(f"Error during Azure DevOps ticket creation {e}")
|
nilq/baby-python
|
python
|
from terrascript import _resource
# One subclass of terrascript's `_resource` per Terraform Ignition provider
# resource type; each class is also bound to a short lower-case alias
# (the resource name with the `ignition_` prefix stripped).
class ignition_config(_resource): pass
config = ignition_config
class ignition_disk(_resource): pass
disk = ignition_disk
class ignition_raid(_resource): pass
raid = ignition_raid
class ignition_filesystem(_resource): pass
filesystem = ignition_filesystem
class ignition_file(_resource): pass
file = ignition_file
class ignition_directory(_resource): pass
directory = ignition_directory
class ignition_link(_resource): pass
link = ignition_link
class ignition_systemd_unit(_resource): pass
systemd_unit = ignition_systemd_unit
class ignition_networkd_unit(_resource): pass
networkd_unit = ignition_networkd_unit
class ignition_user(_resource): pass
user = ignition_user
class ignition_group(_resource): pass
group = ignition_group
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 20:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the EntradaContable model.
    # The FK field 'asiento' must be dropped before the model itself.
    dependencies = [
        ('Nomina', '0004_auto_20170406_2015'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='entradacontable',
            name='asiento',
        ),
        migrations.DeleteModel(
            name='EntradaContable',
        ),
    ]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
# URL routes for the Popbill easy-banking sample: bank-account management,
# statement collection jobs, search/summary, and membership/charge helpers.
urlpatterns = [
    # Index Page
    url(r'^$', views.index, name='index'),
    url(r'^registBankAccount$', views.registBankAccount, name='RegistBankAccount'),
    url(r'^updateBankAccount$', views.updateBankAccount, name='UpdateBankAccount'),
    url(r'^closeBankAccount$', views.closeBankAccount, name='CloseBankAccount'),
    url(r'^revokeCloseBankAccount$', views.revokeCloseBankAccount, name='RevokeCloseBankAccount'),
    url(r'^deleteBankAccount$', views.deleteBankAccount, name='DeleteBankAccount'),
    url(r'^getBankAccountInfo$', views.getBankAccountInfo, name='GetBankAccountInfo'),
    url(r'^getBankAccountMgtURL$', views.getBankAccountMgtURL, name='GetBankAccountMgtURL'),
    url(r'^listBankAccount$', views.listBankAccount, name='ListBankAccount'),
    url(r'^requestJob$', views.requestJob, name='RequestJob'),
    url(r'^getJobState$', views.getJobState, name='GetJobState'),
    url(r'^listActiveJob$', views.listActiveJob, name='ListActiveJob'),
    url(r'^search$', views.search, name='Search'),
    url(r'^summary$', views.summary, name='Summary'),
    url(r'^saveMemo$', views.saveMemo, name='SaveMemo'),
    url(r'^getFlatRatePopUpURL$', views.getFlatRatePopUpURL, name='GetFlatRatePopUpURL'),
    url(r'^getFlatRateState$', views.getFlatRateState, name='GetFlatRateState'),
    url(r'^getBalance$', views.getBalance, name='GetBalance'),
    url(r'^getChargeURL$', views.getChargeURL, name='GetChargeURL'),
    # NOTE(review): the patterns for GetPaymentURL, GetUseHistoryURL and
    # getAccessURL below lack the trailing '$' anchor used everywhere else,
    # so they also match longer paths -- confirm this is intended.
    url(r'^GetPaymentURL', views.getPaymentURL, name='GetPaymentURL'),
    url(r'^GetUseHistoryURL', views.getUseHistoryURL, name='GetUseHistoryURL'),
    url(r'^getPartnerBalance$', views.getPartnerBalance, name='GetPartnerBalance'),
    url(r'^getPartnerURL$', views.getPartnerURL, name='GetPartnerURL'),
    url(r'^getChargeInfo$', views.getChargeInfo, name='GetChargeInfo'),
    url(r'^getAccessURL', views.getAccessURL, name='GetAccessURL'),
    url(r'^checkIsMember$', views.checkIsMember, name='CheckIsMember'),
    url(r'^checkID$', views.checkID, name='CheckID'),
    url(r'^joinMember$', views.joinMember, name='JoinMember'),
    url(r'^getCorpInfo$', views.getCorpInfo, name='GetCorpInfo'),
    url(r'^updateCorpInfo$', views.updateCorpInfo, name='UpdateCorpInfo'),
    url(r'^registContact$', views.registContact, name='RegistContact'),
    url(r'^GetContactInfo$', views.getContactInfo, name='GetContactInfo'),
    url(r'^listContact$', views.listContact, name='ListContact'),
    url(r'^updateContact$', views.updateContact, name='UpdateContact'),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Utilities for filtering data.
"""
from fnmatch import fnmatch
def included_in(value, patterns):
    """Tell whether *value* matches at least one shell-style pattern.

    :param str value: Value to test.
    :param list patterns: ``fnmatch``-style patterns to test against.
    :return: True if any pattern matches, False otherwise.
    :rtype: bool
    """
    for candidate in patterns:
        if fnmatch(value, candidate):
            return True
    return False
def is_wanted(value, include, exclude):
    """Tell whether *value* passes the include/exclude pattern filter.

    A value is wanted when it matches some include pattern and matches
    no exclude pattern.

    :param str value: Value to test.
    :param list include: Patterns of values to keep.
    :param list exclude: Patterns of values to drop.
    :return: True if the value is wanted, False otherwise.
    :rtype: bool
    """
    if not included_in(value, include):
        return False
    return not included_in(value, exclude)
def filter_dict(data, include, exclude, joinchar='.'):
    """Filter a (possibly nested) dictionary by key-path patterns.

    Key paths are formed by joining nested keys with *joinchar* and are
    matched against the include/exclude pattern lists.

    :param dict data: Data to filter (dict or OrderedDict; type is kept).
    :param list include: Patterns of key paths to keep.
    :param list exclude: Patterns of key paths to drop.
    :param str joinchar: Separator used to build the key path.
    :return: The filtered dictionary.
    :rtype: dict or OrderedDict
    """
    assert isinstance(data, dict)

    def _prune(node, crumbs):
        # Non-dict leaves pass through untouched.
        if not isinstance(node, dict):
            return node
        kept = []
        for key, value in node.items():
            path = joinchar.join(crumbs + [key])
            if is_wanted(path, include, exclude):
                kept.append((key, _prune(value, crumbs + [key])))
        # Rebuild with the node's own class so OrderedDict stays OrderedDict.
        return node.__class__(kept)

    return _prune(data, [])
# Public API of this module.
__all__ = [
    'included_in',
    'is_wanted',
    'filter_dict',
]
|
nilq/baby-python
|
python
|
import os
import re
import torch
# Formatting strings (constant) -- shared file-name scheme for save and load.
save_format_str = "checkpoint{:08d}.pth"
save_re_string = r"checkpoint(\d{8}).pth"
assert re.match(save_re_string, save_format_str.format(0)) is not None

def save_checkpoint(model_list, save_dir, epoch, optimizer=None, lr_scheduler=None):
    """Serialize model(s), optional optimizer/scheduler state and the epoch
    number to ``<save_dir>/checkpoint<epoch:08d>.pth``."""
    state = {
        'model_states': [m.state_dict() for m in model_list],
        'optimizer_state': None if optimizer is None else optimizer.state_dict(),
        'epoch': epoch,
    }
    # The scheduler entry is only present when a scheduler is supplied.
    if lr_scheduler is not None:
        state['lr_scheduler'] = lr_scheduler.state_dict()
    target_path = os.path.join(save_dir, save_format_str.format(epoch))
    torch.save(state, target_path)
def load_checkpoint(model_list, save_dir, epoch=-1, load_to_device_name=None,
                    optimizer=None, lr_scheduler=None):
    """Restore model/optimizer/scheduler state from a checkpoint file.

    With ``epoch < 0`` the newest checkpoint in *save_dir* is used;
    returns the epoch to resume from (loaded epoch + 1), or 0 when no
    checkpoint file exists.
    """
    if epoch >= 0:
        # Explicit epoch requested: build the file name directly.
        full_path = os.path.join(save_dir, save_format_str.format(epoch))
        load_epoch = epoch
    else:
        # No epoch given: pick the latest checkpoint file in the directory.
        matches = [f for f in os.listdir(save_dir)
                   if re.match(save_re_string, f) is not None]
        if not matches:
            print("No save files found to load! Proceding with no loading")
            return 0
        newest = max(matches)  # zero-padded names sort chronologically
        load_epoch = int(re.match(save_re_string, newest).group(1))
        full_path = os.path.join(save_dir, newest)
    print("Loading checkpoint from: {}".format(full_path), flush=True)
    checkpoint = torch.load(full_path, map_location=load_to_device_name)
    states = checkpoint['model_states']
    assert len(states) == len(model_list), (len(states), len(model_list))
    for mdl, st in zip(model_list, states):
        mdl.load_state_dict(st)
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state'])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
    return load_epoch + 1
|
nilq/baby-python
|
python
|
# Purpose: Extract frames from video
import cv2
import os
import progressbar
import threading
class ExtractFrames:
    # Extracts every frame of a video into Images/Known/<person_name>/ as JPEGs.
    def __init__(self, video_path, person_name):
        self.video_path = video_path
        self.person_name = person_name
        # Ensure the per-person output directory exists.
        if not os.path.isdir(f"Images/Known/{str(person_name)}"):
            os.makedirs(f'Images/Known/{str(person_name)}')
    def extract(self):
        """Decode the video and write one JPEG per frame, showing a progress bar."""
        video = cv2.VideoCapture(self.video_path)
        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        print(f"Frame Count: {str(frame_count)}")
        bar = progressbar.ProgressBar(maxval=frame_count,
                                      widgets=[progressbar.Bar('⬛', '[', ']', '⬜'), ' ',
                                               progressbar.Percentage()]).start()
        index = 0
        while video.isOpened():
            ret, frame = video.read()
            if not ret:  # end of stream
                break
            # Output name: <video basename>_<frame index>.jpg
            cv2.imwrite(
                f"Images/Known/{self.person_name}/{os.path.basename(self.video_path).split('.')[0] + '_' + str(index)}.jpg", frame)
            index += 1
            bar.update(bar.currval + 1)
        bar.finish()
        video.release()
        cv2.destroyAllWindows()
# Example
if __name__ == "__main__":
    videos = os.listdir("Videos")
    # Bug fix: the original comprehension called .extract() directly (which
    # returns None) and then invoked .start() on those None values, raising
    # AttributeError. Wrap each extraction in a real threading.Thread instead.
    threads = [
        threading.Thread(
            target=ExtractFrames(f"Videos/{video}", "Olivia Rodrigo").extract)
        for video in videos
    ]
    for thread in threads:
        thread.start()
    # Wait for all extractions to finish before exiting.
    for thread in threads:
        thread.join()
|
nilq/baby-python
|
python
|
from pysnooper import snoop
from tools import *
import datetime
import binascii
import time
import nfc
#Toggle one person's check-in/check-out state in the database and announce it
def IO(ID: str, STATUS: str) -> None:
    # NOTE(review): every query below interpolates values via f-strings --
    # vulnerable to SQL injection; should use parameterized queries.
    conn=sql()
    cursor = conn.cursor()
    #Head-count of people currently "IN" before this toggle----------------------------
    cursor.execute(f"select count(*) from student_tb where {STATUS}='IN'")
    _num = cursor.fetchone()
    num_before = _num['count(*)']
    #Fetch everything registered for this ID-------------------------------------------
    cursor.execute(f"select * from student_tb where ID='{str(ID)}'")
    io = cursor.fetchone()
    #Flip this person's in/out state---------------------------------------------------
    if str(io[STATUS]) == "OUT": #was "OUT" -> switch to "IN"
        color, status_now = "good", "入室"
        cursor.execute(f"update student_tb set {STATUS}='IN' where ID='{str(ID)}'")
        conn.commit()
        cursor.close()
        conn.close()
        #Handle arriving in this room without having checked out of the other room
        ANOTHER_STATUS='STATUS_B' if STATUS=='STATUS_A' else 'STATUS_A'
        #If the other room still shows "IN", call IO once more to flip it to "OUT"
        #(a single nested call, not a loop)
        if str(io[ANOTHER_STATUS]) == "IN": #the other room's status is still "IN"
            IO(ID, ANOTHER_STATUS)
    else: #was "IN" -> switch to "OUT"
        color, status_now = "danger", "退室"
        cursor.execute(f"update student_tb set {STATUS}='OUT' where ID='{str(ID)}'")
        conn.commit()
        cursor.close()
        conn.close()
    #The recursive call above may have used the connection, so reopen a fresh one
    conn=sql()
    cursor = conn.cursor()
    #Look up the NICKNAME bound to this ID---------------------------------------------
    cursor.execute(f"select NICKNAME from student_tb where ID='{str(ID)}'")
    nickname = cursor.fetchone()['NICKNAME']
    #Head-count after the toggle-------------------------------------------------------
    cursor.execute(f"select count(*) from student_tb where {STATUS}='IN'")
    _num_after = cursor.fetchone()
    num_after = _num_after['count(*)']
    print(nickname)
    cursor.close()
    conn.close()
    #======================================================================================
    #The room was empty and one person entered: announce OPEN
    if num_before == 0 and num_after == 1: message(None, STATUS, status_now, dics[status_now])
    #Announce the current state
    message(color, STATUS, status_now, f"<{status_now}>: {nickname}\n現在 {num_after} 人です")
    #The room is now empty: announce CLOSE
    if num_after == 0: message(None, STATUS, status_now, dics[status_now])
#Read the holder's name and the card ID from a university student ID card
def scan_UNIV(target_res: nfc, clf: nfc) -> "tuple":
    tag = nfc.tag.activate_tt3(clf, target_res)
    service_code = [nfc.tag.tt3.ServiceCode(0x100B >> 6, 0x100B & 0x3f)]
    bc_univ_id = [nfc.tag.tt3.BlockCode(0)]
    bc_name = [nfc.tag.tt3.BlockCode(1)]
    name = tag.read_without_encryption(service_code, bc_name).decode() #pull the name off the card
    univ_id = tag.read_without_encryption(service_code, bc_univ_id).decode() #pull the card's own ID
    return name, univ_id
#Look up the internal ID bound to a student-card ID
def connected_UNIV(univ_id: str) -> str:
    ID=update_sql(f"select ID from student_tb where UNIV_ID='{univ_id}'")['ID']
    return ID
#Read the idm (card identifier) from a transit IC card
def scan_transport(target_res: nfc, clf: nfc) -> str:
    tag = nfc.tag.activate_tt3(clf, target_res)
    _idm = binascii.hexlify(tag.idm)
    idm=_idm.decode() #extract the idm as a hex string
    return idm
#Look up the internal ID bound to a transit IC card's idm
def connected_transport(idm: str) -> str:
    # A person may have registered the card in either of two slots; try both.
    try: return update_sql(f"select ID from student_tb where TRANSPORTATION_ID1='{idm}'")['ID']
    except: pass
    try: return update_sql(f"select ID from student_tb where TRANSPORTATION_ID2='{idm}'")['ID']
    except: return
#Debounce: decide whether this ID was scanned too recently to act on again
def process(ID:str, STATUS: str, latestID:str, latestTIME: datetime) -> "tuple":
    lag = datetime.datetime.now() - latestTIME
    #The same ID was seen within the last WAIT_TIME seconds: ignore the scan
    if ID==latestID and lag.total_seconds() < WAIT_TIME:
        #Show roughly how long until the next scan will be accepted
        print("Please wait "+str(int(WAIT_TIME-lag.total_seconds())+1)+" seconds")
        time.sleep(0.5)
        return latestID, latestTIME
    else: #Not a recent duplicate
        IO(ID, STATUS) #perform the check-in/check-out
        return ID, datetime.datetime.now()
#Register (or rename) a nickname using a student ID card
def regist_UNIV(name: str, univ_id: str) -> None:
    result="NULL"
    try:
        nickname=update_sql(f"select * from {DATA_TB}")['nickname']
        #Abort (via assert) if the nickname is already taken
        assert update_sql(f"select count(*) from student_tb where NICKNAME='{nickname}'")['count(*)']==0
        if update_sql(f"select count(*) from student_tb where UNIV_ID='{univ_id}'")['count(*)'] == 1:
            #The card is already registered: just change its NICKNAME
            update_sql(f"update student_tb set NICKNAME='{nickname}' where UNIV_ID='{univ_id}'")
            result='success'
        else:
            #First registration for this card
            number=update_sql("select max(ID) from student_tb")['max(ID)']+1 #first-time users get max(ID)+1 as their internal ID
            update_sql(f"insert into student_tb values('{number}', '{univ_id}', NULL, NULL, '{name}', '{nickname}', 'OUT', 'OUT')")
            result='fir_suc'
    except: result='failure'
    finally:
        #Publish the outcome and raise the flag so the frontend can pick it up
        update_sql(f"update {DATA_TB} set result='{result}'")
        update_sql(f"update {DATA_TB} set flag='1'")
        print(result)
#Register (or rename) a nickname using a transit IC card
def regist_transportation(idm: str) -> None:
    result="NULL"
    #If this card was never registered, bind its idm to the entered nickname;
    #if it was registered before, rename that entry to the entered nickname
    try:
        nickname=update_sql(f"select * from {DATA_TB}")['nickname']
        #How many transit IC cards this nickname already has registered
        count0=int(update_sql(f"select count(TRANSPORTATION_ID1) from student_tb where NICKNAME='{nickname}'")['count(TRANSPORTATION_ID1)'])+ \
            int(update_sql(f"select count(TRANSPORTATION_ID2) from student_tb where NICKNAME='{nickname}'")['count(TRANSPORTATION_ID2)'])
        #Whether this idm is already stored in either card slot
        count1=update_sql(f"select count(*) from student_tb where TRANSPORTATION_ID1='{idm}'")['count(*)']
        count2=update_sql(f"select count(*) from student_tb where TRANSPORTATION_ID2='{idm}'")['count(*)']
        if count0==0 and count1==0 and count2==0:
            #This nickname has no card yet and the idm is unknown:
            #store the idm in the first card slot
            update_sql(f"update student_tb set TRANSPORTATION_ID1='{idm}' where NICKNAME='{nickname}'")
        elif count0==1 and count1==0 and count2==0:
            #This nickname already has one card and the idm is unknown:
            #store the idm in the second card slot
            update_sql(f"update student_tb set TRANSPORTATION_ID2='{idm}' where NICKNAME='{nickname}'")
        else: #Otherwise rename the entry bound to this idm
            #Abort (via assert) if the nickname is already taken
            assert update_sql(f"select count(*) from student_tb where NICKNAME='{nickname}'")['count(*)'] == 0
            try: update_sql(f"update student_tb set NICKNAME='{nickname}' where TRANSPORTATION_ID1='{idm}'")
            except: pass
            try: update_sql(f"update student_tb set NICKNAME='{nickname}' where TRANSPORTATION_ID2='{idm}'")
            except: raise
        result='success'
    except: result='failure'
    finally:
        #Publish the outcome and raise the flag so the frontend can pick it up
        update_sql(f"update {DATA_TB} set result='{result}'")
        update_sql(f"update {DATA_TB} set flag='1'")
        print(result)
#@snoop()
#Main reader loop: poll the NFC reader and dispatch to attendance or registration
def Read(clf: nfc, STATUS: str) -> None:
    latestID = "0"
    latestTIME = datetime.datetime.now()
    while True:
        #Poll for a student ID card (FeliCa, 212 kbps)
        target_req = nfc.clf.RemoteTarget("212F")
        target_res = clf.sense(target_req, iterations=1, interval=0.01)
        #Restrict the next poll to transit IC cards; this also lets
        #iPhone Suica/Pasmo respond
        target_req.sensf_req = bytearray.fromhex("0000030000")
        if not target_res is None: #a student ID card was read
            try:
                name, univ_id=scan_UNIV(target_res, clf)
                #Attendance mode
                if update_sql(f'select * from {DATA_TB}')['flag']=="1":
                    ID=connected_UNIV(univ_id) #map the card ID to the internal ID
                    latestID, latestTIME=process(ID, STATUS, latestID, latestTIME)
                else: #Registration mode
                    regist_UNIV(name, univ_id) #register the card ID + name, or change the nickname
                    time.sleep(2.0)
            #except Exception as e: print(e)
            except: pass
        else: #a transit IC card may be present, or nothing was read
            target_res = clf.sense(target_req, iterations=30, interval=0.01)
            try:
                #Read the transit IC card; on failure, fall back to the top of the loop
                idm=scan_transport(target_res, clf)
                #Attendance mode
                if update_sql(f'select * from {DATA_TB}')['flag']=="1":
                    ID=connected_transport(idm) #map the idm to the internal ID
                    latestID, latestTIME=process(ID, STATUS, latestID, latestTIME)
                else: #Registration mode
                    regist_transportation(idm) #register the idm, or change the nickname
                    time.sleep(2.0)
            # except Exception as e: print(e)
            except: pass
if __name__ == "__main__":
    #Start the card-reader system=============
    #NOTE(review): `usb` and `STATUS` are not defined in this file -- they
    #presumably come from the `tools` star import; confirm.
    print('===== I\'M READY =====')
    with nfc.ContactlessFrontend(usb) as clf:
        Read(clf, STATUS)
|
nilq/baby-python
|
python
|
import yaml
import os
import time
import re
from my_devices import nxos1, nxos2
from netmiko import ConnectHandler
from ciscoconfparse import CiscoConfParse
from jinja2 import FileSystemLoader, StrictUndefined, Template
from jinja2.environment import Environment
# Jinja2 environment that raises on any undefined template variable.
env = Environment(undefined=StrictUndefined)
#env.loader = FileSystemLoader("C://Users//John Celani//Documents//Scripts//Python Class//Week5")
env.loader = FileSystemLoader(".")
# Per-device template variables for a BGP peering (AS 22) over Ethernet1/1.
sw1_vars = {
    "hostname" : "!_nxos1",
    "int" : "Ethernet1/1",
    "ip_add" : "10.1.100.1",
    "ip_subnet" : "24",
    "local_as" : 22,
    "remote_ip" : "10.1.100.2",
    "remote_as" : 22,
}
sw2_vars = {
    "hostname" : "!_nxos2",
    "int" : "Ethernet1/1",
    "ip_add" : "10.1.100.2",
    "ip_subnet" : "24",
    "local_as" : 22,
    "remote_ip" : "10.1.100.1",
    "remote_as" : 22,
}
template_file = 'exercise_2_2.j2'
nxos1["j2_vars"] = sw1_vars
nxos2["j2_vars"] = sw2_vars
# Render the template per device and push the resulting config via Netmiko.
for device in (nxos1, nxos2):
    temp_dict = device.copy()
    # Pop the template vars so the remainder is a clean ConnectHandler kwargs dict.
    j2_vars_temp = temp_dict.pop("j2_vars")
    template = env.get_template(template_file)
    temp_config = template.render(**j2_vars_temp)
    configs = [temp_config.strip() for temp_config in temp_config.splitlines()]
    netconnect = ConnectHandler(**temp_dict)
    device["net_conn"] = netconnect
    print(f"Sending Configurations to {netconnect.find_prompt()}")
    output = netconnect.send_config_set(configs)
    print("Completed")
    print()
print("Waiting 15s for BGP to Converge")
print()
time.sleep(15)
print("Testing BGP and Connectivity")
print()
# Verify the BGP session established and the peer answers ping.
for device in (nxos1, nxos2):
    remote_ip = device["j2_vars"]["remote_ip"]
    netconnect = device["net_conn"]
    local_ip = device["host"]
    print(f"Checking BGP Connectivity on {local_ip} to {remote_ip}")
    bgpoutput = netconnect.send_command(f"show ip bgp summary | include {remote_ip}")
    # Grab the last whitespace-separated field of the neighbor line; it parses
    # as an int (prefix count) only when the session is Established -- assumes
    # standard NX-OS 'show ip bgp summary' formatting.
    match = re.search(r"\s+(\S+)\s*$", bgpoutput)
    prefix_received = match.group(1)
    try:
        int(prefix_received)
        print( f"{local_ip} BGP Reached Established state with {remote_ip}")
    except ValueError:
        print(f"{local_ip} BGP failed to reach established state with {remote_ip}")
    print()
    print(f"Testing connectivity from {local_ip} to {remote_ip}")
    pingoutput = netconnect.send_command(f"ping {remote_ip}", delay_factor=5)
    if "64 bytes from" not in pingoutput:
        print(f"Failed ping test to {remote_ip}")
    else:
        print(f"Conenctivity between {local_ip} to {remote_ip} succesful")
    print()
# Tear down the SSH sessions.
for device in (nxos1, nxos2):
    netconnect = device["net_conn"]
    netconnect.disconnect()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import ipaddress
from dnsdb_common.library.exception import BadParam
from dnsdb_common.library.utils import format_ip
from . import commit_on_success
from . import db
from .models import DnsColo
from .models import DnsRecord
from .models import IpPool
from .models import Subnets
class SubnetIpDal(object):
    """Data-access layer for subnets (``Subnets``) and their IP pool (``IpPool``)."""
    @staticmethod
    def get_colo_by_group(group):
        # All colo names in a colo group, sorted by name.
        return [record.colo_name
                for record in
                db.session.query(DnsColo.colo_name).filter_by(colo_group=group).order_by(DnsColo.colo_name)]
    @staticmethod
    def list_region(**condition):
        # List serialized subnets, optionally filtered by exact-match columns.
        q = Subnets.query
        if condition:
            q = q.filter_by(**condition)
        return [item.json_serialize() for item in q.order_by(Subnets.region_name, Subnets.subnet)]
    @staticmethod
    def get_region_by_ip(ip):
        """Return the serialized subnet record that owns *ip*; raise BadParam if unknown."""
        ip, _ = format_ip(ip)
        record = IpPool.query.filter_by(fixed_ip=ip).first()
        if not record:
            raise BadParam('no such ip: %s' % ip, msg_ch=u'没有对应的ip记录')
        return SubnetIpDal.get_region_by_name(record.region)
    @staticmethod
    def get_region_by_name(region):
        """Return the serialized subnet record named *region*; raise BadParam if unknown."""
        record = Subnets.query.filter_by(region_name=region).first()
        if not record:
            raise BadParam('no such subnet with region_name: %s' % region, msg_ch=u'没有对应的网段记录')
        return record.json_serialize()
    @staticmethod
    def get_region_by_name_like(region):
        # Substring search over region names.
        region = '%{}%'.format(region)
        records = Subnets.query.filter(Subnets.region_name.like(region))
        return [record.json_serialize() for record in records]
    @staticmethod
    def is_intranet_region(region):
        # True when the named subnet is a private (intranet) range.
        record = Subnets.query.filter_by(region_name=region).first()
        if not record:
            raise BadParam('no such subnet with region_name: %s' % region, msg_ch=u'没有对应的网段记录')
        return record.intranet
    @staticmethod
    def is_ip_exist(record):
        return IpPool.query.filter_by(fixed_ip=record).first() is not None
    @staticmethod
    def get_subnet_ip(region):
        # Every pool IP of the region joined with its DNS record (if any);
        # 'domain' is None for unallocated addresses.
        records = IpPool.query.outerjoin(DnsRecord, DnsRecord.record == IpPool.fixed_ip).add_columns(
            IpPool.fixed_ip, IpPool.allocated,
            DnsRecord.domain_name).filter(IpPool.region == region).order_by(IpPool.fixed_ip)
        result = [{"ip": item.fixed_ip, "domain": item.domain_name} for item in records]
        return result
    @staticmethod
    def add_subnet(subnet, region, colo, comment, username):
        """Create a subnet record plus one IpPool row per usable host address.

        Network and broadcast addresses are excluded from the pool.
        """
        subnet = ipaddress.ip_network(subnet)
        intranet = subnet.is_private
        net_id = subnet.network_address
        broadcast_ip = subnet.broadcast_address
        is_ipv6 = (subnet.version == 6)
        ips_dict_list = []
        for i in subnet:
            if i == net_id or i == broadcast_ip:
                continue
            ips_dict_list.append({
                'region': region,
                'fixed_ip': str(i),
                'is_ipv6': is_ipv6
            })
        if Subnets.query.filter_by(region_name=region).first():
            raise BadParam('region already exist', msg_ch='网段名已存在')
        try:
            # Insert the subnet and its whole IP pool atomically.
            with db.session.begin(subtransactions=True):
                subnet_item = Subnets(
                    region_name=region,
                    subnet=str(subnet),
                    create_user=username,
                    intranet=intranet,
                    colo=colo,
                    is_ipv6=is_ipv6
                )
                if comment:
                    subnet_item.comment = comment
                db.session.add(subnet_item)
                db.session.bulk_insert_mappings(IpPool, ips_dict_list)
        except Exception:
            raise BadParam('Ip conflict with other regions', msg_ch=u'和已有的网段有交叉,请检查后重试')
    @staticmethod
    @commit_on_success
    def delete_subnet(subnet, region):
        """Delete a subnet and its pool; refuse while any IP still has a DNS record."""
        record = Subnets.query.filter_by(region_name=region, subnet=subnet).first()
        if not record:
            raise BadParam('Region does not exist: %s' % region, msg_ch=u'网段不存在')
        # Delete one region
        ip_records = SubnetIpDal.get_subnet_ip(region)
        if list(filter(lambda x: x['domain'], ip_records)):
            raise BadParam('Region %s has records,delete failed!' % region, msg_ch=u'网段正在使用中,不允许删除')
        Subnets.query.filter_by(region_name=region, subnet=subnet).delete()
        IpPool.query.filter_by(region=region).delete()
    @staticmethod
    @commit_on_success
    def rename_subnet(old_region, new_region, username):
        """Rename a region in both Subnets and IpPool; the new name must be unused."""
        if Subnets.query.filter_by(region_name=new_region).first():
            raise BadParam("Region %s existed, rename %s failed" % (new_region, old_region),
                           msg_ch=u'%s已经存在' % new_region)
        if not Subnets.query.filter_by(region_name=old_region).first():
            raise BadParam("Region %s does not existed, rename failed" % old_region,
                           msg_ch=u'%s不存在' % old_region)
        Subnets.query.filter(Subnets.region_name == old_region).update({
            "region_name": new_region
        })
        IpPool.query.filter(IpPool.region == old_region).update({
            'region': new_region
        })
    @staticmethod
    def get_subnets_by_condition(**kwargs):
        # Raw Subnets rows (not serialized), optionally filtered by exact match.
        session = db.session
        query = session.query(Subnets)
        if kwargs:
            query = query.filter_by(**kwargs)
        return query.order_by(Subnets.region_name, Subnets.subnet).all()
    @staticmethod
    def bulk_update_subnet(update_mapping):
        # Bulk-apply a list of {column: value, ...} mappings to Subnets rows.
        session = db.session
        with session.begin(subtransactions=True):
            session.bulk_update_mappings(Subnets, update_mapping)
|
nilq/baby-python
|
python
|
# activity/urls.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.conf.urls import url
from . import views_admin
# All admin routes are currently disabled (commented out), so this module
# contributes no URLs until they are re-enabled.
urlpatterns = [
    # url(r'^$', views_admin.batches_home_view, name='batches_home',),
    # url(r'^batch_action_list/$', views_admin.batch_action_list_view, name='batch_action_list'),
    # url(r'^batch_list/$', views_admin.batch_list_view, name='batch_list'),
    # url(r'^batch_list_process/$', views_admin.batch_list_process_view, name='batch_list_process'),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
.. module:: openzwave.network
This file is part of **python-openzwave** project https://github.com/OpenZWave/python-openzwave.
:platform: Unix, Windows, MacOS X
:sinopsis: openzwave API
.. moduleauthor: bibi21000 aka Sébastien GALLET <bibi21000@gmail.com>
License : GPL(v3)
**python-openzwave** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**python-openzwave** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-openzwave. If not, see http://www.gnu.org/licenses.
"""
import os
#from collections import namedtuple
import time
import sys
import six
if six.PY3:
from pydispatch import dispatcher
else:
from louie import dispatcher
import threading
import libopenzwave
import openzwave
from openzwave.object import ZWaveException, ZWaveTypeException, ZWaveObject
from openzwave.controller import ZWaveController
from openzwave.node import ZWaveNode
from openzwave.option import ZWaveOption
from openzwave.scene import ZWaveScene
from openzwave.singleton import Singleton
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Fallback for Python 2.6, which lacks logging.NullHandler.
    class NullHandler(logging.Handler):
        """NullHandler logger for python 2.6"""
        def emit(self, record):
            pass
logger = logging.getLogger('openzwave')
# Attach a no-op handler so applications without logging configured do not
# see "No handler found" warnings from this library.
logger.addHandler(NullHandler())
try:
    import sqlite3 as lite
except ImportError:
    logger.warning('pysqlite is not installed')
class ZWaveNetwork(ZWaveObject):
"""
The network object = homeid.
It contains a reference to the manager and the controller.
It dispatches the following louie signals :
* SIGNAL_NETWORK_FAILED = 'NetworkFailed'
* SIGNAL_NETWORK_STARTED = 'NetworkStarted'
* SIGNAL_NETWORK_READY = 'NetworkReady'
* SIGNAL_NETWORK_STOPPED = 'NetworkStopped'
* SIGNAL_NETWORK_RESETTED = 'DriverResetted'
* SIGNAL_NETWORK_AWAKED = 'DriverAwaked'
* SIGNAL_DRIVER_FAILED = 'DriverFailed'
* SIGNAL_DRIVER_READY = 'DriverReady'
* SIGNAL_DRIVER_RESET = 'DriverReset'
* SIGNAL_DRIVER_REMOVED = 'DriverRemoved'
* SIGNAL_NODE_ADDED = 'NodeAdded'
* SIGNAL_NODE_EVENT = 'NodeEvent'
* SIGNAL_NODE_NAMING = 'NodeNaming'
* SIGNAL_NODE_NEW = 'NodeNew'
* SIGNAL_NODE_PROTOCOL_INFO = 'NodeProtocolInfo'
* SIGNAL_NODE_READY = 'NodeReady'
* SIGNAL_NODE_REMOVED = 'NodeRemoved'
* SIGNAL_SCENE_EVENT = 'SceneEvent'
* SIGNAL_VALUE_ADDED = 'ValueAdded'
* SIGNAL_VALUE_CHANGED = 'ValueChanged'
* SIGNAL_VALUE_REFRESHED = 'ValueRefreshed'
* SIGNAL_VALUE_REMOVED = 'ValueRemoved'
* SIGNAL_POLLING_ENABLED = 'PollingEnabled'
* SIGNAL_POLLING_DISABLED = 'PollingDisabled'
* SIGNAL_CREATE_BUTTON = 'CreateButton'
* SIGNAL_DELETE_BUTTON = 'DeleteButton'
* SIGNAL_BUTTON_ON = 'ButtonOn'
* SIGNAL_BUTTON_OFF = 'ButtonOff'
* SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = 'EssentialNodeQueriesComplete'
* SIGNAL_NODE_QUERIES_COMPLETE = 'NodeQueriesComplete'
* SIGNAL_AWAKE_NODES_QUERIED = 'AwakeNodesQueried'
* SIGNAL_ALL_NODES_QUERIED = 'AllNodesQueried'
* SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = 'AllNodesQueriedSomeDead'
* SIGNAL_MSG_COMPLETE = 'MsgComplete'
* SIGNAL_ERROR = 'Error'
* SIGNAL_NOTIFICATION = 'Notification'
* SIGNAL_CONTROLLER_COMMAND = 'ControllerCommand'
* SIGNAL_CONTROLLER_WAITING = 'ControllerWaiting'
The table presented below sets notifications in the order they might typically be received,
and grouped into a few logically related categories. Of course, given the variety
of ZWave controllers, devices and network configurations the actual sequence will vary (somewhat).
The descriptions below the notification name (in square brackets) identify whether the
notification is always sent (unless there’s a significant error in the network or software)
or potentially sent during the execution sequence.
Driver Initialization Notification
The notification below is sent when OpenZWave has successfully connected
to a physical ZWave controller.
* DriverReady
[always sent] Sent when the driver (representing a connection between OpenZWave
and a Z-Wave controller attached to the specified serial (or HID) port) has been initialized.
At the time this notification is sent, only certain information about the controller itself is known:
* Controller Z-Wave version
* Network HomeID
* Controller capabilities
* Controller Application Version & Manufacturer/Product ID
* Nodes included in the network
* DriverRemoved
[always sent (either due to Error or by request)] The Driver is being removed.
Do Not Call Any Driver Related Methods after receiving this
Node Initialization Notifications
As OpenZWave starts, it identifies and reads information about each node in the network.
The following notifications may be sent during the initialization process.
* NodeNew
[potentially sent] Sent when a new node has been identified as part of the Z-Wave network.
It is not sent if the node was identified in a prior execution of the OpenZWave library
and stored in the zwcfg*.xml file.
At the time this notification is sent, very little is known about the node itself...
only that it is new to OpenZWave. This message is sent once for each new node identified.
* NodeAdded
[always sent (for each node associated with the controller)]
Sent when a node has been added to OpenZWave’s set of nodes. It can be
triggered either as the zwcfg*.xml file is being read, when a new node
is found on startup (see NodeNew notification above), or if a new node
is included in the network while OpenZWave is running.
As with NodeNew, very little is known about the node at the time the
notification is sent…just the fact that a new node has been identified
and its assigned NodeID.
* NodeProtocolInfo
[potentially sent] Sent after a node’s protocol information has been
successfully read from the controller.
At the time this notification is sent, only certain information about the node is known:
* Whether it is a “listening” or “sleeping” device
* Whether the node is capable of routing messages
* Maximum baud rate for communication
* Version number
* Security byte
NodeNaming
[potentially sent] Sent when a node’s name has been set or changed
(although it may be “set” to “” or NULL).
* ValueAdded
[potentially sent] Sent when a new value has been associated with the node.
At the time this notification is sent, the new value may or may not
have “live” data associated with it. It may be populated, but it may
alternatively just be a placeholder for a value that has not been read
at the time the notification is sent.
* NodeQueriesComplete
[always sent (for each node associated with the controller that has been successfully queried)] Sent when a node’s values and attributes have been fully queried. At the time this notification is sent, the node’s information has been fully read at least once. So this notification might trigger “full” display of the node’s information, values, etc. If this notification is not sent, it indicates that there has been a problem initializing the device. The most common issue is that the node is a “sleeping” device. The NodeQueriesComplete notification will be sent when the node wakes up and the query process completes.
Initialization Complete Notifications
As indicated above, when OpenZWave starts it reads certain information
from a file, from the controller and from the network. The following
notifications identify when this initialization/querying process is complete.
* AwakeNodesQueried
[always sent] Sent when all “listening” -always-on-devices have been
queried successfully. It also indicates, by implication, that there
are some “sleeping” nodes that will not complete their queries until
they wake up. This notification should be sent relatively quickly
after start-up. (Of course, it depends on the number of devices on
the ZWave network and whether there are any messages that “time out”
without a proper response.)
* AllNodesQueried
[potentially sent] Sent when all nodes have been successfully queried.
This notification should be sent relatively quickly if there are
no “sleeping” nodes. But it might be sent quite a while after start-up
if there are sleeping nodes and at least one of these nodes has a long “wake-up” interval.
Other Notifications
In addition to the notifications described above, which are primarily
“initialization” notifications that are sent during program start-up,
the following notifications may be sent as a result of user actions,
external program control, etc.
* ValueChanged : Sent when a value associated with a node has changed. Receipt of this notification indicates that it may be a good time to read the new value and display or otherwise process it accordingly.
* ValueRemoved : Sent when a value associated with a node has been removed.
* Group : Sent when a node’s group association has changed.
* NodeRemoved : Sent when a node has been removed from the ZWave network.
* NodeEvent : Sent when a node sends a Basic_Set command to the controller. This notification can be generated by certain sensors, for example, motion detectors, to indicate that an event has been sensed.
* PollingEnabled : Sent when node/value polling has been enabled.
* PollingDisabled : Sent when node/value polling has been disabled.
* DriverReset : Sent to indicate when a controller has been reset. This notification is intended to replace the potentially hundreds of notifications representing each value and node removed from the network.
About the use of louie signals :
For network, python-openzwave send the following louie signal :
SIGNAL_NETWORK_FAILED : the driver has failed to start.
SIGNAL_NETWORK_STARTED : the driver is ready, but network is not available.
SIGNAL_NETWORK_AWAKED : all awake nodes are queried. Some sleeping nodes may be missing.
SIGNAL_NETWORK_READY : all nodes are queried. Network is fully functional.
SIGNAL_NETWORK_RESETTED : the network has been reset. It will start again.
SIGNAL_NETWORK_STOPPED : the network has been stopped.
Deprecated : SIGNAL_DRIVER_* shouldn't be used anymore.
"""
# --- Louie signal names ---------------------------------------------------
# Network-level lifecycle signals (see the class docstring for semantics).
# Note: the *_RESETTED and *_AWAKED network signals reuse the legacy
# 'Driver...' string values for backward compatibility.
SIGNAL_NETWORK_FAILED = 'NetworkFailed'
SIGNAL_NETWORK_STARTED = 'NetworkStarted'
SIGNAL_NETWORK_READY = 'NetworkReady'
SIGNAL_NETWORK_STOPPED = 'NetworkStopped'
SIGNAL_NETWORK_RESETTED = 'DriverResetted'
SIGNAL_NETWORK_AWAKED = 'DriverAwaked'
# Deprecated driver-level signals (see class docstring: SIGNAL_DRIVER_*
# shouldn't be used anymore).
SIGNAL_DRIVER_FAILED = 'DriverFailed'
SIGNAL_DRIVER_READY = 'DriverReady'
SIGNAL_DRIVER_RESET = 'DriverReset'
SIGNAL_DRIVER_REMOVED = 'DriverRemoved'
# Node / value / controller event signals forwarded from OpenZWave
# notifications (dispatched from zwcallback).
SIGNAL_GROUP = 'Group'
SIGNAL_NODE = 'Node'
SIGNAL_NODE_ADDED = 'NodeAdded'
SIGNAL_NODE_EVENT = 'NodeEvent'
SIGNAL_NODE_NAMING = 'NodeNaming'
SIGNAL_NODE_NEW = 'NodeNew'
SIGNAL_NODE_PROTOCOL_INFO = 'NodeProtocolInfo'
SIGNAL_NODE_READY = 'NodeReady'
SIGNAL_NODE_REMOVED = 'NodeRemoved'
SIGNAL_SCENE_EVENT = 'SceneEvent'
SIGNAL_VALUE = 'Value'
SIGNAL_VALUE_ADDED = 'ValueAdded'
SIGNAL_VALUE_CHANGED = 'ValueChanged'
SIGNAL_VALUE_REFRESHED = 'ValueRefreshed'
SIGNAL_VALUE_REMOVED = 'ValueRemoved'
SIGNAL_POLLING_ENABLED = 'PollingEnabled'
SIGNAL_POLLING_DISABLED = 'PollingDisabled'
SIGNAL_CREATE_BUTTON = 'CreateButton'
SIGNAL_DELETE_BUTTON = 'DeleteButton'
SIGNAL_BUTTON_ON = 'ButtonOn'
SIGNAL_BUTTON_OFF = 'ButtonOff'
SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = 'EssentialNodeQueriesComplete'
SIGNAL_NODE_QUERIES_COMPLETE = 'NodeQueriesComplete'
SIGNAL_AWAKE_NODES_QUERIED = 'AwakeNodesQueried'
SIGNAL_ALL_NODES_QUERIED = 'AllNodesQueried'
SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = 'AllNodesQueriedSomeDead'
SIGNAL_MSG_COMPLETE = 'MsgComplete'
SIGNAL_NOTIFICATION = 'Notification'
SIGNAL_CONTROLLER_COMMAND = 'ControllerCommand'
SIGNAL_CONTROLLER_WAITING = 'ControllerWaiting'
# Network state machine values. Only the ordering is guaranteed (see the
# state property docstring); the numeric values may change in the future.
STATE_STOPPED = 0
STATE_FAILED = 1
STATE_RESETTED = 3
STATE_STARTED = 5
STATE_AWAKED = 7
STATE_READY = 10
# NOTE(review): presumably consumed by the louie/pydispatcher machinery to
# ignore duplicate connections of the same receiver — confirm.
ignoreSubsequent = True
def __init__(self, options, log=None, autostart=True, kvals=True):
    """
    Initialize the Z-Wave network object.

    Creates the controller wrapper and the underlying openzwave manager,
    optionally opens the kvals sqlite database, and (by default) starts
    the network immediately.

    :param options: Options to use with manager
    :type options: ZWaveOption
    :param log: A log file (not used. Deprecated)
    :type log:
    :param autostart: should we start the network.
    :type autostart: bool
    :param kvals: Enable kvals (use pysqlite)
    :type kvals: bool
    """
    logger.debug("Create network object.")
    self.log = log
    self._options = options
    ZWaveObject.__init__(self, None, self)
    self._controller = ZWaveController(1, self, options)
    self._manager = libopenzwave.PyManager()
    self._manager.create()
    self._state = self.STATE_STOPPED
    # Assigning None routes through the nodes.setter, which installs an
    # empty dict.
    self.nodes = None
    self._semaphore_nodes = threading.Semaphore()
    self._id_separator = '.'
    self.network_event = threading.Event()
    self.dbcon = None
    # Idiomatic truthiness test instead of the old "kvals == True"
    # comparison (PEP 8 / E712); accepts any truthy value.
    if kvals:
        try:
            self.dbcon = lite.connect(os.path.join(self._options.user_path, 'pyozw.sqlite'), check_same_thread=False)
            cur = self.dbcon.cursor()
            version = cur.execute('SELECT SQLITE_VERSION()').fetchone()
            logger.debug("Use sqlite version : %s", version)
            self._check_db_tables()
        except lite.Error as e:
            logger.warning("Can't connect to sqlite database : kvals are disabled - %s", e.args[0])
    self._started = False
    if autostart:
        self.start()
def __str__(self):
    """
    The string representation of the network.

    :rtype: str
    """
    return u'home_id: [{0}] controller: [{1}]'.format(
        self.home_id_str, self.controller)
def _check_db_tables(self):
    """
    Ensure that a kvals table exists for each persistable class.

    :returns: True if operation succeed. False otherwise
    :rtype: boolean
    """
    if self.dbcon is None:
        return False
    cur = self.dbcon.cursor()
    tables = ('ZWaveOption', 'ZWaveOptionSingleton', 'ZWaveNetwork',
              'ZWaveNetworkSingleton', 'ZWaveNode', 'ZWaveController',
              'ZWaveValue')
    for table_name in tables:
        cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
        # Create the table only when it is missing; names come from the
        # fixed tuple above, never from user input.
        if cur.fetchone() is None:
            cur.execute("CREATE TABLE %s(object_id INT, key TEXT, value TEXT)" % table_name)
    return True
def start(self):
    """
    Start the network object :
    - add a watcher
    - add a driver

    Idempotent: does nothing if the network is already started.
    """
    # Truthiness test instead of "== True" comparison (PEP 8 / E712).
    if self._started:
        return
    logger.info(u"Start Openzwave network.")
    self._manager.addWatcher(self.zwcallback)
    self._manager.addDriver(self._options.device)
    self._started = True
def stop(self, fire=True):
    """
    Stop the network object.
    - remove the watcher
    - remove the driver
    - clear the nodes

    .. code-block:: python

        dispatcher.send(self.SIGNAL_NETWORK_STOPPED, **{'network': self})

    :param fire: send the SIGNAL_NETWORK_STOPPED signal when done
    :type fire: bool
    """
    if not self._started:
        return
    logger.info(u"Stop Openzwave network.")

    def _wait_event():
        # Give pending work up to 1 second to settle. gevent raises
        # AssertionError when blocking inside the event loop callback;
        # that is expected and ignored.
        try:
            self.network_event.wait(1.0)
        except AssertionError:
            pass

    if self.controller is not None:
        self.controller.stop()
    self.write_config()
    try:
        self._semaphore_nodes.acquire()
        self._manager.removeWatcher(self.zwcallback)
        _wait_event()
        self._manager.removeDriver(self._options.device)
        _wait_event()
        # Wait up to ~60s for the controller send queue to drain.
        for _ in range(60):
            if self.controller.send_queue_count <= 0:
                break
            _wait_event()
        self.nodes = None
    except Exception:
        # The old bare "except:" also swallowed KeyboardInterrupt, and the
        # log message had a dangling "%s" with no argument.
        logger.exception(u'Stop network')
    finally:
        self._semaphore_nodes.release()
    self._started = False
    self._state = self.STATE_STOPPED
    _wait_event()
    if fire:
        dispatcher.send(self.SIGNAL_NETWORK_STOPPED, **{'network': self})
def destroy(self):
    """
    Destroy the network and all related resources.

    Commits and closes the kvals database connection (if any), destroys
    the underlying manager and options objects, and drops the references.
    """
    if self.dbcon is not None:
        # Flush any pending kvals writes before closing the connection.
        self.dbcon.commit()
        self.dbcon.close()
    self._manager.destroy()
    self._options.destroy()
    self._manager = None
    self._options = None
@property
def home_id(self):
    """
    The home_id of the network (0 while it is not known yet).

    :rtype: int
    """
    return 0 if self._object_id is None else self._object_id
@home_id.setter
def home_id(self, value):
    """
    Set the home_id of the network.

    :param value: new home_id
    :type value: int
    """
    self._object_id = value
@property
def home_id_str(self):
    """
    The home_id of the network as an 8-digit zero-padded hex string.

    :rtype: str
    """
    return "0x{0:08x}".format(self.home_id)
@property
def is_ready(self):
    """
    Says if the network is ready for operations.

    True once the state has reached STATE_READY (all nodes queried).

    :rtype: bool
    """
    return self._state >= self.STATE_READY
@property
def state(self):
    """
    The state of the network. Values may be changed in the future,
    only order is important.
    You can safely ask node information when state >= STATE_READY

    * STATE_STOPPED = 0
    * STATE_FAILED = 1
    * STATE_RESETTED = 3
    * STATE_STARTED = 5
    * STATE_AWAKED = 7
    * STATE_READY = 10

    :rtype: int
    """
    return self._state
@state.setter
def state(self, value):
    """
    Set the state of the network. Values may be changed in the future,
    only order is important.

    * STATE_STOPPED = 0
    * STATE_FAILED = 1
    * STATE_RESETTED = 3
    * STATE_STARTED = 5
    * STATE_AWAKED = 7
    * STATE_READY = 10

    :param value: new state
    :type value: int
    """
    self._state = value
@property
def state_str(self):
    """
    A human-readable label for the current network state.
    You can safely ask node informations when state >= STATE_AWAKED

    :rtype: str
    """
    labels = {
        self.STATE_STOPPED: "Network is stopped",
        self.STATE_FAILED: "Driver failed",
        self.STATE_STARTED: "Driver initialised",
        self.STATE_RESETTED: "Driver is reset",
        self.STATE_AWAKED: "Topology loaded",
        self.STATE_READY: "Network ready",
    }
    return labels.get(self._state, "Unknown state")
@property
def manager(self):
    """
    The manager to use to communicate with the lib c++.

    :raises ZWaveException: when the manager is not initialised
    :rtype: ZWaveManager
    """
    if self._manager is None:
        raise ZWaveException(u"Manager not initialised")
    return self._manager
@property
def controller(self):
    """
    The controller of the network.

    :raises ZWaveException: when the controller is not initialised
    :return: The controller of the network
    :rtype: ZWaveController
    """
    if self._controller is None:
        raise ZWaveException(u"Controller not initialised")
    return self._controller
@property
def nodes(self):
    """
    The nodes of the network, keyed by node id.

    :rtype: dict()
    """
    return self._nodes
def nodes_to_dict(self, extras=['all']):
    """
    Build a dict representation of every node on the network.

    :param extras: The extra informations to add
    :type extras: []
    :returns: A dict keyed by node id
    :rtype: dict()
    """
    return {node_id: node.to_dict(extras=extras)
            for node_id, node in self._nodes.items()}
def to_dict(self, extras=['kvals']):
    """
    Return a dict representation of the network.

    :param extras: The extra informations to add
    :type extras: []
    :returns: A dict with state, state_str, home_id and nodes_count keys
    :rtype: dict()
    """
    ret = {}
    # Bug fix: the previous code had trailing commas on each assignment
    # (e.g. "ret['state'] = self.state,"), which stored 1-element tuples
    # instead of the scalar values.
    ret['state'] = self.state
    ret['state_str'] = self.state_str
    ret['home_id'] = self.home_id_str
    ret['nodes_count'] = self.nodes_count
    if 'kvals' in extras and self.network.dbcon is not None:
        vals = self.kvals
        for key in vals.keys():
            ret[key] = vals[key]
    return ret
@nodes.setter
def nodes(self, value):
    """
    Set the nodes of the network.

    Any non-dict value (e.g. None) resets the mapping to an empty dict.

    :param value: The new value
    :type value: dict() or None
    """
    # isinstance replaces the old "type(value) == type(dict())"
    # anti-pattern and also accepts dict subclasses as-is.
    self._nodes = value if isinstance(value, dict) else dict()
def switch_all(self, state):
    """
    Method for switching all devices on or off together. The devices must support
    the SwitchAll command class. The command is first broadcast to all nodes, and
    then followed up with individual commands to each node (because broadcasts are
    not routed, the message might not otherwise reach all the nodes).

    :param state: True to turn on the switches, False to turn them off
    :type state: bool
    """
    action = self.manager.switchAllOn if state else self.manager.switchAllOff
    action(self.home_id)
def test(self, count=1):
    """
    Send a number of test messages to every node and record results.

    :param count: The number of test messages to send.
    :type count: int
    """
    self.manager.testNetwork(self.home_id, count)
def heal(self, upNodeRoute=False):
    """
    Heal network by requesting nodes rediscover their neighbors.
    Sends a ControllerCommand_RequestNodeNeighborUpdate to every node.
    Can take a while on larger networks.

    :param upNodeRoute: Optional Whether to perform return routes initialization. (default = false).
    :type upNodeRoute: bool
    :return: True if the ControllerCommand is sent. False otherwise
    :rtype: bool
    """
    # Use self.state directly (consistent with get_scenes); the previous
    # self.network.state indirection resolved to the same object since the
    # network object is its own network.
    if self.state < self.STATE_AWAKED:
        logger.warning(u'Network must be awake')
        return False
    self.manager.healNetwork(self.home_id, upNodeRoute)
    return True
def get_value(self, value_id):
    """
    Retrieve a value on the network.
    Check every node to see if it holds the value.

    :param value_id: The id of the value to find
    :type value_id: int
    :return: The value or None
    :rtype: ZWaveValue
    """
    for node in self.nodes.values():
        if value_id in node.values:
            return node.values[value_id]
    return None
@property
def id_separator(self):
    """
    The separator used when building value id_on_network strings.

    :rtype: char
    """
    return self._id_separator
@id_separator.setter
def id_separator(self, value):
    """
    Set the separator used when building value id_on_network strings.

    :param value: The new separator
    :type value: char
    """
    self._id_separator = value
def get_value_from_id_on_network(self, id_on_network):
    """
    Retrieve a value on the network from its id_on_network.
    Check every node to see if it holds the value.

    :param id_on_network: The id_on_network of the value to find
    :type id_on_network: str
    :return: The value or None
    :rtype: ZWaveValue
    """
    # dict.itervalues() only exists on Python 2; .values() works on both
    # Python 2 and 3.
    for node in self.nodes.values():
        for val in node.values.values():
            if val.id_on_network == id_on_network:
                return val
    return None
def get_scenes(self):
    """
    The scenes of the network.
    Scenes are generated directly from the lib. There is no notification
    support to keep them up to date. So for a batch job, consider
    storing them in a local variable.

    :return: a dict() (that can be empty) of scene objects. Returns None if the network is not ready
    :rtype: dict() or None
    """
    if self.state >= self.STATE_AWAKED:
        return self._load_scenes()
    return None
def scenes_to_dict(self, extras=['all']):
    """
    Return a JSONifiable dict representation of the scenes.

    :param extras: The extra informations to add
    :type extras: []
    :returns: A dict keyed by scene id
    :rtype: dict()
    """
    scenes = self.get_scenes()
    return {scene_id: scene.to_dict(extras=extras)
            for scene_id, scene in scenes.items()}
def _load_scenes(self):
    """
    Load the scenes of the network from the underlying manager.

    :return: a dict() (that can be empty) of scene objects keyed by scene id
    :rtype: dict()
    """
    scene_ids = self._manager.getAllScenes()
    logger.debug(u'Load Scenes: %s', scene_ids)
    return {scene_id: ZWaveScene(scene_id, network=self)
            for scene_id in scene_ids}
def create_scene(self, label=None):
    """
    Create a new scene on the network.

    If label is set, also change the label of the scene.
    If you store your scenes in a local variable, fetch them again
    to pick up the new scene id.

    :param label: The new label
    :type label: str or None
    :return: the id of the new scene on the network, or 0 on failure
    :rtype: int
    """
    # A ZWaveScene created with id None acts as a factory for new scenes.
    scene = ZWaveScene(None, network=self)
    return scene.create(label)
def scene_exists(self, scene_id):
    """
    Check that the scene exists.

    :param scene_id: The id of the scene to check
    :type scene_id: int
    :return: True if the scene exists. False in other cases
    :rtype: bool
    """
    # NOTE(review): self._network appears to be this object itself (see
    # ZWaveObject.__init__(self, None, self) in __init__) — confirm.
    return self._network.manager.sceneExists(scene_id)
@property
def scenes_count(self):
    """
    Return the number of scenes defined on the network.

    :return: The number of scenes
    :rtype: int
    """
    return self._network.manager.getNumScenes()
def remove_scene(self, scene_id):
    """
    Delete the scene from the network.

    :param scene_id: The id of the scene to remove
    :type scene_id: int
    :return: True if the scene was removed. False in other cases
    :rtype: bool
    """
    return self._network.manager.removeScene(scene_id)
@property
def nodes_count(self):
    """
    The number of nodes currently known on the network.

    :rtype: int
    """
    return len(self.nodes)
@property
def sleeping_nodes_count(self):
    """
    The count of sleeping nodes on the network.

    :rtype: int
    """
    # Bug fix: iterating self.nodes directly yields node ids (ints),
    # which have no is_sleeping attribute — iterate the node objects.
    return sum(1 for node in self.nodes.values() if node.is_sleeping)
def get_poll_interval(self):
    """
    Get the time period between polls of a node's state.

    :return: The number of milliseconds between polls
    :rtype: int
    """
    return self.manager.getPollInterval()
def set_poll_interval(self, milliseconds=500, bIntervalBetweenPolls=True):
    """
    Set the time period between polls of a node's state.

    Due to patent concerns, some devices do not report state changes automatically
    to the controller. These devices need to have their state polled at regular
    intervals. The length of the interval is the same for all devices. To even
    out the Z-Wave network traffic generated by polling, OpenZWave divides the
    polling interval by the number of devices that have polling enabled, and polls
    each in turn. It is recommended that if possible, the interval should not be
    set shorter than the number of polled devices in seconds (so that the network
    does not have to cope with more than one poll per second).

    :param milliseconds: The length of the polling interval in milliseconds.
    :type milliseconds: int
    :param bIntervalBetweenPolls: If set to true (via SetPollInterval), the pollInterval will be interspersed between each poll (so a much smaller m_pollInterval like 100, 500, or 1,000 may be appropriate). If false, the library attempts to complete all polls within m_pollInterval.
    :type bIntervalBetweenPolls: bool
    """
    self.manager.setPollInterval(milliseconds, bIntervalBetweenPolls)
def zwcallback(self, args):
    """
    The Callback Handler used with the libopenzwave.

    Routes each notification to the dedicated _handle_* method based on
    its notificationType.

    n['valueId'] = {
        * 'home_id' : v.GetHomeId(),
        * 'node_id' : v.GetNodeId(),
        * 'commandClass' : PyManager.COMMAND_CLASS_DESC[v.GetCommandClassId()],
        * 'instance' : v.GetInstance(),
        * 'index' : v.GetIndex(),
        * 'id' : v.GetId(),
        * 'genre' : PyGenres[v.GetGenre()],
        * 'type' : PyValueTypes[v.GetType()],
        * 'value' : getValueFromType(manager,v.GetId()),
        * 'label' : label.c_str(),
        * 'units' : units.c_str(),
        * 'readOnly': manager.IsValueReadOnly(v)
    }

    :param args: A dict containing informations about the state of the controller
    :type args: dict()
    """
    logger.debug('zwcallback args=[%s]', args)
    try:
        notify_type = args['notificationType']
        # Dispatch table replacing the former 30-branch if/elif chain.
        dispatch = {
            self.SIGNAL_DRIVER_FAILED: self._handle_driver_failed,
            self.SIGNAL_DRIVER_READY: self._handle_driver_ready,
            self.SIGNAL_DRIVER_RESET: self._handle_driver_reset,
            self.SIGNAL_DRIVER_REMOVED: self._handle_driver_removed,
            self.SIGNAL_NODE_ADDED: self._handle_node_added,
            self.SIGNAL_NODE_EVENT: self._handle_node_event,
            self.SIGNAL_NODE_NAMING: self._handle_node_naming,
            self.SIGNAL_NODE_NEW: self._handle_node_new,
            self.SIGNAL_NODE_PROTOCOL_INFO: self._handle_node_protocol_info,
            # NOTE: this handler keeps its historical camelCase name.
            self.SIGNAL_NODE_READY: self._handleNodeReady,
            self.SIGNAL_NODE_REMOVED: self._handle_node_removed,
            self.SIGNAL_GROUP: self._handle_group,
            self.SIGNAL_SCENE_EVENT: self._handle_scene_event,
            self.SIGNAL_VALUE_ADDED: self._handle_value_added,
            self.SIGNAL_VALUE_CHANGED: self._handle_value_changed,
            self.SIGNAL_VALUE_REFRESHED: self._handle_value_refreshed,
            self.SIGNAL_VALUE_REMOVED: self._handle_value_removed,
            self.SIGNAL_POLLING_DISABLED: self._handle_polling_disabled,
            self.SIGNAL_POLLING_ENABLED: self._handle_polling_enabled,
            self.SIGNAL_CREATE_BUTTON: self._handle_create_button,
            self.SIGNAL_DELETE_BUTTON: self._handle_delete_button,
            self.SIGNAL_BUTTON_ON: self._handle_button_on,
            self.SIGNAL_BUTTON_OFF: self._handle_button_off,
            self.SIGNAL_ALL_NODES_QUERIED: self._handle_all_nodes_queried,
            self.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD: self._handle_all_nodes_queried_some_dead,
            self.SIGNAL_AWAKE_NODES_QUERIED: self._handle_awake_nodes_queried,
            self.SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE: self._handle_essential_node_queries_complete,
            self.SIGNAL_NODE_QUERIES_COMPLETE: self._handle_node_queries_complete,
            self.SIGNAL_MSG_COMPLETE: self._handle_msg_complete,
            self.SIGNAL_NOTIFICATION: self._handle_notification,
            self.SIGNAL_CONTROLLER_COMMAND: self._handle_controller_command,
        }
        handler = dispatch.get(notify_type)
        if handler is not None:
            handler(args)
        else:
            logger.warning(u'Skipping unhandled notification [%s]', args)
    except Exception:
        # The old bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; logger.exception records the traceback.
        logger.exception(u'Error in manager callback')
def _handle_driver_failed(self, args):
    """
    The driver failed to load.

    Drops the manager/controller/node references, flags the network as
    failed and emits both the deprecated driver signal and the network
    signal.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.warning(u'Z-Wave Notification DriverFailed : %s', args)
    self._manager = None
    self._controller = None
    self.nodes = None
    self._state = self.STATE_FAILED
    payload = {'network': self}
    dispatcher.send(self.SIGNAL_DRIVER_FAILED, **payload)
    dispatcher.send(self.SIGNAL_NETWORK_FAILED, **payload)
def _handle_driver_ready(self, args):
    """
    A driver for a PC Z-Wave controller has been added and is ready to use.
    The notification will contain the controller's Home ID,
    which is needed to call most of the Manager methods.

    dispatcher.send(self.SIGNAL_NETWORK_STARTED, **{'network': self, 'controller': self._controller})

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification DriverReady : %s', args)
    self._object_id = args['homeId']
    try:
        controller_node = ZWaveNode(args['nodeId'], network=self)
        self._semaphore_nodes.acquire()
        # Assigning None goes through the nodes.setter, which resets the
        # mapping to an empty dict before registering the controller node.
        self.nodes = None
        self.nodes[args['nodeId']] = controller_node
        self._controller.node = self.nodes[args['nodeId']]
        logger.info(u'Driver ready using library %s', self._controller.library_description)
        logger.info(u'home_id 0x%0.8x, controller node id is %d', self.home_id, self._controller.node_id)
        logger.debug(u'Network %s', self)
        #Not needed. Already sent by the lib
        #~ dispatcher.send(self.SIGNAL_DRIVER_READY, \
        #~ **{'network': self, 'controller': self._controller})
        self._state = self.STATE_STARTED
        dispatcher.send(self.SIGNAL_NETWORK_STARTED, \
            **{'network': self})
        ctrl_state = libopenzwave.PyControllerState[0]
        ctrl_message = libopenzwave.PyControllerState[0].doc
        dispatcher.send(self.controller.SIGNAL_CONTROLLER, \
            **{'state': ctrl_state, 'message': ctrl_message, 'network': self, 'controller': self.controller})
    # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
    except:
        import sys, traceback
        logger.exception('Z-Wave Notification DriverReady',)
    finally:
        self._semaphore_nodes.release()
def _handle_driver_reset(self, args):
    """
    This notification is never fired.

    All nodes and values for this driver have been removed.
    This is sent instead of potentially hundreds of individual node
    and value notifications.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification DriverReset : %s', args)
    try:
        self._semaphore_nodes.acquire()
        logger.debug(u'DriverReset received. Remove all nodes')
        # Assigning None goes through the nodes.setter, resetting to {}.
        self.nodes = None
        self._state = self.STATE_RESETTED
        dispatcher.send(self.SIGNAL_DRIVER_RESET, \
            **{'network': self})
        dispatcher.send(self.SIGNAL_NETWORK_RESETTED, \
            **{'network': self})
    finally:
        self._semaphore_nodes.release()
def _handle_driver_removed(self, args):
    """
    The Driver is being removed (either due to an error or by request).
    Do not call any driver-related methods after receiving this.

    dispatcher.send(self.SIGNAL_DRIVER_REMOVED, **{'network': self})

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification DriverRemoved : %s', args)
    try:
        self._semaphore_nodes.acquire()
        self._state = self.STATE_STOPPED
        dispatcher.send(self.SIGNAL_DRIVER_REMOVED, \
            **{'network': self})
    finally:
        self._semaphore_nodes.release()
def _handle_group(self, args):
    """
    The associations for the node have changed. The application should
    rebuild any group information it holds about the node.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification Group : %s', args)
    payload = {'network': self,
               'node': self.nodes[args['nodeId']],
               'groupidx': args['groupIdx']}
    dispatcher.send(self.SIGNAL_GROUP, **payload)
def _handle_node(self, node):
    """
    Generic per-node change notification (added, removed, changed, ...).
    Listen to this signal alone if node event details are not needed.

    :param node: the node
    :type node: ZWaveNode
    """
    logger.debug(u'Z-Wave Notification Node : %s', node)
    dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node': node})
def _handle_node_added(self, args):
    """
    A new node has been added to OpenZWave's set.
    This may be due to a device being added to the Z-Wave network,
    or because the application is initializing itself.

    dispatcher.send(self.SIGNAL_NODE_ADDED, **{'network': self, 'node': node})
    dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node':self.nodes[args['nodeId']]})

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification NodeAdded : %s', args)
    try:
        node = ZWaveNode(args['nodeId'], network=self)
        # Hold the nodes semaphore while mutating the nodes mapping.
        self._semaphore_nodes.acquire()
        self.nodes[args['nodeId']] = node
        dispatcher.send(self.SIGNAL_NODE_ADDED, \
            **{'network': self, 'node': self.nodes[args['nodeId']]})
        self._handle_node(self.nodes[args['nodeId']])
    finally:
        self._semaphore_nodes.release()
def _handle_scene_event(self, args):
    """
    Scene Activation Set received.
    Not implemented

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification SceneEvent : %s', args)
    payload = {'network': self,
               'node': self.nodes[args['nodeId']],
               'scene_id': args['sceneId']}
    dispatcher.send(self.SIGNAL_SCENE_EVENT, **payload)
def _handle_node_event(self, args):
    """
    A node has triggered an event. This is commonly caused when a
    node sends a Basic_Set command to the controller.
    The event value is stored in the notification.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification NodeEvent : %s', args)
    payload = {'network': self,
               'node': self.nodes[args['nodeId']],
               'value': args['event']}
    dispatcher.send(self.SIGNAL_NODE_EVENT, **payload)
def _handle_node_naming(self, args):
    """
    One of the node names has changed (name, manufacturer, product).

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification NodeNaming : %s', args)
    node = self.nodes[args['nodeId']]
    dispatcher.send(self.SIGNAL_NODE_NAMING, **{'network': self, 'node': node})
    self._handle_node(node)
def _handle_node_new(self, args):
    """
    A new node has been found (not already stored in the zwcfg*.xml file).

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug('Z-Wave Notification NodeNew : %s', args)
    dispatcher.send(self.SIGNAL_NODE_NEW,
                    **{'network': self, 'node_id': args['nodeId']})
def _handle_node_protocol_info(self, args):
    """
    Basic node information has been received, such as whether
    the node is a listening device, a routing device and its baud rate
    and basic, generic and specific types.
    It is after this notification that you can call Manager::GetNodeType
    to obtain a label containing the device description.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification NodeProtocolInfo : %s', args)
    node = self.nodes[args['nodeId']]
    dispatcher.send(self.SIGNAL_NODE_PROTOCOL_INFO,
                    **{'network': self, 'node': node})
    self._handle_node(node)
def _handle_node_removed(self, args):
    """
    A node has been removed from OpenZWave's set.
    This may be due to a device being removed from the Z-Wave network,
    or because the application is closing.

    dispatcher.send(self.SIGNAL_NODE_REMOVED, **{'network': self, 'node_id': args['nodeId']})
    dispatcher.send(self.SIGNAL_NODE, **{'network': self, 'node':self.nodes[args['nodeId']]})

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification NodeRemoved : %s', args)
    try:
        self._semaphore_nodes.acquire()
        # Keep a reference to the removed node so it can still be passed
        # to the signal receivers after deletion from the mapping.
        if args['nodeId'] in self.nodes:
            node = self.nodes[args['nodeId']]
            del self.nodes[args['nodeId']]
            dispatcher.send(self.SIGNAL_NODE_REMOVED, \
                **{'network': self, 'node': node})
            self._handle_node(node)
    finally:
        self._semaphore_nodes.release()
def _handle_essential_node_queries_complete(self, args):
    """
    The queries on a node that are essential to its operation have
    been completed. The node can now handle incoming messages.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification EssentialNodeQueriesComplete : %s', args)
    payload = {'network': self, 'node': self.nodes[args['nodeId']]}
    dispatcher.send(self.SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE, **payload)
def _handle_node_queries_complete(self, args):
    """
    All the initialisation queries on a node have been completed.
    When receiving this notification, the node is considered ready.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification NodeQueriesComplete : %s', args)
    node = self.nodes[args['nodeId']]
    # All query stages are done: flag the node as ready to operate.
    node.is_ready = True
    dispatcher.send(self.SIGNAL_NODE_QUERIES_COMPLETE,
                    **{'network': self, 'node': node})
    self._handle_node(node)
def _handle_all_nodes_queried(self, args):
    """
    All nodes have been queried, so the client application can expect
    complete data.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification AllNodesQueried : %s', args)
    self._state = self.STATE_READY
    dispatcher.send(self.SIGNAL_NETWORK_READY, **{'network': self})
    dispatcher.send(self.SIGNAL_ALL_NODES_QUERIED,
                    **{'network': self, 'controller': self._controller})
def _handle_all_nodes_queried_some_dead(self, args):
    """
    All nodes have been queried, but some nodes are marked dead; the
    client application can still expect complete data for live nodes.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification AllNodesQueriedSomeDead : %s', args)
    self._state = self.STATE_READY
    dispatcher.send(self.SIGNAL_NETWORK_READY, **{'network': self})
    dispatcher.send(self.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD,
                    **{'network': self, 'controller': self._controller})
def _handle_awake_nodes_queried(self, args):
    """
    Handle the AwakeNodesQueried notification.

    All awake nodes have been queried, so the client application can
    expect complete data for these nodes.  Records the home id,
    promotes the network state to STATE_AWAKED (if it was lower) and
    relays SIGNAL_NETWORK_AWAKED and SIGNAL_AWAKE_NODES_QUERIED.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification AwakeNodesQueried : %s', args)
    self._object_id = args['homeId']
    try:
        if self._state < self.STATE_AWAKED:
            self._state = self.STATE_AWAKED
        dispatcher.send(self.SIGNAL_NETWORK_AWAKED, **{'network': self})
        dispatcher.send(self.SIGNAL_AWAKE_NODES_QUERIED, \
            **{'network': self, 'controller': self._controller})
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; logger.exception records the full traceback.
        logger.exception(u'Z-Wave Notification AwakeNodesQueried')
def _handle_polling_disabled(self, args):
    """
    Handle the PollingDisabled notification: polling of a node has been
    successfully turned off by a call to Manager::DisablePoll.  Relays
    SIGNAL_POLLING_DISABLED for the node.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification PollingDisabled : %s', args)
    dispatcher.send(self.SIGNAL_POLLING_DISABLED,
                    network=self, node=self.nodes[args['nodeId']])
def _handle_polling_enabled(self, args):
    """
    Handle the PollingEnabled notification: polling of a node has been
    successfully turned on by a call to Manager::EnablePoll.  Relays
    SIGNAL_POLLING_ENABLED for the node.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification PollingEnabled : %s', args)
    dispatcher.send(self.SIGNAL_POLLING_ENABLED,
                    network=self, node=self.nodes[args['nodeId']])
def _handle_create_button(self, args):
    """
    Handle the CreateButton notification: a handheld controller button
    event was created.  Relays SIGNAL_CREATE_BUTTON for the node.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification CreateButton : %s', args)
    dispatcher.send(self.SIGNAL_CREATE_BUTTON,
                    network=self, node=self.nodes[args['nodeId']])
def _handle_delete_button(self, args):
    """
    Handle the DeleteButton notification: a handheld controller button
    event was deleted.  Relays SIGNAL_DELETE_BUTTON for the node.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification DeleteButton : %s', args)
    dispatcher.send(self.SIGNAL_DELETE_BUTTON,
                    network=self, node=self.nodes[args['nodeId']])
def _handle_button_on(self, args):
    """
    Handle the ButtonOn notification: a handheld controller button-on
    pressed event.  Relays SIGNAL_BUTTON_ON for the node.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification ButtonOn : %s', args)
    dispatcher.send(self.SIGNAL_BUTTON_ON,
                    network=self, node=self.nodes[args['nodeId']])
def _handle_button_off(self, args):
    """
    Handle the ButtonOff notification: a handheld controller button-off
    pressed event.  Relays SIGNAL_BUTTON_OFF for the node.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification ButtonOff : %s', args)
    dispatcher.send(self.SIGNAL_BUTTON_OFF,
                    network=self, node=self.nodes[args['nodeId']])
def _handle_value(self, node=None, value=None):
    """
    Generic relay fired for every value event (added, changed,
    refreshed, ...).  Listen to SIGNAL_VALUE if you are not interested
    in the specific kind of value event.

    :param node: the node that holds the value
    :param value: the value that triggered the event
    """
    dispatcher.send(self.SIGNAL_VALUE, network=self, node=node, value=value)
def _handle_value_added(self, args):
    """
    Handle the ValueAdded notification: a new node value has been added
    to OpenZWave's set.

    These notifications occur after a node has been discovered and
    details of its command classes have been received.  Each command
    class may generate one or more values depending on the complexity
    of the item being represented.  Relays SIGNAL_VALUE_ADDED and then
    the generic SIGNAL_VALUE.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification ValueAdded : %s', args)
    # Robustness/consistency fix: _handle_value_changed and
    # _handle_value_refreshed already guard against notifications for
    # unknown nodes; without this guard a stray ValueAdded raised
    # KeyError on self.nodes.
    if args['nodeId'] not in self.nodes:
        logger.warning(u'Z-Wave Notification ValueAdded (%s) for an unknown node %s', args['valueId'], args['nodeId'])
        return False
    node = self.nodes[args['nodeId']]
    node.add_value(args['valueId']['id'])
    value = node.values[args['valueId']['id']]
    dispatcher.send(self.SIGNAL_VALUE_ADDED, \
        **{'network': self, 'node': node, 'value': value})
    self._handle_value(node=node, value=value)
def _handle_value_changed(self, args):
    """
    Handle the ValueChanged notification: a node value has been updated
    from the Z-Wave network and differs from the previous value.

    Relays SIGNAL_VALUE_CHANGED and then the generic SIGNAL_VALUE.
    Returns False for notifications about unknown nodes.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification ValueChanged : %s', args)
    node_id = args['nodeId']
    if node_id not in self.nodes:
        logger.warning('Z-Wave Notification ValueChanged (%s) for an unknown node %s', args['valueId'], node_id)
        return False
    node = self.nodes[node_id]
    node.change_value(args['valueId']['id'])
    value = node.values[args['valueId']['id']]
    dispatcher.send(self.SIGNAL_VALUE_CHANGED,
                    network=self, node=node, value=value)
    self._handle_value(node=node, value=value)
def _handle_value_refreshed(self, args):
    """
    Handle the ValueRefreshed notification: a node value has been
    updated from the Z-Wave network.

    Relays SIGNAL_VALUE_REFRESHED and then the generic SIGNAL_VALUE.
    Returns False for notifications about unknown nodes.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification ValueRefreshed : %s', args)
    node_id = args['nodeId']
    if node_id not in self.nodes:
        logger.warning('Z-Wave Notification ValueRefreshed (%s) for an unknown node %s', args['valueId'], node_id)
        return False
    node = self.nodes[node_id]
    node.refresh_value(args['valueId']['id'])
    value = node.values[args['valueId']['id']]
    dispatcher.send(self.SIGNAL_VALUE_REFRESHED,
                    network=self, node=node, value=value)
    self._handle_value(node=node, value=value)
def _handle_value_removed(self, args):
    """
    Handle the ValueRemoved notification: a node value has been removed
    from OpenZWave's set.  This only occurs when a node is removed.

    Relays SIGNAL_VALUE_REMOVED with the removed value, or with
    value=None when the value was not known locally.

    :param args: data sent by the notification
    :type args: dict()
    :return: True when a known value was removed, False otherwise
    :rtype: bool
    """
    logger.debug(u'Z-Wave Notification ValueRemoved : %s', args)
    if args['nodeId'] not in self.nodes:
        logger.warning(u'Z-Wave Notification ValueRemoved (%s) for an unknown node %s', args['valueId'], args['nodeId'])
        return False
    # Bug fix: the original tested `in` here, so KNOWN values took the
    # "unknown value" early-return (and were never removed) while
    # unknown values fell through and raised KeyError below.
    if args['valueId']['id'] not in self.nodes[args['nodeId']].values:
        logger.warning(u'Z-Wave Notification ValueRemoved for an unknown value (%s) on node %s', args['valueId'], args['nodeId'])
        dispatcher.send(self.SIGNAL_VALUE_REMOVED, \
            **{'network': self, 'node': self.nodes[args['nodeId']], \
               'value': None, 'valueId': args['valueId']['id']})
        return False
    val = self.nodes[args['nodeId']].values[args['valueId']['id']]
    if self.nodes[args['nodeId']].remove_value(args['valueId']['id']):
        dispatcher.send(self.SIGNAL_VALUE_REMOVED, \
            **{'network': self, 'node': self.nodes[args['nodeId']], \
               'value': val, 'valueId': args['valueId']['id']})
    # remove_value may already have dropped the entry; make sure it is gone.
    if args['valueId']['id'] in self.nodes[args['nodeId']].values:
        del self.nodes[args['nodeId']].values[args['valueId']['id']]
    return True
def _handle_notification(self, args):
    """
    Handle a generic notification: an error happened, or a node changed
    state (awake, sleep, death, no operation, timeout).  Relays
    SIGNAL_NOTIFICATION with the raw notification args.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification : %s', args)
    dispatcher.send(self.SIGNAL_NOTIFICATION, network=self, args=args)
def _handle_controller_command(self, args):
    """
    Handle a ControllerCommand notification by delegating to the
    controller object, which publishes the state via
    SIGNAL_CONTROLLER_WAITING and the full command result via
    SIGNAL_CONTROLLER_COMMAND (see the controller's own handler).

    :param args: data sent by the notification
    :type args: dict()
    """
    self._controller._handle_controller_command(args)
def _handle_msg_complete(self, args):
    """
    Handle the MsgComplete notification: the last message that was sent
    to the network is now complete.  Relays SIGNAL_MSG_COMPLETE.

    :param args: data sent by the notification
    :type args: dict()
    """
    logger.debug(u'Z-Wave Notification MsgComplete : %s', args)
    dispatcher.send(self.SIGNAL_MSG_COMPLETE, network=self)
def write_config(self):
    """
    Ask the manager to write the Z-Wave network configuration for this
    home id to the user directory.
    """
    # The original docstring here was a copy-paste of _handle_msg_complete's.
    self._manager.writeConfig(self.home_id)
    logger.info(u'ZWave configuration written to user directory.')
"""
initialization callback sequence:
[driverReady]
[nodeAdded] <-------------------------+ This cycle is extremely quick, well under one second.
[nodeProtocolInfo] |
[nodeNaming] |
[valueAdded] <---------------+ |
| |
{REPEATS FOR EACH VALUE} ----+ |
|
[group] <--------------------+ |
| |
{REPEATS FOR EACH GROUP} ----+ |
|
{REPEATS FOR EACH NODE} --------------+
[? (no notification)] <---------------+ (no notification announces the beginning of this cycle)
|
[valueChanged] <-------------+ | This cycle can take some time, especially if some nodes
| | are sleeping or slow to respond.
{REPEATS FOR EACH VALUE} ----+ |
|
[group] <--------------------+ |
| |
{REPEATS FOR EACH GROUP} ----+ |
|
[nodeQueriesComplete] |
|
{REPEATS FOR EACH NODE} --------------+
[awakeNodesQueried] or [allNodesQueried] (with node_id 255)
[driverRemoved]
"""
class ZWaveNetworkSingleton(ZWaveNetwork):
    """
    Represents a singleton Zwave network.
    """
    # NOTE(review): `__metaclass__` only has an effect on Python 2; on
    # Python 3 a metaclass must be given as
    # `class ZWaveNetworkSingleton(ZWaveNetwork, metaclass=Singleton)`.
    # Confirm which interpreter versions this package still supports.
    __metaclass__ = Singleton
|
nilq/baby-python
|
python
|
# ---
# name: web-csv
# deployed: true
# title: CSV Reader
# description: Returns the data for the CSVs given by the URLs
# params:
# - name: url
# type: array
# description: Urls for which to get the info
# required: true
# examples:
# - '"https://raw.githubusercontent.com/flexiodata/data/master/sample/sample-contacts.csv"'
# notes:
# ---
import csv
import json
import tempfile
import io
import aiohttp
import asyncio
import itertools
from cerberus import Validator
from contextlib import closing
from collections import OrderedDict
def flexio_handler(flex):
    """Read one or more CSV URLs and stream them back as a single JSON
    array: first element is the list of column names, followed by one
    row-array per CSV record across all files.

    :param flex: Flex.io request context providing ``input`` and ``output``
    :raises ValueError: if the input is not a list or fails validation
    """
    # get the input
    input = flex.input.read()
    input = json.loads(input)
    if not isinstance(input, list):
        raise ValueError

    # define the expected parameters and map the values to the parameter names
    # based on the positions of the keys/values
    params = OrderedDict()
    params['urls'] = {'required': True, 'validator': validator_list, 'coerce': to_list}
    #params['columns'] = {'required': True, 'validator': validator_list, 'coerce': to_list}
    input = dict(zip(params.keys(), input))

    # validate the mapped input against the validator
    v = Validator(params, allow_unknown = True)
    input = v.validated(input)
    if input is None:
        raise ValueError

    # download all CSVs concurrently into temp files
    urls = input['urls']
    loop = asyncio.get_event_loop()
    temp_fp_all = loop.run_until_complete(fetch_all(urls))

    flex.output.content_type = 'application/json'
    flex.output.write('[')

    # get the columns for each of the input urls
    # NOTE(review): `properties` is overwritten per file, so only the LAST
    # file's header actually survives this loop — confirm all inputs are
    # expected to share one schema.
    properties = []
    for temp_fp in temp_fp_all:
        try:
            fp = io.TextIOWrapper(temp_fp, encoding='utf-8-sig')
            reader = csv.DictReader(fp, delimiter=',', quotechar='"')
            for row in reader:
                properties = list(row.keys())
                break
        finally:
            # rewind the underlying temp file for the second pass and detach
            # the wrapper so closing it doesn't close the temp file
            fp.seek(0)
            fp.detach()

    flex.output.write(json.dumps(properties))

    # second pass: emit each record as ",<json row array>"; missing or
    # None cells become empty strings
    for temp_fp in temp_fp_all:
        fp = io.TextIOWrapper(temp_fp, encoding='utf-8-sig')
        reader = csv.DictReader(fp, delimiter=',', quotechar='"')
        for row in reader:
            row = ',' + json.dumps([(row.get(p) or '') for p in properties])
            flex.output.write(row)
        temp_fp.close()

    flex.output.write(']')
async def fetch_all(urls):
    """Download every URL concurrently; return the temp files in the
    same order as *urls*."""
    async with aiohttp.ClientSession() as session:
        downloads = [fetch(session, url) for url in urls]
        return await asyncio.gather(*downloads)
async def fetch(session, url):
    """Stream *url* into a temporary file and return it rewound to the
    beginning; the caller closes (and thereby deletes) it."""
    temp_fp = tempfile.TemporaryFile()
    async with session.get(url) as response:
        chunk = await response.content.read(1024)
        while chunk:
            temp_fp.write(chunk)
            chunk = await response.content.read(1024)
    temp_fp.seek(0)  # rewind to the beginning
    return temp_fp
def validator_list(field, value, error):
    """Cerberus validator: accept a plain string or a list of strings.

    Reports at most one error per call via the *error* callback.
    """
    if isinstance(value, str):
        return
    if not isinstance(value, list):
        error(field, 'Must be a string or a list of strings')
        return
    if any(not isinstance(item, str) for item in value):
        error(field, 'Must be a list with only string values')
def to_list(value):
    """Coerce *value* into a flat list of strings.

    A comma-delimited string is split; a list of strings is returned
    as-is; a list of lists is flattened one level.  Anything else
    yields None.
    """
    if isinstance(value, str):
        return value.split(",")
    if isinstance(value, list):
        # Bug fix: the original used itertools.chain.from_iterable over
        # ALL elements, which iterated plain strings character by
        # character — contradicting the stated intent ("if we have a
        # list of strings, create a list from them").
        result = []
        for item in value:
            if isinstance(item, list):
                result.extend(item)
            else:
                result.append(item)
        return result
    return None
|
nilq/baby-python
|
python
|
# utilities for dealing with webtiles configuration. The actual configuration
# data does *not* go in here.
import collections
import os.path
import logging
from webtiles import load_games
# Global configuration store (a ConfigModuleWrapper or plain dict) and the
# absolute path it was loaded from; both set by init_config_from_module().
server_config = {}
source_file = None
# light wrapper class that maps get/set/etc to getattr/setattr/etc
# doesn't bother to implement most of the dict interface...
class ConfigModuleWrapper(object):
    """Light wrapper mapping a dict-like interface onto a module's
    attributes (get/setitem/pop/contains via getattr & friends).

    Deliberately implements only the handful of dict operations the
    config layer needs.
    """
    def __init__(self, module):
        self.module = module

    def get(self, key, default):
        """Return the module attribute *key*, or *default* if unset."""
        return getattr(self.module, key, default)

    def __contains__(self, key):
        """True when the module defines attribute *key*."""
        return hasattr(self.module, key)

    def __setitem__(self, key, val):
        """Assign attribute *key* on the wrapped module."""
        setattr(self.module, key, val)

    def pop(self, key):
        """Remove attribute *key* and return its former value."""
        value = getattr(self.module, key)
        delattr(self.module, key)
        return value
# temporary compatibility shim for config calls in templates
# These module-level flags mirror the config values of the same names and
# are refreshed by init_config_from_module().
allow_password_reset = False
admin_password_reset = False
# classic config: everything is just done in a module
# (TODO: add some alternative)
def init_config_from_module(module):
    """Adopt *module* as the configuration source (classic config style)
    and record where it was loaded from.

    Also refreshes the module-level template compatibility shims.
    """
    global server_config, source_file
    server_config = ConfigModuleWrapper(module)
    source_file = os.path.abspath(module.__file__)
    global allow_password_reset, admin_password_reset
    allow_password_reset = get('allow_password_reset')
    admin_password_reset = get('admin_password_reset')
# Server root path (set externally) and the loaded game definitions.
server_path = None
games = collections.OrderedDict()
game_modes = {}  # type: Dict[str, str]

# Fallback values consulted by get(); for keys not listed here the
# default is None.
defaults = {
    'dgl_mode': True,
    'logging_config': {
        "level": logging.INFO,
        "format": "%(asctime)s %(levelname)s: %(message)s"
    },
    'server_socket_path': None,
    'watch_socket_dirs': False,
    'use_game_yaml': True,
    'milestone_file': [],
    'status_file_update_rate': 5,
    # Bug fix: 'lobby_update_rate' appeared twice in this literal (same
    # value); the duplicate entry has been removed.
    'lobby_update_rate': 2,
    'recording_term_size': (80, 24),
    'max_connections': 100,
    'connection_timeout': 600,
    'max_idle_time': 5 * 60 * 60,
    'use_gzip': True,
    'kill_timeout': 10,
    'nick_regex': r"^[a-zA-Z0-9]{3,20}$",
    'max_passwd_length': 20,
    'allow_password_reset': False,
    'admin_password_reset': False,
    'crypt_algorithm': "broken", # should this be the default??
    'crypt_salt_length': 16,
    'login_token_lifetime': 7, # Days
    'daemon': False,
    'development_mode': False,
    'no_cache': False,
    'live_debug': False,
}
def get(key, default=None):
    """Look *key* up in the server config, falling back to the module
    defaults and finally *default*."""
    return server_config.get(key, defaults.get(key, default))


def set(key, val):
    """Store *key* in the server config (intentionally shadows the
    builtin; this module is the config namespace)."""
    server_config[key] = val


def pop(key):
    """Remove *key* from the server config and return its value."""
    return server_config.pop(key)


def has_key(key):
    """Return True when *key* is explicitly present in the server config
    (defaults are NOT consulted)."""
    return key in server_config
def check_keys_all(required, raise_on_missing=False):
    """Return True when every key in *required* is set and non-None.

    *required* may be a single key name or an iterable of names.  With
    raise_on_missing=True, a missing key raises ValueError instead of
    returning False.
    """
    keys = [required] if isinstance(required, str) else required
    for key in keys:
        if not has_key(key) or get(key) is None:
            if raise_on_missing:
                raise ValueError("Webtiles config: Missing configuration key: %s" % key)
            return False
    return True
def check_keys_any(required, raise_on_missing=False):
    """Return True when at least one entry of *required* is satisfied.

    Each entry is checked with check_keys_all, so an entry that is
    itself a list requires ALL of its keys.
    """
    satisfied = [check_keys_all(entry) for entry in required]
    if not any(satisfied):
        if raise_on_missing:
            raise ValueError("Webtiles config: Need at least one of %s!" %
                             ", ".join([repr(r) for r in required]))
        return False
    return True
def check_game_config():
    """Verify per-game filesystem paths; log and return False on any
    missing binary or client-data path."""
    success = True
    for game_id, game_data in get('games').items():
        binary = game_data["crawl_binary"]
        if not os.path.exists(binary):
            logging.warning("Crawl executable for %s (%s) doesn't exist!",
                            game_id, binary)
            success = False
        if "client_path" in game_data and not os.path.exists(game_data["client_path"]):
            logging.warning("Client data path %s doesn't exist!",
                            game_data["client_path"])
            success = False
    return success
def load_game_data():
    """(Re)load the game definitions into the module-level `games` and
    `game_modes` globals; raise ValueError when none are usable."""
    # TODO: should the `load_games` module be refactored into config?
    global games, game_modes
    games = get('games', collections.OrderedDict())
    if get('use_game_yaml', False):
        games = load_games.load_games(games)
    # TODO: check_games here or in validate?
    if not games:
        raise ValueError("No games defined!")
    if not check_game_config():
        raise ValueError("Errors in game data!")
    game_modes = load_games.collect_game_modes()
def validate():
    """Check that the loaded configuration is complete and coherent.

    Raises ValueError when a required key (or combination of keys) is
    missing; also fills in defaults that depend on other values.
    """
    # TODO: some way of setting defaults in this module?
    check_keys_any(['bind_nonsecure', 'ssl_options'], True)
    if has_key('bind_nonsecure') and get('bind_nonsecure'):
        check_keys_any(['bind_pairs', ['bind_address', 'bind_port']], True)
    if has_key('ssl_options') and get('ssl_options'):
        check_keys_any(['ssl_bind_pairs', ['ssl_address', 'ssl_port']], True)

    required = ['static_path', 'template_path', 'server_id',
                'dgl_status_file', 'init_player_program',]
    if get('allow_password_reset') or get('admin_password_reset'):
        # Bug fix: `required` is a list, so the original
        # `required.add('lobby_url')` raised AttributeError whenever
        # password reset was enabled.
        required.append('lobby_url')
    check_keys_all(required, raise_on_missing=True)

    smtp_opts = ['smtp_host', 'smtp_port', 'smtp_from_addr']
    if check_keys_any(smtp_opts):
        check_keys_all(smtp_opts, True)
    if has_key('smtp_user'):
        check_keys_all('smtp_password', True)

    # set up defaults that are conditioned on other values
    if not has_key('settings_db'):
        set('settings_db', os.path.join(os.path.dirname(get('password_db')),
                                        "user_settings.db3"))
|
nilq/baby-python
|
python
|
# black=\033[30m
# red=\033[31m
# green=\033[32m
# orange=\033[33m
# blue=\033[34m
# purple=\033[35m
# cyan=\033[36m
# lightgrey=\033[37m
# darkgrey=\033[90m
# lightred=\033[91m
# lightgreen=\033[92m
# yellow=\033[93m
# lightblue=\033[94m
# pink=\033[95m
# lightcyan=\033[96m
# BOLD = \033[1m
# FAINT = \033[2m
# ITALIC = \033[3m
# UNDERLINE = \033[4m
# BLINK = \033[5m
# NEGATIVE = \033[7m
# CROSSED = \033[9m
# END = \033[0m
from time import sleep
import sys
import os
from remove import remove
def del_lines(i, fname):
    """Move the terminal cursor up *i* lines, then strip the last line
    from *fname* via the project's remove() helper."""
    sys.stdout.write('\x1b[1A' * i)  # one "cursor up" escape per line
    remove(fname)
def delete_1_line():
    """Move the cursor up one line and erase that line (ANSI escapes
    CUU and EL2)."""
    sys.stdout.write('\x1b[1A' '\x1b[2K')
def create():
    """Interactively create a .vypr source file under Testcases/.

    The user types lines until "$EOF"; "$RET" deletes the previous line
    (on screen and in the file) and "$REM" clears everything typed so far.
    """
    fname = input('\033[32mEnter filename (default: code.vypr):\033[0m') or ' '
    if fname == ' ':
        # default file gets a starter template
        file = open('Testcases/code.vypr', 'w', encoding='utf8')
        file.write("import modulename;\nint main()\n{\n    return 0;\n}")
        # NOTE(review): fname stays ' ' here, so the $RET/$REM branches
        # below reopen ' .vypr' instead of 'Testcases/code.vypr' —
        # confirm whether the default path is expected to support them.
    else:
        fname = f'Testcases/{fname}'
        file = open(f'{fname}.vypr', "w", encoding='utf8')
    print('''\033[32mWhat Do You Want To Write To Your File?
[Write "$EOF" (without quotes) to end]
[Write "$RET" (without quotes) to delete upper line]
[Write "$REM" (without quotes) to clear file]\033[0m''')
    print('***START***')
    print('> ', end='')
    text = input()
    x = 0  # number of lines currently written (used by $REM)
    while text != '$EOF' and text != '\n$EOF':
        if(text == '$RET' or text == '\n$RET'):
            # undo the previous line: on screen and in the file
            file.close()
            delete_1_line()
            del_lines(1, f'{fname}.vypr')
            file = open(f'{fname}.vypr', "a+")
            print('> ', end='')
            text = input()
            x = x-1
        elif (text == '$REM' or text == '\n$REM'):
            # wipe everything typed so far
            delete_1_line()
            for _ in range(x):
                delete_1_line()
            file.close()
            with open(f'{fname}.vypr', 'w') as f:
                f.write('')
            file = open(f'{fname}.vypr', "a+")
            print('> ', end='')
            text = input("\b ")
        else:
            file.write(text+'\n')
            print('> ', end='')
            text = input()
            x = x+1
    file.close()
    print("\033[93mFile Created Successfully...\033[0m")


if __name__ == '__main__':
    create()
|
nilq/baby-python
|
python
|
import json
def save(name, csar):
    """Persist the deployment record for *name*.

    Writes ``<name>.deploy`` containing ``{"name": <csar>}``.
    """
    # TODO(@tadeboro): Temporary placeholder
    record = dict(name=csar)
    with open("{}.deploy".format(name), "w") as fd:
        json.dump(record, fd)
def load(name):
    """Read ``<name>.deploy`` back and return the stored csar value."""
    # TODO(@tadeboro): Temporary placeholder
    with open("{}.deploy".format(name)) as fd:
        record = json.load(fd)
    return record["name"]
|
nilq/baby-python
|
python
|
import os
import bpy
from bStream import *
from itertools import chain
import math
def load_anim(pth):
    """Load a .pth path-animation file and attach it to a new empty object.

    Reads six keyframe tracks (translation XYZ, rotation XYZ), builds
    location/rotation_euler F-curves on a fresh action, and links the
    empty into the current scene collection.

    :param pth: filesystem path of the .pth file
    """
    stream = bStream(path=pth)
    target_name = f"{os.path.basename(pth).split('.')[0]}_PTH"
    target_action = bpy.data.actions.new(f"{target_name}_PTH_ACN")
    target = bpy.data.objects.new(target_name, None)  # None data -> empty

    # Start loading anmation
    frame_count = stream.readUInt16()
    print(frame_count)
    stream.readUInt16() #Padding
    frames = {
        'x':[],
        'y':[],
        'z':[],
        'rx':[],
        'ry':[],
        'rz':[]
    }
    # Six track headers, in file order: translation XYZ then rotation XYZ.
    XGroup = PTHLoadGroup(stream)
    YGroup = PTHLoadGroup(stream)
    ZGroup = PTHLoadGroup(stream)
    RXGroup = PTHLoadGroup(stream)
    RYGroup = PTHLoadGroup(stream)
    RZGroup = PTHLoadGroup(stream)
    key_data_offset = stream.readUInt32()

    #Load Frame Data
    PTHLoadGroupData(stream, key_data_offset, XGroup, 'x', frames)
    PTHLoadGroupData(stream, key_data_offset, YGroup, 'y', frames)
    PTHLoadGroupData(stream, key_data_offset, ZGroup, 'z', frames)
    PTHLoadGroupData(stream, key_data_offset, RXGroup, 'rx', frames)
    PTHLoadGroupData(stream, key_data_offset, RYGroup, 'ry', frames)
    PTHLoadGroupData(stream, key_data_offset, RZGroup, 'rz', frames)

    #Set Frame Data
    bpy.context.scene.frame_end = frame_count
    target.animation_data_clear()
    target_anim_data = target.animation_data_create()
    # NOTE(review): the Y and Z tracks are swapped and one axis negated —
    # this looks like a Y-up (file) to Z-up (Blender) conversion; confirm
    # against known-good files.
    GenerateFCurves(target_action, "rotation_euler", 'x', 0, frames['rx'])
    GenerateFCurves(target_action, "rotation_euler", 'y', 1, frames['rz'], invert=True)
    GenerateFCurves(target_action, "rotation_euler", 'z', 2, frames['ry'])
    GenerateFCurves(target_action, "location", 'x', 0, frames['x'])
    GenerateFCurves(target_action, "location", 'y', 1, frames['z'], invert=True)
    GenerateFCurves(target_action, "location", 'z', 2, frames['y'])
    target_anim_data.action = target_action
    bpy.context.scene.collection.objects.link(target)
def GenerateFCurves(action, curve, track, track_index, keyframes, invert=False):
    """Create one F-curve on *action* and fill it with *keyframes*.

    :param curve: data path, e.g. "location" or "rotation_euler"
    :param track: axis letter, used only for the action-group name
    :param track_index: array index of the axis on the data path
    :param keyframes: list of [frame, value] pairs
    :param invert: negate every value first; NOTE this mutates the
        caller's *keyframes* list in place
    """
    curve = action.fcurves.new(curve, index=track_index, action_group=f"Loc{track.upper()}")
    curve.keyframe_points.add(count=len(keyframes))
    if(invert):
        for f in range(len(keyframes)):
            keyframes[f][1] = -keyframes[f][1]
    # foreach_set expects a flat [frame0, value0, frame1, value1, ...] list
    curve.keyframe_points.foreach_set("co", list(chain.from_iterable(keyframes)))
    curve.update()
def GenerateFCurvesRot(action, track, track_index, keyframes, invert=False):
    """Create a rotation_euler F-curve on *action* from *keyframes*.

    Values are optionally negated, then scaled by 0.0001533981
    (~2*pi/40960 — presumably the file's fixed-point angle unit; TODO
    confirm) and converted with math.degrees.  NOTE: both steps mutate
    the caller's *keyframes* list in place.
    """
    curve = action.fcurves.new("rotation_euler", index=track_index, action_group=f"Loc{track.upper()}")
    curve.keyframe_points.add(count=len(keyframes))
    if(invert):
        for f in range(len(keyframes)):
            keyframes[f][1] = -keyframes[f][1]
    for f in range(len(keyframes)):
        keyframes[f][1] = math.degrees(keyframes[f][1] * 0.0001533981)
    curve.keyframe_points.foreach_set("co", list(chain.from_iterable(keyframes)))
    curve.update()
def GenerateKeyframes(obj, data_path, keyframes):
    """Insert each (frame, value) pair of *keyframes* as a keyframe on
    obj[data_path], one keyframe_insert call per pair."""
    for frame, value in keyframes:
        obj[data_path] = value
        obj.keyframe_insert(data_path, frame=frame)
def PTHLoadGroup(stream):
    """Read one track header: three uint16s in file order (key count,
    begin index, element count)."""
    key_count = stream.readUInt16()
    begin_index = stream.readUInt16()
    element_count = stream.readUInt16()
    return {'KeyCount': key_count,
            'BeginIndex': begin_index,
            'ElementCount': element_count}
def PTHWriteGroup(stream, group):
    """Write one track header (key count, begin index, element count)
    as three uint16s."""
    for field in ('KeyCount', 'BeginIndex', 'ElementCount'):
        stream.writeUInt16(group[field])
def PTHLoadGroupData(stream, offset, group, out_pos, frames):
    """Read one track's key data into frames[out_pos] as [frame, value]
    pairs.

    Elements are 4-byte floats, so the track starts at
    offset + 4 * BeginIndex.  A 1-element track stores a value per key
    index; otherwise element 0 is the frame number, element 1 the value
    (any further elements — tangents, presumably — are read but dropped).
    """
    stream.seek(offset + (4 * group['BeginIndex']))
    for frame in range(0,group['KeyCount']):
        frame_data = [stream.readFloat() for x in range(0, group['ElementCount'])]
        if(group['ElementCount'] == 1):
            frames[out_pos].append([frame, frame_data[0]])
        else:
            frames[out_pos].append([int(frame_data[0]), frame_data[1]])
def PTHWriteGroupData(stream, curve, data_offset, dummy=None, invert=False):
    """Write one track's key data at the current position and return its
    header dict for PTHWriteGroup.

    With *dummy* set, a single constant float is written (1-element
    track).  Otherwise each keyframe is written as three floats: frame,
    value (negated when *invert*), and the slope toward the right
    handle.
    """
    # element index of this track relative to the start of the key data
    begin_index = int((stream.fhandle.tell() - data_offset) / 4)
    print(f'Writing Group with begin index {begin_index}')
    if(dummy is not None):
        stream.writeFloat(dummy)
        return {'KeyCount': 1, 'BeginIndex':begin_index, 'ElementCount':1}
    for keyframe in curve.keyframe_points:
        stream.writeFloat(keyframe.co[0])
        stream.writeFloat(keyframe.co[1] if not invert else -keyframe.co[1])
        # outgoing tangent: rise over run toward the right bezier handle
        stream.writeFloat((keyframe.co[1] - keyframe.handle_right[1]) / (keyframe.co[0] - keyframe.handle_right[0]))
    return {'KeyCount': len(curve.keyframe_points), 'BeginIndex':begin_index, 'ElementCount':3}
def save_anim(pth): #TODO
    """Write the active empty object's animation out as a .pth file.

    Returns False (leaving only the header written) when the active
    object is not an empty; True on success.
    """
    stream = bStream(path=pth)
    obj = bpy.context.view_layer.objects.active
    if(not(obj.type == 'EMPTY')):
        return False
    stream.writeUInt16(int(bpy.context.scene.frame_end))
    stream.writeUInt16(0)  # padding, mirrors the value skipped by load_anim
    # Remember where the six group headers belong; they are back-filled
    # once the key data has been written and begin indices are known.
    groups_definitoins = stream.fhandle.tell()
    stream.pad(36)
    keydata_offset = stream.fhandle.tell()
    stream.writeUInt32(0)  # placeholder, patched to data_offset at the end
    target_curves = obj.animation_data.action.fcurves
    data_offset = stream.fhandle.tell()
    # NOTE(review): assumes fcurves[0..2] are the location X/Y/Z curves;
    # Y and Z are swapped (Z negated) to undo load_anim's axis
    # conversion — confirm against files the game accepts.
    XGroup = PTHWriteGroupData(stream, target_curves[0], data_offset)
    YGroup = PTHWriteGroupData(stream, target_curves[2], data_offset)
    ZGroup = PTHWriteGroupData(stream, target_curves[1], data_offset, invert=True)
    # These groups are written manually due to not being anmiatible in blender or unknown
    UnkGroup1 = PTHWriteGroupData(stream, None, data_offset, dummy=0.0)
    UnkGroup2 = PTHWriteGroupData(stream, None, data_offset, dummy=0.0)
    UnkGroup3 = PTHWriteGroupData(stream, None, data_offset, dummy=0.0)
    stream.seek(groups_definitoins)
    PTHWriteGroup(stream, XGroup)
    PTHWriteGroup(stream, YGroup)
    PTHWriteGroup(stream, ZGroup)
    PTHWriteGroup(stream, UnkGroup1)
    PTHWriteGroup(stream, UnkGroup2)
    PTHWriteGroup(stream, UnkGroup3)
    stream.seek(keydata_offset)
    stream.writeUInt32(data_offset)
    return True
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.5 on 2021-08-11 19:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the clinic API: lookup tables (Allergy,
    Diagnosis, InsuranceProvider, Medication), Patient, ProgressNote and
    Appointment.  Auto-generated by Django 3.2.5; avoid hand-editing.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Allergy',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Diagnosis',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='InsuranceProvider',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Medication',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.EmailField(blank=True, max_length=254, null=True, unique=True)),
                ('phone', models.CharField(blank=True, max_length=25, null=True, unique=True)),
                ('dob', models.DateField(blank=True, null=True)),
                ('insurance_member_id', models.CharField(blank=True, max_length=254, null=True)),
                ('is_new', models.BooleanField(default=True)),
                ('sex', models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=2, null=True)),
                ('insurance_provider', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.insuranceprovider')),
            ],
        ),
        migrations.CreateModel(
            name='ProgressNote',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('weight', models.DecimalField(decimal_places=2, max_digits=6)),
                ('height', models.DecimalField(decimal_places=2, max_digits=6)),
                ('blood_pressure_sys', models.IntegerField()),
                ('blood_pressure_dia', models.IntegerField()),
                ('chief_complaint', models.CharField(blank=True, max_length=254, null=True)),
                ('medical_history', models.TextField(blank=True, null=True)),
                ('treatment', models.CharField(blank=True, max_length=254, null=True)),
                ('doctors_orders', models.CharField(blank=True, max_length=254, null=True)),
                ('allergies', models.ManyToManyField(blank=True, to='api.Allergy')),
                ('diagnoses', models.ManyToManyField(blank=True, to='api.Diagnosis')),
                ('medication', models.ManyToManyField(blank=True, to='api.Medication')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.patient')),
            ],
        ),
        migrations.CreateModel(
            name='Appointment',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start', models.DateTimeField()),
                ('end', models.DateTimeField()),
                ('status', models.CharField(choices=[('SC', 'Scheduled'), ('CI', 'Checked In'), ('DO', 'Done')], default='SC', max_length=2)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('notes', models.TextField(blank=True, null=True)),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.patient')),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
from src.extract_old_site.modules import excavation_details_page as exc_det
import pathlib
import os
from unittest import mock
import pytest
# Structure 1, /dig/html/excavations/exc_is.html
exc_is_html_str = """
<html><head><title>Excavating Occaneechi Town - [Excavations]</title></head>
<frameset cols="408,*" border=1>
<frame name="image" src="slid_azt.html" marginwidth=1 marginheight=1>
<frame name="ctrl" src="ctrl_is.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
ctrl_is_html_str = """
<html><frameset rows="75%,25%" border=1>
<frame name="info" src="info_is.html" marginwidth=1 marginheight=1>
<frame name="zoom" src="zoom_is.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
info_is_html_str = """
<html><body>
<big><b>Structure 1</b></big><p>
<img align="right" src="../images/l/l240r60.gif">
Type: Structure<br>
Dimensions<br>
Length: 13.4 ft<br>
Width: 11.3 ft<br>
Depth: Unknown ft<br>
Volume: Unknown ft<sup><small>3</small></sup><br>
Area: 115.88 ft<sup><small>2</small></sup><p>
<table border=2 width="100%">
<tr><td rowspan=4>Image:<br>
<a href="slid_azt.html" target="image">1</a>
<a href="slid_bdo.html" target="image">2</a>
<a href="slid_bet.html" target="image">3</a>
</td>
<td align="center"><a href="../artifacts/art_is0.html" target="_top">Artifacts</a></td></tr>
<tr><td align="center">Description</td></tr>
<tr><td align="center"><a href="../maps/exc2.html" target="_top">Map</a></td></tr>
<tr><td align="center"><a href="../index.html" target="_top">Home</a></td></tr>
</table></body></html>
"""
zoom_is_html_str = """
<html><body><big>Zoom To:</big><p>
<a href="exc_cl.html" target="_top">Feature 9</a><br>
<a href="exc_fg.html" target="_top">Sq. 240R60</a><br>
<a href="exc_fh.html" target="_top">Sq. 240R70</a><br>
<a href="exc_ft.html" target="_top">Sq. 250R60</a><br>
<a href="exc_fu.html" target="_top">Sq. 250R70</a><br>
</body></html>
"""
slid_azt_html_str = """
<html><body><map name="hotlinks">
<area coords="144,140,224,214" target="_top" href="exc_cl.html">
<area coords="38,78,80,127" target="_top" href="exc_au.html">
<area coords="359,292,388,361" target="_top" href="exc_am.html">
<area coords="364,134,389,198" target="_top" href="exc_iy.html">
<area coords="326,155,363,190" target="_top" href="exc_iy.html">
<area coords="305,3,363,154" target="_top" href="exc_iy.html">
<area coords="364,90,388,133" target="_top" href="exc_ae.html">
<area coords="364,3,389,89" target="_top" href="exc_iy.html">
</map><center><img src="../images/s/str1.gif" usemap="#hotlinks" border=0><p>Figure 1039. Structure 1, plan view (view to north).</center></body></html>
"""
slid_bdo_html_str = """
<html><body><map name="hotlinks">
<area coords="43,102,193,152" target="_top" href="exc_is.html">
<area coords="22,151,113,219" target="_top" href="exc_is.html">
<area coords="194,118,243,220" target="_top" href="exc_is.html">
<area coords="16,220,237,298" target="_top" href="exc_is.html">
<area coords="114,152,196,223" target="_top" href="exc_cl.html">
</map><center><img src="../images/x16/x6801.jpeg" usemap="#hotlinks" border=0><p>Figure 1038. Structure 1 at top of subsoil (view to southwest).</center></body></html>
"""
slid_bet_html_str = """
<html><body><map name="hotlinks">
</map><center><img src="../images/x16/x6968.jpeg" usemap="#hotlinks" border=0><p>Figure 1037. Structure 1 after excavation (view to southwest).</center></body></html>
"""
# Sq. 240R60, /dig/html/excavations/exc_fg.html
exc_fg_html_str = """
<html><head><title>Excavating Occaneechi Town - [Excavations]</title></head>
<frameset cols="408,*" border=1>
<frame name="image" src="slid_ada.html" marginwidth=1 marginheight=1>
<frame name="ctrl" src="ctrl_fg.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
ctrl_fg_html_str = """
<html><frameset rows="75%,25%" border=1>
<frame name="info" src="info_fg.html" marginwidth=1 marginheight=1>
<frame name="zoom" src="zoom_fg.html" marginwidth=1 marginheight=1>
</frameset><noframes>you need frames</noframes></html>
"""
info_fg_html_str = """
<html><body>
<big><b>Sq. 240R60</b></big><p>
<img align="right" src="../images/l/l240r60.gif">
Type: Excavation Unit<br>
Dimensions<br>
Length: 10.0 ft<br>
Width: 10.0 ft<br>
Depth: 0.6 ft<br>
Volume: 61.06 ft<sup><small>3</small></sup><br>
Area: 100.00 ft<sup><small>2</small></sup><p>
<table border=2 width="100%">
<tr><td rowspan=4>Image:<br>
<a href="slid_ada.html" target="image">1</a>
<a href="slid_bde.html" target="image">2</a>
</td>
<td align="center"><a href="../artifacts/art_fg0.html" target="_top">Artifacts</a></td></tr>
<tr><td align="center">Description</td></tr>
<tr><td align="center"><a href="../maps/exc0.html" target="_top">Map</a></td></tr>
<tr><td align="center"><a href="../index.html" target="_top">Home</a></td></tr>
</table></body></html>
"""
zoom_fg_html_str = """
<html><body><big>Zoom To:</big><p>
<a href="exc_cl.html" target="_top">Feature 9</a><br>
<a href="exc_is.html" target="_top">Structure 1</a><br>
</body></html>
"""
slid_ada_html_str = """
<html><body><map name="hotlinks">
<area coords="70,283,388,389" target="_top" href="exc_is.html">
<area coords="149,197,386,282" target="_top" href="exc_is.html">
<area coords="343,1,388,197" target="_top" href="exc_is.html">
<area coords="14,1,148,282" target="_top" href="exc_is.html">
<area coords="149,0,342,196" target="_top" href="exc_cl.html">
</map><center><img src="../images/2/240r60.gif" usemap="#hotlinks" border=0><p>Figure 860. Sq. 240R60, top of subsoil (view to north).</center></body></html>
"""
slid_bde_html_str = """
<html><body><map name="hotlinks">
<area coords="175,100,312,160" target="_top" href="exc_cl.html">
<area coords="70,93,113,215" target="_top" href="exc_is.html">
</map><center><img src="../images/x16/x6730.jpeg" usemap="#hotlinks" border=0><p>Figure 859. Sq. 240R60 at top of subsoil (view to north).</center></body></html>
"""
# Extracted
slid_azt_extracted = {
"path": "/dig/html/images/s/str1.gif",
"htmlPagePath": "/dig/html/excavations/slid_azt.html",
"figureNum": "1039",
"caption": "Structure 1, plan view (view to north).",
"clickableAreas": [
{"x1": 144, "y1": 140, "x2": 224, "y2": 214,
"path": "/dig/html/excavations/exc_cl.html"},
{"x1": 38, "y1": 78, "x2": 80, "y2": 127,
"path": "/dig/html/excavations/exc_au.html"},
{"x1": 359, "y1": 292, "x2": 388, "y2": 361,
"path": "/dig/html/excavations/exc_am.html"},
{"x1": 364, "y1": 134, "x2": 389, "y2": 198,
"path": "/dig/html/excavations/exc_iy.html"},
{"x1": 326, "y1": 155, "x2": 363, "y2": 190,
"path": "/dig/html/excavations/exc_iy.html"},
{"x1": 305, "y1": 3, "x2": 363, "y2": 154,
"path": "/dig/html/excavations/exc_iy.html"},
{"x1": 364, "y1": 90, "x2": 388, "y2": 133,
"path": "/dig/html/excavations/exc_ae.html"},
{"x1": 364, "y1": 3, "x2": 389, "y2": 89,
"path": "/dig/html/excavations/exc_iy.html"}
],
"originalDimensions": {
"width": 390,
"height": 390
}
}
slid_bdo_extracted = {
"path": "/dig/html/images/x16/x6801.jpeg",
"htmlPagePath": "/dig/html/excavations/slid_bdo.html",
"figureNum": "1038",
"caption": "Structure 1 at top of subsoil (view to southwest).",
"clickableAreas": [
{"x1": 43, "y1": 102, "x2": 193, "y2": 152,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 22, "y1": 151, "x2": 113, "y2": 219,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 194, "y1": 118, "x2": 243, "y2": 220,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 16, "y1": 220, "x2": 237, "y2": 298,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 114, "y1": 152, "x2": 196, "y2": 223,
"path": "/dig/html/excavations/exc_cl.html"}
],
"originalDimensions": {
"width": 251,
"height": 390
}
}
slid_bet_extracted = {
"path": "/dig/html/images/x16/x6968.jpeg",
"htmlPagePath": "/dig/html/excavations/slid_bet.html",
"figureNum": "1037",
"caption": "Structure 1 after excavation (view to southwest).",
"clickableAreas": [],
"originalDimensions": {
"width": 390,
"height": 347
}
}
slid_ada_extracted = {
"path": "/dig/html/images/2/240r60.gif",
"htmlPagePath": "/dig/html/excavations/slid_ada.html",
"figureNum": "860",
"caption": "Sq. 240R60, top of subsoil (view to north).",
"clickableAreas": [
{"x1": 70, "y1": 283, "x2": 388, "y2": 389,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 149, "y1": 197, "x2": 386, "y2": 282,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 343, "y1": 1, "x2": 388, "y2": 197,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 14, "y1": 1, "x2": 148, "y2": 282,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 149, "y1": 0, "x2": 342, "y2": 196,
"path": "/dig/html/excavations/exc_cl.html"}
],
"originalDimensions": {
"width": 390,
"height": 390
}
}
slid_bde_extracted = {
"path": "/dig/html/images/x16/x6730.jpeg",
"htmlPagePath": "/dig/html/excavations/slid_bde.html",
"figureNum": "859",
"caption": "Sq. 240R60 at top of subsoil (view to north).",
"clickableAreas": [
{"x1": 175, "y1": 100, "x2": 312, "y2": 160,
"path": "/dig/html/excavations/exc_is.html"},
{"x1": 70, "y1": 93, "x2": 113, "y2": 215,
"path": "/dig/html/excavations/exc_is.html"}
],
"originalDimensions": {
"width": 390,
"height": 275
}
}
info_is_extracted = {
"name": "Structure 1",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "13.4 ft",
"Width": "11.3 ft",
"Depth": "Unknown ft"
},
"Type": "Structure",
"Volume": "Unknown ft<sup>3</sup>",
"Area": "115.88 ft<sup>2</sup>"
},
"images": [slid_azt_extracted, slid_bdo_extracted, slid_bet_extracted],
"artifactsPath": "/dig/html/artifacts/art_is0.html",
"descriptionPath": None
}
info_fg_extracted = {
"name": "Sq. 240R60",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "10.0 ft",
"Width": "10.0 ft",
"Depth": "0.6 ft"
},
"Type": "Excavation Unit",
"Volume": "61.06 ft<sup>3</sup>",
"Area": "100.00 ft<sup>2</sup>"
},
"images": [slid_ada_extracted, slid_bde_extracted],
"artifactsPath": "/dig/html/artifacts/art_fg0.html",
"descriptionPath": None
}
zoom_is_extracted = [{
"name": "Feature 9",
"path": "/dig/html/excavations/exc_cl.html"
}, {
"name": "Sq. 240R60",
"path": "/dig/html/excavations/exc_fg.html"
}, {
"name": "Sq. 240R70",
"path": "/dig/html/excavations/exc_fh.html"
}, {
"name": "Sq. 250R60",
"path": "/dig/html/excavations/exc_ft.html"
}, {
"name": "Sq. 250R70",
"path": "/dig/html/excavations/exc_fu.html"
}]
zoom_fg_extracted = [{
"name": "Feature 9",
"path": "/dig/html/excavations/exc_cl.html"
}, {
"name": "Structure 1",
"path": "/dig/html/excavations/exc_is.html"
}]
ctrl_is_fully_extracted = {
"name": "Structure 1",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "13.4 ft",
"Width": "11.3 ft",
"Depth": "Unknown ft"
},
"Type": "Structure",
"Volume": "Unknown ft<sup>3</sup>",
"Area": "115.88 ft<sup>2</sup>"
},
"images": [slid_azt_extracted, slid_bdo_extracted, slid_bet_extracted],
"artifactsPath": "/dig/html/artifacts/art_is0.html",
"descriptionPath": None,
"relatedElements": zoom_is_extracted
}
ctrl_fg_fully_extracted = {
"name": "Sq. 240R60",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "10.0 ft",
"Width": "10.0 ft",
"Depth": "0.6 ft"
},
"Type": "Excavation Unit",
"Volume": "61.06 ft<sup>3</sup>",
"Area": "100.00 ft<sup>2</sup>"
},
"images": [slid_ada_extracted, slid_bde_extracted],
"artifactsPath": "/dig/html/artifacts/art_fg0.html",
"descriptionPath": None,
"relatedElements": zoom_fg_extracted
}
# fg, then is according to how mock_iterdir is defined later on
exc_dir_fully_extracted = [{
"name": "Sq. 240R60",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "10.0 ft",
"Width": "10.0 ft",
"Depth": "0.6 ft"
},
"Type": "Excavation Unit",
"Volume": "61.06 ft<sup>3</sup>",
"Area": "100.00 ft<sup>2</sup>"
},
"images": [slid_ada_extracted, slid_bde_extracted],
"artifactsPath": "/dig/html/artifacts/art_fg0.html",
"descriptionPath": None,
"relatedElements": zoom_fg_extracted,
"path": "/dig/html/excavations/exc_fg.html"
}, {
"name": "Structure 1",
"miniMapIcon": "/dig/html/images/l/l240r60.gif",
"info": {
"Dimensions": {
"Length": "13.4 ft",
"Width": "11.3 ft",
"Depth": "Unknown ft"
},
"Type": "Structure",
"Volume": "Unknown ft<sup>3</sup>",
"Area": "115.88 ft<sup>2</sup>"
},
"images": [slid_azt_extracted, slid_bdo_extracted, slid_bet_extracted],
"artifactsPath": "/dig/html/artifacts/art_is0.html",
"descriptionPath": None,
"relatedElements": zoom_is_extracted,
"path": "/dig/html/excavations/exc_is.html"
}]
def mock_extract_image_page(image_html_str, extra1, extra2, extra3):
    """Stand-in for extract_image_page: map a known slide-page HTML string to
    its pre-extracted fixture dict. The three extra positional parameters
    mirror the real function's signature and are ignored."""
    known_pages = {
        slid_ada_html_str: slid_ada_extracted,
        slid_azt_html_str: slid_azt_extracted,
        slid_bde_html_str: slid_bde_extracted,
        slid_bdo_html_str: slid_bdo_extracted,
        slid_bet_html_str: slid_bet_extracted,
    }
    page = known_pages.get(image_html_str)
    if page is None:
        raise Exception("did not find details for this particular img string")
    return page
def mock_readfile(filename, parent_dir_path_obj):
    """Stand-in for the read-file dependency: resolve the requested path and
    return the matching fixture HTML string for files that live under
    C:/dig/html/excavations."""
    resolved = pathlib.Path(os.path.normpath(parent_dir_path_obj / filename))
    excavation_files = {
        # Structure 1
        "slid_azt.html": slid_azt_html_str,
        "slid_bdo.html": slid_bdo_html_str,
        "slid_bet.html": slid_bet_html_str,
        "zoom_is.html": zoom_is_html_str,
        "info_is.html": info_is_html_str,
        "ctrl_is.html": ctrl_is_html_str,
        "exc_is.html": exc_is_html_str,
        # Sq. 240R60, /dig/html/excavations/exc_fg.html
        "exc_fg.html": exc_fg_html_str,
        "ctrl_fg.html": ctrl_fg_html_str,
        "info_fg.html": info_fg_html_str,
        "zoom_fg.html": zoom_fg_html_str,
        "slid_ada.html": slid_ada_html_str,
        "slid_bde.html": slid_bde_html_str,
    }
    if resolved.parent.as_posix() == "C:/dig/html/excavations":
        content = excavation_files.get(resolved.name)
        if content is not None:
            return content
    raise Exception("did not find file in mock_readfile")
@pytest.mark.parametrize("zoom_html_str,expected_result", [
    (zoom_is_html_str, zoom_is_extracted),
    (zoom_fg_html_str, zoom_fg_extracted),
    ("""
<html><body><big>Zoom To:</big><p>
<a href="exc_gw.html" target="_top">Sq. 270R90</a><br>
<a href="exc_gn.html" target="_top">Sq. 270R100</a><br>
</body></html>
""", [{
        "name": "Sq. 270R90",
        "path": "/dig/html/excavations/exc_gw.html"
    }, {
        "name": "Sq. 270R100",
        "path": "/dig/html/excavations/exc_gn.html"
    }])
])
def test_extract_zoom_to(zoom_html_str, expected_result):
    # Each "Zoom To:" link should become a dict with the link text and the
    # site-absolute path resolved from the relative href.
    assert exc_det.extract_zoom_to(zoom_html_str) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
@pytest.mark.parametrize("info_html_str,expected_result", [
    (info_fg_html_str, info_fg_extracted),
    (info_is_html_str, info_is_extracted)
])
def test_extract_info_page(mock_ext_i_p, info_html_str, expected_result):
    # Route image-page extraction through the fixture lookup so the test
    # needs no real image HTML parsing.
    mock_ext_i_p.side_effect = mock_extract_image_page
    assert exc_det.extract_info_page(
        info_html_str, "/dig/html/excavations", "C:/", mock_readfile
    ) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
@pytest.mark.parametrize("ctrl_html_str,expected_result", [
    (ctrl_fg_html_str, ctrl_fg_fully_extracted),
    (ctrl_is_html_str, ctrl_is_fully_extracted)
])
def test_get_ctrl_page_contents(mock_ext_i_p, ctrl_html_str, expected_result):
    # A ctrl page combines the info frame and the "Zoom To" frame; expected
    # results therefore include relatedElements.
    mock_ext_i_p.side_effect = mock_extract_image_page
    assert exc_det.get_ctrl_page_contents(
        ctrl_html_str, "/dig/html/excavations", "C:/", mock_readfile
    ) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
@pytest.mark.parametrize("exc_html_str,expected_result", [
    (exc_fg_html_str, ctrl_fg_fully_extracted),
    (exc_is_html_str, ctrl_is_fully_extracted)
])
def test_get_exc_page_contents(mock_ext_i_p, exc_html_str, expected_result):
    # An exc_* frameset page should resolve through its ctrl frame to the
    # same fully-extracted result as the ctrl page itself.
    mock_ext_i_p.side_effect = mock_extract_image_page
    assert exc_det.get_exc_page_contents(
        exc_html_str, "/dig/html/excavations", "C:/", mock_readfile
    ) == expected_result
@mock.patch("src.extract_old_site.modules.excavation_details_page.extract_image_page")
def test_extract_all_exc_pages(mock_ext_i_p):
    # Route image-page extraction through the fixture lookup.
    mock_ext_i_p.side_effect = mock_extract_image_page
    with mock.patch.object(pathlib.Path, "iterdir") as mock_iterdir:
        # Pretend the excavations directory holds exactly these files; only
        # the exc_*.html entries should produce output records (fg then is,
        # matching exc_dir_fully_extracted's order).
        filenames_list = [
            "exc_fg.html", "exc_is.html", "info_fg.html", "info_is.html",
            "slid_ada.html", "slid_azt.html", "slid_bde.html", "slid_bdo.html", "slid_bet.html",
            "zoom_fg.html", "zoom_is.html",
        ]
        iterdir_path_objs = [(pathlib.Path("C:/dig/html/excavations") / filename)
                             for filename in filenames_list]
        mock_iterdir.return_value = iterdir_path_objs
        assert exc_det.extract_all_exc_pages("C:/", mock_readfile) == exc_dir_fully_extracted
|
nilq/baby-python
|
python
|
import numpy as np
import sys
import os
from keras.models import load_model
sys.path.append("../utilities")
import constants
from data import get_train_test
from metrics import plot_n_roc_sic
datasets_c = ['h_qq_rot_charged', 'h_gg_rot_charged', 'cp_qq_rot_charged', 'qx_qg_rot_charged', 's8_gg_rot_charged', 'zp_qq_rot_charged']
datasets_s = ['h_qq', 'h_gg', 'cp_qq', 'qx_qg', 's8_gg', 'zp_qq']
def comp_all(i, datasets=datasets_s, n=150000):
    """Compare signal dataset ``datasets[i]`` against every other dataset.

    For each pairing, loads the train/test split, loads the best saved model
    (whichever of sig_vs_bg / bg_vs_sig exists on disk), and draws combined
    SIC and ROC curves into final_curves/.

    Parameters
    ----------
    i : index of the signal dataset within ``datasets``
    datasets : list of dataset names (defaults to the standard set)
    n : number of samples passed to get_train_test
    """
    name = 'all_' + datasets[i] + '_comps'
    X_tests = []
    y_tests = []
    models = []
    model_types = []
    labels = []
    sig = datasets[i]
    # Iterate over the actual list length instead of a hard-coded 6 so a
    # shorter/longer dataset list does not break this function.
    for j in range(len(datasets)):
        if j == i:
            continue
        bg = datasets[j]
        constants.SIG_H5 = os.path.join(constants.DATA_DIR, sig + '.h5')
        constants.BG_H5 = os.path.join(constants.DATA_DIR, bg + '.h5')
        X_train, X_test, y_train, y_test, \
            _, _, sig_metadata, \
            bg_metadata, _ = get_train_test(n=n)
        # The saved model may be named with either ordering of the pair.
        if os.path.isfile('../best_model/' + sig + '_vs_' + bg + '_model'):
            model_name = sig + '_vs_' + bg
        else:
            model_name = bg + '_vs_' + sig
        model = load_model('../best_model/' + model_name + '_model')
        X_tests.append(X_test)
        y_tests.append(y_test)
        models.append(model)
        model_types.append(True)
        labels.append(model_name)
    plot_n_roc_sic(name, 'final_curves/sic_'+name, X_tests, y_tests, models, model_types, labels, True, fontfac=0.5)
    plot_n_roc_sic(name, 'final_curves/roc_'+name, X_tests, y_tests, models, model_types, labels, False, fontfac=0.5)
if __name__ == '__main__':
    # Produce the comparison plots once for every dataset acting as signal.
    for idx in range(len(datasets_s)):
        comp_all(idx)
|
nilq/baby-python
|
python
|
# Default metadata values, presumably for SWID tag generation (regid /
# entity-name / hash-algorithm are SWID concepts) — TODO confirm against the
# consuming module.
DEFAULT_REGID = u'strongswan.org'
DEFAULT_ENTITY_NAME = u'strongSwan Project'
DEFAULT_HASH_ALGORITHM = u'sha256'
|
nilq/baby-python
|
python
|
import sys
import os
import cv2 # it is necessary to use cv2 library
import numpy as np
def main( background, input_filename, output_filename ):
    """Write a binary foreground mask to *output_filename*.

    Pixels whose Euclidean color distance from the background image exceeds
    10 become 255; everything else is 0. Morphological opening then closing
    removes speckle noise and fills small holes.
    """
    # Read the background and input images (BGR uint8 arrays).
    bak = cv2.imread(background)
    img = cv2.imread(input_filename)
    # BUG FIX: subtracting uint8 arrays wraps around (e.g. 1 - 2 == 255),
    # which corrupted the distance for pixels darker than the background.
    # Cast to a signed type before subtracting.
    dif = img.astype(np.int32) - bak.astype(np.int32)
    dif = np.sqrt( np.sum( dif * dif, axis=2 ) )
    msk = ( dif > 10 ).astype(np.uint8)*255
    kernel = np.ones((3,3),np.uint8)
    # opening: erode then dilate to drop isolated foreground specks
    msk = cv2.erode(msk, kernel,iterations = 1)
    msk = cv2.dilate(msk, kernel,iterations = 1)
    # closing: dilate then erode to fill small holes in the mask
    msk = cv2.dilate(msk, kernel,iterations = 2)
    msk = cv2.erode(msk, kernel,iterations = 2)
    cv2.imwrite( output_filename, msk )
if( __name__ == '__main__' ):
    # BUG FIX: the script consumes sys.argv[1..3], so len(sys.argv) must be
    # at least 4 (script name + 3 arguments); the previous `>= 3` check let
    # a two-argument invocation crash with IndexError on sys.argv[3].
    if( len(sys.argv) >= 4 ):
        main( sys.argv[1], sys.argv[2], sys.argv[3] )
    else:
        print( 'usage: python '+sys.argv[0]+' background input_filenname output_filename' )
|
nilq/baby-python
|
python
|
from pycylon import Table
from pycylon import CylonContext
import numpy as np
# Demo: build a local (non-distributed) Cylon table from a dict and convert
# it to the other supported in-memory representations.
ctx: CylonContext = CylonContext(config=None, distributed=False)
data_dictionary = {'col-1': [1, 2, 3, 4], 'col-2': [5, 6, 7, 8], 'col-3': [9, 10, 11, 12]}
tb: Table = Table.from_pydict(ctx, data_dictionary)
print("Convert to PyArrow Table")
print(tb.to_arrow())
print("Convert to Pandas")
print(tb.to_pandas())
print("Convert to Dictionary")  # label typo fixed ("Dictionar")
print(tb.to_pydict())
print("Convert to Numpy")
# Fortran (column-major) layout, zero-copy view of the table's buffers
npy: np.ndarray = tb.to_numpy(order='F', zero_copy_only=True)
print(npy)
print(npy.flags)
# C (row-major) layout, zero-copy view
npy: np.ndarray = tb.to_numpy(order='C', zero_copy_only=True)
print(npy)
print(npy.flags)
|
nilq/baby-python
|
python
|
from django.forms import Form
def set_form_widgets_attrs(form: Form, attrs: dict):
    """Applies the given HTML attributes to each field widget of a form.

    Callable attribute values are invoked with the field, and their return
    value is used as the attribute value.

    Example:
        set_form_widgets_attrs(my_form, {'class': 'clickable'})
    """
    for field in form.fields.values():  # keys were unused; iterate values directly
        attrs_ = dict(attrs)
        for name, val in attrs.items():
            if callable(val):  # idiomatic replacement for hasattr(val, '__call__')
                attrs_[name] = val(field)
        # NOTE(review): in Django >= 1.11 Widget.build_attrs takes
        # (base_attrs, extra_attrs); confirm this single-argument call is
        # valid for the Django version in use.
        field.widget.attrs = field.widget.build_attrs(attrs_)
|
nilq/baby-python
|
python
|
# add_request_point.py
from arcgis.features import Feature, FeatureSet
from arcgis.geometry import Point
from copy import deepcopy
def add_request_point(gis, item_id, address_json, ip_address, user_agent, request_time):
    """Append one point feature describing an incoming web request to the
    first layer of the given hosted feature-layer item, and return the
    result of the edit operation."""
    # locate the editable feature layer on the portal item
    target_item = gis.content.get(item_id)
    target_layer = target_item.layers[0]
    # geometry: a WGS84 point from the geolocation lookup
    location = Point({'x':address_json['longitude'],
                      'y':address_json['latitude'],
                      'spatialReference':{'wkid':4326}
                      })
    # attributes: requester details; timestamp stored as epoch milliseconds
    attributes = {'ip_address':ip_address,
                  'user_agent':user_agent,
                  'request_address': f"{address_json['city']}, {address_json['region_name']}, {address_json['country_name']}, {address_json['zip']}",
                  'request_time2':request_time.timestamp()*1000
                  }
    feature = Feature(geometry=location, attributes=attributes)
    # apply the edit and hand back the layer's response
    return target_layer.edit_features(adds=[feature])
|
nilq/baby-python
|
python
|
# Package entry point: re-export send_message as the public API surface.
from .utils import send_message
__version__ = '1.0.1'
__all__ = ['send_message']
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.dispatch import Signal

# NOTE(review): Signal(providing_args=...) is deprecated since Django 3.0
# and removed in 4.0 — confirm the Django version this project targets.

# Fired so listeners can validate a custom order-field value; sent with
# kwarg `value`.
validate_custom_order_field = Signal(
    providing_args=[
        'value',
    ]
)

# Fired when an order's invoice has been paid; sent with kwarg `invoice`.
order_paid = Signal(
    providing_args=[
        'invoice',
    ]
)
|
nilq/baby-python
|
python
|
"""
utility functions
"""
import pandas as pd
import numpy as np
TEST_DF = pd.DataFrame([1,2,3,4,5,6])
def five_mult(x):
    """Return *x* multiplied by 5."""
    return x * 5
def tri_recursion(k):
    """Return the k-th triangular number (k + (k-1) + ... + 1) via
    recursion; any k <= 0 yields 0."""
    if k <= 0:
        return 0
    return k + tri_recursion(k - 1)
def sum_two_numbers(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created at 2019-10-30
@author: dongwan.kim
Converting 'https://nlp.seas.harvard.edu/2018/04/03/attention.html'
which is pytorch implementation
to Keras implementation.
# ToDo: copy layer test with simple multi hidden layer regression.
"""
import copy
import numpy as np
import math
import matplotlib.pyplot as plt
from functools import partial
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Dense, Flatten, Conv1D, Dropout, Embedding, Input, Lambda, Layer, Softmax
)
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import InputSpec
from transformer.test_config import *
class PositionalEncodingK(Layer):
    """Sinusoidal positional encoding added to token embeddings, followed by
    dropout (as in 'Attention Is All You Need', section 3.5).

    >>> # plotting example
    >>> d_model = 12
    >>> num_sentences = 1
    >>> num_tokens_in_sentence = 100
    >>> plt.figure(figsize=(15, 5))
    >>> pe = PositionalEncodingK(d_model=d_model, dropout_rate=0)
    >>> y = pe(K.zeros((num_sentences, num_tokens_in_sentence, d_model)))
    >>> plt.plot(np.arange(num_tokens_in_sentence), K.eval(y)[0, :, 4:8])
    >>> plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
    >>> plt.show()
    """
    def __init__(self, d_model, dropout_rate, max_len=5000, **kwargs):
        """
        Parameters
        ----------
        d_model: embedding dim
        dropout_rate: dropout applied after the encoding is added
        max_len: max number of tokens in sentence
        kwargs: forwarded to the Layer base class
        """
        super(PositionalEncodingK, self).__init__(**kwargs)
        self.dropout = Dropout(rate=dropout_rate)
        # Precompute the (1, max_len, d_model) encoding table once.
        pe = np.zeros([max_len, d_model])
        position = np.expand_dims(np.array(range(max_len)), 1)
        # Geometric frequency progression: 10000^(-2i/d_model).
        div_term = np.exp(
            np.arange(start=0.0, stop=d_model, step=2) * -(math.log(10000.0) / d_model)
        )
        pe[:, 0::2] = np.sin(position * div_term)  # even dims: sine
        pe[:, 1::2] = np.cos(position * div_term)  # odd dims: cosine
        self.pe = np.expand_dims(pe, 0)
    def call(self, x):
        # x = x + K.constant(self.pe[:, :x.shape[1].value])
        # Add the encodings for the first x.shape[1] positions, then dropout.
        x = x + K.constant(self.pe[:, :x.shape[1]])
        return self.dropout(x)
    def compute_output_shape(self, input_shape):
        # Purely additive: the shape is unchanged.
        return input_shape
class EmbeddingsK(Layer):
    """Token-embedding layer whose output is scaled by sqrt(d_model)
    (section 3.4 of 'Attention Is All You Need').

    >>> x = K.constant([[0, 6, 1, 1, 1]])  # one sentence with 5 tokens
    >>> y = EmbeddingsK(d_model=12, vocab=7)(x)  # embed into 12 dims
    >>> out = K.eval(y)

    >>> # with a fixed (frozen) pre-trained weight matrix:
    >>> np.random.seed(0)
    >>> emb_weight = np.random.rand(7, 12)  # 7 tokens total, hidden size 12
    >>> x = K.constant([list(range(7))])
    >>> y = EmbeddingsK(d_model=12, vocab=7, weight=emb_weight)(x)
    >>> test_emb_keras = K.eval(y)
    """
    def __init__(self, d_model, vocab, weight=None):
        """
        Parameters
        ----------
        d_model : 512 or 1024 or ..
        vocab : size of token dict
        weight : optional ndarray of shape (vocab, d_model); when supplied,
            the embedding is initialized from it and frozen (trainable=False)
        """
        super(EmbeddingsK, self).__init__()
        self.d_model = d_model
        if weight is None:
            self.lut = Embedding(input_dim=vocab, output_dim=d_model)
        elif isinstance(weight, np.ndarray):
            self.lut = Embedding(input_dim=vocab, output_dim=d_model, weights=[weight],
                                 trainable=False)
        else:
            raise ValueError('Invalid weight')
    def call(self, x):
        # Scale by sqrt(d_model) so embedding magnitudes match the positional
        # encodings added downstream.
        return self.lut(x) * math.sqrt(self.d_model)
class LayerNormK(Layer):
    """Layer normalization over the last axis with learned scale (a_2) and
    bias (b_2).

    btw in TF2.0, LayerNormalization functionality is provided.

    >>> ln = LayerNormK(features=12)
    >>> x = K.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]])  # one token with d_model=12
    >>> y = K.eval(ln(x))
    """
    def __init__(self, features, eps=1e-6):
        super(LayerNormK, self).__init__()
        self.features = features  # d_model
        self.eps = eps  # guards against division by zero when std == 0
        self.a_2 = None  # learned scale, created in build()
        self.b_2 = None  # learned bias, created in build()
    def build(self, _):
        """Create the scale/bias weights.

        weights are shared for all layer normalization.
        according to description of add_weight function
        'Adds a new variable to the layer, or gets an existing one; returns it'
        """
        self.a_2 = self.add_weight(
            name='layer_norm_scale',
            shape=(self.features,),
            initializer='ones',
            trainable=True
        )
        self.b_2 = self.add_weight(
            name='layer_norm_bias',
            shape=(self.features,),
            initializer='zeros',
            trainable=True
        )
        return super(LayerNormK, self).build(self.features)
    def call(self, x):
        # Normalize across the feature (last) axis, then rescale and shift.
        mean = K.mean(x=x, axis=-1, keepdims=True)
        std = K.std(x=x, axis=-1, keepdims=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class GeneratorK(Layer):
    """Final output layer: linear projection to vocab size followed by
    log-softmax.

    >>> ge = GeneratorK(d_model=12, vocab=7)
    >>> x = K.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]])  # output of final layer
    >>> out = K.eval(ge(x))
    >>> print(out, out.shape, K.eval(K.argmax(out)))
    """
    def __init__(self, d_model, vocab):
        """
        Parameters
        ----------
        d_model: hidden size
        vocab: size of token dict
        """
        super(GeneratorK, self).__init__()
        self.proj = Dense(input_shape=(d_model,), units=vocab)
    def call(self, x):
        """
        BUG FIX: self.proj was created but never applied, so the layer
        returned log-softmax over the d_model features instead of projecting
        to vocab logits first (the class doc says "linear + softmax").

        softmax followed by log is not numerically stable;
        switch to log_softmax after upgrading to tf 2.0.
        """
        return K.log(x=K.softmax(self.proj(x), axis=-1))
def subsequent_mask_k(size):
    """
    Mask out subsequent positions, so decoder self-attention at position i
    may only look at positions <= i.

    Boolean analogue of the torch version:
    >>> subsequent_mask_k(3)
    tensor([
        [
            [1, 0, 0],
            [1, 1, 0],
            [1, 1, 1]
        ]], dtype=torch.uint8) # [1, 3, 3]
    This function gives mask for a sentence with 'size' words.
    """
    attn_shape = (1, size, size)
    # Strictly-upper-triangular ones mark the forbidden (future) positions.
    subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    # Invert: True where attention IS allowed.
    return K.equal(K.constant(subsequent_mask), 0)
class BatchK:
    """One batch of source/target token-id tensors plus the attention masks
    the transformer needs (padding mask for src; padding + no-look-ahead
    mask for trg)."""
    def __init__(self, src, trg=None, pad=0):
        # src: token ids; src_mask is True where the token is not padding,
        # shaped with an extra axis for broadcasting over attention scores.
        self.src = src
        self.src_mask = K.expand_dims(K.not_equal(src, pad), axis=-2)
        if trg is not None:
            self.trg = trg[:, :-1]  # decoder input: without last token of sentence
            self.trg_y = trg[:, 1:]  # prediction target: without first token of sentence
            self.trg_mask = self.make_std_mask(self.trg, pad)
            # Count of real (non-pad) target tokens, e.g. for loss normalization.
            self.ntokens = K.sum(K.cast(K.not_equal(self.trg_y, pad), dtype='uint8'))
    @staticmethod
    def make_std_mask(trg, pad):
        # Combine the padding mask with the lower-triangular subsequent mask
        # so position i can only attend to non-pad positions <= i.
        trg_mask = K.expand_dims(K.not_equal(trg, pad), axis=-2)
        trg_mask = trg_mask & subsequent_mask_k(size=trg.shape.as_list()[-1])
        return trg_mask
class EncoderLayerK(Layer):
    """Placeholder for a single encoder block (self-attention +
    position-wise feed-forward); body not yet implemented."""
    def __init__(self):
        super(EncoderLayerK, self).__init__()
        # ToDo: implement
def clones_k(module, N):
    """Return N independent copies of a Keras layer, rebuilt from its config
    (each copy gets its own fresh weights).

    >>> d = Dense(input_shape=(d_model,), units=d_model)
    >>> d_list = clones_k(d, 4)

    Parameters
    ----------
    module: layer to be copied
    N: number of copy
    """
    # return [copy.deepcopy(module) for _ in range(N)] # probability not working
    # reference: https://keras.io/layers/about-keras-layers/
    config = module.get_config()
    return [type(module).from_config(config) for _ in range(N)]
def attention_k(q_w_q, k_w_k, v_w_v, mask=None, dropout=None):
    """Scaled dot-product attention.

    Parameters
    ----------
    q_w_q: (batch size, num heads, num tokens in sentence, d_model / d_k), (5, 2, 4, 6)
    k_w_k: same shape as q_w_q
    v_w_v: same shape as q_w_q
    mask: broadcastable to the score shape, e.g. (5, 1, 1, 4); entries equal
        to 0/False mark positions that must NOT be attended to
    dropout: dropout layer, not dropout rate

    Returns
    -------
    (weighted values, attention weights)
    """
    def masked_fill(x, mask, target_mask_val, filled_value=-1e9):
        # Mirror torch's Tensor.masked_fill: substitute filled_value wherever
        # mask == target_mask_val, keep x everywhere else.
        # BUG FIX: the previous expression tested `x != target_mask_val`
        # (the scores themselves), which zeroed out any score whose value
        # happened to equal 0 and never consulted the mask for kept entries.
        fill = K.cast(K.equal(K.cast(mask, x.dtype), target_mask_val), x.dtype)
        return x * (1 - fill) + fill * filled_value
    d_k = q_w_q.shape.as_list()[-1]
    # (batch, heads, q_len, k_len) similarity scores, scaled by sqrt(d_k).
    scores = K.batch_dot(q_w_q, k_w_k, axes=[3, 3]) / math.sqrt(d_k)
    if mask is not None:
        scores = masked_fill(scores, mask, 0, -1e9)
    p_attn = K.softmax(scores)
    if dropout is not None:
        p_attn = dropout(p_attn)
    return K.batch_dot(p_attn, v_w_v, axes=[3, 2]), p_attn
class MultiHeadedAttentionK(Layer):
    """Multi-head scaled dot-product attention with h parallel heads of
    width d_k = d_model / h."""
    def __init__(self, h, d_model, dropout=0.1, linears=None):
        """
        Parameters
        ----------
        h: number of heads
        d_model: model dimension; must be divisible by h
        dropout: dropout rate applied to the attention weights
        linears: optional list of exactly 4 Dense layers (W_q, W_k, W_v,
            output projection); fresh ones are created when omitted
        """
        super(MultiHeadedAttentionK, self).__init__()
        assert d_model % h == 0
        self.d_k = d_model // h  # d_k = d_v = d_model/h
        self.h = h  # number of heads
        if linears:
            assert len(linears) == 4
            self.linears = linears
        else:
            self.linears = clones_k(Dense(input_shape=(d_model,), units=d_model), 4)
        self.attn = None  # most recent attention weights, kept for inspection
        self.dropout = Dropout(rate=dropout)
    def call(self, query_key_value_mask):
        query, key, value, mask = query_key_value_mask
        if mask is not None:
            # Broadcast the same mask over every head: (batch, 1, 1, k_len).
            mask = K.expand_dims(mask, 1)
        # NOTE(review): shape.as_list()[0] is None for a dynamic batch
        # dimension; this layer assumes a statically known batch size.
        nbatches = query.shape.as_list()[0]
        # Project, then split d_model into h heads of width d_k:
        # (batch, tokens, d_model) -> (batch, h, tokens, d_k).
        q_w_q, k_w_k, v_w_v = [
            K.permute_dimensions(
                x=K.reshape(
                    x=l(x),
                    shape=(nbatches, -1, self.h, self.d_k)
                ),
                pattern=(0, 2, 1, 3))
            for l, x in zip(self.linears, (query, key, value))
        ]
        x, self.attn = attention_k(q_w_q, k_w_k, v_w_v, mask=mask, dropout=self.dropout)
        # BUG FIX: the concat-reshape previously used the module-level
        # globals `batch_size` and `d_model` (pulled in via
        # `from transformer.test_config import *`); derive both from the
        # layer's own state instead.
        x = K.reshape(K.permute_dimensions(x, pattern=(0, 2, 1, 3)),
                      shape=(nbatches, -1, self.h * self.d_k))
        return self.linears[-1](x)
class SublayerConnectionK(Layer):
    """Residual connection around a sublayer with pre-layer-norm and
    dropout: x + dropout(sublayer(norm(x)))."""
    def __init__(self, size, dropout, sublayer=None):
        """
        Parameters
        ----------
        size: features = d_model
        dropout: dropout rate
        sublayer: optional callable bound at construction time; may instead
            be passed per call. BUG FIX: the Transformer.build code in this
            file constructs SublayerConnectionK(size=..., sublayer=...,
            dropout=...), which the previous __init__ rejected with a
            TypeError.
        """
        super(SublayerConnectionK, self).__init__()
        self.norm = LayerNormK(features=size)
        self.dropout = Dropout(rate=dropout)
        self.sublayer = sublayer
    def call(self, x, sublayer=None):
        # Prefer an explicitly supplied sublayer; fall back to the bound one.
        fn = sublayer if sublayer is not None else self.sublayer
        return x + self.dropout(fn(self.norm(x)))
class PositionwiseFeedForwardK(Layer):
    """Position-wise feed-forward network: w_2(dropout(relu(w_1(x)))),
    expanding d_model -> d_ff and projecting back to d_model."""
    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForwardK, self).__init__()
        self.w_1 = Dense(input_shape=(d_model,), units=d_ff)  # expansion
        self.w_2 = Dense(input_shape=(d_ff,), units=d_model)  # projection back
        self.dropout = Dropout(rate=dropout)
    def call(self, x):
        return self.w_2(self.dropout(K.relu(self.w_1(x))))
class Transformer(Layer):
    """Full encoder-decoder Transformer.

    Call with ``[src, trg, src_mask, trg_mask]``; returns the decoder's
    layer-normalized hidden states (a generator/projection layer is
    expected to follow elsewhere).

    Parameters
    ----------
    d_model : int           model width
    src_vocab, trg_vocab : int  vocabulary sizes
    dropout_rate : float    dropout used throughout
    num_coder_blocks : int  number of encoder blocks and of decoder blocks
    num_heads : int         attention heads
    d_ff : int              feed-forward hidden width
    """

    def __init__(self, d_model, src_vocab, trg_vocab, dropout_rate, num_coder_blocks, num_heads, d_ff):
        super().__init__()
        self.d_model = d_model
        self.src_vocab = src_vocab
        self.trg_vocab = trg_vocab
        self.dropout_rate = dropout_rate
        self.num_coder_blocks = num_coder_blocks
        self.num_heads = num_heads
        self.d_ff = d_ff

    # noinspection PyAttributeOutsideInit
    def build(self, input_shape):
        """Instantiate all sub-layers once input shapes are known."""
        assert len(input_shape) == 4
        src_shape, trg_shape, src_mask_shape, trg_mask_shape = input_shape
        # BUG FIX: shapes were wrapped as (shape, None) tuples; pass the
        # shapes directly (consistent with TransformerSmall.build).
        self.input_spec = [
            InputSpec(shape=src_shape),
            InputSpec(shape=trg_shape),
            InputSpec(shape=src_mask_shape),
            InputSpec(shape=trg_mask_shape)
        ]
        # ----- encoder -----
        self.src_emb_layer = EmbeddingsK(d_model=self.d_model, vocab=self.src_vocab)
        self.src_pe = PositionalEncodingK(d_model=self.d_model, dropout_rate=self.dropout_rate)
        self.encoder_mha_list = [
            MultiHeadedAttentionK(h=self.num_heads, d_model=self.d_model, dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks)
        ]
        self.encoder_pff_list = [
            PositionwiseFeedForwardK(d_model=self.d_model, d_ff=self.d_ff)
            for _ in range(self.num_coder_blocks)
        ]
        # BUG FIX: SublayerConnectionK.__init__ accepts (size, dropout)
        # only; passing sublayer= raised TypeError. The sublayer is bound
        # at call time instead.
        self.encoder_slc_mha_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks)
        ]
        self.encoder_slc_pff_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks)
        ]
        # BUG FIX: was LayerNormK(features=d_model) — a module-level
        # global — instead of this instance's configuration.
        self.encoder_layer_norm = LayerNormK(features=self.d_model)
        # ----- decoder -----
        self.trg_emb_layer = EmbeddingsK(d_model=self.d_model, vocab=self.trg_vocab)
        self.trg_pe = PositionalEncodingK(d_model=self.d_model, dropout_rate=self.dropout_rate)
        # Two attention layers per decoder block: self-attention and
        # encoder-decoder (source) attention.
        self.decoder_mha_list = [
            MultiHeadedAttentionK(h=self.num_heads, d_model=self.d_model, dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks * 2)
        ]
        self.decoder_pff_list = [
            PositionwiseFeedForwardK(d_model=self.d_model, d_ff=self.d_ff)
            for _ in range(self.num_coder_blocks)
        ]
        self.decoder_slc_mha_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks * 2)
        ]
        self.decoder_slc_pff_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks)
        ]
        self.decoder_layer_norm = LayerNormK(features=self.d_model)
        super().build(input_shape)

    def call(self, src_trg_smask_tmask):
        src, trg, src_mask, trg_mask = src_trg_smask_tmask
        x = self.src_pe(self.src_emb_layer(src))
        # ----- encoder -----
        for i in range(self.num_coder_blocks):
            # BUG FIX: MultiHeadedAttentionK.call unpacks a single list
            # argument; it was being called with four positional args.
            self_attn = lambda t, i=i: self.encoder_mha_list[i]([t, t, t, src_mask])
            out_slc1 = self.encoder_slc_mha_list[i](x=x, sublayer=self_attn)
            x = self.encoder_slc_pff_list[i](x=out_slc1, sublayer=self.encoder_pff_list[i])
        output_encoder = self.encoder_layer_norm(x)
        # input to decoder (embedding and positional encoding)
        y = self.trg_pe(self.trg_emb_layer(trg))
        # ----- decoder -----
        for j in range(self.num_coder_blocks):
            # BUG FIX: indices j and j*2 collide at j == 0; block j owns
            # attention layers 2j (self-attn) and 2j+1 (source-attn).
            self_attn1 = lambda t, j=j: self.decoder_mha_list[2 * j]([t, t, t, trg_mask])
            out_slc1 = self.decoder_slc_mha_list[2 * j](x=y, sublayer=self_attn1)
            src_attn2 = lambda t, j=j: self.decoder_mha_list[2 * j + 1](
                [t, output_encoder, output_encoder, src_mask])
            out_slc2 = self.decoder_slc_mha_list[2 * j + 1](x=out_slc1, sublayer=src_attn2)
            y = self.decoder_slc_pff_list[j](x=out_slc2, sublayer=self.decoder_pff_list[j])
        return self.decoder_layer_norm(y)
class TransformerSmall(Layer):
    """Encoder-only Transformer stack.

    Call with ``[src, src_mask]``: token ids are embedded, positionally
    encoded, run through ``num_coder_blocks`` (self-attention +
    feed-forward) blocks, and finally layer-normalized.

    >>> model = Sequential([TransformerSmall(
            d_model=512, src_vocab=100, dropout_rate=0.1,
            num_coder_blocks=2, num_heads=4, d_ff=1024)])
    """

    def __init__(self, d_model, src_vocab, dropout_rate, num_coder_blocks, num_heads, d_ff):
        super().__init__()
        self.d_model = d_model
        self.src_vocab = src_vocab
        self.dropout_rate = dropout_rate
        self.num_coder_blocks = num_coder_blocks
        self.num_heads = num_heads
        self.d_ff = d_ff

    # noinspection PyAttributeOutsideInit
    def build(self, input_shape):
        """Instantiate all sub-layers once the input shapes are known."""
        print('input_shape:', input_shape)
        assert len(input_shape) == 2
        src_shape, src_mask_shape = input_shape
        self.input_spec = [
            InputSpec(shape=src_shape),
            InputSpec(shape=src_mask_shape)
        ]
        self.src_emb_layer = EmbeddingsK(d_model=self.d_model, vocab=self.src_vocab)
        self.src_pe = PositionalEncodingK(d_model=self.d_model, dropout_rate=self.dropout_rate)
        self.encoder_mha_list = [
            MultiHeadedAttentionK(h=self.num_heads, d_model=self.d_model, dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks)
        ]
        self.encoder_pff_list = [
            PositionwiseFeedForwardK(d_model=self.d_model, d_ff=self.d_ff)
            for _ in range(self.num_coder_blocks)
        ]
        self.encoder_slc_mha_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.encoder_mha_list
        ]
        self.encoder_slc_pff_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.encoder_pff_list
        ]
        # BUG FIX: was LayerNormK(features=d_model), silently depending on
        # a module-level global; use this instance's own width.
        self.encoder_layer_norm = LayerNormK(features=self.d_model)
        super().build(input_shape)

    def call(self, src_smask):
        """Run the encoder stack; returns the normalized encoder output."""
        src, src_mask = src_smask
        input_encoder = self.src_pe(self.src_emb_layer(src))
        for i in range(self.num_coder_blocks):
            # multi-headed attention and 1st sublayer connection
            self_attn = lambda x: self.encoder_mha_list[i]([x, x, x, src_mask])
            out_slc1 = self.encoder_slc_mha_list[i](input_encoder, sublayer=self_attn)
            # position-wise feed-forward and 2nd sublayer connection
            input_encoder = self.encoder_slc_pff_list[i](x=out_slc1, sublayer=self.encoder_pff_list[i])
        output_encoder = self.encoder_layer_norm(input_encoder)
        return output_encoder
# if __name__ == '__test__':
# max_words_in_sentence = 4 # of words in each sentence
# batch_size = 5 # of sentences
# size_dict = 7 # size of word dictionary
# d_model = 12
# hidden_size_pff = 11
# num_head = 2
# dropout_rate = 0.1
# num_encoder_layer = 2
# learning_rate = 0.001
#
# x = Input(shape=(max_words_in_sentence,))
# src = K.constant([[0, 3, 0, 2],
# [1, 0, 3, 2],
# [0, 0, 0, 1],
# [1, 0, 0, 1],
# [3, 2, 2, 1]])
# print(src, src.shape)
# src_mask = K.constant([[[1, 1, 1, 1]],
# [[1, 1, 1, 1]],
# [[1, 1, 1, 1]],
# [[1, 1, 1, 1]],
# [[1, 1, 1, 1]]]);
# print(src_mask, src_mask.shape)
# x = EmbeddingsK(d_model=d_model, vocab=size_dict)(src) # embedding on 12 dim for 7 tokens total.
# x = PositionalEncodingK(d_model=d_model, dropout_rate=0.)(x)
#
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import argparse
import os


def _parse_exon_id(name):
    """Decompose an identifier of the form
    '<cluster>exon<i>of<n>phases<s>and<e>...'
    into (cluster, exon_number, number_of_exons, start_phase, end_phase).

    The same naming scheme is used both for HAP.py query names and for
    cluster hmm file names, so both code paths below share this parser.
    """
    cluster = name.split('exon')[0]
    exon_number = name.split('exon')[1].split('of')[0]
    number_of_exons = name.split('of')[1].split('phases')[0]
    start_phase = name.split('phases')[1].split('and')[0]
    end_phase = name.split('and')[1].split('.')[0]
    return cluster, exon_number, number_of_exons, start_phase, end_phase


parser = argparse.ArgumentParser(description='splits query name output by HAP.py and builds table required for ABCENTH')
parser.add_argument('--table', default=None, help='table output by HAP.py')
parser.add_argument('--hmm_dir', default=None, help="director with all cluster hmms")
args = parser.parse_args()

if args.table:
    # Rewrite each HAP.py row: exon metadata is unpacked from the query
    # name and appended after the alignment columns.
    for line in open(args.table):
        fields = line.replace('\n', '').replace('\r', '').split('\t')
        cluster, exon_number, number_of_exons, start_phase, end_phase = _parse_exon_id(fields[0])
        aa_len = fields[12]
        print('\t'.join([cluster] + fields[1:12] +
                        [start_phase, end_phase, aa_len, exon_number, number_of_exons]))
elif args.hmm_dir:
    # Build the table directly from the hmm files' names and headers.
    for hmm_file in os.listdir(args.hmm_dir):
        if hmm_file.endswith(".hmm") and "fullLenForHMM" not in hmm_file:
            cluster, exon_number, number_of_exons, start_phase, end_phase = _parse_exon_id(hmm_file)
            # model length lives on the third line of the hmm file
            aa_len = open(args.hmm_dir + "/" + hmm_file).read().split('\n')[2].split()[1].replace('\r', '')
            print('\t'.join([cluster, exon_number, number_of_exons, start_phase, end_phase, aa_len,
                             os.path.abspath(args.hmm_dir) + '/' + hmm_file]))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""Three philosophers thinking and eating dumplings - deadlock happens"""
import time
from threading import Thread, Lock
dumplings = 20
class Philosopher(Thread):
    """A dining philosopher that repeatedly grabs two chopsticks (locks).

    Chopsticks are acquired left-then-right with no global ordering, so
    two philosophers holding opposite sticks can deadlock — demonstrating
    the hazard is the point of this demo.
    """

    def __init__(self, name: str, left_chopstick: Lock, right_chopstick: Lock) -> None:
        super().__init__()
        self.name = name
        self.left_chopstick = left_chopstick
        self.right_chopstick = right_chopstick

    def run(self) -> None:
        # `dumplings` is the globally shared plate, mutated by every thread.
        global dumplings
        while dumplings > 0:
            # grab left stick, then right stick (same order as before)
            self.left_chopstick.acquire()
            print(f"{id(self.left_chopstick)} chopstick grabbed by {self.name}")
            self.right_chopstick.acquire()
            print(f"{id(self.right_chopstick)} chopstick grabbed by {self.name}")
            # re-check: the plate may have been emptied while we waited
            if dumplings > 0:
                dumplings -= 1
                print(f"{self.name} eat a dumpling. Dumplings left: {dumplings}")
            # release in reverse order of acquisition
            self.right_chopstick.release()
            print(f"{id(self.right_chopstick)} chopstick released by {self.name}")
            self.left_chopstick.release()
            print(f"{id(self.left_chopstick)} chopstick released by {self.name}")
            time.sleep(0.00001)
if __name__ == "__main__":
    chopstick_a = Lock()
    chopstick_b = Lock()
    # Opposite acquisition order between the two threads -> can deadlock.
    diners = [
        Philosopher("Philosopher #1", chopstick_a, chopstick_b),
        Philosopher("Philosopher #2", chopstick_b, chopstick_a),
    ]
    for diner in diners:
        diner.start()
|
nilq/baby-python
|
python
|
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import sys
import os
import subprocess
import argparse
import re
# add the current to sys.path
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPTS_DIR)
from string import Template
from LoggerManager import getTempLogFile, logger, initConsoleLogging
from PatchInfoParser import PatchInfo, installNameToDirName
from GitUtils import addChangeSet, commitChange, getGitRepoRevisionHash
"""
constants
"""
DEFAULT_OUTPUT_LOG_FILE_NAME = "MCompReposCommitter.log"
PATCH_SRC_WEB_LINK = "http://code.osehra.org/VistA.git/${type}/${hb}/${patch_dir}"
"""
class to commit all the changes under the Packages directory
in VistA-FOIA repository after patch(s) are applied and extracted.
"""
class MCompReposCommitter(object):
    """Commits changes made under the Packages directory of a VistA-FOIA
    repository after patch(es) are applied and extracted.
    """

    def __init__(self, vistAMRepo):
        assert os.path.exists(vistAMRepo)
        repoDir = os.path.abspath(vistAMRepo)
        self._vistAMRepoDir = repoDir
        self._packagesDir = os.path.join(repoDir, 'Packages')

    def commit(self, commitMsgFile):
        """Stage everything under Packages/ and commit with the given message file."""
        self.__addChangeSet__()
        self.__commit__(commitMsgFile)

    def __addChangeSet__(self):
        # stage all modified/added files under Packages/
        logger.info("Add change set")
        addChangeSet(self._packagesDir)

    def __commit__(self, commitMsgFile):
        logger.info("Commit the change")
        commitChange(commitMsgFile, self._packagesDir)
def generateCommitMsgFileByPatchInfo(patchInfo, commitMsgFile,
                                     branch="HEAD", reposDir=None):
    """Write a git commit message describing *patchInfo* to *commitMsgFile*.

    The message contains the install name(s), patch subject, description,
    and web links to the patch source in the repository at *branch*.
    """
    reposHash = getGitRepoRevisionHash(branch, reposDir)[:8]
    installs = patchInfo.installName
    if patchInfo.multiBuildsList:
        installs = ", ".join(patchInfo.multiBuildsList)
    # assemble the fixed header, then append the link lines
    header = [
        "Install: %s\n" % installs,
        "\nPatch Subject: %s" % patchInfo.subject,
        '\n',
        "Description:\n\n" + '\n'.join([str(x) for x in patchInfo.description]),
        '\n\n',
        'Use default answers for KIDS load/install questions.\n\n',
    ]
    with open(commitMsgFile, 'w') as output:
        output.write(''.join(header))
        if patchInfo.isMultiBuilds:  # special logic for multibuilds
            buildLink, otherLinks = getWebLinkForPatchSourceMultiBuilds(patchInfo,
                                                                        reposHash)
            output.write('Multi-Build: %s\n' % buildLink)
            for link in otherLinks:
                if link:
                    output.write('Patch-Files: %s\n' % link)
        else:
            packageLink = getWebLinkForPatchSourceByFile(patchInfo.kidsFilePath,
                                                         reposHash)
            output.write('Patch-Files: %s\n' % packageLink)
def getWebLinkForPatchSourceMultiBuilds(patchInfo, reposHash):
    """Return (build_link, other_links) for a multi-build patch.

    build_link points at the KIDS build file itself; other_links holds one
    entry per companion info file (None when that file is absent).
    """
    buildLink = getWebLinkForPatchSourceByFile(patchInfo.kidsFilePath,
                                               reposHash, fileType=True)
    otherLink = [
        getWebLinkForPatchSourceByFile(item[0], reposHash) if item[0] else None
        for item in patchInfo.otherKidsInfoList
    ]
    return buildLink, otherLink
def getWebLinkForPatchSourceByFile(filePath, reposHash, fileType=False):
    """Build the code.osehra.org web link for a patch source file.

    fileType=True links the file itself (git 'blob'); otherwise the
    containing directory (git 'tree').
    """
    if fileType:
        typeName, packageDir = "blob", filePath
    else:
        typeName, packageDir = "tree", os.path.dirname(filePath)
    # keep only the repo-relative part and web-escape it
    packageDir = packageDir[packageDir.find('Packages'):]
    packageDir = packageDir.replace('\\', '/').replace(' ', '+')
    # NOTE(review): reposHash is currently unused — links always pin the
    # 'master' head; confirm whether the hash should be substituted here.
    return Template(PATCH_SRC_WEB_LINK).substitute(type=typeName,
                                                   patch_dir=packageDir,
                                                   hb="master")
def testSinglePatchCommitMsg():
    """Smoke-test commit-message generation for a single-build patch."""
    patchInfo = PatchInfo()
    patchInfo.installName = "LR*5.2*334"
    patchInfo.kidsFilePath = "C:/users/jason.li/git/VistA/Packages/"\
        "Lab Service/Patches/LR_5.2_334/LR_52_334.KIDs.json"
    commitMsgFile = getDefaultCommitMsgFileByPatchInfo(patchInfo)
    # BUG FIX: was the Python 2 statement "print commitMsgFile", a
    # SyntaxError under Python 3; the call form works on both.
    print(commitMsgFile)
    generateCommitMsgFileByPatchInfo(patchInfo, commitMsgFile,
                                     "origin/master", SCRIPTS_DIR)
def testMultiBuildPatchCommitMsg():
    """Smoke-test commit-message generation for a multi-build patch set."""
    patchInfo = PatchInfo()
    patchInfo.installName = "HDI*1.0*7"
    patchInfo.isMultiBuilds = True
    patchInfo.multiBuildsList = ["HDI*1.0*7", "LR*5.2*350", "LA*5.2*74"]
    patchInfo.kidsFilePath = ("C:/users/jason.li/git/VistA/Packages/"
                              "MultiBuilds/LAB_LEDI_IV.KIDs.json")
    patchInfo.kidsInfoPath = (
        "C:/users/jason.li/git/VistA/Packages/Health Data and Informatics/"
        "Patches/HDI_1.0_7/HDI-1_SEQ-8_PAT-7.TXT")
    patchInfo.kidsInfoSha1 = None
    # companion info files for the other builds in the set (sha1 unknown)
    patchInfo.otherKidsInfoList = [
        ["C:/users/jason.li/git/VistA/Packages/Lab Service/"
         "Patches/LR_5.2_350/LR-5P2_SEQ-332_PAT-350.TXT", None],
        ["C:/users/jason.li/git/VistA/Packages/Automated Lab Instruments/"
         "Patches/LA_5.2_74/LA-5P2_SEQ-57_PAT-74.TXT", None],
    ]
    commitMsgFile = getDefaultCommitMsgFileByPatchInfo(patchInfo)
    generateCommitMsgFileByPatchInfo(patchInfo, commitMsgFile,
                                     "origin/master", SCRIPTS_DIR)
def getDefaultCommitMsgFileByPatchInfo(patchInfo, dir=None):
    """Return the commit-message file path for *patchInfo*.

    Uses a temp-dir location when no directory is supplied.
    """
    outputFile = installNameToDirName(patchInfo.installName) + ".msg"
    return getTempLogFile(outputFile) if dir is None else os.path.join(dir, outputFile)
def testMain():
    """Run every commit-message generation smoke test, in order."""
    for case in (testSinglePatchCommitMsg, testMultiBuildPatchCommitMsg):
        case()
def main():
    # Intentionally a no-op: the committing work is driven externally via
    # MCompReposCommitter; swap in testMain() to exercise message
    # generation manually.
    pass

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import tkinter
from time import strftime

top = tkinter.Tk()
top.title('Clock')
top.resizable(False, False)  # fixed-size window


def tick():
    """Refresh the label with the current time, then reschedule.

    Renamed from `time` to avoid confusion with the stdlib `time` module
    this script imports from.
    """
    clockTime.config(text=strftime('%H:%M:%S %p'))
    clockTime.after(1000, tick)  # re-run once per second


clockTime = tkinter.Label(top, font=(
    'courier new', 40,), background='black', foreground='white')
clockTime.pack(anchor='center')
tick()
top.mainloop()
|
nilq/baby-python
|
python
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os, common
import subprocess
from proton import *
from common import Skipped
class SslTest(common.Test):
def __init__(self, *args):
    # Delegate straight to the framework base class (explicit old-style
    # call, matching the Python 2 idiom used throughout this file).
    common.Test.__init__(self, *args)
def setup(self):
    """Create fresh server/client SSL domains for each test; skip the
    test entirely when the proton build has no SSL support."""
    try:
        self.server_domain = SSLDomain(SSLDomain.MODE_SERVER)
        self.client_domain = SSLDomain(SSLDomain.MODE_CLIENT)
    except SSLUnavailable as e:
        # BUG FIX: was "except SSLUnavailable, e" — Python-2-only syntax;
        # "as e" is valid on Python 2.6+ and Python 3.
        raise Skipped(e)
def teardown(self):
    """Drop the SSL domains created in setup()."""
    self.server_domain = self.client_domain = None
class SslTestConnection(object):
    """ Represents a single SSL connection: a Connection bound to a
    Transport, optionally wrapped in SSL when a domain is supplied.
    """
    def __init__(self, domain=None, session_details=None):
        try:
            self.ssl = None
            self.domain = domain
            self.transport = Transport()
            self.connection = Connection()
            self.transport.bind(self.connection)
            if domain:
                self.ssl = SSL( self.transport, self.domain, session_details )
        except SSLUnavailable as e:
            # BUG FIX: was "except SSLUnavailable, e" — Python-2-only
            # syntax; "as e" works on Python 2.6+ and Python 3.
            raise Skipped(e)
def _pump(self, ssl_client, ssl_server, buffer_size=1024):
    """ Allow two SslTestConnections to transfer data until done.

    Shuttles pending output from each transport into the peer's input,
    carrying over any bytes the peer did not consume, until neither side
    has anything left to send. (The dead iteration counter `i` from the
    previous revision has been removed.)
    """
    out_client_leftover_by_server = ""
    out_server_leftover_by_client = ""
    while True:
        out_client = out_client_leftover_by_server + (ssl_client.transport.output(buffer_size) or "")
        out_server = out_server_leftover_by_client + (ssl_server.transport.output(buffer_size) or "")
        if out_client:
            number_server_consumed = ssl_server.transport.input(out_client)
            if number_server_consumed is None:
                # special None return value means input is closed so discard the leftovers
                out_client_leftover_by_server = ""
            else:
                out_client_leftover_by_server = out_client[number_server_consumed:]
        if out_server:
            number_client_consumed = ssl_client.transport.input(out_server)
            if number_client_consumed is None:
                # special None return value means input is closed so discard the leftovers
                out_server_leftover_by_client = ""
            else:
                out_server_leftover_by_client = out_server[number_client_consumed:]
        if not out_client and not out_server:
            break
def _testpath(self, file):
    """ Return the full path of *file* inside this test's ssl_db
    directory (certificates, keys, CA database).
    """
    here = os.path.dirname(__file__)
    return os.path.join(here, "ssl_db/%s" % file)
def _do_handshake(self, client, server):
    """ Attempt to connect client to server. Will throw a TransportException if the SSL
    handshake fails.
    """
    client.connection.open()
    server.connection.open()
    self._pump(client, server)
    # handshake completed: a protocol must have been negotiated
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    # final pump flushes the close handshake
    self._pump(client, server)
def test_defaults(self):
    """ By default, both the server and the client support anonymous
    ciphers - they should connect without need for a certificate.
    """
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain )
    # check that no SSL connection exists
    assert not server.ssl.cipher_name()
    assert not client.ssl.protocol_name()
    #client.transport.trace(Transport.TRACE_DRV)
    #server.transport.trace(Transport.TRACE_DRV)
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    # now SSL should be active
    assert server.ssl.cipher_name() is not None
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    # final pump flushes the close handshake
    self._pump( client, server )
def test_ssl_with_small_buffer(self):
    """ Force the handshake through a 1-byte transfer buffer: _pump's
    leftover handling must still complete the SSL negotiation.
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    small_buffer_size = 1
    self._pump( client, server, small_buffer_size )
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_server_certificate(self):
    """ Test that anonymous clients can still connect to a server that has
    a certificate configured.
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    # client configured no CA, yet the handshake still completes
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_server_authentication(self):
    """ Simple SSL connection with authentication of the server
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    # client checks the server's certificate against the trusted CA db
    self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_client_authentication(self):
    """ Force the client to authenticate.
    """
    # note: when requesting client auth, the server _must_ send its
    # certificate, so make sure we configure one!
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
                                                self._testpath("ca-certificate.pem") )
    server = SslTest.SslTestConnection( self.server_domain )
    # give the client a certificate, but let's not require server authentication
    self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
                                       self._testpath("client-private-key.pem"),
                                       "client-password")
    self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_client_authentication_fail_bad_cert(self):
    """ Ensure that the server can detect a bad client certificate.
    """
    # note: when requesting client auth, the server _must_ send its
    # certificate, so make sure we configure one!
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
                                                self._testpath("ca-certificate.pem") )
    server = SslTest.SslTestConnection( self.server_domain )
    # client presents a certificate the server's CA did not sign
    self.client_domain.set_credentials(self._testpath("bad-server-certificate.pem"),
                                       self._testpath("bad-server-private-key.pem"),
                                       "server-password")
    self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    try:
        self._pump( client, server )
        assert False, "Server failed to reject bad certificate."
    except TransportException:
        # BUG FIX: was "except TransportException, e" — Python-2-only
        # syntax, and the bound exception was never used.
        pass
def test_client_authentication_fail_no_cert(self):
    """ Ensure that the server will fail a client that does not provide a
    certificate.
    """
    # note: when requesting client auth, the server _must_ send its
    # certificate, so make sure we configure one!
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
                                                self._testpath("ca-certificate.pem") )
    server = SslTest.SslTestConnection( self.server_domain )
    # client deliberately configures no credentials at all
    self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    try:
        self._pump( client, server )
        assert False, "Server failed to reject bad certificate."
    except TransportException:
        # BUG FIX: was "except TransportException, e" — Python-2-only
        # syntax, and the bound exception was never used.
        pass
def test_client_server_authentication(self):
    """ Require both client and server to mutually identify themselves.
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
                                                self._testpath("ca-certificate.pem") )
    # client also presents a certificate and verifies the server's
    self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
                                       self._testpath("client-private-key.pem"),
                                       "client-password")
    self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_server_only_authentication(self):
    """ Client verifies server, but server does not verify client.
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    # the client still offers a certificate, but the server ignores it
    self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
                                       self._testpath("client-private-key.pem"),
                                       "client-password")
    self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_bad_server_certificate(self):
    """ A server with a self-signed certificate that is not trusted by the
    client. The client should reject the server.
    """
    self.server_domain.set_credentials(self._testpath("bad-server-certificate.pem"),
                                       self._testpath("bad-server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain )
    client.connection.open()
    server.connection.open()
    try:
        self._pump( client, server )
        assert False, "Client failed to reject bad certificate."
    except TransportException:
        # BUG FIX: was "except TransportException, e" — Python-2-only
        # syntax, and the bound exception was never used.
        pass
    del server
    del client
    # now re-try with a client that does not require peer verification
    self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    client = SslTest.SslTestConnection( self.client_domain )
    server = SslTest.SslTestConnection( self.server_domain )
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert client.ssl.protocol_name() is not None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_allow_unsecured_client(self):
    """ Server allows an unsecured client to connect if configured.
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
                                                self._testpath("ca-certificate.pem") )
    # allow unsecured clients on this connection
    self.server_domain.allow_unsecured_client()
    server = SslTest.SslTestConnection( self.server_domain )
    # non-ssl connection
    client = SslTest.SslTestConnection()
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    # no protocol negotiated: the connection stayed cleartext
    assert server.ssl.protocol_name() is None
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_disallow_unsecured_client(self):
    """ Non-SSL Client is disallowed from connecting to server.
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    server = SslTest.SslTestConnection( self.server_domain )
    # non-ssl connection
    client = SslTest.SslTestConnection()
    client.connection.open()
    server.connection.open()
    try:
        self._pump( client, server )
        assert False, "Server did not reject client as expected."
    except TransportException:
        # expected: server requires SSL and aborts the cleartext client
        pass
def test_session_resume(self):
    """ Test resume of client session.
    """
    self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
                                       self._testpath("server-private-key.pem"),
                                       "server-password")
    self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
    self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
    self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
    # details will be used in initial and subsequent connections to allow session to be resumed
    initial_session_details = SSLSessionDetails("my-session-id")
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain, initial_session_details )
    # bring up the connection and store its state
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert client.ssl.protocol_name() is not None
    # cleanly shutdown the connection
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
    # destroy the existing clients
    del client
    del server
    # now create a new set of connections, use last session id
    server = SslTest.SslTestConnection( self.server_domain )
    # provide the details of the last session, allowing it to be resumed
    client = SslTest.SslTestConnection( self.client_domain, initial_session_details )
    #client.transport.trace(Transport.TRACE_DRV)
    #server.transport.trace(Transport.TRACE_DRV)
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert server.ssl.protocol_name() is not None
    # NOTE(review): LANGUAGE is a global flag (not defined in this chunk)
    # selecting the proton binding under test — confirm where it is set.
    if(LANGUAGE=="C"):
        assert client.ssl.resume_status() == SSL.RESUME_REUSED
    else:
        # Java gives no way to check whether a previous session has been resumed
        pass
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
    # now try to resume using an unknown session-id, expect resume to fail
    # and a new session is negotiated
    del client
    del server
    server = SslTest.SslTestConnection( self.server_domain )
    client = SslTest.SslTestConnection( self.client_domain, SSLSessionDetails("some-other-session-id") )
    client.connection.open()
    server.connection.open()
    self._pump( client, server )
    assert server.ssl.protocol_name() is not None
    if(LANGUAGE=="C"):
        assert client.ssl.resume_status() == SSL.RESUME_NEW
    client.connection.close()
    server.connection.close()
    self._pump( client, server )
def test_multiple_sessions(self):
""" Test multiple simultaineous active SSL sessions with bi-directional
certificate verification, shared across two domains.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
max_count = 100
sessions = [(SslTest.SslTestConnection( self.server_domain ),
SslTest.SslTestConnection( self.client_domain )) for x in
range(max_count)]
for s in sessions:
s[0].connection.open()
self._pump( s[0], s[1] )
for s in sessions:
s[1].connection.open()
self._pump( s[1], s[0] )
assert s[0].ssl.cipher_name() is not None
assert s[1].ssl.cipher_name() == s[0].ssl.cipher_name()
for s in sessions:
s[1].connection.close()
self._pump( s[0], s[1] )
for s in sessions:
s[0].connection.close()
self._pump( s[1], s[0] )
def test_server_hostname_authentication(self):
""" Test authentication of the names held in the server's certificate
against various configured hostnames.
"""
# Check the CommonName matches (case insensitive).
# Assumes certificate contains "CN=A1.Good.Server.domain.com"
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "a1.good.server.domain.com"
assert client.ssl.peer_hostname == "a1.good.server.domain.com"
self._do_handshake( client, server )
del server
del client
self.teardown()
# Should fail on CN name mismatch:
self.setup()
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "A1.Good.Server.domain.comX"
try:
self._do_handshake( client, server )
assert False, "Expected connection to fail due to hostname mismatch"
except TransportException:
pass
del server
del client
self.teardown()
# Wildcarded Certificate
# Assumes:
# 1) certificate contains Server Alternate Names:
# "alternate.name.one.com" and "another.name.com"
# 2) certificate has wildcarded CommonName "*.prefix*.domain.com"
#
# Pass: match an alternate
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "alternate.Name.one.com"
self._do_handshake( client, server )
del client
del server
self.teardown()
# Pass: match an alternate
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "ANOTHER.NAME.COM"
self._do_handshake(client, server)
del client
del server
self.teardown()
# Pass: match the pattern
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "SOME.PREfix.domain.COM"
self._do_handshake( client, server )
del client
del server
self.teardown()
# Pass: match the pattern
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "FOO.PREfixZZZ.domain.com"
self._do_handshake( client, server )
del client
del server
self.teardown()
# Fail: must match prefix on wildcard
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "FOO.PREfi.domain.com"
try:
self._do_handshake( client, server )
assert False, "Expected connection to fail due to hostname mismatch"
except TransportException:
pass
del server
del client
self.teardown()
# Fail: leading wildcards are not optional
self.setup()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "PREfix.domain.COM"
try:
self._do_handshake( client, server )
assert False, "Expected connection to fail due to hostname mismatch"
except TransportException:
pass
self.teardown()
|
nilq/baby-python
|
python
|
# SPDX-FileCopyrightText: 2021 Gabriel Lisaca <gabriel.lisaca@gmail.com>
#
# SPDX-License-Identifier: Apache-2.0
import logging
import pytest
@pytest.fixture
def placeholder_elvis_name():
    """Return a generic elvis name used as a test placeholder."""
    return "placeholder"
@pytest.fixture
def placeholder_domain():
    """Return the reserved example domain used as a test placeholder."""
    return "example.com"
@pytest.fixture
def placeholder_url(placeholder_domain):
    """Return an https URL built from the placeholder domain."""
    return f"https://{placeholder_domain}"
@pytest.fixture
def caplog_cli_error(caplog):
    """Return pytest's caplog with its capture level raised to CRITICAL,
    so only critical records are collected by tests using this fixture."""
    caplog.set_level(logging.CRITICAL)
    return caplog
|
nilq/baby-python
|
python
|
from exopy.tasks.api import (InstrumentTask)
from atom.api import Float, Unicode, Str, set_default
from qm.qua import *
class ResumeProgramTask(InstrumentTask):
    """Resume a paused program on the instrument.

    Delegates to the driver (provided by InstrumentTask) via ``resume()``.
    The original class defined an ``__init__`` that only forwarded its
    kwargs to ``super().__init__`` — that is Python's default behaviour,
    so the redundant override has been removed.
    """

    def perform(self):
        """Ask the instrument driver to resume the paused program."""
        self.driver.resume()
|
nilq/baby-python
|
python
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
def utopianTree(n):
    """Return the height of the utopian tree after n growth cycles.

    The tree starts at height 1; odd-numbered cycles double its height
    and even-numbered cycles add 1.
    """
    height = 1
    for cycle in range(1, n + 1):
        if cycle % 2 == 1:
            height *= 2
        else:
            height += 1
    return height
if __name__ == '__main__':
    # Use a context manager so the output file is flushed and closed even
    # if an exception occurs (the original leaked the handle on error).
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        t = int(input())
        for t_itr in range(t):
            n = int(input())
            result = utopianTree(n)
            fptr.write(str(result) + '\n')
|
nilq/baby-python
|
python
|
import os, sys, imaplib, rfc822, re, StringIO
import RPi.GPIO as GPIO
import time
# IMAP account polled for control messages (Python 2 script).
server ='mail.xxx.us'
username='juan@xxx.us'
password='xxx'
# Two LEDs addressed by physical board pin number.
GPIO.setmode(GPIO.BOARD)
GREEN_LED = 22
RED_LED = 7
GPIO.setup(GREEN_LED, GPIO.OUT)
GPIO.setup(RED_LED, GPIO.OUT)
# Connect over SSL and select the default mailbox (INBOX).
M = imaplib.IMAP4_SSL(server)
M.login(username, password)
M.select()
try:
    # Poll every 2 minutes for unseen messages whose subject contains
    # "PIFI MESSAGE", and switch the LEDs according to the body text.
    while 1:
        print "checking email"
        typ, data = M.search(None, '(UNSEEN SUBJECT "PIFI MESSAGE")')
        for num in data[0].split():
            # Fetch the full RFC822 message; fetching also marks it seen.
            typ, data = M.fetch(num, '(RFC822)')
            #print 'Message %s\n%s\n' % (num, data[0][1])
            # Look for the four supported commands anywhere in the raw body.
            redon = re.search( "RED ON",
                               data[0][1],
                               re.MULTILINE|re.DOTALL )
            greenon = re.search( "GREEN ON",
                                 data[0][1],
                                 re.MULTILINE|re.DOTALL )
            redoff = re.search( "RED OFF",
                                data[0][1],
                                re.MULTILINE|re.DOTALL )
            greenoff = re.search( "GREEN OFF",
                                  data[0][1],
                                  re.MULTILINE|re.DOTALL )
            if redon:
                GPIO.output(RED_LED, True)
                print "red on"
            if greenon:
                GPIO.output(GREEN_LED, True)
                print "green on"
            if redoff:
                GPIO.output(RED_LED, False)
                print "red off"
            if greenoff:
                GPIO.output(GREEN_LED, False)
                print "green off"
        time.sleep(120)
except KeyboardInterrupt:
    # Ctrl-C: release the GPIO pins before exiting.
    GPIO.cleanup()
    pass
M.close()
M.logout()
|
nilq/baby-python
|
python
|
from engineauth import models
from engineauth.middleware import AuthMiddleware
import test_base
import webapp2
from webob import Request
__author__ = 'kyle.finley@gmail.com (Kyle Finley)'
# Module-level app under test: an empty webapp2 application wrapped in the
# EngineAuth middleware; the tests below send requests through it.
app = AuthMiddleware(webapp2.WSGIApplication())
class TestAppEngineOpenIDStrategy(test_base.BaseTestCase):
    """Tests for the App Engine OpenID auth strategy endpoint."""

    def setUp(self):
        super(TestAppEngineOpenIDStrategy, self).setUp()

    def test_handle_request(self):
        """A request to the auth endpoint should redirect to Google login."""
        # No User or Profile
        p_count0 = models.UserProfile.query().count()
        u_count0 = models.User.query().count()
        self.assertEqual(p_count0, 0)
        self.assertEqual(u_count0, 0)
        # Create New User
        provider = 'gmail.com'
        req = Request.blank('/auth/appengine_openid?provider=' + provider)
        resp = req.get_response(app)
        # The middleware is expected to answer with a redirect whose target
        # continues back to our /auth callback after Google login.
        self.assertEqual(resp.location, 'https://www.google.com/accounts/'
                                        'Login?continue=http%3A//localhost/'
                                        'auth/appengine_openid/callback')
        # # Retrieve user from datastore
        # user = models.User.get_by_auth_id(auth_id)
        # self.assertIn(auth_id, user.auth_ids)
        # self.assertTrue(user._has_email(email))
        # # Retrieve profile from datastore
        # profile = models.UserProfile.get_by_id(auth_id)
        # self.assertTrue(profile is not None)
        # p_count1 = models.UserProfile.query().count()
        # u_count1 = models.User.query().count()
        # self.assertEqual(p_count1, 1)
        # self.assertEqual(u_count1, 1)
        # # Login User
        # req = Request.blank('/auth/appengine_openid?provider=' + provider)
        # resp = req.get_response(app)
        # # Make sure a new User is not created.
        # p_count2 = models.UserProfile.query().count()
        # u_count2 = models.User.query().count()
        # self.assertEqual(p_count2, 1)
        # self.assertEqual(u_count2, 1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import os
import datetime
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.multiprocessing as mp
from parameters import get_args
import pcode.create_dataset as create_dataset
import pcode.create_optimizer as create_optimizer
import pcode.create_metrics as create_metrics
import pcode.create_model as create_model
import pcode.create_scheduler as create_scheduler
import pcode.utils.topology as topology
import pcode.utils.checkpoint as checkpoint
import pcode.utils.op_paths as op_paths
import pcode.utils.stat_tracker as stat_tracker
import pcode.utils.logging as logging
from pcode.utils.timer import Timer
def init_distributed_world(conf, backend):
    """Initialise the torch.distributed process group for `backend`.

    MPI discovers peers itself; nccl/gloo rendezvous through a shared file
    under the checkpoint directory. Any other backend is rejected.
    """
    if backend == "mpi":
        dist.init_process_group("mpi")
    elif backend in ("nccl", "gloo"):
        # rendezvous via a file shared by all ranks
        tmp_dir = os.path.join(conf.checkpoint, "tmp", conf.timestamp)
        op_paths.build_dirs(tmp_dir)
        init_file = os.path.join(tmp_dir, "dist_init")
        dist.init_process_group(
            backend=backend,
            init_method="file://" + os.path.abspath(init_file),
            timeout=datetime.timedelta(seconds=120),
            world_size=conf.n_mpi_process,
            rank=conf.local_rank,
        )
    else:
        raise NotImplementedError
def main(conf):
    """Run one (possibly distributed) training process end to end.

    Initialises distributed state, builds dataset/model/optimizer/scheduler
    from `conf`, optionally resumes from a checkpoint, then dispatches to the
    task-specific train-and-validate loop (language modeling vs. vision).
    """
    try:
        init_distributed_world(conf, backend=conf.backend)
        conf.distributed = True and conf.n_mpi_process > 1
    except AttributeError as e:
        # conf lacks distributed settings -> fall back to single-process mode
        print(f"failed to init the distributed world: {e}.")
        conf.distributed = False
    # init the config.
    init_config(conf)
    # define the timer for different operations.
    # if we choose the `train_fast` mode, then we will not track the time.
    conf.timer = Timer(
        verbosity_level=1 if conf.track_time and not conf.train_fast else 0,
        log_fn=conf.logger.log_metric,
        on_cuda=conf.on_cuda,
    )
    # create dataset.
    data_loader = create_dataset.define_dataset(conf, force_shuffle=True)
    # create model
    model = create_model.define_model(conf, data_loader=data_loader)
    # define the optimizer.
    optimizer = create_optimizer.define_optimizer(conf, model)
    # define the lr scheduler.
    scheduler = create_scheduler.Scheduler(conf)
    # add model with data-parallel wrapper.
    if conf.graph.on_cuda:
        if conf.n_sub_process > 1:
            model = torch.nn.DataParallel(model, device_ids=conf.graph.device)
    # (optional) reload checkpoint
    try:
        checkpoint.maybe_resume_from_checkpoint(conf, model, optimizer, scheduler)
    except RuntimeError as e:
        conf.logger.log(f"Resume Error: {e}")
        conf.resumed = False
    # train and evaluate model.
    if "rnn_lm" in conf.arch:
        from pcode.distributed_running_nlp import train_and_validate
        # safety check.
        assert (
            conf.n_sub_process == 1
        ), "our current data-parallel wrapper does not support RNN."
        # define the criterion and metrics.
        criterion = nn.CrossEntropyLoss(reduction="mean")
        criterion = criterion.cuda() if conf.graph.on_cuda else criterion
        metrics = create_metrics.Metrics(
            model.module if "DataParallel" == model.__class__.__name__ else model,
            task="language_modeling",
        )
        # define the best_perf tracker, either empty or from the checkpoint.
        # NLP tracks a loss-like metric: lower is better.
        best_tracker = stat_tracker.BestPerf(
            best_perf=None if "best_perf" not in conf else conf.best_perf,
            larger_is_better=False,
        )
        scheduler.set_best_tracker(best_tracker)
        # get train_and_validate_func
        train_and_validate_fn = train_and_validate
    else:
        from pcode.distributed_running_cv import train_and_validate
        # define the criterion and metrics.
        criterion = nn.CrossEntropyLoss(reduction="mean")
        criterion = criterion.cuda() if conf.graph.on_cuda else criterion
        metrics = create_metrics.Metrics(
            model.module if "DataParallel" == model.__class__.__name__ else model,
            task="classification",
        )
        # define the best_perf tracker, either empty or from the checkpoint.
        # CV tracks accuracy: higher is better.
        best_tracker = stat_tracker.BestPerf(
            best_perf=None if "best_perf" not in conf else conf.best_perf,
            larger_is_better=True,
        )
        scheduler.set_best_tracker(best_tracker)
        # get train_and_validate_func
        train_and_validate_fn = train_and_validate
    # save arguments to disk.
    checkpoint.save_arguments(conf)
    # start training.
    train_and_validate_fn(
        conf,
        model=model,
        criterion=criterion,
        scheduler=scheduler,
        optimizer=optimizer,
        metrics=metrics,
        data_loader=data_loader,
    )
def init_config(conf):
    """Initialise per-process runtime state on `conf`.

    Builds the communication graph topology, adjusts the batch size for
    sub-processes, seeds and configures CUDA/cuDNN, and sets up the
    checkpoint directory and logger. Mutates `conf` in place.
    """
    # define the graph for the computation.
    cur_rank = dist.get_rank() if conf.distributed else 0
    conf.graph = topology.define_graph_topology(
        graph_topology=conf.graph_topology,
        world=conf.world,
        n_mpi_process=conf.n_mpi_process,  # the # of total main processes.
        # the # of subprocess for each main process.
        n_sub_process=conf.n_sub_process,
        comm_device=conf.comm_device,
        on_cuda=conf.on_cuda,
        rank=cur_rank,
    )
    conf.is_centralized = conf.graph_topology == "complete"
    # re-configure batch_size if sub_process > 1.
    if conf.n_sub_process > 1:
        conf.batch_size = conf.batch_size * conf.n_sub_process
    # configure cuda related.
    if conf.graph.on_cuda:
        assert torch.cuda.is_available()
        torch.manual_seed(conf.manual_seed)
        torch.cuda.manual_seed(conf.manual_seed)
        torch.cuda.set_device(conf.graph.device[0])
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
        # reproducible kernels only when `train_fast` is requested.
        torch.backends.cudnn.deterministic = True if conf.train_fast else False
    # define checkpoint for logging.
    checkpoint.init_checkpoint(conf)
    # configure logger.
    conf.logger = logging.Logger(conf.checkpoint_dir)
    # display the arguments' info.
    logging.display_args(conf)
if __name__ == "__main__":
conf = get_args()
if conf.optimizer == "parallel_choco":
mp.set_start_method("forkserver", force=True)
# mp.set_start_method("spawn", force=True)
mp.set_sharing_strategy("file_system")
main(conf)
|
nilq/baby-python
|
python
|
import vigra
from init_exp import meta
from volumina_viewer import volumina_n_layer
def view_train():
    """Display the snemi3d training volume: raw data, probability maps,
    oversegmentation and ground truth, layered in volumina."""
    dataset = meta.get_dataset('snemi3d_train')
    extra_pmap = vigra.readHDF5('/home/constantin/Downloads/traininf-cst-inv.h5', 'data')
    layers = [dataset.inp(0), dataset.inp(1), extra_pmap, dataset.seg(0), dataset.gt()]
    volumina_n_layer(layers)
def view_test(res1, res2):
    """Overlay two candidate segmentations of the snemi3d test volume."""
    dataset = meta.get_dataset('snemi3d_test')
    layers = [dataset.inp(0), dataset.inp(1), dataset.seg(0), res1, res2]
    names = ['raw', 'pmap', 'ws', 'curr_res', 'best_res']
    volumina_n_layer(layers, names)
def view_test_pmaps(new_pmaps):
    """Compare the stored probability maps of the snemi3d test volume with
    newly computed ones passed in `new_pmaps`."""
    dataset = meta.get_dataset('snemi3d_test')
    raw = dataset.inp(0)
    pm_old = dataset.inp(1)
    pm_2d = vigra.readHDF5('/home/constantin/Work/neurodata_hdd/snemi3d_data/probabilities/pmaps_icv2_test.h5', 'data')
    data = [raw, pm_old, pm_2d] + list(new_pmaps)
    labels = ['raw', '3d_v2', '2d', '3d_v3_i1', '3d_v3_i2', '3d_v3_i3', 'ensemble']
    volumina_n_layer(data, labels)
if __name__ == '__main__':
    # Load dataset metadata, read two stored segmentation results and
    # display them side by side for visual comparison.
    meta.load()
    res1 = vigra.readHDF5('/home/constantin/Work/multicut_pipeline/software/multicut_exp/rebuttal/snemi/snemi_ultimate_seglmc_myel_myelmerged.h5', 'data')
    #res2 = vigra.readHDF5('/home/constantin/Work/multicut_pipeline/software/multicut_exp/rebuttal/snemi/snemi_final_segmc_myel.h5', 'data')
    res3 = vigra.readHDF5('/home/constantin/Work/multicut_pipeline/software/multicut_exp/rebuttal/snemi/round3/snemi_final_seglmc_myel_myelmerged.h5', 'data')
    view_test(res1, res3)
|
nilq/baby-python
|
python
|
#Test the frame by frame image output for image classification using a previous classifier
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateBatch, ImageFileCreateEntry, Region
from msrest.authentication import ApiKeyCredentials
import os, time, uuid
import pandas as pd
import glob
# Replace with valid values
ENDPOINT = " "  # Use the Cognitive Services endpoint
training_key = " "  # Take from the second resource which is used at the project inception stage
prediction_key = " "  # Take this from the resource created for the Prediction Cog Service
prediction_resource_id = " "
# use the entire string generated by the Prediction Endpoint

# Authenticated clients for the training and prediction services.
credentials = ApiKeyCredentials(in_headers={"Training-key": training_key})
trainer = CustomVisionTrainingClient(ENDPOINT, credentials)
prediction_credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predictor = CustomVisionPredictionClient(ENDPOINT, prediction_credentials)

os.chdir(' ')  # Put the local folder where the code module resides

# Additional settings
publish_iteration_name = " "  # Use the name of the model, not the iteration name
project_name = " "  # Use the project name
projectid = ""  # Use the project id (see the commented debug prints below)
# Fix: the original line duplicated the assignment target
# ("base_image_location = base_image_location = ...").
base_image_location = os.path.join(os.path.dirname(__file__), "Images")
# You can use any sub folder in the main folder but change the name of the
# folder where the images reside that need image classification

# Get the project credentials
project = trainer.get_project(projectid)
# Getting the tag
tags = trainer.get_tags(projectid)
# for i in tags:
#     print(i)
# print(project.id)
# print(tags)

# Running the classification against the trained endpoint.
# (The original re-created `prediction_credentials`/`predictor` here with
# identical arguments; the redundant duplicate has been removed.)
output_folder = ""  # Put the folder where the csv files will be placed.
directory = ' '  # Use the folder where the images are
# Classify every jpg in `directory` and write one csv of tag probabilities
# per image.
for filename in os.listdir(directory):
    if filename.endswith("jpg"):
        with open(os.path.join(directory, filename), "rb") as image_contents:
            results = predictor.classify_image(
                project.id, publish_iteration_name, image_contents.read())
            # Display the results.
            # NOTE(review): the local name `csv` shadows the stdlib module
            # name, and os.chdir('') will raise unless a real path is filled
            # in; the chdir also happens *after* this open, so only later
            # iterations write into the new directory — confirm intent.
            with open('%s.csv' %filename, 'wt') as csv:
                os.chdir('') #Use the folder where the csv files need to be written
                csv.write("ImageName,TagName,Probability\n")
                for prediction in results.predictions:
                    #for tag
                    csv.write((filename + "," + prediction.tag_name +
                               ", {0:.2f}%".format(prediction.probability * 100)+"\n")
                              )
# Once the individual files are generated, using glob to combine them into one corpus
# NOTE(review): glob searches whatever directory the loop above last
# chdir'ed into; confirm the per-image csv files actually live there.
extension = 'csv'
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
# Concatenate every per-image csv into a single DataFrame.
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])
os.chdir(' ') #Use the folder where the final combined file needs to reside.
combined_csv.to_csv('ImageClassificationFinal.csv', index=False) #Saving our combined csv data as a new file!
#Remove all csv files created individually
directory = " " #Folder where the csv files are there
files_in_directory = os.listdir(directory)
filtered_files = [file for file in files_in_directory if file.endswith(".csv")]
for file in filtered_files:
    path_to_file = os.path.join(directory, file)
    os.remove(path_to_file)
|
nilq/baby-python
|
python
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _WeightedLoss, _Loss
def one_hot(class_labels, num_classes=None):
    """Convert a 1-D tensor of integer class labels to a one-hot matrix.

    Parameters
    ----------
    class_labels : torch.LongTensor, shape (N,)
        Integer class indices.
    num_classes : int, optional
        Number of columns of the result; inferred as ``max(label) + 1``
        when omitted.

    Returns
    -------
    torch.FloatTensor of shape (N, num_classes).
    """
    # `== None` replaced with an identity check; the two original branches
    # differed only in the column count, so they are merged.
    if num_classes is None:
        num_classes = int(class_labels.max()) + 1
    return torch.zeros(len(class_labels), num_classes).scatter_(
        1, class_labels.unsqueeze(1), 1.)
# Thin alias of torch.nn.CrossEntropyLoss so all losses can be imported
# from this module.
class CrossEntropyLoss(nn.CrossEntropyLoss):
    pass
# Thin alias of torch.nn.MSELoss so all losses can be imported from
# this module.
class MSELoss(nn.MSELoss):
    pass
class KLDivLoss(_Loss):
def __init__(self):
super(KLDivLoss, self).__init__()
def forward(self,pert,dp):
return F.kl_div(pert.softmax(dim=-1).log(), dp.softmax(dim=-1).repeat(len(pert),1), reduction='batchmean')
class CoSLoss(_WeightedLoss):
    """Cosine-similarity loss between two sets of logits; an optional
    one-hot `target` switches to the (deprecated) label-dependent form."""

    def __init__(self):
        super(CoSLoss, self).__init__()
        self.name = 'CoS'

    def forward(self, logit_i_p, logit_p, target=None):
        if target is None:
            # label_free: distance = 1 - cosine similarity
            loss = 1 - torch.cosine_similarity(logit_p, logit_i_p)
        else:
            # label_dependent (deprecated)
            target_logits = (target * logit_i_p).sum(1)
            loss = - 0.05 * target_logits - torch.cosine_similarity(logit_p, logit_i_p)
        return torch.mean(loss)
|
nilq/baby-python
|
python
|
from wordfilter import censored_words
from lxml import etree
import datetime
import javmovie
BASEURL="https://www.javlibrary.com/en/vl_searchbyid.php?keyword="
DIRECTURL="https://www.javlibrary.com/en/?v="
xpath_title = "/html/body/div[3]/div[2]/div[1]/h3/a"
xpath_javcode = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[1]/table/tr/td[2]"
xpath_tags = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[8]/table/tr/td[2]"
xpath_tags_no_rating = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[7]/table/tr/td[2]"
xpath_actress = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[9]/table/tr/td[2]"
xpath_studiolabel = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[6]/table/tr/td[2]/span/a"
xpath_releasedate = "/html/body/div[3]/div[2]/table/tr/td[2]/div/div[2]/table/tr/td[2]"
xpath_image = "/html/body/div[3]/div[2]/table/tr/td[1]/div/img"
xpath_notfound = "/html/body/div[3]/div[2]/p/em"
xpath_multiple_found = "/html/body/div[3]/div[2]/div[1]"
xpath_multiple_list = "/html/body/div[3]/div[2]/div[2]/div"
releasedate_format = "%Y-%m-%d"
def get_by_jav_id(jav_id, BASEURL=BASEURL):
    """Scrape javlibrary for `jav_id` and return a JAVMovie, a list of
    JAVMovie for ambiguous searches, or None when nothing is found.

    NOTE(review): `HTTP` is not defined in this module — presumably injected
    by the hosting (Plex plug-in) framework; confirm before reuse elsewhere.
    """
    try:
        html = HTTP.Request(BASEURL + jav_id).content
    except Exception as e:
        # network/HTTP failure: treat as "not found"
        return None
    tree = etree.HTML(html)
    args = {}
    # Explicit "no result" page.
    if len(tree.xpath(xpath_notfound)) > 0 and "Search returned no result." in tree.xpath(xpath_notfound)[0].text:
        return None
    # A keyword search may land on a disambiguation list; recurse into each
    # linked video via the direct-lookup URL and return all of them.
    if BASEURL != DIRECTURL and len(tree.xpath(xpath_multiple_found)) > 0 and tree.xpath(xpath_multiple_found)[0].text is not None:
        if "ID Search Result" in tree.xpath(xpath_multiple_found)[0].text:
            if len(tree.xpath(xpath_multiple_list)[0]) > 0:
                results = []
                for videolink in tree.xpath(xpath_multiple_list)[0]:
                    vid = get_by_jav_id(videolink[0].attrib["href"].replace("./?v=", ""), DIRECTURL)
                    results.append(vid)
                return results
    args["jav_code"] = tree.xpath(xpath_javcode)[0].text
    # Strip the code (bracketed or bare) out of the title, then apply the
    # word filter before re-titlecasing.
    title = str(tree.xpath(xpath_title)[0].text).replace("[" + args["jav_code"] + "]", "").replace(args["jav_code"], "").lower()
    for word, replacement in censored_words.items():
        title = title.replace(word.lower(), replacement)
    args["title"] = title.title().strip()
    tags = []
    # Pages without a rating block shift the tag table up one div; the
    # AttributeError fallback handles that layout.
    try:
        for a in tree.xpath(xpath_tags)[0]:
            tags.append(a[0].text.title())
    except AttributeError:
        for a in tree.xpath(xpath_tags_no_rating)[0]:
            tags.append(a[0].text.title())
    args["tags"] = tags
    if len(tree.xpath(xpath_studiolabel)) > 0:
        args["studio_label"] = tree.xpath(xpath_studiolabel)[0].text
    date = tree.xpath(xpath_releasedate)[0].text
    if date is None:
        args["release_date"] = None
    else:
        args["release_date"] = datetime.datetime.strptime(date, releasedate_format)
    # Cover image URLs may be protocol-relative ("//..."): prefix https.
    args["image_url"] = ("https:" + tree.xpath(xpath_image)[0].attrib["src"]) if tree.xpath(xpath_image)[0].attrib["src"].startswith("//") else tree.xpath(xpath_image)[0].attrib["src"]
    return javmovie.JAVMovie(args)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest
from monasca_analytics.sink import iptables_sqlite as ipt_snk
class TestIptablesSQLiteSink(unittest.TestCase):
    """Unit tests for the iptables SQLite sink's SQL-building helpers."""

    def setUp(self):
        unittest.TestCase.setUp(self)
        # Minimal valid config: only the module name is required.
        self._valid_config = {"module": "IptablesSQLiteSink"}
        self.snk = ipt_snk.IptablesSQLiteSink("fake_id", self._valid_config)

    def test_rdds_table_create_query(self):
        # The sink must create its `rdds` table with exactly this schema.
        query = self.snk._rdds_table_create_query()
        self.assertEqual("""CREATE TABLE IF NOT EXISTS rdds
            (msg TEXT, anomalous TEXT, msg_id TEXT, ctime TEXT)""", query)

    def test_rdd_insert_query_valid_rdd(self):
        # A complete entry maps onto an INSERT with string-quoted values.
        rdd_entry = {
            "msg": "test message",
            "id": 1,
            "anomalous": True,
            "ctime": "t1"
        }
        query = self.snk._rdd_insert_query(rdd_entry)
        self.assertEqual(
            'INSERT INTO rdds VALUES("test message", "True", "1", "t1")',
            query)

    def test_rdd_insert_query_invalid_rdd(self):
        # Missing "id" key must surface as a KeyError, not a bad query.
        rdd_entry = {
            "msg": "test message",
            "anomalous": True,
            "ctime": "t1"
        }
        self.assertRaises(KeyError, self.snk._rdd_insert_query, rdd_entry)

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        # The sink creates this db file on disk; remove it between tests.
        os.remove("sqlite_sink.db")
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
import telebot
import time
import threading
#Variables Globales
enviados = 0
recibidos = 0
#Decoradores
def controlador_mensajes(cant_enviar):
    """
    Message-rate controller (decorator factory).

    Counts how many messages are received and sent; once either counter
    reaches 20 the wrapper sleeps for one second and resets both, to stay
    under Telegram's rate limits. From the Telegram documentation:

        My bot is hitting limits, how do I avoid this?
        When sending messages inside a particular chat, avoid sending more
        than one message per second. We may allow short bursts that go over
        this limit, but eventually you'll begin receiving 429 errors.
        If you're sending bulk notifications to multiple users, the API will
        not allow more than 30 messages per second or so. Also note that your
        bot will not be able to send more than 20 messages per minute to the
        same group.

    `cant_enviar` is the number of messages the decorated handler sends.
    """
    from functools import wraps

    def Decorador(funcion):
        @wraps(funcion)  # preserve the handler's name/docstring
        def wrapper(*args, **kwargs):
            global recibidos, enviados
            recibidos += 1
            enviados += cant_enviar
            if enviados >= 20 or recibidos >= 20:
                time.sleep(1)
                enviados = 0
                recibidos = 0
            # Fix: the original discarded the wrapped function's result.
            return funcion(*args, **kwargs)
        return wrapper
    return Decorador
class Bot(telebot.TeleBot):
    """Telegram bot that registers this module's handler tables with telebot.

    Fixes relative to the original:
    - the base class exported by pyTelegramBotAPI is ``telebot.TeleBot``
      (the original subclassed the non-existent ``telebot.Telebot``);
    - ``__init__`` now forwards the caller's arguments instead of passing
      hard-coded defaults to ``super().__init__``;
    - the second registration loop now iterates ``callback_query_handler``
      (the original iterated ``messages_handler`` twice).
    """

    def __init__(self, token, threaded=True, skip_pending=False, num_threads=2):
        super().__init__(token, threaded=threaded, skip_pending=skip_pending,
                         num_threads=num_threads)

        # Handlers for plain messages received by the bot.
        messages_handler = {
            'start': dict(
                function=lambda msg, obj=self: obj.start(msg),
                filters=dict(
                    commands=["start"]
                )
            ),
        }

        # Handlers for callback-query answers received by the bot.
        callback_query_handler = {
            'start': dict(
                function=lambda msg, obj=self: obj.start(msg),
                filters=dict(
                    commands=["start"]
                )
            ),
        }

        # Register each handler with telebot.
        for handler in messages_handler.values():
            self.add_message_handler(handler)
        for handler in callback_query_handler.values():
            self.add_callback_query_handler(handler)
def bot_polling(token):
    """Run the bot's polling loop forever, recreating the bot after errors.

    On a polling error the loop stops the bot (if it was created) and
    retries with a fresh instance; a clean exit breaks the loop.
    """
    while True:
        bot = None
        try:
            bot = Bot(token, threaded=False)
            bot.polling(none_stop=True, interval=0, timeout=0)
        except Exception:  # Error in polling
            # Fix: if Bot() itself raised, `bot` is still None and the
            # original's unconditional bot.stop_polling() raised again.
            if bot is not None:
                bot.stop_polling()
        else:  # Clean exit
            bot.stop_polling()
            break  # End loop
# NOTE(review): bot_polling requires a ``token`` argument but none is passed
# here (no args=(...,)), and no ``token`` variable is defined in this module
# -- the thread will fail with a TypeError as soon as it starts. Confirm the
# intended wiring before relying on this.
polling_thread = threading.Thread(target=bot_polling)
polling_thread.daemon = True
polling_thread.start()
if __name__ == "__main__":
while True:
try:
time.sleep(120)
except KeyboardInterrupt:
break
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from qtpy import QtWidgets, QtCore
from planetaryimage import PDS3Image
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
from pdsview import pdsview
from pdsview.channels_dialog import ChannelsDialog
from pdsview.histogram import HistogramWidget, HistogramModel
FILE_1 = os.path.join(
'tests', 'mission_data', '2m132591087cfd1800p2977m2f1.img')
FILE_2 = os.path.join(
'tests', 'mission_data', '2p129641989eth0361p2600r8m1.img')
FILE_3 = os.path.join(
'tests', 'mission_data', '1p190678905erp64kcp2600l8c1.img')
FILE_4 = os.path.join(
'tests', 'mission_data', 'h58n3118.img')
FILE_5 = os.path.join(
'tests', 'mission_data', '1p134482118erp0902p2600r8m1.img')
FILE_6 = os.path.join(
'tests', 'mission_data', '0047MH0000110010100214C00_DRCL.IMG')
FILE_1_NAME = '2m132591087cfd1800p2977m2f1.img'
FILE_2_NAME = '2p129641989eth0361p2600r8m1.img'
FILE_3_NAME = '1p190678905erp64kcp2600l8c1.img'
FILE_4_NAME = 'h58n3118.img'
FILE_5_NAME = '1p134482118erp0902p2600r8m1.img'
FILE_6_NAME = '0047MH0000110010100214C00_DRCL.IMG'
def test_image_stamp():
    """Test that ImageStamp sets correct attributes to pds compatible image"""
    pds_image = PDS3Image.open(FILE_1)
    test_image = pdsview.ImageStamp(FILE_1, FILE_1, pds_image, pds_image.data)
    # identity attributes
    assert test_image.file_name == FILE_1_NAME
    assert test_image.image_name == FILE_1
    # label is parsed into a list of strings starting with the PDS header
    assert 'PDS' in test_image.label[0]
    assert isinstance(test_image.label, list)
    # display state starts out unset
    assert not test_image.cuts
    assert not test_image.sarr
    assert not test_image.zoom
    assert not test_image.rotation
    assert not test_image.transforms
    assert test_image.not_been_displayed
class TestImageSet(object):
filepaths = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5]
test_set = pdsview.ImageSet(filepaths)
    def test_init(self):
        """A freshly constructed ImageSet exposes sane default state."""
        assert self.test_set._views == set()
        assert len(self.test_set.images) == len(self.filepaths)
        # images are stored sorted by file path
        filepaths = sorted(self.filepaths)
        for image, filepath in zip(self.test_set.images, filepaths):
            assert image[0].file_name == os.path.basename(filepath)
        assert self.test_set._current_image_index == 0
        assert self.test_set._channel == 0
        # assert self.test_set._last_channel is None
        assert self.test_set._x_value == 0
        assert self.test_set._y_value == 0
        assert self.test_set._pixel_value == (0, )
        assert self.test_set.use_default_text
        assert self.test_set.rgb == []
        assert self.test_set.current_image is not None
def test_next_prev_enabled(self):
assert self.test_set.next_prev_enabled
test_set2 = pdsview.ImageSet([])
assert not test_set2.next_prev_enabled
@pytest.mark.parametrize(
"index, expected, channel",
[
(1, 1, 1),
(5, 0, 4),
(11, 1, -1),
(-1, 4, 7),
(-13, 2, 42),
(0, 0, 0)
])
def test_current_image_index(self, index, expected, channel):
self.test_set.channel = channel
self.test_set.current_image_index = index
assert self.test_set.current_image_index == expected
assert self.test_set.current_image == self.test_set.images[expected]
assert self.test_set.channel == 0
def test_channel(self):
assert self.test_set._channel == self.test_set.channel
assert len(self.test_set.current_image) == 1
self.test_set.channel = 42
# When the current image only has one band, don't change the channel
assert self.test_set.channel == 0
assert self.test_set._channel == self.test_set.channel
# TODO: When an rgb image is in the default test_mission_data, test
# actually chaning the channel
def test_x_value(self):
assert self.test_set.x_value == self.test_set._x_value
self.test_set.x_value = 42.123456789
assert isinstance(self.test_set.x_value, int)
assert self.test_set.x_value == 42
assert self.test_set.x_value == self.test_set._x_value
self.test_set.x_value = 0
assert self.test_set.x_value == 0
assert self.test_set.x_value == self.test_set._x_value
def test_y_value(self):
assert self.test_set.y_value == self.test_set._y_value
self.test_set.y_value = 42.123456789
assert isinstance(self.test_set.y_value, int)
assert self.test_set.y_value == 42
assert self.test_set.y_value == self.test_set._y_value
self.test_set.y_value = 0
assert self.test_set.y_value == 0
assert self.test_set.y_value == self.test_set._y_value
def test_pixel_value(self):
def check_pixel_value(new_pixel, expected):
self.test_set.pixel_value = new_pixel
assert self.test_set.pixel_value == expected
assert isinstance(self.test_set.pixel_value, tuple)
for val in self.test_set.pixel_value:
assert isinstance(val, float)
assert self.test_set.pixel_value == (0.0,)
check_pixel_value(
(2.3456, 3.4567, 4.5678), (2.346, 3.457, 4.568))
check_pixel_value([2.3456, 3.4567, 4.5678], (2.346, 3.457, 4.568))
check_pixel_value(
np.array([2.3456, 3.4567, 4.5678]), (2.346, 3.457, 4.568))
check_pixel_value(
42.1234, (42.123,))
check_pixel_value(
int(42), (42.0,))
check_pixel_value(
0, (0,))
def test_pixel_value_text(self):
assert self.test_set.pixel_value_text == 'Value: 0.000'
# TODO: TEST WITH RGB IMAGE
def test_image_set_append_method(self):
"""Test append method with multiple images"""
filepaths = [FILE_1]
new_files = [FILE_2, FILE_3]
test_set = pdsview.ImageSet(filepaths)
assert test_set.current_image_index == 0
assert test_set.current_image[0].file_name == FILE_1_NAME
assert len(test_set.images) == 1
assert not(test_set.next_prev_enabled)
# Mimic how append method is used in pdsview
first_new_image = len(test_set.images)
test_set.append(new_files, first_new_image)
assert test_set.current_image_index == 1
assert test_set.current_image[0].file_name == FILE_2_NAME
assert FILE_3_NAME in str(test_set.images)
assert test_set.next_prev_enabled
def test_bands_are_composite(self):
self.test_set.rgb = [image[0] for image in self.test_set.images[:3]]
assert not self.test_set.bands_are_composite
# TODO: TEST WITH RGB IMAGE
# TODO: TEST create_rgb_image WHEN RGB IMAGE IN TEST DATA
def test_ROI_data(self):
"""Test the ROI_data to cut out the correct region of data"""
test_set = pdsview.ImageSet([FILE_3])
width = test_set.current_image[0].width
height = test_set.current_image[0].height
test_data_1 = test_set.ROI_data(0, 0, width, height)
assert test_data_1[0][0] == 23
assert test_data_1[512][16] == 25
assert test_data_1[1023][31] == 115
test_data_2 = test_set.ROI_data(9.5, 18.5, 11.5, 20.5)
assert test_data_2[0][0] == 22
assert test_data_2[0][1] == 23
assert test_data_2[1][0] == 24
assert test_data_2[1][1] == 24
def test_ROI_pixels(self):
"""Test ROI_pixels to return the correct number of pixels for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_pixels = test_set.ROI_pixels(9.5, 18.5, 11.5, 20.5)
assert test_pixels == 4
def test_ROI_std_dev(self):
"""Test ROI_std_dev to return the correct standard deviation for ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_std_dev = test_set.ROI_std_dev(9.5, 18.5, 11.5, 20.5)
assert test_std_dev == 0.829156
def test_ROI_mean(self):
"""Test ROI_mean to return the correct mean value of pixels for ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_mean = test_set.ROI_mean(9.5, 18.5, 11.5, 20.5)
assert test_mean == 23.25
def test_ROI_median(self):
"""Test ROI_median to return the correct median value for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_median = test_set.ROI_median(9.5, 18.5, 11.5, 20.5)
assert test_median == 23.5
def test_ROI_min(self):
"""Test ROI_min to return the correct minimum pixel value for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_min = test_set.ROI_min(9.5, 18.5, 11.5, 20.5)
assert test_min == 22
def test_ROI_max(self):
"""Test ROI_mx to return the correct maximum pixel value for a ROI"""
test_set = pdsview.ImageSet([FILE_3])
test_max = test_set.ROI_max(9.5, 18.5, 11.5, 20.5)
assert test_max == 24
# TODO test channels when there is a 3 band test image
class TestPDSController(object):
    """Tests for ``pdsview.PDSController`` (mediates model <-> view).

    NOTE(review): ``test_set``/``controller`` are shared class-level state,
    so the tests mutate a common model instance.
    """

    filepaths = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5]
    test_set = pdsview.ImageSet(filepaths)
    # View is None: these tests exercise the controller -> model direction only.
    controller = pdsview.PDSController(test_set, None)

    def test_init(self):
        assert self.controller.model == self.test_set
        assert self.controller.view is None

    def test_next_image(self):
        # next_image advances by one and wraps past the last image.
        assert self.test_set.current_image_index == 0
        self.controller.next_image()
        assert self.test_set.current_image_index == 1
        self.test_set.current_image_index = len(self.test_set.images) - 1
        self.controller.next_image()
        assert self.test_set.current_image_index == 0

    def test_previous_image(self):
        # previous_image steps back by one and wraps to the last image.
        assert self.test_set.current_image_index == 0
        self.controller.previous_image()
        last = len(self.test_set.images) - 1
        assert self.test_set.current_image_index == last
        self.test_set.current_image_index = 1
        self.controller.previous_image()
        assert self.test_set.current_image_index == 0

    def test_next_channel(self):
        # Single-band images keep the channel pinned at 0.
        assert self.test_set.channel == 0
        self.controller.next_channel()
        assert self.test_set.channel == 0
        # TODO: TEST MORE WHEN THERE IS AN RGB IMAGE

    def test_previous_channel(self):
        assert self.test_set.channel == 0
        self.controller.previous_channel()
        assert self.test_set.channel == 0
        # TODO: TEST MORE WHEN THERE IS AN RGB IMAGE

    def test_new_x_value(self):
        # Controller delegates to the model, which coerces floats to int.
        self.controller.new_x_value(42.123456789)
        assert isinstance(self.test_set.x_value, int)
        assert self.test_set.x_value == 42
        assert self.test_set.x_value == self.test_set._x_value
        self.controller.new_x_value(0)
        assert self.test_set.x_value == 0
        assert self.test_set.x_value == self.test_set._x_value

    def test_new_y_value(self):
        assert self.test_set.y_value == self.test_set._y_value
        self.controller.new_y_value(42.123456789)
        assert isinstance(self.test_set.y_value, int)
        assert self.test_set.y_value == 42
        assert self.test_set.y_value == self.test_set._y_value
        self.controller.new_y_value(0)
        assert self.test_set.y_value == 0
        assert self.test_set.y_value == self.test_set._y_value

    def test_new_pixel_value(self):
        # Same normalization contract as ImageSet.pixel_value: tuple of
        # floats rounded to three decimal places.
        def check_pixel_value(new_pixel, expected):
            self.controller.new_pixel_value(new_pixel)
            assert self.test_set.pixel_value == expected
            assert isinstance(self.test_set.pixel_value, tuple)
            for val in self.test_set.pixel_value:
                assert isinstance(val, float)
        assert self.test_set.pixel_value == (0.0,)
        check_pixel_value(
            (2.3456, 3.4567, 4.5678), (2.346, 3.457, 4.568))
        check_pixel_value([2.3456, 3.4567, 4.5678], (2.346, 3.457, 4.568))
        check_pixel_value(
            np.array([2.3456, 3.4567, 4.5678]), (2.346, 3.457, 4.568))
        check_pixel_value(
            42.1234, (42.123,))
        check_pixel_value(
            int(42), (42.0,))
        check_pixel_value(
            0, (0,))

    # Class-level alias so the parametrize decorator below can reference
    # the images at class-creation time.
    images = test_set.images

    @pytest.mark.parametrize(
        'image_index, expected',
        [
            (0, [images[0][0], images[1][0], images[2][0]]),
            (1, [images[1][0], images[2][0], images[3][0]]),
            (len(images) - 1, [images[-1][0], images[0][0], images[1][0]])
        ])
    def test_populate_rgb(self, image_index, expected):
        # _populate_rgb picks three consecutive bands, wrapping at the end.
        test_rgb = self.controller._populate_rgb(image_index)
        assert test_rgb == expected

    def test_update_rgb(self):
        # update_rgb replaces whatever is in model.rgb with the bands for
        # the current image.
        expected = [self.images[0][0], self.images[1][0], self.images[2][0]]
        self.test_set.rgb = [1, 2, 3]
        self.controller.update_rgb()
        assert self.test_set.rgb != [1, 2, 3]
        assert self.test_set.rgb == expected
class TestPDSViewer(object):
    """Tests for ``pdsview.PDSViewer`` (the Qt widget wrapping an ImageSet).

    NOTE(review): ``viewer`` is a shared class-level widget mutated across
    tests; qtbot-based tests additionally drive real Qt events.
    """

    filepaths = [FILE_1, FILE_2, FILE_3, FILE_4, FILE_5]
    test_set = pdsview.ImageSet(filepaths)
    viewer = pdsview.PDSViewer(test_set)
    viewer.show()

    def test_init(self):
        # Viewer registers itself with the model and starts with no
        # auxiliary windows open.
        assert self.viewer.image_set == self.test_set
        assert self.viewer in self.test_set._views
        assert self.viewer._label_window is None
        assert self.viewer._label_window_pos is None
        assert self.viewer.channels_window is None
        assert not self.viewer.channels_window_is_open
        assert self.viewer.channels_window_pos is None
        # Widget types.
        assert isinstance(
            self.viewer.view_canvas, ImageViewCanvas)
        assert isinstance(
            self.viewer.next_image_btn, QtWidgets.QPushButton)
        assert isinstance(
            self.viewer.previous_image_btn, QtWidgets.QPushButton)
        assert isinstance(
            self.viewer.open_label, QtWidgets.QPushButton)
        assert isinstance(
            self.viewer.next_channel_btn, QtWidgets.QPushButton)
        assert isinstance(
            self.viewer.previous_channel_btn, QtWidgets.QPushButton)
        assert isinstance(
            self.viewer.restore_defaults, QtWidgets.QPushButton)
        assert isinstance(
            self.viewer.channels_button, QtWidgets.QPushButton)
        assert isinstance(
            self.viewer.x_value_lbl, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.y_value_lbl, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.pixel_value_lbl, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.pixels, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.std_dev, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.mean, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.median, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.min, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.max, QtWidgets.QLabel)
        assert isinstance(
            self.viewer.histogram, HistogramModel)
        assert isinstance(
            self.viewer.histogram_widget, HistogramWidget)
        assert isinstance(
            self.viewer.rgb_check_box, QtWidgets.QCheckBox)
        # Initial display text for the full-image ROI of the first image
        # (images are sorted, so FILE_5 is displayed first).
        assert self.viewer.windowTitle() == FILE_5_NAME
        assert self.viewer.pixels.text() == '#Pixels: 32768'
        assert self.viewer.std_dev.text() == 'Std Dev: 16.100793'
        assert self.viewer.mean.text() == 'Mean: 24.6321'
        assert self.viewer.median.text() == 'Median: 22.0'
        assert self.viewer.min.text() == 'Min: 17'
        assert self.viewer.max.text() == 'Max: 114'
        assert self.viewer.x_value_lbl.text() == 'X: ????'
        assert self.viewer.y_value_lbl.text() == 'Y: ????'
        assert self.viewer.pixel_value_lbl.text() == 'Value: ????'
        assert not self.viewer.rgb_check_box.isChecked()

    def test_current_image(self):
        assert self.viewer.current_image == \
            self.test_set.current_image[self.test_set.channel]

    def test_refresh_ROI_text(self):
        # _refresh_ROI_text recomputes the labels from the current ROI.
        self.viewer.min.setText("Min: 0")
        self.viewer.max.setText("Max: 100")
        self.viewer._refresh_ROI_text()
        assert self.viewer.min.text() == 'Min: 17'
        assert self.viewer.max.text() == 'Max: 114'

    def test_reset_ROI(self):
        # _reset_ROI falls back to the whole-image ROI.
        self.viewer.min.setText("Min: 0")
        self.viewer.max.setText("Max: 100")
        self.viewer._reset_ROI()
        assert self.viewer.min.text() == 'Min: 17'
        assert self.viewer.max.text() == 'Max: 114'

    # TODO: When have RGB Image Test _disable_next_previous

    def test_reset_display_values(self):
        self.viewer.x_value_lbl.setText("X: 42")
        self.viewer.y_value_lbl.setText("Y: 42")
        self.viewer.pixel_value_lbl.setText("Value: 42")
        self.viewer._reset_display_values()
        assert self.viewer.x_value_lbl.text() == 'X: ????'
        assert self.viewer.y_value_lbl.text() == 'Y: ????'
        assert self.viewer.pixel_value_lbl.text() == 'Value: ????'

    def test_window_cascade(self, qtbot):
        """Tests the window cascade."""
        # Initial checks
        assert self.viewer._label_window is None
        assert self.viewer.open_label.isEnabled()
        # Open the label window and run appropriate checks
        qtbot.mouseClick(self.viewer.open_label, QtCore.Qt.LeftButton)
        qtbot.add_widget(self.viewer._label_window)
        assert self.viewer._label_window is not None
        assert self.viewer._label_window._finder_window is None
        assert self.viewer._label_window.is_open
        # Open the finder window and run appropriate checks
        qtbot.mouseClick(
            self.viewer._label_window.find_button, QtCore.Qt.LeftButton)
        assert self.viewer._label_window._finder_window is not None
        qtbot.add_widget(self.viewer._label_window._finder_window)
        assert not(self.viewer._label_window._finder_window.query_edit)
        # Hide windows and check to make sure they are hidden
        qtbot.mouseClick(
            self.viewer._label_window._finder_window.ok_button,
            QtCore.Qt.LeftButton)
        assert self.viewer._label_window._finder_window.isHidden()
        qtbot.mouseClick(
            self.viewer._label_window.cancel_button, QtCore.Qt.LeftButton)
        assert self.viewer._label_window.isHidden()
        # Test the ability for the parent (label) to hide the child (finder)
        qtbot.mouseClick(
            self.viewer.open_label, QtCore.Qt.LeftButton)
        qtbot.mouseClick(
            self.viewer._label_window.find_button, QtCore.Qt.LeftButton)
        assert not(self.viewer._label_window.isHidden())
        assert not(self.viewer._label_window._finder_window.isHidden())
        qtbot.mouseClick(
            self.viewer._label_window.cancel_button, QtCore.Qt.LeftButton)
        assert self.viewer._label_window.isHidden()
        assert self.viewer._label_window._finder_window.isHidden()

    def test_label_refresh(self, qtbot):
        """Tests the label display and refresh features."""
        qtbot.mouseClick(self.viewer.open_label, QtCore.Qt.LeftButton)
        qtbot.add_widget(self.viewer._label_window)
        label_contents = self.viewer._label_window.label_contents
        assert label_contents.toPlainText()[233:236] == "341"
        qtbot.mouseClick(self.viewer.next_image_btn, QtCore.Qt.LeftButton)
        label_contents = self.viewer._label_window.label_contents
        assert label_contents.toPlainText()[228:231] == "338"
        qtbot.mouseClick(self.viewer.previous_image_btn, QtCore.Qt.LeftButton)
        label_contents = self.viewer._label_window.label_contents
        assert label_contents.toPlainText()[233:236] == "341"

    def test_channels_dialog(self, qtbot):
        # The channels dialog remembers its position between open/close.
        assert self.viewer.channels_window is None
        assert not self.viewer.channels_window_is_open
        assert self.viewer.channels_window_pos is None
        qtbot.add_widget(self.viewer)
        qtbot.mouseClick(self.viewer.channels_button, QtCore.Qt.LeftButton)
        assert self.viewer.channels_window is not None
        assert self.viewer.channels_window_is_open
        assert isinstance(self.viewer.channels_window, ChannelsDialog)
        assert self.viewer.channels_window_pos is None
        qtbot.add_widget(self.viewer.channels_window)
        new_pos = QtCore.QPoint(42, 24)
        self.viewer.channels_window.move(new_pos)
        qtbot.mouseClick(
            self.viewer.channels_window.close_button, QtCore.Qt.LeftButton)
        assert self.viewer.channels_window_pos is not None
        assert self.viewer.channels_window_pos == new_pos
        qtbot.mouseClick(self.viewer.channels_button, QtCore.Qt.LeftButton)
        # BUG FIX: this line was a bare expression (no-op); the intent is to
        # assert the reopened window was restored to the remembered position.
        assert self.viewer.channels_window.pos() == new_pos

    def test_apply_parameters(self, qtbot):
        """Test that images maintain their parameters"""
        self.viewer.save_parameters()
        image1 = self.viewer.current_image
        assert image1.sarr[0] == 0
        assert image1.sarr[255] == 255
        # assert image1.zoom == 1.0
        assert image1.rotation == 0.0
        assert image1.transforms == (False, False, False)
        assert image1.cuts == (17, 25)
        # Change parameters
        image1.sarr[0] = 42
        image1.sarr[255] = 13
        self.viewer.view_canvas.get_rgbmap().set_sarr(image1.sarr)
        # self.viewer.view_canvas.zoom_to(3)
        self.viewer.view_canvas.rotate(45)
        self.viewer.view_canvas.transform(False, True, False)
        self.viewer.view_canvas.cut_levels(24, 95)
        qtbot.mouseClick(self.viewer.next_image_btn, QtCore.Qt.LeftButton)
        # Test the second image parameters are None by default
        image2 = self.viewer.current_image
        # Test the view was reset to default parameters for the image
        assert self.viewer.view_canvas.get_rgbmap().get_sarr()[0] == 0
        assert self.viewer.view_canvas.get_rgbmap().get_sarr()[255] == 255
        # assert self.viewer.view_canvas.get_zoom() == 1.0
        assert self.viewer.view_canvas.get_rotation() == 0.0
        assert self.viewer.view_canvas.get_transforms() == (
            False, False, False
        )
        assert self.viewer.view_canvas.get_cut_levels() == (22, 26)
        # Test changing back to the first image maintains image1's parameters
        qtbot.mouseClick(self.viewer.previous_image_btn, QtCore.Qt.LeftButton)
        image1 = self.viewer.image_set.current_image[0]
        assert image1.sarr[0] == 42
        assert image1.sarr[255] == 13
        # assert image1.zoom == 3.0
        assert image1.rotation == 45.0
        assert image1.transforms == (False, True, False)
        assert image1.cuts == (24, 95)
        # Test that image2 stored its parameters
        image2 = self.viewer.image_set.images[1][0]
        assert image2.sarr[0] == 0
        assert image2.sarr[255] == 255
        # assert image2.zoom == 4.746031746031746
        assert image2.rotation == 0.0
        assert image2.transforms == (False, False, False)
        assert image2.cuts == (22, 26)

    def test_restore(self, qtbot):
        # restore_defaults resets sarr/rotation/transforms/cuts in one click.
        image1 = self.viewer.image_set.current_image[0]
        image1.sarr[0] = 42
        image1.sarr[255] = 13
        self.viewer.view_canvas.get_rgbmap().set_sarr(image1.sarr)
        # self.viewer.view_canvas.zoom_to(3)
        self.viewer.view_canvas.rotate(45)
        self.viewer.view_canvas.transform(False, True, False)
        self.viewer.view_canvas.cut_levels(24, 95)
        assert image1.sarr[0] == 42
        assert image1.sarr[255] == 13
        # assert image1.zoom == 3.0
        assert image1.rotation == 45.0
        assert image1.transforms == (False, True, False)
        assert image1.cuts == (24, 95)
        qtbot.mouseClick(self.viewer.restore_defaults, QtCore.Qt.LeftButton)
        self.viewer.save_parameters()
        assert image1.sarr[0] == 0
        assert image1.sarr[255] == 255
        # assert image1.zoom == 1.0
        assert image1.rotation == 0.0
        assert image1.transforms == (False, False, False)
        assert image1.cuts == (17, 25)

    def test_set_ROI_text(self, qtbot):
        """Test the ROI text to contain the correct values"""
        # Test Whole image ROI
        assert self.viewer.pixels.text() == '#Pixels: 32768'
        assert self.viewer.std_dev.text() == 'Std Dev: 16.100793'
        assert self.viewer.mean.text() == 'Mean: 24.6321'
        assert self.viewer.median.text() == 'Median: 22.0'
        assert self.viewer.min.text() == 'Min: 17'
        assert self.viewer.max.text() == 'Max: 114'
        # Test 2x2 random ROI
        # .5 values because these are the edge of the ROI pixels
        self.viewer.set_ROI_text(14.5, 512.5, 16.5, 514.5)
        assert self.viewer.pixels.text() == '#Pixels: 4'
        assert self.viewer.std_dev.text() == 'Std Dev: 1.000000'
        assert self.viewer.mean.text() == 'Mean: 23.0000'
        assert self.viewer.median.text() == 'Median: 23.0'
        assert self.viewer.min.text() == 'Min: 22'
        assert self.viewer.max.text() == 'Max: 24'

    def test_top_right_pixel_snap(self):
        # Returns (snapped_coordinate, in_bounds_flag) for the top/right edge.
        test_snap_1 = self.viewer.top_right_pixel_snap(10, 5)
        assert test_snap_1[0] == 5.5
        assert test_snap_1[1]
        test_snap_2 = self.viewer.top_right_pixel_snap(-5, 5)
        assert not test_snap_2[1]
        test_snap_3 = self.viewer.top_right_pixel_snap(5.4, 10)
        assert test_snap_3[0] == 5.5
        assert test_snap_3[1]
        test_snap_4 = self.viewer.top_right_pixel_snap(5.5, 10)
        assert test_snap_4[0] == 5.5
        assert test_snap_4[1]
        test_snap_5 = self.viewer.top_right_pixel_snap(5.6, 10)
        assert test_snap_5[0] == 6.5
        assert test_snap_5[1]

    def test_bottom_left_pixel_snap(self):
        # Returns (snapped_coordinate, in_bounds_flag) for the bottom/left edge.
        test_snap_1 = self.viewer.bottom_left_pixel_snap(-5, 5)
        assert test_snap_1[0] == -0.5
        assert test_snap_1[1]
        test_snap_2 = self.viewer.bottom_left_pixel_snap(10, 5)
        assert not test_snap_2[1]
        test_snap_3 = self.viewer.bottom_left_pixel_snap(5.4, 10)
        assert test_snap_3[0] == 4.5
        assert test_snap_3[1]
        test_snap_4 = self.viewer.bottom_left_pixel_snap(5.5, 10)
        assert test_snap_4[0] == 5.5
        assert test_snap_4[1]

    def test_left_right_bottom_top(self):
        # Normalizes two corner points to (left, right, bottom, top) plus
        # flags recording whether x/y were already in order.
        test_coords_1 = self.viewer.left_right_bottom_top(1, 2, 1, 2)
        assert test_coords_1[0:4] == (1, 2, 1, 2)
        assert test_coords_1[4]
        assert test_coords_1[5]
        test_coords_2 = self.viewer.left_right_bottom_top(2, 1, 1, 2)
        assert test_coords_2[0:4] == (1, 2, 1, 2)
        assert not test_coords_2[4]
        assert test_coords_2[5]
        test_coords_3 = self.viewer.left_right_bottom_top(1, 2, 2, 1)
        assert test_coords_3[0:4] == (1, 2, 1, 2)
        assert test_coords_3[4]
        assert not test_coords_3[5]
        test_coords_4 = self.viewer.left_right_bottom_top(2, 1, 2, 1)
        assert test_coords_4[0:4] == (1, 2, 1, 2)
        assert not test_coords_4[4]
        assert not test_coords_4[5]
|
nilq/baby-python
|
python
|
import copy
import datetime
import functools
import inspect
import os
import textwrap
import traceback
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Union
import click
import tqdm
from experitur.core.context import get_current_context
from experitur.core.parameters import (
Multi,
ParameterGenerator,
check_parameter_generators,
)
from experitur.core.trial import Trial
from experitur.errors import ExperiturError
from experitur.helpers import tqdm_redirect
from experitur.helpers.merge_dicts import merge_dicts
from experitur.util import callable_to_name, ensure_list
if TYPE_CHECKING: # pragma: no cover
from experitur.core.context import Context
def try_str(obj):
    """Return ``str(obj)``, falling back to ``"<error>"`` if conversion raises.

    Used when building error reports, where a failing ``__str__`` must not
    mask the original exception.
    """
    try:
        result = str(obj)
    except:  # pylint: disable=bare-except # noqa: E722
        result = "<error>"
    return result
class ExperimentError(ExperiturError):
    """Base class for errors raised by :class:`Experiment`."""
    pass
class StopExecution(ExperimentError):
    """Signals that experiment execution should stop.

    NOTE(review): not raised anywhere in this module; presumably used by
    callers — confirm before removing.
    """
    pass
class CommandNotFoundError(ExperimentError):
    """Raised by :meth:`Experiment.do` when the named command is not registered."""
    pass
class TrialNotFoundError(ExperimentError):
    """Raised by :meth:`Experiment.do` when the target trial id is not in the store."""
    pass
def format_trial_parameters(func=None, parameters=None, experiment=None):
    """Build a readable ``name(param=value, ...)`` string describing a trial.

    Args:
        func: Callable (its ``__name__`` is used) or any other object
            (its ``str()`` is used). Defaults to ``"_"``.
        parameters: Mapping of parameter names to values, rendered with
            ``repr``. Defaults to an empty ``()``.
        experiment: Optional experiment used as an ``experiment:name`` prefix.

    Returns:
        str: e.g. ``"exp1:train(lr=0.01)"``.
    """
    name = "_"
    if func is not None:
        # Prefer the callable's name; fall back to its string form.
        name = getattr(func, "__name__", None)
        if name is None:
            name = str(func)

    if parameters is None:
        rendered = "()"
    else:
        pairs = ("{}={}".format(k, repr(v)) for k, v in parameters.items())
        rendered = "({})".format(", ".join(pairs))

    if experiment is not None:
        name = "{}:{}".format(str(experiment), name)

    return name + rendered
class Experiment:
"""
Define an experiment.
Args:
name (:py:class:`str`, optional): Name of the experiment (Default: None).
parameter_grid (:py:class:`dict`, optional): Parameter grid (Default: None).
parent (:py:class:`~experitur.Experiment`, optional): Parent experiment (Default: None).
meta (:py:class:`dict`, optional): Dict with experiment metadata that should be recorded.
active (:py:class:`bool`, optional): Is the experiment active? (Default: True).
When False, the experiment will not be executed.
volatile (:py:class:`bool`, optional): If True, the results of a successful run will not be saved (Default: False).
minimize (:py:class:`str` or list of str, optional): Metric or list of metrics to minimize.
maximize (:py:class:`str` or list of str, optional): Metric or list of metrics to maximize.
This can be used as a constructor or a decorator:
.. code-block:: python
# When using as a decorator, the name of the experiment is automatically inferred.
@Experiment(...)
def exp1(trial):
...
# Here, the name must be supplied.
exp2 = Experiment("exp2", parent=exp1)
When the experiment is run, `trial` will be a :py:class:`~experitur.Trial` instance.
As such, it has the following characteristics:
- :obj:`dict`-like interface (`trial[<name>]`): Get the value of the parameter named `name`.
- Attribute interface (`trial.<attr>`): Get meta-data for this trial.
- :py:meth:`~experitur.Trial.call`: Run a function and automatically assign parameters.
See :py:class:`~experitur.Trial` for more details.
"""
def __init__(
self,
name: Optional[str] = None,
parameters=None,
parent: "Experiment" = None,
meta: Optional[Mapping] = None,
active: bool = True,
volatile: bool = False,
minimize: Union[str, List[str], None] = None,
maximize: Union[str, List[str], None] = None,
):
if not (isinstance(name, str) or name is None):
raise ValueError(f"'name' has to be a string or None, got {name!r}")
self.ctx = get_current_context()
self.name = name
self.parent = parent
self.meta = meta
self.active = active
self.volatile = volatile
self.minimize, self.maximize = self._validate_minimize_maximize(
minimize, maximize
)
self._own_parameter_generators: List[ParameterGenerator]
self._own_parameter_generators = check_parameter_generators(parameters)
self._pre_trial = None
self._commands: Dict[str, Any] = {}
self.func = None
# Merge parameters from all ancestors
parent = self.parent
while parent is not None:
self._merge(parent)
parent = parent.parent
self._base_parameter_generators: List[ParameterGenerator]
self._base_parameter_generators = (
[] if self.parent is None else self.parent._parameter_generators
)
self.ctx._register_experiment(self)
@staticmethod
def _validate_minimize_maximize(minimize, maximize):
minimize, maximize = ensure_list(minimize), ensure_list(maximize)
common = set(minimize) & set(maximize)
if common:
common = ", ".join(sorted(common))
raise ValueError(f"minimize and maximize share common metrics: {common}")
return minimize, maximize
def __call__(self, func: Callable) -> "Experiment":
"""
Register an entry-point.
Allows an Experiment object to be used as a decorator::
@Experiment()
def entry_point(trial):
...
"""
if not self.name:
self.name = func.__name__
self.func = func
return self
@property
def _parameter_generators(self) -> List[ParameterGenerator]:
return self._base_parameter_generators + self._own_parameter_generators
def add_parameter_generator(
self, parameter_generator: ParameterGenerator, prepend=False
):
if prepend:
self._own_parameter_generators.insert(0, parameter_generator)
else:
self._own_parameter_generators.append(parameter_generator)
@property
def parameter_generator(self) -> ParameterGenerator:
return Multi(self._parameter_generators)
@property
def independent_parameters(self) -> List[str]:
"""Independent parameters. (Parameters that were actually configured.)"""
return sorted(self.varying_parameters + self.invariant_parameters)
@property
def varying_parameters(self) -> List[str]:
"""Varying parameters of this experiment."""
return sorted(self.parameter_generator.varying_parameters.keys())
@property
def invariant_parameters(self) -> List[str]:
"""Varying parameters of this experiment."""
return sorted(self.parameter_generator.invariant_parameters.keys())
def __str__(self):
if self.name is not None:
return self.name
return repr(self)
def __repr__(self): # pragma: no cover
return "Experiment(name={})".format(self.name)
def run(self):
"""
Run this experiment.
Create trials for every combination in the parameter grid and run them.
"""
if not self.active:
print("Skip inactive experiment {}.".format(self.name))
return
if self.func is None:
raise ValueError("No function was registered for {}.".format(self))
if self.name is None:
raise ValueError("Experiment has no name {}.".format(self))
print("Experiment", self)
parameter_generator = self.parameter_generator
print("Independent parameters:")
for k, v in parameter_generator.varying_parameters.items():
print("{}: {}".format(k, v))
# Generate trial configurations
trial_configurations = parameter_generator.generate(self)
pbar = tqdm.tqdm(trial_configurations, unit="")
for trial_configuration in pbar:
# Inject experiment data into trial_configuration
trial_configuration = self._setup_trial_configuration(trial_configuration)
# Run the pre-trial hook to allow the user to interact
# with the parameters before the trial is created and run.
if self._pre_trial is not None:
self._pre_trial(self.ctx, trial_configuration)
if self.ctx.config["skip_existing"]:
# Check, if a trial with this parameter set already exists
existing = self.ctx.store.match(
func=self.func,
parameters=trial_configuration.get("parameters", {}),
)
if len(existing):
pbar.write(
"Skip existing configuration: {}".format(
format_trial_parameters(
func=self.func, parameters=trial_configuration
)
)
)
pbar.set_description("[Skipped]")
continue
trial_configuration = self.ctx.store.create(trial_configuration)
wdir = self.ctx.get_trial_wdir(trial_configuration["id"])
os.makedirs(wdir, exist_ok=True)
trial = Trial(merge_dicts(trial_configuration, wdir=wdir), self.ctx.store)
pbar.write("Trial {}".format(trial.id))
pbar.set_description("Running trial {}...".format(trial.id))
# Run the trial
try:
with tqdm_redirect.redirect_stdout():
result = self.run_trial(trial)
result = self._validate_trial_result(result)
except Exception: # pylint: disable=broad-except
msg = textwrap.indent(traceback.format_exc(-1), " ")
pbar.write("{} failed!".format(trial.id))
pbar.write(msg)
if not self.ctx.config["catch_exceptions"]:
raise
else:
if self.volatile:
trial.remove()
pbar.set_description("Running trial {}... Done.".format(trial.id))
def run_trial(self, trial: Trial):
"""Run the current trial and save the results."""
# Record intital state
trial.success = False
trial.time_start = datetime.datetime.now()
trial.result = None
trial.error = None
trial.save()
try:
result = self.func(trial)
except (Exception, KeyboardInterrupt) as exc:
# TODO: Store.log_error()
# Log complete exc to file
error_fn = os.path.join(trial.wdir, "error.txt")
with open(error_fn, "w") as f:
f.write(str(exc))
f.write(traceback.format_exc())
f.write("\n")
for k, v in inspect.trace()[-1][0].f_locals.items():
f.write(f"{k}: {try_str(v)}\n")
trial.error = ": ".join(filter(None, (exc.__class__.__name__, str(exc))))
print("\n", flush=True)
print(
f"Error running {trial.id}.\n"
f"See {error_fn} for the complete traceback.",
flush=True,
)
raise exc
else:
trial.result = result
trial.success = True
finally:
trial.time_end = datetime.datetime.now()
trial.save()
return trial.result
def _setup_trial_configuration(self, trial_configuration):
trial_configuration.setdefault("parameters", {})
return merge_dicts(
trial_configuration,
experiment={
"name": self.name,
"parent": self.parent.name if self.parent is not None else None,
"func": callable_to_name(self.func),
"meta": self.meta,
# Parameters that where actually configured.
"independent_parameters": self.independent_parameters,
"varying_parameters": self.varying_parameters,
"minimize": self.minimize,
"maximize": self.maximize,
},
)
def _validate_trial_result(self, trial_result: Optional[dict]):
if trial_result is None:
trial_result = {}
if not isinstance(trial_result, dict):
raise ExperimentError(
f"Experiments are expected to return a dict, got {trial_result!r}"
)
missing_metrics = (
set(self.maximize) | set(self.maximize)
) - trial_result.keys()
if missing_metrics:
missing_metrics = ", ".join(sorted(missing_metrics))
raise ExperimentError(f"Missing metrics in result: {missing_metrics}")
return trial_result
def _merge(self, other):
"""
Merge configuration of other into self.
This does not include parameter generators!
`other` is usually the parent experiment.
"""
# Copy attributes: func, meta, ...
for name in ("func", "meta"):
ours = getattr(self, name)
theirs = getattr(other, name)
if ours is None and theirs is not None:
# Shallow-copy regular attributes
setattr(self, name, copy.copy(theirs))
elif isinstance(ours, dict) and isinstance(theirs, dict):
# Merge dict attributes
setattr(self, name, {**theirs, **ours})
def pre_trial(self, func):
"""Update the pre-trial hook.
The pre-trial hook is called after the parameters for a trial are
calculated and before its ID is calculated and it is run.
This hook can be used to alter the parameters.
Use :code:`pre_trial(None)` to reset the hook.
This can be used as a decorator::
@experiment()
def exp(trial):
...
@exp.pre_trial
def pre_trial_handler(ctx, trial_parameters):
...
Args:
func: A function with the signature (ctx, trial_parameters).
"""
self._pre_trial = func
def command(self, name=None, *, target="trial"):
"""Attach a command to an experiment.
.. code-block:: python
@experiment()
def experiment1(trial):
...
@experiment1.command()
def frobnicate(trial):
...
"""
if target not in ("trial", "experiment"):
msg = "target has to be one of 'trial', 'experiment', not {}.".format(
target
)
raise ValueError(msg)
def _decorator(f):
_name = name or f.__name__
self._commands[_name] = (f, target)
return f
return _decorator
def do(self, cmd_name, target_name, cmd_args):
    """Look up a registered command and execute it via click.

    Args:
        cmd_name: Name under which the command was registered.
        target_name: Trial ID (for trial commands); unused for
            experiment commands.
        cmd_args: Raw CLI arguments forwarded to click.

    Raises:
        CommandNotFoundError: If no command ``cmd_name`` is registered.
        TrialNotFoundError: If a trial command names an unknown trial.
        NotImplementedError: If the stored target kind is unsupported.
    """
    try:
        cmd, target = self._commands[cmd_name]
    except KeyError:
        raise CommandNotFoundError(cmd_name)

    def _run_with(first_arg):
        # Inject the first positional argument, keep click metadata intact.
        wrapped = functools.partial(cmd, first_arg)
        try:
            wrapped.__click_params__ = cmd.__click_params__
        except AttributeError:
            pass
        click.command(name=cmd_name)(wrapped).main(
            args=cmd_args, standalone_mode=False
        )

    if target == "trial":
        try:
            trial_data = self.ctx.store[target_name]
        except KeyError as exc:
            raise TrialNotFoundError(target_name) from exc
        _run_with(Trial(trial_data, self.ctx.store))
    elif target == "experiment":
        _run_with(self)
    else:
        raise NotImplementedError(
            "target={} is not implemented.".format(target)
        )
|
nilq/baby-python
|
python
|
from importlib.util import find_spec
from os.path import isfile, join
import xdg.BaseDirectory
from json_database import JsonStorage
from xdg import BaseDirectory as XDG
from ovos_utils.json_helper import load_commented_json, merge_dict
from ovos_utils.log import LOG
from ovos_utils.system import search_mycroft_core_location
def get_ovos_config():
    """Assemble the effective ovos.conf configuration.

    Starts from built-in defaults, then layers /etc/OpenVoiceOS/ovos.conf
    (or the legacy /etc/mycroft/ovos.conf), then every XDG config path, and
    finally applies any core/submodule override whose module is importable.

    Returns:
        dict: The merged ovos.conf contents.
    """
    config = {"xdg": True,
              "base_folder": "mycroft",
              "config_filename": "mycroft.conf",
              "default_config_path": find_default_config()}
    try:
        if isfile("/etc/OpenVoiceOS/ovos.conf"):
            config = merge_dict(config,
                                load_commented_json(
                                    "/etc/OpenVoiceOS/ovos.conf"))
        elif isfile("/etc/mycroft/ovos.conf"):
            config = merge_dict(config,
                                load_commented_json("/etc/mycroft/ovos.conf"))
    except Exception:
        # tolerate bad json TODO proper exception (?)
        # (narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate)
        pass
    # This includes both the user config and
    # /etc/xdg/OpenVoiceOS/ovos.conf
    for p in xdg.BaseDirectory.load_config_paths("OpenVoiceOS"):
        if isfile(join(p, "ovos.conf")):
            try:
                xdg_cfg = load_commented_json(join(p, "ovos.conf"))
                config = merge_dict(config, xdg_cfg)
            except Exception:
                # tolerate bad json TODO proper exception (?)
                pass
    # let's check for derivatives specific configs
    # the assumption is that these cores are exclusive to each other,
    # this will never find more than one override
    # TODO this works if using dedicated .venvs what about system installs?
    cores = config.get("module_overrides") or {}
    for k in cores:
        if find_spec(k):
            config = merge_dict(config, cores[k])
            break
    else:
        subcores = config.get("submodule_mappings") or {}
        for k in subcores:
            if find_spec(k):
                config = merge_dict(config, cores[subcores[k]])
                break
    return config
def is_using_xdg():
    """Return whether the active core uses XDG base directories (default True)."""
    cfg = get_ovos_config()
    return cfg.get("xdg", True)
def get_xdg_base():
    """Return the XDG base folder name, falling back to "mycroft"."""
    cfg = get_ovos_config()
    return cfg.get("base_folder") or "mycroft"
def save_ovos_core_config(new_config):
    """Merge *new_config* into the user's ovos.conf and write it to disk.

    Returns:
        JsonStorage: The updated, persisted storage object.
    """
    cfg_path = join(xdg.BaseDirectory.save_config_path("OpenVoiceOS"),
                    "ovos.conf")
    storage = JsonStorage(cfg_path)
    storage.update(new_config)
    storage.store()
    return storage
def set_xdg_base(folder_name):
    """Persist *folder_name* as the XDG base folder in the user ovos.conf."""
    LOG.info(f"XDG base folder set to: '{folder_name}'")
    save_ovos_core_config({"base_folder": folder_name})
def set_config_filename(file_name, core_folder=None):
    """Persist the configuration file name; optionally set the base folder.

    Args:
        file_name: e.g. "mycroft.conf".
        core_folder: If truthy, also stored as the XDG base folder.
    """
    if core_folder:
        set_xdg_base(core_folder)
    LOG.info(f"config filename set to: '{file_name}'")
    save_ovos_core_config({"config_filename": file_name})
def set_default_config(file_path=None):
    """Persist the default-config path (auto-detected when not given)."""
    file_path = file_path or find_default_config()
    LOG.info(f"default config file changed to: {file_path}")
    save_ovos_core_config({"default_config_path": file_path})
def find_default_config():
    """Locate mycroft.conf inside the installed mycroft-core tree.

    Raises:
        FileNotFoundError: If no mycroft-core installation can be found.
    """
    core_root = search_mycroft_core_location()
    if not core_root:
        raise FileNotFoundError("Couldn't find mycroft core root folder.")
    return join(core_root, "mycroft", "configuration", "mycroft.conf")
def find_user_config():
    """Locate the user's configuration file.

    Checks, in order: the XDG config home, the configured user location,
    the legacy dot-folder location, and finally the mark-1 specific path
    (the mark-1 runs core as a different user).
    """
    if is_using_xdg():
        path = join(XDG.xdg_config_home, get_xdg_base(), get_config_filename())
        if isfile(path):
            return path
    # old = legacy "~/.mycroft/..." location, path = current user location
    old, path = get_config_locations(default=False, web_cache=False,
                                     system=False, old_user=True,
                                     user=True)
    if isfile(path):
        return path
    if isfile(old):
        return old
    # mark1 runs as a different user
    sysconfig = MycroftSystemConfig()
    platform_str = sysconfig.get("enclosure", {}).get("platform", "")
    if platform_str == "mycroft_mark_1":
        path = "/home/mycroft/.mycroft/mycroft.conf"
    # NOTE(review): when no file exists, this returns the expected (possibly
    # nonexistent) user path rather than None -- confirm callers rely on that.
    return path
def get_config_locations(default=True, web_cache=True, system=True,
                         old_user=True, user=True):
    """Build the ordered list of configuration file locations.

    Each boolean flag toggles inclusion of the corresponding layer, from
    lowest priority (default) to highest (user).
    """
    ovos_cfg = get_ovos_config()
    base = ovos_cfg['base_folder']
    fname = ovos_cfg['config_filename']
    locs = []
    if default:
        locs.append(ovos_cfg["default_config_path"])
    if system:
        locs.append(f"/etc/{base}/{fname}")
    if web_cache:
        locs.append(f"{XDG.xdg_config_home}/{base}/web_cache.json")
    if old_user:
        locs.append(f"~/.{base}/{fname}")
    if user:
        # XDG-aware cores keep the user config under XDG_CONFIG_HOME,
        # older cores under the legacy dot-folder.
        if is_using_xdg():
            locs.append(f"{XDG.xdg_config_home}/{base}/{fname}")
        else:
            locs.append(f"~/.{base}/{fname}")
    return locs
def get_webcache_location():
    """Return the path of the cached remote (web) configuration file."""
    base = get_xdg_base()
    return join(XDG.xdg_config_home, base, 'web_cache.json')
def get_xdg_config_locations():
    """Return XDG config file paths, lowest priority first.

    This includes both the user config and /etc/xdg/mycroft/mycroft.conf.
    """
    fname = get_config_filename()
    paths = [join(p, fname) for p in XDG.load_config_paths(get_xdg_base())]
    # load_config_paths yields most-important first; callers expect the reverse
    paths.reverse()
    return paths
def get_config_filename():
    """Return the configured config file name, defaulting to "mycroft.conf"."""
    cfg = get_ovos_config()
    return cfg.get("config_filename") or "mycroft.conf"
def set_config_name(name, core_folder=None):
    """Deprecated alias of :func:`set_config_filename` (HolmesV naming).

    Only shipped in a couple of releases; kept for backwards compatibility.
    """
    set_config_filename(name, core_folder)
def read_mycroft_config():
    """Return a merged view of default, system and user configuration."""
    merged = LocalConf("tmp/dummy.conf")
    # merge order matters: later layers override earlier ones
    for layer in (MycroftDefaultConfig(),
                  MycroftSystemConfig(),
                  MycroftUserConfig()):
        merged.merge(layer)
    return merged
def update_mycroft_config(config, path=None):
    """Merge *config* into the user config (or the file at *path*) and save.

    Returns:
        LocalConf: The updated, persisted configuration object.
    """
    conf = MycroftUserConfig() if path is None else LocalConf(path)
    conf.merge(config)
    conf.store()
    return conf
class LocalConf(JsonStorage):
    """Configuration dict backed by a JSON file on disk."""

    # Subclasses flip this to make the mapping effectively read-only.
    allow_overwrite = True

    def __init__(self, path=None):
        super().__init__(path)
class ReadOnlyConfig(LocalConf):
    """A LocalConf whose item/attribute writes, merge and store are blocked
    unless ``allow_overwrite`` is set.

    NOTE: construction works only because ``LocalConf.allow_overwrite`` is a
    *class* attribute (True): while ``__init__`` runs, the overriding
    ``__setattr__`` below falls back to that class attribute and permits the
    initial writes.
    """

    def __init__(self, path, allow_overwrite=False):
        super().__init__(path)
        self.allow_overwrite = allow_overwrite

    def reload(self):
        # Temporarily lift the write protection so the base reload can work.
        # NOTE(review): when allow_overwrite is False, the assignment below
        # itself goes through __setattr__ and raises PermissionError, which
        # would make reload() unusable on a truly read-only instance -- confirm.
        old = self.allow_overwrite
        self.allow_overwrite = True
        super().reload()
        self.allow_overwrite = old

    def __setitem__(self, key, value):
        # block item assignment unless writes are allowed
        if not self.allow_overwrite:
            raise PermissionError
        super().__setitem__(key, value)

    def __setattr__(self, key, value):
        # block attribute assignment unless writes are allowed
        if not self.allow_overwrite:
            raise PermissionError
        super().__setattr__(key, value)

    def merge(self, *args, **kwargs):
        if not self.allow_overwrite:
            raise PermissionError
        super().merge(*args, **kwargs)

    def store(self, path=None):
        if not self.allow_overwrite:
            raise PermissionError
        super().store(path)
class MycroftUserConfig(LocalConf):
    """The user's own configuration file, located via find_user_config()."""

    def __init__(self):
        super().__init__(find_user_config())
class MycroftDefaultConfig(ReadOnlyConfig):
    """Read-only view of the default mycroft.conf bundled with mycroft-core."""

    def __init__(self):
        path = get_ovos_config()["default_config_path"]
        super().__init__(path)
        if not self.path or not isfile(self.path):
            LOG.debug(f"mycroft root path not found, could not load default .conf: {self.path}")

    def set_root_config_path(self, root_config):
        # in case we got it wrong / non standard
        # NOTE(review): assigning self.path goes through ReadOnlyConfig's
        # __setattr__ and will raise PermissionError since this class is
        # constructed with allow_overwrite=False -- confirm intended usage.
        self.path = root_config
        self.reload()
class MycroftSystemConfig(ReadOnlyConfig):
    """System-wide configuration (/etc/<base>/<filename>), read-only by default."""

    def __init__(self, allow_overwrite=False):
        sys_path = get_config_locations(default=False, web_cache=False,
                                        system=True, old_user=False,
                                        user=False)[0]
        super().__init__(sys_path, allow_overwrite)
class MycroftXDGConfig(LocalConf):
    """User configuration resolved through the XDG lookup rules."""

    def __init__(self):
        user_path = get_config_locations(default=False, web_cache=False,
                                         system=False, old_user=False,
                                         user=True)[0]
        super().__init__(user_path)
|
nilq/baby-python
|
python
|
'''
TOOL SHARE
steven small
stvnsmll
Full Project Structure:
~/toolshare
|-- application.py # main script (this file)
|__ /views # contains all blueprints for app.routes
|-- __init__.py # empty
|-- neighborhoods.py
|-- tools_and_actions.py
|-- users.py
|__ /sub_modules # contains all helper and supporting functions
|-- __init__.py # imports all from each sub module
|-- helpers.py
|-- config.py
|-- emails.py
|-- image_mgmt.py
|-- SQL.py
|__ /templates # contains all of the html jinja layout templates and files
|-- layout.html
|__ /accountmgmt
|__ /emailtemplates
|__ /FAQs # sub-folder with its own layout template and files for FAQs
|-- FAQ_home.html
|-- FAQ_layout.html
|__ /pages
|__ /general
|__ /neighborhood
|__ /tools
|__ /static
|__ /LandingMedia
|__ /manifest
|__ /toolimages
|-- FOO.js
|-- BAR.css
|-- other_images.png
...
|-- requirements.txt
|-- toolshare.db
|-- README.md
|-- LICENSE
|-- Procfile
application.py (main) Structure:
1- Library imports
2- Flask application setup
A- Initialize the Flask app
B- Configure the database
C- Setup AWS S3 for image storage
D- Configure email functionality
E- Webapp installation requirements
3 - Register Blueprints (app routes)
A- Main features: tools & actions
B- Neighborhood management
C- User management
4- Misc other helper functions
'''
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [1] IMPORTS | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
import os
#for baggage
import io
import base64
import sub_modules
import datetime
import requests
#for baggage
import boto3, botocore
from flask import Flask, send_from_directory, make_response
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
#for sending emails
from flask_mail import Mail
#import all of the helper functions from sub_modules (helpers.py, emails.py, image_mgmt.py, SQL.py)
from sub_modules import *
from sub_modules import config
#import blueprints for all of the app.routes
from views.neighborhoods import neighborhoods_bp
from views.tools_and_actions import tools_and_actions_bp
from views.users import users_bp
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [2] FLASK APPLICATION SETUP | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
#----------------------------------------------------
# A- INITIALIZE FLASK APP
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Server-side sessions: stored on the filesystem, cleared when browser closes
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Disable client-side caching on every response."""
    headers = response.headers
    headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    headers["Expires"] = 0
    headers["Pragma"] = "no-cache"
    return response
#----------------------------------------------------
# B- CONFIGURE DATABASE
# sqlite = 1 (development)
# postgreSQL = 2 (production on Heroku)
DATABASE__TYPE = 2
try:
    db = SQL.SQL_db(os.getenv("DATABASE_URL"))
    print("postgreSQL database: production mode")
except Exception:
    # Fall back to the bundled sqlite development database.
    # (narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate)
    print("UNABLE TO CONNECT TO postgreSQL DATABASE")
    db = SQL.SQL_db("sqlite:///toolshare.db")
    app.config["SESSION_FILE_DIR"] = mkdtemp()  # <-- not used for Heroku
    print("sqlite3 database: development mode")
    DATABASE__TYPE = 1
# assign the database object to a config variable to be accessed by other modules
app.config['database_object'] = db
Session(app)
#----------------------------------------------------
# C- SETUP STORAGE ON S3 FOR IMAGES
# setup s3 file storage
app.config['S3_BUCKET'] = config.S3_BUCKET
app.config['S3_REGION'] = config.S3_REGION
# AWS credentials come from the environment, never from source control
app.config['S3_KEY'] = os.environ.get('AWS_ACCESS_KEY_ID')
app.config['S3_SECRET'] = os.environ.get('AWS_SECRET_ACCESS_KEY')
app.config['S3_LOCATION'] = 'http://{}.s3.amazonaws.com/'.format(config.S3_BUCKET)
# boto3 client used by the image upload/fetch helpers
s3 = boto3.client(
    "s3",
    aws_access_key_id=app.config['S3_KEY'],
    aws_secret_access_key=app.config['S3_SECRET'],
    region_name=app.config['S3_REGION'],
    config=botocore.client.Config(signature_version='s3v4')
)
# assign the s3 object to a config variable to be accessed by other modules
app.config["s3_object"] = s3
# Used for *local* image upload
# code credit: https://roytuts.com/upload-and-display-image-using-python-flask/
UPLOAD_FOLDER = 'static/toolimages/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# cap request payload size at 16 MiB
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
#----------------------------------------------------
# D- CONFIGURE EMAIL FUNCTIONALITY
app.config['MAIL_SERVER'] = config.MAIL_SERVER
app.config['MAIL_PORT'] = config.MAIL_PORT
app.config['MAIL_USERNAME'] = config.MAIL_USERNAME
# password comes from the environment, never from source control
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
# SMTP over SSL (not STARTTLS)
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
# set to 1 to send emails when every action happens (approve or reject)
# set to 0 to only send the required account management emails
SEND_EMAIL_ACTIONS = 0
app.config["SEND_EMAIL_ACTIONS"] = SEND_EMAIL_ACTIONS
app.config["mail_object"] = mail
#----------------------------------------------------
# E- WEB APP INSTALLATION REQUIREMENTS
@app.route('/manifest.json')
def manifest():
    """Serve the PWA manifest so the web app is installable."""
    return send_from_directory('static/manifest', 'manifest.json')
@app.route('/sw.js')
def service_worker():
    """Serve the service worker with caching disabled so updates roll out."""
    resp = make_response(send_from_directory('static', 'sw.js'))
    resp.headers['Cache-Control'] = 'no-cache'
    return resp
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [3] REGISTER BLUEPRINTS (routes) | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
#----------------------------------------------------
# A- MAIN FEATURES: TOOLS & ACTIONS
# register all app.route handlers, grouped by feature area
app.register_blueprint(tools_and_actions_bp)
#----------------------------------------------------
# B- NEIGHBORHOOD MANAGEMENT
app.register_blueprint(neighborhoods_bp)
#----------------------------------------------------
# C- USER MANAGEMENT
app.register_blueprint(users_bp)
#tmp. for the lugger tracker
@app.route("/found_luggage", methods=["GET", "POST"])
def found_luggage():
    '''Landing page for a scanned luggage QR code.

    GET: shows the owner's contact details for a known bagID and emails a
    scan notification (unless noEmail=1).
    POST: after a reCaptcha check, shares the finder's location and/or a
    message with the owner by email, then redirects back to the GET view.
    '''
    if request.method == "POST":
        #confirm reCaptcha
        if DATABASE__TYPE == 1:#no captcha needed
            recaptcha_passed = True
        else:
            print("POST from the production bag website")
            parameters = request.form
            print(parameters)
            recaptcha_passed = False
            print("testing recaptcha")
            recaptcha_response = parameters.get('g-recaptcha-response')
            try:
                recaptcha_secret = os.environ.get('RECAPTCHA_SECRET')
                response = requests.post(f'https://www.google.com/recaptcha/api/siteverify?secret={recaptcha_secret}&response={recaptcha_response}').json()
                recaptcha_passed = response.get('success')
            except Exception as e:
                print(f"failed to get reCaptcha: {e}")
                return apology("reCaptcha fail...")
        print(f"reCaptcha Status: {recaptcha_passed}")
        if recaptcha_passed:
            # form fields posted by the found-luggage page
            returnAction = request.form.get("returnAction")
            longitude = request.form.get("longit")
            latitude = request.form.get("latit")
            bag_name = request.form.get("bag_name")
            bagID = request.form.get("bagID")
            ipaddress = request.form.get("ipaddress")
            usermessage = request.form.get("usermessage")
            print(f"The included message was: {usermessage}")
            print(f"IP Address: {ipaddress}")
            email_address = os.environ.get('BAG_EMAIL')
            print(returnAction)
            noEmail = request.form.get("noEmail")
            location_shared = 0
            if returnAction == "location":
                # finder agreed to share their location with the owner
                maplink = "https://www.latlong.net/c/?lat=" + latitude + "&long=" + longitude
                print(f"Bag location = Lo:{longitude}, La:{latitude}")
                print(maplink)
                location_shared = 1
                #send email
                print("send the location email!")
                now = datetime.datetime.now()
                message = f"Bag: {bag_name} scanned at {now}\n\nIP Addr: {ipaddress}\n\nLatLong={latitude}:{longitude}\n{maplink}\n\nMessage:\n{usermessage}"
                if noEmail != "1":#don't send if in development mode...
                    sub_modules.emails.send_mail([email_address],"bag log - LOCATION!",message)
                    #pass
                    print("location mail sent")
            if returnAction == "sendMessage":
                # finder typed a message for the owner
                print("send the message email!")
                now = datetime.datetime.now()
                message = f"Bag: {bag_name} scanned at {now}\n\nIP Addr: {ipaddress}\n\nMessage:\n{usermessage}"
                sub_modules.emails.send_mail([email_address],"bag log - message!",message)
                print(".mail sent.")
            # rebuild query-string flags for the redirect back to GET
            extra_url = ""
            extra_url2 = ""
            print(noEmail)
            if noEmail == "1":
                extra_url = "&noEmail=1"
            if location_shared == 1:
                extra_url2 = "&locshared=1"
            print(extra_url2)
            #if DATABASE__TYPE == 1:
            return redirect(url_for('found_luggage') + f'?bagID={bagID}' + extra_url + extra_url2)
            #else:
            #    fullURL = f"https://sharetools.tk/found_luggage?bagID={bagID}{extra_url}{extra_url2}"
            #    print(fullURL)
            #    return redirect(fullURL)
        else:#reCaptcha failed...
            return apology("reCaptcha fail...")
    else:#GET
        # allow-list of known bag UUIDs -> human readable names
        list_of_actual_bags = {
            "10d8520f7f2246c4b246437d6e5985e7": "green_carryon",
            "6851b0e7efd640b3853ea2eda21c9863": "sjs_black_checkunder",
            "093bd25584754feab29938fcbd85193e": "hcs_grey_checkunder",
            "0198f1b8385a4c61b116b80cb7f3eca1": "big_carryon_backpack",
            "6ce2b15894c4414f88627f9cf673d273": "small_roller_carryon_black",
            "8e7d5a80643843d6bc84c8eb73678d1c": "green_duffel_bag",
            "25a98613f623400aa14336a47a5bae20": "sjs_volleyball_6_bag",
            "80aaa20845dc403cbe17704e8c1e5776": "purple_big_checkunder"
        }
        bagID = request.args.get("bagID")
        if bagID in list_of_actual_bags:
            print("valid bag")
        else:
            return render_template("foundluggage.html")
        bag_name = list_of_actual_bags[bagID]
        s3 = app.config["s3_object"]
        image_uuid_with_ext = bagID + ".jpeg"
        expire_in=3600
        imageURL = ""
        #get the bag image
        # just send the full asw filepath for now
        #return "{}{}".format(app.config["S3_LOCATION"], image_uuid_with_ext) <--- delete this...
        # returns the presigned url for the full-sized image
        try:
            imageURL = s3.generate_presigned_url('get_object',
                                                 Params={'Bucket': app.config["S3_BUCKET"],
                                                         'Key': image_uuid_with_ext},
                                                 ExpiresIn=expire_in)#seconds
        # NOTE(review): bare except swallows everything -- narrow to
        # botocore's ClientError when touching this code.
        except:# ClientError as e:
            #logging.error(e)
            e = "get_image_s3, misc error"
            print("Something Happened - ImageFetchFail: ", e)
        #personal details stored in environment variables
        luggage_owner = os.environ.get('BAG_OWNER')
        luggage_firstname = luggage_owner.split(" ")[0]
        email_address = os.environ.get('BAG_EMAIL')
        phone_number = os.environ.get('BAG_PHONE')
        address = os.environ.get('BAG_ADDRESS')
        # behind a proxy (Heroku) the real client IP is in X-Forwarded-For
        if request.headers.getlist("X-Forwarded-For"):
            print(request.headers.getlist("X-Forwarded-For"))
            visiting_IP = request.headers.getlist("X-Forwarded-For")[0]
        else:
            visiting_IP = request.remote_addr
        #send the email!
        noEmail = request.args.get("noEmail")
        if noEmail == "1":
            print("Don't send the email")
        else:
            noEmail = "0"
            print("send the email!")
            now = datetime.datetime.now()
            message = f"Bag: {bag_name} scanned at {now}\n\nIP Addr: {visiting_IP}"
            sub_modules.emails.send_mail([email_address],"bag log - scan",message)
            print(".mail sent.")
        #user selected to share their location
        locshared = request.args.get("locshared")
        if locshared == "1":
            #thank the user
            locshared = True
            pass
        else:
            locshared = False
        return render_template("foundluggage.html", owner=luggage_owner,
                               firstname=luggage_firstname,
                               email=email_address,
                               phone=phone_number,
                               address=address,
                               bagID=bagID,
                               bag_name=bag_name,
                               ipaddress = visiting_IP,
                               imageURL = imageURL,
                               noEmail = noEmail,
                               locshared = locshared)
#tmp. for the lugger tracker
@app.route("/make_QR", methods=["GET", "POST"])
def make_QR_Code():
    """Render a QR code image linking to the found_luggage page for a bag."""
    if request.method == "POST":
        return apology("No POST allowed", 403)
    # GET: build the target URL, draw the QR code into an in-memory PNG,
    # and hand it to the template base64-encoded.
    bagID = request.args.get("bagID")
    extra_url = "&noEmail=1" if request.args.get("noEmail") == "1" else ""
    qr_img = qrcode.make(f"https://www.sharetools.tk/found_luggage?bagID={bagID}{extra_url}")
    buffer = io.BytesIO()
    qr_img.save(buffer, "PNG")
    qrcode_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return render_template("simpleqrcode_page.html", qrcode_data = qrcode_data)
################################################################
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
# | [4] misc other helper functions... | #
# ~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~--~-- #
################################################################
def errorhandler(e):
    """Convert any raised error into an apology page (name + status code)."""
    err = e if isinstance(e, HTTPException) else InternalServerError()
    return apology(err.name, err.code)
# Listen for errors: register errorhandler() for every standard HTTP error code
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)
|
nilq/baby-python
|
python
|
from collections import OrderedDict
from .attributes import read_attribute_dict
from .core import read_word, read_line
# non-word characters that we allow in tag names, ids and classes
DOM_OBJECT_EXTRA_CHARS = ('-',)
def read_tag(stream):
    """
    Reads an element tag, e.g. span, ng-repeat, cs:dropdown
    """
    name = read_word(stream, DOM_OBJECT_EXTRA_CHARS)
    suffix = None
    # an optional ':' joins a namespace/prefix to the second word
    if stream.ptr < stream.length and stream.text[stream.ptr] == ':':
        stream.ptr += 1
        suffix = read_word(stream, DOM_OBJECT_EXTRA_CHARS)
    return (name + ':' + suffix) if suffix else name
def read_element(stream, compiler):
    """
    Reads an element, e.g. %span, #banner{style:"width: 100px"}, .ng-hide(foo=1)

    Parses the optional tag, id/class chain, attribute dict(s), whitespace
    removal markers (> / <), self-close (/), Django variable (=) and any
    inline content, and returns the assembled Element.
    """
    assert stream.text[stream.ptr] in ('%', '.', '#')

    tag = None
    empty_class = False

    if stream.text[stream.ptr] == '%':
        stream.ptr += 1
        tag = read_tag(stream)
    elif stream.text[stream.ptr] == '.':
        # Element may start with a period representing an unidentified div rather than a CSS class. In this case it
        # can't have other classes or ids, e.g. .{foo:"bar"}
        next_ch = stream.text[stream.ptr + 1] if stream.ptr < stream.length - 1 else None
        # Bug fix: a trailing '.' used to crash on next_ch.isalnum() when
        # next_ch is None; treat end-of-input like a non-word character.
        if next_ch is None or not (next_ch.isalnum() or next_ch == '_' or next_ch in DOM_OBJECT_EXTRA_CHARS):
            stream.ptr += 1
            empty_class = True

    _id = None
    classes = []

    if not empty_class:
        # consume any sequence of #id and .class tokens
        while stream.ptr < stream.length and stream.text[stream.ptr] in ('#', '.'):
            is_id = stream.text[stream.ptr] == '#'
            stream.ptr += 1
            id_or_class = read_word(stream, DOM_OBJECT_EXTRA_CHARS)
            if is_id:
                _id = id_or_class
            else:
                classes.append(id_or_class)

    # one or more attribute dicts may follow, in {} or () form
    attributes = OrderedDict()
    while stream.ptr < stream.length and stream.text[stream.ptr] in ('{', '('):
        attributes.update(read_attribute_dict(stream, compiler))

    # '>' nukes surrounding whitespace, '<' nukes inner whitespace
    if stream.ptr < stream.length and stream.text[stream.ptr] == '>':
        stream.ptr += 1
        nuke_outer_ws = True
    else:
        nuke_outer_ws = False

    if stream.ptr < stream.length and stream.text[stream.ptr] == '<':
        stream.ptr += 1
        nuke_inner_ws = True
    else:
        nuke_inner_ws = False

    # explicit '/' self-close, otherwise inferred from the tag name
    if stream.ptr < stream.length and stream.text[stream.ptr] == '/':
        stream.ptr += 1
        self_close = True
    else:
        self_close = tag in Element.SELF_CLOSING

    # '=' marks the inline content as a Django variable
    if stream.ptr < stream.length and stream.text[stream.ptr] == '=':
        stream.ptr += 1
        django_variable = True
    else:
        django_variable = False

    # remainder of the line is inline content
    if stream.ptr < stream.length:
        inline = read_line(stream)
        if inline is not None:
            inline = inline.strip()
    else:
        inline = None

    return Element(tag, _id, classes, attributes, nuke_outer_ws, nuke_inner_ws, self_close, django_variable, inline)
class Element(object):
    """
    An HTML element with an id, classes, attributes etc
    """

    # tags rendered without a closing tag
    SELF_CLOSING = (
        'meta', 'img', 'link', 'br', 'hr', 'input', 'source', 'track', 'area', 'base', 'col', 'command', 'embed',
        'keygen', 'param', 'wbr'
    )

    DEFAULT_TAG = 'div'

    def __init__(self, tag, _id, classes, attributes, nuke_outer_whitespace, nuke_inner_whitespace, self_close,
                 django_variable, inline_content):
        """Store parsed element parts, merging ids/classes from the attribute dict."""
        self.tag = tag or self.DEFAULT_TAG
        self.attributes = attributes
        self.nuke_inner_whitespace = nuke_inner_whitespace
        self.nuke_outer_whitespace = nuke_outer_whitespace
        self.self_close = self_close
        self.django_variable = django_variable
        self.inline_content = inline_content

        # merge ids from the attribute dictionary
        ids = [_id] if _id else []
        id_from_attrs = attributes.get('id')
        if isinstance(id_from_attrs, (tuple, list)):
            ids += id_from_attrs
        elif isinstance(id_from_attrs, str):
            ids += [id_from_attrs]

        # merge ids to a single value with _ separators
        self.id = '_'.join(ids) if ids else None

        # merge classes from the attribute dictionary
        class_from_attrs = attributes.get('class', [])
        if not isinstance(class_from_attrs, (tuple, list)):
            class_from_attrs = [class_from_attrs]
        self.classes = class_from_attrs + classes

    def render_attributes(self, options):
        """Render the attribute string, e.g. foo="bar" class="a b" id="x".

        ``options`` supplies attr_wrapper (quote char) and the xhtml flag.
        """
        def attr_wrap(val):
            return '%s%s%s' % (options.attr_wrapper, val, options.attr_wrapper)

        rendered = []

        for name, value in self.attributes.items():
            if name in ('id', 'class') or value in (None, False):
                # this line isn't recorded in coverage because it gets optimized away (http://bugs.python.org/issue2506)
                continue  # pragma: no cover

            if value is True:  # boolean attribute
                if options.xhtml:
                    rendered.append("%s=%s" % (name, attr_wrap(name)))
                else:
                    rendered.append(name)
            else:
                value = self._escape_attribute_quotes(value, options.attr_wrapper)
                rendered.append("%s=%s" % (name, attr_wrap(value)))

        if len(self.classes) > 0:
            rendered.append("class=%s" % attr_wrap(" ".join(self.classes)))

        if self.id:
            rendered.append("id=%s" % attr_wrap(self.id))

        return ' '.join(rendered)

    @staticmethod
    def _escape_attribute_quotes(v, attr_wrapper):
        """
        Escapes quotes with a backslash, except those inside a Django tag
        """
        escaped = []
        inside_tag = False
        for i, _ in enumerate(v):
            # track whether we're inside a {% ... %} Django tag
            if v[i:i + 2] == '{%':
                inside_tag = True
            elif v[i:i + 2] == '%}':
                inside_tag = False

            if v[i] == attr_wrapper and not inside_tag:
                escaped.append('\\')

            escaped.append(v[i])

        return ''.join(escaped)
|
nilq/baby-python
|
python
|
import scipy.misc
import scipy.io
from ops import *
from setting import *
def img_net(inputs, bit, numclass):
    """Image branch: pretrained conv trunk + hash/feature/label heads.

    Replays the conv/relu/norm/pool/fc layers stored in the .mat file at
    MODEL_DIR, then adds three trainable 1x1-conv heads.

    Args:
        inputs: image batch tensor (assumes NHWC float input -- TODO confirm).
        bit: length of the output hash code.
        numclass: number of label classes.

    Returns:
        Tuple of squeezed tensors: (tanh hash codes, SEMANTIC_EMBED features,
        sigmoid label predictions).
    """
    data = scipy.io.loadmat(MODEL_DIR)
    layers = (
        'conv1', 'relu1', 'norm1', 'pool1', 'conv2', 'relu2', 'norm2', 'pool2', 'conv3', 'relu3', 'conv4', 'relu4',
        'conv5', 'relu5', 'pool5', 'fc6', 'relu6', 'fc7', 'relu7')
    weights = data['layers'][0]
    labnet = {}
    current = tf.convert_to_tensor(inputs, dtype='float32')
    # replay the pretrained trunk layer by layer, dispatching on name prefix
    for i, name in enumerate(layers):
        if name.startswith('conv'):
            kernels, bias = weights[i][0][0][0][0]
            bias = bias.reshape(-1)
            pad = weights[i][0][0][1]
            stride = weights[i][0][0][4]
            current = conv_layer(current, kernels, bias, pad, stride, i, labnet)
        elif name.startswith('relu'):
            current = tf.nn.relu(current)
        elif name.startswith('pool'):
            stride = weights[i][0][0][1]
            pad = weights[i][0][0][2]
            area = weights[i][0][0][5]
            current = pool_layer(current, stride, pad, area)
        elif name.startswith('fc'):
            kernels, bias = weights[i][0][0][0][0]
            bias = bias.reshape(-1)
            current = full_conv(current, kernels, bias, i, labnet)
        elif name.startswith('norm'):
            current = tf.nn.local_response_normalization(current, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
        labnet[name] = current
    # fc8: project 4096-dim trunk output into the shared semantic space
    W_fc8 = tf.random_normal([1, 1, 4096, SEMANTIC_EMBED], stddev=1.0) * 0.01
    b_fc8 = tf.random_normal([SEMANTIC_EMBED], stddev=1.0) * 0.01
    w_fc8 = tf.Variable(W_fc8, name='w' + str(20))
    b_fc8 = tf.Variable(b_fc8, name='bias' + str(20))
    fc8 = tf.nn.conv2d(current, w_fc8, strides=[1, 1, 1, 1], padding='VALID')
    fc8 = tf.nn.bias_add(fc8, b_fc8)
    relu8 = tf.nn.relu(fc8)
    labnet['feature'] = relu8
    # fc9: hash head (tanh keeps codes in [-1, 1])
    W_fc9 = tf.random_normal([1, 1, SEMANTIC_EMBED, bit], stddev=1.0) * 0.01
    b_fc9 = tf.random_normal([bit], stddev=1.0) * 0.01
    w_fc9 = tf.Variable(W_fc9, name='w' + str(21))
    b_fc9 = tf.Variable(b_fc9, name='bias' + str(21))
    # fc9 = tf.nn.conv2d(fc8, w_fc9, strides=[1, 1, 1, 1], padding='VALID')
    fc9 = tf.nn.conv2d(relu8, w_fc9, strides=[1, 1, 1, 1], padding='VALID')
    fc9 = tf.nn.bias_add(fc9, b_fc9)
    labnet['hash'] = tf.nn.tanh(fc9)
    # fc10: label prediction head (sigmoid -> multi-label probabilities)
    W_fc10 = tf.random_normal([1, 1, SEMANTIC_EMBED, numclass], stddev=1.0) * 0.01
    b_fc10 = tf.random_normal([numclass], stddev=1.0) * 0.01
    w_fc10 = tf.Variable(W_fc10, name='w' + str(22))
    b_fc10 = tf.Variable(b_fc10, name='bias' + str(22))
    # fc10 = tf.nn.conv2d(fc8, w_fc10, strides=[1, 1, 1, 1], padding='VALID')
    fc10 = tf.nn.conv2d(relu8, w_fc10, strides=[1, 1, 1, 1], padding='VALID')
    fc10 = tf.nn.bias_add(fc10, b_fc10)
    labnet['label'] = tf.nn.sigmoid(fc10)
    return tf.squeeze(labnet['hash']), tf.squeeze(labnet['feature']), tf.squeeze(labnet['label'])
def lab_net(imput_label, bit, numClass):
    """Label branch: encodes a label vector into hash/feature/label outputs.

    Args:
        imput_label: label input tensor shaped for a [1, numClass, 1, ...]
            1x1-conv (assumed 4-D -- TODO confirm against callers).
        bit: length of the output hash code.
        numClass: number of label classes.

    Returns:
        Tuple of squeezed tensors: (tanh hash codes, SEMANTIC_EMBED features,
        sigmoid label reconstructions).
    """
    LAYER1_NODE = 4096
    labnet = {}
    # fc1: labels -> 4096-dim hidden layer (implemented as a 1x1 conv)
    W_fc1 = tf.random_normal([1, numClass, 1, LAYER1_NODE], stddev=1.0) * 0.01
    b_fc1 = tf.random_normal([1, LAYER1_NODE], stddev=1.0) * 0.01
    labnet['fc1W'] = tf.Variable(W_fc1)
    labnet['fc1b'] = tf.Variable(b_fc1)
    labnet['conv1'] = tf.nn.conv2d(imput_label, labnet['fc1W'], strides=[1, 1, 1, 1], padding='VALID')
    W1_plus_b1 = tf.nn.bias_add(labnet['conv1'], tf.squeeze(labnet['fc1b']))
    relu1 = tf.nn.relu(W1_plus_b1)
    norm1 = tf.nn.local_response_normalization(relu1, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
    # fc2: hidden layer -> shared semantic embedding
    W_fc2 = tf.random_normal([1, 1, LAYER1_NODE, SEMANTIC_EMBED], stddev=1.0) * 0.01
    b_fc2 = tf.random_normal([1, SEMANTIC_EMBED], stddev=1.0) * 0.01
    labnet['fc2W'] = tf.Variable(W_fc2)
    labnet['fc2b'] = tf.Variable(b_fc2)
    labnet['conv2'] = tf.nn.conv2d(norm1, labnet['fc2W'], strides=[1, 1, 1, 1], padding='VALID')
    fc2 = tf.nn.bias_add(labnet['conv2'], tf.squeeze(labnet['fc2b']))
    relu2 = tf.nn.relu(fc2)
    labnet['feature'] = relu2
    #norm2 = tf.nn.local_response_normalization(relu2, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
    # fc3: hash head (tanh keeps codes in [-1, 1])
    W_fc3 = tf.random_normal([1, 1, SEMANTIC_EMBED, bit], stddev=1.0) * 0.01
    b_fc3 = tf.random_normal([1, bit], stddev=1.0) * 0.01
    labnet['fc3W'] = tf.Variable(W_fc3)
    labnet['fc3b'] = tf.Variable(b_fc3)
    labnet['conv3'] = tf.nn.conv2d(relu2, labnet['fc3W'], strides=[1, 1, 1, 1], padding='VALID')
    output_h = tf.nn.bias_add(labnet['conv3'], tf.squeeze(labnet['fc3b']))
    labnet['hash'] = tf.nn.tanh(output_h)
    # fc4: label reconstruction head (sigmoid -> multi-label probabilities)
    W_fc4 = tf.random_normal([1, 1, SEMANTIC_EMBED, numClass], stddev=1.0) * 0.01
    b_fc4 = tf.random_normal([1, numClass], stddev=1.0) * 0.01
    labnet['fc4W'] = tf.Variable(W_fc4)
    labnet['fc4b'] = tf.Variable(b_fc4)
    labnet['conv4'] = tf.nn.conv2d(relu2, labnet['fc4W'], strides=[1, 1, 1, 1], padding='VALID')
    label_ = tf.nn.bias_add(labnet['conv4'], tf.squeeze(labnet['fc4b']))
    labnet['label'] = tf.nn.sigmoid(label_)
    return tf.squeeze(labnet['hash']), tf.squeeze(labnet['feature']), tf.squeeze(labnet['label'])
def dis_net_IL(feature, keep_prob, reuse=False, name="disnet_IL"):
    """Discriminator between image features and label features.

    Two dropout-regularized 1x1-conv layers (512 -> 256) followed by a
    single-logit output. ``reuse`` shares variables with a previous call
    under the same variable scope.

    Args:
        feature: input feature tensor (shape must match the
            [1, SEMANTIC_EMBED, 1, ...] conv below -- TODO confirm).
        keep_prob: dropout keep probability.
        reuse: reuse existing variables in scope ``name``.

    Returns:
        Squeezed discriminator logits.
    """
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            # first construction of this scope must not be in reuse mode
            assert tf.get_variable_scope().reuse is False
        disnet = {}
        relu1 = relu(conv2d(feature, [1,SEMANTIC_EMBED,1,512], [1,1,1,1], 'VALID', 1.0, "disnet_IL_fc1"))
        dropout1 = tf.nn.dropout(relu1, keep_prob)
        relu2 = relu(conv2d(dropout1, [1,1,512,256], [1,1,1,1], 'VALID', 1.0, "disnet_IL_fc2"))
        dropout2 = tf.nn.dropout(relu2, keep_prob)
        disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_out")
        # relu1 = relu(batch_norm(conv2d(feature, [1, 1, SEMANTIC_EMBED, 512], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_fc1")))
        # dropout1 = tf.nn.dropout(relu1, keep_prob)
        # relu2 = relu(batch_norm(conv2d(dropout1, [1, 1, 512, 256], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_fc2")))
        # dropout2 = tf.nn.dropout(relu2, keep_prob)
        # disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_IL_out")
        return tf.squeeze(disnet['output'])
def dis_net_TL(feature, keep_prob, reuse=False, name="disnet_TL"):
    """Discriminator between text features and label features.

    Mirrors :func:`dis_net_IL` with its own variable scope and layer names.

    Args:
        feature: input feature tensor (shape must match the
            [1, SEMANTIC_EMBED, 1, ...] conv below -- TODO confirm).
        keep_prob: dropout keep probability.
        reuse: reuse existing variables in scope ``name``.

    Returns:
        Squeezed discriminator logits.
    """
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            # first construction of this scope must not be in reuse mode
            assert tf.get_variable_scope().reuse is False
        disnet = {}
        relu1 = relu(conv2d(feature, [1, SEMANTIC_EMBED, 1, 512], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc1"))
        dropout1 = tf.nn.dropout(relu1, keep_prob)
        relu2 = relu(conv2d(dropout1, [1, 1, 512, 256], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc2"))
        dropout2 = tf.nn.dropout(relu2, keep_prob)
        disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_out")
        # relu1 = relu(batch_norm(conv2d(feature, [1, 1, SEMANTIC_EMBED, 512], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc1")))
        # dropout1 = tf.nn.dropout(relu1, keep_prob)
        # relu2 = relu(batch_norm(conv2d(dropout1, [1, 1, 512, 256], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_fc2")))
        # dropout2 = tf.nn.dropout(relu2, keep_prob)
        # disnet['output'] = conv2d(dropout2, [1, 1, 256, 1], [1, 1, 1, 1], 'VALID', 1.0, "disnet_TL_out")
        return tf.squeeze(disnet['output'])
def txt_net(text_input, dimy, bit, numclass):
    """Text-modality network.

    Pipeline: multi-scale pooling (MultiScaleTxt) -> FC1 (4096) -> FC2
    (SEMANTIC_EMBED, the shared feature) -> two parallel heads: a tanh hash
    head of length `bit` and a sigmoid label head of size `numclass`.
    FC layers are implemented as convolutions over the text axis.

    Args:
        text_input: text tensor; second spatial dim must equal dimy
            (NOTE(review): assumed NHWC with 6 channels after MultiScaleTxt —
            confirm against the caller).
        dimy: input (vocabulary) dimension.
        bit: hash code length.
        numclass: number of label classes.

    Returns:
        (hash, feature, label) — all squeezed.
    """
    txtnet={}
    MultiScal = MultiScaleTxt(text_input)
    # FC1: collapse the dimy axis; 6 input channels = original + 5 pooled scales.
    # Weights are plain Variables initialised ~N(0, 0.01^2).
    W_fc1 = tf.random_normal([1, dimy, 6, 4096], stddev=1.0) * 0.01
    b_fc1 = tf.random_normal([1, 4096], stddev=1.0) * 0.01
    fc1W = tf.Variable(W_fc1)
    fc1b = tf.Variable(b_fc1)
    txtnet['conv1'] = tf.nn.conv2d(MultiScal, fc1W, strides=[1, 1, 1, 1], padding='VALID')
    W1_plus_b1 = tf.nn.bias_add(txtnet['conv1'], tf.squeeze(fc1b))
    txtnet['fc1'] = tf.nn.relu(W1_plus_b1)
    # Local response normalisation between FC layers.
    txtnet['norm1'] = tf.nn.local_response_normalization(txtnet['fc1'], depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
    # FC2: 4096 -> SEMANTIC_EMBED; its ReLU output is the shared feature.
    W_fc2 = tf.random_normal([1, 1, 4096, SEMANTIC_EMBED], stddev=1.0) * 0.01
    b_fc2 = tf.random_normal([1, SEMANTIC_EMBED], stddev=1.0) * 0.01
    fc2W = tf.Variable(W_fc2)
    fc2b = tf.Variable(b_fc2)
    txtnet['conv2'] = tf.nn.conv2d(txtnet['norm1'], fc2W, strides=[1, 1, 1, 1], padding='VALID')
    W2_plus_b2 = tf.nn.bias_add(txtnet['conv2'], tf.squeeze(fc2b))
    relu2 = tf.nn.relu(W2_plus_b2)
    txtnet['feature'] = relu2
    txtnet['norm2'] = tf.nn.local_response_normalization(relu2, depth_radius=2, bias=2.000, alpha=0.0001, beta=0.75)
    # Hash head: SEMANTIC_EMBED -> bit, squashed to (-1, 1) by tanh.
    W_fc3 = tf.random_normal([1, 1, SEMANTIC_EMBED, bit], stddev=1.0) * 0.01
    b_fc3 = tf.random_normal([bit], stddev=1.0) * 0.01
    fc3W = tf.Variable(W_fc3)
    fc3b = tf.Variable(b_fc3)
    txtnet['conv3'] = tf.nn.conv2d(txtnet['norm2'], fc3W, strides=[1, 1, 1, 1], padding='VALID')
    W3_plus_b3 = tf.nn.bias_add(txtnet['conv3'], tf.squeeze(fc3b))
    txtnet['hash'] = tf.nn.tanh(W3_plus_b3)
    # Label head: SEMANTIC_EMBED -> numclass, sigmoid (multi-label prediction).
    W_fc4 = tf.random_normal([1, 1, SEMANTIC_EMBED, numclass], stddev=1.0) * 0.01
    b_fc4 = tf.random_normal([numclass], stddev=1.0) * 0.01
    fc4W = tf.Variable(W_fc4)
    fc4b = tf.Variable(b_fc4)
    txtnet['conv4'] = tf.nn.conv2d(txtnet['norm2'], fc4W, strides=[1, 1, 1, 1], padding='VALID')
    W4_plus_b4 = tf.nn.bias_add(txtnet['conv4'], tf.squeeze(fc4b))
    txtnet['label'] = tf.nn.sigmoid(W4_plus_b4)
    return tf.squeeze(txtnet['hash']), tf.squeeze(txtnet['feature']), tf.squeeze(txtnet['label'])
def interp_block(text_input, level):
    """One branch of the text pyramid.

    Average-pools with window and stride 5*level along the text axis, applies
    a 1x1 conv + ReLU, then resizes back to (1, dimTxt) so all branches can
    be concatenated.

    NOTE(review): relies on the module-level global `dimTxt` — confirm it is
    defined before this function is called.
    """
    shape = [1, 1, 5 * level, 1]
    stride = [1, 1, 5 * level, 1]
    prev_layer = tf.nn.avg_pool(text_input, ksize=shape, strides=stride, padding='VALID')
    # Single-channel 1x1 conv (a learned per-branch scaling).
    W_fc1 = tf.random_normal([1, 1, 1, 1], stddev=1.0) * 0.01
    fc1W = tf.Variable(W_fc1)
    prev_layer = tf.nn.conv2d(prev_layer, fc1W, strides=[1, 1, 1, 1], padding='VALID')
    prev_layer = tf.nn.relu(prev_layer)
    # Upsample back to the original text length for concatenation.
    prev_layer = tf.image.resize_images(prev_layer, [1, dimTxt])
    return prev_layer
def MultiScaleTxt(input):
    """Concatenate the raw text tensor with five pooled views of itself.

    Branches are created coarsest-first (levels 10, 6, 3, 2, 1) exactly as
    before, so graph op creation order — and thus any op-level random seeds —
    is unchanged; the concat lists them finest-first after the input.
    """
    pooled = [interp_block(input, level) for level in (10, 6, 3, 2, 1)]
    return tf.concat([input] + pooled[::-1], axis=-1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Search utilities."""
from invenio_search.api import RecordsSearch
class DocumentSearch(RecordsSearch):
    """RecordsSearch for documents."""

    class Meta:
        """Search only on documents index."""

        # Restrict all queries to the "documents" search index.
        index = "documents"
        # No document-type filtering (index-wide search).
        doc_types = None
class ItemSearch(RecordsSearch):
    """RecordsSearch for items."""

    class Meta:
        """Search only on items index."""

        index = "items"
        doc_types = None

    def search_by_document_pid(self, document_pid=None):
        """Retrieve items based on the given document pid."""
        # Guard clause: a document pid is mandatory for this search.
        if not document_pid:
            raise ValueError("Must specify document_pid param")
        return self.filter("term", document_pid=document_pid)
class LocationSearch(RecordsSearch):
    """RecordsSearch for locations."""

    class Meta:
        """Search only on locations index."""

        # Restrict all queries to the "locations" search index.
        index = "locations"
        doc_types = None
class InternalLocationSearch(RecordsSearch):
    """RecordsSearch for internal locations."""

    class Meta:
        """Search only on internal locations index."""

        # Restrict all queries to the "internal_locations" search index.
        index = "internal_locations"
        doc_types = None
|
nilq/baby-python
|
python
|
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command('')
@click.option('-i', '--input', 'opt_inputs', required=True,
  multiple=True,
  help="Input files to merge")
@click.option('-o', '--output', 'opt_output', required=True,
  help='Output file')
@click.option('--minify', 'opt_minify', is_flag=True,
  default=False,
  help='Minify JSON')
@click.option('--replace-path', 'opt_replace_path',
  help="Replace file parent path")
@click.pass_context
def cli(ctx, opt_inputs, opt_output, opt_replace_path, opt_minify):
  """Merge JSON detections"""
  # ------------------------------------------------
  # imports
  from os.path import join
  from pathlib import Path

  from tqdm import tqdm

  from vframe.utils import file_utils
  from vframe.settings import app_cfg

  # ------------------------------------------------
  # start

  log = app_cfg.LOG

  # Cumulative results keyed by filepath so repeated files across inputs
  # are merged frame-by-frame and model-by-model.
  merge_results = {}

  for fp_in in tqdm(opt_inputs, desc='Files'):
    log.debug(f'load: {fp_in}')
    detection_results = file_utils.load_json(fp_in)

    for detection_result in detection_results:
      # Optionally re-root each file path, keeping only the basename.
      if opt_replace_path is not None:
        detection_result['filepath'] = join(opt_replace_path, Path(detection_result['filepath']).name)

      filepath = detection_result['filepath']
      # Idiom fix: `setdefault`/`not in` replace repeated `.keys()` lookups.
      merged = merge_results.setdefault(filepath, {'filepath': filepath})

      for frame_idx, frame_data in detection_result['frames_data'].items():
        # 'frames_data' is only created once a frame exists (as before).
        frames = merged.setdefault('frames_data', {})
        # Later inputs override earlier results for the same model name.
        frames.setdefault(frame_idx, {}).update(frame_data)

  # write
  results_out = list(merge_results.values())
  file_utils.write_json(results_out, opt_output, minify=opt_minify)
|
nilq/baby-python
|
python
|
# 该模型模仿自 keras/examples/lstm_text_generation.py 和吴恩达老师课程中讲的并不相同
# 吴恩达老师的模型是训练一个RNN模型,然后每一次输入一个单词,预测出下一个最可能的单词作为输出
# 此模型则是利用 corpus 构建一个监督学习模型,模型的构成为,选择一个当前字母作为x,下一个字母作为y,从而构建模型
# 当前采用了每一个单词之后使用 \n 补齐,对模型效果可能会有点影响
from os import path
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras.preprocessing.text import one_hot
import numpy as np
# from keras.preprocessing.sequence import pad_sequences
# 提供的不能用,因为 split 不支持 ''
# one = one_hot("t e x t", 27, lower=True, split=' ')
# print(one)
# 读取训练集
def get_train_data():
    """Load the name dataset and pad every name to the same length.

    Returns:
        dataset: list of names, each right-padded with '\n' to `maxlen`.
        chars: unique lowercase characters found in the raw file
               (includes the '\n' separator/padding character).
        maxlen: length of the longest name.
    """
    dataset_path = path.join(path.dirname(__file__), "dataset.csv")
    with open(dataset_path, "r") as f:
        dataset = f.read()
    chars = list(set(dataset.lower()))
    dataset = dataset.split('\n')
    # BUG FIX: `max(dataset)` returned the lexicographically largest name,
    # not the longest one; `key=len` selects the true maximum length.
    maxlen = len(max(dataset, key=len))
    # Pad names shorter than maxlen with '\n'.
    dataset = [item.ljust(maxlen, '\n') for item in dataset]
    return dataset, chars, maxlen
# Load the padded names and the character vocabulary.
dataset, chars, maxlen = get_train_data()
vocab_size = len(chars)
print(f'There are {len(dataset)} total names and {len(chars)} unique characters in your data.')

# Character <-> index lookup tables (sorted for a deterministic mapping).
char_to_ix = {ch: i for i, ch in enumerate(sorted(chars))}
ix_to_char = {i: ch for i, ch in enumerate(sorted(chars))}
# print(char_to_ix)
# print(ix_to_char)
# def word_to_one_hot(word):
# # 将单词转换成 one-hot
# one_hot = []
# for w in word:
# zeros = np.zeros((vocab_size, 1))
# zeros[char_to_ix[w]] = 1
# one_hot.append(zeros)
# return one_hot
# def one_hot_to_word(one_hot):
# # 将 one-hot 转换成单词
# word = ''
# for one in one_hot:
# # 找到 word index
# index = 0
# for i in one:
# if i[0] == 1:
# word += ix_to_char[index]
# index += 1
# return word
# print(word_to_one_hot("text"))
# print(one_hot_to_word(word_to_one_hot("text")))
# build model
def build():
    """Build the character-level LSTM model.

    BUG FIX: the original constructed the model but never returned it, so
    callers always received None. The commented-out embedding/dense/compile
    scaffolding from the original has been removed; re-add layers and a
    compile step before training.
    """
    model = Sequential()
    model.add(LSTM(128))
    return model
|
nilq/baby-python
|
python
|
import tensorflow as tf
initializer = tf.keras.initializers.HeNormal()
regularizer = tf.keras.regularizers.L1(l1=.001)

# Shared keyword arguments for every parameterised layer.
reg_kwargs = dict(kernel_regularizer=regularizer,
                  bias_regularizer=regularizer,
                  kernel_initializer=initializer)

inputs = tf.keras.Input(shape=(8, 8, 19))
filters = 32

# Three identical Conv-BN-ReLU blocks.
x = inputs
for _ in range(3):
    x = tf.keras.layers.Conv2D(filters, (3, 3), padding='same', **reg_kwargs)(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)

# 1x1 projection to a single channel, then a 3-way logit head.
x = tf.keras.layers.Conv2D(1, (1, 1), padding='same', **reg_kwargs)(x)
x = tf.keras.layers.Flatten()(x)
outputs = tf.keras.layers.Dense(3, **reg_kwargs)(x)

model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=.1),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=[tf.keras.metrics.CategoricalAccuracy()])
model.summary()
model.save('current_model.h5')
|
nilq/baby-python
|
python
|
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, LongTable
from reportlab.platypus.doctemplate import PageAccumulator
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib.utils import simpleSplit
from reportlab.lib import colors
styleSheet = getSampleStyleSheet()
class MyPageAccumulator(PageAccumulator):
    """Accumulator that stamps a footer note describing which multiples
    (of 13 and/or 42) were drawn on the page."""

    def pageEndAction(self, canv, doc):
        values = [row[0] for row in self.data]
        saw42 = any(v % 42 == 0 for v in values)
        saw13 = any(v % 13 == 0 for v in values)
        if saw42 and saw13:
            msg = 'Saw multiples of 13 and 42'
        elif saw13:
            msg = 'Saw multiples of 13'
        elif saw42:
            msg = 'Saw multiples of 42'
        else:
            # Nothing noteworthy on this page.
            return
        canv.saveState()
        canv.setFillColor(colors.purple)
        canv.setFont("Helvetica", 6)
        canv.drawString(1 * inch, 1 * inch, msg)
        canv.restoreState()
# Module-level accumulator shared by both test documents; the id string
# '_42_divides' keys the onDraw data stream.
PA = MyPageAccumulator('_42_divides')
class MyDocTemplate(SimpleDocTemplate):
    """SimpleDocTemplate that wires the PA accumulator into every page template."""

    def beforeDocument(self):
        # Attach the accumulator so its pageEndAction runs on each page.
        for pt in self.pageTemplates:
            PA.attachToPageTemplate(pt)
def textAccum2():
    """Build a 500-row table; cell text is pre-wrapped with simpleSplit and
    the first column feeds row numbers to the accumulator via onDrawStr."""
    doc = MyDocTemplate(outputfile('test_platypus_accum2.pdf'),
            pagesize=(8.5*inch, 11*inch), showBoundary=1)
    story=[]
    story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
    sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
            ('BOX',(0,0),(-1,-1),2,colors.red),
            ('FONTNAME',(0,0),(-1,-1),'Helvetica'),
            ('FONTSIZE',(0,0),(-1,-1),10),
            ]
    def myCV(s,fontName='Helvetica',fontSize=10,maxWidth=72):
        # Pre-wrap the text into explicit lines for a fixed cell width.
        return '\n'.join(simpleSplit(s,fontName,fontSize,maxWidth))
    data = [[PA.onDrawStr(str(i+1),i+1),
            myCV("xx "* (i%10),maxWidth=100-12),
            myCV("blah "*(i%40),maxWidth=200-12)]
            for i in range(500)]
    t=LongTable(data, style=sty, colWidths = [50,100,200])
    story.append(t)
    doc.build(story)
def textAccum1():
    """Build a 500-row table; row numbers reach the accumulator through
    onDrawText markers embedded in the third-column paragraphs."""
    doc = MyDocTemplate(outputfile('test_platypus_accum1.pdf'),
            pagesize=(8.5*inch, 11*inch), showBoundary=1)
    story=[]
    story.append(Paragraph("A table with 500 rows", styleSheet['BodyText']))
    sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
            ('BOX',(0,0),(-1,-1),2,colors.red),
            ]
    data = [[str(i+1), Paragraph("xx "* (i%10),
            styleSheet["BodyText"]),
            Paragraph(("blah "*(i%40))+PA.onDrawText(i+1), styleSheet["BodyText"])]
            for i in range(500)]
    t=LongTable(data, style=sty, colWidths = [50,100,200])
    story.append(t)
    doc.build(story)
class TablesTestCase(unittest.TestCase):
    "Make documents with tables"

    def test1(self):
        # Accumulator fed via onDrawText markers in paragraphs.
        textAccum1()

    def test2(self):
        # Accumulator fed via onDrawStr in the first column.
        textAccum2()
def makeSuite():
    """Return the unittest suite for this module (reportlab test convention)."""
    return makeSuiteForClasses(TablesTestCase)
#noruntests
if __name__ == "__main__":
    # Run the suite directly and report where output files were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
|
nilq/baby-python
|
python
|
from qgis.PyQt.QtCore import Qt, QTimer
# Widget classes used by WindowShow, and QgsPointXY/QgsMapTool used by the
# map tool, were previously referenced without being imported.
from qgis.PyQt.QtWidgets import QHBoxLayout, QLabel, QPushButton, QVBoxLayout, QWidget
from qgis.core import QgsPointXY, QgsProject, QgsRectangle, QgsWkbTypes
from qgis.gui import QgsMapTool, QgsMapToolEmitPoint, QgsRubberBand
POLLING_RATE_MS = 250
class WindowShow(QWidget):
    """Small helper window with a text label and a click-counting button."""

    def __init__(self, mode='single_picture'):
        # NOTE(review): `mode` is currently unused — confirm intended behaviour.
        super().__init__()
        self.initUI()
        self._button_counter = 0

    def initUI(self):
        """Build the layout: label row on top, button row below, then show."""
        vbox = QVBoxLayout()
        hbox_text = QHBoxLayout()
        self.text_lbl = QLabel()
        self.text_lbl.setAlignment(Qt.AlignTop)
        hbox_text.addWidget(self.text_lbl)
        hbox_button = QHBoxLayout()
        button = QPushButton('press me')
        button.clicked.connect(self.add_counter_button_pressed)
        hbox_button.addWidget(button)
        vbox.addLayout(hbox_text)
        vbox.addLayout(hbox_button)
        self.setLayout(vbox)
        self.move(400, 300)
        self.setWindowTitle('Picture ... ')
        self.show()

    @property
    def button_counter(self):
        # Read-only count of button presses, polled by SelectRectangleMapTool.
        return self._button_counter

    def show_text(self):
        """Set the label's placeholder text."""
        self.text_lbl.setText('Something more interesting ...')

    def add_counter_button_pressed(self):
        # Slot connected to the button's clicked signal.
        self._button_counter += 1
class SelectRectangleMapTool(QgsMapToolEmitPoint):
    """Map tool: drag a rectangle on the canvas, then open a WindowShow and
    poll its button counter on a timer.

    The rubber band previews the rectangle while dragging; on release a helper
    window opens and a QTimer polls its counter every POLLING_RATE_MS.
    """

    def __init__(self, canvas):
        self.canvas = canvas
        QgsMapToolEmitPoint.__init__(self, self.canvas)
        self.rubberBand = QgsRubberBand(self.canvas, True)
        self.rubberBand.setColor(Qt.blue)
        self.rubberBand.setFillColor(Qt.transparent)
        self.rubberBand.setWidth(2)
        self.timer_poll_id = QTimer()
        self.timer_poll_id.timeout.connect(self.call_button_counter)
        self.reset()

    def reset(self):
        """Clear the selection state, the rubber band and the poll timer."""
        # BUG FIX: this previously assigned self.startPoint/self.endPoint
        # (camelCase) while every other method reads self.start_point /
        # self.end_point, so the attributes were never initialised/cleared.
        self.start_point = self.end_point = None
        self.isEmittingPoint = False
        self.rubberBand.reset(True)
        self.timer_poll_id.stop()
        self.window_show = None
        self.counter = 0

    def canvasPressEvent(self, e):
        # Start a fresh rectangle at the pressed map coordinate.
        self.reset()
        self.start_point = self.toMapCoordinates(e.pos())
        self.end_point = self.start_point
        self.isEmittingPoint = True

    def canvasReleaseEvent(self, e):
        # Finish the rectangle, open the helper window and start polling.
        self.isEmittingPoint = False
        self.show_rect(self.start_point, self.end_point)
        self.window_show = WindowShow()
        self.window_show.show_text()
        self.counter = 0
        self.timer_poll_id.start(POLLING_RATE_MS)

    def canvasMoveEvent(self, e):
        if not self.isEmittingPoint:
            return
        self.end_point = self.toMapCoordinates(e.pos())
        self.show_rect(self.start_point, self.end_point)

    def show_rect(self, start_point, end_point):
        """Redraw the rubber-band rectangle between the two map points."""
        self.rubberBand.reset(QgsWkbTypes.PolygonGeometry)
        # A degenerate (zero-width/height) rectangle is not drawn.
        if start_point.x() == end_point.x() or start_point.y() == end_point.y():
            return
        self.rubberBand.addPoint(QgsPointXY(start_point.x(), start_point.y()), False)
        self.rubberBand.addPoint(QgsPointXY(start_point.x(), end_point.y()), False)
        self.rubberBand.addPoint(QgsPointXY(end_point.x(), end_point.y()), False)
        # Final point: True triggers the visual update.
        self.rubberBand.addPoint(QgsPointXY(end_point.x(), start_point.y()), True)
        self.rubberBand.show()

    def call_button_counter(self):
        """Timer slot: report when the helper window's counter changed."""
        if not self.window_show:
            return
        new_counter = self.window_show.button_counter
        if new_counter != self.counter:
            self.counter = new_counter
            print(f'Button pressed in WindowShow: {self.counter}')

    def deactivate(self):
        self.reset()
        QgsMapTool.deactivate(self)
        self.deactivated.emit()
# Install the tool on the active QGIS canvas (run inside the QGIS console,
# where `iface` is provided by the application).
canvas = iface.mapCanvas()
select_pic = SelectRectangleMapTool(canvas)
canvas.setMapTool(select_pic)
|
nilq/baby-python
|
python
|
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from modelscript.metamodels.classes.associations import opposite
from modelscript.metamodels.objects import PackagableElement, Entity
from modelscript.base.exceptions import (
UnexpectedCase,
MethodToBeDefined)
class Link(PackagableElement, Entity, metaclass=ABCMeta):
    """Abstract base for a link: one instance of an Association connecting a
    source object to a target object in an object model."""

    def __init__(self,
                 model, association,
                 sourceObject, targetObject,
                 name=None,
                 package=None,
                 step=None,
                 astNode=None, lineNo=None,
                 description=None):
        #type: (ObjectModel, Union[Association, Placeholder], Object, Object, Optional[Text], Optional[Package], Optional['Step'],Optional['ASTNode'], Optional[int], Optional[TextBlock]) -> None
        PackagableElement.__init__(
            self,
            model=model,
            name=name,
            package=package,
            step=step,
            astNode=astNode,
            lineNo=lineNo,
            description=description
        )
        Entity.__init__(self)

        # The Association this link instantiates.
        self.association=association
        #type: association

        self.sourceObject = sourceObject
        # type: Object

        self.targetObject = targetObject
        # type: Object

        # Singleton-like link roles to allow direct comparison
        # of link role instances. (see linkRole method)
        self._linkRole=OrderedDict()
        self._linkRole['source']=LinkRole(self, 'source')
        self._linkRole['target']=LinkRole(self, 'target')

    @abstractmethod
    def isPlainLink(self):
        # just used to prevent creating object of this class
        # (ABCMeta is not enough)
        raise MethodToBeDefined( #raise:OK
            'method isPlainLink() is not defined.'
        )

    def object(self, position):
        #type: () -> RolePosition
        """Return the object at the given end ('source' or 'target')."""
        if position=='source':
            return self.sourceObject
        elif position=='target':
            return self.targetObject
        else:
            raise UnexpectedCase( #raise:OK
                'role position "%s" is not implemented' % position)

    def linkRole(self, position):
        # Return the shared LinkRole instance for this end (identity-stable).
        return self._linkRole[position]

    def __str__(self):
        return '(%s,%s,%s)' % (
            self.sourceObject.name,
            self.association.name,
            self.targetObject.name
        )
class LinkRole(object):
    """One end ('source' or 'target') of a Link, with convenience accessors
    to the underlying objects, roles and types."""

    def __init__(self, link, position):
        self.link = link
        self.position = position

    @property
    def object(self):
        """The object attached to this end of the link."""
        return self.link.object(self.position)

    @property
    def association(self):
        return self.link.association

    @property
    def role(self):
        """The association role at this position."""
        return self.link.association.role(self.position)

    @property
    def roleType(self):
        return self.role.type

    @property
    def objectType(self):
        return self.object.class_

    @property
    def opposite(self):
        """The LinkRole at the other end of the same link."""
        return self.link.linkRole(opposite(self.position))

    def __str__(self):
        # Highlight this end of the link with [[...]].
        source_name = self.link.sourceObject.name
        target_name = self.link.targetObject.name
        assoc_name = self.association.name
        if self.position == 'source':
            return '([[%s]],%s,%s)' % (source_name, assoc_name, target_name)
        if self.position == 'target':
            return '(%s,%s,[[%s]])' % (source_name, assoc_name, target_name)
        raise UnexpectedCase(  # raise:OK
            'Unexpected position: %s' % self.position)
class PlainLink(Link):
    """Concrete (non-composite) link; registers itself in the model's
    plain-link list on creation."""

    def __init__(self,
                 model, association,
                 sourceObject, targetObject,
                 name=None,
                 package=None,
                 step=None,
                 astNode=None, lineNo=None,
                 description=None):
        #type: (ObjectModel, Union[Association, Placeholder], Object, Object, Optional[Text], Optional[Package], Optional['Step'], Optional['ASTNode'], Optional[int], Optional[TextBlock]) -> None
        super(PlainLink, self).__init__(
            model=model,
            association=association,
            sourceObject=sourceObject,
            targetObject=targetObject,
            name=name,
            package=package,
            step=step,
            astNode=astNode,
            lineNo=lineNo,
            description=description
        )
        # Side effect: the model keeps track of all plain links.
        model._plainLinks.append(self)

    def isPlainLink(self):
        return True

    # def delete(self):
    #     self.state.links=[l for l in self.state.links if l != self]
|
nilq/baby-python
|
python
|
# coding: utf-8
from atomate.vasp.config import ADD_WF_METADATA
from atomate.vasp.powerups import (
add_wf_metadata,
add_common_powerups,
)
from atomate.vasp.workflows.base.core import get_wf
__author__ = "Ryan Kingsbury, Shyam Dwaraknath, Anubhav Jain"
__email__ = "rkingsbury@lbl.gov, shyamd@lbl.gov, ajain@lbl.gov"
def wf_scan_opt(structure, c=None):
    """
    Structure optimization using the SCAN metaGGA functional.

    This workflow performs a 2-step optmization. The first step
    is a GGA structure optimization using the PBESol functional that serves to
    precondition the geometry and charge density. The second step is a
    SCAN structure optimization.

    The first optimization is force converged with EDIFFG = -0.05,
    and the second optimization is force converged with EDIFFG=-0.02.

    The bandgap from the first step is used to update the KSPACING parameter,
    which sets the appropriate number of k-points for the subsequent SCAN
    calculation.
    """
    c = c or {}

    # Copy selected, truthy config entries into the input-set parameters.
    vasp_input_set_params = {}
    for config_key, param_key in (
        ("USER_INCAR_SETTINGS", "user_incar_settings"),
        ("vdw", "vdw"),
        ("bandgap", "bandgap"),
    ):
        value = c.get(config_key)
        if value:
            vasp_input_set_params[param_key] = value

    wf = get_wf(
        structure,
        "SCAN_optimization.yaml",
        common_params={"vasp_input_set_params": vasp_input_set_params}
    )

    wf = add_common_powerups(wf, c)
    if c.get("ADD_WF_METADATA", ADD_WF_METADATA):
        wf = add_wf_metadata(wf, structure)
    return wf
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
ID = 'id'
NAME = 'nombre'
PROV = 'provincia'
PROV_ID = 'provincia_id'
PROV_NAM = 'provincia_nombre'
DEPT = 'departamento'
DEPT_ID = 'departamento_id'
DEPT_NAM = 'departamento_nombre'
MUN = 'municipio'
MUN_ID = 'municipio_id'
MUN_NAM = 'municipio_nombre'
LOC = 'localidad'
LAT = 'centroide_lat'
LON = 'centroide_lon'
class GeorefWrapper:
    """Interface to the Georef REST API (apis.datos.gob.ar)."""

    def __init__(self):
        self.url = "http://apis.datos.gob.ar/georef/api/"
        # API limit for entries per bulk POST request.
        self.max_bulk_len = 5000

    def search_province(self, data):
        """Query the 'provincias' endpoint."""
        entity = 'provincias'
        return self._get_response(entity, data)

    def search_departament(self, data):
        """Query the 'departamentos' endpoint."""
        entity = 'departamentos'
        return self._get_response(entity, data)

    def search_municipality(self, data):
        """Query the 'municipios' endpoint."""
        entity = 'municipios'
        return self._get_response(entity, data)

    def search_locality(self, data):
        """Query the 'localidades' endpoint."""
        entity = 'localidades'
        return self._get_response(entity, data)

    def _get_response(self, entity, data):
        """POST the bulk query (paginated if needed) and collapse each result
        to at most its first match.

        Returns a list of {entity: [first_match]} dicts, or
        {'error': ...} on the first API error encountered.
        """
        result = []
        result_partial = []
        data_len = len([i for i in data[entity] if i])
        resource = self.url + entity

        # Paginate when the payload exceeds the API bulk limit.
        if data_len > self.max_bulk_len:
            batches = self._getrows_byslice(
                entity, data[entity], self.max_bulk_len)
        else:
            batches = [data]

        for row in batches:
            r = requests.post(resource, json=row)
            # BUG FIX: the response was parsed twice and success was detected
            # by substring search on the raw body; parse once and check the
            # actual JSON keys instead.
            payload = json.loads(r.content)
            if 'resultados' in payload:
                result_partial.append(payload['resultados'])
            else:
                error = self._get_first_error(payload['errores'])
                return {'error': error}

        # Keep only the first match per queried entry (empty list if none).
        for row in result_partial:
            for v in row:
                if v[entity]:
                    result.append({entity: [v[entity][0]]})
                else:
                    result.append({entity: []})
        return result

    @staticmethod
    def _getrows_byslice(entity, seq, rowlen):
        """Split `seq` into consecutive {entity: chunk} payloads of <= rowlen."""
        data_slice = []
        for start in range(0, len(seq), rowlen):
            data_slice.append({entity: seq[start:start + rowlen]})
        return data_slice

    @staticmethod
    def _get_first_error(result):
        """Return the first non-empty error entry."""
        idx = next(i for i, j in enumerate(result) if j)
        return result[idx]
|
nilq/baby-python
|
python
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import luigi
from servicecatalog_puppet.workflow.tag_policies import tag_policies_base_task
from servicecatalog_puppet.workflow.tag_policies import execute_tag_policies_task
from servicecatalog_puppet.workflow.manifest import manifest_mixin
class TagPoliciesForTask(
    tag_policies_base_task.TagPoliciesBaseTask, manifest_mixin.ManifestMixen,
):
    """Luigi task fan-out wrapper for a named tag-policies section."""

    # Name of the tag-policies section in the manifest.
    tag_policies_name = luigi.Parameter()
    # Account id of the puppet (hub) account.
    puppet_account_id = luigi.Parameter()

    def params_for_results_display(self):
        """Parameters shown in run-result output (also used as task identity)."""
        return {
            "puppet_account_id": self.puppet_account_id,
            "tag_policies_name": self.tag_policies_name,
            "cache_invalidator": self.cache_invalidator,
        }

    def get_klass_for_provisioning(self):
        # The concrete task class that performs the actual work.
        return execute_tag_policies_task.ExecuteTagPoliciesTask

    def run(self):
        # The base class schedules the real work; this task only records output.
        self.write_output(self.params_for_results_display())
|
nilq/baby-python
|
python
|
import os
from os import path
from imageio import imread
from konlpy.tag import Hannanum
from wordcloud import WordCloud, ImageColorGenerator
"""This code is to generate and to plot a wordcloud in Korean version.
Of course it is possible to generate a simple wordcloud with the original codes. However
due to the major difference with English and complexity, the result from the original codes will not
be as perfect as we expected.
The major difference between English and Korean(Hangul) is that English words can be devided by space(' ')
while Korean words cannot be divided by space. To make a Korean sentence, every single noun has to be combined with
articles without space(ex. I am --> 나는, 나:I 는:am).
For this reason, even though the text want to say 'I' in every appearance as '나는','나를', '나에게',
the original codes will separate these words as a different meaning and a different word.
'"""
"""To implement the codes, you must install konlpy package which is a module for natural language processing for Korean.
It provides a function with separating the main words and articles, and only extract the main words."""
"""So don't forget to install konlpy package!"""
# get data directory (using getcwd() is needed to support running example in generated IPython notebook)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()

# Read the mask/color image for the word cloud.
# NOTE(review): path.join(d, d + '...') repeats `d` — this only works if one of
# the components is absolute; confirm the intended path.
back_coloring = imread(path.join(d, d + '/word_cloud/kor_text/image/나뭇잎.jpg'))

# Path of a font that supports Korean glyphs.
font_path = d + '/word_cloud/examples/fonts/NotoSansKR/NotoSansKR-Black.otf'
def listToString(list1):
    """Join the given tokens into a single space-separated string."""
    return " ".join(list1)
def get_string(path):
    """Read a UTF-8 Korean text file and return its nouns, space-separated.

    Uses konlpy's Hannanum analyzer to extract only nouns, so inflected forms
    of the same word collapse to one token.

    NOTE: the parameter name `path` shadows the module-level `os.path` import
    inside this function; kept for interface compatibility.
    """
    # Context manager guarantees the file is closed even if reading fails
    # (the original closed it only on the success path).
    with open(path, "r", encoding="utf-8") as f:
        sample = f.read()
    h = Hannanum()
    list_nouns = h.nouns(sample)  # get list of nouns from sample
    return listToString(list_nouns)  # get string of list_nouns
# NOTE(review): this assignment shadows the `from os import path` module import;
# `path.join` must not be used after this line.
path = d + '/word_cloud/kor_text/황순원_소나기.txt' #path of korean text
tags = get_string(path) # tags : string of list_nouns

wc = WordCloud(font_path=font_path, background_color="white", mask=back_coloring,
               max_font_size=100, random_state=42, width=1000, height=860, margin=2) #collocations=false

# Generate and display the word cloud.
wordcloud = wc.generate(tags)
import matplotlib.pyplot as plt
plt.imshow(wordcloud, interpolation ='bilinear')
#image_colors_byImg = ImageColorGenerator(back_coloring)
#plt.imshow(wordcloud.recolor(color_func=image_colors_byImg), interpolation='bilinear')
plt.axis("off")
plt.show()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Aplicando estilo via classe.
Adicionando uma classe através do método `add_class()` e
arquivo css é caregado via linguagem de programação.
"""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk, Gdk
class MainWindow(Gtk.ApplicationWindow):
    """Demo window showing CSS styling applied through style-context classes."""

    def __init__(self):
        super().__init__()
        self._set_custom_css(file='./css/custom.css')

        self.set_title(title='Aplicando estilo via classe')
        self.set_default_size(width=1366 / 2, height=768 / 2)
        self.set_position(position=Gtk.WindowPosition.CENTER)
        self.set_default_icon_from_file(filename='../../assets/icons/icon.png')

        hbbox = Gtk.ButtonBox.new(orientation=Gtk.Orientation.HORIZONTAL)
        hbbox.set_halign(align=Gtk.Align.CENTER)
        hbbox.set_valign(align=Gtk.Align.CENTER)
        hbbox.set_spacing(spacing=12)
        self.add(widget=hbbox)

        button_ok = Gtk.Button.new_with_label(label='OK')
        # Add the native `suggested-action` class to the widget.
        button_ok.get_style_context().add_class('suggested-action')
        hbbox.add(widget=button_ok)

        button_cancel = Gtk.Button.new_with_label(label='Cancelar')
        # Add the native `destructive-action` class to the widget.
        button_cancel.get_style_context().add_class('destructive-action')
        hbbox.add(widget=button_cancel)

        button_warning = Gtk.Button.new_with_label(label='Cancelar')
        # Add the CUSTOM `warning-action` class (defined in custom.css) to the widget.
        button_warning.get_style_context().add_class('warning-action')
        hbbox.add(widget=button_warning)

    @staticmethod
    def _set_custom_css(file):
        """Load the CSS file and register it for the default screen."""
        css_provider = Gtk.CssProvider.new()
        css_provider.load_from_path(path=file)
        screen = Gdk.Screen()
        style_context = Gtk.StyleContext.new()
        style_context.add_provider_for_screen(
            screen=screen.get_default(),
            provider=css_provider,
            priority=Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,
        )
if __name__ == '__main__':
    # Build the window, quit the main loop when it is closed, and run.
    win = MainWindow()
    win.connect('destroy', Gtk.main_quit)
    win.show_all()
    Gtk.main()
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.2 on 2019-06-30 13:19
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the `_id` suffix from two FK field names."""

    dependencies = [
        ('favorites', '0003_auto_20190630_1317'),
    ]

    operations = [
        migrations.RenameField(
            model_name='auditlog',
            old_name='favourite_id',
            new_name='favourite',
        ),
        migrations.RenameField(
            model_name='favorite',
            old_name='category_id',
            new_name='category',
        ),
    ]
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.7 on 2020-03-14 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Card.sent_at optional (nullable/blank)."""

    dependencies = [
        ("postcards", "0004_contact_language"),
    ]

    operations = [
        migrations.AlterField(
            model_name="card",
            name="sent_at",
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
nilq/baby-python
|
python
|
from projects.utils.multiprocessing import *
from projects.utils.sql import *
from projects.utils.data_table import *
|
nilq/baby-python
|
python
|
from Constants import ALL_LEVELS, CAP_LEVELS, MISSION_LEVELS, BOWSER_STAGES, LVL_BOB, SPECIAL_LEVELS, LVL_MAIN_SCR, LVL_CASTLE_GROUNDS, BEHAVIOUR_NAMES
from randoutils import format_binary
import random
import sys
import numpy as np
from Entities.Object3D import Object3D
import logging
#from Parsers.LevelScript import LevelScriptParser
from random import shuffle
WHITELIST_SHUFFLING = [
(None, 0xBC), # Bob-Omb
(0x13003174, None), # Bob-Omb
(0x1300472C, None), # Goomba,
(0x13004770, None), # Goomba Triplet
(0x13001298, None), # Coin Triplet
(0x130001F4, None), # King Bob-Omb
(0x13002BB8, None), # King Whomp
(0x130039D4, None), # Moneybag
(None, 0x68), # Koopa (The Quick, Normal, etc)
#(0x130005B4, None), # Rotating Platform WF
(0x13002AA4, None), # Tree Behaviour
(None, 0x65), # Scuttlebug
(None, 0x19), # Tree (Snow)
(None, 0x17), # Tree (In Courses)
(None, 0x18), # Tree (Courtyard)
(None, 0x1B), # Tree (SSL)
(0x13001548, None), # Heave-Ho
(None, 0x78), # Heart
(0x13004348, None), # Red Coin
(0x13003E8C, None), # Red Coin Star
(0x13002EC0, None), # Mario Spawn
(0x13005468, None), # Skeeter (WDW Bug thing)
(0x13000BC8, None), # Thwomp
(0x13000B8C, None), # Thwomp 2
(0x1300525C, None), # Grindel
(0x13001FBC, None), # Piranha
(0x13005120, None), # Fire-Spitting
(0x13002EF8, None), # Toad
(0x130009A4, None), # Single Coin
(0x13000964, None), # Coins (x3)
(0x13000984, None), # Coins (x10)
(0x130008EC, None), # Coins (Formations)
(0x13005440, 0x58), # Clam in JRB
(0x13004634, None), # Pokey
(0x13004668, 0x55), # Pokeys Head
(0x130030A4, None), # Blue Coin
(None, 0x7C), # Sign
(0x13003EAC, 0xD7),
(None, 0x74), # Coin Type 1
(None, 0x75), # Coin Type 2
(None, 0x74), # Coin Type 3
(None, 0x75), # Multiple Coins
(None, 0xD4), # One-Up
(0x13001F3C, None), # Koopa Shell
(0x130020E8, 0x57), # Lost Penguin
(0x13002E58, None), # Wandering Penguin
(0x13004148, 0xD4), # Homing-One-Up
(0x130031DC, 0xC3), # Bob-Omb Buddy (With Message)
(0x13003228, None), # Bob-Omb Buddy (Opening Canon)
(0x1300478C, 0x66),
#(None, 0xDF), # Chuckya
(0x13000054, None), # Eye-Ball
(0x13001108, None), # Flamethrower
(0x130046DC, 0xDC), # Fly-Guy
(None, 0x89), # Item-Box
(0x13004698, None), # Bat
(0x130046DC, None), # Fly-Guy
(0x13004918, None), # Lakitu
(0x13004954, None), # Evil Lakitu
(0x130049C8, None), # Spiny
(0x13004A00, None), # Mole
(0x13004A58, None), # Mole in Hole
(0x13003700, 0x65), # Ice Bully (Big)
(0x130036C8, 0x64), # Ice Bully (Small)
(0x13001650, 0x00), # Bouncing Box
(0x130027E4, 0x65), # Boo
(0x130027D0, 0x00), # Boo (x3)
(0x13002794, 0x65), # Big Boo
(0x130007F8, 0x7A), # Star
(0x13003E3C, 0x7A), # Star
#(0x13001B70, 0x00), # Checkerboard Elevator (Logic: DON'T TOUCH FOR VANISH CAP LEVEL)
(0x13002F74, 0x00), # Mario Start 1
(0x1300442C, None), # TTC: Pendulum
(0x130054B8, None), # TTC: Pendulum
(0x13004FD4, None), # BBH: Haunted Chair
(0x13005024, None), # BBH: Piano
(0x1300506C, None), # BBH: Bookend
]
# ROM offset at which behaviour scripts begin.
BSCRIPT_START = 0x10209C

# Extra vertical clearance (in units) to apply when placing these
# (behaviour, model) entries. NOTE(review): key meanings inferred from the
# whitelist above — confirm against BEHAVIOUR_NAMES.
HEIGHT_OFFSETS = {
  (None, 0x89): 200,
  (0x130007F8, 0x7A): 200,
  (0x13002250, None): 200,
  (None, 0x75): 300,
}

# Entries that must not be placed on underwater floors.
CANT_BE_IN_WATER = [
  (None, 0x89), # Star
  (0x13003700, None), # Ice Bully (Big) - otherwise you win instantly
  (0x130031DC, 0xC3), # Bob-Omb Buddy (With Message)
  (0x13003228, None) # Bob-Omb Buddy (Opening Canon)
]

# Floor collision types considered safe to place objects on.
WALKABLE_COLLISION_TYPES = [
  0x00, # environment default
  0x29, # default floor with noise
  0x14, # slightly slippery
  0x15, # anti slippery
  0x0B, # close camera
  0x30, # hard floor (always fall damage)
  ## may be harder
  #0x13, # slippery
  #0x2A, # slippery with noise
  0x0D, # water (stationary)
]
def signed_tetra_volume(a, b, c, d):
    """Return the sign (-1, 0, +1) of the signed volume of tetrahedron (a, b, c, d).

    The sign tells on which side of triangle (a, b, c) the point *d* lies;
    all four arguments are 3-component numpy vectors.
    """
    normal = np.cross(b - a, c - a)
    volume = np.dot(normal, d - a) / 6.0
    return np.sign(volume)
def trace_geometry_intersections(level_geometry, ray, face_type=None):
    """Find all intersections between *ray* and the level's triangles.

    Implements the Moeller-Trumbore ray/triangle intersection test:
    https://wiki2.org/en/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm

    :param level_geometry: object exposing get_triangles(face_type) -> faces,
        where each face has .vertices (three 3-component numpy arrays) and
        .bounding_box as [xmin, xmax, ymin, ymax, zmin, zmax]
    :param ray: pair [q0, q1] of numpy vectors; the ray starts at q0 and
        points through q1 (extends to infinity past q1)
    :param face_type: optional filter forwarded to get_triangles
    :return: (intersection_count, intersection_positions, intersection_faces)
    """
    # Tolerance for the "parallel" and "behind origin" tests.  The original
    # used 0e-10 (== 0.0): abs(a) < 0.0 can never be true, so parallel rays
    # slipped past the guard and f = 1.0/a divided by zero.
    EPSILON = 1e-10
    [q0, q1] = ray
    ray_origin = q0
    ray_vector = q1 - q0
    ray_is_vertical = ray_vector[0] == 0.0 and ray_vector[1] == 0.0
    faces = level_geometry.get_triangles(face_type)
    intersection_count = 0
    intersection_positions = []
    intersection_faces = []
    for face in faces:
        [p1, p2, p3] = face.vertices
        [xmin, xmax, ymin, ymax, zmin, zmax] = face.bounding_box
        if ray_is_vertical:
            # quick reject for vertical rays: the origin's (x, y) must fall
            # inside the triangle's xy bounding box for a hit to be possible
            if ray_origin[0] < xmin or ray_origin[0] > xmax or ray_origin[1] < ymin or ray_origin[1] > ymax:
                continue
        edge_a = p2 - p1
        edge_b = p3 - p1
        h = np.cross(ray_vector, edge_b)
        a = np.dot(edge_a, h)
        if abs(a) < EPSILON:
            # ray is (numerically) parallel to the triangle's plane
            continue
        f = 1.0 / a
        s = ray_origin - p1
        u = f * np.dot(s, h)
        if u < 0.0 or u > 1.0:
            # barycentric u outside the triangle
            continue
        q = np.cross(s, edge_a)
        v = f * np.dot(ray_vector, q)
        if v < 0.0 or u + v > 1.0:
            # barycentric v outside the triangle
            continue
        t = f * np.dot(edge_b, q)
        if t > EPSILON:
            # hit in front of the ray origin
            intersection_count += 1
            intersection_positions.append(
                ray_origin + ray_vector * t
            )
            intersection_faces.append(face)
    return (intersection_count, intersection_positions, intersection_faces)
"""
[q0, q1] = ray
triangles = level_geometry.get_triangles() # [[[-1.0, -1.0, 0.0], [1.0, -1.0, 0.0], [0.0, 1.0, 0.0]]]
intersection_count = 0
intersection_positions = []
for triangle in triangles:
[p1, p2, p3] = triangle
signed_volume_a = signed_tetra_volume(q0, p1, p2, p3)
signed_volume_b = signed_tetra_volume(q1, p1, p2, p3)
if signed_volume_a != signed_volume_b:
s3 = signed_tetra_volume(q0,q1,p1,p2)
s4 = signed_tetra_volume(q0,q1,p2,p3)
s5 = signed_tetra_volume(q0,q1,p3,p1)
if s3 == s4 and s4 == s5:
intersection_count += 1
n = np.cross(p2-p1,p3-p1)
t = np.dot(p1-q0,n) / np.dot(q1-q0,n)
intersection_positions.append(
q0 + t * (12-q0)
)
return (intersection_count, intersection_positions)
"""
def get_closest_intersection(intersections, position):
    """Return the Euclidean distance from *position* to the nearest point.

    :param intersections: list of 3-component numpy vectors
    :param position: 3-component numpy vector to measure from
    :return: smallest distance, or 1e20 ("infinity") when the list is empty
        so callers' minimum-clearance checks trivially pass
    """
    # The original also tracked a `closest_index` that was never returned;
    # that dead state has been removed.
    closest_dist = 1e20  # big number as "infinity"
    for intersection_point in intersections:
        diff = position - intersection_point
        dist = np.sqrt(np.sum(np.power(diff, 2)))
        if dist < closest_dist:
            closest_dist = dist
    return closest_dist
class LevelRandomizer:
    """Randomly repositions shuffle-whitelisted objects within each level.

    For every non-special level: pick a random floor triangle, sample a point
    on it, validate the point (floor type, water, ceiling clearance) and
    write the accepted position back into the ROM.
    """
    def __init__(self, rom : 'ROM'):
        # ROM wrapper providing parsed level scripts and write access.
        self.rom = rom
    @staticmethod
    def can_shuffle(obj : Object3D):
        """True if *obj* may be moved: Mario's spawn point, or any object
        matched by a (behaviour, model_id) pair in WHITELIST_SHUFFLING
        (None components act as wildcards)."""
        if obj.source == "MARIO_SPAWN":
            return True
        else:
            for (target_bscript_address, target_model_id) in WHITELIST_SHUFFLING:
                if (target_model_id is None or target_model_id == obj.model_id) and (target_bscript_address is None or target_bscript_address == obj.behaviour):
                    return True
        return False
    def get_height_offset(self, obj : Object3D):
        """Vertical offset used to lift *obj* above the sampled floor point
        (looked up in HEIGHT_OFFSETS with wildcard matching)."""
        for (target_bscript_address, target_model_id) in HEIGHT_OFFSETS:
            if (target_model_id is None or target_model_id == obj.model_id) and (target_bscript_address is None or target_bscript_address == obj.behaviour):
                return HEIGHT_OFFSETS[(target_bscript_address, target_model_id)]
        return 1 # fallback to ensure it doesn't fail oob check or falls out of level
    def can_be_in_water(self, obj : Object3D):
        """False for objects matched by CANT_BE_IN_WATER (wildcard pairs)."""
        for (target_bscript_address, target_model_id) in CANT_BE_IN_WATER:
            if (target_model_id is None or target_model_id == obj.model_id) and (target_bscript_address is None or target_bscript_address == obj.behaviour):
                return False
        return True
    def is_in_water_box(self, water_box, position):
        """True if *position* lies inside *water_box* and the box holds water.

        NOTE(review): this check treats index 1 of *position* as the vertical
        axis, while is_valid_position ray-casts along index 2 — confirm the
        coordinate conventions of the position vectors agree.
        """
        (
            water_box_id,
            water_box_start_x, water_box_start_z,
            water_box_end_x, water_box_end_z,
            water_box_y,
            water_box_type
        ) = water_box
        if water_box_type != "WATER":
            # box is some other volume type (e.g. toxic/mist), not water
            return False
        if position[0] < water_box_start_x or position[0] > water_box_end_x:
            # outside the box horizontally (x)
            return False
        if position[2] < water_box_start_z or position[2] > water_box_end_z:
            # outside the box horizontally (z)
            return False
        if position[1] > water_box_y:
            # object sits above the water surface
            return False
        return True
    def is_valid_position(self, level_script, object3d, position):
        """Validate a candidate *position* for *object3d*.

        Rejects positions that are in water (for water-averse objects),
        inside geometry or out of bounds (even number of floors below),
        on a non-walkable floor type, or closer than 10 units to a ceiling.
        """
        if not self.can_be_in_water(object3d):
            for water_box in level_script.water_boxes:
                if self.is_in_water_box(water_box, position):
                    logging.info("invalid position for object, in water box")
                    return False
        # count floors under the position we want to test
        (floors_underneath, floor_positions, floor_faces) = trace_geometry_intersections(
            level_script.level_geometry,
            [
                position + np.array([0.0, 0.0, 1.0]),
                position + np.array([0.0, 0.0, -1.0e7])
            ]
        )
        # if the amount is even, we're inside a wall or (if it's 0) oob
        # if the amount is odd we're ok
        is_valid_amount = floors_underneath % 2 == 1
        if not is_valid_amount: return False
        if floor_faces[0].collision_type not in WALKABLE_COLLISION_TYPES:
            # the floor directly underneath is not a standable surface type
            return False
        # require minimum distance from point from ceilings
        (_, ceiling_positions, ceiling_faces) = trace_geometry_intersections(
            level_script.level_geometry,
            [
                position + np.array([0.0, 0.0, 1.0]),
                position + np.array([0.0, 0.0, +1.0e7])
            ]
        )
        closest_ceiling = get_closest_intersection(ceiling_positions, position)
        if closest_ceiling < 10.0: return False
        return is_valid_amount
    def shuffle_objects(self):
        """Shuffle every whitelisted object in every non-special level,
        resampling rejected positions until all objects are placed."""
        for (level, parsed) in self.rom.levelscripts.items():
            if level in SPECIAL_LEVELS:
                continue
            floor_triangles = parsed.level_geometry.get_triangles('FLOOR')
            shufflable_objects = list(filter(LevelRandomizer.can_shuffle, parsed.objects))
            other_objects = list(filter(lambda x: not LevelRandomizer.can_shuffle(x), parsed.objects))
            for other_object in other_objects:
                # mark unshuffled objects in blue for the debug view
                parsed.level_geometry.add_debug_marker(other_object.position, other_object, color=(100, 100, 255))
            while len(shufflable_objects) > 0:
                obj = shufflable_objects.pop()
                face = random.choice(floor_triangles)
                [p1, p2, p3] = face.vertices
                # sample a point on the triangle via two barycentric weights
                # NOTE(review): the usual uniform fold is r = 1 - r; using
                # r - 1 yields negative weights (a point outside this
                # triangle), relying on is_valid_position to reject it —
                # confirm this is intended.
                r1 = random.random()
                r2 = random.random()
                if r1 + r2 > 1:
                    r1 = r1 - 1
                    r2 = r2 - 1
                point = p1 + (r1 * (p2 - p1)) + (r2 * (p3 - p1))
                # match bscript and model_id
                height_offset = self.get_height_offset(obj)
                point[2] += height_offset
                if not self.is_valid_position(parsed, obj, point):
                    # rejected: push back onto the work list and resample later
                    shufflable_objects.append(obj)
                else:
                    obj.set(self.rom, 'position', tuple([int(p) for p in list(point)]))
                    # mark placed objects in red for the debug view
                    parsed.level_geometry.add_debug_marker(point, obj, color=(255, 100, 100))
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Standard Ansible module metadata: schema version, maturity, support owner.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pyenv
short_description: Run pyenv command
options:
always_copy:
description:
- the "--always-copy" option of pyenv virtualenv
required: false
type: bool
default: false
bare:
description:
- the "--bare" option of "versions" and "virtualenvs" subcommand
required: false
type: bool
default: true
clear:
description:
- the "--clear" option of pyenv virtualenv
required: false
type: bool
default: false
copies:
description:
- the "--copies" option of pyenv virtualenv
required: false
type: bool
default: false
expanduser:
description:
- whether the environment variable PYENV_ROOT and "pyenv_root" option are filtered by os.path.expanduser
required: false
type: bool
default: true
force:
description:
- the "-f/--force" option of pyenv install
required: false
type: bool
default: false
list:
description:
- -l/--list option of pyenv install command
required: false
type: bool
default: false
no_pip:
description:
- the "--no-pip" option of pyenv virtualenv
required: false
type: bool
default: false
no_setuptools:
description:
- the "--no-setuptools" option of pyenv virtualenv
required: false
type: bool
default: false
no_wheel:
description:
- the "--no-wheel" option of pyenv virtualenv
required: false
type: bool
default: false
pyenv_root:
description:
- PYENV_ROOT
required: false
type: str
default: null
skip_aliases:
description:
- the "-s/--skip-aliases" option of pyenv virtualenvs
required: false
type: bool
default: true
skip_existing:
description:
- the "-s/--skip-existing" option of pyenv install
required: false
type: bool
default: true
subcommand:
description:
- pyenv subcommand
choices: ["install", "uninstall", "versions", "global", "virtualenv", "virtualenvs"]
required: false
default: install
symlinks:
description:
- the "--symlinks" option of pyenv virtualenv
required: false
type: bool
default: false
version:
description:
- A python version name
type: str
required: false
default: null
versions:
description:
- python version names
type: list
required: false
default: null
virtualenv_name:
description:
- A virtualenv name
type: str
required: false
default: null
without_pip:
description:
- the "--without_pip" option of pyenv virtualenv
required: false
type: bool
default: false
requirements:
- pyenv
author: "Suzuki Shunsuke"
'''
EXAMPLES = '''
- name: pyenv install -s 3.6.1
pyenv:
version: 3.6.1
pyenv_root: "~/.pyenv"
- name: pyenv install -f 3.6.1
pyenv:
version: 3.6.1
pyenv_root: "~/.pyenv"
force: yes
- name: pyenv uninstall -f 2.6.9
pyenv:
subcommand: uninstall
version: 2.6.9
pyenv_root: "~/.pyenv"
- name: pyenv global 3.6.1
pyenv:
subcommand: global
versions:
- 3.6.1
pyenv_root: "~/.pyenv"
- name: pyenv global
pyenv:
subcommand: global
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.versions
- name: pyenv install -l
pyenv:
list: yes
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.versions
- name: pyenv versions --bare
pyenv:
subcommand: versions
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.versions
- name: pyenv virtualenvs --skip-aliases --bare
pyenv:
subcommand: virtualenvs
pyenv_root: "~/.pyenv"
register: result
- debug:
var: result.virtualenvs
- name: pyenv virtualenv --force 2.7.13 ansible
pyenv:
subcommand: virtualenv
pyenv_root: "~/.pyenv"
version: 2.7.13
virtualenv_name: ansible
force: yes
'''
RETURNS = '''
virtualenvs:
description: the return value of `pyenv virtualenvs`
returned: success
type: list
sample:
- 3.6.1/envs/neovim
- neovim
versions:
description: the return value of `pyenv install --list` or `pyenv global` or `pyenv versions`
returned: success
type: list
sample:
- 2.7.13
- 3.6.1
'''
import os # noqa E402
from ansible.module_utils.basic import AnsibleModule # noqa E402
def wrap_get_func(func):
    """Turn a (success, payload) getter into an Ansible-style entry point.

    The wrapped callable invokes *func* and reports its payload through
    module.exit_json on success or module.fail_json on failure.
    """
    def wrapped(module, *args, **kwargs):
        success, payload = func(module, *args, **kwargs)
        handler = module.exit_json if success else module.fail_json
        handler(**payload)
    return wrapped
def get_install_list(module, cmd_path, **kwargs):
    """Run `pyenv install -l` and return (ok, result_dict).

    On success the result carries the list of installable version names.
    """
    rc, out, err = module.run_command([cmd_path, "install", "-l"], **kwargs)
    if rc != 0:
        return (False, dict(msg=err, stdout=out))
    # drop the "Available versions:" header and the empty chunk left by
    # the trailing newline
    entries = out.split("\n")[1:-1]
    versions = [entry.strip() for entry in entries]
    return (True, dict(
        changed=False, failed=False, stdout=out, stderr=err,
        versions=versions))
cmd_install_list = wrap_get_func(get_install_list)
def get_versions(module, cmd_path, bare, **kwargs):
    """Run `pyenv versions [--bare]` and return (ok, result_dict)."""
    cmd = [cmd_path, "versions"] + (["--bare"] if bare else [])
    rc, out, err = module.run_command(cmd, **kwargs)
    if rc != 0:
        return (False, dict(msg=err, stdout=out))
    # drop the empty chunk left by the trailing newline
    versions = [entry.strip() for entry in out.split("\n")[:-1]]
    return (True, dict(
        changed=False, failed=False, stdout=out, stderr=err,
        versions=versions))
cmd_versions = wrap_get_func(get_versions)
def cmd_uninstall(module, cmd_path, version, **kwargs):
    """ pyenv uninstall --force <version>

    Idempotent: exits unchanged when *version* is not currently installed.
    Reports the outcome via module.exit_json / module.fail_json.
    """
    result, data = get_versions(module, cmd_path, True, **kwargs)
    if not result:
        return module.fail_json(**data)
    if version not in data["versions"]:
        # nothing to do: the requested version is not installed
        return module.exit_json(
            changed=False, failed=False, stdout="", stderr="")
    cmd = [cmd_path, "uninstall", "-f", version]
    rc, out, err = module.run_command(cmd, **kwargs)
    if rc:
        module.fail_json(msg=err, stdout=out)
    else:
        module.exit_json(changed=True, failed=False, stdout=out, stderr=err)
def get_global(module, cmd_path, **kwargs):
    """Run `pyenv global` and return (ok, result_dict) with the active
    global interpreter version(s)."""
    rc, out, err = module.run_command([cmd_path, "global"], **kwargs)
    if rc != 0:
        return (False, dict(msg=err, stdout=out))
    # drop the empty chunk left by the trailing newline
    versions = [entry.strip() for entry in out.split("\n")[:-1]]
    return (True, dict(
        changed=False, failed=False, stdout=out, stderr=err,
        versions=versions))
cmd_get_global = wrap_get_func(get_global)
def cmd_set_global(module, cmd_path, versions, **kwargs):
    """ pyenv global <version> [<version> ...]

    Idempotent: exits unchanged when the requested set of versions is
    already active (comparison is order-insensitive).
    """
    result, data = get_global(module, cmd_path, **kwargs)
    if not result:
        return module.fail_json(**data)
    if set(data["versions"]) == set(versions):
        # already the active global set: nothing to do
        return module.exit_json(
            changed=False, failed=False, stdout="", stderr="",
            versions=versions)
    rc, out, err = module.run_command(
        [cmd_path, "global"] + versions, **kwargs)
    if rc:
        module.fail_json(msg=err, stdout=out)
    else:
        module.exit_json(
            changed=True, failed=False, stdout=out, stderr=err,
            versions=versions)
def cmd_install(module, params, cmd_path, **kwargs):
    """ pyenv install [--skip-existing] [--force] <version>

    :param module: AnsibleModule used to run commands and report results
    :param params: module params; reads "skip_existing", "force", "version"
    :param cmd_path: path to the pyenv executable
    Exits via module.exit_json / module.fail_json.
    """
    cmd = [cmd_path, "install"]
    # Default to False so `changed` is always well defined.  The original
    # left `force` unbound when skip_existing was False and force was not
    # True, raising NameError after a successful run.
    force = False
    if params["skip_existing"] is not False:
        # default behaviour: skip versions that are already installed
        cmd.append("--skip-existing")
    elif params["force"] is True:
        force = True
        cmd.append("--force")
    cmd.append(params["version"])
    rc, out, err = module.run_command(cmd, **kwargs)
    if rc:
        return module.fail_json(msg=err, stdout=out)
    # with --skip-existing, pyenv prints nothing when there was nothing to
    # do, so non-empty output means an install actually happened
    changed = force or bool(out)
    return module.exit_json(
        changed=changed, failed=False, stdout=out, stderr=err)
def get_virtualenvs(module, cmd_path, skip_aliases, bare, **kwargs):
    """Run `pyenv virtualenvs [--skip-aliases] [--bare]` and return
    (ok, result_dict) with the known virtualenv names."""
    flags = [flag for flag, enabled in (
        ("--skip-aliases", skip_aliases), ("--bare", bare)) if enabled]
    rc, out, err = module.run_command([cmd_path, "virtualenvs"] + flags, **kwargs)
    if rc != 0:
        return (False, dict(msg=err, stdout=out))
    # drop the empty chunk left by the trailing newline
    virtualenvs = [entry.strip() for entry in out.split("\n")[:-1]]
    return (True, dict(
        changed=False, failed=False, stdout=out, stderr=err,
        virtualenvs=virtualenvs))
cmd_virtualenvs = wrap_get_func(get_virtualenvs)
def cmd_virtualenv(
        module, cmd_path, version, virtualenv_name, options, **kwargs):
    """ pyenv virtualenv [--force] <version> <virtualenv name>

    Creates a virtualenv.  With force/clear the command runs
    unconditionally; otherwise it is idempotent, checking the existing
    virtualenvs first.  Exits via module.exit_json / module.fail_json.
    """
    cmd = [cmd_path, "virtualenv"]
    # translate each enabled boolean option into its CLI flag
    for key in [
            "force", "no_pip", "no_setuptools", "no_wheel", "symlinks",
            "copies", "clear", "without_pip"]:
        if options[key]:
            cmd.append("--{}".format(key.replace("_", "-")))
    if options["force"]:
        # pyenv virtualenv --force not working as expected?
        # https://github.com/pyenv/pyenv-virtualenv/issues/161
        # NOTE(review): the loop above already appended --force, so the
        # flag appears twice on the command line (harmless) — confirm.
        cmd.append("--force")
        cmd.append(version)
        cmd.append(virtualenv_name)
        rc, out, err = module.run_command(cmd, **kwargs)
        if rc:
            return module.fail_json(msg=err, stdout=out)
        else:
            return module.exit_json(
                changed=True, failed=False, stdout=out, stderr=err)
    if options["clear"]:
        # pyenv virtualenv --clear not working as expected?
        cmd.append(version)
        cmd.append(virtualenv_name)
        rc, out, err = module.run_command(cmd, **kwargs)
        if rc:
            return module.fail_json(msg=err, stdout=out)
        else:
            return module.exit_json(
                changed=True, failed=False, stdout=out, stderr=err)
    # neither force nor clear: only create when it does not already exist
    result, data = get_virtualenvs(module, cmd_path, False, True, **kwargs)
    if not result:
        return module.fail_json(**data)
    virtualenvs = set(data["virtualenvs"])
    if virtualenv_name in virtualenvs:
        if "{}/envs/{}".format(version, virtualenv_name) in virtualenvs:
            # same name, same base version: nothing to do
            return module.exit_json(
                changed=False, failed=False,
                stdout="{} already exists".format(virtualenv_name), stderr="")
        else:
            # name collision with a different base interpreter version
            return module.fail_json(
                msg="{} already exists but version differs".format(
                    virtualenv_name))
    cmd.append(version)
    cmd.append(virtualenv_name)
    rc, out, err = module.run_command(cmd, **kwargs)
    if rc:
        return module.fail_json(msg=err, stdout=out)
    else:
        return module.exit_json(
            changed=True, failed=False, stdout=out, stderr=err)
# User-facing error messages, keyed by failure kind (shared by main()).
MSGS = {
    "required_pyenv_root": (
        "Either the environment variable 'PYENV_ROOT' "
        "or 'pyenv_root' option is required")
}
def get_pyenv_root(params):
    """Resolve the pyenv root directory.

    The "pyenv_root" module option wins; otherwise the PYENV_ROOT
    environment variable is consulted.  The result is passed through
    os.path.expanduser when the "expanduser" option is set.  Returns
    None when neither source provides a value.
    """
    root = params["pyenv_root"] if params["pyenv_root"] else os.environ.get("PYENV_ROOT")
    if root is None:
        return None
    return os.path.expanduser(root) if params["expanduser"] else root
def main():
    """Module entry point: parse arguments, resolve PYENV_ROOT and dispatch
    to the handler for the requested pyenv subcommand."""
    module = AnsibleModule(argument_spec={
        "bare": {"required": False, "type": "bool", "default": True},
        "copies": {"required": False, "type": "bool", "default": False},
        "clear": {"required": False, "type": "bool", "default": False},
        "force": {"required": False, "type": "bool", "default": None},
        "expanduser": {"required": False, "type": "bool", "default": True},
        "list": {"required": False, "type": "bool", "default": False},
        "no_pip": {"required": False, "type": "bool", "default": False},
        "no_setuptools": {"required": False, "type": "bool", "default": False},
        "no_wheel": {"required": False, "type": "bool", "default": False},
        "pyenv_root": {"required": False, "default": None},
        "skip_aliases": {"required": False, "type": "bool", "default": True},
        "skip_existing": {"required": False, "type": "bool", "default": None},
        "subcommand": {
            "required": False, "default": "install",
            "choices": [
                "install", "uninstall", "versions", "global",
                "virtualenv", "virtualenvs"]
        },
        "symlinks": {"required": False, "type": "bool", "default": False},
        "version": {"required": False, "type": "str", "default": None},
        "versions": {"required": False, "type": "list", "default": None},
        "virtualenv_name": {"required": False, "type": "str", "default": None},
        "without_pip": {"required": False, "type": "bool", "default": False},
    })
    params = module.params
    environ_update = {}
    # every pyenv invocation below needs PYENV_ROOT exported
    pyenv_root = get_pyenv_root(params)
    if pyenv_root is None:
        return module.fail_json(
            msg=MSGS["required_pyenv_root"])
    environ_update["PYENV_ROOT"] = pyenv_root
    cmd_path = os.path.join(pyenv_root, "bin", "pyenv")
    if params["subcommand"] == "install":
        # `list: yes` turns install into `pyenv install -l`
        if params["list"]:
            return cmd_install_list(
                module, cmd_path, environ_update=environ_update)
        return cmd_install(
            module, params, cmd_path, environ_update=environ_update)
    elif params["subcommand"] == "uninstall":
        if not params["version"]:
            return module.fail_json(
                msg="uninstall subcommand requires the 'version' parameter")
        return cmd_uninstall(
            module, cmd_path, params["version"], environ_update=environ_update)
    elif params["subcommand"] == "versions":
        return cmd_versions(
            module, cmd_path, params["bare"], environ_update=environ_update)
    elif params["subcommand"] == "global":
        # with versions: set them; without: report the current global set
        if params["versions"]:
            return cmd_set_global(
                module, cmd_path, params["versions"],
                environ_update=environ_update)
        else:
            return cmd_get_global(
                module, cmd_path, environ_update=environ_update)
    elif params["subcommand"] == "virtualenvs":
        return cmd_virtualenvs(
            module, cmd_path, params["skip_aliases"], params["bare"],
            environ_update=environ_update)
    elif params["subcommand"] == "virtualenv":
        if not params["version"]:
            return module.fail_json(
                msg="virtualenv subcommand requires the 'version' parameter")
        if not params["virtualenv_name"]:
            return module.fail_json(
                msg=(
                    "virtualenv subcommand requires the 'virtualenv_name' "
                    "parameter"))
        # collect the boolean flags forwarded to `pyenv virtualenv`
        options = dict((key, params[key]) for key in [
            "force", "no_pip", "no_setuptools", "no_wheel", "symlinks",
            "copies", "clear", "without_pip"])
        return cmd_virtualenv(
            module, cmd_path, params["version"], params["virtualenv_name"],
            options, environ_update=environ_update)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright (C) 2016-2020 Wason Technology, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#Example client to change the light source color
#For use with rip_sensor_world.world Gazebo world
#Warning: Color changes do not currently show in gzclient. They
#will be applied to the camera sensors.
import sys
from RobotRaconteur.Client import *
import time
import cv2
import numpy as np
# Connect to the Gazebo plugin's Robot Raconteur service on this machine.
server=RRN.ConnectService('rr+tcp://localhost:11346/?service=GazeboServer')
# Fetch the "default" world and list its light sources.
w=server.get_worlds('default')
print(w.light_names)
# Grab the "sun" light and print its current diffuse color (a/r/g/b floats).
sun=w.get_lights('sun')
color=sun.diffuse_color
print(str(color[0]["a"]) + " " + str(color[0]["r"]) + " " + str(color[0]["g"]) + " " + str(color[0]["b"]))
# Build a ColorRGBAf named-array value and set the light to opaque green.
# (Color changes apply to camera sensors; gzclient does not show them.)
color_dtype=RRN.GetNamedArrayDType('com.robotraconteur.color.ColorRGBAf',server)
color2=np.zeros((1,),dtype=color_dtype)
color2["a"]=1.0
color2["r"]=0.0
color2["g"]=1.0
color2["b"]=0.0
sun.diffuse_color=color2
|
nilq/baby-python
|
python
|
from uuid import uuid4
from sqlalchemy import Column, String, Boolean, ForeignKey
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship, backref
from .db import Base
class User(Base):
    """Application user; owns zero or more dogs."""

    __tablename__ = "user"

    # Client-generated UUID primary key (default=uuid4).
    id = Column(UUID(as_uuid=True), primary_key=True, index=True, default=uuid4)
    name = Column(String)
    lastname = Column(String)
    # Unique, indexed login/contact address.
    email = Column(String, unique=True, index=True)
    # Dogs owned by this user; deleting the user deletes their dogs.
    # NOTE(review): Dog.user also declares backref "dogs_user" over the same
    # FK — two overlapping relationships; confirm both are intended.
    dogs = relationship("Dog", cascade="all,delete")
class Dog(Base):
    """A dog, optionally linked to an owning user."""

    __tablename__ = "dog"

    # Client-generated UUID primary key (default=uuid4).
    id = Column(UUID(as_uuid=True), primary_key=True, index=True, default=uuid4)
    name = Column(String)
    # Picture URL/path.
    picture = Column(String)
    # NOTE(review): stored as String rather than DateTime — confirm intended.
    create_date = Column(String)
    is_adopted = Column(Boolean)
    user_id = Column(UUID(as_uuid=True), ForeignKey("user.id"))
    # Owning user; backref "dogs_user" exposes the reverse collection.
    # NOTE(review): overlaps with User.dogs on the same FK — confirm.
    user = relationship("User", backref=backref("dogs_user", cascade="all,delete"))
|
nilq/baby-python
|
python
|
# encoding: utf-8
"""
keepalive.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message import Message
# =================================================================== KeepAlive
#
class KeepAlive (Message):
    """BGP KEEPALIVE message (type 4).

    Carries no body: it is just the common BGP header, sent to refresh the
    peer's hold timer.
    """
    ID = Message.CODE.KEEPALIVE
    TYPE = chr(Message.CODE.KEEPALIVE)

    def message (self):
        # A KEEPALIVE is the common header with an empty payload.
        return self._message('')

    def __str__ (self):
        return "KEEPALIVE"

    @classmethod
    def unpack_message (cls, data, negotiated):  # pylint: disable=W0613
        # XXX: FIXME: raise Notify if data has something
        # (a conforming KEEPALIVE must have an empty body)
        return cls()
|
nilq/baby-python
|
python
|
import pandas as pd
import os
from IGTD_Functions import min_max_transform, table_to_image
num_row = 30  # Number of pixel rows in image representation
num_col = 30  # Number of pixel columns in image representation
num = num_row * num_col  # Number of features to be included for analysis, which is also the total number of pixels in image representation
save_image_size = 3  # Size of pictures (in inches) saved during the execution of IGTD algorithm.
max_step = 10000  # The maximum number of iterations to run the IGTD algorithm, if it does not converge.
val_step = 300  # The number of iterations for determining algorithm convergence. If the error reduction rate
                # is smaller than a pre-set threshold for val_step iterations, the algorithm converges.

# Import the example data and linearly scale each feature so that its minimum and maximum values are 0 and 1, respectively.
data = pd.read_csv('../Data/Data.txt', low_memory=False, sep='\t', engine='c', na_values=['na', '-', ''],
                   header=0, index_col=0)
# Keep only the first num_row * num_col features so each feature maps to one pixel.
data = data.iloc[:, :num]
norm_data = min_max_transform(data.values)
norm_data = pd.DataFrame(norm_data, columns=data.columns, index=data.index)

# Run the IGTD algorithm using (1) the Euclidean distance for calculating pairwise feature distances and pairwise pixel
# distances and (2) the absolute function for evaluating the difference between the feature distance ranking matrix and
# the pixel distance ranking matrix. Save the result in Test_1 folder.
fea_dist_method = 'Euclidean'
image_dist_method = 'Euclidean'
error = 'abs'
result_dir = '../Results/Test_1'
os.makedirs(name=result_dir, exist_ok=True)
table_to_image(norm_data, [num_row, num_col], fea_dist_method, image_dist_method, save_image_size,
               max_step, val_step, result_dir, error)

# Run the IGTD algorithm using (1) the Pearson correlation coefficient for calculating pairwise feature distances,
# (2) the Manhattan distance for calculating pairwise pixel distances, and (3) the square function for evaluating
# the difference between the feature distance ranking matrix and the pixel distance ranking matrix.
# Save the result in Test_2 folder.
fea_dist_method = 'Pearson'
image_dist_method = 'Manhattan'
error = 'squared'
result_dir = '../Results/Test_2'
os.makedirs(name=result_dir, exist_ok=True)
table_to_image(norm_data, [num_row, num_col], fea_dist_method, image_dist_method, save_image_size,
               max_step, val_step, result_dir, error)
|
nilq/baby-python
|
python
|
import json
import os
from typing import List, Dict, Any
from .._types import TEST_SCHEMA
class TestSchema:
    """In-memory schema describing which tests to run against an endpoint.

    Maps request paths to lists of test definitions and serialises the
    whole structure to/from JSON on disk.
    """

    _endpoint_url: str
    _paths: Dict[str, List[Any]]

    def __init__(self, endpoint_url: str) -> None:
        self._endpoint_url = endpoint_url
        self._paths = {}

    def add_tests(self, path: str, tests: List[Any]) -> None:
        """Register *tests* to be executed against *path*."""
        self._paths[path] = tests

    def to_json(self) -> TEST_SCHEMA:
        """Serialise to the plain-dict test-schema structure."""
        return {"endpoint_url": self._endpoint_url, "paths": self._paths}

    def save(self, path: str) -> None:
        """Write this schema to *path* as JSON."""
        with open(path, "w") as fp:
            json.dump(self.to_json(), fp)

    @staticmethod
    def load(path: str) -> "TestSchema":
        """Read, validate and reconstruct a schema stored at *path*."""
        assert os.path.exists(path), f"Test schema not found at {path}"
        with open(path) as fp:
            raw = json.load(fp)
        TestSchema.validate_test_schema(raw)
        loaded = TestSchema(raw.get("endpoint_url"))
        loaded._paths = raw.get("paths")
        return loaded

    @staticmethod
    def validate_test_schema(test_schema: TEST_SCHEMA) -> bool:
        """Return True if well formed; raise ValueError otherwise."""
        well_formed = (
            type(test_schema) is dict
            and type(test_schema.get("endpoint_url")) is str
            and type(test_schema.get("paths")) is dict
        )
        if not well_formed:
            raise ValueError("Invalid Test Schema Provided")
        return True

    @property
    def endpoint_url(self) -> str:
        # read-only view of the target endpoint URL
        return self._endpoint_url

    @property
    def paths(self) -> Dict[str, List[Any]]:
        # read-only view of the path -> tests mapping
        return self._paths
|
nilq/baby-python
|
python
|
# Copyright (c) 2019 ISciences, LLC.
# All rights reserved.
#
# WSIM is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from typing import List
from wsim_workflow.step import Step
from forcing.leaky_bucket import LeakyBucket
from forcing.nmme import NMMEForecast
def get_producing_step(target: str, steps: List[Step]) -> Step:
    # Return the first step that lists *target* among its build targets;
    # raises IndexError if no step produces it.
    return [s for s in steps if target in s.targets][0]
class TestNMMEConfig(unittest.TestCase):
    """Checks the one-month offset between WSIM 'yearmon' labels and NMME
    forecast reference times in the NMMEForecast path/step generators."""

    # scratch paths used to construct the forecast objects (never written to)
    source = '/tmp/source'
    derived = '/tmp/derived'

    def test_model_iteration_correct(self):
        # WSIM's "yearmon" variable is based on the last month of observed data available.
        # In other words, the "201901" model iteration is run in February 2019 using observed
        # data through the end of January 2019. This is different from the "reference time" used
        # in NMME files, which refers to the month in which the forecast was generated. A
        # confusing result of this offset is that we use the "201902" NMME data to produce
        # the "201901" WSIM run. This offset is handled by the NMME path generator, since
        # other parts of the code have no reason to know about this.
        observed = LeakyBucket(self.source)
        nmme = NMMEForecast(self.source, self.derived, observed, 'Model3', 1969, 2008)
        params = {
            'yearmon': '201901',
            'target': '201904',
            'member': '8'
        }
        raw_fcst = nmme.forecast_raw(**params).split('::')[0]
        # the raw forecast file uses the WSIM month, 201901
        self.assertTrue(raw_fcst.endswith('model3_201901_trgt201904_fcst8.nc'))
        # and its dependencies use the NMME month, 201902
        anom_to_raw = get_producing_step(raw_fcst, nmme.prep_steps(**params))
        self.assertIn(os.path.join(nmme.model_dir(), 'clim', 'Model3.prate.02.mon.clim.nc'), anom_to_raw.dependencies)
        self.assertIn(os.path.join(nmme.model_dir(), 'clim', 'Model3.tmp2m.02.mon.clim.nc'), anom_to_raw.dependencies)
        self.assertIn(os.path.join(nmme.model_dir(), 'raw_anom', 'nmme_201902', 'Model3.tmp2m.201902.anom.nc'),
                      anom_to_raw.dependencies)
        self.assertIn(os.path.join(nmme.model_dir(), 'raw_anom', 'nmme_201902', 'Model3.prate.201902.anom.nc'),
                      anom_to_raw.dependencies)

    def test_hindcast_lead(self):
        # This test checks another consequence of the offset between WSIM data version and
        # NMME forecast reference times.
        observed = LeakyBucket(self.source)
        nmme = NMMEForecast(self.source, self.derived, observed, 'Model3', 1969, 2008)
        fit_command = nmme.compute_fit_hindcast(varname='Pr', month=9, lead=4)[0].commands[0]
        # the --lead argument passed to the fit command should be one less
        # than the requested lead, reflecting the one-month offset
        lead_arg = fit_command.index('--lead') + 1
        self.assertEqual(fit_command[lead_arg], '3')
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.functional as tf
from torch.nn.modules.activation import ReLU
from models.m1layers_warpgan.conv2d import CustomConv2d
class StyleController(nn.Module):
    """
    Style Controller network.

    Maps a latent style vector to per-layer modulation coefficients:
    a (beta, gamma) pair, each of shape (batch_size, 4 * k, 1, 1).
    """
    def __init__(self, args):
        """
        Style Controller Network

        :param args: namespace providing
            in_batch   -- number of examples in a batch (used when sampling
                          a random style in forward)
            k          -- channel multiplier; outputs have 4 * k channels
            style_size -- dimension of the style vectors
            device     -- device on which random styles are sampled
        """
        super().__init__()
        # unpack input parameters from args
        self.batch_size = args.in_batch
        self.k = args.k
        self.style_size = args.style_size
        self.device = args.device
        # inp: (in_batch, input_size)
        # out: (in_batch, 128)
        self.linears = nn.Sequential(
            # inp: (in_batch, input_size)
            # out: (in_batch, 128)
            nn.Linear(self.style_size, 128),
            # inp: (in_batch, 128)
            # out: (in_batch, 128)
            nn.LayerNorm(128),
            # inp: (in_batch, 128)
            # out: (in_batch, 128)
            nn.ReLU(),
            # inp: (in_batch, 128)
            # out: (in_batch, 128)
            nn.Linear(128, 128),
            # inp: (in_batch, 128)
            # out: (in_batch, 128)
            nn.LayerNorm(128),
            # inp: (in_batch, 128)
            # out: (in_batch, 128)
            nn.ReLU(),
        )
        # inp: (in_batch, 128)
        # out: (in_batch, 4 * k)
        self.linear_gamma = nn.Linear(128, 4 * self.k, bias = True)
        # inp: (in_batch, 128)
        # out: (in_batch, 4 * k)
        self.linear_beta = nn.Linear(128, 4 * self.k, bias = True)
        # initialize all weights for module
        self.initialize_weights()
    def forward(self, x) -> tuple:
        """
        Forward pass for the Style Controller.

        When *x* is None, a random style batch is sampled from a standard
        normal on self.device.

        :param x: style encodings, shape (batch_size, style_size), or None
        :return: (beta, gamma) tuple; each has shape (batch_size, 4 * k, 1, 1)
        """
        if x is None:
            # no style provided: sample one per batch element
            x = torch.randn((self.batch_size, self.style_size)).to(self.device)
        # inp: (batch_size, style_size)
        # out: (batch_size, 128)
        out = self.linears(x)
        # inp: (batch_size, 128)
        # out: (batch_size, 4 * k)
        gamma = self.linear_gamma(out)
        # inp: (batch_size, 4 * k)
        # out: (batch_size, 4 * k, 1, 1)
        gamma = gamma.view([-1, 4 * self.k, 1, 1])
        # inp: (batch_size, 128)
        # out: (batch_size, 4 * k)
        beta = self.linear_beta(out)
        # inp: (batch_size, 4 * k)
        # out: (batch_size, 4 * k, 1, 1)
        beta = beta.view([-1, 4 * self.k, 1, 1])
        return beta, gamma
    def initialize_weights(self) -> None:
        """
        Initialize weights of modules.

        Applies Kaiming-uniform init to every Linear weight and zeros its
        bias; LayerNorm modules keep their PyTorch defaults.
        """
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
|
nilq/baby-python
|
python
|
from multiprocessing import Process
import multiprocessing as mp
import time
class Worker(Process):
    """Worker process that pulls tasks off a queue and publishes results."""

    def __init__(self, worker_idx, task_queue, result_queue, debug_prints=False):
        # call the Process constructor
        Process.__init__(self)
        self.worker_idx = worker_idx
        # queues for work to be done and for completed work
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.debug_prints = debug_prints

    def run_task(self, task):
        """Execute a single task object and return its result."""
        return task()

    def run(self):
        """Main loop: consume tasks until a poison pill (None) arrives."""
        if self.debug_prints:
            proc = mp.current_process()
            print("Worker process started as name: {}; PID: {}\n".format(proc.name,
                                                                         proc.pid))
        while True:
            task_idx, task = self.task_queue.get()
            if task is None:
                # poison pill: acknowledge it on the joinable queue and exit
                if self.debug_prints:
                    print('Worker: {}; received {} {}: FINISHED'.format(
                        self.name, task_idx, task))
                self.task_queue.task_done()
                return
            if self.debug_prints:
                print('Worker: {}; task_idx : {}; args : {} '.format(
                    self.name, task_idx, task.args))
            # run the task and time it
            start = time.time()
            answer = self.run_task(task)
            task_time = time.time() - start
            if self.debug_prints:
                print('Worker: {}; task_idx : {}; COMPLETED in {} s'.format(
                    self.name, task_idx, task_time))
            # (joinable queue) mark the task complete, then publish the
            # result tagged with its task index so results can be re-ordered
            self.task_queue.task_done()
            self.result_queue.put((task_idx, self.worker_idx, task_time, answer))
class Task(object):
    """Callable wrapper binding a function to fixed positional arguments."""

    def __init__(self, func, *args):
        self.args = args
        self.func = func

    def __call__(self, **kwargs):
        # Invoke the wrapped function with the stored positional args plus
        # any worker-supplied keyword arguments.
        return self.func(*self.args, **kwargs)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/25 1:39
# @Author : WieAngeal
# @File : ycyl_hander.py
# @Software: PyCharm
from flask import Blueprint, flash, render_template, session, redirect, request
from ..common import (ConsoleLogger, make_response, HttpError,
relative_path, multi_dict_parser2dict)
from flask import request
from ..models import Hosinfo
from ..services import DBService
from ..common import email, auth
import ast
from flask_login import login_required
# Module-level logger tagged with this file's relative path.
logger = ConsoleLogger(relative_path(__file__))
# Database access layer bound to the Hosinfo model.
ycyl_service = DBService(model=Hosinfo)
# Blueprint collecting every /ycyl route defined below.
ycyl = Blueprint('ycyl', __name__, url_prefix='/ycyl')
@ycyl.route('/', methods=["GET", "POST"])
@login_required
def home():
    """Render the main page for the user identified by the request's token."""
    token = request.args.get('token')
    username = auth.verify_auth_token(token)['username']
    return render_template("ctyxy.html", user=username)
@ycyl.route('/api/register', methods=["POST"])
def register():
    """
    Register a new hospital-info record.

    The client posts a Python-dict literal in form field ``data``; the record
    is assigned the next free id, saved, and a notification email is sent.
    Returns the saved row as JSON.
    """
    if request.method == 'POST':
        data = request.form.get('data')
        # the payload is a Python dict literal; literal_eval parses it safely
        hosinfo = ast.literal_eval(data)
        email.send_mail(title='第一份flask_email测试邮件',
                        to='17693186908@126.com',
                        msg_html='''<h2>这是我的个人博客</h2>
<hr />
<h3>东风破</h3>
<h5><font color="blue" size="18px">周杰伦</font></h5>
<p>一盏离愁 孤灯伫立在窗口</p>
<p>我在门后 假装你人还没走</p>
<p>旧地如重游月 圆更寂寞
<p>夜半清醒的烛火 不忍苛责我</p>''',
                        attachfiles=None
                        )
        # Allocate the next primary key: current max id (or 0 for an empty
        # table) plus one. Renamed from ``id`` to avoid shadowing the builtin;
        # the unused ``Attachments`` local was removed (attachfiles is None).
        max_id = ycyl_service.max(Hosinfo.id)
        if max_id is None:
            max_id = 0
        hosinfo['id'] = max_id + 1
        obj = ycyl_service.save(Hosinfo(**hosinfo))
        return make_response(data=obj.json())
@ycyl.route('/api/count', methods=["POST", "GET"])
def count():
    """Return the current maximum Hosinfo id for GET requests."""
    if request.method == 'GET':
        max_num = ycyl_service.max(Hosinfo.id)
        return make_response(data=max_num, e="查询总数成功。")
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
def index(request):
    """
    Scrape account balances from etherscan.io (pages 1-4) with Selenium
    and render them into the chart template.

    Fixes: the Chrome driver is now always released via try/finally
    (previously leaked on every request); the unused ``headers`` and
    ``options`` locals were removed.
    """
    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
    data = []
    try:
        for page in range(1, 5):
            driver.get("https://etherscan.io/accounts/" + str(page))
            soup = BeautifulSoup(driver.page_source, "lxml")
            # the wanted value sits in every 6th <td>, starting at index 4
            target = 4
            for i, td in enumerate(soup.find_all("td")):
                if i == target:
                    data.append(td.get_text())
                    target += 6
    finally:
        # always quit the browser, even if scraping fails mid-way
        driver.quit()
    context = {
        "data": data
    }
    return render(request, 'chartapp/index.html', context)
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class CartItem(models.Model):
    """A single product variant, with quantity, placed in a shopping cart."""
    # owning cart; reverse accessor is cart.cartitems
    cart = models.ForeignKey('carts.Cart', verbose_name=_('cart'), related_name='cartitems')
    # the concrete product variant being purchased
    variant = models.ForeignKey('products.Variant', verbose_name=_('variant'))
    # number of units; defaults to a single item
    quantity = models.PositiveIntegerField(_('quantity'), default=1)
    # timestamps maintained automatically by Django on insert/update
    created = models.DateTimeField(auto_now_add=True, editable=False)
    updated = models.DateTimeField(auto_now=True, editable=False)

    def __str__(self):
        return '%s - %s' % (self.cart, self.variant)

    class Meta:
        app_label = 'carts'
        verbose_name = _('cart item')
        verbose_name_plural = _('cart items')
        # newest items first
        ordering = ('-created',)
|
nilq/baby-python
|
python
|
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import six
import time
from acos_client import errors as acos_errors
from acos_client.v21 import base
class Action(base.BaseV21):
    """System-level actions (write memory, reload, ...) against an ACOS v2.1 device."""

    def write_memory(self):
        # Persist the running configuration. InvalidPartitionParameter is
        # deliberately swallowed here; any other ACOS error propagates.
        try:
            self._get("system.action.write_memory")
        except acos_errors.InvalidPartitionParameter:
            pass

    def reboot(self, **kwargs):
        # Intentionally disabled; the underlying call is kept for reference.
        raise NotImplementedError
        # return self._post("system.action.reboot", **kwargs)

    def reload(self, write_memory=False, **kwargs):
        # write_memory param is required but no matter what value is passed
        # it will ALWAYS save pending changes
        write_memory = 1 if write_memory else 0
        return self._post("system.action.reload",
                          params={"write_memory": write_memory}, **kwargs)

    def activate_and_write(self, partition, **kwargs):
        # Issue "write memory" through the CLI deploy endpoint, optionally
        # switching to ``partition`` first. The device sometimes reports a
        # spurious failure, so retry up to 5 times before surfacing it.
        write_cmd = "write memory\r\n"
        if partition is not None:
            write_cmd = "active-partition {0}\r\n{1}".format(partition, write_cmd)
        last_e = None
        for i in six.moves.range(0, 5):
            # Request raises an exception when the "maybe error" is returned.
            try:
                return self._request("POST", "cli.deploy", params=None, payload=write_cmd, **kwargs)
            except acos_errors.ACOSException as e:
                last_e = e
                # Catch 'might fail error'
                if e.msg.startswith("write memory") or '2039 ' in e.msg:
                    time.sleep(1)
                    continue
                raise e
        # all retries exhausted with the retryable error: raise the last one
        if last_e is not None:
            raise last_e
|
nilq/baby-python
|
python
|
import json
import os
import pathlib
import re
import datetime
# Regex patterns extracting one named field each from a chia plotter job
# log; every pattern captures exactly one group.
job_log_search_dict = {
    'temp_dir1': r"Starting plotting progress into temporary dirs: (.+) and .+\n",
    'temp_dir2': r"Starting plotting progress into temporary dirs: .+ and (.+)\n",
    'final_dir': r"Final Directory is: (.+)\n",
    'plot_id': r"ID: (.+)\n",
    'process_id': r"Process ID is: (.+)\n",
    'phase1_time': r"Time for phase 1 = (\d+\.\d+) seconds\.",
    'phase2_time': r"Time for phase 2 = (\d+\.\d+) seconds\.",
    'phase3_time': r"Time for phase 3 = (\d+\.\d+) seconds\.",
    'phase4_time': r"Time for phase 4 = (\d+\.\d+) seconds\.",
    'total_time': r"Total time = (\d+\.\d+) seconds\.",
    'copy_time': r"Copy time = (\d+\.\d+) seconds\.",
    'plot_size': r"Plot size is: (\d+)\n",
    'buffer_size': r"Buffer size is: (.+)\n",
    'n_buckets': r"Using (\d+) buckets\n",
    'n_threads': r"Using (\d+) threads of stripe size \d+\n",
    'stripe_size': r"Using \d+ threads of stripe size (\d+)\n",
}

# Load config.json from the repository root (one level above this file).
config_fn = pathlib.Path(__file__).parent / ".." / "config.json"
with open(config_fn) as f:
    config_dict = json.load(f)

# Directory holding the plotter job logs, taken from the config file.
job_log_dir = pathlib.Path(config_dict['job_log_dir'])
def get_all_job_files(job_dir=None):
    """
    Map every ``*.log`` file in ``job_dir`` to the start time parsed from
    its filename.

    Parameters
    ----------
    job_dir : pathlib.Path, optional
        Directory to scan; defaults to the configured ``job_log_dir``.

    Returns
    -------
    dict
        ``pathlib.Path -> datetime.datetime``. Files whose name lacks a
        ``YYYY-MM-DD_HH_MM_SS`` stamp are reported and skipped.
    """
    if job_dir is None:
        job_dir = job_log_dir
    job_files = {}
    for job_log_file in job_dir.glob("*.log"):
        # raw string so \d is a regex class, not an (invalid) string escape
        match = re.search(r"(\d{4})-(\d{2})-\d{2}_\d{2}_\d{2}_\d{2}",
                          job_log_file.name)
        if not match:
            print(f"JOB NAME READ ERROR: {job_log_file}")
            continue
        file_time = datetime.datetime.strptime(match.group(), "%Y-%m-%d_%H_%M_%S")
        job_files[job_log_file] = file_time
    return job_files
def read_job_log(path):
    """
    Parse a single chia plotter log into a dict of extracted fields.

    Every pattern in ``job_log_search_dict`` that matches contributes its
    first capture group; a ``'status'`` key is always added with one of
    ``'complete'``, ``'error'`` or ``'in_progress'``.
    """
    with open(path) as fh:
        contents = fh.read()

    job_data = {}
    for key, pattern in job_log_search_dict.items():
        found = re.search(pattern, contents)
        if found:
            job_data[key] = found.group(1)

    # Overall status: a finished plot wins, then any line mentioning
    # "error" (case-insensitive), otherwise the job is still running.
    if re.search("Created a total of 1 new plots", contents):
        job_data['status'] = "complete"
    elif re.search("error", contents, flags=re.IGNORECASE):
        job_data['status'] = "error"
    else:
        job_data['status'] = "in_progress"
    return job_data
if __name__ == "__main__":
for file, t in get_all_job_files().items():
data = read_job_log(file)
data['time'] = t
print(t, data)
|
nilq/baby-python
|
python
|
from ddt import ddt, data
from rest_framework.test import APITestCase
@ddt
class TestCookieRequest(APITestCase):
    """CSRF-cookie behaviour: unsafe verbs rotate the token, safe verbs set none."""

    @data('put', 'patch', 'post', 'delete')
    def test_generate_csrf_token_for_each_not_safe_method_request(self, http_verb):
        # Two consecutive unsafe requests must receive different CSRF tokens.
        request_method = getattr(self.client, http_verb)
        first_response_csrf = request_method('/').cookies['csrftoken']._value
        second_response_csrf = request_method('/').cookies['csrftoken']._value
        # assertNotEquals is a deprecated alias; use assertNotEqual
        self.assertNotEqual(first_response_csrf, second_response_csrf)

    @data('get', 'head', 'options', 'trace')
    def test_not_generate_csrf_token_for_safe_method_request(self, http_verb):
        # Safe verbs must not set a CSRF cookie at all.
        request_method = getattr(self.client, http_verb)
        self.assertNotIn('csrftoken', request_method('/').cookies)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'error.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_errorwin(object):
    # Auto-generated by pyuic5 from error.ui — do not edit by hand; regenerate
    # from the .ui file instead (see the header warning at the top of the file).
    def setupUi(self, errorwin):
        # Build the widget tree: a read-only text browser for the error
        # message and an OK button, stacked in a grid layout.
        errorwin.setObjectName("errorwin")
        errorwin.resize(248, 164)
        self.centralwidget = QtWidgets.QWidget(errorwin)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.text_error = QtWidgets.QTextBrowser(self.centralwidget)
        self.text_error.setObjectName("text_error")
        self.gridLayout.addWidget(self.text_error, 0, 0, 1, 1)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setObjectName("pushButton")
        self.gridLayout.addWidget(self.pushButton, 1, 0, 1, 1)
        errorwin.setCentralWidget(self.centralwidget)

        self.retranslateUi(errorwin)
        QtCore.QMetaObject.connectSlotsByName(errorwin)

    def retranslateUi(self, errorwin):
        # Install the translatable UI strings (window title is Russian "error").
        _translate = QtCore.QCoreApplication.translate
        errorwin.setWindowTitle(_translate("errorwin", "ошибка"))
        self.text_error.setHtml(_translate("errorwin", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
        self.pushButton.setText(_translate("errorwin", "ok"))
|
nilq/baby-python
|
python
|
import ops
import iopc
TARBALL_FILE="clutter-1.26.0.tar.xz"
TARBALL_DIR="clutter-1.26.0"
INSTALL_DIR="clutter-bin"
pkg_path = ""
output_dir = ""
tarball_pkg = ""
tarball_dir = ""
install_dir = ""
install_tmp_dir = ""
cc_host = ""
tmp_include_dir = ""
dst_include_dir = ""
dst_lib_dir = ""
def set_global(args):
    # Populate the module-level build paths from the framework's args dict.
    # Called at the start of every MAIN_* hook.
    global pkg_path
    global output_dir
    global tarball_pkg
    global install_dir
    global install_tmp_dir
    global tarball_dir
    global cc_host
    global tmp_include_dir
    global dst_include_dir
    global dst_lib_dir
    global dst_pkgconfig_dir
    pkg_path = args["pkg_path"]
    output_dir = args["output_path"]
    tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)
    install_dir = ops.path_join(output_dir, INSTALL_DIR)
    install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + "-tmp")
    tarball_dir = ops.path_join(output_dir, TARBALL_DIR)
    # CROSS_COMPILE ends with a trailing '-'; strip it to get the host triple
    cc_host_str = ops.getEnv("CROSS_COMPILE")
    cc_host = cc_host_str[:len(cc_host_str) - 1]
    tmp_include_dir = ops.path_join(output_dir, ops.path_join("include",args["pkg_name"]))
    dst_include_dir = ops.path_join("include",args["pkg_name"])
    dst_lib_dir = ops.path_join(install_dir, "lib")
    dst_pkgconfig_dir = ops.path_join(ops.path_join(output_dir, "pkgconfig"), "pkgconfig")
def MAIN_ENV(args):
    # Export the cross-compile environment used by the configure/build hooks.
    set_global(args)
    ops.exportEnv(ops.setEnv("CC", ops.getEnv("CROSS_COMPILE") + "gcc"))
    ops.exportEnv(ops.setEnv("CXX", ops.getEnv("CROSS_COMPILE") + "g++"))
    ops.exportEnv(ops.setEnv("CROSS", ops.getEnv("CROSS_COMPILE")))
    # staged install root: `make install` writes here, MAIN_BUILD copies out
    ops.exportEnv(ops.setEnv("DESTDIR", install_tmp_dir))
    ops.exportEnv(ops.setEnv("PKG_CONFIG_LIBDIR", ops.path_join(iopc.getSdkPath(), "pkgconfig")))
    ops.exportEnv(ops.setEnv("PKG_CONFIG_SYSROOT_DIR", iopc.getSdkPath()))
    ops.exportEnv(ops.addEnv("PATH", ops.path_join(pkg_path, "host_utils")))
    # compiler/linker search paths rooted at the toolchain sysroot and SDK
    cc_sysroot = ops.getEnv("CC_SYSROOT")
    cflags = ""
    cflags += " -I" + ops.path_join(cc_sysroot, 'usr/include')
    #cflags += " -I" + ops.path_join(iopc.getSdkPath(), 'usr/include/libexpat')
    ldflags = ""
    ldflags += " -L" + ops.path_join(cc_sysroot, 'lib')
    ldflags += " -L" + ops.path_join(cc_sysroot, 'usr/lib')
    ldflags += " -L" + ops.path_join(iopc.getSdkPath(), 'lib')
    #libs = ""
    #libs += " -lffi -lxml2 -lexpat"
    ops.exportEnv(ops.setEnv("LDFLAGS", ldflags))
    ops.exportEnv(ops.setEnv("CFLAGS", cflags))
    #ops.exportEnv(ops.setEnv("LIBS", libs))
    return False
def MAIN_EXTRACT(args):
    """Unpack the source tarball into the output directory."""
    set_global(args)
    ops.unTarXz(tarball_pkg, output_dir)
    return True
def MAIN_PATCH(args, patch_group_name):
    # Apply every patch in the group to the unpacked source tree; abort the
    # build if any patch fails to apply.
    import sys  # bug fix: sys was used below but never imported in this file

    set_global(args)
    for patch in iopc.get_patch_list(pkg_path, patch_group_name):
        if not iopc.apply_patch(tarball_dir, patch):
            sys.exit(1)
    return True
def MAIN_CONFIGURE(args):
    # Run ./configure with cross-compile and clutter-specific switches.
    # NOTE(review): the triple-quoted block below is an inert string literal
    # preserving previously used flags — kept as-is for reference.
    set_global(args)
    print ops.getEnv("PKG_CONFIG_PATH")
    extra_conf = []
    extra_conf.append("--host=" + cc_host)
    extra_conf.append("--enable-egl-backend=yes")
    extra_conf.append("--enable-wayland-compositor=yes")
    extra_conf.append("--disable-glibtest")
    '''
    includes = '-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libglib/glib-2.0')
    includes += ' -I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libglib')
    extra_conf.append('CFLAGS=' + includes)
    extra_conf.append('GLIB_CFLAGS=' + includes)
    libs = ' -lglib-2.0 -lgobject-2.0 -lgio-2.0 -lgthread-2.0 -lgmodule-2.0 -lpthread -lz -lffi -lpcre'
    extra_conf.append('LIBS=-L' + ops.path_join(iopc.getSdkPath(), 'lib') + libs)
    extra_conf.append('GLIB_LIBS=-L' + ops.path_join(iopc.getSdkPath(), 'lib') + libs)
    extra_conf.append("--disable-documentation")
    extra_conf.append('FFI_CFLAGS="-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libffi') + '"')
    extra_conf.append('FFI_LIBS="-L' + ops.path_join(iopc.getSdkPath(), 'lib') + ' -lffi"')
    extra_conf.append('EXPAT_CFLAGS="-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libexpat') + '"')
    extra_conf.append('EXPAT_LIBS="-L' + ops.path_join(iopc.getSdkPath(), 'lib') + ' -lexpat"')
    extra_conf.append('LIBXML_CFLAGS="-I' + ops.path_join(iopc.getSdkPath(), 'usr/include/libxml2') + '"')
    extra_conf.append('LIBXML_LIBS="-L' + ops.path_join(iopc.getSdkPath(), 'lib') + ' -lxml2"')
    '''
    iopc.configure(tarball_dir, extra_conf)
    return True
def MAIN_BUILD(args):
set_global(args)
print "AAAA" + ops.getEnv("PATH")
ops.mkdir(install_dir)
ops.mkdir(install_tmp_dir)
iopc.make(tarball_dir)
iopc.make_install(tarball_dir)
ops.mkdir(install_dir)
ops.mkdir(dst_lib_dir)
libwayland_client = "libwayland-client.so.0.3.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_client), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_client, "libwayland-client.so.0.3")
ops.ln(dst_lib_dir, libwayland_client, "libwayland-client.so.0")
ops.ln(dst_lib_dir, libwayland_client, "libwayland-client.so")
libwayland_cursor = "libwayland-cursor.so.0.0.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_cursor), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_cursor, "libwayland-cursor.so.0.0")
ops.ln(dst_lib_dir, libwayland_cursor, "libwayland-cursor.so.0")
ops.ln(dst_lib_dir, libwayland_cursor, "libwayland-cursor.so")
libwayland_egl = "libwayland-egl.so.1.0.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_egl), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_egl, "libwayland-egl.so.1.0")
ops.ln(dst_lib_dir, libwayland_egl, "libwayland-egl.so.1")
ops.ln(dst_lib_dir, libwayland_egl, "libwayland-egl.so")
libwayland_server = "libwayland-server.so.0.1.0"
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/" + libwayland_server), dst_lib_dir)
ops.ln(dst_lib_dir, libwayland_server, "libwayland-server.so.0.1")
ops.ln(dst_lib_dir, libwayland_server, "libwayland-server.so.0")
ops.ln(dst_lib_dir, libwayland_server, "libwayland-server.so")
ops.mkdir(tmp_include_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/include/."), tmp_include_dir)
ops.mkdir(dst_pkgconfig_dir)
ops.copyto(ops.path_join(install_tmp_dir, "usr/local/lib/pkgconfig/wayland-scanner.pc"), dst_pkgconfig_dir)
return False
def MAIN_INSTALL(args):
    # Register the staged libraries, headers, and pkg-config file with the SDK.
    set_global(args)
    iopc.installBin(args["pkg_name"], ops.path_join(ops.path_join(install_dir, "lib"), "."), "lib")
    iopc.installBin(args["pkg_name"], ops.path_join(tmp_include_dir, "."), dst_include_dir)
    iopc.installBin(args["pkg_name"], ops.path_join(dst_pkgconfig_dir, '.'), "pkgconfig")
    return False
def MAIN_SDKENV(args):
    # Publish this package's include path into the SDK-wide CFLAGS.
    set_global(args)
    cflags = ""
    cflags += " -I" + ops.path_join(iopc.getSdkPath(), 'usr/include/' + args["pkg_name"])
    iopc.add_includes(cflags)
    #libs = ""
    #libs += " -lcap"
    #iopc.add_libs(libs)
    return False
def MAIN_CLEAN_BUILD(args):
    # No package-specific clean step; returning False lets the framework proceed.
    set_global(args)
    return False
def MAIN(args):
    # Entry hook: only refreshes the module-level path globals.
    set_global(args)
|
nilq/baby-python
|
python
|
import numpy as np
def normalize_features(features):
    """
    Standardize features column-wise to zero mean and unit variance
    (z-score normalization), replacing the previous NotImplementedError stub.

    Parameters
    ----------
    features : array_like, shape (n_samples, n_features)
        Raw feature matrix; reductions are taken along axis 0.

    Returns
    -------
    numpy.ndarray
        Array of the same shape where each column has mean 0 and, for
        non-constant columns, standard deviation 1. Constant columns map
        to 0 rather than dividing by zero.
    """
    features = np.asarray(features, dtype=float)
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    # guard against division by zero for constant features
    std = np.where(std == 0, 1.0, std)
    return (features - mean) / std
|
nilq/baby-python
|
python
|
"""
Produces Fig. 11 of Johnson & Weinberg (2019), a 2-column by 2-row plot
showing the slow burst models. Star formation histories are shown in the top
left, [O/Fe]-[Fe/H] tracks in the top right, [O/Fe] and [Fe/H] against time
in the bottom left, and [O/Fe] against time in the bottom right.
"""
import visuals # visuals.py -> matplotlib subroutines in this directory
import matplotlib.pyplot as plt
import vice
import sys
import warnings
warnings.filterwarnings("ignore")
def setup_axes():
    """
    Sets up the 2x2 axis grid with the proper axis labels and ranges.

    Returns
    =======
    axes :: list
        The axes, indexable via axes[row number][column number]
    """
    # NOTE(review): the previous docstring also promised a returned list of
    # insets, but no insets are created — only the axes grid is returned.
    axes = visuals.subplots(2, 2, figsize = (14, 14))
    # labels and limits indexed [row][column] to mirror the axes grid
    xlabels = [["Time [Gyr]", "[Fe/H]"], ["Time [Gyr]", "Time [Gyr]"]]
    ylabels = [[r"$\dot{M}_*$ [M$_\odot$ yr$^{-1}$]", "[O/Fe]"],
        ["[X/H]", "[O/Fe]"]]
    xlims = [[[-1, 16], [-1.7, 0.2]], [[-1, 16], [-1, 16]]]
    ylims = [[[-1, 13], [0.0, 0.5]], [[-0.34, 0.14], [-0.1, 0.5]]]
    for i in range(2):
        for j in range(2):
            axes[i][j].set_xlabel(xlabels[i][j])
            axes[i][j].set_ylabel(ylabels[i][j])
            axes[i][j].set_xlim(xlims[i][j])
            axes[i][j].set_ylim(ylims[i][j])
    # fixed ticks for the [X/H] panel
    axes[1][0].yaxis.set_ticks([-0.3, -0.2, -0.1, 0.0, 0.1])
    return axes
def plot_history(axes, name, color, linestyle = '-'):
    """
    Plots the relevant information for a given history on the 2x2 axis grid

    Parameters
    ==========
    axes :: list
        The 2x2 list of matplotlib axis objects to plot on
    name :: str
        The name of the model to plot
    color :: str
        The name of the color to use in plotting the model
    linestyle :: str
        The matplotlib linestyle; with the default solid style the
        [O/Fe]-[Fe/H] track is also drawn and O/Fe are distinguished by
        dashed/solid lines in the [X/H] panel, otherwise both elements
        use ``linestyle``
    """
    hist = vice.history(name)
    # axes[0][0].plot(hist["time"], hist["ifr"], linestyle = '--',
    # c = visuals.colors()[color])
    # star formation rate vs time (top left)
    axes[0][0].plot(hist["time"], hist["sfr"], c = visuals.colors()[color],
        linestyle = linestyle)
    if linestyle == '-':
        # [O/Fe]-[Fe/H] track (top right) only for the solid-line models
        axes[0][1].plot(hist["[Fe/H]"], hist["[O/Fe]"],
            c = visuals.colors()[color], linestyle = linestyle)
        axes[1][0].plot(hist["time"], hist["[O/H]"], linestyle = '--',
            c = visuals.colors()[color])
        axes[1][0].plot(hist["time"], hist["[Fe/H]"], linestyle = '-',
            c = visuals.colors()[color])
    else:
        axes[1][0].plot(hist["time"], hist["[O/H]"], linestyle = linestyle,
            c = visuals.colors()[color])
        axes[1][0].plot(hist["time"], hist["[Fe/H]"], linestyle = linestyle,
            c = visuals.colors()[color])
    # [O/Fe] vs time (bottom right)
    axes[1][1].plot(hist["time"], hist["[O/Fe]"], c = visuals.colors()[color],
        linestyle = linestyle)
def draw_ofe_legend(ax):
    """
    Draw a legend distinguishing O (dashed) from Fe (solid) on the plot of
    [X/H] against time.

    Parameters
    ==========
    ax :: subplot
        The matplotlib axis object to put the legend on
    """
    # Plot two throwaway lines purely to generate legend handles, attach
    # the legend, then strip the dummy lines from the axis.
    dummies = []
    for label, ls in zip(["O", "Fe"], ['--', '-']):
        dummies.append(ax.plot([1, 2], [1, 2], c = visuals.colors()["black"],
            label = label, linestyle = ls)[0])
    ax.legend(loc = visuals.mpl_loc()["upper left"], frameon = False,
        bbox_to_anchor = (0.01, 0.99))
    for dummy in dummies:
        dummy.remove()
def main():
    """
    Produces the figure and saves it as a PDF at the path given by the
    first command-line argument.
    """
    plt.clf()
    axes = setup_axes()
    # fiducial (non-burst) models in dotted black for reference
    plot_history(axes, "../../simulations/episodic_infall", "black",
        linestyle = ':')
    plot_history(axes, "../../simulations/constant", "black",
        linestyle = ':')
    # slow-burst counterparts in color
    plot_history(axes, "../../simulations/slowburst_episodic_infall",
        "crimson")
    plot_history(axes, "../../simulations/slowburst_constant", "deepskyblue")
    draw_ofe_legend(axes[1][0])
    plt.tight_layout()
    plt.savefig(sys.argv[1])
    plt.clf()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
import cv2
# Load the image as grayscale (flag 0) and display it until a key press.
img = cv2.imread('images/cameraman.tif',0)
cv2.imshow("Image read in Python", img)
# mask to 8 bits so the key code compares consistently across platforms
k = cv2.waitKey(0) & 0xFF
if k == 27: # wait for ESC key to exit
    cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the base aperture classes.
"""
import abc
from copy import deepcopy
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as u
from .bounding_box import BoundingBox
from ._photometry_utils import (_handle_units, _prepare_photometry_data,
_validate_inputs)
from ..utils._wcs_helpers import _pixel_scale_angle_at_skycoord
__all__ = ['Aperture', 'SkyAperture', 'PixelAperture']
class Aperture(metaclass=abc.ABCMeta):
    """
    Abstract base class for all apertures.
    """
    # Names of the constructor parameters each subclass exposes; used by
    # __getitem__/__repr__/__str__/__eq__/copy to treat apertures generically.
    _params = ()
    # default (empty) positions; subclasses overwrite per instance
    positions = np.array(())
    theta = None

    def __len__(self):
        # length is the number of positions; undefined for a scalar aperture
        if self.isscalar:
            raise TypeError(f'A scalar {self.__class__.__name__!r} object '
                            'has no len()')
        return self.shape[0]

    def __getitem__(self, index):
        # Indexing an aperture slices only its positions; every other
        # parameter is carried over unchanged into a new instance.
        if self.isscalar:
            raise TypeError(f'A scalar {self.__class__.__name__!r} object '
                            'cannot be indexed')
        kwargs = dict()
        for param in self._params:
            if param == 'positions':
                # slice the positions array
                kwargs[param] = getattr(self, param)[index]
            else:
                kwargs[param] = getattr(self, param)
        return self.__class__(**kwargs)

    def __iter__(self):
        # yield one scalar-per-position aperture at a time
        for i in range(len(self)):
            yield self.__getitem__(i)

    def _positions_str(self, prefix=None):
        # Format positions for repr/str: ndarray text for pixel apertures,
        # SkyCoord repr for sky apertures.
        if isinstance(self, PixelAperture):
            return np.array2string(self.positions, separator=', ',
                                   prefix=prefix)
        elif isinstance(self, SkyAperture):
            return repr(self.positions)
        else:
            raise TypeError('Aperture must be a subclass of PixelAperture '
                            'or SkyAperture')

    def __repr__(self):
        prefix = f'{self.__class__.__name__}'
        cls_info = []
        for param in self._params:
            if param == 'positions':
                cls_info.append(self._positions_str(prefix))
            else:
                cls_info.append(f'{param}={getattr(self, param)}')
        cls_info = ', '.join(cls_info)
        return f'<{prefix}({cls_info})>'

    def __str__(self):
        cls_info = [('Aperture', self.__class__.__name__)]
        for param in self._params:
            if param == 'positions':
                prefix = 'positions'
                cls_info.append((prefix, self._positions_str(prefix + ': ')))
            else:
                cls_info.append((param, getattr(self, param)))
        fmt = [f'{key}: {val}' for key, val in cls_info]
        return '\n'.join(fmt)

    def __eq__(self, other):
        """
        Equality operator for `Aperture`.

        All Aperture properties are compared for strict equality except
        for Quantity parameters, which allow for different units if they
        are directly convertible.
        """
        if not isinstance(other, self.__class__):
            return False

        self_params = list(self._params)
        other_params = list(other._params)

        # check that both have identical parameters
        if self_params != other_params:
            return False

        # now check the parameter values
        # Note that Quantity comparisons allow for different units
        # if they directly convertible (e.g., 1. * u.deg == 60. * u.arcmin)
        try:
            for param in self_params:
                # np.any is used for SkyCoord array comparisons
                if np.any(getattr(self, param) != getattr(other, param)):
                    return False
        except TypeError:
            # TypeError is raised from SkyCoord comparison when they do
            # not have equivalent frames. Here return False instead of
            # the TypeError.
            return False

        return True

    def __ne__(self, other):
        """
        Inequality operator for `Aperture`.
        """
        return not (self == other)

    def copy(self):
        """
        Make an independent (deep) copy.
        """
        params_copy = {}
        for param in list(self._params):
            params_copy[param] = deepcopy(getattr(self, param))
        return self.__class__(**params_copy)

    @property
    def shape(self):
        """
        The shape of the instance.
        """
        # SkyCoord carries its own shape; pixel positions are (..., 2) so
        # the trailing coordinate axis is dropped
        if isinstance(self.positions, SkyCoord):
            return self.positions.shape
        else:
            return self.positions.shape[:-1]

    @property
    def isscalar(self):
        """
        Whether the instance is scalar (i.e., a single position).
        """
        return self.shape == ()
class PixelAperture(Aperture):
"""
Abstract base class for apertures defined in pixel coordinates.
"""
@property
def _default_patch_properties(self):
"""
A dictionary of default matplotlib.patches.Patch properties.
"""
mpl_params = dict()
# matplotlib.patches.Patch default is ``fill=True``
mpl_params['fill'] = False
return mpl_params
@staticmethod
def _translate_mask_mode(mode, subpixels, rectangle=False):
if mode not in ('center', 'subpixel', 'exact'):
raise ValueError(f'Invalid mask mode: {mode}')
if rectangle and mode == 'exact':
mode = 'subpixel'
subpixels = 32
if mode == 'subpixels':
if not isinstance(subpixels, int) or subpixels <= 0:
raise ValueError('subpixels must be a strictly positive '
'integer')
if mode == 'center':
use_exact = 0
subpixels = 1
elif mode == 'subpixel':
use_exact = 0
elif mode == 'exact':
use_exact = 1
subpixels = 1
return use_exact, subpixels
    @property
    @abc.abstractmethod
    def _xy_extents(self):
        """
        The (x, y) extents of the aperture measured from the center
        position.

        In other words, the (x, y) extents are half of the aperture
        minimal bounding box size in each dimension.
        """
        # abstract: each concrete aperture shape defines its own extents
        raise NotImplementedError('Needs to be implemented in a subclass.')
@property
def bbox(self):
"""
The minimal bounding box for the aperture.
If the aperture is scalar then a single
`~photutils.aperture.BoundingBox` is returned, otherwise a list
of `~photutils.aperture.BoundingBox` is returned.
"""
positions = np.atleast_2d(self.positions)
x_delta, y_delta = self._xy_extents
xmin = positions[:, 0] - x_delta
xmax = positions[:, 0] + x_delta
ymin = positions[:, 1] - y_delta
ymax = positions[:, 1] + y_delta
bboxes = [BoundingBox.from_float(x0, x1, y0, y1)
for x0, x1, y0, y1 in zip(xmin, xmax, ymin, ymax)]
if self.isscalar:
return bboxes[0]
else:
return bboxes
@property
def _centered_edges(self):
"""
A list of ``(xmin, xmax, ymin, ymax)`` tuples, one for each
position, of the pixel edges after recentering the aperture at
the origin.
These pixel edges are used by the low-level `photutils.geometry`
functions.
"""
edges = []
for position, bbox in zip(np.atleast_2d(self.positions),
np.atleast_1d(self.bbox)):
xmin = bbox.ixmin - 0.5 - position[0]
xmax = bbox.ixmax - 0.5 - position[0]
ymin = bbox.iymin - 0.5 - position[1]
ymax = bbox.iymax - 0.5 - position[1]
edges.append((xmin, xmax, ymin, ymax))
return edges
    @property
    def area(self):
        """
        The exact analytical area of the aperture shape.

        Returns
        -------
        area : float
            The aperture area.
        """
        # not abstract, but every concrete shape must override it
        raise NotImplementedError('Needs to be implemented in a subclass.')
    @abc.abstractmethod
    def to_mask(self, method='exact', subpixels=5):
        """
        Return a mask for the aperture.

        Parameters
        ----------
        method : {'exact', 'center', 'subpixel'}, optional
            The method used to determine the overlap of the aperture on
            the pixel grid. Not all options are available for all
            aperture types. Note that the more precise methods are
            generally slower. The following methods are available:

                * ``'exact'`` (default):
                  The exact fractional overlap of the aperture and
                  each pixel is calculated. The aperture weights will
                  contain values between 0 and 1.

                * ``'center'``:
                  A pixel is considered to be entirely in or out of the
                  aperture depending on whether its center is in or out
                  of the aperture. The aperture weights will contain
                  values only of 0 (out) and 1 (in).

                * ``'subpixel'``:
                  A pixel is divided into subpixels (see the
                  ``subpixels`` keyword), each of which are considered
                  to be entirely in or out of the aperture depending
                  on whether its center is in or out of the aperture.
                  If ``subpixels=1``, this method is equivalent to
                  ``'center'``. The aperture weights will contain values
                  between 0 and 1.

        subpixels : int, optional
            For the ``'subpixel'`` method, resample pixels by this
            factor in each dimension. That is, each pixel is divided
            into ``subpixels ** 2`` subpixels. This keyword is ignored
            unless ``method='subpixel'``.

        Returns
        -------
        mask : `~photutils.aperture.ApertureMask` or list of `~photutils.aperture.ApertureMask`
            A mask for the aperture. If the aperture is scalar then a
            single `~photutils.aperture.ApertureMask` is returned,
            otherwise a list of `~photutils.aperture.ApertureMask` is
            returned.
        """
        raise NotImplementedError('Needs to be implemented in a subclass.')
    def area_overlap(self, data, *, mask=None, method='exact', subpixels=5):
        """
        Return the areas of the aperture masks that overlap with the
        data, i.e., how many pixels are actually used to calculate each
        sum.

        Parameters
        ----------
        data : array_like or `~astropy.units.Quantity`
            The 2D array to multiply with the aperture mask.

        mask : array_like (bool), optional
            A boolean mask with the same shape as ``data`` where a
            `True` value indicates the corresponding element of ``data``
            is masked. Masked data are excluded from the area overlap.

        method : {'exact', 'center', 'subpixel'}, optional
            The method used to determine the overlap of the aperture on
            the pixel grid. Not all options are available for all
            aperture types. Note that the more precise methods are
            generally slower. The following methods are available:

                * ``'exact'`` (default):
                  The exact fractional overlap of the aperture and
                  each pixel is calculated. The aperture weights will
                  contain values between 0 and 1.

                * ``'center'``:
                  A pixel is considered to be entirely in or out of the
                  aperture depending on whether its center is in or out
                  of the aperture. The aperture weights will contain
                  values only of 0 (out) and 1 (in).

                * ``'subpixel'``:
                  A pixel is divided into subpixels (see the
                  ``subpixels`` keyword), each of which are considered
                  to be entirely in or out of the aperture depending
                  on whether its center is in or out of the aperture.
                  If ``subpixels=1``, this method is equivalent to
                  ``'center'``. The aperture weights will contain values
                  between 0 and 1.

        subpixels : int, optional
            For the ``'subpixel'`` method, resample pixels by this
            factor in each dimension. That is, each pixel is divided
            into ``subpixels ** 2`` subpixels. This keyword is ignored
            unless ``method='subpixel'``.

        Returns
        -------
        areas : float or array_like
            The overlapping areas between the aperture masks and the data.
        """
        apermasks = self.to_mask(method=method, subpixels=subpixels)
        if self.isscalar:
            apermasks = (apermasks,)
        if mask is not None:
            mask = np.asarray(mask)
            if mask.shape != data.shape:
                raise ValueError('mask and data must have the same shape')
        # Only the shape (and units) of ``data`` matter here: replacing the
        # values with ones makes every unmasked in-aperture pixel contribute
        # exactly its overlap weight to the sum, which is the area.
        data = np.ones_like(data)
        vals = [apermask.get_values(data, mask=mask) for apermask in apermasks]
        # if the aperture does not overlap the data return np.nan
        areas = [val.sum() if val.shape != (0,) else np.nan for val in vals]
        if self.isscalar:
            return areas[0]
        else:
            return areas
def _do_photometry(self, data, variance, method='exact', subpixels=5,
unit=None):
aperture_sums = []
aperture_sum_errs = []
masks = self.to_mask(method=method, subpixels=subpixels)
if self.isscalar:
masks = (masks,)
for apermask in masks:
values = apermask.get_values(data)
# if the aperture does not overlap the data return np.nan
aper_sum = values.sum() if values.shape != (0,) else np.nan
aperture_sums.append(aper_sum)
if variance is not None:
values = apermask.get_values(variance)
# if the aperture does not overlap the data return np.nan
aper_var = values.sum() if values.shape != (0,) else np.nan
aperture_sum_errs.append(np.sqrt(aper_var))
aperture_sums = np.array(aperture_sums)
aperture_sum_errs = np.array(aperture_sum_errs)
# apply units
if unit is not None:
aperture_sums = aperture_sums * unit # can't use *= w/old numpy
aperture_sum_errs = aperture_sum_errs * unit
return aperture_sums, aperture_sum_errs
def do_photometry(self, data, error=None, mask=None, method='exact',
                  subpixels=5):
    """
    Perform aperture photometry on the input data.

    Parameters
    ----------
    data : array_like or `~astropy.units.Quantity` instance
        The 2D array on which to perform photometry. ``data``
        should be background subtracted.

    error : array_like or `~astropy.units.Quantity`, optional
        The pixel-wise Gaussian 1-sigma errors of the input
        ``data``. ``error`` is assumed to include *all* sources of
        error, including the Poisson error of the sources (see
        `~photutils.utils.calc_total_error`). ``error`` must have
        the same shape as the input ``data``.

    mask : array_like (bool), optional
        A boolean mask with the same shape as ``data`` where a
        `True` value indicates the corresponding element of ``data``
        is masked. Masked data are excluded from all calculations.

    method : {'exact', 'center', 'subpixel'}, optional
        The method used to determine the overlap of the aperture on
        the pixel grid. Not all options are available for all
        aperture types. Note that the more precise methods are
        generally slower. The following methods are available:

        * ``'exact'`` (default):
          The exact fractional overlap of the aperture and
          each pixel is calculated. The aperture weights will
          contain values between 0 and 1.

        * ``'center'``:
          A pixel is considered to be entirely in or out of the
          aperture depending on whether its center is in or out
          of the aperture. The aperture weights will contain
          values only of 0 (out) and 1 (in).

        * ``'subpixel'``:
          A pixel is divided into subpixels (see the
          ``subpixels`` keyword), each of which are considered
          to be entirely in or out of the aperture depending
          on whether its center is in or out of the aperture.
          If ``subpixels=1``, this method is equivalent to
          ``'center'``. The aperture weights will contain values
          between 0 and 1.

    subpixels : int, optional
        For the ``'subpixel'`` method, resample pixels by this
        factor in each dimension. That is, each pixel is divided
        into ``subpixels ** 2`` subpixels. This keyword is ignored
        unless ``method='subpixel'``.

    Returns
    -------
    aperture_sums : `~numpy.ndarray` or `~astropy.units.Quantity`
        The sums within each aperture.

    aperture_sum_errs : `~numpy.ndarray` or `~astropy.units.Quantity`
        The errors on the sums within each aperture.

    Notes
    -----
    `RectangularAperture` and `RectangularAnnulus` photometry with
    the "exact" method uses a subpixel approximation by subdividing
    each data pixel by a factor of 1024 (``subpixels = 32``). For
    rectangular aperture widths and heights in the range from
    2 to 100 pixels, this subpixel approximation gives results
    typically within 0.001 percent or better of the exact value.
    The differences can be larger for smaller apertures (e.g.,
    aperture sizes of one pixel or smaller). For such small sizes,
    it is recommended to set ``method='subpixel'`` with a larger
    ``subpixels`` size.
    """
    # validate inputs
    data, error = _validate_inputs(data, error)

    # handle data, error, and unit inputs
    # output data and error are ndarray without units
    data, error, unit = _handle_units(data, error)

    # compute variance and apply input mask
    data, variance = _prepare_photometry_data(data, error, mask)

    return self._do_photometry(data, variance, method=method,
                               subpixels=subpixels, unit=unit)
@staticmethod
def _make_annulus_path(patch_inner, patch_outer):
    """
    Build a single matplotlib annulus path from an inner and an
    outer patch.

    The cubic Bezier curves (CURVE4) of both aperture paths are
    preserved.
    """
    import matplotlib.path as mpath

    def _device_path(patch):
        # Transform the patch path into its display coordinates.
        return patch.get_transform().transform_path(patch.get_path())

    inner = _device_path(patch_inner)
    outer = _device_path(patch_outer)

    # Reverse the inner vertices (dropping the closing point) so the
    # inner ring winds opposite to the outer ring, then repeat the
    # final vertex to close the sub-path.
    rev_inner = inner.vertices[:-1][::-1]
    rev_inner = np.concatenate((rev_inner, [rev_inner[-1]]))

    verts = np.vstack((outer.vertices, rev_inner))
    codes = np.hstack((outer.codes, inner.codes))
    return mpath.Path(verts, codes)
def _define_patch_params(self, origin=(0, 0), **kwargs):
"""
Define the aperture patch position and set any default
matplotlib patch keywords (e.g., ``fill=False``).
Parameters
----------
origin : array_like, optional
The ``(x, y)`` position of the origin of the displayed
image.
**kwargs : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
Returns
-------
xy_positions : `~numpy.ndarray`
The aperture patch positions.
patch_params : `dict`
Any keyword arguments accepted by
`matplotlib.patches.Patch`.
"""
xy_positions = deepcopy(np.atleast_2d(self.positions))
xy_positions[:, 0] -= origin[0]
xy_positions[:, 1] -= origin[1]
patch_params = self._default_patch_properties
patch_params.update(kwargs)
return xy_positions, patch_params
@abc.abstractmethod
def _to_patch(self, origin=(0, 0), **kwargs):
    """
    Return a `~matplotlib.patches.patch` for the aperture.

    Must be implemented by concrete subclasses.

    Parameters
    ----------
    origin : array_like, optional
        The ``(x, y)`` position of the origin of the displayed
        image.

    **kwargs : `dict`
        Any keyword arguments accepted by
        `matplotlib.patches.Patch`.

    Returns
    -------
    patch : `~matplotlib.patches.patch` or list of `~matplotlib.patches.patch`
        A patch for the aperture. If the aperture is scalar then a
        single `~matplotlib.patches.patch` is returned, otherwise a
        list of `~matplotlib.patches.patch` is returned.
    """
    raise NotImplementedError('Needs to be implemented in a subclass.')
def plot(self, axes=None, origin=(0, 0), **kwargs):
    """
    Plot the aperture on a matplotlib `~matplotlib.axes.Axes`
    instance.

    Parameters
    ----------
    axes : `matplotlib.axes.Axes` or `None`, optional
        The matplotlib axes on which to plot. If `None`, then the
        current `~matplotlib.axes.Axes` instance is used.

    origin : array_like, optional
        The ``(x, y)`` position of the origin of the displayed
        image.

    **kwargs : `dict`
        Any keyword arguments accepted by
        `matplotlib.patches.Patch`.

    Returns
    -------
    patch : list of `~matplotlib.patches.Patch`
        A list of matplotlib patches for the plotted aperture. The
        patches can be used, for example, when adding a plot legend.
    """
    import matplotlib.pyplot as plt

    target_axes = axes if axes is not None else plt.gca()

    patches = self._to_patch(origin=origin, **kwargs)
    if self.isscalar:
        # Normalize the scalar case to an iterable of patches.
        patches = (patches,)

    for patch in patches:
        target_axes.add_patch(patch)

    return patches
def _to_sky_params(self, wcs):
    """
    Convert the pixel aperture parameters to those for a sky
    aperture.

    Parameters
    ----------
    wcs : WCS object
        A world coordinate system (WCS) transformation that
        supports the `astropy shared interface for WCS
        <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
        (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

    Returns
    -------
    sky_params : `dict`
        A dictionary of parameters for an equivalent sky aperture.
    """
    xpos, ypos = np.transpose(self.positions)
    skypos = wcs.pixel_to_world(xpos, ypos)
    sky_params = {'positions': skypos}

    # Shape parameters (radius, a, b, theta, etc.) must be scalars,
    # so the pixel scale and angle are evaluated at a single sky
    # position -- the first one. With a WCS containing distortions
    # (e.g., a spatially-dependent pixel scale) this may give
    # results that depend on the ordering of the positions; the
    # current Aperture API (multiple positions per aperture) offers
    # no clean way around this.
    refpos = skypos if self.isscalar else skypos[0]
    _, pixscale, angle = _pixel_scale_angle_at_skycoord(refpos, wcs)

    for param in self._params:
        value = getattr(self, param)
        if param == 'positions':
            continue
        if param == 'theta':
            # Pixel-space theta is the PA of the semimajor axis
            # relative to the WCS latitude axis, whereas sky angles
            # are measured from the WCS longitude axis, hence the
            # offset by ``angle``.
            sky_params[param] = (value * u.rad) - angle.to(u.rad)
        else:
            sky_params[param] = (value * u.pix * pixscale).to(u.arcsec)

    return sky_params
@abc.abstractmethod
def to_sky(self, wcs):
    """
    Convert the aperture to a `SkyAperture` object defined in
    celestial coordinates.

    Must be implemented by concrete subclasses.

    Parameters
    ----------
    wcs : WCS object
        A world coordinate system (WCS) transformation that
        supports the `astropy shared interface for WCS
        <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
        (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

    Returns
    -------
    aperture : `SkyAperture` object
        A `SkyAperture` object.
    """
    raise NotImplementedError('Needs to be implemented in a subclass.')
class SkyAperture(Aperture):
    """
    Abstract base class for all apertures defined in celestial
    coordinates.
    """

    def _to_pixel_params(self, wcs):
        """
        Convert the sky aperture parameters to those for a pixel
        aperture.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation that
            supports the `astropy shared interface for WCS
            <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
            (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

        Returns
        -------
        pixel_params : `dict`
            A dictionary of parameters for an equivalent pixel
            aperture.
        """
        xpos, ypos = wcs.world_to_pixel(self.positions)
        pixel_params = {'positions': np.transpose((xpos, ypos))}

        # Shape parameters (radius, a, b, theta, etc.) must be
        # scalars, so the pixel scale and angle are evaluated at a
        # single sky position -- the first one. With a WCS containing
        # distortions (e.g., a spatially-dependent pixel scale) this
        # may give results that depend on the ordering of the
        # positions; the current Aperture API (multiple positions per
        # aperture) offers no clean way around this.
        refpos = self.positions if self.isscalar else self.positions[0]
        _, pixscale, angle = _pixel_scale_angle_at_skycoord(refpos, wcs)

        for param in self._params:
            value = getattr(self, param)
            if param == 'positions':
                continue
            if param == 'theta':
                # Sky angles are measured from the WCS longitude
                # axis, whereas pixel-space theta is the PA of the
                # semimajor axis relative to the WCS latitude axis,
                # hence the offset by ``angle``.
                pixel_params[param] = (value + angle).to(u.radian).value
            elif value.unit.physical_type == 'angle':
                pixel_params[param] = (value / pixscale).to(u.pixel).value
            else:
                pixel_params[param] = value.value

        return pixel_params

    @abc.abstractmethod
    def to_pixel(self, wcs):
        """
        Convert the aperture to a `PixelAperture` object defined in
        pixel coordinates.

        Must be implemented by concrete subclasses.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation that
            supports the `astropy shared interface for WCS
            <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
            (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `PixelAperture` object
            A `PixelAperture` object.
        """
        raise NotImplementedError('Needs to be implemented in a subclass.')
|
nilq/baby-python
|
python
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Many point of entry for pydicom read and write functions"""
from pydicom.filereader import (dcmread, read_file, read_dicomdir)
from pydicom.filewriter import dcmwrite, write_file
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.