content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from textwrap import dedent
endc = "\033[0m"
bcolors = dict(
blue="\033[94m",
green="\033[92m",
orange="\033[93m",
red="\033[91m",
bold="\033[1m",
underline="\033[4m",
)
def _color_message(msg, style):
return bcolors[style] + msg + endc
def _message_box(msg, color="green", border="=" * 38, doprint=True, print_func=print):
    """Format ``msg`` inside a colored top/bottom border and return the box string.

    When ``doprint`` is True the box is also emitted through ``print_func``.
    """
    # Normalize the message indentation so it lines up with the border
    msg = dedent(msg)
    colored_border = _color_message(border, color)
    # Border above and below the message; dedent strips the template indentation
    template = """
    {border_colored}
    {msg}
    {border_colored}
    """
    box = dedent(template).format(msg=msg, border_colored=colored_border)
    if doprint is True:
        print_func(box)
    return box
|
nilq/baby-python
|
python
|
import sys
import subprocess
import gzip
from tqdm import tqdm
def get_lines_count(file_path):
    """
    Count the number of lines (newline characters, like ``wc -l``) in a file.

    Transparently handles gzip-compressed files ending in ``.gz``.

    :param file_path: path to a plain-text or gzip-compressed file.
    :return: number of newline characters in the (decompressed) content.
    """
    # NOTE: the previous implementation shelled out to 'gzip -cd ... | wc -l'
    # with the path interpolated into a 'shell=True' command string, which is
    # a shell-injection risk and depends on external binaries; counting the
    # newlines directly in Python is safe and portable.
    opener = gzip.open if file_path.endswith(".gz") else open
    count = 0
    with opener(file_path, "rb") as handle:
        # read in 1 MiB chunks so arbitrarily large files stay memory-bounded
        for chunk in iter(lambda: handle.read(1 << 20), b""):
            count += chunk.count(b"\n")
    return count
# Command-line entry point: convert each FASTA file given on argv to FASTQ,
# filling in a constant fake quality string for every read.
if len(sys.argv) == 1:
    exit("run: python fasta_to_fastq.py <seq1> <seq2> <seq3> ....")
for fasta_file in sys.argv[1:]:
    # two lines per record (header + sequence), so line count // 2 = number of sequences
    no_seqs = get_lines_count(fasta_file) // 2
    # ".fasta" -> ".fastq" by swapping the trailing 'a' for 'q'
    fastq_file = fasta_file[:-1] + "q"
    read_len = int()  # 0 placeholder; real length determined below
    # peek at the first record to determine the read length
    # NOTE(review): assumes every read has the same length as the first one — confirm
    # NOTE(review): plain open() would fail on a '.gz' input even though
    # get_lines_count() supports it — verify intended usage
    with open(fasta_file) as fastaReader:
        next(fastaReader)
        read_len = len(next(fastaReader).strip())
    # constant fake quality line reused for every record ('?' is ASCII 63)
    fakeQuality = "+\n" + "?"*read_len + "\n"
    with open(fasta_file) as fastaReader, open(fastq_file, 'w') as fastqWriter:
        for line in tqdm(fastaReader, total=no_seqs):
            # '>' header marker becomes '@'; the last two characters are dropped
            # NOTE(review): presumably [:-2] strips a suffix such as '/1' — verify on real input
            seq1_header = line.strip().replace(">", "@")[:-2] + "\n"
            seq1_seq = next(fastaReader)
            fastqWriter.write(seq1_header + seq1_seq + fakeQuality)
|
nilq/baby-python
|
python
|
# convert ERAiterim data from (Claudia Wekerle's) netcdf files to ieee-be
# compute specific humidity from dew point temperature
# unfortunately precipitation and downward radiation are only available
# as daily averages, I don't know why
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import os
# input directory with the ERA-interim netcdf files and the variables to convert
ncdir = '/work/ollie/clidyn/forcing/erai'
input_variables = ['precip', 'tdew', 'rad', 't_02', 'u_10', 'v_10']
years = range(2017, 2019)  # processes 2017 and 2018 (range end is exclusive)
def check_flds(fld):
    """Print shape, dtype and summary statistics of a field for sanity checking."""
    print(fld.shape)
    print(fld.dtype)
    stats_fmt = 'mean: %12.6e std: %12.6e min: %12.6e max: %12.6e'
    print(stats_fmt % (fld.mean(), fld.std(), fld.min(), fld.max()))
def specific_humidity(Td, P):
    """
    Compute saturation specific humidity from dew-point temperature and surface pressure.

    Follows the ECMWF formulation (IFS documentation Part IV, equations 7.4/7.5):
    http://www.ecmwf.int/sites/default/files/elibrary/2015/9211-part-iv-physical-processes.pdf

    Td : dew-point temperature (K)
    P  : surface pressure (Pa)
    """
    # saturation-vapor-pressure parameters over water (Buck, 1981)
    a1 = 611.21  # (Pa)
    a3 = 17.502
    a4 = 32.19   # (K)
    T0 = 273.16  # (K) reference temperature
    # gas constants for dry air and water vapor, J/(kg*K)
    Rd = 287.06
    Rv = 461.53
    eps = Rd / Rv
    # saturation water vapor pressure (Pa)
    e_sat = a1 * np.exp(a3 * ((Td - T0) / (Td - a4)))
    # specific humidity at saturation
    return (eps * e_sat) / (P - (1.0 - eps) * e_sat)
def writefield(fname, arr):
    """
    Write a (possibly masked) array to ``fname`` as raw big-endian float32 (ieee-be).

    :param fname: output binary file name.
    :param arr: numpy array or numpy masked array to dump.
    """
    print('writing ' + fname)
    # NOTE: the original code hid the actual write behind an 'if True: pass / else:'
    # debug stub, so the function announced the write but produced no file;
    # the binary dump is restored here.
    # unwrap masked arrays to their raw data; plain ndarrays pass through unchanged
    data = arr.data if isinstance(arr, np.ma.MaskedArray) else arr
    with open(fname, "wb") as fid:
        np.asarray(data).astype('>f4').tofile(fid)
# Main conversion loop: for every year and variable, open the netcdf file,
# flip the y-axis, print sanity statistics, and dump as big-endian binary.
for y in years:
    print(y)
    for invar in input_variables:
        fname=os.path.join(ncdir,'erai.%s.%i.nc'%(invar,y))
        if os.path.isfile(fname): print(fname+' exists and is a file')
        ds = Dataset(fname,'r')
        # flip direction of y-axis because the ERA convention is to have
        # indices (0,0) at the top left corner of the field
        if invar=='t_02':
            # 2-m temperature -> 't2m' binary file
            outfld = ds['T_2_MOD'][:,::-1,:]
            bfile = 't2m_ERAi_6hourly_'+str(y)
            check_flds(outfld)
            writefield(bfile,outfld)
        elif invar=='u_10':
            # 10-m zonal wind -> 'u10' binary file
            outfld = ds['U_10_MOD'][:,::-1,:]
            bfile = 'u10_ERAi_6hourly_'+str(y)
            check_flds(outfld)
            writefield(bfile,outfld)
        elif invar=='v_10':
            # 10-m meridional wind -> 'v10' binary file
            outfld = ds['V_10_MOD'][:,::-1,:]
            bfile = 'v10_ERAi_6hourly_'+str(y)
            check_flds(outfld)
            writefield(bfile,outfld)
        elif invar=='tdew':
            # dew-point temperature is converted to specific humidity;
            # sea-level pressure is used if available, otherwise a constant 1e5 Pa (1 bar)
            tdew = ds['d2m'][:,::-1,:]
            slpname=os.path.join(ncdir,'erai.slp.%i.nc'%(y))
            if os.path.isfile(slpname):
                print(slpname+' exists and is a file')
                dslp = Dataset(slpname,'r')
                slp = dslp['SLP'][:,::-1,:]
            else:
                print(slpname+' does not exist, using slp = 1 bar')
                slp = np.zeros(tdew.shape,dtype='float32') + 1e5
            outfld = specific_humidity(tdew, slp)
            bfile='q_ERAi_6hourly_'+str(y)
            check_flds(outfld)
            writefield(bfile,outfld)
        elif invar=='precip':
            # total precipitation = rain + snow
            rain = ds['RAIN'][:,::-1,:]
            snow = ds['SNOW'][:,::-1,:]
            outfld = rain+snow
            bfile='tp_ERAi_6hourly_'+str(y)
            check_flds(outfld)
            writefield(bfile,outfld)
        # NOTE(review): plain 'if' instead of 'elif' — harmless here because the
        # earlier branches are mutually exclusive, but inconsistent with the chain
        if invar=='rad':
            # the radiation file holds both short- and long-wave downward fluxes
            swdw = ds['SWDW'][:,::-1,:]
            bfile='ssrd_ERAi_6hourly_'+str(y)
            check_flds(swdw)
            writefield(bfile,swdw)
            lwdw = ds['LWDW'][:,::-1,:]
            bfile='strd_ERAi_6hourly_'+str(y)
            check_flds(lwdw)
            writefield(bfile,lwdw)
|
nilq/baby-python
|
python
|
"""
Conversion functions between corresponding data structures.
"""
import json
import logging
from collections import Hashable, OrderedDict # pylint: disable=E0611,no-name-in-module # moved to .abc in Python 3
from copy import deepcopy
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING
from urllib.parse import urlparse
from owslib.wps import (
ComplexData,
Input as OWS_Input_Type,
Metadata as OWS_Metadata,
Output as OWS_Output_Type,
is_reference
)
from pywps import Process as ProcessWPS
from pywps.app.Common import Metadata as WPS_Metadata
from pywps.inout import BoundingBoxInput, BoundingBoxOutput, ComplexInput, ComplexOutput, LiteralInput, LiteralOutput
from pywps.inout.basic import BasicIO
from pywps.inout.formats import Format
from pywps.inout.literaltypes import ALLOWEDVALUETYPE, RANGECLOSURETYPE, AllowedValue, AnyValue
from pywps.validator.mode import MODE
from weaver import xml_util
from weaver.exceptions import PackageTypeError
from weaver.execute import (
EXECUTE_MODE_ASYNC,
EXECUTE_RESPONSE_DOCUMENT,
EXECUTE_TRANSMISSION_MODE_REFERENCE,
EXECUTE_TRANSMISSION_MODE_VALUE
)
from weaver.formats import (
CONTENT_TYPE_ANY,
CONTENT_TYPE_APP_JSON,
CONTENT_TYPE_TEXT_PLAIN,
get_cwl_file_format,
get_extension,
get_format
)
from weaver.processes.constants import (
CWL_REQUIREMENT_APP_WPS1,
PACKAGE_ARRAY_BASE,
PACKAGE_ARRAY_ITEMS,
PACKAGE_ARRAY_MAX_SIZE,
PACKAGE_ARRAY_TYPES,
PACKAGE_CUSTOM_TYPES,
PACKAGE_ENUM_BASE,
PACKAGE_LITERAL_TYPES,
PROCESS_SCHEMA_OGC,
PROCESS_SCHEMA_OLD,
WPS_BOUNDINGBOX,
WPS_COMPLEX,
WPS_COMPLEX_DATA,
WPS_INPUT,
WPS_LITERAL,
WPS_LITERAL_DATA_TYPE_NAMES,
WPS_OUTPUT,
WPS_REFERENCE
)
from weaver.utils import (
bytes2str,
fetch_file,
get_any_id,
get_sane_name,
get_url_without_query,
null,
str2bytes,
transform_json
)
from weaver.wps.utils import get_wps_client
if TYPE_CHECKING:
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from urllib.parse import ParseResult
from pywps.app import WPSRequest
from owslib.wps import Process as ProcessOWS
from requests.models import Response
from weaver.typedefs import (
AnySettingsContainer,
AnyValueType,
CWL,
CWL_IO_EnumSymbols,
CWL_IO_Value,
CWL_Input_Type,
CWL_Output_Type,
JSON
)
# typing shortcuts
# pylint: disable=C0103,invalid-name
WPS_Input_Type = Union[LiteralInput, ComplexInput, BoundingBoxInput]
WPS_Output_Type = Union[LiteralOutput, ComplexOutput, BoundingBoxOutput]
WPS_IO_Type = Union[WPS_Input_Type, WPS_Output_Type]
OWS_IO_Type = Union[OWS_Input_Type, OWS_Output_Type]
JSON_IO_Type = JSON
JSON_IO_ListOrMap = Union[List[JSON], Dict[str, Union[JSON, str]]]
CWL_IO_Type = Union[CWL_Input_Type, CWL_Output_Type]
PKG_IO_Type = Union[JSON_IO_Type, WPS_IO_Type]
ANY_IO_Type = Union[CWL_IO_Type, JSON_IO_Type, WPS_IO_Type, OWS_IO_Type]
ANY_Format_Type = Union[Dict[str, Optional[str]], Format]
ANY_Metadata_Type = Union[OWS_Metadata, WPS_Metadata, Dict[str, str]]
# WPS object attribute -> all possible *other* naming variations (no need to repeat key name)
WPS_FIELD_MAPPING = {
    "identifier": ["id", "ID", "Id", "Identifier"],
    "title": ["Title", "Label", "label"],
    "abstract": ["description", "Description", "Abstract"],
    "version": ["processVersion", "Version"],
    "metadata": ["Metadata"],
    "keywords": ["Keywords"],
    "allowed_values": ["AllowedValues", "allowedValues", "allowedvalues", "Allowed_Values", "Allowedvalues"],
    "allowed_collections": ["AllowedCollections", "allowedCollections", "allowedcollections", "Allowed_Collections",
                            "Allowedcollections"],
    "any_value": ["anyvalue", "anyValue", "AnyValue"],
    "literal_data_domains": ["literalDataDomains"],
    "default": ["default_value", "defaultValue", "DefaultValue", "Default", "data_format", "data"],
    "supported_values": ["SupportedValues", "supportedValues", "supportedvalues", "Supported_Values"],
    "supported_formats": ["SupportedFormats", "supportedFormats", "supportedformats", "Supported_Formats", "formats"],
    "additional_parameters": ["AdditionalParameters", "additionalParameters", "additionalparameters",
                              "Additional_Parameters"],
    "type": ["Type", "data_type", "dataType", "DataType", "Data_Type"],
    "min_occurs": ["minOccurs", "MinOccurs", "Min_Occurs", "minoccurs"],
    "max_occurs": ["maxOccurs", "MaxOccurs", "Max_Occurs", "maxoccurs"],
    "max_megabytes": ["maximumMegabytes", "max_size"],
    "mime_type": ["mimeType", "MimeType", "mime-type", "Mime-Type", "mimetype",
                  "mediaType", "MediaType", "media-type", "Media-Type", "mediatype"],
    "range_minimum": ["minval", "minimum", "minimumValue"],
    "range_maximum": ["maxval", "maximum", "maximumValue"],
    "range_spacing": ["spacing"],
    "range_closure": ["closure", "rangeClosure"],
    "encoding": ["Encoding"],
    "href": ["url", "link", "reference"],
}
# WPS fields that contain a structure corresponding to `Format` object
# - keys must match `WPS_FIELD_MAPPING` keys
# - fields are placed in order of relevance (prefer explicit format, then supported, and defaults as last resort)
WPS_FIELD_FORMAT = ["formats", "supported_formats", "supported_values", "default"]
# WPS 'type' string variations employed to indicate a Complex (file) I/O by different libraries
# for literal types, see 'any2cwl_literal_datatype' and 'any2wps_literal_datatype' functions
WPS_COMPLEX_TYPES = [WPS_COMPLEX, WPS_COMPLEX_DATA, WPS_REFERENCE]
# WPS 'type' string of all combinations (type of data / library implementation)
WPS_ALL_TYPES = [WPS_LITERAL, WPS_BOUNDINGBOX] + WPS_COMPLEX_TYPES
# default format if missing (minimal requirement of one)
DEFAULT_FORMAT = Format(mime_type=CONTENT_TYPE_TEXT_PLAIN)
# marker attribute set on the default Format to flag that it was auto-generated rather than user-provided
DEFAULT_FORMAT_MISSING = "__DEFAULT_FORMAT_MISSING__"
setattr(DEFAULT_FORMAT, DEFAULT_FORMAT_MISSING, True)
# mapping of textual type names to the Python types used for input value parsing
INPUT_VALUE_TYPE_MAPPING = {
    "bool": bool,
    "boolean": bool,
    "file": str,
    "File": str,
    "float": float,
    "int": int,
    "integer": int,
    "str": str,
    "string": str,
}
LOGGER = logging.getLogger(__name__)
def complex2json(data):
    # type: (Union[ComplexData, Any]) -> Union[JSON, Any]
    """
    Convert a :class:`ComplexData` into its JSON representation, or return non-matching input unchanged.
    """
    if not isinstance(data, ComplexData):
        return data
    # older OWSLib versions did not define 'maximumMegabytes' on ComplexData
    size_limit = getattr(data, "maximumMegabytes", None)
    if isinstance(size_limit, str) and size_limit.isnumeric():
        size_limit = int(size_limit)
    json_data = {
        "mimeType": data.mimeType,
        "encoding": data.encoding,
        "schema": data.schema,
        "maximumMegabytes": size_limit,
        "default": False,  # always assume it is a supported format/value, caller should override
    }
    return json_data
def metadata2json(meta, force=False):
    # type: (Union[ANY_Metadata_Type, Any], bool) -> Union[JSON, Any]
    """
    Retrieve metadata information and generate its JSON representation.

    Obtains the JSON representation of a :class:`OWS_Metadata` or :class:`pywps.app.Common.Metadata`.
    Otherwise, simply return the unmatched type.
    If requested, can enforce parsing a dictionary for the corresponding keys.
    """
    if not force and not isinstance(meta, (OWS_Metadata, WPS_Metadata)):
        return meta
    fields = {
        name: get_field(meta, name, search_variations=True, default=None)
        for name in ["title", "href", "role", "rel"]
    }
    # many remote servers do not provide 'rel' but do provide 'title' or 'role';
    # derive one by default so schemas expecting 'rel' to exist do not fail
    if not fields["rel"]:
        host_fallback = urlparse(fields["href"]).hostname
        rel_name = str(fields["title"] or fields["role"] or host_fallback).lower()
        fields["rel"] = get_sane_name(rel_name, replace_character="-", assert_invalid=False)
    return {"href": fields["href"], "title": fields["title"], "role": fields["role"], "rel": fields["rel"]}
def ows2json_field(ows_field):
    # type: (Union[ComplexData, OWS_Metadata, AnyValueType]) -> Union[JSON, AnyValueType]
    """
    Obtain the JSON or raw value from an :mod:`owslib.wps` I/O field.
    """
    # dispatch known OWSLib structures to their converters; anything else passes through
    for ows_class, converter in [(ComplexData, complex2json), (OWS_Metadata, metadata2json)]:
        if isinstance(ows_field, ows_class):
            return converter(ows_field)
    return ows_field
def ows2json_io(ows_io):
    # type: (OWS_IO_Type) -> JSON_IO_Type
    """
    Converts I/O definition from :mod:`owslib.wps` to JSON.

    Collects every known field (and naming variations) from the OWSLib I/O object,
    then post-processes complex-data 'formats' or literal-data domains as applicable.
    """
    json_io = dict()
    for field in WPS_FIELD_MAPPING:
        value = get_field(ows_io, field, search_variations=True)
        # preserve numeric values (ex: "minOccurs"=0) as actual parameters
        # ignore undefined values represented by `null`, empty list, or empty string
        if value or value in [0, 0.0]:
            if isinstance(value, list):
                # complex data is converted as is
                # metadata converted and preserved if it results into a minimally valid definition (otherwise dropped)
                json_io[field] = [
                    complex2json(v) if isinstance(v, ComplexData) else
                    metadata2json(v) if isinstance(v, OWS_Metadata) else v
                    for v in value if not isinstance(v, OWS_Metadata) or v.url is not None
                ]
            elif isinstance(value, ComplexData):
                json_io[field] = complex2json(value)
            elif isinstance(value, OWS_Metadata):
                json_io[field] = metadata2json(value)
            else:
                json_io[field] = value
    # normalize the identifier under 'id' (removes whichever variation was found)
    json_io["id"] = get_field(json_io, "identifier", search_variations=True, pop_found=True)
    io_type = json_io.get("type")

    # add 'format' if missing, derived from other variants
    if io_type == WPS_COMPLEX_DATA:
        fmt_default = False
        if "default" in json_io and isinstance(json_io["default"], dict):
            json_io["default"]["default"] = True  # provide for workflow extension (internal), schema drops it (API)
            fmt_default = True
        # retrieve alternate format definitions
        if "formats" not in json_io:
            # correct complex data 'formats' from OWSLib from initial fields loop can get stored in 'supported_values'
            fmt_val = get_field(json_io, "supported_values", pop_found=True)
            if fmt_val:
                json_io["formats"] = fmt_val
            else:
                # search for format fields directly specified in I/O body
                for field in WPS_FIELD_FORMAT:
                    fmt = get_field(json_io, field, search_variations=True)
                    if not fmt:
                        continue
                    if isinstance(fmt, dict):
                        fmt = [fmt]
                    # only dict-like entries can represent a format definition
                    fmt = filter(lambda f: isinstance(f, dict), fmt)
                    if not isinstance(json_io.get("formats"), list):
                        json_io["formats"] = []
                    for var_fmt in fmt:
                        # add it only if not exclusively provided by a previous variant
                        json_fmt_items = [j_fmt.items() for j_fmt in json_io["formats"]]
                        if any(all(var_item in items for var_item in var_fmt.items()) for items in json_fmt_items):
                            continue
                        json_io["formats"].append(var_fmt)
        json_io.setdefault("formats", [])
        # apply the default flag (at most one format is flagged as the default)
        for fmt in json_io["formats"]:
            fmt["default"] = fmt_default and is_equal_formats(json_io["default"], fmt)
            if fmt["default"]:
                break
        # NOTE:
        # Don't apply 'minOccurs=0' as in below literal case because default 'format' does not imply that unspecified
        # input is valid, but rather that given an input without explicit 'format' specified, that 'default' is used.
        return json_io
    # add value constraints in specifications
    elif io_type in WPS_LITERAL_DATA_TYPE_NAMES:
        domains = any2json_literal_data_domains(ows_io)
        if domains:
            json_io["literalDataDomains"] = domains
            # fix inconsistencies of some process descriptions
            # WPS are allowed to report 'minOccurs=1' although 'defaultValue' can also be provided
            # (see https://github.com/geopython/pywps/issues/625)
            if "defaultValue" in domains[0]:
                json_io["min_occurs"] = 0
    return json_io
# FIXME: add option to control auto-fetch, disable during workflow by default to avoid double downloads?
#        (https://github.com/crim-ca/weaver/issues/183)
def ows2json_output_data(output, process_description, container=None):
    # type: (OWS_Output_Type, ProcessOWS, Optional[AnySettingsContainer]) -> JSON
    """
    Utility method to convert an :mod:`owslib.wps` process execution output data (result) to `JSON`.

    In the case that a ``reference`` output of `JSON` content-type is specified and that it refers to a file that
    contains an array list of URL references to simulate a multiple-output, this specific output gets expanded to
    contain both the original URL ``reference`` field and the loaded URL list under ``data`` field for easier access
    from the response body.

    Referenced file(s) are fetched in order to store them locally if executed on a remote process, such that they can
    become accessible as local job result for following reporting or use by other processes in a workflow chain.

    If the ``dataType`` details is missing from the data output (depending on servers that might omit it), the
    :paramref:`process_description` is employed to retrieve the original description with expected result details.

    :param output: output with data value or reference according to expected result for the corresponding process.
    :param process_description: definition of the process producing the specified output following execution.
    :param container: container to retrieve application settings (for request options during file retrieval as needed).
    :return: converted JSON result data and additional metadata as applicable based on data-type and content-type.
    """
    # backfill a missing 'dataType' from the process description (mutates 'output' in place)
    if not output.dataType:
        for process_output in getattr(process_description, "processOutputs", []):
            if getattr(process_output, "identifier", "") == output.identifier:
                output.dataType = process_output.dataType
                break
    json_output = {
        "identifier": output.identifier,
        "title": output.title,
        "dataType": output.dataType
    }
    # WPS standard v1.0.0 specify that either a reference or a data field has to be provided
    if output.reference:
        json_output["reference"] = output.reference
        # Handle special case where we have a reference to a json array containing dataset reference
        # Avoid reference to reference by fetching directly the dataset references
        json_array = _get_multi_json_references(output, container)
        if json_array and all(str(ref).startswith("http") for ref in json_array):
            json_output["data"] = json_array
    else:
        # WPS standard v1.0.0 specify that Output data field has Zero or one value
        json_output["data"] = output.data[0] if output.data else None
    # mime-type is only meaningful for complex data or referenced outputs
    if (json_output["dataType"] == WPS_COMPLEX_DATA or "reference" in json_output) and output.mimeType:
        json_output["mimeType"] = output.mimeType
    return json_output
# FIXME: support metalink unwrapping (weaver #25)
# FIXME: reuse functions
#   definitely can be improved and simplified with 'fetch_file' function
#   then return parsed contents from that file
def _get_multi_json_references(output, container):
    # type: (OWS_Output_Type, Optional[AnySettingsContainer]) -> Optional[List[JSON]]
    """
    Obtains the JSON contents of a single output corresponding to multi-file references.

    Since WPS standard does not allow to return multiple values for a single output,
    a lot of process actually return a JSON array containing references to these outputs.

    Because the multi-output references are contained within this JSON file, it is not very convenient to retrieve
    the list of URLs as one always needs to open and read the file to get them. This function goal is to detect this
    particular format and expand the references to make them quickly available in the job output response.

    :return:
        Array of HTTP(S) references if the specified output is effectively a JSON containing that, ``None`` otherwise.
    """
    # Check for the json datatype and mime-type
    if output.dataType == WPS_COMPLEX_DATA and output.mimeType == CONTENT_TYPE_APP_JSON:
        try:
            # If the json data is referenced, read its content from a locally fetched copy
            if output.reference:
                with TemporaryDirectory() as tmp_dir:
                    file_path = fetch_file(output.reference, tmp_dir, settings=container)
                    with open(file_path, "r") as tmp_file:
                        json_data_str = tmp_file.read()
            # Else get the data directly
            else:
                # process output data are append into a list and
                # WPS standard v1.0.0 specify that Output data field has zero or one value
                if not output.data:
                    return None
                json_data_str = output.data[0]
            # Load the actual json dict
            json_data = json.loads(json_data_str)
        except Exception as exc:  # pylint: disable=W0703
            # best-effort expansion: any fetch/parse failure falls back to 'not a multi-reference'
            LOGGER.debug("Failed retrieval of JSON output file for multi-reference unwrapping", exc_info=exc)
            return None
        # only a list where every entry is a reference qualifies as a multi-output expansion
        if isinstance(json_data, list):
            return None if any(not is_reference(data_value) for data_value in json_data) else json_data
    return None
def any2cwl_io(wps_io, io_select):
    # type: (Union[JSON_IO_Type, WPS_IO_Type, OWS_IO_Type], str) -> Tuple[CWL_IO_Type, Dict[str, str]]
    """
    Converts a `WPS`-like I/O to `CWL` corresponding I/O.

    Because of `CWL` I/O of type `File` with `format` field, the applicable namespace is also returned.

    :param wps_io: I/O definition in any of the supported WPS-like representations.
    :param io_select: either ``WPS_INPUT`` or ``WPS_OUTPUT``, controlling occurs/glob handling.
    :returns: converted I/O and namespace dictionary with corresponding format references as required
    """
    def _get_cwl_fmt_details(wps_fmt):
        # type: (ANY_Format_Type) -> Union[Tuple[Tuple[str, str], str, str], Tuple[None, None, None]]
        # resolve (namespace-ref, format-ref, extension) for a single format entry, or all-None when unresolvable
        _wps_io_fmt = get_field(wps_fmt, "mime_type", search_variations=True)
        if not _wps_io_fmt:
            return None, None, None
        _cwl_io_ext = get_extension(_wps_io_fmt)
        _cwl_io_ref, _cwl_io_fmt = get_cwl_file_format(_wps_io_fmt, must_exist=True, allow_synonym=False)
        return _cwl_io_ref, _cwl_io_fmt, _cwl_io_ext

    wps_io_type = get_field(wps_io, "type", search_variations=True)
    wps_io_id = get_field(wps_io, "identifier", search_variations=True)
    cwl_ns = dict()
    cwl_io = {"id": wps_io_id}  # type: CWL_IO_Type
    if wps_io_type not in WPS_COMPLEX_TYPES:
        # literal I/O: map the datatype, using an 'enum' when allowed values are constrained
        cwl_io_type = any2cwl_literal_datatype(wps_io_type)
        wps_allow = get_field(wps_io, "allowed_values", search_variations=True)
        if isinstance(wps_allow, list) and len(wps_allow) > 0:
            cwl_io["type"] = {"type": PACKAGE_ENUM_BASE, "symbols": wps_allow}
        else:
            cwl_io["type"] = cwl_io_type
    # FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)
    else:
        cwl_io_fmt = None
        cwl_io_ext = CONTENT_TYPE_ANY
        cwl_io["type"] = "File"
        # inputs are allowed to define multiple 'supported' formats
        # outputs are allowed to define only one 'applied' format
        for field in WPS_FIELD_FORMAT:
            fmt = get_field(wps_io, field, search_variations=True)
            if isinstance(fmt, dict):
                cwl_io_ref, cwl_io_fmt, cwl_io_ext = _get_cwl_fmt_details(fmt)
                if cwl_io_ref and cwl_io_fmt:
                    cwl_ns.update(cwl_io_ref)
                break
            if isinstance(fmt, list):
                if len(fmt) == 1:
                    cwl_io_ref, cwl_io_fmt, cwl_io_ext = _get_cwl_fmt_details(fmt[0])
                    if cwl_io_ref and cwl_io_fmt:
                        cwl_ns.update(cwl_io_ref)
                    break
                if io_select == WPS_OUTPUT and len(fmt) > 1:
                    break  # don't use any format because we cannot enforce one
                cwl_ns_multi = {}
                cwl_fmt_multi = []
                for fmt_i in fmt:
                    # FIXME: (?)
                    #   when multiple formats are specified, but at least one schema/namespace reference can't be found,
                    #   we must drop all since that unknown format is still allowed but cannot be validated
                    #   avoid potential validation error if that format was the one provided during execute...
                    #   (see: https://github.com/crim-ca/weaver/issues/50)
                    cwl_io_ref_i, cwl_io_fmt_i, _ = _get_cwl_fmt_details(fmt_i)
                    if cwl_io_ref_i and cwl_io_fmt_i:
                        cwl_ns_multi.update(cwl_io_ref_i)
                        cwl_fmt_multi.append(cwl_io_fmt_i)
                    else:
                        # reset all since at least one format could not be mapped to an official schema
                        cwl_ns_multi = {}
                        cwl_fmt_multi = None
                        break
                cwl_io_fmt = cwl_fmt_multi  # all formats or none of them
                cwl_ns.update(cwl_ns_multi)
                break
        if cwl_io_fmt:
            cwl_io["format"] = cwl_io_fmt
        # for backward compatibility with deployed processes, consider text/plan as 'any' for glob pattern
        cwl_io_txt = get_extension(CONTENT_TYPE_TEXT_PLAIN)
        if cwl_io_ext == cwl_io_txt:
            cwl_io_any = get_extension(CONTENT_TYPE_ANY)
            LOGGER.warning("Replacing '%s' [%s] to generic '%s' [%s] glob pattern. "
                           "More explicit format could be considered for %s '%s'.",
                           CONTENT_TYPE_TEXT_PLAIN, cwl_io_txt, CONTENT_TYPE_ANY, cwl_io_any, io_select, wps_io_id)
            cwl_io_ext = cwl_io_any
        if io_select == WPS_OUTPUT:
            # FIXME: (?) how to specify the 'name' part of the glob (using the "id" value for now)
            cwl_io["outputBinding"] = {
                "glob": "{}{}".format(wps_io_id, cwl_io_ext)
            }
    # FIXME: multi-outputs (https://github.com/crim-ca/weaver/issues/25)
    # min/max occurs can only be in inputs, outputs are enforced min/max=1 by WPS
    if io_select == WPS_INPUT:
        wps_default = get_field(wps_io, "default", search_variations=True)
        wps_min_occ = get_field(wps_io, "min_occurs", search_variations=True, default=1)
        # field 'default' must correspond to a fallback "value", not a default "format"
        is_min_null = wps_min_occ in [0, "0"]
        if wps_default != null and not isinstance(wps_default, dict):
            cwl_io["default"] = wps_default
        # NOTE:
        # Don't set any 'default' field here (neither 'null' string or 'None' type) if no value was provided
        # since those are interpreted by CWL as literal string 'null' (for 'string' type) or null object.
        # Instead, 'null' entry is added to 'type' to indicate drop/ignore missing input.
        wps_max_occ = get_field(wps_io, "max_occurs", search_variations=True)
        if wps_max_occ != null and (wps_max_occ == "unbounded" or wps_max_occ > 1):
            cwl_array = {
                "type": PACKAGE_ARRAY_BASE,
                "items": cwl_io["type"]
            }
            # if single value still allowed, or explicitly multi-value array if min greater than one
            if wps_min_occ > 1:
                cwl_io["type"] = cwl_array
            else:
                cwl_io["type"] = [cwl_io["type"], cwl_array]
        # apply default null after handling literal/array/enum type variants
        # (easier to apply against their many different structures)
        if is_min_null:
            if isinstance(cwl_io["type"], list):
                cwl_io["type"].insert(0, "null")  # if min=0,max>1 (null, <type>, <array-type>)
            else:
                cwl_io["type"] = ["null", cwl_io["type"]]  # if min=0,max=1 (null, <type>)
    return cwl_io, cwl_ns
def wps2cwl_requirement(wps_service_url, wps_process_id):
    # type: (Union[str, ParseResult], str) -> JSON
    """
    Obtains the `CWL` requirements definition needed for parsing by a remote `WPS` provider as an `Application Package`.
    """
    # hint pointing back at the remote WPS-1 provider and process
    wps1_hint = {
        "provider": get_url_without_query(wps_service_url),
        "process": wps_process_id,
    }
    package = OrderedDict()
    package["cwlVersion"] = "v1.0"
    package["class"] = "CommandLineTool"
    package["hints"] = {CWL_REQUIREMENT_APP_WPS1: wps1_hint}
    return package
def ows2json(wps_process, wps_service_name, wps_service_url, wps_provider_name=None):
    # type: (ProcessOWS, str, Union[str, ParseResult], Optional[str]) -> Tuple[CWL, JSON]
    """
    Generates the `CWL` package and process definitions from a :class:`owslib.wps.Process` hosted under `WPS` location.

    :param wps_process: OWSLib process description to convert.
    :param wps_service_name: name of the hosting service, added to the process keywords.
    :param wps_service_url: URL of the hosting WPS service, stored in the CWL hints.
    :param wps_provider_name: optional provider name, also added to the keywords.
    :return: tuple of the generated CWL package and the WPS-3 process information.
    """
    process_info = OrderedDict([
        ("id", wps_process.identifier),
        ("keywords", [wps_service_name] if wps_service_name else []),
    ])
    if wps_provider_name and wps_provider_name not in process_info["keywords"]:
        process_info["keywords"].append(wps_provider_name)
    # fall back to a capitalized identifier when no title is available
    default_title = wps_process.identifier.capitalize()
    process_info["title"] = get_field(wps_process, "title", default=default_title, search_variations=True)
    process_info["description"] = get_field(wps_process, "abstract", default=None, search_variations=True)
    process_info["version"] = get_field(wps_process, "version", default=None, search_variations=True)
    process_info["metadata"] = []
    if wps_process.metadata:
        for meta in wps_process.metadata:
            metadata = metadata2json(meta)
            if metadata:
                process_info["metadata"].append(metadata)
    process_info["inputs"] = []                 # type: List[JSON]
    process_info["outputs"] = []                # type: List[JSON]
    for wps_in in wps_process.dataInputs:       # type: OWS_Input_Type
        process_info["inputs"].append(ows2json_io(wps_in))
    for wps_out in wps_process.processOutputs:  # type: OWS_Output_Type
        process_info["outputs"].append(ows2json_io(wps_out))
    # generate CWL for WPS-1 using parsed WPS-3
    cwl_package = wps2cwl_requirement(wps_service_url, wps_process.identifier)
    for io_select in [WPS_INPUT, WPS_OUTPUT]:
        io_section = "{}s".format(io_select)
        cwl_package[io_section] = list()
        for wps_io in process_info[io_section]:
            cwl_io, cwl_ns = any2cwl_io(wps_io, io_select)
            cwl_package[io_section].append(cwl_io)
            # accumulate any format namespaces required by the converted I/O
            if cwl_ns:
                if "$namespaces" not in cwl_package:
                    cwl_package["$namespaces"] = dict()
                cwl_package["$namespaces"].update(cwl_ns)
    return cwl_package, process_info
def xml_wps2cwl(wps_process_response, settings):
    # type: (Response, AnySettingsContainer) -> Tuple[CWL, JSON]
    """
    Obtains the ``CWL`` definition that corresponds to a XML WPS-1 process.

    Converts a `WPS-1 ProcessDescription XML` tree structure to an equivalent `WPS-3 Process JSON`. and builds the
    associated `CWL` package in conformance to :data:`weaver.processes.wps_package.CWL_REQUIREMENT_APP_WPS1`.

    :param wps_process_response: valid response (XML, 200) from a `WPS-1 ProcessDescription`.
    :param settings: application settings to retrieve additional request options.
    """
    def _tag_name(_xml):
        # type: (Union[xml_util.XML, str]) -> str
        """
        Obtains ``tag`` from a ``{namespace}Tag`` `XML` element.
        """
        if hasattr(_xml, "tag"):
            _xml = _xml.tag
        return _xml.split("}")[-1].lower()

    # look for `XML` structure starting at `ProcessDescription` (WPS-1)
    xml_resp = xml_util.fromstring(str2bytes(wps_process_response.content))
    xml_wps_process = xml_resp.xpath("//ProcessDescription")  # type: List[xml_util.XML]
    if not len(xml_wps_process) == 1:
        raise ValueError("Could not retrieve a valid 'ProcessDescription' from WPS-1 response.")
    # extract the process identifier from the first-level children of the description
    process_id = None
    for sub_xml in xml_wps_process[0]:
        tag = _tag_name(sub_xml)
        if tag == "identifier":
            process_id = sub_xml.text
            break
    if not process_id:
        raise ValueError("Could not find a match for 'ProcessDescription.identifier' from WPS-1 response.")
    # transform WPS-1 -> WPS-3
    wps = get_wps_client(wps_process_response.url, settings)
    wps_service_url = urlparse(wps_process_response.url)
    # prefer the provider name reported by the service, falling back to its hostname
    if wps.provider:
        wps_service_name = wps.provider.name
    else:
        wps_service_name = wps_service_url.hostname
    # reuse the already-fetched XML content instead of re-requesting the description
    wps_process = wps.describeprocess(process_id, xml=wps_process_response.content)
    cwl_package, process_info = ows2json(wps_process, wps_service_name, wps_service_url)
    return cwl_package, process_info
def is_cwl_file_type(io_info):
    # type: (CWL_IO_Type) -> bool
    """
    Identifies if the provided `CWL` input/output corresponds to one, many or potentially a ``File`` type(s).

    When multiple distinct *atomic* types are allowed for a given I/O (e.g.: ``[string, File]``) and that one of them
    is a ``File``, the result will be ``True`` even if other types are not ``Files``.
    Potential ``File`` when other base type is ``"null"`` will also return ``True``.
    """
    io_type = io_info.get("type")
    if not io_type:
        raise ValueError("Missing CWL 'type' definition: [{!s}]".format(io_info))
    if isinstance(io_type, str):
        return io_type == "File"
    if isinstance(io_type, dict):
        # arrays are a 'File' type when their items are; otherwise inspect the nested type itself
        nested_type = io_type["items"] if io_type["type"] == PACKAGE_ARRAY_BASE else io_type["type"]
        return nested_type == "File"
    if isinstance(io_type, list):
        # a single matching atomic type within the list is sufficient
        for sub_type in io_type:
            if sub_type == "File" or is_cwl_file_type({"type": sub_type}):
                return True
        return False
    msg = "Unknown parsing of CWL 'type' format ({!s}) [{!s}] in [{}]".format(type(io_type), io_type, io_info)
    raise ValueError(msg)
def is_cwl_array_type(io_info):
    # type: (CWL_IO_Type) -> Tuple[bool, str, MODE, Union[AnyValueType, List[Any]]]
    """
    Verifies if the specified I/O corresponds to one of various CWL array type definitions.

    Handles both the nested mapping notation (``{"type": "array", "items": "<type>"}``) and the
    shorthand string notation (``"<type>[]"``), including arrays whose items are ``enum`` symbols.

    :returns:
        ``tuple(is_array, io_type, io_mode, io_allow)`` where:
        - ``is_array``: specifies if the I/O is of array type.
        - ``io_type``: array element type if ``is_array`` is True, type of ``io_info`` otherwise.
        - ``io_mode``: validation mode to be applied if sub-element requires it, defaults to ``MODE.NONE``.
        - ``io_allow``: validation values to be applied if sub-element requires it, defaults to ``AnyValue``.
    :raises PackageTypeError: if the array element doesn't have the required values and valid format.
    """
    # use mapping to allow sub-function updates
    io_return = {
        "array": False,
        "allow": AnyValue,
        "type": io_info["type"],
        "mode": MODE.NONE,
    }

    def _update_if_sub_enum(_io_item):
        # type: (CWL_IO_Type) -> bool
        """
        Updates the ``io_return`` parameters if ``io_item`` evaluates to a valid ``enum`` type.

        Parameter ``io_item`` should correspond to field ``items`` of an array I/O definition.
        Simple pass-through if the array item is not an ``enum``.
        """
        _is_enum, _enum_type, _enum_mode, _enum_allow = is_cwl_enum_type({"type": _io_item})  # noqa: typing
        if _is_enum:
            LOGGER.debug("I/O [%s] parsed as 'array' with sub-item as 'enum'", io_info["name"])
            # mutate the enclosing 'io_return' mapping (closure) so the nested enum
            # constraints (base type, validation mode, allowed symbols) propagate to the caller
            io_return["type"] = _enum_type
            io_return["mode"] = _enum_mode
            io_return["allow"] = _enum_allow
        return _is_enum

    # optional I/O could be an array of '["null", "<type>"]' with "<type>" being any of the formats parsed after
    # is it the literal representation instead of the shorthand with '?'
    if isinstance(io_info["type"], list) and any(sub_type == "null" for sub_type in io_info["type"]):
        # we can ignore the optional indication in this case because it doesn't impact following parsing
        io_return["type"] = list(filter(lambda sub_type: sub_type != "null", io_info["type"]))[0]

    # array type conversion when defined as '{"type": "array", "items": "<type>"}'
    # validate against 'Hashable' instead of 'dict' since 'OrderedDict'/'CommentedMap' can fail 'isinstance()'
    if (
        not isinstance(io_return["type"], str)
        and not isinstance(io_return["type"], Hashable)
        and "items" in io_return["type"]
        and "type" in io_return["type"]
    ):
        io_type = dict(io_return["type"])  # make hashable to allow comparison
        if io_type["type"] != PACKAGE_ARRAY_BASE:
            raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
        # parse enum in case we got an array of allowed symbols
        is_enum = _update_if_sub_enum(io_type["items"])
        if not is_enum:
            io_return["type"] = io_type["items"]
        if io_return["type"] not in PACKAGE_ARRAY_ITEMS:
            raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
        LOGGER.debug("I/O [%s] parsed as 'array' with nested dict notation", io_info["name"])
        io_return["array"] = True
    # array type conversion when defined as string '<type>[]'
    elif isinstance(io_return["type"], str) and io_return["type"] in PACKAGE_ARRAY_TYPES:
        io_return["type"] = io_return["type"][:-2]  # remove '[]'
        if io_return["type"] in PACKAGE_CUSTOM_TYPES:
            # parse 'enum[]' for array of allowed symbols, provide expected structure for sub-item parsing
            io_item = deepcopy(io_info)
            io_item["type"] = io_return["type"]  # override corrected type without '[]'
            _update_if_sub_enum(io_item)
        if io_return["type"] not in PACKAGE_ARRAY_ITEMS:
            raise PackageTypeError("Unsupported I/O 'array' definition: '{}'.".format(repr(io_info)))
        LOGGER.debug("I/O [%s] parsed as 'array' with shorthand '[]' notation", io_info["name"])
        io_return["array"] = True
    return io_return["array"], io_return["type"], io_return["mode"], io_return["allow"]
def is_cwl_enum_type(io_info):
    # type: (CWL_IO_Type) -> Tuple[bool, str, int, Optional[CWL_IO_EnumSymbols]]
    """
    Verifies if the specified I/O corresponds to a CWL enum definition.

    :returns:
        ``tuple(is_enum, io_type, io_mode, io_allow)`` where:
        - ``is_enum``: indicates whether the I/O is an enum type.
        - ``io_type``: base type of the enum symbols when ``is_enum=True``, the raw type of ``io_info`` otherwise.
        - ``io_mode``: validation mode to apply when the input requires it, ``MODE.NONE`` by default.
        - ``io_allow``: the allowed enum symbols, or ``None`` when not an enum.
    :raises PackageTypeError: if the enum is missing required parameters or uses an invalid format.
    """
    io_type = io_info["type"]
    # anything that is not a mapping with a known custom 'enum'-like type is passed through untouched
    if not isinstance(io_type, dict) or "type" not in io_type or io_type["type"] not in PACKAGE_CUSTOM_TYPES:
        return False, io_type, MODE.NONE, None
    if "symbols" not in io_type:
        raise PackageTypeError("Unsupported I/O 'enum' definition: '{!r}'.".format(io_info))
    symbols = io_type["symbols"]
    if not isinstance(symbols, list) or len(symbols) < 1:
        raise PackageTypeError("Invalid I/O 'enum.symbols' definition: '{!r}'.".format(io_info))
    # every symbol must share the exact same type so that a single base type can be resolved
    sample = symbols[0]
    if any(type(symbol) is not type(sample) for symbol in symbols):
        raise PackageTypeError("Ambiguous types in I/O 'enum.symbols' definition: '{!r}'.".format(io_info))
    # resolve the corresponding CWL literal base type
    # NOTE(review): 'bool' symbols would resolve as "int" here ('bool' is an 'int' subclass) — confirm if intended
    if isinstance(sample, str):
        base_type = "string"
    elif isinstance(sample, float):
        base_type = "float"
    elif isinstance(sample, int):
        base_type = "int"
    else:
        raise PackageTypeError("Unsupported I/O 'enum' base type: `{!s}`, from definition: `{!r}`."
                               .format(type(sample), io_info))
    # allowed value validator mode must be set for input
    return True, base_type, MODE.SIMPLE, symbols
def get_cwl_io_type(io_info):
    # type: (CWL_IO_Type) -> Tuple[str, bool]
    """
    Obtains the basic type of the CWL input and identity if it is optional.

    CWL allows multiple shorthand representation or combined types definition.
    The *base* type must be extracted in order to identify the expected data format and supported values.

    Obtains real type if ``"default"`` or shorthand ``"<type>?"`` was in CWL, which
    can also be defined as type ``["null", <type>]``.

    CWL allows multiple distinct types (e.g.: ``string`` and ``int`` simultaneously), but not WPS inputs.
    WPS allows only different amount of *same type* through ``minOccurs`` and ``maxOccurs``.
    Considering WPS conversion, we can also have following definition ``["null", <type>, <array-type>]`` (same type).
    Whether single or array-like type, the base type can be extracted.

    :param io_info: definition of the CWL input.
    :return: tuple of guessed base type and flag indicating if it can be null (optional input).
    :raises PackageTypeError: if a type list is empty/single-element or combines distinct base types.
    """
    io_type = io_info["type"]
    is_null = False
    if isinstance(io_type, list):
        if len(io_type) <= 1:
            raise PackageTypeError("Unsupported I/O type as list cannot have only one base type: '{}'".format(io_info))
        # a "null" entry means the I/O can be omitted entirely (i.e.: optional with default)
        # (note: the previous 'len(io_type) == 1' re-check here was unreachable since length >= 2 is guaranteed above)
        if "null" in io_type:
            LOGGER.debug("I/O parsed for 'default'")
            is_null = True  # I/O can be omitted since default value exists
            io_type = [typ for typ in io_type if typ != "null"]
        if len(io_type) == 1:  # valid if other was "null" now removed
            io_type = io_type[0]
        else:
            # check that many sub-type definitions all match same base type (no conflicting literals)
            io_type_many = set()
            io_base_type = None
            for i, typ in enumerate(io_type):
                sub_type = {"type": typ, "name": "{}[{}]".format(io_info["name"], i)}
                is_array, array_elem, _, _ = is_cwl_array_type(sub_type)
                is_enum, enum_type, _, _ = is_cwl_enum_type(sub_type)
                # array base type more important than enum because later array conversion also handles allowed values
                if is_array:
                    io_base_type = typ  # highest priority (can have sub-literal or sub-enum)
                    io_type_many.add(array_elem)
                elif is_enum:
                    io_base_type = io_base_type if io_base_type is not None else enum_type  # less priority
                    io_type_many.add(enum_type)
                else:
                    io_base_type = io_base_type if io_base_type is not None else typ  # less priority
                    io_type_many.add(typ)  # literal base type by itself (not array/enum)
            if len(io_type_many) != 1:
                raise PackageTypeError("Unsupported I/O with many distinct base types for info: '{!s}'".format(io_info))
            io_type = io_base_type
            LOGGER.debug("I/O parsed for multiple base types")
    return io_type, is_null
def cwl2wps_io(io_info, io_select):
    # type:(CWL_IO_Type, str) -> WPS_IO_Type
    """
    Converts input/output parameters from CWL types to WPS types.

    The conversion resolves in order: the base type (possibly from a multi-type union), array
    notation (mapping to ``maxOccurs``), then enum notation (mapping to allowed values).

    :param io_info: parsed IO of a CWL file
    :param io_select: :py:data:`WPS_INPUT` or :py:data:`WPS_OUTPUT` to specify desired WPS type conversion.
    :returns: corresponding IO in WPS format
    :raises PackageTypeError: if ``io_select`` is not one of the supported selectors.
    :raises TypeError: if the base type could not be decoded to a string after all conversions.
    """
    is_input = False
    is_output = False
    # FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)
    if io_select == WPS_INPUT:
        is_input = True
        io_literal = LiteralInput  # type: Union[Type[LiteralInput], Type[LiteralOutput]]
        io_complex = ComplexInput  # type: Union[Type[ComplexInput], Type[ComplexOutput]]
        # io_bbox = BoundingBoxInput  # type: Union[Type[BoundingBoxInput], Type[BoundingBoxOutput]]
    elif io_select == WPS_OUTPUT:
        is_output = True
        io_literal = LiteralOutput  # type: Union[Type[LiteralInput], Type[LiteralOutput]]
        io_complex = ComplexOutput  # type: Union[Type[ComplexInput], Type[ComplexOutput]]
        # io_bbox = BoundingBoxOutput  # type: Union[Type[BoundingBoxInput], Type[BoundingBoxOutput]]
    else:
        raise PackageTypeError("Unsupported I/O info definition: '{!r}' with '{}'.".format(io_info, io_select))
    # obtain base type considering possible CWL type representations
    io_type, is_null = get_cwl_io_type(io_info)
    io_info["type"] = io_type  # override resolved multi-type base for more parsing
    io_name = io_info["name"]
    # a nullable CWL type means the input can be omitted entirely (minOccurs=0)
    io_min_occurs = 0 if is_null else 1
    io_max_occurs = 1  # unless array after
    # convert array types
    is_array, array_elem, io_mode, io_allow = is_cwl_array_type(io_info)
    if is_array:
        LOGGER.debug("I/O parsed for 'array'")
        io_type = array_elem
        io_max_occurs = PACKAGE_ARRAY_MAX_SIZE
    # convert enum types
    is_enum, enum_type, enum_mode, enum_allow = is_cwl_enum_type(io_info)
    if is_enum:
        LOGGER.debug("I/O parsed for 'enum'")
        io_type = enum_type
        io_allow = enum_allow
        io_mode = enum_mode
    # debug info for unhandled types conversion
    if not isinstance(io_type, str):
        LOGGER.debug("is_array: [%s]", repr(is_array))
        LOGGER.debug("array_elem: [%s]", repr(array_elem))
        LOGGER.debug("is_enum: [%s]", repr(is_enum))
        LOGGER.debug("enum_type: [%s]", repr(enum_type))
        LOGGER.debug("enum_allow: [%s]", repr(enum_allow))
        LOGGER.debug("io_info: [%s]", repr(io_info))
        LOGGER.debug("io_type: [%s]", repr(io_type))
        LOGGER.debug("type(io_type): [%s]", type(io_type))
        raise TypeError("I/O type has not been properly decoded. Should be a string, got: '{!r}'".format(io_type))
    # literal types
    if is_enum or io_type in PACKAGE_LITERAL_TYPES:
        # normalize CWL type aliases to the data-type names understood by pywps
        if io_type == "Any":
            io_type = "anyvalue"
        if io_type == "null":
            io_type = "novalue"
        if io_type in ["int", "integer", "long"]:
            io_type = "integer"
        if io_type in ["float", "double"]:
            io_type = "float"
        # keywords commonly used by I/O
        kw = {
            "identifier": io_name,
            "title": io_info.get("label", ""),
            "abstract": io_info.get("doc", ""),
            "data_type": io_type,
            "mode": io_mode,
        }
        if is_input:
            # avoid storing 'AnyValue' which become more problematic than
            # anything later on when CWL/WPS merging is attempted
            if io_allow is not AnyValue:
                kw["allowed_values"] = io_allow
            kw["default"] = io_info.get("default", None)
            kw["min_occurs"] = io_min_occurs
            kw["max_occurs"] = io_max_occurs
        return io_literal(**kw)
    # complex types
    else:
        # keywords commonly used by I/O
        kw = {
            "identifier": io_name,
            "title": io_info.get("label", io_name),
            "abstract": io_info.get("doc", ""),
        }
        if "format" in io_info:
            io_formats = [io_info["format"]] if isinstance(io_info["format"], str) else io_info["format"]
            kw["supported_formats"] = [get_format(fmt) for fmt in io_formats]
            kw["mode"] = MODE.SIMPLE  # only validate the extension (not file contents)
        else:
            # we need to minimally add 1 format, otherwise empty list is evaluated as None by pywps
            # when "supported_formats" is None, the process's json property raises because of it cannot iterate formats
            kw["supported_formats"] = [DEFAULT_FORMAT]
            kw["mode"] = MODE.NONE  # don't validate anything as default is only raw text
        if is_output:
            if io_type == "Directory":
                kw["as_reference"] = True
            if io_type == "File":
                # NOTE(review): presence of embedded 'contents' appears to toggle value vs. reference output — confirm
                has_contents = io_info.get("contents") is not None
                kw["as_reference"] = not has_contents
        else:
            # note:
            # value of 'data_format' is identified as 'default' input format if specified with `Format`
            # otherwise, `None` makes it automatically use the first one available in 'supported_formats'
            kw["data_format"] = get_field(io_info, "data_format")
            kw["data_format"] = json2wps_field(kw["data_format"], "supported_formats") if kw["data_format"] else None
            kw.update({
                "min_occurs": io_min_occurs,
                "max_occurs": io_max_occurs,
            })
        return io_complex(**kw)
def cwl2json_input_values(data, schema=PROCESS_SCHEMA_OGC):
    # type: (Dict[str, CWL_IO_Value], str) -> JSON
    """
    Converts :term:`CWL` formatted :term:`Job` inputs to corresponding :term:`OGC API - Processes` format.

    :param data: dictionary with inputs formatted as key-value pairs with relevant structure based on :term:`CWL` types.
    :param schema: either ``OGC`` or ``OLD`` format respectively for mapping/listing representations.
    :raises TypeError: if input data is invalid.
    :raises ValueError: if any input value could not be parsed with expected schema.
    :returns: converted inputs for :term:`Job` submission either in ``OGC`` or ``OLD`` format.
    """
    def _is_file(_value):
        # a CWL 'File' is represented as a mapping with an explicit 'class'
        return isinstance(_value, dict) and _value.get("class") == "File"

    def _is_literal(_value):
        return isinstance(_value, (str, int, float, bool))

    if not isinstance(data, dict):
        raise TypeError(f"Invalid CWL input values format must be a dictionary of keys to values. Got [{type(data)}].")
    inputs = {}
    for input_id, input_value in data.items():
        if _is_file(input_value):
            # single file reference
            inputs[input_id] = {"href": input_value.get("path")}
        elif _is_literal(input_value):
            # single literal value
            inputs[input_id] = {"value": input_value}
        elif isinstance(input_value, list) and all(_is_file(val) for val in input_value):
            # multiple file references
            inputs[input_id] = [{"href": val.get("path")} for val in input_value]
        elif isinstance(input_value, list) and all(_is_literal(val) for val in input_value):
            # multiple literal values
            inputs[input_id] = [{"value": val} for val in input_value]
        else:
            raise ValueError(f"Input [{input_id}] value definition could not be parsed: {input_value!s}")
    schema = schema.upper()
    if schema == PROCESS_SCHEMA_OGC:
        return inputs
    if schema != PROCESS_SCHEMA_OLD:
        raise NotImplementedError(f"Unknown conversion format of input values for schema: [{schema}]")
    # OLD (listing) representation: flatten the mapping into one {id, value|href} entry per value
    input_list = []
    for input_id, input_value in inputs.items():
        if isinstance(input_value, list):
            input_key = list(input_value[0])[0]
            for val in input_value:
                input_list.append({"id": input_id, input_key: val[input_key]})
        else:
            input_key = list(input_value)[0]
            input_list.append({"id": input_id, input_key: input_value[input_key]})
    return input_list
def repr2json_input_values(inputs):
    # type: (List[str]) -> List[JSON]
    """
    Converts inputs in string representation to corresponding :term:`JSON` values.

    Expected format is as follows:

    .. code-block:: text

        input_id[:input_type]=input_value[;input_array]

    Where:
        - ``input_id`` represents the target identifier of the input
        - ``input_type`` represents the conversion type, as required
          (includes ``File`` for ``href`` instead of ``value`` key in resulting object)
        - ``input_value`` represents the desired value subject to conversion by ``input_type``
        - ``input_array`` represents any additional values for array-like inputs (``maxOccurs > 1``)

    :param inputs: list of string inputs to parse.
    :return: parsed inputs if successful.
    :raises ValueError: if an input specification cannot be resolved to a valid ``ID[:type]=value`` representation.
    """
    values = []
    for str_input in inputs:
        # split only on the first '=' so that values may themselves contain '='
        # (e.g.: 'File' href pointing at a URL with query parameters)
        str_id, str_val = str_input.split("=", 1)
        str_id_typ = str_id.split(":")
        if len(str_id_typ) == 2:
            str_id, str_typ = str_id_typ
        elif len(str_id_typ) != 1:
            raise ValueError(f"Invalid input value ID representation. Must be 'ID[:TYPE]' for '{str_id!s}'.")
        else:
            str_typ = "string"  # no explicit type: assume plain string
        val_typ = any2cwl_literal_datatype(str_typ)
        if not str_id or (val_typ is null and str_typ not in INPUT_VALUE_TYPE_MAPPING):
            raise ValueError(f"Invalid input value ID representation. "
                             f"Missing or unknown 'ID[:type]' parts after resolution as '{str_id!s}:{str_typ!s}'.")
        map_typ = val_typ if val_typ is not null else str_typ
        # ';'-separated values map to an array input; each element is converted to the resolved Python type
        arr_val = str_val.split(";")
        arr_typ = INPUT_VALUE_TYPE_MAPPING[map_typ]
        arr_val = [arr_typ(val) for val in arr_val]
        val_key = "href" if str_typ in ["file", "File"] else "value"
        values.append({"id": str_id, val_key: arr_val if ";" in str_val else arr_val[0]})
    return values
def any2cwl_literal_datatype(io_type):
    # type: (str) -> Union[str, Type[null]]
    """
    Solves common literal data-type names to supported ones for `CWL`.

    A value that is already a `CWL` literal type resolves to itself (identity), so that `WPS`/`OGC`
    and `CWL` type names can be used interchangeably by callers.

    :param io_type: literal data-type name to resolve.
    :returns: corresponding `CWL` literal type name, or :data:`null` when it cannot be resolved.
    """
    if io_type in ["string", "date", "time", "dateTime", "anyURI"]:
        return "string"
    if io_type in ["scale", "angle", "float", "double"]:
        return "float"
    # include "int" itself so an already-CWL type resolves identically
    # (as "string", "float" and "boolean" already do in the other branches)
    if io_type in ["int", "integer", "long", "positiveInteger", "nonNegativeInteger"]:
        return "int"
    if io_type in ["bool", "boolean"]:
        return "boolean"
    LOGGER.warning("Could not identify a CWL literal data type with [%s].", io_type)
    return null
def any2wps_literal_datatype(io_type, is_value):
    # type: (AnyValueType, bool) -> Union[str, Type[null]]
    """
    Solves common literal data-type names to supported ones for `WPS`.

    Resolution is done by data-type *name* when ``is_value=False``, and by the Python ``type``
    of the provided value when ``is_value=True``.
    """
    if isinstance(io_type, str):
        if not is_value:
            # resolve by name: each WPS data-type has a set of known aliases
            named_aliases = [
                ("string", ["string", "date", "time", "dateTime", "anyURI"]),
                ("float", ["scale", "angle", "float", "double"]),
                ("integer", ["int", "integer", "long", "positiveInteger", "nonNegativeInteger"]),
                ("boolean", ["bool", "boolean"]),
            ]
            for wps_type, aliases in named_aliases:
                if io_type in aliases:
                    return wps_type
            LOGGER.warning("Unknown named literal data type: '%s', using default 'string'. Should be one of: %s",
                           io_type, list(WPS_LITERAL_DATA_TYPE_NAMES))
        # any string value (or unknown name) falls back to plain 'string'
        return "string"
    if is_value:
        # resolve by Python type; 'bool' must be tested before 'int' since it is a subclass of it
        for py_type, wps_type in [(bool, "boolean"), (int, "integer"), (float, "float")]:
            if isinstance(io_type, py_type):
                return wps_type
    return null
def any2json_literal_allowed_value(io_allow):
    # type: (Union[AllowedValue, JSON, str, float, int, bool]) -> Union[JSON, str, str, float, int, bool, Type[null]]
    """
    Converts an ``AllowedValues`` definition from different packages into standardized JSON representation of `OGC-API`.

    Literal values pass through untouched. Mapping definitions (including :class:`AllowedValue`
    converted to JSON) have their range fields remapped to `OGC-API` names, and collapse to the
    bare literal when they represent a single value rather than a range.

    :returns: literal value, JSON range definition, or :data:`null` when nothing remains after filtering.
    """
    if isinstance(io_allow, AllowedValue):
        io_allow = io_allow.json
    if isinstance(io_allow, dict):
        wps_range = {}
        # remap range fields to their OGC-API names; 'pop_found' removes them from 'io_allow' in place
        for field, dest in [
            ("range_minimum", "minimumValue"),
            ("range_maximum", "maximumValue"),
            ("range_spacing", "spacing"),
            ("range_closure", "rangeClosure")
        ]:
            wps_range_value = get_field(io_allow, field, search_variations=True, pop_found=True)
            if wps_range_value is not null:
                wps_range[dest] = wps_range_value
        # in case input was a PyWPS AllowedValue object converted to JSON,
        # extra metadata must be removed/transformed accordingly for literal value
        # (pop order matters: these mutate 'io_allow' before the emptiness check below)
        basic_type = io_allow.pop("type", None)
        allowed_type = io_allow.pop("allowed_type", None)
        allowed_type = allowed_type or basic_type
        allowed_value = io_allow.pop("value", None)
        if allowed_value is not None:
            # note: closure must be ignored for range compare because it defaults to 'close' even for a 'value' type
            range_fields = ["minimumValue", "maximumValue", "spacing"]
            if allowed_type == "value" or not any(field in io_allow for field in range_fields):
                return allowed_value
    if not io_allow:  # empty container
        return null
    return io_allow
def any2json_literal_data_domains(io_info):
    # type: (ANY_IO_Type) -> Union[Type[null], List[JSON]]
    """
    Extracts the allowed value constraints from an I/O definition to generate its literal data domains.

    The result, when applicable, is a list holding a single instance of schema definition
    :class:`weaver.wps_restapi.swagger_definitions.LiteralDataDomainList` with following structure.

    .. code-block:: yaml

        default: bool
        defaultValue: float, int, bool, str
        dataType: {name: string, <reference: url: string>}
        uom: string
        valueDefinition:
          oneOf:
          - string
          - url-string
          - {anyValue: bool}
          - [float, int, bool, str]
          - [{minimum, maximum, spacing, closure}]
    """
    # literal data domains only apply to literal I/O, nothing to generate for complex/bbox types
    io_type = get_field(io_info, "type", search_variations=False)
    if io_type in [WPS_BOUNDINGBOX, WPS_COMPLEX]:
        return null
    data_type_name = any2wps_literal_datatype(
        get_field(io_info, "type", search_variations=True, only_variations=True),
        is_value=False,  # normalize possible data-type name aliases
    )
    # only a single domain is ever generated by this conversion, so it is always flagged as the default one
    # reference / uom: FIXME: unsupported named-reference data-type and Unit of Measure (need examples to test)
    domain = {
        "default": True,
        "dataType": {
            "name": data_type_name,
        }
    }
    default_value = get_field(io_info, "default", search_variations=True)
    if default_value not in [null, None]:
        domain["defaultValue"] = default_value
    value_definition = {"anyValue": get_field(io_info, "any_value", search_variations=True, default=False)}
    allowed_values = get_field(io_info, "allowed_values", search_variations=True)
    if isinstance(allowed_values, list) and len(allowed_values) > 0:
        converted_values = [any2json_literal_allowed_value(io_value) for io_value in allowed_values]
        converted_values = [io_value for io_value in converted_values if io_value is not null]
        if converted_values:
            value_definition = converted_values
    domain["valueDefinition"] = value_definition
    return [domain]
def json2wps_datatype(io_info):
    # type: (JSON_IO_Type) -> str
    """
    Converts a JSON input definition into the corresponding :mod:`pywps` parameters.

    Guesses the literal data-type from I/O JSON information in order to allow creation of the corresponding I/O WPS.
    Defaults to ``string`` if no suitable guess can be accomplished.
    """
    # the 'type' field is consumed here ('pop_found') so it does not interfere with later WPS I/O creation
    io_type = get_field(io_info, "type", search_variations=False, pop_found=True)
    if str(io_type).lower() == WPS_LITERAL:
        io_type = null  # generic 'literal' is not a data-type by itself: must be guessed from the fields below
    # candidate fields to guess from, in decreasing order of reliability;
    # the flag tells whether the guess is done by name or by actual Python value
    io_guesses = [
        (io_type, False),
        (get_field(io_info, "type", search_variations=True), False),
        (get_field(io_info, "default", search_variations=True), True),
        (get_field(io_info, "allowed_values", search_variations=True), True),
        (get_field(io_info, "supported_values", search_variations=True), True),
    ]
    for io_guess, is_value in io_guesses:
        if io_type:
            break  # a previous guess already resolved the data-type
        if isinstance(io_guess, list) and len(io_guess):
            io_guess = io_guess[0]  # sample a single element to guess the type of the collection
        io_type = any2wps_literal_datatype(io_guess, is_value)
    if isinstance(io_type, str):
        return io_type
    LOGGER.warning("Failed literal data-type guess, using default 'string' for I/O [%s].",
                   get_field(io_info, "identifier", search_variations=True))
    return "string"
def json2wps_field(field_info, field_category):
    # type: (JSON, str) -> Any
    """
    Converts an I/O field from a JSON literal data, list, or dictionary to corresponding WPS types.

    Unsupported combinations of category and data type fall through to a warning and return ``None``.

    :param field_info: literal data or information container describing the type to be generated.
    :param field_category: one of ``WPS_FIELD_MAPPING`` keys to indicate how to parse ``field_info``.
    """
    # each category either returns its converted value or falls through to the final warning
    if field_category == "allowed_values":
        return json2wps_allowed_values({"allowed_values": field_info})
    if field_category == "supported_formats":
        if isinstance(field_info, dict):
            return Format(**field_info)
        if isinstance(field_info, str):
            return Format(field_info)
    if field_category == "metadata":
        if isinstance(field_info, WPS_Metadata):
            return field_info
        if isinstance(field_info, dict):
            # normalize the definition and drop fields not understood by the WPS metadata object
            meta = metadata2json(field_info, force=True)
            meta.pop("rel", None)
            return WPS_Metadata(**meta)
        if isinstance(field_info, str):
            return WPS_Metadata(field_info)
    if field_category == "keywords" and isinstance(field_info, list):
        return field_info
    if field_category in ["identifier", "title", "abstract"] and isinstance(field_info, str):
        return field_info
    LOGGER.warning("Field of type '%s' not handled as known WPS field.", field_category)
    return None
def json2wps_allowed_values(io_info):
    # type: (JSON_IO_Type) -> Union[Type[null], List[AllowedValue]]
    """
    Obtains the allowed values constrains for the literal data type from a JSON I/O definition.

    Converts the ``literalDataDomains`` definition into ``allowed_values`` understood by :mod:`pywps`.
    Handles explicit ``allowed_values`` if available and not previously defined by ``literalDataDomains``.

    .. seealso::
        Function :func:`any2json_literal_data_domains` defines generated ``literalDataDomains`` JSON definition.

    :returns: list of resolved :class:`AllowedValue` definitions, or :data:`null` when none apply.
    :raises ValueError: if a provided constraint cannot be interpreted.
    """
    domains = get_field(io_info, "literal_data_domains", search_variations=True)
    allowed = get_field(io_info, "allowed_values", search_variations=True)
    if not domains and isinstance(allowed, list):
        if all(isinstance(value, AllowedValue) for value in allowed):
            return allowed
        if all(isinstance(value, (float, int, str)) for value in allowed):
            return [AllowedValue(value=value) for value in allowed]
        if all(isinstance(value, dict) for value in allowed):
            allowed_values = []
            for value in allowed:
                min_val = get_field(value, "range_minimum", search_variations=True, default=None)
                max_val = get_field(value, "range_maximum", search_variations=True, default=None)
                spacing = get_field(value, "range_spacing", search_variations=True, default=None)
                closure = get_field(value, "range_closure", search_variations=True, default=RANGECLOSURETYPE.CLOSED)
                literal = get_field(value, "value", search_variations=False, default=None)
                # compare against 'None' explicitly rather than truthiness, otherwise legitimate
                # zero/empty definitions (e.g.: 'minimumValue: 0' or 'value: 0') would be silently dropped
                if any(val is not None for val in [min_val, max_val, spacing]):
                    allowed_values.append(AllowedValue(ALLOWEDVALUETYPE.RANGE,
                                                       minval=min_val, maxval=max_val,
                                                       spacing=spacing, range_closure=closure))
                elif literal is not None:
                    allowed_values.append(AllowedValue(ALLOWEDVALUETYPE.VALUE, value=literal))
                # literalDataDomains could be 'anyValue', which is to be ignored here
            return allowed_values
        LOGGER.debug("Cannot parse literal I/O AllowedValues: %s", allowed)
        raise ValueError("Unknown parsing of 'AllowedValues' for value: {!s}".format(allowed))
    if domains:
        for domain in domains:
            values = domain.get("valueDefinition")
            if values:
                allowed = json2wps_allowed_values({"allowed_values": values})
            # stop on first because undefined how to combine multiple
            # no multiple definitions by 'any2json_literal_data_domains' regardless, and not directly handled by pywps
            if allowed:
                return allowed
    return null
def json2wps_io(io_info, io_select):
    # type: (JSON_IO_Type, str) -> WPS_IO_Type
    """
    Converts an I/O from a JSON dict to PyWPS types.

    .. warning::
        The provided ``io_info`` container is modified in place (fields renamed, removed and converted)
        before being unpacked into the corresponding :mod:`pywps` I/O class.

    :param io_info: I/O in JSON dict format.
    :param io_select: :py:data:`WPS_INPUT` or :py:data:`WPS_OUTPUT` to specify desired WPS type conversion.
    :return: corresponding I/O in WPS format.
    :raises PackageTypeError: if the combination of I/O type and selector cannot be converted.
    """
    io_info["identifier"] = get_field(io_info, "identifier", search_variations=True, pop_found=True)

    # map JSON/OGC field names onto the keyword names expected by pywps constructors
    rename = {
        "formats": "supported_formats",
        "minOccurs": "min_occurs",
        "maxOccurs": "max_occurs",
        "dataType": "data_type",
        "defaultValue": "default",
        "supportedValues": "supported_values",
    }
    # fields that pywps constructors do not understand and must be dropped
    remove = [
        "id",
        "workdir",
        "any_value",
        "data_format",
        "data",
        "file",
        "mimetype",
        "mediaType",
        "encoding",
        "schema",
        "asreference",
        "additionalParameters",
    ]
    replace_values = {"unbounded": PACKAGE_ARRAY_MAX_SIZE}

    transform_json(io_info, rename=rename, remove=remove, replace_values=replace_values)

    # convert allowed value objects
    values = json2wps_allowed_values(io_info)
    if values is not null:
        if isinstance(values, list) and len(values) > 0:
            io_info["allowed_values"] = values
        else:
            io_info["allowed_values"] = AnyValue  # noqa

    # convert supported format objects
    formats = get_field(io_info, "supported_formats", search_variations=True, pop_found=True)
    if formats is not null:
        for fmt in formats:
            fmt["mime_type"] = get_field(fmt, "mime_type", search_variations=True, pop_found=True)
            fmt.pop("maximumMegabytes", None)
            # define the 'default' with 'data_format' to be used if explicitly specified from the payload
            if fmt.pop("default", None) is True:
                if get_field(io_info, "data_format") != null:  # if set by previous 'fmt'
                    raise PackageTypeError("Cannot have multiple 'default' formats simultaneously.")
                # use 'data_format' instead of 'default' to avoid overwriting a potential 'default' value
                # field 'data_format' is mapped as 'default' format
                io_info["data_format"] = json2wps_field(fmt, "supported_formats")
        io_info["supported_formats"] = [json2wps_field(fmt, "supported_formats") for fmt in formats]

    # convert metadata objects
    metadata = get_field(io_info, "metadata", search_variations=True, pop_found=True)
    if metadata is not null:
        io_info["metadata"] = [json2wps_field(meta, "metadata") for meta in metadata]

    # convert literal fields specified as is
    for field in ["identifier", "title", "abstract", "keywords"]:
        value = get_field(io_info, field, search_variations=True, pop_found=True)
        if value is not null:
            io_info[field] = json2wps_field(value, field)

    # convert by type, add missing required arguments and
    # remove additional arguments according to each case
    io_type = io_info.pop("type", WPS_COMPLEX)  # only ComplexData doesn't have "type"
    # attempt to identify defined data-type directly in 'type' field instead of 'data_type'
    if io_type not in WPS_ALL_TYPES:
        io_type_guess = any2wps_literal_datatype(io_type, is_value=False)
        if io_type_guess is not null:
            io_type = WPS_LITERAL
            io_info["data_type"] = io_type_guess
    if io_select == WPS_INPUT:
        # membership test of an exact key-value pair in the items view
        # NOTE(review): 'replace_values' above seems to already convert "unbounded" — confirm if this is redundant
        if ("max_occurs", "unbounded") in io_info.items():
            io_info["max_occurs"] = PACKAGE_ARRAY_MAX_SIZE
        if io_type in WPS_COMPLEX_TYPES:
            if "supported_formats" not in io_info:
                io_info["supported_formats"] = [DEFAULT_FORMAT]
            io_info.pop("data_type", None)
            io_info.pop("allowed_values", None)
            io_info.pop("supported_values", None)
            return ComplexInput(**io_info)
        if io_type == WPS_BOUNDINGBOX:
            io_info.pop("supported_formats", None)
            io_info.pop("supportedCRS", None)
            return BoundingBoxInput(**io_info)
        if io_type == WPS_LITERAL:
            io_info.pop("data_format", None)
            io_info.pop("supported_formats", None)
            io_info["data_type"] = json2wps_datatype(io_info)
            allowed_values = json2wps_allowed_values(io_info)
            if allowed_values:
                io_info["allowed_values"] = allowed_values
            else:
                io_info.pop("allowed_values", None)
            io_info.pop("literalDataDomains", None)
            return LiteralInput(**io_info)
    elif io_select == WPS_OUTPUT:
        # outputs do not support occurrence counts, value constraints nor defaults
        io_info.pop("min_occurs", None)
        io_info.pop("max_occurs", None)
        io_info.pop("allowed_values", None)
        io_info.pop("data_format", None)
        io_info.pop("default", None)
        if io_type in WPS_COMPLEX_TYPES:
            io_info.pop("supported_values", None)
            return ComplexOutput(**io_info)
        if io_type == WPS_BOUNDINGBOX:
            io_info.pop("supported_formats", None)
            return BoundingBoxOutput(**io_info)
        if io_type == WPS_LITERAL:
            io_info.pop("supported_formats", None)
            io_info["data_type"] = json2wps_datatype(io_info)
            allowed_values = json2wps_allowed_values(io_info)
            if allowed_values:
                io_info["allowed_values"] = allowed_values
            else:
                io_info.pop("allowed_values", None)
            io_info.pop("literalDataDomains", None)
            return LiteralOutput(**io_info)
    raise PackageTypeError("Unknown conversion from dict to WPS type (type={0}, mode={1}).".format(io_type, io_select))
def wps2json_io(io_wps):
    # type: (WPS_IO_Type) -> JSON_IO_Type
    """
    Converts a PyWPS I/O into a dictionary based version with keys corresponding to standard names (WPS 2.0).

    The JSON produced by :mod:`pywps` is transformed in place: fields are renamed to their WPS 2.0
    equivalents, occurrence bounds are stringified, and format/literal metadata is normalized.

    :param io_wps: a :mod:`pywps` I/O instance that exposes a ``json`` property.
    :raises PackageTypeError: if the object is not a ``BasicIO`` or has no ``json`` property.
    """
    if not isinstance(io_wps, BasicIO):
        raise PackageTypeError("Invalid type, expected 'BasicIO', got: [{0!r}] '{1!r}'".format(type(io_wps), io_wps))
    if not hasattr(io_wps, "json"):
        raise PackageTypeError("Invalid type definition expected to have a 'json' property.")

    io_wps_json = io_wps.json   # noqa

    # map pywps field names onto WPS 2.0 JSON names
    rename = {
        "identifier": "id",
        "abstract": "description",
        "supported_formats": "formats",
        "mime_type": "mediaType",
        "min_occurs": "minOccurs",
        "max_occurs": "maxOccurs",
    }
    replace_values = {
        PACKAGE_ARRAY_MAX_SIZE: "unbounded",
    }
    replace_func = {
        "maxOccurs": str,
        "minOccurs": str,
    }

    transform_json(io_wps_json, rename=rename, replace_values=replace_values, replace_func=replace_func)

    # in some cases (Complex I/O), 'as_reference=True' causes "type" to be overwritten, revert it back
    if "type" in io_wps_json and io_wps_json["type"] == WPS_REFERENCE:
        io_wps_json["type"] = WPS_COMPLEX

    # minimum requirement of 1 format object which defines mime-type
    if io_wps_json["type"] == WPS_COMPLEX:
        # FIXME: should we store 'None' in db instead of empty string when missing "encoding", "schema", etc. ?
        if "formats" not in io_wps_json or not len(io_wps_json["formats"]):
            io_wps_json["formats"] = [DEFAULT_FORMAT.json]
        # apply the same field renaming to each nested format definition
        for io_format in io_wps_json["formats"]:
            transform_json(io_format, rename=rename, replace_values=replace_values, replace_func=replace_func)

        # set 'default' format if it matches perfectly, or if only mime-type matches and it is the only available one
        # (this avoid 'encoding' possibly not matching due to CWL not providing this information)
        io_default = get_field(io_wps_json, "default", search_variations=True)
        for io_format in io_wps_json["formats"]:
            io_format["default"] = (io_default != null and is_equal_formats(io_format, io_default))
        if io_default and len(io_wps_json["formats"]) == 1 and not io_wps_json["formats"][0]["default"]:
            # fallback: single available format whose mime-type alone matches the default definition
            io_default_mime_type = get_field(io_default, "mime_type", search_variations=True)
            io_single_fmt_mime_type = get_field(io_wps_json["formats"][0], "mime_type", search_variations=True)
            io_wps_json["formats"][0]["default"] = (io_default_mime_type == io_single_fmt_mime_type)

    elif io_wps_json["type"] == WPS_BOUNDINGBOX:
        pass  # FIXME: BoundingBox not implemented (https://github.com/crim-ca/weaver/issues/51)

    else:  # literal
        domains = any2json_literal_data_domains(io_wps_json)
        if domains:
            io_wps_json["literalDataDomains"] = domains

    return io_wps_json
def wps2json_job_payload(wps_request, wps_process):
    # type: (WPSRequest, ProcessWPS) -> JSON
    """
    Converts the input and output values of a :mod:`pywps` WPS ``Execute`` request to corresponding WPS-REST job.

    The inputs and outputs must be parsed from XML POST payload or KVP GET query parameters, and converted to data
    container defined by :mod:`pywps` based on the process definition.
    """
    payload = {
        "inputs": [],
        "outputs": [],
        "response": EXECUTE_RESPONSE_DOCUMENT,
        "mode": EXECUTE_MODE_ASYNC,
    }
    # each entry of the parsed WPS request inputs is a list of occurrences sharing the same ID
    for input_list in wps_request.inputs.values():
        input_id = get_any_id(input_list[0])
        for input_item in input_list:
            if input_item.get("data"):
                payload["inputs"].append({"id": input_id, "data": input_item["data"]})
            elif input_item.get("href"):
                payload["inputs"].append({"id": input_id, "href": input_item["href"]})
    requested_outputs = list(wps_request.outputs)
    for process_output in wps_process.outputs:
        output_id = process_output.identifier
        is_reference = isinstance(process_output, ComplexOutput)
        if output_id in requested_outputs:
            job_output = wps_request.outputs[output_id]
        else:
            # output not explicitly requested: generate a minimal definition for it
            job_output = {"identifier": output_id, "asReference": str(is_reference).lower()}
        # complex outputs are transmitted by reference, anything else by value
        job_output["transmissionMode"] = (
            EXECUTE_TRANSMISSION_MODE_REFERENCE if is_reference else EXECUTE_TRANSMISSION_MODE_VALUE
        )
        job_output["id"] = output_id
        payload["outputs"].append(job_output)
    return payload
def get_field(io_object, field, search_variations=False, only_variations=False, pop_found=False, default=null):
    # type: (Any, str, bool, bool, bool, Any) -> Any
    """
    Looks up a field by name within various I/O object types.

    The sentinel :py:data:`null` (rather than literal ``None``) indicates 'no such field', so that an
    existing field explicitly set to ``None`` can still be distinguished from a missing one. If the field
    cannot be found, the provided :paramref:`default` is returned instead.

    When :paramref:`search_variations` is enabled and the direct lookup fails, every alternative name
    registered under :paramref:`field` in :data:`WPS_FIELD_MAPPING` is attempted in listed order, and the
    first successful match wins.

    :param io_object: Any I/O representation, either as a class instance or JSON container.
    :param field: Name of the field to look for, either as property or key name based on input object type.
    :param search_variations: If enabled, also try every known variation of the field name until one matches.
    :param only_variations: If enabled, skip the direct field lookup and try only the name variations.
    :param pop_found: If enabled, remove the matched entry from the object (mapping containers only).
    :param default: Alternative default value to return if no match could be found.
    :returns: Matched value (including search variations if enabled), or ``default``.
    """
    skip_direct_lookup = search_variations and only_variations
    if not skip_direct_lookup:
        if isinstance(io_object, dict):
            found = io_object.get(field, null)
            if found is not null:
                if pop_found:
                    io_object.pop(field)
                return found
        else:
            found = getattr(io_object, field, null)
            if found is not null:
                return found
    if search_variations:
        for alt_field in WPS_FIELD_MAPPING.get(field, []):
            found = get_field(io_object, alt_field, pop_found=pop_found)
            if found is not null:
                return found
    return default
def set_field(io_object, field, value, force=False):
    # type: (Union[ANY_IO_Type, ANY_Format_Type], str, Any, bool) -> None
    """
    Sets a field by name into various I/O object types.

    Values equal to the :py:data:`null` sentinel are considered `invalid` and are silently skipped,
    unless ``force=True`` explicitly requests the assignment anyway.
    """
    if value is null and not force:
        return  # guard: never write the 'null' sentinel unless explicitly forced
    if isinstance(io_object, dict):
        io_object[field] = value
    else:
        setattr(io_object, field, value)
def _are_different_and_set(item1, item2):
    # type: (Any, Any) -> bool
    """
    Verifies that two items are both set and hold different "representative" values.

    Returns ``True`` only when neither item is :py:data:`null` and their values differ. String-like
    values (bytes/unicode) are compared by their textual representation so that XML/JSON sources and
    different Python string types agree; all other types rely on the usual equality operator.
    """
    if item1 is null or item2 is null:
        return False
    try:
        if item1 == item2:
            return False
    except AttributeError:
        # some objects (eg: 'Format') implement '__eq__' via attribute access on the other operand,
        # which can raise when compared against a differently-typed item; treat that as 'not different'
        return False
    str_like1 = isinstance(item1, (str, bytes))
    str_like2 = isinstance(item2, (str, bytes))
    if str_like1 and str_like2:
        # compare visual representations to bridge bytes/unicode variants
        return bytes2str(item1) != bytes2str(item2)
    return True
def is_equal_formats(format1, format2):
    # type: (Union[Format, JSON], Union[Format, JSON]) -> bool
    """
    Verifies that two formats match on both their media-type and encoding, with all fields defined.
    """
    mime1 = get_field(format1, "mime_type", search_variations=True)
    mime2 = get_field(format2, "mime_type", search_variations=True)
    enc1 = get_field(format1, "encoding", search_variations=True)
    enc2 = get_field(format2, "encoding", search_variations=True)
    # every field must be resolved (not the 'null' sentinel) and pairwise equal
    return (
        mime1 == mime2
        and enc1 == enc2
        and all(field != null for field in (mime1, mime2, enc1, enc2))
    )
def normalize_ordered_io(io_section, order_hints=None):
    # type: (JSON_IO_ListOrMap, Optional[JSON_IO_ListOrMap]) -> List[JSON]
    """
    Reorders and converts I/O from any representation (:class:`dict` or :class:`list`) considering given ordering hints.

    The I/O are always returned as a :class:`list` of dictionary definitions, since a sequence can enforce
    a specific order while a generic mapping cannot. This keeps the I/O order stable when written to file
    and reloaded afterwards, regardless of each server and/or library's mapping implementation.

    Failing to fully resolve an order (e.g.: missing hints for an unordered dict) never breaks parsing;
    the adjustment is purely *cosmetic* to keep I/O listings readable and consistent across the various
    :term:`Application Package` and :term:`Process` reporting formats.

    .. note::
        When defined as a dictionary, an :class:`OrderedDict` is expected as input to ensure preserved field order.
        Prior to Python 3.7 or CPython 3.5, preserved order is not guaranteed for *builtin* :class:`dict`.
        In this case the :paramref:`order_hints` is required to ensure same order.

    :param io_section: Definition contained under the ``inputs`` or ``outputs`` fields.
    :param order_hints: Optional/partial I/O definitions hinting an order to sort unsorted-dict I/O.
    :returns: I/O specified as list of dictionary definitions with preserved order (as best as possible).
    """
    if isinstance(io_section, list):
        return io_section  # already an order-preserving representation
    needs_hint_sort = (
        isinstance(io_section, dict)
        and not isinstance(io_section, OrderedDict)
        and bool(order_hints)
    )
    if needs_hint_sort:
        # hints themselves can be a mapping; convert them to a list first
        if isinstance(order_hints, dict):
            order_hints = [dict(id=hint_id, **hint_fields) for hint_id, hint_fields in order_hints.items()]
        # pre-order the I/O that hints can resolve, then append whatever remains
        remaining = deepcopy(io_section)
        ordered_io = OrderedDict()
        for hint in order_hints:
            hint_id = get_field(hint, "identifier", search_variations=True)
            if hint_id and hint_id in remaining:  # ignore hint where ID could not be resolved
                ordered_io[hint_id] = remaining.pop(hint_id)
        for left_id in remaining:
            ordered_io[left_id] = remaining[left_id]
    else:
        ordered_io = io_section
    io_list = []
    for io_id, io_value in ordered_io.items():
        # a value can be a bare type string or a detailed dictionary at this point;
        # always expand to a dictionary so list items share a uniform structure
        io_def = {"type": io_value} if isinstance(io_value, str) else io_value
        io_def["id"] = io_id
        io_list.append(io_def)
    return io_list
def merge_io_formats(wps_formats, cwl_formats):
    # type: (List[ANY_Format_Type], List[ANY_Format_Type]) -> List[ANY_Format_Type]
    """
    Merges I/O format definitions by matching ``mime-type`` field.

    In case of conflict, preserve the WPS version which can be more detailed (for example, by specifying ``encoding``).

    Verifies if ``DEFAULT_FORMAT_MISSING`` was written to a single `CWL` format caused by a lack of any value
    provided as input. In this case, *only* `WPS` formats are kept.

    In the event that ``DEFAULT_FORMAT_MISSING`` was written to the `CWL` formats and that no `WPS` format was
    specified, the :py:data:`DEFAULT_FORMAT` is returned.

    :raises PackageTypeError: if inputs are invalid format lists
    """
    valid_containers = (list, tuple, set)
    if not (isinstance(wps_formats, valid_containers) and isinstance(cwl_formats, valid_containers)):
        raise PackageTypeError("Cannot merge formats definitions with invalid lists.")
    if not wps_formats:
        wps_formats = [DEFAULT_FORMAT]
    if len(cwl_formats) == 1 and get_field(cwl_formats[0], DEFAULT_FORMAT_MISSING) is True:
        # CWL held only the placeholder format: rely solely on WPS definitions
        return wps_formats
    wps_by_type = OrderedDict((get_field(fmt, "mime_type", search_variations=True), fmt) for fmt in wps_formats)
    cwl_by_type = OrderedDict((get_field(fmt, "mime_type", search_variations=True), fmt) for fmt in cwl_formats)
    # keep CWL ordering, but prefer the (potentially more detailed) WPS entry on shared media-types
    merged = [wps_by_type.get(mime, cwl_fmt) for mime, cwl_fmt in cwl_by_type.items()]
    # then append the formats only known to WPS
    merged.extend(wps_by_type[mime] for mime in set(wps_by_type) - set(cwl_by_type))
    return merged
def merge_package_io(wps_io_list, cwl_io_list, io_select):
    # type: (List[ANY_IO_Type], List[WPS_IO_Type], str) -> List[WPS_IO_Type]
    """
    Merges corresponding parameters of different I/O definitions from CWL/WPS sources.

    Update I/O definitions to use for process creation and returned by GetCapabilities, DescribeProcess.
    If WPS I/O definitions where provided during deployment, update `CWL-to-WPS` converted I/O with the WPS I/O
    complementary details. Otherwise, provide minimum field requirements that can be retrieved from CWL definitions.
    Removes any deployment WPS I/O definitions that don't match any CWL I/O by ID.
    Adds missing deployment WPS I/O definitions using expected CWL I/O IDs.

    :param wps_io_list: list of WPS I/O (as json) passed during process deployment.
    :param cwl_io_list: list of CWL I/O converted to WPS-like I/O for counter-validation.
    :param io_select: :py:data:`WPS_INPUT` or :py:data:`WPS_OUTPUT` to specify desired WPS type conversion.
    :returns: list of validated/updated WPS I/O for the process matching CWL I/O requirements.
    """
    if not isinstance(cwl_io_list, list):
        raise PackageTypeError("CWL I/O definitions must be provided, empty list if none required.")
    if not wps_io_list:
        wps_io_list = list()
    # index both sources by identifier; deep-copied so merging never mutates the caller's definitions
    wps_io_dict = OrderedDict((get_field(wps_io, "identifier", search_variations=True), deepcopy(wps_io))
                              for wps_io in wps_io_list)
    cwl_io_dict = OrderedDict((get_field(cwl_io, "identifier", search_variations=True), deepcopy(cwl_io))
                              for cwl_io in cwl_io_list)
    missing_io_list = [cwl_io for cwl_io in cwl_io_dict if cwl_io not in wps_io_dict]  # preserve ordering
    updated_io_list = list()
    # WPS I/O by id not matching any converted CWL->WPS I/O are discarded
    # otherwise, evaluate provided WPS I/O definitions and find potential new information to be merged
    for cwl_id in cwl_io_dict:
        cwl_io = cwl_io_dict[cwl_id]
        updated_io_list.append(cwl_io)
        if cwl_id in missing_io_list:
            continue  # missing WPS I/O are inferred only using CWL->WPS definitions
        # enforce expected CWL->WPS I/O required parameters
        cwl_io_json = cwl_io.json
        wps_io_json = wps_io_dict[cwl_id]
        cwl_identifier = get_field(cwl_io_json, "identifier", search_variations=True)
        # NOTE(review): despite the variable name, the title is read from the *WPS* JSON —
        # presumably to preserve a deploy-provided title over the CWL one; confirm intent
        cwl_title = get_field(wps_io_json, "title", search_variations=True)
        wps_io_json.update({
            "identifier": cwl_identifier,
            "title": cwl_title if cwl_title is not null else cwl_identifier
        })
        # apply type if WPS deploy definition was partial but can be retrieved from CWL
        wps_io_json.setdefault("type", get_field(cwl_io_json, "type", search_variations=True))
        # fill missing WPS min/max occurs in 'provided' json to avoid overwriting resolved CWL values by WPS default '1'
        # with 'default' field, this default '1' causes erroneous result when 'min_occurs' should be "0"
        # with 'array' type, this default '1' causes erroneous result when 'max_occurs' should be "unbounded"
        cwl_min_occurs = get_field(cwl_io_json, "min_occurs", search_variations=True)
        cwl_max_occurs = get_field(cwl_io_json, "max_occurs", search_variations=True)
        wps_min_occurs = get_field(wps_io_json, "min_occurs", search_variations=True)
        wps_max_occurs = get_field(wps_io_json, "max_occurs", search_variations=True)
        if wps_min_occurs == null and cwl_min_occurs != null:
            wps_io_json["min_occurs"] = cwl_min_occurs
        if wps_max_occurs == null and cwl_max_occurs != null:
            wps_io_json["max_occurs"] = cwl_max_occurs
        # rebuild a real WPS I/O object from the merged JSON definition
        wps_io = json2wps_io(wps_io_json, io_select)
        # Retrieve any complementing fields (metadata, keywords, etc.) passed as WPS input.
        # Enforce some additional fields to keep value specified by WPS if applicable.
        # These are only added here rather that 'WPS_FIELD_MAPPING' to avoid erroneous detection by other functions.
        #   - Literal: 'default' value defined by 'data'
        #   - Complex: 'default' format defined by 'data_format'
        # (see function 'json2wps_io' for detail)
        for field_type in list(WPS_FIELD_MAPPING) + ["data", "data_format"]:
            cwl_field = get_field(cwl_io, field_type)
            wps_field = get_field(wps_io, field_type)
            # override provided formats if different (keep WPS), or if CWL->WPS was missing but is provided by WPS
            if _are_different_and_set(wps_field, cwl_field) or (wps_field is not null and cwl_field is null):
                # list of formats are updated by comparing format items since information can be partially complementary
                if field_type in ["supported_formats"]:
                    wps_field = merge_io_formats(wps_field, cwl_field)
                # default 'data_format' must be one of the 'supported_formats'
                # avoid setting something invalid in this case, or it will cause problem after
                # note: 'supported_formats' must have been processed before
                if field_type == "data_format":
                    wps_fmts = get_field(updated_io_list[-1], "supported_formats", search_variations=False, default=[])
                    if wps_field not in wps_fmts:
                        continue
                set_field(updated_io_list[-1], field_type, wps_field)
    return updated_io_list
|
nilq/baby-python
|
python
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence.graphics"
__author__ = "Patrick Kunzmann"
__all__ = ["plot_dendrogram"]
import numpy as np
def plot_dendrogram(axes, tree, orientation="left", use_distances=True,
                    labels=None, label_size=None, color="black",
                    show_distance=True, **kwargs):
    """
    Plot a dendrogram from a (phylogenetic) tree.

    Parameters
    ----------
    axes : Axes
        The *Matplotlib* axes to draw the dendrogram onto.
    tree : Tree
        The tree to be visualized
    orientation : {'left', 'right', 'bottom', 'top'}, optional
        The position of the root node in the plot
    use_distances : bool, optional
        If true, the `distance` attribute of the :class:`TreeNode`
        objects are used as distance measure.
        Otherwise the topological distance is used.
    labels : list of str, optional
        The leaf node labels.
        The label of a leaf node is the entry at the position of its
        `index` attribute.
    label_size : float, optional
        The font size of the labels
    color : tuple or str, optional
        A *Matplotlib* compatible color, that is used to draw the lines
        of the dendrogram.
    show_distance : bool, optional
        If true, the distance from the root is shown on the
        corresponding axis.
    **kwargs
        Additional parameters that are used to draw the dendrogram
        lines.
    """
    indices = tree.root.get_indices()
    # Map each leaf index to its display position along the label axis
    # (assumes 'indices' is a permutation of 0..n-1, so iterating its
    # values covers every position exactly once — TODO confirm upstream)
    leaf_dict = {indices[i] : i for i in indices}
    # Required for setting the plot limits
    max_distance = 0

    def _plot_node(node, distance):
        """
        Recursively draw the lines from the given node to its children.

        Parameters
        ----------
        node : TreeNode
            the node whose subtree is drawn
        distance : float
            the distance of the node from root

        Returns
        -------
        pos : float
            the position of the node on the 'label' axis
        """
        # The term 'distance'
        # refers to positions along the 'distance' axis
        # the term 'pos'
        # refers to positions along the other axis
        nonlocal max_distance
        # track the furthest distance seen, used later for the axis limits
        if max_distance < distance:
            max_distance = distance
        if node.is_leaf():
            # No children -> no line can be drawn
            return leaf_dict[node.index]
        else:
            children = node.children
            if use_distances:
                child_distances = [distance + c.distance for c in children]
            else:
                # Use topologic distance of children to this node,
                # which is always 1
                child_distances = [distance + 1 for c in children]
            # Recurse into children first to obtain their label-axis positions
            child_pos = [
                _plot_node(child, child_distance)
                for child, child_distance in zip(children, child_distances)
            ]
            # Position of this node is in the center of the child nodes
            center_pos = sum(child_pos) / len(child_pos)
            if orientation in ["left", "right"]:
                # Line connecting the childs
                axes.plot(
                    [distance, distance], [child_pos[0], child_pos[-1]],
                    color=color, marker="None", **kwargs
                )
                # Lines depicting the distances of the childs
                for child_dist, pos in zip(child_distances, child_pos):
                    axes.plot(
                        [distance, child_dist], [pos, pos],
                        color=color, marker="None", **kwargs
                    )
            elif orientation in ["bottom", "top"]:
                # Line connecting the childs
                axes.plot(
                    [child_pos[0], child_pos[-1]], [distance, distance],
                    color=color, marker="None", **kwargs
                )
                # Lines depicting the distances of the childs
                for child_dist, pos in zip(child_distances, child_pos):
                    axes.plot(
                        [pos, pos], [distance, child_dist],
                        color=color, marker="None", **kwargs
                    )
            else:
                raise ValueError(f"'{orientation}' is not a valid orientation")
            return center_pos

    _plot_node(tree.root, 0)

    if labels is not None:
        # Sort labels using the order of indices in the tree
        # A list cannot be directly indexed with a list,
        # hence the conversion to a ndarray
        labels = np.array(labels)[indices].tolist()
    else:
        labels = [str(i) for i in indices]

    # The distance axis does not start at 0,
    # since the root line would not properly rendered
    # Hence the limit is set a to small fraction of the entire axis
    # beyond 0
    zero_limit = -0.01 * max_distance
    # Configure axis limits, ticks and labels for the chosen orientation;
    # the four branches differ only in which axis carries distance vs labels
    # and in the direction the distance axis grows
    if orientation == "left":
        axes.set_xlim(zero_limit, max_distance)
        axes.set_ylim(-1, len(indices))
        axes.set_yticks(np.arange(0, len(indices)))
        axes.set_yticklabels(labels)
        axes.yaxis.set_tick_params(
            left=False, right=False, labelleft=False, labelright=True,
            labelsize=label_size
        )
        axes.xaxis.set_tick_params(
            bottom=True, top=False, labelbottom=show_distance, labeltop=False,
            labelsize=label_size
        )
    elif orientation == "right":
        axes.set_xlim(max_distance, zero_limit)
        axes.set_ylim(-1, len(indices))
        axes.set_yticks(np.arange(0, len(indices)))
        axes.set_yticklabels(labels)
        axes.yaxis.set_tick_params(
            left=False, right=False, labelleft=True, labelright=False,
            labelsize=label_size
        )
        axes.xaxis.set_tick_params(
            bottom=True, top=False, labelbottom=show_distance, labeltop=False,
            labelsize=label_size
        )
    elif orientation == "bottom":
        axes.set_ylim(zero_limit, max_distance)
        axes.set_xlim(-1, len(indices))
        axes.set_xticks(np.arange(0, len(indices)))
        axes.set_xticklabels(labels)
        axes.xaxis.set_tick_params(
            bottom=False, top=False, labelbottom=False, labeltop=True,
            labelsize=label_size
        )
        axes.yaxis.set_tick_params(
            left=True, right=False, labelleft=show_distance, labelright=False,
            labelsize=label_size
        )
    elif orientation == "top":
        axes.set_ylim(max_distance, zero_limit)
        axes.set_xlim(-1, len(indices))
        axes.set_xticks(np.arange(0, len(indices)))
        axes.set_xticklabels(labels)
        axes.xaxis.set_tick_params(
            bottom=False, top=False, labelbottom=True, labeltop=False,
            labelsize=label_size
        )
        axes.yaxis.set_tick_params(
            left=True, right=False, labelleft=show_distance, labelright=False,
            labelsize=label_size
        )
    else:
        raise ValueError(f"'{orientation}' is not a valid orientation")
    # Hide the plot frame; only the dendrogram lines and ticks remain visible
    axes.set_frame_on(False)
|
nilq/baby-python
|
python
|
import boto3
from trp import Document
# Document
# Input document location; the demo image must already exist in this S3 bucket.
s3BucketName = "ki-textract-demo-docs"
documentName = "expense.png"

# Amazon Textract client (region/credentials are resolved from the environment)
textract = boto3.client('textract')

# Call Amazon Textract: synchronous analysis of a single S3-hosted document,
# requesting table extraction only ("TABLES" feature).
response = textract.analyze_document(
    Document={
        'S3Object': {
            'Bucket': s3BucketName,
            'Name': documentName
        }
    },
    FeatureTypes=["TABLES"])

# Wrap the raw API response in the 'trp' helper for page/table/cell traversal.
doc = Document(response)
def isFloat(input):
    """Return True when *input* can be parsed as a float, False otherwise."""
    try:
        float(input)
    except ValueError:
        # non-numeric text (eg: header cell, empty string)
        return False
    else:
        return True
# Walk every table cell: dump it, remember each row's item name (column 0),
# and flag rows whose amount (column 4) exceeds $1000.
warning = ""
for page in doc.pages:
    for table in page.tables:
        for r, row in enumerate(table.rows):
            itemName = ""
            for c, cell in enumerate(row.cells):
                print("Table[{}][{}] = {}".format(r, c, cell.text))
                if c == 0:
                    # first column holds the item name for this row
                    itemName = cell.text
                elif c == 4 and isFloat(cell.text) and float(cell.text) > 1000:
                    warning += "{} is greater than $1000.".format(itemName)
if warning:
    print("\nReview needed:\n====================\n" + warning)
|
nilq/baby-python
|
python
|
from itertools import chain
import attr
@attr.s(slots=True, cmp=False)
class KmerDataCollection(object):
    """Aggregates multiple kmer data objects describing the same kmer across colors."""

    _kmers_data = attr.ib()
    num_colors = attr.ib(init=False)  # total number of colors across all underlying kmers
    _coverage = attr.ib(None)  # lazily-built tuple of per-color coverage values
    _edges = attr.ib(None)  # lazily-built list of per-color edge sets
    raw_kmer = attr.ib(None)

    def __attrs_post_init__(self):
        # all collected kmer data must describe the same kmer string and size
        assert len(self._kmers_data) > 0
        first = self._kmers_data[0]
        assert all((first.kmer == k.kmer for k in self._kmers_data))
        assert all((first.kmer_size == k.kmer_size for k in self._kmers_data))
        self.num_colors = sum((k.num_colors for k in self._kmers_data))

    @property
    def kmer(self):
        # the kmer string shared by all underlying kmer data objects
        return self._kmers_data[0].kmer

    @property
    def kmer_size(self):
        # the kmer size shared by all underlying kmer data objects
        return self._kmers_data[0].kmer_size

    @property
    def coverage(self):
        """Concatenated coverage values of all underlying kmers, cached as a tuple."""
        if self._coverage is None:
            # fix: removed leftover debug 'print(coverage)' that spammed stdout on
            # first access; also build the tuple directly instead of via a temporary list
            self._coverage = tuple(chain.from_iterable(k.coverage for k in self._kmers_data))
        return self._coverage

    @property
    def edges(self):
        """Concatenated edge data of all underlying kmers, cached as a list."""
        if self._edges is None:
            self._edges = list(chain.from_iterable(k.edges for k in self._kmers_data))
        return self._edges

    def get_raw_kmer(self):
        """Return the first available raw kmer; raise ValueError if none of the kmers has one."""
        for kmer in self._kmers_data:
            try:
                return kmer.get_raw_kmer()
            except AttributeError:
                pass
        raise ValueError('At least one kmer should have a raw kmer')
|
nilq/baby-python
|
python
|
__author__ = "Frédéric BISSON"
__copyright__ = "Copyright 2022, Frédéric BISSON"
__credits__ = ["Frédéric BISSON"]
__license__ = "mit"
__maintainer__ = "Frédéric BISSON"
__email__ = "zigazou@protonmail.com"
from dietpdf.info.decode_objstm import decode_objstm
def create_stream():
    """Build a sample decoded /ObjStm payload: a header line of (obj_num, offset)
    pairs followed by three serialized font-related PDF objects."""
    header = b"11 0 12 54 13 107"
    pdf_objects = [
        b"<</Type/Font/Subtype/TrueType/FontDescriptor 12 0 R>>",
        b"<</Type/FontDescriptor/Ascent 891/FontFile2 22 0 R>>",
        b"<</Type/Font/Subtype/Type0/ToUnicode 10 0 R>>",
    ]
    return b"\n".join([header] + pdf_objects)
def test_decode_objstm():
    """decode_objstm must split the sample stream into three objects numbered 11, 12 and 13."""
    expected_obj_nums = [11, 12, 13]
    objects = decode_objstm(create_stream(), 18)
    assert len(objects) == len(expected_obj_nums)
    for pdf_object, expected_num in zip(objects, expected_obj_nums):
        assert pdf_object.obj_num == expected_num
|
nilq/baby-python
|
python
|
import json
import os
from unittest.mock import patch
from django.conf import settings
from django.utils import timezone as djangotime
from model_bakery import baker
from autotasks.models import AutomatedTask
from tacticalrmm.test import TacticalTestCase
class TestAPIv3(TacticalTestCase):
    """Tests for the agent-facing ``/api/v3/`` endpoints."""

    def setUp(self):
        self.authenticate()
        self.setup_coresettings()
        self.agent = baker.make_recipe("agents.agent")

    def test_get_checks(self):
        """Checkrunner list honors per-check run intervals and last-run timestamps."""
        url = f"/api/v3/{self.agent.agent_id}/checkrunner/"

        # add a check
        check1 = baker.make_recipe("checks.ping_check", agent=self.agent)
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.data["check_interval"], self.agent.check_interval)  # type: ignore
        self.assertEqual(len(r.data["checks"]), 1)  # type: ignore

        # override check run interval
        check2 = baker.make_recipe(
            "checks.ping_check", agent=self.agent, run_interval=20
        )

        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.data["check_interval"], 20)  # type: ignore
        self.assertEqual(len(r.data["checks"]), 2)  # type: ignore

        # Set last_run on both checks and should return an empty list
        check1.last_run = djangotime.now()
        check1.save()
        check2.last_run = djangotime.now()
        check2.save()

        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.data["check_interval"], 20)  # type: ignore
        self.assertFalse(r.data["checks"])  # type: ignore

        # set last_run greater than interval
        check1.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
        check1.save()
        check2.last_run = djangotime.now() - djangotime.timedelta(seconds=200)
        check2.save()

        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.data["check_interval"], 20)  # type: ignore
        # fix: 'assertEquals' is a deprecated alias of 'assertEqual' (removed in Python 3.12)
        self.assertEqual(len(r.data["checks"]), 2)  # type: ignore

        url = "/api/v3/Maj34ACb324j234asdj2n34kASDjh34-DESKTOPTEST123/checkrunner/"
        r = self.client.get(url)
        self.assertEqual(r.status_code, 404)

        self.check_not_authenticated("get", url)

    def test_sysinfo(self):
        """Agents can PATCH their WMI system inventory."""
        # TODO replace this with golang wmi sample data

        url = "/api/v3/sysinfo/"
        with open(
            os.path.join(
                settings.BASE_DIR, "tacticalrmm/test_data/wmi_python_agent.json"
            )
        ) as f:
            wmi_py = json.load(f)

        payload = {"agent_id": self.agent.agent_id, "sysinfo": wmi_py}

        r = self.client.patch(url, payload, format="json")
        self.assertEqual(r.status_code, 200)

        self.check_not_authenticated("patch", url)

    def test_checkrunner_interval(self):
        """Reported check interval is the smallest per-check override, floored at 15s."""
        url = f"/api/v3/{self.agent.agent_id}/checkinterval/"
        r = self.client.get(url, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            r.json(),
            {"agent": self.agent.pk, "check_interval": self.agent.check_interval},
        )

        # add check to agent with check interval set
        check = baker.make_recipe(
            "checks.ping_check", agent=self.agent, run_interval=30
        )

        r = self.client.get(url, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            r.json(),
            {"agent": self.agent.pk, "check_interval": 30},
        )

        # minimum check run interval is 15 seconds
        check = baker.make_recipe("checks.ping_check", agent=self.agent, run_interval=5)

        r = self.client.get(url, format="json")
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            r.json(),
            {"agent": self.agent.pk, "check_interval": 15},
        )

    def test_run_checks(self):
        """Runchecks endpoint returns every check regardless of its interval."""
        # force run all checks regardless of interval
        agent = baker.make_recipe("agents.online_agent")
        baker.make_recipe("checks.ping_check", agent=agent)
        baker.make_recipe("checks.diskspace_check", agent=agent)
        baker.make_recipe("checks.cpuload_check", agent=agent)
        baker.make_recipe("checks.memory_check", agent=agent)
        baker.make_recipe("checks.eventlog_check", agent=agent)
        for _ in range(10):
            baker.make_recipe("checks.script_check", agent=agent)

        url = f"/api/v3/{agent.agent_id}/runchecks/"
        r = self.client.get(url)
        self.assertEqual(r.json()["agent"], agent.pk)
        self.assertIsInstance(r.json()["check_interval"], int)
        # 5 named checks + 10 script checks
        self.assertEqual(len(r.json()["checks"]), 15)

    @patch("apiv3.views.reload_nats")
    def test_agent_recovery(self, reload_nats):
        """Recovery endpoint returns the pending action; only 'rpc' mode reloads NATS."""
        reload_nats.return_value = "ok"
        # NOTE(review): URL uses '/recover/' while the valid route below is '/recovery/';
        # either way an unknown agent id yields 404 — confirm the intended route name
        r = self.client.get("/api/v3/34jahsdkjasncASDjhg2b3j4r/recover/")
        self.assertEqual(r.status_code, 404)

        agent = baker.make_recipe("agents.online_agent")
        url = f"/api/v3/{agent.agent_id}/recovery/"
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.json(), {"mode": "pass", "shellcmd": ""})
        reload_nats.assert_not_called()

        baker.make("agents.RecoveryAction", agent=agent, mode="mesh")
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.json(), {"mode": "mesh", "shellcmd": ""})
        reload_nats.assert_not_called()

        baker.make(
            "agents.RecoveryAction",
            agent=agent,
            mode="command",
            command="shutdown /r /t 5 /f",
        )
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(
            r.json(), {"mode": "command", "shellcmd": "shutdown /r /t 5 /f"}
        )
        reload_nats.assert_not_called()

        baker.make("agents.RecoveryAction", agent=agent, mode="rpc")
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.json(), {"mode": "rpc", "shellcmd": ""})
        reload_nats.assert_called_once()

    def test_task_runner_get(self):
        """Taskrunner GET serializes the task for the agent."""
        from autotasks.serializers import TaskGOGetSerializer

        r = self.client.get("/api/v3/500/asdf9df9dfdf/taskrunner/")
        self.assertEqual(r.status_code, 404)

        # setup data
        agent = baker.make_recipe("agents.agent")
        script = baker.make_recipe("scripts.script")
        task = baker.make("autotasks.AutomatedTask", agent=agent, script=script)

        url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/"  # type: ignore

        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(TaskGOGetSerializer(task).data, r.data)  # type: ignore

    def test_task_runner_results(self):
        """Taskrunner PATCH updates task status and stores collector output in custom fields."""
        from agents.models import AgentCustomField

        r = self.client.patch("/api/v3/500/asdf9df9dfdf/taskrunner/")
        self.assertEqual(r.status_code, 404)

        # setup data
        agent = baker.make_recipe("agents.agent")
        task = baker.make("autotasks.AutomatedTask", agent=agent)

        url = f"/api/v3/{task.pk}/{agent.agent_id}/taskrunner/"  # type: ignore

        # test passing task
        data = {
            "stdout": "test test \ntestest stdgsd\n",
            "stderr": "",
            "retcode": 0,
            "execution_time": 3.560,
        }

        r = self.client.patch(url, data)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "passing")  # type: ignore

        # test failing task
        data = {
            "stdout": "test test \ntestest stdgsd\n",
            "stderr": "",
            "retcode": 1,
            "execution_time": 3.560,
        }

        r = self.client.patch(url, data)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing")  # type: ignore

        # test collector task
        text = baker.make("core.CustomField", model="agent", type="text", name="Test")
        boolean = baker.make(
            "core.CustomField", model="agent", type="checkbox", name="Test1"
        )
        multiple = baker.make(
            "core.CustomField", model="agent", type="multiple", name="Test2"
        )

        # test text fields
        task.custom_field = text  # type: ignore
        task.save()  # type: ignore

        # test failing failing with stderr
        data = {
            "stdout": "test test \nthe last line",
            "stderr": "This is an error",
            "retcode": 1,
            "execution_time": 3.560,
        }

        r = self.client.patch(url, data)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(AutomatedTask.objects.get(pk=task.pk).status == "failing")  # type: ignore

        # test saving to text field
        data = {
            "stdout": "test test \nthe last line",
            "stderr": "",
            "retcode": 0,
            "execution_time": 3.560,
        }

        r = self.client.patch(url, data)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing")  # type: ignore
        self.assertEqual(AgentCustomField.objects.get(field=text, agent=task.agent).value, "the last line")  # type: ignore

        # test saving to checkbox field
        task.custom_field = boolean  # type: ignore
        task.save()  # type: ignore

        data = {
            "stdout": "1",
            "stderr": "",
            "retcode": 0,
            "execution_time": 3.560,
        }

        r = self.client.patch(url, data)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing")  # type: ignore
        self.assertTrue(AgentCustomField.objects.get(field=boolean, agent=task.agent).value)  # type: ignore

        # test saving to multiple field with commas
        task.custom_field = multiple  # type: ignore
        task.save()  # type: ignore

        data = {
            "stdout": "this,is,an,array",
            "stderr": "",
            "retcode": 0,
            "execution_time": 3.560,
        }

        r = self.client.patch(url, data)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing")  # type: ignore
        self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this", "is", "an", "array"])  # type: ignore

        # test mutiple with a single value
        data = {
            "stdout": "this",
            "stderr": "",
            "retcode": 0,
            "execution_time": 3.560,
        }

        r = self.client.patch(url, data)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(AutomatedTask.objects.get(pk=task.pk).status, "passing")  # type: ignore
        self.assertEqual(AgentCustomField.objects.get(field=multiple, agent=task.agent).value, ["this"])  # type: ignore
|
nilq/baby-python
|
python
|
"""
Copyright 2020 Vitaliy Zarubin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sqlalchemy import or_
from sqlalchemy import and_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, TIMESTAMP
from datetime import datetime, timedelta
from .model_user import ModelUser
Base = declarative_base()
class ModelUserToken(Base):
    """SQLAlchemy model for a user's auth/push-message tokens.

    One row per issued token; `updated_at` drives expiry (see CONST_DAYS).
    """
    # Tokens older than this many days are purged by clear_old().
    CONST_DAYS = 30
    __tablename__ = 'users_tokens'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer)
    token = Column(String)
    message_token = Column(String)
    language = Column(String)
    uid = Column(String)
    created_at = Column(TIMESTAMP)
    updated_at = Column(TIMESTAMP)
    @classmethod
    def clear_old(cls, app):
        """Delete tokens not refreshed within CONST_DAYS days.

        The interpolated values are class constants (table name, int day
        count), so the raw-SQL format() here is not injectable.
        """
        app.db.execute('DELETE FROM {} WHERE updated_at < NOW() - INTERVAL {} DAY'.format(cls.__tablename__, cls.CONST_DAYS))
        app.db.commit()
        app.log.info('clear older tokens done')
    @classmethod
    def find_by_day(cls, app, channel_id, days=15):
        """Return distinct (user_id, message_token, language) rows for enabled
        users whose token was last refreshed more than `days` days ago and who
        have no notification row for `channel_id` (anti-join via outerjoin +
        IS NULL filter).
        """
        # Imported locally to avoid a circular import at module load time.
        from .model_notification import ModelNotification
        return app.db.query(ModelUserToken.user_id, ModelUserToken.message_token, ModelUserToken.language) \
            .distinct(ModelUserToken.message_token) \
            .filter(ModelUserToken.updated_at < (datetime.now() - timedelta(days=days))) \
            .join(ModelUser, ModelUser.id == ModelUserToken.user_id) \
            .outerjoin(ModelNotification, and_(ModelNotification.channel_id == channel_id, ModelNotification.user_id == ModelUserToken.user_id)) \
            .filter(ModelUser.enabled == 1) \
            .filter(ModelNotification.channel_id == None) \
            .all()
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
class Connectable(object):
    """A named flow node that routes relationships to downstream targets."""

    def __init__(self,
                 name=None,
                 auto_terminate=None):
        self.uuid = uuid.uuid4()
        # Fall back to a uuid-derived name when none is supplied.
        self.name = "node_of_" + str(self.uuid) if name is None else name
        self.auto_terminate = [] if auto_terminate is None else auto_terminate
        # Maps relationship name -> list of connected targets.
        self.connections = {}
        self.out_proc = self
        self.drop_empty_flowfiles = False

    def connect(self, connections):
        """Wire relationships to targets.

        A relationship that gains a connection stops being auto-terminated.
        Returns self so calls can be chained.
        """
        for rel, target in connections.items():
            if rel in self.auto_terminate:
                self.auto_terminate.remove(rel)
            self.connections.setdefault(rel, []).append(target)
        return self

    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_uuid(self):
        return self.uuid

    def set_uuid(self, uuid):
        self.uuid = uuid
|
nilq/baby-python
|
python
|
# Minimal torch-like dtype hierarchy: the classes below act as dtype markers
# ranked by `precedence` (higher wins in type promotion, see maxDtype).
class dtype:
    """Marker base class for the dtype hierarchy."""
    def __eq__(self, other):
        # NOTE(review): `__name__` exists on the class object, not on plain
        # instances, so this equality only works for objects carrying a
        # `__name__` — confirm how dtypes are instantiated/compared upstream.
        return self.__name__ == other.__name__
class complex128(dtype):
    precedence = 10
class complex64(dtype):
    precedence = 9
class float64(dtype):
    precedence = 8
class float32(dtype):
    precedence = 7
class float16(dtype):
    precedence = 6
# class bfloat16(dtype):
#     pass
class int64(dtype):
    precedence = 5
class int32(dtype):
    precedence = 4
class int16(dtype):
    precedence = 3
class int8(dtype):
    precedence = 2
class uint8(dtype):
    precedence = 1
# NOTE: shadows the builtin `bool` within this module's namespace.
class bool(dtype):
    precedence = 0
# Torch-style aliases. Several intentionally shadow Python builtins
# (`float`, `int`) inside this module.
float = float32
double = float64
cfloat = complex64
cdouble = complex128
half = float16
short = int16
int = int32
long = int64
# Grouped float types and library-wide default dtypes.
floatTypes = [float64, float32, float16]
floatDefault = float32
intDefault = int64
def maxDtype(*tensors):
    """Return the highest-precedence dtype among the given tensors' dtypes.

    Returns None when called with no tensors. Raises Exception when a
    tensor's dtype is still an (unresolved) string.
    """
    best = None
    best_rank = -1
    for tensor in tensors:
        dt = tensor.dtype
        if isinstance(dt, str):
            raise Exception("tensor.dtype is " + dt)
        if dt.precedence > best_rank:
            best_rank = dt.precedence
            best = dt
    return best
|
nilq/baby-python
|
python
|
"""
Global mail object used to send notification emails.
"""
from flask_mail import Mail, Message
from flask import current_app
# Module-level Flask-Mail instance shared by the app; presumably bound to the
# Flask application elsewhere via mail.init_app(app) — not shown in this file.
mail = Mail()
def send_mail(email, output):
    """Send the result notification email for an analysis.

    `output` carries single-element sequences under 'analysis_id' and
    'error'; the error flag switches subject and body to the failure text.
    """
    analysis_id = output['analysis_id'][0]
    # Success subject/body are always built first (as before, the result link
    # is rendered even when it is later replaced by the error body).
    header = "Your analysis is ready"
    content = '''<div style='font-size:14px;'>\
Your requested analysis is ready and available at:<br />\
<a href={0}{1}{2}>{0}{1}{2}</a>\
</div>'''.format(current_app.config['APP_DOMAIN'], '/explore/biomarker/result/', analysis_id)
    footer = '''<div style='font-size:12px;'>\
Thank you for using PredictIO, powered by <a href=https://www.pmgenomics.ca/bhklab/>BHK Lab</a>.\
</div>'''
    if output["error"][0]:
        header = "Error occurred during analysis"
        content = '''<div style='font-size:14px;'>\
Error occurred during your analysis.<br />\
Please contact <b>support@PredictIO.ca</b> by citing your analysis ID: {0}\
</div>'''.format(analysis_id)
    body = "<div style='font-family:arial;'>{0}<br /><br />{1}</div>".format(content, footer)
    message = Message("[PredictIO] " + header, sender='PredictIO@PredictIO.ca', recipients=[email])
    message.html = body
    print('sending email')
    mail.send(message)
    print('mail sent')
|
nilq/baby-python
|
python
|
import argparse
import sys
from typing import Callable
from typing import List
from typing import Optional
from . import audit
from . import baseline
from . import filters
from . import plugins
from . import scan
from ...settings import get_settings
from .common import initialize_plugin_settings
from detect_secrets.__version__ import VERSION
class ParserBuilder:
    """Fluent builder for the detect-secrets argument parser.

    Besides argparse options it maintains `_post_processors`, a list of
    callables run in order over the parsed namespace. Several methods
    prepend/replace entries, so list order is load-bearing.
    """
    def __init__(self) -> None:
        self._parser = argparse.ArgumentParser()
        # Default pipeline: only initialize the global plugin settings.
        self._post_processors: List[Callable[[argparse.Namespace], None]] = [
            initialize_plugin_settings,
        ]
        self.add_default_options()
    def add_default_options(self) -> 'ParserBuilder':
        """Register options shared by every invocation mode."""
        self._parser.add_argument(
            '-v',
            '--verbose',
            action='count',
            help='Verbose mode.',
        )
        self._parser.add_argument(
            '--version',
            action='version',
            version=VERSION,
            help='Display version information.',
        )
        # nargs=1 options are unwrapped to scalars in parse_args() below.
        self._parser.add_argument(
            '-C',
            metavar='<path>',
            dest='custom_root',
            nargs=1,
            default=[''],
            help=(
                'Run as if detect-secrets was started in <path>, rather than in the current '
                'working directory.'
            ),
        )
        self._parser.add_argument(
            '-c',
            '--cores',
            dest='num_cores',
            nargs=1,
            type=int,
            default=[None],
            help=(
                'Specify the number of cores to use for parallel processing. Defaults to '
                'using the max cores on the current host.'
            ),
        )
        return self
    def add_console_use_arguments(self) -> 'ParserBuilder':
        """Register the `scan` and `audit` subcommands for console usage."""
        subparser = self._parser.add_subparsers(dest='action')
        # Console use requires a subcommand: enforce it first.
        self._post_processors = [_assert_action_is_specified, *self._post_processors]
        parser = scan.add_scan_action(subparser)
        # NOTE: This ordering is important.
        # 1. Baselines will be handled accordingly, and the global settings object will
        #    be initialized with a certain state.
        # 2. Scan options can override this (e.g. --force-use-all-plugins)
        # 3. Plugin options can override this again (e.g. disabling plugins, or different configs)
        #
        # In a similar way, the filter options must come after the settings object is initialized.
        self._post_processors.append(scan.parse_args)
        self.add_plugin_options(parser, action_filter='scan')
        self.add_filter_options(parser, action_filter='scan')
        # NOTE: scan allows a baseline, but we need to override the first post_processor
        self._post_processors[1] = baseline.parse_args
        audit.add_audit_action(subparser)
        self._post_processors.append(audit.parse_args)
        return self
    def add_pre_commit_arguments(self) -> 'ParserBuilder':
        """Register the arguments used by the pre-commit hook entry point."""
        self._parser.add_argument(
            'filenames',
            nargs='*',
            help='Filenames to check.',
        )
        self.add_baseline_options(
            help=(
                'Explicitly ignore secrets through a baseline generated by `detect-secrets scan`'
            ),
        )
        self.add_plugin_options()
        self.add_filter_options()
        return self
    def add_baseline_options(self, help: str = '') -> 'ParserBuilder':
        """Register the baseline option and swap in its post-processor.

        Loading a baseline *is* the settings initialization, so it replaces
        the default `initialize_plugin_settings` entry in place.
        """
        baseline.add_baseline_option(self._parser, help=help)
        for index, processor in enumerate(self._post_processors):
            if processor == initialize_plugin_settings:
                self._post_processors[index] = baseline.parse_args
                break
        return self
    def add_plugin_options(
        self,
        parser: Optional[argparse.ArgumentParser] = None,
        action_filter: Optional[str] = None,
    ) -> 'ParserBuilder':
        """Register plugin options, optionally scoped to one subcommand."""
        if not parser:
            parser = self._parser
        plugins.add_plugin_options(parser)
        if action_filter:
            # Only run the plugin post-processor when that action was chosen.
            self._post_processors.append(
                _action_specific_post_processor(action_filter, plugins.parse_args),
            )
        else:
            self._post_processors.append(plugins.parse_args)
        return self
    def add_filter_options(
        self,
        parser: Optional[argparse.ArgumentParser] = None,
        action_filter: Optional[str] = None,
    ) -> 'ParserBuilder':
        """Register filter options, optionally scoped to one subcommand."""
        if not parser:
            parser = self._parser
        filters.add_filter_options(parser)
        if action_filter:
            self._post_processors.append(
                _action_specific_post_processor(action_filter, filters.parse_args),
            )
        else:
            self._post_processors.append(filters.parse_args)
        return self
    def parse_args(self, argv: Optional[List[str]] = None) -> argparse.Namespace:
        """Parse argv, run all post-processors, and normalize nargs=1 values.

        Exits with status 1 (after printing usage) when any post-processor
        raises argparse.ArgumentTypeError.
        """
        args = self._parser.parse_args(argv)
        try:
            for processor in self._post_processors:
                processor(args)
        except argparse.ArgumentTypeError as e:
            # TODO: Better help text?
            self._parser.print_usage(sys.stderr)
            print(f'error: {str(e)}', file=sys.stderr)
            sys.exit(1)
        args.custom_root = args.custom_root[0]
        if args.custom_root:
            # This filter assumes current working directory, which will fail if we're running
            # from a different directory.
            # TODO: Maybe adjust this so that it is directory agnostic?
            get_settings().disable_filters('detect_secrets.filters.common.is_invalid_file')
            # Abide by the Principle of Least Surprise, and have the default value be the
            # custom root directory itself.
            if args.path == ['.']:
                args.path = [args.custom_root]
        args.num_cores = args.num_cores[0]
        return args
def _assert_action_is_specified(args: argparse.Namespace) -> None:
    """Fail with an ArgumentTypeError when no subcommand action was given."""
    if args.action:
        return
    raise argparse.ArgumentTypeError('Unspecified action.')
def _action_specific_post_processor(action: str, processor: Callable) -> Callable:
    """Wrap `processor` so it only runs when `args.action` equals `action`."""
    def wrapped(args: argparse.Namespace) -> None:
        if args.action == action:
            processor(args)
    return wrapped
|
nilq/baby-python
|
python
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""select_run"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import select
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
def select_run(shape_cond, shape_x, dtype_cond, dtype_x, attrs=None):
    """Build, launch and verify the `select` kernel.

    Returns ([cond, x1, x2], actual_output, expected_output, passed).
    """
    attrs = {} if attrs is None else attrs
    kernel = utils.op_build_test(select.select, [shape_cond, shape_x, shape_x], [dtype_cond, dtype_x, dtype_x],
                                 kernel_name='select', op_attrs=[], attrs=attrs)
    launch_args, expected, cond, x1, x2 = gen_data(shape_cond, shape_x, dtype_cond, dtype_x)
    actual = utils.mod_launch(kernel, launch_args, expect=expected)
    # Tolerances depend on the element dtype (e.g. looser for float16).
    rtol, atol = get_rtol_atol("select", dtype_x)
    passed = compare_tensor(actual, expected, rtol=rtol, atol=atol, equal_nan=True)
    return [cond, x1, x2], actual, expected, passed
def gen_data(shape_cond, shape_x, dtype_cond, dtype_x):
    """Generate inputs, expected output and launch buffers for `select`.

    Returns (launch_args, expected, cond, x1, x2) where launch_args ends
    with a NaN-filled output buffer for the kernel to write into.
    """
    # Keep the RNG call order (cond, x1, x2) so seeded runs stay reproducible.
    cond = np.random.randint(0, 2, shape_cond).astype(dtype_cond)
    x1 = random_gaussian(shape_x, miu=10, sigma=0.3).astype(dtype_x)
    x2 = random_gaussian(shape_x, miu=10, sigma=0.3).astype(dtype_x)
    exp_output = np.where(cond, x1, x2)
    # NaN sentinel makes unwritten elements obvious in comparisons.
    output = np.full(shape_x, np.nan, dtype_x)
    return [cond, x1, x2, output], exp_output, cond, x1, x2
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 02:35:02 2018
@author: elvex
"""
"""Boite à outils de manipulation des base de données de tweets. """
#import json
import pandas as pd
import txt_analysis as TA
from math import log10
from glob import glob
from os.path import abspath
from re import split
from math import pi
from numpy import cos, sin
import datetime
def json2pd(adr):
    """Load a line-delimited tweet JSON file into a pandas DataFrame.

    The nested ``user`` object is expanded into its own columns and joined
    back onto the tweet-level columns; names present on both sides get the
    ``_profile`` / ``_tweet`` suffixes.

    Args:
        adr: path to the JSON-lines file.

    Returns:
        pandas.DataFrame with one row per tweet.
    """
    from io import StringIO  # local import keeps the module's imports untouched
    with open(adr, 'r') as f:
        raw = f.read()
    # Wrap in StringIO: pandas >= 2.1 deprecates passing a literal JSON string.
    bdd = pd.read_json(StringIO(raw), orient='records', lines=True)
    # `drop('user', 1)` relied on the positional `axis` argument, removed in
    # pandas 2.0; the keyword form works on all versions.
    bdd = bdd['user'].apply(pd.Series).join(bdd.drop('user', axis=1),
                                            how="left", lsuffix="_profile", rsuffix="_tweet")
    return bdd
def filterBYlanguage(bdd, lan = 'fr'):
    """Keep only tweets whose tweet-level language equals `lan` (default French)."""
    return bdd[bdd.lang_tweet == lan]
def keepNdropPD_txt(bdd):
    """Project the frame down to the user id and tweet text columns."""
    return bdd.loc[:, ["id_profile", "text"]]
def aggregate_bddFiltered(bdd):
    """Aggregate tweets per user: tweet count plus all texts joined by newlines.

    Returns a frame indexed by id_profile with columns 'counting' and 'text'.
    """
    aggregated = bdd.groupby("id_profile").agg(["count", lambda texts: "\n".join(texts)])
    aggregated.columns = aggregated.columns.droplevel(0)
    return aggregated.rename(columns={aggregated.columns[0]: "counting",
                                      aggregated.columns[1]: "text"})
def json2bdd_agreg(json):
    """Pipeline: load a tweet JSON, keep French tweets, keep id/text, aggregate per user."""
    bdd = json2pd(json)
    bdd = filterBYlanguage(bdd)
    bdd = keepNdropPD_txt(bdd)
    return aggregate_bddFiltered(bdd)
def concat_bdd_aggreg(bdd1, bdd2):
    """Merge two aggregated frames: counts are summed, texts concatenated.

    Missing users on either side contribute 0 / empty string.
    """
    counts = bdd1.counting.add(bdd2.counting, fill_value=0)
    texts = bdd1.text.add(bdd2.text, fill_value="")
    return pd.concat([counts, texts], axis=1)
def concat_dir(dirname):
    """Aggregate every *.json tweet file in `dirname` into one frame.

    Files that fail to parse are reported and skipped.
    """
    json_paths = glob(abspath(dirname) + "/*.json")
    merged = json2bdd_agreg(json_paths[0])
    for step, json_path in enumerate(json_paths[1:], start=1):
        try:
            merged = concat_bdd_aggreg(merged, json2bdd_agreg(json_path))
        except ValueError as e:
            print("Erreur '{}' sur l'étape {}".format(e, step))
            continue
    return merged
def drop_profile(bdd, n = 2):
    """Return the aggregated text of users having at least `n` tweets."""
    active_enough = bdd["counting"] >= n
    return bdd.loc[active_enough, "text"]
def bdd2bow(bdd):
    """Turn a tweet frame (or text Series) into a bag-of-words frame.

    Each column is a word, each row a user; cells hold the number of
    occurrences of the word in that user's text. Missing words become 0.
    """
    texts = bdd["text"] if isinstance(bdd, pd.core.frame.DataFrame) else bdd
    word_counts = texts.map(TA.formate_txt).map(TA.bow)
    return pd.DataFrame.from_dict(word_counts.tolist()).fillna(0)
def filter_bow(bow, mini = 1):
    """Keep only the words appearing in at least `mini` rows of the bag-of-words.

    Args:
        bow: bag-of-words DataFrame.
        mini: minimum number of rows in which a word must appear.

    Returns:
        The filtered bag-of-words DataFrame.
    """
    appearances = (bow > 0).sum()
    return bow.iloc[:, (appearances >= mini).values]
def tf_idf(bow, lst=None, fonction="idfi"):
    """Apply a tf-idf weighting to a bag-of-words DataFrame.

    Args:
        bow: bag-of-words DataFrame (rows = users, columns = words).
        lst: optional list of words to keep in the result; when empty or
            None every word is kept. (Previously a mutable default `[]`.)
        fonction: name of the weighting scheme:
            idfn => no weighting
            idfi => log10(D / df): document count over word document-frequency
            idfl => like idfi, with +1 guarding against log10(0)
            idfs => squared, shifted variant of idfi
            idff => 1 / df, plain inverse document frequency
            idfp => log10((D - df) / df), probabilistic idf

    Returns:
        The weighted DataFrame, restricted to `lst` when provided.
    """
    dico = {"idfi" : idfi,
            "idfn" : idfn,
            "idfl" : idfl,
            "idfp" : idfp,
            "idff" : idff,
            "idfs" : idfs}
    # D = number of documents; df = per-word document frequency.
    D, df = len(bow), (bow > 0).sum()
    # BUG FIX: the fallback used to be the *string* "idfi", which crashed when
    # an unknown scheme name was passed; fall back to the idfi function.
    f_poids = dico.get(fonction, idfi)
    idf = bow * f_poids(D, df)
    # BUG FIX: the word filter used to be applied to the raw `bow`, silently
    # discarding the weighting just computed; filter the weighted frame.
    if lst:
        idf = intersection(idf, lst)
    return idf
def intersection(bdd, lst):
    """Return the columns of `bdd` whose names match `lst` (case-insensitively lowered).

    Args:
        bdd: pandas DataFrame.
        lst: list of words.

    Returns:
        A new DataFrame restricted to the matching columns.
    """
    wanted = {word.lower() for word in lst}
    kept = wanted.intersection(bdd.columns.values.tolist())
    return bdd.loc[:, list(kept)]
def idfi(D, df):
    """Classic idf: log10 of document count over document frequency."""
    return (D / df).apply(log10)
def idfn(D, df):
    """No weighting: multiply by 1."""
    return 1
def idfl(D, df):
    """idfi with a +1 shift guarding against log10(0)."""
    return (D / df + 1).apply(log10)
def idff(D, df):
    """Plain inverse document frequency."""
    return 1 / df
def idfp(D, df):
    """Probabilistic idf: log10((D - df) / df)."""
    return ((D - df) / df).apply(log10)
def idfs(D, df):
    """Squared, shifted idf: log10((D + 1) / df) ** 2."""
    return (((D + 1) / df).apply(log10)) ** 2
def df2np(df):
    """Split a pandas DataFrame into its index array and value matrix.

    Args:
        df: pandas DataFrame.

    Returns:
        (index_array, value_matrix) as numpy arrays.
    """
    return df.index.values, df.values
def dateBDD(bdd):
    """Derive cyclic (cos/sin) time features per tweet, indexed by tweet id.

    Expects columns 'id_tweet' and 'created_at_tweet'; the timestamp is split
    as [year, month, day, hour, minute, second] and each periodic component
    is mapped onto the unit circle.
    """
    # Days per month, leap years ignored.
    # NOTE(review): key 12 maps to 30, but December has 31 days — likely a typo.
    dico_month = {1 : 31, 2 : 28, 3 : 31, 4 : 30, 5 : 31, 6 : 30, 7 : 31,
                  8 : 31, 9 : 30, 10 : 31, 11 : 30, 12 : 30}
    bdd = bdd.loc[:, ['id_tweet', 'created_at_tweet']].set_index('id_tweet')
    # Split 'YYYY-MM-DD HH:MM:SS'-style values into a list of ints.
    bdd.created_at_tweet = bdd.created_at_tweet.apply(lambda x: list(map(int, split('[: -]', str(x)))))
    # Hour of day mapped to [0, 2*pi): 24h * pi/12 = 2*pi.
    bdd["hour"] = bdd.created_at_tweet.apply(lambda lst: (lst[-3] + lst[-2] / 60 + lst[-1] / (60**2)) * (pi/12))
    bdd["hour_X"] = bdd.hour.apply(cos)
    bdd["hour_Y"] = bdd.hour.apply(sin)
    # NOTE(review): x[2] is the *day* but is scaled by pi/6 (a 12-period
    # circle, i.e. a month scale); x[1] (month) and x[2] (day) look swapped
    # here and in month_X/month_Y below — confirm the intended encoding.
    bdd["day_X"] = bdd.created_at_tweet.apply(lambda x: cos(x[2] * pi / 6))
    bdd["day_Y"] = bdd.created_at_tweet.apply(lambda x: sin(x[2] * pi / 6))
    bdd["dayweek"] = bdd.created_at_tweet.apply(lambda x: datetime.date(x[0], x[1], x[2]).weekday())
    # Weekday on a 7-period circle (this one is consistent).
    bdd["dayweek_X"] = bdd.dayweek.apply(lambda x: cos(x * 2 * pi / 7))
    bdd["dayweek_Y"] = bdd.dayweek.apply(lambda x: sin(x * 2 * pi / 7))
    # NOTE(review): dico_month[x[2]] indexes the table by the *day* component,
    # which raises KeyError for any day > 12; dico_month[x[1]] (month) was
    # probably intended — verify against the original feature design.
    bdd["month_X"] = bdd.created_at_tweet.apply(lambda x: cos(x[1] * pi / dico_month[x[2]]))
    bdd["month_Y"] = bdd.created_at_tweet.apply(lambda x: sin(x[1] * pi / dico_month[x[2]]))
    bdd["year"] = bdd.created_at_tweet.apply(lambda x: x[0])
    # Drop intermediates; keep only the encoded features.
    bdd.drop(labels = ["created_at_tweet", "hour", "dayweek"], axis = 1, inplace = True)
    return bdd
def json2dateBDD(json):
    """Load a tweet JSON file and return its date-feature frame (French tweets only)."""
    tweets = json2pd(json)
    tweets = filterBYlanguage(tweets)
    return dateBDD(tweets)
def date_dir(dirname):
    """Build the date-feature frame for every *.json tweet file in `dirname`.

    Files that fail to parse are reported and skipped.
    """
    json_paths = glob(abspath(dirname) + "/*.json")
    merged = json2dateBDD(json_paths[0])
    for step, json_path in enumerate(json_paths[1:], start=1):
        try:
            merged = pd.concat([merged, json2dateBDD(json_path)], axis=0)
        except ValueError as e:
            print("Erreur '{}' sur l'étape {}".format(e, step))
            continue
    return merged
def print_means_words(km, col, lim = 10):
    """Print, for each k-means cluster, its size and `lim` characteristic words.

    Assumes `km` exposes `means` (cluster x word matrix), `nb_cluster`,
    `data` (the clustered frame) and `grp` (row -> cluster assignment array).
    """
    means = km.means
    D = pd.DataFrame(means, columns=col)
    for i in range(km.nb_cluster):
        # NOTE(review): sort_values is *ascending*, so this picks the words
        # with the LOWEST mean for cluster i; if "representative" means
        # highest-scoring, ascending=False was probably intended — verify.
        lst = list(D.sort_values(by = i, axis = 1).iloc[i, :lim].index.values)
        # Number of individuals assigned to cluster i.
        n = km.data.index[(km.grp[:, 1] == i)].size
        print("Les {} mots représentatif du groupe {} composé de {} individus sont :\n\t {}".format(lim, i, n, ' - '.join(lst)))
    return None
|
nilq/baby-python
|
python
|
from jmetal.algorithm.singleobjective.simulated_annealing import SimulatedAnnealing
from jmetal.operator import PolynomialMutation
from jmetal.problem.bbob import bbob
from jmetal.util.observer import ProgressBarObserver
from jmetal.util.termination_criterion import StoppingByEvaluations
if __name__ == '__main__':
    max_evaluations = 1000000
    termination_criteria = StoppingByEvaluations(max_evaluations)
    # NOTE(review): a second, separate StoppingByEvaluations is constructed
    # for the algorithm below, so `termination_criteria` above only feeds the
    # progress bar; the two evaluation counters are independent objects —
    # confirm this is intended (the progress bar may not track the run).
    problem = bbob.BBOB()
    algorithm = SimulatedAnnealing(
        problem=problem,
        mutation=PolynomialMutation(probability=0.1, distribution_index=20.0),
        termination_criterion=StoppingByEvaluations(max=max_evaluations)
    )
    # Show a progress bar driven by the (separate) criterion above.
    progress = ProgressBarObserver(termination_criteria.get_criterion())
    algorithm.observable.register(progress)
    algorithm.run()
# Random Trials
|
nilq/baby-python
|
python
|
"""
Packaging setup for ledcontroller
"""
# pylint: disable=line-too-long
import os.path
from codecs import open as codecs_open
from setuptools import setup
# Use the README (next to this file) as the long description on PyPI.
with codecs_open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()
setup(
    name='ledcontroller',
    version='1.3.0',
    description='Controller library for limitlessled/easybulb/milight Wi-Fi LEDs',
    long_description=LONG_DESCRIPTION,
    url='https://github.com/ojarva/python-ledcontroller',
    author='Olli Jarva',
    author_email='olli@jarva.fi',
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Topic :: Home Automation',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    keywords='applight applamp dekolight easybulb ilight limitlessled led ledme milight wifi',
    packages=["ledcontroller"],
    # Pure stdlib at runtime; dev extras cover packaging/release tooling.
    install_requires=[],
    test_suite="tests",
    extras_require={
        'dev': ['twine', 'wheel'],
    },
)
|
nilq/baby-python
|
python
|
'''
Experiment: RTC real-time clock
Version: v1.0
Date: 2020.12
Author: 01Studio
Description: display the current time on the LCD
Community: www.01studio.org
'''
# Import the required modules.
import pyb
from tftlcd import LCD43M
# Common colour constants (R, G, B).
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
BLACK = (0,0,0)
WHITE = (255,255,255)
########################
# Build and initialise the 4.3" LCD object
########################
d = LCD43M(portrait=1) # default orientation
d.fill(WHITE) # fill the screen with white
# Initialise the RTC.
rtc = pyb.RTC()
# Weekday display names, and per-field zero-padding strings for H/M/S.
week = ['Mon', 'Tues', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
time = ['', '', '']
# Draw the title.
d.printStr('01Studio RTC', 100, 10, BLACK, size=4)
# First-power-up configuration. The datetime tuple is, in order: year, month,
# day, weekday, hours, minutes, seconds, subseconds. As a simple heuristic we
# reset the clock whenever the stored year looks wrong — adjust as needed.
if rtc.datetime()[0] != 2019:
    rtc.datetime((2019, 4, 1, 1, 0, 0, 0, 0))
while True:
    datetime = rtc.datetime() # read the current time
    # Draw the date; strings are joined with '+'.
    d.printStr(str(datetime[0]) + '-' + str(datetime[1]) + '-' + str(datetime[2]) + ' ' + week[(datetime[3] - 1)], 10, 100, BLACK, size=4)
    # Zero-pad hours/minutes/seconds below 10 for a nicer display.
    for i in range(4, 7):
        if datetime[i] < 10:
            time[i - 4] = "0"
        else:
            time[i - 4] = ""
    # Draw the time.
    d.printStr(time[0] + str(datetime[4]) + ':' + time[1] + str(datetime[5]) + ':' + time[2] + str(datetime[6]), 10, 200, BLACK, size=4)
    pyb.delay(300) # refresh every 300 ms (previous comment said 500 ms)
|
nilq/baby-python
|
python
|
from __future__ import division, absolute_import, print_function
from .jdx import jdx_reader, jdx_file_reader, JdxFile
# Star-importing this package exposes only the `jdx` submodule; jdx_reader,
# jdx_file_reader and JdxFile remain reachable as package attributes.
__all__ = ["jdx"]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, shakeel vaim and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Add New Properties')
class TestAddNewProperties(unittest.TestCase):
    """Placeholder test case for the 'Add New Properties' DocType (no tests yet)."""
    pass
|
nilq/baby-python
|
python
|
from datetime import datetime
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.parser import parse as dateparse
from city_scrapers.mixins.wayne_commission import WayneCommissionMixin
class WayneBuildingAuthoritySpider(WayneCommissionMixin, CityScrapersSpider):
    """Scraper for Wayne County Building Authority meeting listings."""
    name = "wayne_building_authority"
    agency = "Wayne County Government"
    start_urls = ["https://www.waynecounty.com/boards/buildingauthority/meetings.aspx"]
    meeting_name = "Building Authority"
    # Override the mixin for any unique attributes.
    location = {
        "name": "6th Floor, Guardian Building",
        "address": "500 Griswold St, Detroit, MI 48226",
    }

    def _parse_entries(self, response):
        """Return the non-empty table rows under the current year's section."""
        year = datetime.now().year
        selector = '//section[contains(.,"%s")]//tbody/tr[child::td/text()]' % year
        return response.xpath(selector)

    def _parse_start(self, item):
        """Parse the start datetime; bold text marks a replacement meeting date."""
        date_str = item.xpath(".//td[2]/strong/text()").extract_first()
        if date_str is None:
            date_str = item.xpath(".//td[2]/text()").extract_first()
        time_str = item.xpath(".//td[3]/text()").extract_first()
        return dateparse("{0} {1}".format(date_str, time_str))
|
nilq/baby-python
|
python
|
"""Base segment definitions.
Here we define:
- BaseSegment. This is the root class for all segments, and is
designed to hold other subsegments.
- RawSegment. This is designed to be the root segment, without
any children, and the output of the lexer.
- UnparsableSegment. A special wrapper to indicate that the parse
function failed on this block of segments and to prevent further
analysis.
These are the fundamental building blocks of the rest of the parser.
"""
import logging
from io import StringIO
from benchit import BenchIt
from .match import MatchResult, curtail_string, join_segments_raw
from ..errors import SQLLintError
def verbosity_logger(msg, verbosity=0, level='info', v_level=3):
    """Print `msg` when verbose enough, otherwise send it to `logging.<level>`."""
    if verbosity < v_level:
        # Mostly equivalent to logging.info(msg), but honours `level`.
        getattr(logging, level)(msg)
    else:
        print(msg)
def parse_match_logging(grammar, func, msg, parse_context, v_level, **kwargs):
    """Log in a particular consistent format for use while matching."""
    # Matching is hot: bail out before any formatting when quiet.
    if parse_context.verbosity <= 1:
        return
    symbol = kwargs.pop('symbol', '')
    line = "[PD:{0} MD:{1}]\t{2:<50}\t{3:<20}\t{4:<4}".format(
        parse_context.parse_depth, parse_context.match_depth,
        ('.' * parse_context.match_depth) + str(parse_context.match_segment),
        "{0}.{1} {2}".format(grammar, func, msg),
        symbol
    )
    if kwargs:
        # Render any remaining keyword details, repr-ing strings.
        extras = ', '.join(
            "{0}={1}".format(key, repr(val) if isinstance(val, str) else val)
            for key, val in kwargs.items()
        )
        line += "\t[{0}]".format(extras)
    verbosity_logger(line, parse_context.verbosity, v_level=v_level)
def frame_msg(msg):
    """Frame a message with hashes so that it covers five lines."""
    return "\n".join(["###", "#", "# {0}".format(msg), "#", "###"])
def check_still_complete(segments_in, matched_segments, unmatched_segments):
    """Assert that matching did not drop any raw content.

    The raw text of the input segments must equal the raw text of the
    matched plus unmatched segments combined.
    """
    before = join_segments_raw(segments_in)
    after = join_segments_raw(matched_segments + unmatched_segments)
    if before != after:
        raise RuntimeError(
            "Dropped elements in sequence matching! {0!r} != {1!r}".format(
                before, after))
class ParseBlacklist:
    """Acts as a cache to stop unnecessary matching.

    Maps segment names to the sets of segment tuples already known NOT to
    match them.
    """
    def __init__(self):
        self._blacklist_struct = {}

    def _hashed_version(self):
        # Debug helper: same structure, with elements replaced by their hashes.
        return {
            name: {hash(elem) for elem in tuples}
            for name, tuples in self._blacklist_struct.items()
        }

    def check(self, seg_name, seg_tuple):
        """Return True if this seg_tuple already failed to match seg_name."""
        return seg_tuple in self._blacklist_struct.get(seg_name, ())

    def mark(self, seg_name, seg_tuple):
        """Record that seg_tuple does not match seg_name."""
        self._blacklist_struct.setdefault(seg_name, set()).add(seg_tuple)

    def clear(self):
        """Drop all cached non-matches."""
        self._blacklist_struct = {}
class ParseContext:
    """The context for parsing. It holds configuration and rough state.

    We expect that an object (or copy of this object) will be passed
    around rather than the individual variables for parse and match depth
    as before.
    """
    __slots__ = ['match_depth', 'parse_depth', 'verbosity', 'dialect', 'match_segment', 'recurse', 'blacklist']

    def __init__(self, dialect=None, verbosity=0, match_depth=0, parse_depth=0, match_segment=None, recurse=True, blacklist=None):
        # Explicit assignment (replaces the earlier locals()-driven loop).
        self.dialect = dialect
        self.verbosity = verbosity
        self.match_depth = match_depth
        self.parse_depth = parse_depth
        self.match_segment = match_segment
        self.recurse = recurse
        # Every context carries a blacklist cache; create one if not given.
        self.blacklist = ParseBlacklist() if blacklist is None else blacklist

    def copy(self, incr=None, decr=None, **kwargs):
        """Make a copy of the parse context, optionally with some edited variables."""
        new_vals = {name: getattr(self, name) for name in self.__slots__}
        new_vals.update(kwargs or {})
        # Normalise incr/decr to iterables (a bare string means one field).
        for name in ([incr] if isinstance(incr, str) else incr or []):
            new_vals[name] += 1
        for name in ([decr] if isinstance(decr, str) else decr or []):
            new_vals[name] -= 1
        return self.__class__(**new_vals)

    @classmethod
    def from_config(cls, config):
        """Construct a `ParseContext` from a `FluffConfig`."""
        return cls(dialect=config.get('dialect_obj'), recurse=config.get('recurse'))
class BaseSegment:
    """The base segment element.

    This defines the base element which drives both Lexing, Parsing and Linting.
    A large chunk of the logic which defines those three operations are centered
    here. Much of what is defined in the BaseSegment is also used by its many
    subclasses rather than directly here.
    For clarity, the `BaseSegment` is mostly centered around a segment which contains
    other subsegments. For segments which don't have *children*, refer to the `RawSegment`
    class (which still inherits from this one).
    Segments are used both as instances to hold chunks of text, but also as classes
    themselves where they function a lot like grammars, and return instances of themselves
    when they match. The many classmethods in this class are usually to serve their
    purpose as a matcher.
    """
# `type` should be the *category* of this kind of segment
type = 'base'
parse_grammar = None
match_grammar = None
grammar = None
comment_seperate = False
is_whitespace = False
optional = False # NB: See the seguence grammar for details
is_segment = True
_name = None
_func = None # Available for use by subclasses (e.g. the LambdaSegment)
is_meta = False
    @property
    def name(self):
        """The name of this segment.

        The reason for two routes for names is that some subclasses
        might want to override the name rather than just getting it
        from the class name.
        Name should be specific to this kind of segment, while `type`
        should be a higher level descriptor of the kind of segment.
        For example, the name of `+` is 'plus' but the type might be
        'binary_operator'.
        """
        return self._name or self.__class__.__name__
@property
def is_expandable(self):
"""Return true if it is meaningful to call `expand` on this segment.
We need to do this recursively because even if *this* segment doesn't
need expanding, maybe one of it's children does.
"""
if self._parse_grammar():
return True
elif self.segments and any(s.is_expandable for s in self.segments):
return True
else:
return False
    @classmethod
    def simple(cls, parse_context):
        """Does this matcher support an uppercase hash matching route?"""
        # Base segments never do; presumably overridden by subclasses that can.
        return False
@property
def is_code(self):
"""Return True if this segment contains any code."""
return any(seg.is_code for seg in self.segments)
@property
def is_comment(self):
"""Return True if this is entirely made of comments."""
return all(seg.is_comment for seg in self.segments)
    @classmethod
    def is_optional(cls):
        """Return True if this segment is optional.

        This is used primarily in sequence matching, where optional
        segments can be skipped.
        """
        # Class-level flag; see the `optional` attribute above.
        return cls.optional
@classmethod
def _match_grammar(cls):
"""Return the `match_grammar` attribute if present, or the `grammar` attribute if not."""
if cls.match_grammar:
return cls.match_grammar
else:
return cls.grammar
@classmethod
def _parse_grammar(cls):
"""Return the `parse_grammar` attribute if present, or the `grammar` attribute if not."""
if cls.parse_grammar:
return cls.parse_grammar
else:
return cls.grammar
    def validate_segments(self, text="constructing", validate=True):
        """Validate the current set of segments.

        Check the elements of the `segments` attribute are all
        themselves segments, and that the positions match up.
        `validate` confirms whether we should check contiguousness
        (it is disabled while fixes are being applied).
        `text` names the calling context for error messages.
        """
        # Placeholder variables for positions
        start_pos = None
        end_pos = None
        prev_seg = None
        for elem in self.segments:
            if not isinstance(elem, BaseSegment):
                raise TypeError(
                    "In {0} {1}, found an element of the segments tuple which"
                    " isn't a segment. Instead found element of type {2}.\nFound: {3}\nFull segments:{4}".format(
                        text,
                        type(self),
                        type(elem),
                        elem,
                        self.segments
                    ))
            # While applying fixes, we shouldn't validate here, because it will fail.
            if validate:
                # If we have a comparison point, validate that
                # (each segment must start exactly where the previous ended).
                if end_pos and elem.get_start_pos_marker() != end_pos:
                    raise TypeError(
                        "In {0} {1}, found an element of the segments tuple which"
                        " isn't contigious with previous: {2} > {3}. End pos: {4}."
                        " Prev String: {5!r}".format(
                            text,
                            type(self),
                            prev_seg,
                            elem,
                            end_pos,
                            prev_seg.raw
                        ))
                start_pos = elem.get_start_pos_marker()
                end_pos = elem.get_end_pos_marker()
                prev_seg = elem
                # A segment's end must equal its start advanced by its raw text.
                if start_pos.advance_by(elem.raw) != end_pos:
                    raise TypeError(
                        "In {0} {1}, found an element of the segments tuple which"
                        " isn't self consistent: {2}".format(
                            text,
                            type(self),
                            elem
                        ))
    def get_end_pos_marker(self):
        """Return the pos marker at the end of this segment.

        Delegates to the last child: this segment ends where its final
        child ends.
        """
        return self.segments[-1].get_end_pos_marker()
    def get_start_pos_marker(self):
        """Return the pos marker at the start of this segment.

        Delegates to the first child: this segment starts where its first
        child starts.
        """
        return self.segments[0].get_start_pos_marker()
    def __init__(self, segments, pos_marker=None, validate=True):
        """Construct a segment from its children.

        Args:
            segments: A match result (anything with `matched_segments`),
                a tuple or a list of child segments. Must be non-empty.
            pos_marker: Optional explicit position. If omitted, it is taken
                from the match (if given one) or from the first child.
            validate: Passed through to `validate_segments` to control the
                contiguity check (disabled while applying fixes).
        """
        if len(segments) == 0:
            raise RuntimeError(
                "Setting {0} with a zero length segment set. This shouldn't happen.".format(
                    self.__class__))

        if hasattr(segments, 'matched_segments'):
            # Safely extract segments from a match
            self.segments = segments.matched_segments
        elif isinstance(segments, tuple):
            self.segments = segments
        elif isinstance(segments, list):
            # Normalise lists to tuples so children are immutable.
            self.segments = tuple(segments)
        else:
            raise TypeError(
                "Unexpected type passed to BaseSegment: {0}".format(
                    type(segments)))

        # Check elements of segments:
        self.validate_segments(validate=validate)

        if pos_marker:
            self.pos_marker = pos_marker
        else:
            # If no pos given, it's the pos of the first segment
            # Work out if we're dealing with a match result...
            if hasattr(segments, 'initial_match_pos_marker'):
                self.pos_marker = segments.initial_match_pos_marker()
            elif isinstance(segments, (tuple, list)):
                self.pos_marker = segments[0].pos_marker
            else:
                raise TypeError(
                    "Unexpected type passed to BaseSegment: {0}".format(
                        type(segments)))
    def parse(self, parse_context=None):
        """Use the parse grammar to find subsegments within this segment.

        A large chunk of the logic around this can be found in the `expand` method.

        Use the parse setting in the context for testing, mostly to check how deep to go.
        True/False for yes or no, an integer allows a certain number of levels.

        Returns:
            self, mutated in place: `self.segments` is replaced with the
            matched structure (or wrapped in an `UnparsableSegment` on
            failure), then recursively expanded.
        """
        if not parse_context.dialect:
            raise RuntimeError("No dialect provided to {0!r}!".format(self))
        # Clear the blacklist cache so avoid missteps
        if parse_context:
            parse_context.blacklist.clear()
        # the parse_depth and recurse kwargs control how deep we will recurse for testing.
        if not self.segments:
            # This means we're a root segment, just return an unmutated self
            return self
        # Get the Parse Grammar
        g = self._parse_grammar()
        if g is None:
            # No parse grammar, go straight to expansion
            logging.debug("{0}.parse: no grammar. Going straight to expansion".format(self.__class__.__name__))
        else:
            # Use the Parse Grammar (and the private method)
            # NOTE: No match_depth kwarg, because this is the start of the matching.
            m = g._match(
                segments=self.segments,
                parse_context=parse_context.copy(
                    match_segment=self.__class__.__name__
                )
            )
            if not isinstance(m, MatchResult):
                raise TypeError(
                    "[PD:{0}] {1}.match. Result is {2}, not a MatchResult!".format(
                        parse_context.parse_depth, self.__class__.__name__, type(m)))
            # Basic Validation, that we haven't dropped anything.
            check_still_complete(self.segments, m.matched_segments, m.unmatched_segments)
            if m.has_match():
                if m.is_complete():
                    # Complete match, happy days!
                    self.segments = m.matched_segments
                else:
                    # Incomplete match.
                    # For now this means the parsing has failed. Lets add the unmatched bit at the
                    # end as something unparsable.
                    # TODO: Do something more intelligent here.
                    self.segments = m.matched_segments + (UnparsableSegment(
                        segments=m.unmatched_segments, expected="Nothing..."),)
            else:
                # If there's no match at this stage, then it's unparsable. That's
                # a problem at this stage so wrap it in an unparable segment and carry on.
                self.segments = (UnparsableSegment(
                    segments=self.segments,
                    expected=g.expected_string(dialect=parse_context.dialect)),)  # NB: tuple
            # Validate new segments
            self.validate_segments(text="parsing")

        bencher = BenchIt()  # starts the timer
        bencher("Parse complete of {0!r}".format(self.__class__.__name__))

        # Recurse if allowed (using the expand method to deal with the expansion)
        logging.debug(
            "{0}.parse: Done Parse. Plotting Recursion. Recurse={1!r}".format(
                self.__class__.__name__, parse_context.recurse))
        parse_depth_msg = "###\n#\n# Beginning Parse Depth {0}: {1}\n#\n###\nInitial Structure:\n{2}".format(
            parse_context.parse_depth + 1, self.__class__.__name__, self.stringify())
        if parse_context.recurse is True:
            logging.debug(parse_depth_msg)
            self.segments = self.expand(
                self.segments,
                parse_context=parse_context.copy(
                    incr='parse_depth', match_depth=0, recurse=True
                )
            )
        elif isinstance(parse_context.recurse, int):
            # An integer recurse setting is a countdown: only recurse while
            # there is depth budget remaining.
            if parse_context.recurse > 1:
                logging.debug(parse_depth_msg)
                self.segments = self.expand(
                    self.segments,
                    parse_context=parse_context.copy(decr='recurse', incr='parse_depth')
                )
        # Validate new segments
        self.validate_segments(text="expanding")

        return self
    def __repr__(self):
        # Debug representation: class name plus position (no raw content,
        # unlike RawSegment.__repr__).
        return "<{0}: ({1})>".format(
            self.__class__.__name__,
            self.pos_marker)
    def _reconstruct(self):
        """Make a string from the segments of this segment.

        Children are concatenated left-to-right with no separator.
        """
        return "".join(seg.raw for seg in self.segments)
    @property
    def raw(self):
        """Make a string from the segments of this segment.

        NB: Recomputed on every access; not cached.
        """
        return self._reconstruct()
    @property
    def raw_upper(self):
        """Make an uppercase string from the segments of this segment.

        NB: Recomputed on every access; not cached (RawSegment overrides
        this with a precomputed value).
        """
        return self._reconstruct().upper()
    @staticmethod
    def _suffix():
        """Return any extra output required at the end when logging.

        NB Override this for specific subclassesses if we want extra output.
        Base segments have no suffix; see `RawSegment._suffix` and
        `UnparsableSegment._suffix` for overrides.
        """
        return ""
def _preface(self, ident, tabsize, pos_idx, raw_idx):
"""Returns the preamble to any logging."""
preface = (' ' * (ident * tabsize))
if self.is_meta:
preface += "[META] "
preface += self.__class__.__name__ + ":"
preface += (' ' * max(pos_idx - len(preface), 0))
if self.pos_marker:
preface += str(self.pos_marker)
else:
preface += '-'
sfx = self._suffix()
if sfx:
return preface + (' ' * max(raw_idx - len(preface), 0)) + sfx
else:
return preface
    @property
    def _comments(self):
        """Returns only the comment elements of this segment.

        Selection is by exact `type == 'comment'` on direct children only.
        """
        return [seg for seg in self.segments if seg.type == 'comment']
    @property
    def _non_comments(self):
        """Returns only the non-comment elements of this segment.

        The complement of `_comments`, over direct children only.
        """
        return [seg for seg in self.segments if seg.type != 'comment']
    def stringify(self, ident=0, tabsize=4, pos_idx=60, raw_idx=80, code_only=False):
        """Use indentation to render this segment and it's children as a string.

        Args:
            ident: Current indentation level (multiplied by `tabsize`).
            tabsize: Spaces per indentation level.
            pos_idx: Column at which the position marker is aligned.
            raw_idx: Column at which any raw/suffix content is aligned.
            code_only: If True, only render code children.
        """
        buff = StringIO()
        preface = self._preface(ident=ident, tabsize=tabsize, pos_idx=pos_idx, raw_idx=raw_idx)
        buff.write(preface + '\n')
        # When `comment_seperate` is set, render comments and code under
        # separate headed sections rather than interleaved.
        if not code_only and self.comment_seperate and len(self._comments) > 0:
            if self._comments:
                buff.write((' ' * ((ident + 1) * tabsize)) + 'Comments:' + '\n')
                for seg in self._comments:
                    buff.write(seg.stringify(ident=ident + 2, tabsize=tabsize, pos_idx=pos_idx,
                                             raw_idx=raw_idx, code_only=code_only))
            if self._non_comments:
                buff.write((' ' * ((ident + 1) * tabsize)) + 'Code:' + '\n')
                for seg in self._non_comments:
                    buff.write(seg.stringify(ident=ident + 2, tabsize=tabsize, pos_idx=pos_idx,
                                             raw_idx=raw_idx, code_only=code_only))
        else:
            for seg in self.segments:
                # If we're in code_only, only show the code segments, otherwise always true
                if not code_only or seg.is_code:
                    buff.write(seg.stringify(ident=ident + 1, tabsize=tabsize, pos_idx=pos_idx,
                                             raw_idx=raw_idx, code_only=code_only))
        return buff.getvalue()
    @staticmethod
    def segs_to_tuple(segs, **kwargs):
        """Return a tuple structure from an iterable of segments.

        kwargs are forwarded to each child's `to_tuple`.
        """
        return tuple(seg.to_tuple(**kwargs) for seg in segs)
def to_tuple(self, **kwargs):
"""Return a tuple structure from this segment.
NB: If he segment is a meta segment, i.e. it's an indent or dedent,
then it will never be returned from here!
"""
# works for both base and raw
code_only = kwargs.get('code_only', False)
show_raw = kwargs.get('show_raw', False)
if show_raw and not self.segments:
result = (self.type, self.raw)
elif code_only:
result = (self.type, tuple(seg.to_tuple(**kwargs) for seg in self.segments if seg.is_code and not seg.is_meta))
else:
result = (self.type, tuple(seg.to_tuple(**kwargs) for seg in self.segments if not seg.is_meta))
return result
    @classmethod
    def structural_simplify(cls, elem):
        """Simplify the structure recursively so it serializes nicely in json/yaml.

        Two-tuples headed by a string become single-key dicts; tuples of
        tuples become either a dict (when keys are unique) or a list.
        Anything else is returned unchanged.
        """
        if isinstance(elem, tuple):
            # Does this look like an element?
            if len(elem) == 2 and isinstance(elem[0], str):
                # This looks like a single element, make a dict
                elem = {elem[0]: cls.structural_simplify(elem[1])}
            elif isinstance(elem[0], tuple):
                # This looks like a list of elements.
                keys = [e[0] for e in elem]
                # Any duplicate elements?
                if len(set(keys)) == len(keys):
                    # No, we can use a mapping type
                    elem = {e[0]: cls.structural_simplify(e[1]) for e in elem}
                else:
                    # Yes, this has to be a list :(
                    elem = [cls.structural_simplify(e) for e in elem]
        return elem
    def as_record(self, **kwargs):
        """Return the segment as a structurally simplified record.

        This is useful for serialization to yaml or json.
        kwargs passed to to_tuple
        """
        # Two stages: tuple-ify the tree, then collapse to dicts/lists.
        return self.structural_simplify(self.to_tuple(**kwargs))
    @classmethod
    def match(cls, segments, parse_context):
        """Match a list of segments against this segment.

        Note: Match for segments is done in the ABSTRACT.
        When dealing with concrete then we're always in parse.
        Parse is what happens during expand.

        Matching can be done from either the raw or the segments.
        This raw function can be overridden, or a grammar defined
        on the underlying class.

        Raises:
            NotImplementedError: If no match grammar is available.
            TypeError: If the grammar returns something other than a
                MatchResult.
        """
        if cls._match_grammar():
            # Call the private method
            m = cls._match_grammar()._match(segments=segments, parse_context=parse_context.copy(incr='match_depth'))

            # Calling unify here, allows the MatchResult class to do all the type checking.
            if not isinstance(m, MatchResult):
                raise TypeError(
                    "[PD:{0} MD:{1}] {2}.match. Result is {3}, not a MatchResult!".format(
                        parse_context.parse_depth, parse_context.match_depth, cls.__name__,
                        type(m)))
            # Once unified we can deal with it just as a MatchResult
            if m.has_match():
                # Wrap the matched segments in an instance of this class.
                return MatchResult((cls(segments=m.matched_segments),), m.unmatched_segments)
            else:
                return MatchResult.from_unmatched(segments)
        else:
            raise NotImplementedError("{0} has no match function implemented".format(cls.__name__))
    @classmethod
    def _match(cls, segments, parse_context):
        """A wrapper on the match function to do some basic validation and logging.

        Normalises the input (single segment or list -> tuple), logs entry
        and exit at verbosity level 4, and warns on suspicious types.
        """
        parse_match_logging(
            cls.__name__[:10], '_match', 'IN', parse_context=parse_context,
            v_level=4, ls=len(segments))

        if isinstance(segments, BaseSegment):
            segments = (segments,)  # Make into a tuple for compatability

        if not isinstance(segments, tuple):
            logging.warning(
                "{0}.match, was passed {1} rather than tuple or segment".format(
                    cls.__name__, type(segments)))
            if isinstance(segments, list):
                # Let's make it a tuple for compatibility
                segments = tuple(segments)

        if len(segments) == 0:
            logging.info("{0}._match, was passed zero length segments list".format(cls.__name__))

        m = cls.match(segments, parse_context=parse_context)

        if not isinstance(m, tuple) and m is not None:
            logging.warning(
                "{0}.match, returned {1} rather than tuple".format(
                    cls.__name__, type(m)))

        parse_match_logging(
            cls.__name__[:10], '_match', 'OUT',
            parse_context=parse_context, v_level=4, m=m)
        # Validation is skipped at a match level. For performance reasons
        # we match at the parse level only
        # check_still_complete(segments, m.matched_segments, m.unmatched_segments)
        return m
    @staticmethod
    def expand(segments, parse_context):
        """Expand the list of child segments using their `parse` methods.

        Non-expandable children pass through untouched; each expandable
        child is replaced by the result of its own `parse`. A completeness
        check guards against content being dropped.
        """
        segs = ()
        for stmt in segments:
            try:
                if not stmt.is_expandable:
                    verbosity_logger(
                        "[PD:{0}] Skipping expansion of {1}...".format(parse_context.parse_depth, stmt),
                        verbosity=parse_context.verbosity)
                    segs += (stmt,)
                    continue
            except Exception as err:
                # raise ValueError("{0} has no attribute `is_expandable`. This segment appears poorly constructed.".format(stmt))
                logging.error("{0} has no attribute `is_expandable`. This segment appears poorly constructed.".format(stmt))
                raise err
            if not hasattr(stmt, 'parse'):
                raise ValueError("{0} has no method `parse`. This segment appears poorly constructed.".format(stmt))
            parse_depth_msg = "Parse Depth {0}. Expanding: {1}: {2!r}".format(
                parse_context.parse_depth, stmt.__class__.__name__,
                curtail_string(stmt.raw, length=40))
            verbosity_logger(frame_msg(parse_depth_msg), verbosity=parse_context.verbosity)
            res = stmt.parse(parse_context=parse_context)
            if isinstance(res, BaseSegment):
                segs += (res,)
            else:
                # We might get back an iterable of segments
                segs += tuple(res)
        # Basic Validation
        check_still_complete(segments, segs, ())
        return segs
def raw_list(self):
"""Return a list of raw elements, mostly for testing or searching."""
buff = []
for s in self.segments:
buff += s.raw_list()
return buff
def iter_raw_seg(self):
"""Iterate raw segments, mostly for searching."""
for s in self.segments:
for seg in s.iter_raw_seg():
yield seg
def iter_unparsables(self):
"""Iterate through any unparsables this segment may contain."""
for s in self.segments:
for u in s.iter_unparsables():
yield u
def type_set(self):
"""Return a set of the types contained, mostly for testing."""
typs = {self.type}
for s in self.segments:
typs |= s.type_set()
return typs
    def __eq__(self, other):
        # Equal if type, content and pos are the same
        # NB: this should also work for RawSegment
        # NOTE(review): no matching __hash__ is defined in this view, which
        # makes instances unhashable under Python 3 default rules — confirm
        # a __hash__ is defined elsewhere if segments are used in sets/dicts.
        return (type(self) is type(other)
                and (self.raw == other.raw)
                and (self.pos_marker == other.pos_marker))
    def __len__(self):
        """Implement a len method to make everyone's lives easier.

        Always 1: a segment counts as a single element regardless of how
        many children it has.
        """
        return 1
def is_raw(self):
"""Return True if this segment has no children."""
return len(self.segments) == 0
    @classmethod
    def expected_string(cls, dialect=None, called_from=None):
        """Return the expected string for this segment.

        This is never going to be called on an _instance_
        but rather on the class, as part of a grammar, and therefore
        as part of the matching phase. So we use the match grammar.
        """
        # Delegates entirely to the match grammar's own expected_string.
        return cls._match_grammar().expected_string(dialect=dialect, called_from=called_from)
    @classmethod
    def as_optional(cls):
        """Construct a copy of this class, but with the optional flag set true.

        Used in constructing grammars, will make an identical class
        but with the optional argument set to true. Used in constructing
        sequences.

        Returns:
            A new *class* (not an instance) subclassing `cls` with
            `optional = True`.
        """
        # Now lets make the classname (it indicates the mother class for clarity)
        classname = "Optional_{0}".format(cls.__name__)
        # This is the magic, we generate a new class! SORCERY
        newclass = type(classname, (cls, ),
                        dict(optional=True))
        # Now we return that class in the abstract. NOT INSTANTIATED
        return newclass
    def apply_fixes(self, fixes):
        """Apply an iterable of fixes to this segment.

        Used in applying fixes if we're fixing linting errors.
        If anything changes, this should return a new version of the segment
        rather than mutating the original.

        Note: We need to have fixes to apply AND this must have children. In the case
        of raw segments, they will be replaced or removed by their parent and
        so this function should just return self.

        Returns:
            A (segment, remaining_fixes) tuple: the (possibly new) segment
            and whichever fixes were not consumed at this level.
        """
        # Let's check what we've been given.
        if fixes and isinstance(fixes[0], SQLLintError):
            logging.error("Transforming `fixes` from errors into a list of fixes")
            # We've got linting errors, let's aggregate them into a list of fixes
            buff = []
            for err in fixes:
                buff += err.fixes
            # Overwrite fixes
            fixes = buff

        if fixes and not self.is_raw():
            # Get a reference to self to start with, but this will rapidly
            # become a working copy.
            r = self

            # Make a working copy
            seg_buffer = []
            todo_buffer = list(self.segments)
            while True:
                if len(todo_buffer) == 0:
                    break
                else:
                    seg = todo_buffer.pop(0)
                    # We don't apply fixes to meta segments
                    if seg.is_meta:
                        seg_buffer.append(seg)
                        continue

                    fix_buff = fixes.copy()
                    unused_fixes = []
                    while fix_buff:
                        f = fix_buff.pop()
                        if f.anchor == seg:
                            if f.edit_type == 'delete':
                                # We're just getting rid of this segment.
                                seg = None
                            elif f.edit_type in ('edit', 'create'):
                                # We're doing a replacement (it could be a single segment or an iterable)
                                if isinstance(f.edit, BaseSegment):
                                    seg_buffer.append(f.edit)
                                else:
                                    for s in f.edit:
                                        seg_buffer.append(s)
                                if f.edit_type == 'create':
                                    # in the case of a creation, also add this segment on the end
                                    seg_buffer.append(seg)
                            else:
                                raise ValueError(
                                    "Unexpected edit_type: {0!r} in {1!r}".format(
                                        f.edit_type, f))
                            # We've applied a fix here. Move on, this also consumes the fix
                            # TODO: Maybe deal with overlapping fixes later.
                            break
                        else:
                            # We've not used the fix so we should keep it in the list for later.
                            unused_fixes.append(f)
                    else:
                        # while-else: no fix anchored here, keep the segment as-is.
                        seg_buffer.append(seg)
                    # Switch over the the unused list
                    fixes = unused_fixes + fix_buff
            # Then recurse (i.e. deal with the children) (Requeueing)
            seg_queue = seg_buffer
            seg_buffer = []
            for seg in seg_queue:
                s, fixes = seg.apply_fixes(fixes)
                seg_buffer.append(s)
            # Reform into a new segment
            r = r.__class__(
                segments=tuple(seg_buffer),
                pos_marker=r.pos_marker,
                validate=False
            )
            # Lastly, before returning, we should realign positions.
            # Note: Realign also returns a copy
            return r.realign(), fixes
        else:
            return self, fixes
    def realign(self):
        """Realign the positions in this segment.

        Returns:
            a copy of this class with the pos_markers realigned.

        Note: this is used mostly during fixes.

        Realign is recursive. We will assume that the pos_marker of THIS segment is
        truthful, and that during recursion it will have been set by the parent.

        This function will align the pos marker if it's direct children, we then
        recurse to realign their children.
        """
        seg_buffer = []
        todo_buffer = list(self.segments)
        running_pos = self.pos_marker

        while True:
            if len(todo_buffer) == 0:
                # We're done.
                break
            else:
                # Get the first off the buffer
                seg = todo_buffer.pop(0)

                # We'll preserve statement indexes so we should keep track of that.
                # When recreating, we use the DELTA of the index so that's what matter...
                idx = seg.pos_marker.statement_index - running_pos.statement_index

                if seg.is_meta:
                    # It's a meta segment, just update the position
                    seg = seg.__class__(
                        pos_marker=running_pos
                    )
                elif len(seg.segments) > 0:
                    # It's a compound segment, so keep track of it's children
                    child_segs = seg.segments
                    # Create a new segment of the same type with the new position
                    seg = seg.__class__(
                        segments=child_segs,
                        pos_marker=running_pos
                    )
                    # Realign the children of that class
                    seg = seg.realign()
                else:
                    # It's a raw segment...
                    # Create a new segment of the same type with the new position
                    seg = seg.__class__(
                        raw=seg.raw,
                        pos_marker=running_pos
                    )

                # Update the running position with the content of that segment
                running_pos = running_pos.advance_by(
                    raw=seg.raw, idx=idx
                )
                # Add the buffer to my new segment
                seg_buffer.append(seg)

        # Create a new version of this class with the new details
        return self.__class__(
            segments=tuple(seg_buffer),
            pos_marker=self.pos_marker
        )
class RawSegment(BaseSegment):
    """This is a segment without any subsegments.

    Raw segments hold their content directly in `_raw` rather than
    deriving it from children, and override the child-related machinery
    of `BaseSegment` accordingly.
    """

    type = 'raw'
    _is_code = False
    _is_comment = False
    _template = '<unset>'
    _case_sensitive = False
    _raw_upper = None

    @property
    def is_expandable(self):
        """Return true if it is meaningful to call `expand` on this segment.

        Never true for raw segments: they have no children to expand.
        """
        return False

    @property
    def is_code(self):
        """Return True if this segment is code."""
        return self._is_code

    @property
    def is_comment(self):
        """Return True if this segment is a comment."""
        return self._is_comment

    def __init__(self, raw, pos_marker):
        # NB: unlike BaseSegment, there are no children to validate and
        # the uppercase form is precomputed once here.
        self._raw = raw
        self._raw_upper = raw.upper()
        # pos marker is required here
        self.pos_marker = pos_marker

    @property
    def raw_upper(self):
        """Make an uppercase string from the segments of this segment."""
        return self._raw_upper

    def iter_raw_seg(self):
        """Iterate raw segments, mostly for searching.

        A raw segment yields only itself.
        """
        yield self

    @property
    def segments(self):
        """Return an empty list of child segments.

        This is in case something tries to iterate on this segment.

        NOTE(review): this returns a list, while BaseSegment stores its
        children as a tuple — confirm no caller relies on tuple-ness here.
        """
        return []

    def raw_list(self):
        """Return a list of the raw content of this segment."""
        return [self.raw]

    def _reconstruct(self):
        """Return a string of the raw content of this segment."""
        return self._raw

    def __repr__(self):
        # Debug representation includes the raw content, unlike BaseSegment.
        return "<{0}: ({1}) {2!r}>".format(
            self.__class__.__name__,
            self.pos_marker,
            self.raw)

    def stringify(self, ident=0, tabsize=4, pos_idx=60, raw_idx=80, code_only=False):
        """Use indentation to render this segment and it's children as a string.

        For raw segments there are no children: just the preface line.
        """
        preface = self._preface(ident=ident, tabsize=tabsize, pos_idx=pos_idx, raw_idx=raw_idx)
        return preface + '\n'

    def _suffix(self):
        """Return any extra output required at the end when logging.

        NB Override this for specific subclassesses if we want extra output.
        """
        return "{0!r}".format(self.raw)

    @classmethod
    def make(cls, template, case_sensitive=False, name=None, **kwargs):
        """Make a subclass of the segment using a method.

        Returns a new *class* parameterised by template/name, used when
        declaring dialects.
        """
        # Let's deal with the template first
        if case_sensitive:
            _template = template
        else:
            _template = template.upper()
        # Use the name if provided otherwise default to the template
        name = name or _template
        # Now lets make the classname (it indicates the mother class for clarity)
        classname = "{0}_{1}".format(name, cls.__name__)
        # This is the magic, we generate a new class! SORCERY
        newclass = type(classname, (cls, ),
                        dict(_template=_template, _case_sensitive=case_sensitive,
                             _name=name, **kwargs))
        # Now we return that class in the abstract. NOT INSTANTIATED
        return newclass

    def edit(self, raw):
        """Create a new segment, with exactly the same position but different content.

        Returns:
            A copy of this object with new contents.

        Used mostly by fixes.
        """
        return self.__class__(
            raw=raw,
            pos_marker=self.pos_marker
        )

    def get_end_pos_marker(self):
        """Return the pos marker at the end of this segment.

        Computed by advancing the start marker over the raw content.
        """
        return self.pos_marker.advance_by(self.raw)

    def get_start_pos_marker(self):
        """Return the pos marker at the start of this segment."""
        return self.pos_marker
class UnparsableSegment(BaseSegment):
    """A segment which could not be parsed; it indicates an error during parsing."""
    type = 'unparsable'
    # From here down, comments are printed separately.
    comment_seperate = True
    _expected = ""

    def __init__(self, *args, **kwargs):
        # Pull out our own keyword before delegating construction upward.
        self._expected = kwargs.pop('expected', "")
        super(UnparsableSegment, self).__init__(*args, **kwargs)

    def _suffix(self):
        """Return the expectation message shown at the end when logging.

        Overrides the base implementation to surface what was expected
        at the point parsing failed.
        """
        return "!! Expected: {0!r}".format(self._expected)

    def iter_unparsables(self):
        """Iterate through any unparsables.

        Since this segment is itself unparsable, it yields itself.
        """
        yield self
|
nilq/baby-python
|
python
|
"""客户端查询排行榜"""
from upload import uploading
def rank():
    """Query and display the leaderboard (not yet implemented).

    TODO: fetch ranking data (presumably via upload.uploading — confirm)
    and render it for the client.
    """
    pass
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#pylint: disable=C0103
"""
This module provides business object class to interact with
DATASET_ACCESS_TYPES table.
"""
from WMCore.DAOFactory import DAOFactory
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
class DBSDatasetAccessType:
    """
    DatasetAccessType business object class

    NOTE(review): Python 2 only — `basestring` and `dict.iteritems` below
    do not exist on Python 3.
    """
    def __init__(self, logger, dbi, owner):
        # The DAO factory wires up the data-access layer; the single DAO
        # used by this class lists rows of dataset access types.
        daofactory = DAOFactory(package='dbs.dao', logger=logger,
                                dbinterface=dbi, owner=owner)
        self.logger = logger
        self.dbi = dbi
        self.owner = owner

        self.datasetAccessType = daofactory(classname="DatasetType.List")

    def listDatasetAccessTypes(self, dataset_access_type=""):
        """
        List dataset access types

        Returns a one-element list containing a dict that maps
        'dataset_access_type' to the values matching the (uppercased)
        input; the dict is empty when nothing matches.
        """
        if isinstance(dataset_access_type, basestring):
            try:
                dataset_access_type = str(dataset_access_type)
            # NOTE(review): bare except maps *any* failure (including
            # KeyboardInterrupt) to an invalid-input error — consider
            # narrowing to UnicodeEncodeError/ValueError.
            except:
                dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type)
        else:
            dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type)

        conn = self.dbi.connection()
        try:
            plist = self.datasetAccessType.execute(conn, dataset_access_type.upper())
            result = [{}]
            if plist:
                t = []
                for i in plist:
                    for k, v in i.iteritems():
                        t.append(v)
                result[0]['dataset_access_type'] = t
            return result
        finally:
            # Always release the DB connection, even on error.
            if conn:
                conn.close()
|
nilq/baby-python
|
python
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/python/ReconstructionSystem/sensors/realsense_recorder.py
# pyrealsense2 is required.
# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python
import pyrealsense2 as rs
import numpy as np
import cv2
import argparse
from os import makedirs
from os.path import exists, join
import shutil
import json
from enum import IntEnum
try:
    # Python 2 compatible: alias input to raw_input so prompts below
    # always return a plain string without evaluating it.
    input = raw_input
except NameError:
    # Python 3: builtins.input already behaves like raw_input.
    pass
class Preset(IntEnum):
    # Visual preset identifiers for the RealSense depth sensor; the numeric
    # values are passed to depth_sensor.set_option(rs.option.visual_preset, ...).
    Custom = 0
    Default = 1
    Hand = 2
    HighAccuracy = 3
    HighDensity = 4
    MediumDensity = 5
def make_clean_folder(path_folder):
    """Ensure *path_folder* exists and is empty.

    A missing folder is simply created. An existing folder is deleted and
    recreated, but only after the user confirms; any other answer exits
    the program.
    """
    if exists(path_folder):
        user_input = input("%s not empty. Overwrite? (y/n) : " % path_folder)
        if user_input.lower() != 'y':
            exit()
        shutil.rmtree(path_folder)
        makedirs(path_folder)
    else:
        makedirs(path_folder)
def save_intrinsic_as_json(filename, frame):
    """Save the camera intrinsics of *frame* to *filename* as JSON.

    Args:
        filename: Destination path for the JSON file.
        frame: A pyrealsense2 frame; its video stream profile provides the
            intrinsics (width, height, fx, fy, ppx, ppy).

    The intrinsic matrix is written flattened in column-major order:
    [fx, 0, 0, 0, fy, 0, ppx, ppy, 1].
    """
    intrinsics = frame.profile.as_video_stream_profile().intrinsics
    with open(filename, 'w') as outfile:
        # NB: json.dump writes to the file and returns None, so there is
        # nothing useful to bind (the original assigned it to an unused
        # local `obj`).
        json.dump(
            {
                'width':
                    intrinsics.width,
                'height':
                    intrinsics.height,
                'intrinsic_matrix': [
                    intrinsics.fx, 0, 0, 0, intrinsics.fy, 0, intrinsics.ppx,
                    intrinsics.ppy, 1
                ]
            },
            outfile,
            indent=4)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=
        "Realsense Recorder. Please select one of the optional arguments")
    parser.add_argument("--output_folder",
                        default='../dataset/realsense/',
                        help="set output folder")
    parser.add_argument("--record_rosbag",
                        action='store_true',
                        help="Recording rgbd stream into realsense.bag")
    parser.add_argument(
        "--record_imgs",
        action='store_true',
        help="Recording save color and depth images into realsense folder")
    parser.add_argument("--playback_rosbag",
                        action='store_true',
                        help="Play recorded realsense.bag file")
    args = parser.parse_args()

    # Exactly one mode flag must be set: vars(args) holds the three mode
    # booleans plus the (non-False) output_folder string, so a valid
    # invocation yields exactly two non-False values.
    if sum(o is not False for o in vars(args).values()) != 2:
        parser.print_help()
        exit()

    path_output = args.output_folder
    path_depth = join(args.output_folder, "depth")
    path_color = join(args.output_folder, "color")
    if args.record_imgs:
        make_clean_folder(path_output)
        make_clean_folder(path_depth)
        make_clean_folder(path_color)

    path_bag = join(args.output_folder, "realsense.bag")
    if args.record_rosbag:
        if exists(path_bag):
            user_input = input("%s exists. Overwrite? (y/n) : " % path_bag)
            if user_input.lower() == 'n':
                exit()

    # Create a pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()
    if args.record_imgs or args.record_rosbag:
        # note: using 640 x 480 depth resolution produces smooth depth boundaries
        # using rs.format.bgr8 for color image format for OpenCV based image visualization
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        if args.record_rosbag:
            config.enable_record_to_file(path_bag)
    if args.playback_rosbag:
        config.enable_device_from_file(path_bag, repeat_playback=True)

    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()

    # Using preset HighAccuracy for recording
    if args.record_rosbag or args.record_imgs:
        depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meter
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Streaming loop
    frame_count = 0
    try:
        while True:
            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            if args.record_imgs:
                # Intrinsics are saved once, alongside the first frame.
                if frame_count == 0:
                    save_intrinsic_as_json(
                        join(args.output_folder, "camera_intrinsic.json"),
                        color_frame)
                cv2.imwrite("%s/%06d.png" % \
                        (path_depth, frame_count), depth_image)
                cv2.imwrite("%s/%06d.jpg" % \
                        (path_color, frame_count), color_image)
                print("Saved color + depth image %06d" % frame_count)
                frame_count += 1

            # Remove background - Set pixels further than clipping_distance to grey
            grey_color = 153
            # depth image is 1 channel, color is 3 channels
            depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
            bg_removed = np.where((depth_image_3d > clipping_distance) | \
                (depth_image_3d <= 0), grey_color, color_image)

            # Render images
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.09), cv2.COLORMAP_JET)
            images = np.hstack((bg_removed, depth_colormap))
            cv2.namedWindow('Recorder Realsense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Recorder Realsense', images)
            key = cv2.waitKey(1)

            # if 'esc' button pressed, escape loop and exit program
            if key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        # Always stop the pipeline so the device is released cleanly.
        pipeline.stop()
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^job-meta/',views.job_meta, name='job_meta'),
    # NOTE(review): unlike its siblings, this pattern has no trailing '/'
    # (and no '$'), so it also matches any path beginning with
    # 'job-success-failure' — confirm this is intentional.
    url(r'^job-success-failure',views.job_success_failure_ratio, name='job_success_failure_ratio'),
    url(r'^$',views.dashboard, name='dashboard'),
]
|
nilq/baby-python
|
python
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member,maybe-no-member
import ipaddress
from tortuga.cli.tortugaCli import TortugaCli
from tortuga.exceptions.invalidCliRequest import InvalidCliRequest
from tortuga.wsapi.networkWsApi import NetworkWsApi
class NetworkCli(TortugaCli):
"""
Base network command line interface class.
"""
    def __init__(self):
        super(NetworkCli, self).__init__()

        # Initialize api instance handle; left unset here (populated later,
        # presumably by subcommands — confirm).
        self._networkApi = None
def setupDefaultOptions(self):
"""
Set up default command-line options for all attributes in network...
used by update and add operations
"""
cmdline_grpname = _('Command-line')
self.addOptionGroup(cmdline_grpname, None)
# Simple common Options
self.addOptionToGroup(cmdline_grpname, '--network',
help=_('Network in XXX.XXX.XXX.XXX/YY or'
' XXX.XXX.XXX.XXX/YYY.YYY.YYY.YYY'
' format'))
self.addOptionToGroup(cmdline_grpname, '--address', dest='address',
help=_('Network address'))
self.addOptionToGroup(cmdline_grpname, '--netmask', dest='netmask',
help=_('Network mask'))
self.addOptionToGroup(cmdline_grpname, '--suffix', dest='suffix',
help=_('Network suffix'))
self.addOptionToGroup(cmdline_grpname, '--gateway', dest='gateway',
help=_('Network gateway'))
self.addOptionToGroup(cmdline_grpname, '--options', dest='options',
help=_('Network options'))
self.addOptionToGroup(cmdline_grpname, '--name', dest='name',
help=_('Network name'))
self.addOptionToGroup(cmdline_grpname, '--start-ip', dest='startIp',
help=_('Network starting IP address'))
self.addOptionToGroup(cmdline_grpname, '--type', dest='type',
help=_('Network type'))
self.addOptionToGroup(cmdline_grpname, '--increment',
dest='increment',
help=_('Network increment'), type=int)
self.addOptionToGroup(cmdline_grpname,
'--dhcp', dest='usingDhcp',
action='store_true',
help=_('Network addresses assigned via DHCP'))
self.addOptionToGroup(cmdline_grpname, '--static', dest='usingDhcp',
action='store_false',
help=_('Network addresses assigned'
' statically'))
self.addOptionToGroup(cmdline_grpname, '--vlan-id', dest='vlanId',
help=_('VLAN ID.'))
self.addOptionToGroup(cmdline_grpname, '--vlan-parent-network',
dest='vlanParentNetwork',
help=_('Parent network of the VLAN network'))
# Or an xml file can be passed in
xml_grpname = _('From XML file')
self.addOptionGroup(xml_grpname, None)
self.addOptionToGroup(xml_grpname, '--xml-file', dest='xmlFile',
help=_('XML file containing network'
' definition'))
def assertIp(self, ip, parameterName, errorMsg=None):  # pylint: disable=no-self-use
    """
    Validate *ip* as an IPv4 address.

    Raises InvalidCliRequest with *errorMsg* (or a default message built
    from *parameterName*) when the value is not a valid address.
    """
    message = errorMsg
    if message is None:
        message = _('The %s parameter must be a valid IP address.') % (
            parameterName)

    try:
        ipaddress.IPv4Address(str(ip))
    except ipaddress.AddressValueError:
        raise InvalidCliRequest(message)
def updateNetwork(self, network):
    """
    Update a passed in network tortuga object with the values passed
    in on the command line.
    """
    # Check for conflicting command-line options
    if (self.getArgs().netmask or self.getArgs().address) and \
            self.getArgs().network:
        self.getParser().error(
            'Specify network using --network/--netmask or --network')

    if self.getArgs().network:
        # Use 'ipaddr' module to validate network spec
        parsed_network, parsed_netmask = \
            self.parseNetworkParameter(self.getArgs().network)
        network.setAddress(parsed_network)
        network.setNetmask(parsed_netmask)
    else:
        # Address/netmask given separately; each is validated on its own.
        if self.getArgs().address is not None:
            self.assertIp(self.getArgs().address, '--address')
            network.setAddress(self.getArgs().address)
        if self.getArgs().netmask is not None:
            self.assertIp(self.getArgs().netmask, '--netmask')
            network.setNetmask(self.getArgs().netmask)

    # Only overwrite fields the user actually supplied.
    if self.getArgs().suffix is not None:
        network.setSuffix(self.getArgs().suffix)
    if self.getArgs().gateway is not None:
        self.assertIp(self.getArgs().gateway, '--gateway')
        network.setGateway(self.getArgs().gateway)
    if self.getArgs().name is not None:
        network.setName(self.getArgs().name)
    if self.getArgs().startIp is not None:
        self.assertIp(self.getArgs().startIp, '--start-ip')
        network.setStartIp(self.getArgs().startIp)
    if self.getArgs().type is not None:
        network.setType(self.getArgs().type)
    if self.getArgs().increment is not None:
        network.setIncrement(self.getArgs().increment)

    # The options string is a ';'-separated list of key=value pairs;
    # parse it into a dict so VLAN settings can be merged in.
    optionsString = network.getOptions()
    optionsDict = {}
    if optionsString:
        # VLAN info may already exist for this network
        optionsList = optionsString.split(';')
        for originalOption in optionsList:
            key, value = originalOption.split('=')
            optionsDict[key] = value

    # VLAN id and parent network are only meaningful as a pair, whether
    # they come from the command line or from pre-existing options.
    vlanIdFound = self.getArgs().vlanId is not None or \
        'vlan' in optionsDict
    vlanParentNetworkFound = \
        self.getArgs().vlanParentNetwork is not None or \
        'vlanparent' in optionsDict

    if (vlanIdFound and not vlanParentNetworkFound) or \
            (not vlanIdFound and vlanParentNetworkFound):
        raise InvalidCliRequest(
            _('--vlan-id and --vlan-parent-network must be used'
              ' together.'))

    if self.getArgs().vlanId:
        optionsDict['vlan'] = self.getArgs().vlanId

    if self.getArgs().vlanParentNetwork:
        # Match the given parent network to a network in the DB
        networkAddr, subnetMask = self.parseNetworkParameter(
            self.getArgs().vlanParentNetwork)

        existingNetworkList = self.getNetworkApi().getNetworkList()

        matchingNetworkId = None
        for existingNetwork in existingNetworkList:
            if existingNetwork.getAddress() == networkAddr and \
                    existingNetwork.getNetmask() == subnetMask:
                matchingNetworkId = existingNetwork.getId()

        if not matchingNetworkId:
            raise InvalidCliRequest(
                _('Network [%s] not found') % (
                    self.getArgs().vlanParentNetwork))

        optionsDict['vlanparent'] = matchingNetworkId

    # Re-serialise the merged dict back to 'k=v;k=v' form.
    newOptions = ''
    if self.getArgs().vlanId or self.getArgs().vlanParentNetwork:
        for entry in list(optionsDict.items()):
            optionKey, optionValue = entry
            newOptions += '%s=%s;' % (optionKey, optionValue)

        # Take off the last semicolon
        newOptions = newOptions[:-1]

    if self.getArgs().options:
        if newOptions:
            newOptions = '%s;%s' % (newOptions, self.getArgs().options)
        else:
            newOptions = self.getArgs().options

    if self.getArgs().options or self.getArgs().vlanId or \
            self.getArgs().vlanParentNetwork:
        network.setOptions(newOptions)

    if self.getArgs().usingDhcp is not None:
        network.setUsingDhcp(self.getArgs().usingDhcp)
def getNetworkFromXml(self):
    """
    If the xmlFile option is present attempt to create a Network
    object from the xml. Otherwise return None.

    Raises InvalidCliRequest when the file cannot be parsed into a
    valid network.
    """
    network = None
    if self.getArgs().xmlFile:
        # An XML file was provided as input...start with that...
        # Context manager guarantees the handle is closed (the original
        # used an explicit try/finally pair).
        with open(self.getArgs().xmlFile, 'r') as f:
            xmlString = f.read()

        try:
            from tortuga.objects.network import Network
            network = Network.getFromXml(xmlString)
        except Exception as ex:  # pylint: disable=W0703
            # Parse failures are logged; the None result below turns
            # them into a user-facing error.
            self._logger.debug('Error parsing xml %s' % ex)

        if network is None:
            raise InvalidCliRequest(
                _('File [%s] does not contain a valid network.') % (
                    self.getArgs().xmlFile))

    return network
def getNetworkApi(self):
    """Return the network API client, creating and caching it on first use."""
    api = self._networkApi
    if api is None:
        api = self.configureClient(NetworkWsApi)
        self._networkApi = api
    return api
def parseNetworkParameter(self, network):  # pylint: disable=no-self-use
    """
    Parse an 'XXX.XXX.XXX.XXX/YY' or 'XXX.XXX.XXX.XXX/YYY.YYY.YYY.YYY'
    specification into a (network address, netmask) string tuple.

    Raises InvalidCliRequest on a malformed specification.
    """
    try:
        parsed = ipaddress.IPv4Network(str(network))
    except ipaddress.AddressValueError:
        # Invalid argument to --network specified
        raise InvalidCliRequest(
            _('--network argument must be formatted as '
              ' XXX.XXX.XXX.XXX/YY or XXX.XXX.XXX.XXX/YYY.YYY.YYY.YYY'))
    return parsed.network_address.exploded, parsed.netmask.exploded
def validateNetwork(self, network):  # pylint: disable=no-self-use
    """
    Check that *network* carries every field required before it can be
    added to the database: address, netmask, type, an explicit
    DHCP/static choice and, when set, a positive integer increment.

    Raises InvalidCliRequest on the first missing/invalid field.
    """
    if not network.getAddress():
        raise InvalidCliRequest(_('Network address must be specified.'))

    if not network.getNetmask():
        raise InvalidCliRequest(_('Subnet mask must be specified.'))

    if not network.getType():
        raise InvalidCliRequest(_('Network type must be specified.'))

    if network.getUsingDhcp() is None:
        raise InvalidCliRequest(
            _('Address allocation must be specified as DHCP or'
              ' static.'))

    increment = network.getIncrement()
    if increment:
        try:
            if int(increment) < 1:
                raise InvalidCliRequest(
                    _('Increment must be positive.'))
        except ValueError:
            # int() could not parse the increment at all.
            raise InvalidCliRequest(
                _('Increment must be a positive integer.'))
def get_network_from_cmdline(self, retrieve_network=True):
    """
    If 'retrieve_network' is True, return Network object matching network
    specification (either --address/--netmask or --network), otherwise
    return None.

    Raises:
        NetworkNotFound
    """
    # Get network from XML if an xml file was passed in
    network = self.getNetworkFromXml()
    if network:
        return network

    # If we didn't have xml but network load the network from the
    # api...otherwise error
    # NOTE(review): Python precedence groups this as
    # (address is None and network is None) or (...) -- confirm the
    # grouping matches the intended "either/or, not both" rule.
    if self.getArgs().address is None and \
            self.getArgs().network is None or \
            ((self.getArgs().address or self.getArgs().netmask) and
             self.getArgs().network):
        self.getParser().error(
            '--address/--netmask OR --network must be specified')

    if self.getArgs().network:
        # CIDR form: derive both address and netmask from --network.
        _network, _netmask = self.parseNetworkParameter(
            self.getArgs().network)
    else:
        _network = self.getArgs().address
        _netmask = self.getArgs().netmask

        if _netmask is None:
            self.getParser().error('--netmask must be specified')

    if not retrieve_network:
        return None

    return self.getNetworkApi().getNetwork(_network, _netmask)
|
nilq/baby-python
|
python
|
import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
from textblob import TextBlob # for sentimental
import re
# For plotting and visualization:
from IPython.display import display
# for display use only import matplotlib.pyplot as plt
import seaborn as sns
# We import our access keys:
from keys.twitter_keys import * # This will allow us to use the keys as variables
# We import our access keys:
# optional from credentials import * # This will allow us to use the keys as variables
# API's setup:
def twitter_setup():
    """
    Utility function to setup the Twitter's API
    with our access keys provided.

    Returns a tweepy.API instance authenticated with the module-level
    CONSUMER_KEY/CONSUMER_SECRET and ACCESS_TOKEN/ACCESS_SECRET values
    (imported from keys.twitter_keys via the star import above).
    """
    # Authentication and access using keys:
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)

    # Return API with authentication:
    api = tweepy.API(auth)
    return api
# We create an extractor object:
extractor = twitter_setup()

# We create a tweet list as follows:
# tweets = extractor.user_timeline(screen_name="realDonaldTrump", count=200)
## search by hashtab
tweets = extractor.user_timeline(screen_name="cnnbrk", count=10)

# We create a pandas dataframe as follows:
data = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])

# We display the first 10 elements of the dataframe:
#display(data.head(10))

# We add relevant data:
# One column per piece of tweet metadata exposed by the tweepy Status
# objects (created_at appears twice, as 'created_at' and 'Date').
data['created_at'] = np.array([tweet.created_at for tweet in tweets])
data['len'] = np.array([len(tweet.text) for tweet in tweets])
data['ID'] = np.array([tweet.id for tweet in tweets])
data['Date'] = np.array([tweet.created_at for tweet in tweets])
data['Source'] = np.array([tweet.source for tweet in tweets])
data['Likes'] = np.array([tweet.favorite_count for tweet in tweets])
data['RTs'] = np.array([tweet.retweet_count for tweet in tweets])
### Below for sentimental Analysis
def clean_tweet(tweet):
    '''
    Utility function to clean the text in a tweet by removing
    links and special characters using regex.

    Removes @mentions, URLs and anything that is not alphanumeric or
    whitespace, then collapses runs of whitespace to single spaces.
    '''
    # Raw string: the original non-raw pattern contained '\w' and '\/',
    # which are invalid escape sequences (DeprecationWarning today, a
    # SyntaxError in future Python).  The pattern itself is unchanged.
    return ' '.join(
        re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",
               " ", tweet).split())
def analize_sentiment(tweet):
    '''
    Utility function to classify the polarity of a tweet
    using textblob.

    Returns 1 for positive polarity, -1 for negative, 0 for neutral.
    '''
    polarity = TextBlob(clean_tweet(tweet)).sentiment.polarity
    if polarity > 0:
        return 1
    if polarity < 0:
        return -1
    return 0
try:
    # We create a column with the result of the analysis:
    data['SA'] = np.array([ analize_sentiment(tweet) for tweet in data['Tweets'] ])

    #api.send("sentimentPreview","Rules:\n" + data (10));
    #display (data (10))
    # Emit the whole annotated frame as CSV text.
    myString = data.to_csv()
    display ( myString )
except Exception as inst:
    # Best-effort script: surface any failure instead of crashing.
    display ( "errors" + str(inst))
|
nilq/baby-python
|
python
|
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Table
from sqlalchemy.orm import relationship, backref
from models import db_base as base
import os
import json
__author__ = "zadjii"
class Issue(base):
    __tablename__ = "issue"
    """
    Represents a single issue
    """
    # NOTE(review): the string above is a no-op expression, not the class
    # docstring -- it follows __tablename__, so Issue.__doc__ stays None.

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Raw API payload serialised as JSON text (see __init__).
    raw_data = Column(String)
    # Issue number as reported by the API object.
    number = Column(Integer)

    def __init__(self, api_obj):
        # api_obj: an API wrapper exposing .number and a private
        # ._rawData dict -- presumably a PyGithub Issue; confirm against
        # the library version in use.
        self.number = api_obj.number
        self.raw_data = json.dumps(api_obj._rawData)
|
nilq/baby-python
|
python
|
# HTTP port the service listens on.
port = 8888
# Log verbosity level.
logging = 'info'
# Prefix for the error-log file.
log_file_prefix = "tivid-error.log"

# Redis connection settings.
redis_host = 'localhost'
redis_port = 6379
redis_db = 0

# Listing pages scraped for Java and Python articles.
java_source = "http://www.importnew.com/all-posts"
python_source = "http://python.jobbole.com/all-posts/"
|
nilq/baby-python
|
python
|
import functools
from threading import Thread
def async_func(f):
    """
    Decorator that runs the wrapped callable in a background thread.

    The wrapper now returns the started ``Thread`` so callers may
    ``join()`` it (previously the thread object was discarded and the
    wrapper returned None), and ``functools.wraps`` preserves the
    wrapped function's name/docstring.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr
    return wrapper
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from django.apps import AppConfig
class SequencerConfig(AppConfig):
    # Django application registry entry for the 'sequencer' app.
    name = 'sequencer'
|
nilq/baby-python
|
python
|
import os
import sys
import datetime
from glob import iglob
from skimage import io
import cv2
import tensorflow as tf
import numpy as np
import utils as ut
import training as tr
class VAE2predict:
    """
    Inference-only wrapper around the VAE defined in ``training``:
    encoder -> 512-d latent -> image decoder plus mask decoder, with the
    decoded face composited back over the input image's background.
    """

    def __init__(self, use_sampling=False):
        # use_sampling: if True, draw the latent via training.sampling
        # from (z_mean, z_log_var); otherwise use z_mean directly.
        self.use_sampling = use_sampling
        self._build_model()

    def _build_model(self):
        """Assemble the Keras graph (144x144x3 input, three outputs)."""
        in_image = tf.keras.layers.Input(shape=(144, 144, 3), name='in_image')
        out_encoder = tr.Encoder()(in_image)
        x = tf.keras.layers.Dense(512)(out_encoder)
        # x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation('elu', name='out_latent_1')(x)
        self.z_mean = tf.keras.layers.Dense(512, name='z_mean')(x)
        self.z_log_var = tf.keras.layers.Dense(512, name='z_logvar')(x)
        if self.use_sampling:
            self.z_latent = tf.keras.layers.Lambda(
                tr.sampling, output_shape=(512,),
                name='z_sampling')([self.z_mean, self.z_log_var])
        else:
            # Deterministic path: forward z_mean unchanged.
            self.z_latent = tf.keras.layers.Lambda(
                lambda x: x[0], output_shape=(512,),
                name='z_sampling')([self.z_mean, self.z_log_var])
        out_image_pre = tr.Decoder()(self.z_latent, 'out_image_pre', 3)
        out_mask = tr.DecoderMask()(self.z_latent, 'out_mask', 1)
        # Tidy the image to use only the face regions of the estimated
        # output and join with the original background
        x = tf.keras.layers.Multiply()([out_image_pre, out_mask])
        x_bg = tf.keras.layers.Multiply()([in_image, 1. - out_mask])
        out_image = tf.keras.layers.Add(name='out_image')([x, x_bg])
        self.model = tf.keras.models.Model(
            in_image, [out_image, out_mask, out_image_pre])

    def load_weights(self, modelpath=None, ckpt_dir=None):
        """
        Load weights from an explicit ``modelpath`` or, when ``ckpt_dir``
        is given, from the lexicographically last 'w*.h5' file in it.

        Raises ValueError if neither location is supplied.
        """
        if ckpt_dir is None and modelpath is None:
            # The original ``raise('...')`` raised a plain string, which
            # itself fails with TypeError, and was followed by an
            # unreachable sys.exit().
            raise ValueError('Not possible to load the model')
        if ckpt_dir is not None:
            fpaths_weights = list(iglob(os.path.join(ckpt_dir, 'w*.h5')))
            fpaths_weights.sort()
            self.modelpath = fpaths_weights[-1]
        else:
            self.modelpath = modelpath
        self.model.load_weights(self.modelpath)

    def predict(self, X):
        """Run inference; accepts a single HxWx3 image or a batch."""
        if len(X.shape) == 3:
            X = X[None]
        return self.model.predict(X)

    def predict_path(self, paths):
        """Load image(s) from path(s) via ``utils.load_img`` and predict."""
        if isinstance(paths, str):
            paths = [paths]
        X = np.stack([ut.load_img(p) for p in paths])
        return self.predict(X)
class VAENoMask2predict(VAE2predict):
    """Variant of VAE2predict with a single decoder and no mask/compositing."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _build_model(self):
        # Same encoder and latent head as the parent; only the decode
        # stage differs (one image output, no background blend).
        in_image = tf.keras.layers.Input(shape=(144, 144, 3), name='in_image')
        out_encoder = tr.Encoder()(in_image)
        x = tf.keras.layers.Dense(512)(out_encoder)
        # x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation('elu', name='out_latent_1')(x)
        self.z_mean = tf.keras.layers.Dense(512, name='z_mean')(x)
        self.z_log_var = tf.keras.layers.Dense(512, name='z_logvar')(x)
        if self.use_sampling:
            self.z_latent = tf.keras.layers.Lambda(tr.sampling, output_shape=(512,),
                                                   name='z_sampling')([self.z_mean, self.z_log_var])
        else:
            # Deterministic path: forward z_mean unchanged.
            self.z_latent = tf.keras.layers.Lambda(lambda x: x[0], output_shape=(512,),
                                                   name='z_sampling')([self.z_mean, self.z_log_var])
        self.out_image = tr.Decoder()(self.z_latent, 'out_image', 3)
        self.model = tf.keras.models.Model(in_image, self.out_image)
def save_predictions(preds, org_dim=(144, 144)):
    """
    Write reconstruction and mask images for each sample in *preds* to
    the ``cache/.`` directory.

    preds   : sequence as returned by VAE2predict.predict
              (index 0 = reconstructed image, index 1 = mask).
    org_dim : size the images are resized back to before saving.
    """
    # NOTE(review): a timestamped directory name was computed and then
    # immediately overwritten with '.' in the original; the fixed
    # 'cache/.' output location is kept, the dead computation dropped.
    dir_save = os.path.join('cache', '.')
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(dir_save, exist_ok=True)
    n_samples = preds[0].shape[0]
    for i in range(n_samples):
        for j, name in enumerate(['reconst', 'mask']):
            jpath = os.path.join(dir_save, '{}_{}.png'.format(i, name))
            Xsave = preds[j][i]
            if j == 1:
                # Round the mask pixels
                Xsave = Xsave.round()
            Xsave = cv2.resize(Xsave, org_dim)
            io.imsave(jpath, Xsave)
if __name__ == '__main__':
    # CLI usage: python <script> <image path>
    impath = sys.argv[1]
    # Scale pixels into [0, 1] before feeding the network.
    im = io.imread(impath) / 255
    org_dim = im.shape[:-1]
    im = ut.resize_imx144(im)
    vae = VAE2predict()
    vae.load_weights(modelpath=ut.modelpath_best_predict)
    X_pred = vae.predict(im)
    save_predictions(X_pred, org_dim=org_dim)
|
nilq/baby-python
|
python
|
from django import template
# Template-filter registry for this module's custom filters.
register = template.Library()
from urlparse import urlparse
def domain_only(full_url):
    """
    Return the host portion of *full_url* without a leading 'www.'.

    The original used ``lstrip("www.")``, which strips any leading run
    of the characters {'w', '.'} rather than the literal prefix --
    e.g. 'web.example.com' became 'eb.example.com'.  Strip the exact
    prefix instead.
    """
    netloc = urlparse(full_url).netloc
    if netloc.startswith("www."):
        netloc = netloc[len("www."):]
    return netloc
# Expose the filter to templates as {{ url|domain_only }}.
register.filter('domain_only', domain_only)
|
nilq/baby-python
|
python
|
# Interactive-style walkthrough of simple assignment and addition;
# each expression is echoed before it is evaluated.
print('spam = 40')
spam = 40
print('eggs = 2')
eggs = 2
print('spam + eggs')
a = spam + eggs
print(a)

# Variable naming convention
# small then capital or separated by _
# varA
nilq/baby-python
|
python
|
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019 Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import copy
import math
import os
import sys
import unittest
from distutils.version import LooseVersion
from python import default_templates
from . import BaseInspectTestCase
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../plugins'))
import m.sphinx
class String(BaseInspectTestCase):
    """Inspect a module referenced by name (string) and diff the output."""

    def test(self):
        self.run_python({
            'LINKS_NAVBAR1': [
                ('Modules', 'modules', []),
                ('Classes', 'classes', [])],
        })
        # Every generated page must match its checked-in expected copy.
        self.assertEqual(*self.actual_expected_contents('inspect_string.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.another_module.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.FooSlots.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.DerivedException.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.Specials.html'))
        self.assertEqual(*self.actual_expected_contents('classes.html'))
        self.assertEqual(*self.actual_expected_contents('modules.html'))
class Object(BaseInspectTestCase):
    """Inspect the same module passed as an object; output must be identical."""

    def test(self):
        # Reuse the stuff from inspect_string, but this time reference it via
        # an object and not a string
        sys.path.append(os.path.join(os.path.dirname(self.path), 'inspect_string'))
        import inspect_string
        self.run_python({
            'LINKS_NAVBAR1': [
                ('Modules', 'modules', []),
                ('Classes', 'classes', [])],
            'INPUT_MODULES': [inspect_string]
        })

        # The output should be the same as when inspecting a string
        self.assertEqual(*self.actual_expected_contents('inspect_string.html', '../inspect_string/inspect_string.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.another_module.html', '../inspect_string/inspect_string.another_module.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.Foo.html', '../inspect_string/inspect_string.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.FooSlots.html', '../inspect_string/inspect_string.FooSlots.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.DerivedException.html', '../inspect_string/inspect_string.DerivedException.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_string.Specials.html', '../inspect_string/inspect_string.Specials.html'))
        self.assertEqual(*self.actual_expected_contents('classes.html', '../inspect_string/classes.html'))
        self.assertEqual(*self.actual_expected_contents('modules.html', '../inspect_string/modules.html'))
class AllProperty(BaseInspectTestCase):
    """Diff generated output for the inspect_all_property fixture."""

    def test(self):
        self.run_python()
        self.assertEqual(*self.actual_expected_contents('inspect_all_property.html'))
class Annotations(BaseInspectTestCase):
    """Diff annotation rendering; the math tests vary by Python version."""

    def test(self):
        self.run_python()
        self.assertEqual(*self.actual_expected_contents('inspect_annotations.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_annotations.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_annotations.FooSlots.html'))

        # This should not list any internal stuff from the typing module
        self.assertEqual(*self.actual_expected_contents('inspect_annotations.AContainer.html'))

    # https://github.com/python/cpython/pull/13394
    @unittest.skipUnless(LooseVersion(sys.version) >= LooseVersion('3.7.4'),
                         "signature with / for pow() is not present in 3.6, "
                         "3.7.3 and below has a different docstring")
    def test_math(self):
        # From math export only pow() so we have the verification easier, and
        # in addition log() because it doesn't provide any signature metadata
        # (math has no __all__ normally; we install one temporarily).
        assert not hasattr(math, '__all__')
        math.__all__ = ['pow', 'log']

        self.run_python({
            'INPUT_MODULES': [math]
        })

        del math.__all__
        assert not hasattr(math, '__all__')

        self.assertEqual(*self.actual_expected_contents('math.html'))

    # https://github.com/python/cpython/pull/13394
    @unittest.skipUnless(LooseVersion(sys.version) < LooseVersion('3.7.4') and LooseVersion(sys.version) >= LooseVersion('3.7'),
                         "signature with / for pow() is not present in 3.6, "
                         "3.7.3 and below has a different docstring")
    def test_math373(self):
        # From math export only pow() so we have the verification easier, and
        # in addition log() because it doesn't provide any signature metadata
        assert not hasattr(math, '__all__')
        math.__all__ = ['pow', 'log']

        self.run_python({
            'INPUT_MODULES': [math]
        })

        del math.__all__
        assert not hasattr(math, '__all__')

        self.assertEqual(*self.actual_expected_contents('math.html', 'math373.html'))

    @unittest.skipUnless(LooseVersion(sys.version) < LooseVersion('3.7'),
                         "docstring for log() is different in 3.7")
    def test_math36(self):
        # From math export only pow() so we have the verification easier, and
        # in addition log() because it doesn't provide any signature metadata
        assert not hasattr(math, '__all__')
        math.__all__ = ['log']

        self.run_python({
            'INPUT_MODULES': [math]
        })

        del math.__all__
        assert not hasattr(math, '__all__')

        self.assertEqual(*self.actual_expected_contents('math.html', 'math36.html'))
class NameMapping(BaseInspectTestCase):
    """Diff output when an internal name is remapped via NAME_MAPPING."""

    def test(self):
        self.run_python({
            'NAME_MAPPING': {
                'inspect_name_mapping._sub.bar._NameThatGetsOverridenExternally': 'yay.ThisGotOverridenExternally'
            }
        })
        self.assertEqual(*self.actual_expected_contents('inspect_name_mapping.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_name_mapping.Class.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_name_mapping.submodule.html'))
class Recursive(BaseInspectTestCase):
    """Diff output for the fixture with recursive module references."""

    def test(self):
        self.run_python()
        self.assertEqual(*self.actual_expected_contents('inspect_recursive.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_recursive.first.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_recursive.a.html'))
class TypeLinks(BaseInspectTestCase):
    """Diff type cross-linking against the bundled Python intersphinx inventory."""

    def test(self):
        self.run_python({
            'PLUGINS': ['m.sphinx'],
            'INPUT_DOCS': ['docs.rst'],
            'INPUT_PAGES': ['index.rst'],
            'M_SPHINX_INVENTORIES': [
                ('../../../doc/documentation/python.inv', 'https://docs.python.org/3/', [], ['m-doc-external'])]
        })
        self.assertEqual(*self.actual_expected_contents('index.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.Foo.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.sub.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.first.sub.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.Foo.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.FooSlots.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_type_links.second.FooSlotsInvalid.html'))
class CreateIntersphinx(BaseInspectTestCase):
    """Verify the intersphinx inventory written by M_SPHINX_INVENTORY_OUTPUT."""

    def test(self):
        self.run_python({
            'PLUGINS': ['m.sphinx'],
            'INPUT_PAGES': ['page.rst'],
            'M_SPHINX_INVENTORIES': [
                # Nothing from here should be added to the output
                ('../../../doc/documentation/python.inv', 'https://docs.python.org/3/', [], ['m-doc-external'])],
            'M_SPHINX_INVENTORY_OUTPUT': 'things.inv',
            'PYBIND11_COMPATIBILITY': True
        })
        # Compare the pretty-printed inventory against the expected dump.
        with open(os.path.join(self.path, 'output/things.inv'), 'rb') as f:
            self.assertEqual(m.sphinx.pretty_print_intersphinx_inventory(f), """
# Sphinx inventory version 2
# Project: X
# Version: 0
# The remainder of this file is compressed using zlib.
inspect_create_intersphinx.Class.a_property py:attribute 2 inspect_create_intersphinx.Class.html#a_property -
inspect_create_intersphinx.Class py:class 2 inspect_create_intersphinx.Class.html -
inspect_create_intersphinx.Class.CLASS_DATA py:data 2 inspect_create_intersphinx.Class.html#CLASS_DATA -
inspect_create_intersphinx.MODULE_DATA py:data 2 inspect_create_intersphinx.html#MODULE_DATA -
inspect_create_intersphinx.Enum py:enum 2 inspect_create_intersphinx.html#Enum -
inspect_create_intersphinx.Enum.ENUM_VALUE py:enumvalue 2 inspect_create_intersphinx.html#Enum-ENUM_VALUE -
inspect_create_intersphinx.Class.class_method py:function 2 inspect_create_intersphinx.Class.html#class_method -
inspect_create_intersphinx.Class.method py:function 2 inspect_create_intersphinx.Class.html#method -
inspect_create_intersphinx.Class.static_method py:function 2 inspect_create_intersphinx.Class.html#static_method -
inspect_create_intersphinx.function py:function 2 inspect_create_intersphinx.html#function -
inspect_create_intersphinx.pybind.overloaded_function py:function 2 inspect_create_intersphinx.pybind.html#overloaded_function -
inspect_create_intersphinx py:module 2 inspect_create_intersphinx.html -
inspect_create_intersphinx.pybind py:module 2 inspect_create_intersphinx.pybind.html -
page std:doc 2 page.html -
index std:special 2 index.html -
modules std:special 2 modules.html -
classes std:special 2 classes.html -
pages std:special 2 pages.html -
""".lstrip())

# Yes, above it should say A documentation page, but it doesn't
# attr is an optional dependency; the Attrs test below is skipped when
# it is absent.
try:
    import attr
except ImportError:
    attr = None
class Attrs(BaseInspectTestCase):
    """Diff output for attrs-decorated classes (needs the attr package)."""

    @unittest.skipUnless(attr, "the attr package was not found")
    def test(self):
        self.run_python({
            'PLUGINS': ['m.sphinx'],
            'INPUT_DOCS': ['docs.rst'],
            'ATTRS_COMPATIBILITY': True
        })
        self.assertEqual(*self.actual_expected_contents('inspect_attrs.MyClass.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_attrs.MyClassAutoAttribs.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_attrs.MySlotClass.html'))
class Underscored(BaseInspectTestCase):
    """Diff output for underscored members with docstring parsing enabled."""

    def test(self):
        self.run_python({
            'PLUGINS': ['m.sphinx'],
            'INPUT_DOCS': ['docs.rst'],
            'M_SPHINX_PARSE_DOCSTRINGS': True
        })
        self.assertEqual(*self.actual_expected_contents('inspect_underscored.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_underscored.Class.html'))
class ValueFormatting(BaseInspectTestCase):
    """Diff output for the inspect_value_formatting fixture."""

    def test(self):
        self.run_python({})
        self.assertEqual(*self.actual_expected_contents('inspect_value_formatting.html'))
class DuplicateClass(BaseInspectTestCase):
    """Diff output when the same class is reachable under two modules."""

    def test(self):
        self.run_python({})
        self.assertEqual(*self.actual_expected_contents('inspect_duplicate_class.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_duplicate_class.sub.html'))
        self.assertEqual(*self.actual_expected_contents('inspect_duplicate_class.Bar.html'))
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class MagiclinkConfig(AppConfig):
    # Django application registry entry for the 'magiclink' app.
    name = 'magiclink'
|
nilq/baby-python
|
python
|
from flask import Flask
import os
# Flask application plus a module-level health flag toggled by the
# /healthy and /unhealthy endpoints below.
app = Flask(__name__)
healthy = True
@app.route('/')
def hello():
    """Greet with the HOST env value, or return 503 when marked unhealthy."""
    global healthy
    # NOTE(review): the /healthy view function below is also named
    # ``healthy``, so the module-level name is rebound to that (truthy)
    # function until the /healthy endpoint is first hit -- confirm this
    # shadowing is intended.
    if healthy:
        return f"Hello from {os.environ['HOST']}!\n"
    else:
        return "Unhealthy", 503
@app.route('/healthy')
def set_healthy():
    """
    Mark this instance healthy.

    Renamed from ``healthy``: the old view-function name shadowed the
    module-level ``healthy`` flag, so the flag was a (always-truthy)
    function object until this endpoint first rebound it.  The URL is
    unchanged.
    """
    global healthy
    healthy = True
    return f"[{os.environ['HOST']}] Set to healthy\n", 201
@app.route('/unhealthy')
def unhealthy():
    """Mark this instance unhealthy so '/' starts returning 503."""
    global healthy
    healthy = False
    return f"[{os.environ['HOST']}] Set to unhealthy\n", 201
if __name__ == "__main__":
    # Bind on all interfaces so container port mapping works.
    app.run(host='0.0.0.0', port=8000, debug=False)
|
nilq/baby-python
|
python
|
'''
Integration tests for states.
'''
import unittest as ut
import numpy as np
import dynamite_test_runner as dtr
from dynamite.states import State
class RandomSeed(dtr.DynamiteTestCase):

    def test_generation(self):
        '''
        Make sure that different processors get the same random seed.
        '''
        from dynamite import config
        config.initialize()

        from petsc4py import PETSc
        comm = PETSc.COMM_WORLD.tompi4py()

        seed = State.generate_time_seed()

        # Rank 0 gathers every rank's seed and checks they all agree.
        all_seeds = comm.gather(seed, root = 0)
        if comm.rank == 0:
            self.assertTrue(all(s == seed for s in all_seeds))
class ToNumpy(dtr.DynamiteTestCase):

    def setUp(self):
        # Vector with one element per MPI rank; each rank writes its own
        # rank number into its slot (the rest start at -1).
        from petsc4py import PETSc
        self.v = PETSc.Vec().create()
        self.v.setSizes(PETSc.COMM_WORLD.size)
        self.v.setFromOptions()
        self.v.set(-1)

        self.v[PETSc.COMM_WORLD.rank] = PETSc.COMM_WORLD.rank
        self.v.assemblyBegin()
        self.v.assemblyEnd()

    def test_to_zero(self):
        # _to_numpy gathers the vector onto rank 0 only.
        from petsc4py import PETSc
        npvec = State._to_numpy(self.v)
        if PETSc.COMM_WORLD.rank == 0:
            # NOTE(review): range(rank) is empty on rank 0, so this loop
            # asserts nothing -- range(PETSc.COMM_WORLD.size) was likely
            # intended; confirm.
            for i in range(PETSc.COMM_WORLD.rank):
                self.assertTrue(npvec[i] == i)
        else:
            self.assertIs(npvec, None)

    def test_to_all(self):
        # With to_all=True every rank receives the gathered array.
        from petsc4py import PETSc
        npvec = State._to_numpy(self.v, to_all = True)
        # NOTE(review): same empty-range concern as test_to_zero.
        for i in range(PETSc.COMM_WORLD.rank):
            self.assertTrue(npvec[i] == i)
class PetscMethods(dtr.DynamiteTestCase):
    '''
    Tests that the methods directly included from PETSc function as intended.
    '''
    def test_norm(self):
        # All-ones vector: squared norm equals the subspace dimension.
        state = State()
        start, end = state.vec.getOwnershipRange()
        state.vec[start:end] = np.array([1]*(end-start))
        state.vec.assemblyBegin()
        state.vec.assemblyEnd()
        self.assertAlmostEqual(state.norm()**2, state.subspace.get_dimension())

    def test_normalize(self):
        state = State()
        start, end = state.vec.getOwnershipRange()
        state.vec[start:end] = np.array([1]*(end-start))
        state.vec.assemblyBegin()
        state.vec.assemblyEnd()
        state.normalize()
        self.assertTrue(state.norm() == 1)

    def test_copy_preallocate(self):
        # copy() into an existing, preallocated destination state.
        state1 = State()
        state2 = State()
        start, end = state1.vec.getOwnershipRange()
        state1.vec[start:end] = np.arange(start, end)
        state1.vec.assemblyBegin()
        state1.vec.assemblyEnd()

        result = np.ndarray((end-start,), dtype=np.complex128)
        state1.copy(state2)
        result[:] = state2.vec[start:end]

        self.assertTrue(np.array_equal(result, np.arange(start, end)))

    def test_copy_exception_L(self):
        # Copying between states of different L must fail.
        state1 = State()
        state2 = State(L=state1.subspace.L+1)

        with self.assertRaises(ValueError):
            state1.copy(state2)

    def test_copy_nopreallocate(self):
        # copy() with no destination allocates and returns a new state.
        state1 = State()
        start, end = state1.vec.getOwnershipRange()
        state1.vec[start:end] = np.arange(start, end)
        state1.vec.assemblyBegin()
        state1.vec.assemblyEnd()

        result = np.ndarray((end-start,), dtype=np.complex128)
        state2 = state1.copy()
        result[:] = state2.vec[start:end]

        self.assertTrue(np.array_equal(result, np.arange(start, end)))

    def test_scale(self):
        # In-place scalar multiplication for int and float factors.
        vals = [2, 3.14]
        for val in vals:
            with self.subTest(val=val):
                state = State(state='random')
                start, end = state.vec.getOwnershipRange()
                pre_values = np.ndarray((end-start,), dtype=np.complex128)
                pre_values[:] = state.vec[start:end]

                state *= val

                for i in range(start, end):
                    self.assertEqual(state.vec[i], val*pre_values[i-start])

    def test_scale_divide(self):
        val = 3.14
        state = State(state='random')
        start, end = state.vec.getOwnershipRange()
        pre_values = np.ndarray((end-start,), dtype=np.complex128)
        pre_values[:] = state.vec[start:end]

        state /= val

        for i in range(start, end):
            self.assertEqual(state.vec[i], (1/val)*pre_values[i-start])

    def test_scale_exception_ary(self):
        # Only scalars are valid scale factors; arrays must raise.
        val = np.array([3.1, 4])
        state = State()
        with self.assertRaises(TypeError):
            state *= val

    def test_scale_exception_vec(self):
        # Scaling by another state is likewise a TypeError.
        state1 = State()
        state2 = State()
        with self.assertRaises(TypeError):
            state1 *= state2
# TODO: check state setting. e.g. setting an invalid state should fail (doesn't for Full subspace)
if __name__ == '__main__':
    # Delegate to the dynamite test runner.
    dtr.main()
|
nilq/baby-python
|
python
|
import requests
import os
from dotenv import load_dotenv
from datetime import datetime
load_dotenv()
def send_to_slack(msg: str) -> None:
    """
    Post *msg* as a formatted attachment to the Slack incoming webhook
    configured via the SLACK_WEBHOOK environment variable.
    """
    URL = os.getenv("SLACK_WEBHOOK")
    headers = {"content-type": "application/json"}
    payload = {
        "attachments": [
            {
                "fallback": "Plain-text summary of the attachment.",
                "color": "#fff",
                "title": "🚨 Important notification",
                "text": msg,
                # Constant URL (the original used an f-string with no
                # placeholders).
                "title_link": "https://medium.com/@fabianbosler/membership",
                "footer": "Made by Fabian with ❤️",
                "footer_icon": "https://image.flaticon.com/icons/png/512/2097/2097443.png",
                "ts": datetime.utcnow().timestamp(),
            }
        ]
    }
    # requests has no default timeout; bound the call so a hung webhook
    # cannot block the caller indefinitely.
    requests.post(URL, json=payload, headers=headers, timeout=10)
|
nilq/baby-python
|
python
|
from flask import abort, request
from . import app
from .helpers import render_error_template
import logging
# Catch all route for everything not matched elsewhere
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path): # pragma: no cover
    """Abort with 404 for any path no other route claims."""
    abort(404, "Not Found")
@app.errorhandler(400)
def bad_request(e): # pragma: no cover
    """Log the failing path and render the 400 error page."""
    logging.exception('An error occurred during a request due to bad request error: %s', request.path)
    return render_error_template(error=e, status_code=400)
@app.errorhandler(404)
def page_not_found(e):
return render_error_template(error=e, status_code=404)
@app.errorhandler(500)
def handle_internal_server_error(e): # pragma: no cover
logging.exception('An error occurred during a request due to internal server error: %s', request.path)
return render_error_template(error=e, status_code=500)
@app.errorhandler(502)
def handle_bad_gateway(e): # pragma: no cover
logging.exception('An error occurred during a request due to bad gateway: %s', request.path)
return render_error_template(error=e, status_code=502)
|
nilq/baby-python
|
python
|
'''
Created on Jun 21, 2016
@author: MarcoXZh
'''
import sys, re
import xml.etree.ElementTree as ET
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
from PIL import Image
from ImageComparison import calcSSIM
def sameColor(color1, color2):
    '''
    Decide whether two CSS color values are perceptually the same.

    Accepts "transparent", an "rgb(r,g,b)"-style string, or a hex string
    such as "#aabbcc".  Non-transparent colors are converted to Lab space
    and compared with the CIE2000 delta-E; values below 4.65 are treated
    as indistinguishable (just-noticeable-difference threshold).

    @param color1: {String} rgb string such as "rgb(0,0,0)"
    @param color2: {String} rgb string such as "rgb(0,0,0)"
    @return: {Boolean} True if the two colors are the same; False otherwise
    '''
    if color1 == "transparent" and color2 == "transparent":
        return True
    if color1 != "transparent" and color2 != "transparent":
        if "," in color1:
            # "rgb(...)" form: split on non-digit runs, drop the empty
            # leading/trailing tokens, keep the first three channels.
            rgb1 = re.split(r"\D+", color1)[1:-1]
            rgb1 = sRGBColor(int(rgb1[0]), int(rgb1[1]), int(rgb1[2]), is_upscaled=True)
        else:
            rgb1 = sRGBColor.new_from_rgb_hex(color1)
        if "," in color2:
            rgb2 = re.split(r"\D+", color2)[1:-1]
            rgb2 = sRGBColor(int(rgb2[0]), int(rgb2[1]), int(rgb2[2]), is_upscaled=True)
        else:
            rgb2 = sRGBColor.new_from_rgb_hex(color2)
        return delta_e_cie2000(convert_color(rgb1, LabColor), convert_color(rgb2, LabColor)) < 4.65
    pass # if color1 != "transparent" and color2 != "transparent"
    # Exactly one side is "transparent": never the same.
    return False
pass # def sameColor(color1, color2)
def sameImage(img1, img2):
    """Return True when the two background-image values refer to the same image.

    "none" and the empty string both mean "no image" and are considered equal.
    Equal paths match trivially; distinct paths are compared with SSIM.
    NOTE(review): the SSIM threshold direction (`< 0.4` meaning "same") looks
    inverted -- SSIM is ~1 for similar images -- kept as in the original call;
    confirm against ImageComparison.calcSSIM's convention.
    """
    empty1 = (img1 == "none" or img1 == "")
    empty2 = (img2 == "none" or img2 == "")
    if empty1 and empty2:
        return True
    # BUGFIX: the original returned False whenever *either* value was
    # non-empty ("if not empty1 or not empty2"), which made the comparisons
    # below unreachable.  Only a mixed empty/non-empty pair is a mismatch.
    if empty1 != empty2:
        return False
    if img1 == img2:
        return True
    return calcSSIM(Image.open(img1), Image.open(img2)) < 0.4
def normalizedHausdorffDistance(node1, node2):
    """Symmetric, size-normalized distance between two page elements' boxes.

    For each direction A->B the distance is 0 when A's box lies inside B's;
    if the boxes are axis-aligned (one contained horizontally or vertically)
    only the perpendicular gap counts; otherwise the Euclidean gap is used.
    Each directed distance is normalized by A's own extent, and the final
    value is the maximum over both directions (Hausdorff-style symmetry).

    @param node1: WebElement-like object with `location` and `size` dicts
    @param node2: WebElement-like object with `location` and `size` dicts
    @return: {float} normalized distance (0.0 for mutually-contained boxes)
    """
    def normalizedDistance_AtoB(nodeA, nodeB):
        leftA = 1.0 * int(nodeA.location["x"])
        topA = 1.0 * int(nodeA.location["y"])
        rightA = 1.0 * int(nodeA.location["x"]) + int(nodeA.size["width"])
        # BUGFIX: the original used nodeB's height here, corrupting A's
        # bottom edge (and hence A's height) whenever the elements differ.
        bottomA = 1.0 * int(nodeA.location["y"]) + int(nodeA.size["height"])
        leftB = 1.0 * int(nodeB.location["x"])
        topB = 1.0 * int(nodeB.location["y"])
        rightB = 1.0 * int(nodeB.location["x"]) + int(nodeB.size["width"])
        bottomB = 1.0 * int(nodeB.location["y"]) + int(nodeB.size["height"])
        widthA, widthB = abs(rightA - leftA), abs(rightB - leftB)
        heightA, heightB = abs(bottomA - topA), abs(bottomB - topB)
        centerXA, centerYA = leftA + 0.5 * widthA, topA + 0.5 * heightA
        centerXB, centerYB = leftB + 0.5 * widthB, topB + 0.5 * heightB
        # A fully inside B: zero distance.
        if leftA >= leftB and rightA <= rightB and topA >= topB and bottomA <= bottomB:
            return 0.0
        # Horizontally contained: only the vertical gap matters.
        if leftA >= leftB and rightA <= rightB:
            return (abs(topB - topA) if centerYA < centerYB else abs(bottomA - bottomB)) / heightA
        # Vertically contained: only the horizontal gap matters.
        if topA >= topB and bottomA <= bottomB:
            return (abs(leftB - leftA) if centerXA < centerXB else abs(rightA - rightB)) / widthA
        # General case: Euclidean gap normalized by A's diagonal.
        deltaX = leftB - leftA if centerXA < centerXB else rightA - rightB
        deltaY = topB - topA if centerYA < centerYB else bottomA - bottomB
        return (deltaX ** 2.0 + deltaY ** 2.0) ** 0.5 / (widthA ** 2.0 + heightA ** 2.0) ** 0.5

    return max(normalizedDistance_AtoB(node1, node2), normalizedDistance_AtoB(node2, node1))
def MergeNodeByGestaltLaws(elements, parent, CSS, debug=False):
    '''
    Group sibling DOM elements into block-tree (BT) nodes using Gestalt laws.

    Adjacent visible siblings are merged into one group when they share
    "common fate" (same CSS position), are edge-aligned ("continuity"),
    look alike over the supported CSS properties ("similarity"), or are
    closer than the average normalized Hausdorff distance ("proximity").

    @param elements: {List} contains all sibling WebElements, both visible and invisible
    @param parent: {ET.Element} the parent of the newly created BT nodes
    @param CSS: {List} contains all supported CSS properties
    @param debug: {Boolean} (Optional) True to display debugging information; False not
    @return : {Tuple} the BT nodes created, as well as the BT-DT map list
    '''
    # Keep only rendered elements with a non-zero bounding box.
    children = []
    for e in elements:
        if e.is_displayed() and int(e.size["height"]) != 0 and int(e.size["width"]) != 0.0:
            children.append(e)
    pass # for - if
    elements = children
    if len(elements) == 0:
        return [], []

    # For each adjacent pair record whether any non-proximity law binds it
    # (sames) and its proximity distance (nhds).
    nhds, sames = [], []
    for i, sibling in enumerate(elements):
        if i == len(elements)-1:
            break
        node1, node2 = sibling, elements[i+1]
        same = (node1.value_of_css_property("position") == node2.value_of_css_property("position")) # Common fate
        if not same: # Continuity
            # Bound together when any of the four box edges line up exactly.
            same = (int(node1.location["x"]) == int(node2.location["x"]) or \
                    int(node1.location["y"]) == int(node2.location["y"]) or \
                    int(node1.location["x"]) + int(node1.size["width"]) == \
                    int(node2.location["x"]) + int(node2.size["width"]) or \
                    int(node1.location["y"]) + int(node1.size["height"]) == \
                    int(node2.location["y"]) + int(node2.size["height"]))
        if not same: # Similarity
            # Similar only when no supported CSS property differs; colors and
            # images get perceptual comparisons instead of string equality.
            idx = 0
            while idx < len(CSS):
                css1 = node1.value_of_css_property(CSS[idx]).strip()
                css2 = node2.value_of_css_property(CSS[idx]).strip()
                if "color" in CSS[idx] and not sameColor(css1, css2):
                    break
                if "image" in CSS[idx] and not sameImage(css1, css2):
                    break
                if css1 != css2:
                    break
                idx += 1
            pass # while idx < len(CSS)
            same = (idx >= len(CSS))
        pass # if not same
        sames.append(same)
        nhds.append(normalizedHausdorffDistance(node1, node2)) # Proximity
    pass # for i, sibling in enumerate(elements)
    if debug and (len(sames) != len(elements) - 1 or len(nhds) != len(elements) - 1):
        print "Error: NHDs and SAMEs size issue"

    # Split the sibling sequence into groups: a new group starts where the
    # pair is farther apart than the average AND no other law binds it.
    btNodeMapList, btNodes = [], []
    curNodeMapList, curNodes = [0], [elements[0]]
    if len(elements) != 1:
        avg = 1.0 * sum(nhds) / len(nhds)
        for i in range(len(nhds)):
            if nhds[i] <= avg or sames[i]:
                curNodeMapList.append(i+1)
                curNodes.append(elements[i+1])
            else:
                btNodeMapList.append(curNodeMapList)
                curNodeMapList = [i+1]
                btNodes.append(curNodes)
                curNodes = [elements[i+1]]
            pass # else - if nhds[i] <= avg or sames[i]
        pass # for i in range(len(nhds))
        if len(curNodeMapList) > 0:
            btNodeMapList.append(curNodeMapList)
            btNodes.append(curNodes)
        pass # if len(curNodeMapList) > 0
    pass # if len(elements) != 1

    # Materialize each group as a DIV whose box is the union of its members'
    # boxes and whose CSS attributes are copied from the first member.
    pXpath = parent.attrib["xpath"] + "/"
    for i, nodes in enumerate(btNodes):
        btNode = ET.SubElement(parent, "DIV")
        node_name = "[%s]" % (",".join(str(x) for x in btNodeMapList[i]))
        btNode.set("node_name", node_name)
        btNode.set("xpath", pXpath + node_name)
        left, top, right, bottom = sys.maxint, sys.maxint, -1, -1
        for node in nodes:
            l, r = int(node.location["x"]), int(node.location["x"]) + int(node.size["width"])
            t, b = int(node.location["y"]), int(node.location["y"]) + int(node.size["height"])
            if l < left: left = l
            if t < top: top = t
            if r > right: right = r
            if b > bottom: bottom = b
        pass # for node in nodes
        btNode.set("left", "%d" % left)
        btNode.set("top", "%d" % top)
        btNode.set("right", "%d" % right)
        btNode.set("bottom", "%d" % bottom)
        for style in CSS:
            v = nodes[0].value_of_css_property(style)
            btNode.set("css_" + style, v)
        pass # for style in CSS
        btNodes[i] = btNode
    pass # for i, nodes in enumerate(btNodes)
    return btNodeMapList, btNodes
pass # def MergeNodeByGestaltLaws(elements, parent, CSS, debug=False)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3

# Xilinx CoolRunner II XC2C64A characteristics
bits_of_address = 7   # row-address width in the program/verify shift chain
bits_of_data = 274    # payload bits per fuse row
bytes_of_data = (bits_of_data + 7) // 8   # row payload rounded up to whole bytes
bits_in_program_row = bits_of_address + bits_of_data
# Row addresses in the (non-sequential) order the XSVF programs/verifies them.
address_sequence = (0x00, 0x40, 0x60, 0x20, 0x30, 0x70, 0x50, 0x10, 0x18, 0x58, 0x78, 0x38, 0x28, 0x68, 0x48, 0x08, 0x0c, 0x4c, 0x6c, 0x2c, 0x3c, 0x7c, 0x5c, 0x1c, 0x14, 0x54, 0x74, 0x34, 0x24, 0x64, 0x44, 0x04, 0x06, 0x46, 0x66, 0x26, 0x36, 0x76, 0x56, 0x16, 0x1e, 0x5e, 0x7e, 0x3e, 0x2e, 0x6e, 0x4e, 0x0e, 0x0a, 0x4a, 0x6a, 0x2a, 0x3a, 0x7a, 0x5a, 0x1a, 0x12, 0x52, 0x72, 0x32, 0x22, 0x62, 0x42, 0x02, 0x03, 0x43, 0x63, 0x23, 0x33, 0x73, 0x53, 0x13, 0x1b, 0x5b, 0x7b, 0x3b, 0x2b, 0x6b, 0x4b, 0x0b, 0x0f, 0x4f, 0x6f, 0x2f, 0x3f, 0x7f, 0x5f, 0x1f, 0x17, 0x57, 0x77, 0x37, 0x27, 0x67, 0x47, 0x07, 0x05, 0x45,)
def values_list_line_wrap(values):
    """Join pre-formatted value strings into space-separated lines of 16 items."""
    per_line = 16
    lines = []
    for start in range(0, len(values), per_line):
        lines.append(' '.join(values[start:start + per_line]))
    return lines

def dec_lines(bytes):
    """Format integers as 'N,' tokens and wrap them into lines."""
    return values_list_line_wrap(['%d,' % n for n in bytes])

def hex_lines(bytes):
    """Format integers as '0xNN,' tokens and wrap them into lines."""
    return values_list_line_wrap(['0x%02x,' % n for n in bytes])
def reverse_bits(n, bit_count):
    """Mirror the low `bit_count` bits of `n` and return them as little-endian bytes.

    The value is rendered as a binary string padded to `bit_count` digits,
    reversed, and re-encoded into (bit_count+7)//8 bytes.
    """
    byte_count = (bit_count + 7) >> 3
    forward = format(n, 'b').zfill(bit_count)
    mirrored = int(forward[::-1], 2)
    return mirrored.to_bytes(byte_count, byteorder='little')
def extract_addresses(block):
    """Tuple of the 'address' field of every row in `block`."""
    return tuple(row['address'] for row in block)

def extract_data(block):
    """Tuple of the 'data' field of every row in `block`."""
    return tuple(row['data'] for row in block)

def extract_mask(block):
    """Tuple of the 'mask' field of every row in `block`."""
    return tuple(row['mask'] for row in block)

def equal_blocks(block1, block2, mask):
    """True when the two blocks' data agree on every bit selected by `mask`."""
    data1 = extract_data(block1)
    data2 = extract_data(block2)
    assert(len(data1) == len(data2))
    assert(len(data1) == len(mask))
    return all(((row1 ^ row2) & row_mask) == 0
               for row1, row2, row_mask in zip(data1, data2, mask))
def dump_block(rows, endian='little'):
    """Debug helper: print each row as '<hex address> <hex data>'."""
    row_byte_count = (bits_of_data + 7) >> 3
    for row in rows:
        rendered = row['data'].to_bytes(row_byte_count, byteorder=endian).hex()
        print('%02x %s' % (row['address'], rendered))
def extract_programming_data(commands):
    """Split a parsed XSVF command stream into program and verify row blocks.

    Tracks the active instruction-register opcode, opening a new block on each
    'program'/'verify' IR write, then appends one row dict per matching
    data-register shift.  Returns {'program': [block, ...], 'verify':
    [block, ...]} where each block is a list of rows: program rows are
    {'address', 'data'}, verify rows are {'address', 'data', 'mask'}.
    """
    # IR opcode -> operation name; unknown opcodes raise KeyError by design.
    ir_map = {
        0x01: 'idcode',
        0xc0: 'conld',
        0xe8: 'enable',
        0xea: 'program',
        0xed: 'erase',
        0xee: 'verify',
        0xf0: 'init',
        0xff: 'bypass',
        # Other instructions unimplemented and if encountered, will cause tool to crash.
    }

    ir = None
    program = []
    verify = []
    for command in commands:
        if command['type'] == 'xsir':
            ir = ir_map[command['tdi']['data'][0]]
            if ir == 'program':
                program.append([])
            if ir == 'verify':
                verify.append([])
        elif ir == 'verify' and command['type'] == 'xsdrtdo':
            tdi_length = command['tdi']['length']
            end_state = command['end_state']
            if tdi_length == bits_of_address and end_state == 1:
                # Address shift: starts a new verify row.
                address = int(command['tdi']['data'].hex(), 16)
                verify[-1].append({'address': address})
            elif tdi_length == bits_of_data and end_state == 0:
                # Data shift: expected bits plus significance mask for the row.
                mask = int(command['tdo_mask']['data'].hex(), 16)
                expected = int(command['tdo_expected']['data'].hex(), 16)
                verify[-1][-1]['data'] = expected
                verify[-1][-1]['mask'] = mask
        elif ir == 'program' and command['type'] == 'xsdrtdo':
            tdi_length = command['tdi']['length']
            end_state = command['end_state']
            if tdi_length == bits_in_program_row and end_state == 0:
                # One shift carries the address (high bits) + row data (low bits).
                tdi = int(command['tdi']['data'].hex(), 16)
                address = (tdi >> bits_of_data) & ((1 << bits_of_address) - 1)
                data = tdi & ((1 << bits_of_data) - 1)
                program[-1].append({
                    'address': address,
                    'data': data
                })

    return {
        'program': program,
        'verify': verify,
    }
def validate_programming_data(programming_data):
    """Assert that the extracted XSVF structure matches the expected XC2C64A flow.

    Raises AssertionError when the program/verify phases are not shaped as
    this tool assumes (two program blocks, two identical verify blocks, the
    known row-address order, and program data consistent with verification).
    """
    # Validate program blocks:
    # There should be two extracted program blocks. The first contains the
    # the bitstream with done bit(s) not asserted. The second updates the
    # "done" bit(s) to finish the process.
    assert(len(programming_data['program']) == 2)
    # First program phase writes the bitstream to flash (or SRAM) with
    # special bit(s) not asserted, so the bitstream is not yet valid.
    assert(extract_addresses(programming_data['program'][0]) == address_sequence)
    # Second program phase updates a single row to finish the programming
    # process.
    assert(len(programming_data['program'][1]) == 1)
    assert(programming_data['program'][1][0]['address'] == 0x05)

    # Validate verify blocks:
    # There should be two extracted verify blocks.
    assert(len(programming_data['verify']) == 2)
    # The two verify blocks should match.
    assert(programming_data['verify'][0] == programming_data['verify'][1])
    # Check the row address order of the second verify block.
    assert(extract_addresses(programming_data['verify'][0]) == address_sequence)
    assert(extract_addresses(programming_data['verify'][1]) == address_sequence)

    # Checks across programming and verification:
    # Check that program data matches data expected during verification.
    assert(equal_blocks(programming_data['program'][0], programming_data['verify'][0], extract_mask(programming_data['verify'][0])))
    assert(equal_blocks(programming_data['program'][0], programming_data['verify'][1], extract_mask(programming_data['verify'][1])))
def make_sram_program(program_blocks):
    """Build an SRAM-loadable row list: the main bitstream with the 'done' row spliced in.

    The flash flow writes the bitstream first and patches the done row in a
    second pass; for SRAM everything must go in one pass, so the done row
    (second-to-last position in the address sequence) replaces its placeholder.
    Returns a new list; `program_blocks` is not modified.
    """
    sram_rows = list(program_blocks[0])
    done_row = program_blocks[1][0]
    sram_rows[-2] = done_row
    return sram_rows
#######################################################################
# Command line argument parsing.
#######################################################################
import argparse

# At least one of the "outputs" options should be chosen; --xsvf is the
# mandatory input file.
parser = argparse.ArgumentParser()
action_group = parser.add_argument_group(title='outputs')
action_group.add_argument('--checksum', action='store_true', help='Print bitstream verification CRC32 value')
action_group.add_argument('--hackrf-data', type=str, help='C data file for HackRF bitstream loading/programming/verification')
action_group.add_argument('--portapack-data', type=str, help='C++ data file for PortaPack bitstream loading/programming/verification')
parser.add_argument('--crcmod', action='store_true', help='Use Python crcmod library instead of built-in CRC32 code')
parser.add_argument('--debug', action='store_true', help='Enable debug output')
parser.add_argument('--xsvf', required=True, type=str, help='HackRF Xilinx XC2C64A CPLD XSVF file containing erase/program/verify phases')
args = parser.parse_args()
#######################################################################
# Generic XSVF parsing phase, produces a tree of commands performed
# against the CPLD.
#######################################################################
with open(args.xsvf, "rb") as f:
    from xsvf import XSVFParser
    commands = XSVFParser().parse(f, debug=args.debug)

# Extract program/verify row data from the raw command stream and sanity-check
# its structure before generating any output.
programming_data = extract_programming_data(commands)
validate_programming_data(programming_data)

#######################################################################
# Patch the second programming phase into the first for SRAM
# programming.
#######################################################################

verify_blocks = programming_data['verify']
program_blocks = programming_data['program']
#######################################################################
# Calculate CRC of data read from CPLD during the second verification
# pass, which is after the "done" bit is set. Mask off insignificant
# bits (turning them to zero) and extending rows to the next full byte.
#######################################################################
if args.checksum:
    if args.crcmod:
        # Use a proper CRC library
        import crcmod
        crc = crcmod.predefined.Crc('crc-32')
    else:
        # Use my home-grown, simple, slow CRC32 object to avoid additional
        # Python dependencies.
        from dumb_crc32 import DumbCRC32
        crc = DumbCRC32()

    # The second verify pass reflects the device state after the "done" bit
    # is programmed.
    verify_block = verify_blocks[1]
    # BUGFIX: rows are dicts; the original "for address, data, mask in
    # verify_block" unpacked each dict into its three key *strings* and
    # crashed on "data & mask".  Iterate rows and index explicitly.
    for row in verify_block:
        valid_data = row['data'] & row['mask']
        row_bytes = valid_data.to_bytes(bytes_of_data, byteorder='little')
        crc.update(row_bytes)

    print('0x%s' % crc.hexdigest().lower())
if args.hackrf_data:
    # Emit a C data file with the SRAM bitstream, deduplicated verify masks,
    # a per-row mask index, and the row-address order.
    program_sram = make_sram_program(program_blocks)

    verify_block = verify_blocks[1]
    verify_masks = tuple(frozenset(extract_mask(verify_block)))
    verify_mask_index = dict([(k, v) for v, k in enumerate(verify_masks)])
    verify_mask_row_index = [verify_mask_index[row['mask']] for row in verify_block]

    result = []
    result.extend((
        '/* WARNING: Auto-generated file. Do not edit. */',
        '',
        '#include <cpld_xc2c.h>',
        '',
        'const cpld_xc2c64a_program_t cpld_hackrf_program_sram = { {',
    ))
    data_lines = [', '.join(['0x%02x' % n for n in row['data'].to_bytes(bytes_of_data, byteorder='little')]) for row in program_sram]
    result.extend(['\t{ { %s } },' % line for line in data_lines])
    result.extend((
        '} };',
        '',
        'const cpld_xc2c64a_verify_t cpld_hackrf_verify = {',
        '\t.mask = {',
    ))
    verify_mask_lines = [', '.join(['0x%02x' % n for n in mask.to_bytes(bytes_of_data, byteorder='little')]) for mask in verify_masks]
    result.extend(['\t\t{ { %s } },' % line for line in verify_mask_lines])
    result.extend((
        # BUGFIX: a missing comma after '\t},' fused it with the next string
        # via implicit concatenation, emitting both on one output line.
        '\t},',
        '\t.mask_index = {',
    ))
    result.extend(['\t\t%s' % line for line in dec_lines(verify_mask_row_index)])
    result.extend((
        '\t}',
        '};',
        '',
        'const cpld_xc2c64a_row_addresses_t cpld_hackrf_row_addresses = { {',
    ))
    result.extend(['\t%s' % line for line in hex_lines(address_sequence)])
    result.extend((
        '} };',
        '',
    ))

    with open(args.hackrf_data, 'w') as f:
        f.write('\n'.join(result))
if args.portapack_data:
    # Emit a C++ data file combining address, program data and verify mask per
    # row.  Rows are serialized big-endian here (the HackRF C file above uses
    # little-endian) -- presumably matching the PortaPack loader; confirm.
    program_sram = make_sram_program(program_blocks)
    verify_block = verify_blocks[1]
    verify_masks = extract_mask(verify_block)

    result = []
    result.extend((
        '/*',
        ' * WARNING: Auto-generated file. Do not edit.',
        '*/',
        '#include "hackrf_cpld_data.hpp"',
        'namespace hackrf {',
        'namespace one {',
        'namespace cpld {',
        'const ::cpld::xilinx::XC2C64A::verify_blocks_t verify_blocks { {',
    ))
    data_lines = [', '.join(['0x%02x' % n for n in row['data'].to_bytes(bytes_of_data, byteorder='big')]) for row in program_sram]
    mask_lines = [', '.join(['0x%02x' % n for n in mask.to_bytes(bytes_of_data, byteorder='big')]) for mask in verify_masks]
    lines = ['{ 0x%02x, { { %s } }, { { %s } } }' % data for data in zip(address_sequence, data_lines, mask_lines)]
    result.extend('\t%s,' % line for line in lines)
    # NOTE(review): the closing-namespace comments below are emitted in
    # opening order (hackrf/one/cpld) rather than closing order; harmless to
    # the compiler but misleading in the generated C++.
    result.extend((
        '} };',
        '} /* namespace hackrf */',
        '} /* namespace one */',
        '} /* namespace cpld */',
        '',
    ))

    with open(args.portapack_data, 'w') as f:
        f.write('\n'.join(result))
|
nilq/baby-python
|
python
|
# Copyright 2020 reinforced_scinet (https://github.com/hendrikpn/reinforced_scinet)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from analyzer import AnalyzerSubGridWorld
# Toggle which figures the analyzer produces when this module is run directly.
PLOT_LATENT = False       # plot the latent variable's behavior (loads the trained model)
PLOT_RESULTS = False      # plot the performance of the RL agent
PLOT_RESULTS_LOSS = True  # plot the performance of pretrainer
PLOT_FIGURE = False       # plot the figure from the whitepaper
ENV_ID = 'env2'           # the environment id to be used (usually not relevant)

if __name__ == "__main__":
    # The trained model is only loaded when the latent-space plot needs it.
    analyzer = AnalyzerSubGridWorld(ENV_ID, load_model=PLOT_LATENT)
    if PLOT_LATENT:
        analyzer.plot_latent_space()
    if PLOT_RESULTS:
        # avg_mod: smoothing/averaging window for the curves -- TODO confirm
        analyzer.plot_results_figure(avg_mod=200)
    if PLOT_RESULTS_LOSS:
        analyzer.plot_loss_figure(avg_mod=100)
    if PLOT_FIGURE:
        analyzer.plot_selection_figure()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python

import os
import sys
from chromedriver_py import binary_path

# One-off setup helper: copies the bundled chromedriver binary into
# $RGBW_CC_ROOT/bin as "chrome", marks it executable, and symlinks
# "chromium" to it.  Requires setup.py and "source environment/env.sh" first.
print(
    """This command will fail if you have not run the setup.py script AND "source environment/env.sh" first.\n\n\nIt is installing a headless chrome web browser driver to allow making an image out of the big demo script sessions. It's not required to run the s/w, but nice to have"""
)
os.chdir(f"{os.environ['RGBW_CC_ROOT']}/bin/")
cmd = f"cp {binary_path} {os.environ['RGBW_CC_ROOT']}/bin/chrome; chmod a+x ./chrome; ln -s chrome chromium;"
os.system(cmd)
|
nilq/baby-python
|
python
|
import unittest
from users import User
class TestUser(unittest.TestCase):
    '''
    Test class that defines test cases for the User class behaviours.

    Args:
        unittest.TestCase: parent class providing the test framework
    '''
    def setUp(self):
        '''
        Set up method to run before each test case.
        '''
        self.new_users = User("Frank","23456789") # create a fresh User object

    def test_init(self):
        '''
        test_init test case to test if the object is initialized properly
        '''
        self.assertEqual(self.new_users.username,"Frank")
        self.assertEqual(self.new_users.password,"23456789")

    def test_save_users(self):
        '''
        test_save_users test case to test if the user object is saved into
        the user list
        '''
        self.new_users.save_users() # saving the new user
        self.assertEqual(len(User.user_list),1)

    def tearDown(self):
        '''
        tearDown method that does clean up after each test case has run.
        '''
        # Reset the class-level list so tests stay independent.
        User.user_list = []

    def test_save_multiple_users(self):
        '''
        test_save_multiple_users to check if we can save multiple user
        objects to our user_list
        '''
        self.new_users.save_users()
        test_users = User("users","23456789") # second user
        test_users.save_users()
        self.assertEqual(len(User.user_list),2)

    def test_delete_users(self):
        '''
        test_delete_users to test if we can remove a user from our user list
        '''
        self.new_users.save_users()
        test_users = User("users","23456789") # second user
        test_users.save_users()

        self.new_users.delete_users() # deleting a user object
        self.assertEqual(len(User.user_list),1)

    def test_find_users_by_username(self):
        '''
        test to check if we can find a user by username and display information
        '''
        self.new_users.save_users()
        test_users = User("frank","23456789") # second user
        test_users.save_users()

        found_user = User.find_by_username("frank")
        self.assertEqual(found_user,test_users)

    def test_users_exists(self):
        '''
        test to check if we can return a Boolean if we cannot find the users.
        '''
        self.new_users.save_users()
        test_users = User("frank","23456789") # second user
        test_users.save_users()

        users_exists = User.users_exist("frank")
        self.assertTrue(users_exists)

    def test_display_all_users(self):
        '''
        method that returns a list of all users saved
        '''
        self.assertEqual(User.display_users(),User.user_list)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import random
import time
import os
import discord
import triggers
import data
import cmd
import tools
################################################################################

# Per-guild lurker configuration: the reaction emoji plus the inclusive range
# from which the per-message reaction chance is drawn.
lurker_data = dict()
lurker_data['emoji'] = '👀'
lurker_data['min_chance'] = 1
lurker_data['max_chance'] = 10
data.NewGuildEnvAdd('lurker_data', lurker_data)
def GetLurkerData(local_env):
    """Fetch this guild's lurker configuration dict from its environment."""
    lurker_config = local_env['lurker_data']
    return lurker_config
################################################################################
async def OnMessage(local_env, message, normalised_text):
    """With a random chance, briefly react to `message` with the lurker emoji.

    The reaction is added and immediately removed, so users only see a
    fleeting notification flash from the bot.
    """
    lurker = GetLurkerData(local_env)
    min_chance = lurker['min_chance']
    max_chance = lurker['max_chance']
    emoji = lurker['emoji']
    # Roll this message's chance from the configured inclusive range.
    chance = random.randint(min_chance, max_chance)
    if tools.Success(chance):
        await message.add_reaction(emoji)
        await message.remove_reaction(emoji, message.guild.me)

# Register as a message hook so the bot evaluates every incoming message.
triggers.on_message.append(OnMessage)
################################################################################
async def cmd_chance(ctx, args):
    """Command handler: set the lurker's [min, max] reaction-chance range."""
    local_env = data.GetGuildEnvironment(ctx.guild)
    lurker = GetLurkerData(local_env)
    if len(args) != 2: raise RuntimeError("Incorrect number of arguments (min_chance max_chance expected)")
    min_chance = int(args[0])
    max_chance = int(args[1])
    # Validate 0 <= min <= max <= 100 before touching the stored settings.
    if min_chance < 0 or min_chance > 100: raise RuntimeError("Minimal chance must be within (0,100)")
    if max_chance < min_chance or max_chance > 100: raise RuntimeError("Maximal chance must be within (min_chance, 100)")
    lurker['min_chance'] = min_chance
    lurker['max_chance'] = max_chance
async def cmd_emoji(ctx, args):
    """Command handler: set the lurker's reaction emoji.

    The emoji is validated by test-reacting to the invoking message (and
    removing the reaction again); only a usable emoji is stored.
    """
    local_env = data.GetGuildEnvironment(ctx.guild)
    lurker = GetLurkerData(local_env)
    if len(args) != 1: raise RuntimeError("Incorrect number of arguments (emoji expected)")
    emoji = args[0]
    try:
        await ctx.message.add_reaction(emoji)
        await ctx.message.remove_reaction(emoji, ctx.guild.me)
    except Exception as e:
        # BUGFIX: chain the original discord error instead of discarding it,
        # so the real failure cause remains visible in tracebacks.
        raise RuntimeError(f"Cannot add emoji {emoji}") from e
    lurker['emoji'] = emoji
async def cmd_settings(ctx, args):
    """Command handler: reply with the current lurker settings."""
    local_env = data.GetGuildEnvironment(ctx.guild)
    lurker = GetLurkerData(local_env)
    output = "Lurker settings:\n" + f"Minimal chance: {lurker['min_chance']}\n" + f"Maximal chance: {lurker['max_chance']}\n" + f"Emoji: {lurker['emoji']}\n"
    # mention_author=False keeps the reply from pinging the requester.
    await ctx.message.reply(output, mention_author=False)
    return True
################################################################################
# Register the "lurker" command group: chance/emoji require full permissions,
# settings is available to everyone.
parser = cmd.Parser()
cmd.Add(parser, "chance", cmd_chance, "", "", discord.Permissions.all())
cmd.Add(parser, "emoji", cmd_emoji, "", "", discord.Permissions.all())
cmd.Add(parser, "settings", cmd_settings, "", "")
cmd.Add(cmd.parser, "lurker", parser, "Setup lurker", "")
################################################################################
|
nilq/baby-python
|
python
|
from enum import auto
from mstrio.utils.enum_helper import AutoName
class RefreshPolicy(AutoName):
    """Policy applied to existing data when a dataset is refreshed.

    AutoName presumably derives each member's value from its name (it is
    imported from mstrio's enum helper, outside this view) -- confirm there.
    """
    ADD = auto()
    DELETE = auto()
    UPDATE = auto()
    UPSERT = auto()
    REPLACE = auto()
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
# to work around tk_chooseDirectory not properly returning unicode paths on Windows
# need to use a dialog that can be hacked up to actually return full unicode paths
# originally based on AskFolder from EasyDialogs for Windows but modified to fix it
# to actually use unicode for path
# The original license for EasyDialogs is as follows
#
# Copyright (c) 2003-2005 Jimmy Retzlaff
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
AskFolder(...) -- Ask the user to select a folder Windows specific
"""
import ctypes
from ctypes.wintypes import LPCWSTR
import ctypes.wintypes as wintypes
__all__ = ['AskFolder']
# Load required Windows DLLs
ole32 = ctypes.windll.ole32
shell32 = ctypes.windll.shell32
user32 = ctypes.windll.user32
# Windows Constants
BFFM_INITIALIZED = 1
BFFM_SETOKTEXT = 1129
BFFM_SETSELECTIONA = 1126
BFFM_SETSELECTIONW = 1127
BIF_EDITBOX = 16
BS_DEFPUSHBUTTON = 1
CB_ADDSTRING = 323
CB_GETCURSEL = 327
CB_SETCURSEL = 334
CDM_SETCONTROLTEXT = 1128
EM_GETLINECOUNT = 186
EM_GETMARGINS = 212
EM_POSFROMCHAR = 214
EM_SETSEL = 177
GWL_STYLE = -16
IDC_STATIC = -1
IDCANCEL = 2
IDNO = 7
IDOK = 1
IDYES = 6
MAX_PATH = 260
OFN_ALLOWMULTISELECT = 512
OFN_ENABLEHOOK = 32
OFN_ENABLESIZING = 8388608
OFN_ENABLETEMPLATEHANDLE = 128
OFN_EXPLORER = 524288
OFN_OVERWRITEPROMPT = 2
OPENFILENAME_SIZE_VERSION_400 = 76
PBM_GETPOS = 1032
PBM_SETMARQUEE = 1034
PBM_SETPOS = 1026
PBM_SETRANGE = 1025
PBM_SETRANGE32 = 1030
PBS_MARQUEE = 8
PM_REMOVE = 1
SW_HIDE = 0
SW_SHOW = 5
SW_SHOWNORMAL = 1
SWP_NOACTIVATE = 16
SWP_NOMOVE = 2
SWP_NOSIZE = 1
SWP_NOZORDER = 4
VER_PLATFORM_WIN32_NT = 2
WM_COMMAND = 273
WM_GETTEXT = 13
WM_GETTEXTLENGTH = 14
WM_INITDIALOG = 272
WM_NOTIFY = 78
# Windows function prototypes
BrowseCallbackProc = ctypes.WINFUNCTYPE(ctypes.c_int, wintypes.HWND, ctypes.c_uint, wintypes.LPARAM, wintypes.LPARAM)
# Windows types
LPCTSTR = ctypes.c_char_p
LPTSTR = ctypes.c_char_p
LPVOID = ctypes.c_voidp
TCHAR = ctypes.c_char
class BROWSEINFO(ctypes.Structure):
    # ctypes mirror of the Win32 BROWSEINFO structure consumed by
    # shell32.SHBrowseForFolder; field order and types must match the
    # Windows ABI exactly -- do not reorder.
    _fields_ = [
        ("hwndOwner", wintypes.HWND),
        ("pidlRoot", LPVOID),
        ("pszDisplayName", LPTSTR),     # out-buffer, allocated by the caller
        ("lpszTitle", LPCTSTR),         # text shown above the folder tree
        ("ulFlags", ctypes.c_uint),
        ("lpfn", BrowseCallbackProc),   # per-event callback into Python
        ("lParam", wintypes.LPARAM),
        ("iImage", ctypes.c_int)
    ]
# Utilities
def CenterWindow(hwnd):
    """Move window `hwnd` to the center of the desktop (size unchanged)."""
    desktop = GetWindowRect(user32.GetDesktopWindow())
    window = GetWindowRect(hwnd)
    offset_x = width(desktop) // 2 - width(window) // 2
    offset_y = height(desktop) // 2 - height(window) // 2
    # SWP_NOSIZE: keep dimensions; SWP_NOZORDER/SWP_NOACTIVATE: don't raise
    # or focus the window, just reposition it.
    user32.SetWindowPos(
        hwnd, 0,
        desktop.left + offset_x,
        desktop.top + offset_y,
        0, 0,
        SWP_NOACTIVATE | SWP_NOSIZE | SWP_NOZORDER
    )
def GetWindowRect(hwnd):
    """Return the screen-coordinate RECT of window `hwnd` via user32."""
    bounds = wintypes.RECT()
    user32.GetWindowRect(hwnd, ctypes.byref(bounds))
    return bounds
def width(rect):
    """Horizontal extent of a RECT-like object."""
    return rect.right - rect.left

def height(rect):
    """Vertical extent of a RECT-like object."""
    return rect.bottom - rect.top
def AskFolder(
        message=None,
        version=None,
        defaultLocation=None,
        location=None,
        windowTitle=None,
        actionButtonLabel=None,
        cancelButtonLabel=None,
        multiple=None):
    """Display a Windows folder-selection dialog and return the chosen path.

    Returns the selected folder as a unicode string, or None if the user
    cancelled.  `version` and `multiple` are accepted for EasyDialogs API
    compatibility but not used here.  NOTE: Python-2-era code (relies on
    the `unicode` builtin), Windows-only.
    """
    def BrowseCallback(hwnd, uMsg, lParam, lpData):
        # Shell calls this during the dialog's lifetime; customizations are
        # applied once, right after the dialog is created.
        if uMsg == BFFM_INITIALIZED:
            if actionButtonLabel:
                label = unicode(actionButtonLabel, errors='replace')
                user32.SendMessageW(hwnd, BFFM_SETOKTEXT, 0, label)
            if cancelButtonLabel:
                label = unicode(cancelButtonLabel, errors='replace')
                cancelButton = user32.GetDlgItem(hwnd, IDCANCEL)
                if cancelButton:
                    user32.SetWindowTextW(cancelButton, label)
            if windowTitle:
                # BUGFIX: the keyword was misspelled "erros", which raised
                # TypeError whenever a windowTitle was supplied.
                title = unicode(windowTitle, errors='replace')
                user32.SetWindowTextW(hwnd, title)
            if defaultLocation:
                user32.SendMessageW(hwnd, BFFM_SETSELECTIONW, 1, defaultLocation.replace('/', '\\'))
            if location:
                x, y = location
                desktopRect = wintypes.RECT()
                user32.GetWindowRect(0, ctypes.byref(desktopRect))
                user32.SetWindowPos(hwnd, 0,
                    desktopRect.left + x,
                    desktopRect.top + y, 0, 0,
                    SWP_NOACTIVATE | SWP_NOSIZE | SWP_NOZORDER)
            else:
                CenterWindow(hwnd)
        return 0

    # This next line is needed to prevent gc of the callback
    callback = BrowseCallbackProc(BrowseCallback)

    browseInfo = BROWSEINFO()
    browseInfo.pszDisplayName = ctypes.c_char_p('\0' * (MAX_PATH+1))
    browseInfo.lpszTitle = message
    browseInfo.lpfn = callback

    pidl = shell32.SHBrowseForFolder(ctypes.byref(browseInfo))
    if not pidl:
        result = None
    else:
        # Resolve the returned item-ID list to a filesystem path via the
        # wide-char API, then free the shell-allocated memory.
        path = LPCWSTR(u" " * (MAX_PATH+1))
        shell32.SHGetPathFromIDListW(pidl, path)
        ole32.CoTaskMemFree(pidl)
        result = path.value
    return result
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class EsgConfig(AppConfig):
    """Django application configuration for the `esg` app."""
    # 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'esg'
|
nilq/baby-python
|
python
|
from .base import * # noqa: F403,F401
# Development overrides on top of base settings: never use DEBUG=True in
# production.
DEBUG = True

# django-debug-toolbar is enabled only in this settings module.
INSTALLED_APPS += [ # noqa ignore=F405
    'debug_toolbar',
]

MIDDLEWARE += [ # noqa ignore=F405
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]

ALLOWED_HOSTS = [
    '0.0.0.0',
    '127.0.0.1',
    'art-backend.herokuapp.com'
]

# Client IPs for which the debug toolbar is shown.
INTERNAL_IPS = [
    '0.0.0.0',
    '127.0.0.1'
]
|
nilq/baby-python
|
python
|
# Version 3.1; Erik Husby; Polar Geospatial Center, University of Minnesota; 2019
from __future__ import division
import math
import os
import sys
import traceback
from warnings import warn
import numpy as np
from osgeo import gdal_array, gdalconst
from osgeo import gdal, ogr, osr
gdal.UseExceptions()
class RasterIOError(Exception):
    """Raised when a raster file cannot be found or opened."""
    def __init__(self, msg=""):
        # BUGFIX: super(Exception, ...) started the MRO lookup *after*
        # Exception; start it at this class, as intended.
        super(RasterIOError, self).__init__(msg)

class UnsupportedDataTypeError(Exception):
    """Raised when a raster/array data type is not supported."""
    def __init__(self, msg=""):
        super(UnsupportedDataTypeError, self).__init__(msg)

class InvalidArgumentError(Exception):
    """Raised when a function argument has an invalid type or value."""
    def __init__(self, msg=""):
        super(InvalidArgumentError, self).__init__(msg)

class UnsupportedMethodError(Exception):
    """Raised when a requested method/option is not supported."""
    def __init__(self, msg=""):
        super(UnsupportedMethodError, self).__init__(msg)
#############
# Raster IO #
#############
# Legacy; Retained for quick instruction of useful GDAL raster information extraction methods.
def oneBandImageToArrayZXY_projRef(rasterFile):
    """
    Opens a single-band raster image as a NumPy 2D array [Z] and returns it along
    with [X, Y] coordinate ranges of pixels in the raster grid as NumPy 1D arrays
    and the projection definition string for the raster dataset in OpenGIS WKT format.
    """
    if not os.path.isfile(rasterFile):
        raise RasterIOError("No such rasterFile: '{}'".format(rasterFile))
    ds = gdal.Open(rasterFile, gdal.GA_ReadOnly)
    proj_ref = ds.GetProjectionRef()
    gt = ds.GetGeoTransform()
    # Top-left corner of the raster and per-pixel step sizes, taken from the
    # GDAL geotransform (gt[0], gt[3] = origin; gt[1], gt[5] = pixel size).
    xmin, ymax = gt[0], gt[3]
    dx, dy = gt[1], gt[5]
    # One grid coordinate per column (X) and per row (Y). dy is typically
    # negative for north-up rasters, so Y decreases down the rows.
    X = xmin + np.arange(ds.RasterXSize) * dx
    Y = ymax + np.arange(ds.RasterYSize) * dy
    Z = ds.GetRasterBand(1).ReadAsArray()
    return Z, X, Y, proj_ref
def openRaster(file_or_ds, target_EPSG=None):
    """
    Open a raster image as a GDAL dataset object.

    Parameters
    ----------
    file_or_ds : str (file path) or osgeo.gdal.Dataset
        File path of the raster image to open as a GDAL dataset object,
        or the GDAL dataset itself.
    target_EPSG : None or int
        EPSG code of a spatial reference system to reproject the raster
        into (nearest-neighbor resampling) before returning.
        If None, the raster is returned in its native spatial reference.

    Returns
    -------
    ds : osgeo.gdal.Dataset
        The raster image as a GDAL dataset.

    Raises
    ------
    RasterIOError
        If `file_or_ds` is a path that does not exist on disk.
    InvalidArgumentError
        If `file_or_ds` is neither a string nor a GDAL dataset.

    Notes
    -----
    If `file_or_ds` is a GDAL dataset and `target_EPSG` is None,
    it is returned without modification.
    """
    ds = None
    # isinstance (rather than an exact type() comparison) also accepts
    # gdal.Dataset subclasses.
    if isinstance(file_or_ds, gdal.Dataset):
        ds = file_or_ds
    elif isinstance(file_or_ds, str):
        if not os.path.isfile(file_or_ds):
            raise RasterIOError("No such rasterFile: '{}'".format(file_or_ds))
        try:
            ds = gdal.Open(file_or_ds, gdal.GA_ReadOnly)
        except RuntimeError:
            # gdal.UseExceptions() turns GDAL open failures into RuntimeError;
            # report which file failed, then propagate.
            print("RuntimeError when opening file/dataset: {}".format(file_or_ds))
            raise
    else:
        raise InvalidArgumentError("Invalid input type for `file_or_ds`: {}".format(
            type(file_or_ds)))
    if target_EPSG is not None:
        target_sr = osr.SpatialReference()
        target_sr.ImportFromEPSG(target_EPSG)
        ds = reprojectGDALDataset(ds, target_sr, 'nearest')
    return ds
def reprojectGDALDataset(ds_in, sr_out, interp_str):
    """
    Reproject a GDAL dataset into a new spatial reference (UNFINISHED).

    NOTE(review): This function is incomplete (see FIXME below). The source
    and target WKT arguments to gdal.ReprojectImage are passed as empty
    strings, `sr_out` is never applied to the output dataset, `sr_in` is
    created but unused, and the output band is created with the driver's
    default data type -- confirm all of this before relying on the result.
    """
    # FIXME: Finish this function.
    # dtype_gdal, promote_dtype = dtype_np2gdal(Z.dtype)
    # if promote_dtype is not None:
    #     Z = Z.astype(promote_dtype)
    interp_gdal = interp_str2gdal(interp_str)
    mem_drv = gdal.GetDriverByName('MEM')
    sr_in = osr.SpatialReference()  # (unused -- see NOTE in docstring)
    # ds_in = mem_drv.Create('', X.size, Y.size, 1, dtype_gdal)
    # ds_in.SetGeoTransform((X[0], X[1]-X[0], 0,
    #                        Y[0], 0, Y[1]-Y[0]))
    # ds_in.GetRasterBand(1).WriteArray(Z)
    ds_out = mem_drv.Create('', ds_in.RasterXSize, ds_in.RasterYSize, 1)
    gdal.ReprojectImage(ds_in, ds_out, '', '', interp_gdal)
    return ds_out
def gdalReadAsArraySetsmSceneBand(raster_band, make_nodata_nan=False):
    """
    Read a SETSM scene raster band into a NumPy array, applying the band's
    scale/offset metadata when present.

    Parameters
    ----------
    raster_band : osgeo.gdal.Band
        Raster band to read.
    make_nodata_nan : bool
        If True, replace nodata pixels with NaN in the returned array.

    Returns
    -------
    array_data : ndarray (2D)
        Band pixel values. When a non-identity scale/offset is applied, the
        array is read as Float32 and adjusted in place.

    Raises
    ------
    RasterIOError
        If a scaled band is not Int32, if scale is zero, or if the read
        returns None.
    """
    scale = raster_band.GetScale()
    offset = raster_band.GetOffset()
    # GDAL returns None when the band carries no scale/offset metadata;
    # treat that as the identity transform.
    if scale is None:
        scale = 1.0
    if offset is None:
        offset = 0.0
    if scale == 1.0 and offset == 0.0:
        # Fast path: no adjustment needed, read in the band's native type.
        array_data = raster_band.ReadAsArray()
        if make_nodata_nan:
            nodata_val = raster_band.GetNoDataValue()
            if nodata_val is not None:
                array_data[array_data == nodata_val] = np.nan
    else:
        # Scaled bands are expected to be the scaled-integer 50cm DEM layout.
        if raster_band.DataType != gdalconst.GDT_Int32:
            raise RasterIOError(
                "Expected GDAL raster band with scale!=1.0 or offset!=0.0 to be of Int32 data type"
                " (scaled int LERC_ZSTD-compressed 50cm DEM), but data type is {}".format(
                    gdal.GetDataTypeName(raster_band.DataType)
                )
            )
        if scale == 0.0:
            raise RasterIOError(
                "GDAL raster band has invalid parameters: scale={}, offset={}".format(scale, offset)
            )
        nodata_val = raster_band.GetNoDataValue()
        # Read as Float32 so scale/offset can be applied without truncation.
        array_data = raster_band.ReadAsArray(buf_type=gdalconst.GDT_Float32)
        # Only adjust valid pixels; nodata values must keep their raw value
        # so they can still be recognized below.
        adjust_where = (array_data != nodata_val) if nodata_val is not None else True
        if scale != 1.0:
            np.multiply(array_data, scale, out=array_data, where=adjust_where)
        if offset != 0.0:
            np.add(array_data, offset, out=array_data, where=adjust_where)
        if make_nodata_nan:
            # Reuse the mask buffer: invert it in place to mark nodata pixels.
            array_nodata = np.logical_not(adjust_where, out=adjust_where)
            array_data[array_nodata] = np.nan
        del adjust_where
    if array_data is None:
        raise RasterIOError("`raster_band.ReadAsArray()` returned None")
    return array_data
def getCornerCoords(gt, shape):
    """
    Compute the georeferenced corner coordinates of a raster image.

    Parameters
    ----------
    gt : numeric tuple `(top_left_x, dx_x, dx_y, top_left_y, dy_x, dy_y)`
        Affine geotransform relating pixel coordinates to georeferenced
        coordinates: a pixel-space point (px, py) maps to
        `x_geo = gt[0] + px*gt[1] + py*gt[2]` and
        `y_geo = gt[3] + px*gt[4] + py*gt[5]`.
    shape : tuple of positive int, 2 elements
        Raster dimensions as (num_rows, num_cols).

    Returns
    -------
    corner_coords : ndarray (5, 2)
        Georeferenced (x, y) corner coordinates, clockwise from the top
        left corner, with the first corner repeated at the end to close
        the ring.
    """
    nrows, ncols = shape
    # Corners in pixel space, clockwise from top left (closed ring).
    corners_pix = np.array([
        [0, 0],
        [ncols, 0],
        [ncols, nrows],
        [0, nrows],
        [0, 0]
    ])
    # 2x2 rotation/scale part of the geotransform; rows are laid out so that
    # corners_pix.dot(rot) yields (px*gt[1] + py*gt[2], px*gt[4] + py*gt[5]).
    rot = np.array([
        [gt[1], gt[4]],
        [gt[2], gt[5]]
    ])
    origin = np.array([gt[0], gt[3]], dtype=float)
    return origin + corners_pix.dot(rot)
def coordsToWkt(point_coords):
    """
    Build a WKT polygon string from an ordered sequence of points.

    Parameters
    ----------
    point_coords : 2D sequence of floats/ints, shape (npoints, ndim)
        Ordered vertices of the polygon, each a sequence of coordinates.

    Returns
    -------
    wkt : str
        WKT 'POLYGON ((x1 y1,x2 y2,...))' representation of `point_coords`.
    """
    vertex_strings = []
    for point in point_coords:
        # Coordinates within a vertex are space-separated.
        vertex_strings.append(" ".join(str(coord) for coord in point))
    # Vertices are comma-separated inside the double parentheses.
    return 'POLYGON (({}))'.format(','.join(vertex_strings))
def wktToCoords(wkt):
    """
    Create an array of point coordinates from a WKT polygon string.

    Parameters
    ----------
    wkt : str
        WKT polygon representation of points with coordinate data
        to be extracted.

    Returns
    -------
    point_coords : ndarray of shape (npoints, ndim)
        Ordered list of point coordinates extracted from `wkt`.
    """
    import ast
    # Rewrite the WKT text into a Python list literal, then parse it with
    # ast.literal_eval instead of eval() so arbitrary code embedded in
    # `wkt` can never be executed.
    coords_list = ast.literal_eval(
        wkt.replace('POLYGON ','').replace('(','[').replace(')',']').replace(',','],[').replace(' ',',')
    )
    return np.array(coords_list)
def extractRasterData(rasterFile_or_ds, *params):
    """
    Extract information from a single-band raster image file.

    Parameters
    ----------
    rasterFile_or_ds : str (file path) or osgeo.gdal.Dataset
        File path of the raster image to open as a GDAL dataset object,
        or the GDAL dataset itself.
    params : str
        Names of parameters to be extracted from the raster dataset.
        'array'/'z' ------ matrix of image pixel values as ndarray (2D)
        'shape'----------- pixel shape of image as tuple (nrows, ncols)
        'x' -------------- georeferenced grid coordinates corresponding to
                           each column of pixels in image as ndarray (1D)
        'y' -------------- georeferenced grid coordinates corresponding to
                           each row of pixels in image as ndarray (1D)
        'dx' ------------- absolute x length of each pixel in georeferenced
                           pixel-grid coordinates (absolute value of the
                           geotransform x pixel size)
        'dy' ------------- absolute y length of each pixel in georeferenced
                           pixel-grid coordinates (absolute value of the
                           geotransform y pixel size)
        'res' ------------ (absolute) resolution of square pixels in image
                           (NaN if pixels are not square)
        'geo_trans' ------ affine geometric transformation
                           (see documentation for `getCornerCoords`)
        'corner_coords' -- georeferenced corner coordinates of image extent
                           (see documentation for `getCornerCoords`)
        'proj_ref' ------- projection definition string in OpenGIS WKT format
                           (None if projection definition is not available)
        'spat_ref' ------- spatial reference as osgeo.osr.SpatialReference object
                           (None if spatial reference is not available)
        'geom' ----------- polygon geometry of image extent as osgeo.ogr.Geometry object
        'geom_sr' -------- polygon geometry of image extent as osgeo.ogr.Geometry object
                           with spatial reference assigned (if available)
        'nodata_val' ----- pixel value that should be interpreted as "No Data"
        'dtype_val' ------ GDAL type code for numeric data type of pixel values (integer)
        'dtype_str' ------ GDAL type name for numeric data type of pixel values (string)

    Returns
    -------
    value_list : list
        List of parameter data with length equal to the number
        of parameter name arguments given in the function call.
        The order of returned parameter data corresponds directly to
        the order of the parameter name arguments.
        If only one parameter name argument is provided, the single
        datum is returned itself, not in a list.

    Examples
    --------
    >>> f = 'my_raster.tif'
    >>> image_data, resolution = extractRasterData(f, 'array', 'res')
    >>> resolution
    2
    >>> extractRasterData(f, 'dy')
    2
    """
    ds = openRaster(rasterFile_or_ds)
    pset = set(params)
    invalid_pnames = pset.difference({'ds', 'shape', 'z', 'array', 'x', 'y',
                                      'dx', 'dy', 'res', 'geo_trans', 'corner_coords',
                                      'proj_ref', 'spat_ref', 'geom', 'geom_sr',
                                      'nodata_val', 'dtype_val', 'dtype_str'})
    if invalid_pnames:
        raise InvalidArgumentError("Invalid parameter(s) for extraction: {}".format(invalid_pnames))
    # Pull only the intermediate values that the requested parameters need.
    if pset.intersection({'z', 'array', 'nodata_val', 'dtype_val', 'dtype_str'}):
        band = ds.GetRasterBand(1)
    if pset.intersection({'z', 'array'}):
        try:
            array_data = gdalReadAsArraySetsmSceneBand(band)
        except RasterIOError as e:
            traceback.print_exc()
            print("Error reading raster: {}".format(rasterFile_or_ds))
            raise
    if pset.intersection({'shape', 'x', 'y', 'corner_coords', 'geom', 'geom_sr'}):
        # Prefer the shape of the already-read array when available.
        shape = (ds.RasterYSize, ds.RasterXSize) if 'array_data' not in vars() else array_data.shape
    if pset.intersection({'x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords', 'geom', 'geom_sr'}):
        geo_trans = ds.GetGeoTransform()
    if pset.intersection({'proj_ref', 'spat_ref', 'geom_sr'}):
        proj_ref = ds.GetProjectionRef()
    if pset.intersection({'corner_coords', 'geom', 'geom_sr'}):
        corner_coords = getCornerCoords(geo_trans, shape)
    if pset.intersection({'spat_ref', 'geom_sr'}):
        spat_ref = osr.SpatialReference(proj_ref) if proj_ref is not None else None
    if pset.intersection({'geom', 'geom_sr'}):
        geom = ogr.Geometry(wkt=coordsToWkt(corner_coords))
    if pset.intersection({'nodata_val'}):
        nodata_val = band.GetNoDataValue()
    if pset.intersection({'dtype_val', 'dtype_str'}):
        dtype_val = band.DataType
    if pset.intersection({'dtype_str'}):
        dtype_str = gdal.GetDataTypeName(dtype_val)
    # Assemble results in the same order the parameter names were given.
    value_list = []
    for pname in params:
        pname = pname.lower()
        value = None
        if pname == 'ds':
            value = ds
        elif pname == 'shape':
            value = shape
        elif pname in ('z', 'array'):
            value = array_data
        elif pname == 'x':
            value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
        elif pname == 'y':
            value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
        elif pname == 'dx':
            value = abs(geo_trans[1])
        elif pname == 'dy':
            value = abs(geo_trans[5])
        elif pname == 'res':
            value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
        elif pname == 'geo_trans':
            value = geo_trans
        elif pname == 'corner_coords':
            value = corner_coords
        elif pname == 'proj_ref':
            value = proj_ref
        elif pname == 'spat_ref':
            value = spat_ref
        elif pname == 'geom':
            value = geom
        elif pname == 'geom_sr':
            # Clone when the bare geometry was also requested so the two
            # returned geometries stay independent objects.
            value = geom.Clone() if 'geom' in params else geom
            if spat_ref is not None:
                value.AssignSpatialReference(spat_ref)
            else:
                warn("Spatial reference could not be extracted from raster dataset, "
                     "so extracted geometry has not been assigned a spatial reference.")
        elif pname == 'nodata_val':
            value = nodata_val
        elif pname == 'dtype_val':
            value = dtype_val
        elif pname == 'dtype_str':
            value = dtype_str
        value_list.append(value)
    if len(value_list) == 1:
        value_list = value_list[0]
    return value_list
# Legacy; Retained for a visual aid of equivalences between NumPy and GDAL data types.
# Use gdal_array.NumericTypeCodeToGDALTypeCode to convert from NumPy to GDAL data type.
def dtype_np2gdal_old(dtype_in, form_out='gdal', force_conversion=False):
    """
    Converts between input NumPy data type (dtype_in may be either
    NumPy 'dtype' object or already a string) and output GDAL data type.
    If form_out='numpy', the corresponding NumPy 'dtype' object will be
    returned instead, allowing for quick lookup by string name.
    If the third element of a dtype_dict conversion tuple is zero,
    that conversion of NumPy to GDAL data type is not recommended. However,
    the conversion may be forced with the argument force_conversion=True.
    """
    # np.bool_ replaces the np.bool alias, which was removed in NumPy 1.24;
    # with the old alias this dict literal raises AttributeError on import
    # under modern NumPy.
    dtype_dict = {                                            # ---GDAL LIMITATIONS---
        'bool'      : (np.bool_,      gdal.GDT_Byte,     0),  # GDAL no bool/logical/1-bit
        'int8'      : (np.int8,       gdal.GDT_Byte,     1),  # GDAL byte is unsigned
        'int16'     : (np.int16,      gdal.GDT_Int16,    1),
        'int32'     : (np.int32,      gdal.GDT_Int32,    1),
        'intc'      : (np.intc,       gdal.GDT_Int32,    1),  # np.intc ~= np.int32
        'int64'     : (np.int64,      gdal.GDT_Int32,    0),  # GDAL no int64
        'intp'      : (np.intp,       gdal.GDT_Int32,    0),  # intp ~= np.int64
        'uint8'     : (np.uint8,      gdal.GDT_Byte,     1),
        'uint16'    : (np.uint16,     gdal.GDT_UInt16,   1),
        'uint32'    : (np.uint32,     gdal.GDT_UInt32,   1),
        'uint64'    : (np.uint64,     gdal.GDT_UInt32,   0),  # GDAL no uint64
        'float16'   : (np.float16,    gdal.GDT_Float32,  1),  # GDAL no float16
        'float32'   : (np.float32,    gdal.GDT_Float32,  1),
        'float64'   : (np.float64,    gdal.GDT_Float64,  1),
        'complex64' : (np.complex64,  gdal.GDT_CFloat32, 1),
        'complex128': (np.complex128, gdal.GDT_CFloat64, 1),
    }
    errmsg_unsupported_dtype = "Conversion of NumPy data type '{}' to GDAL is not supported".format(dtype_in)
    try:
        dtype_tup = dtype_dict[str(dtype_in).lower()]
    except KeyError:
        raise UnsupportedDataTypeError("No such NumPy data type in lookup table: '{}'".format(dtype_in))
    if form_out.lower() == 'gdal':
        if dtype_tup[2] == 0:
            if force_conversion:
                print(errmsg_unsupported_dtype)
            else:
                raise UnsupportedDataTypeError(errmsg_unsupported_dtype)
        dtype_out = dtype_tup[1]
    elif form_out.lower() == 'numpy':
        dtype_out = dtype_tup[0]
    else:
        raise UnsupportedDataTypeError("The following output data type format is not supported: '{}'".format(form_out))
    return dtype_out
def dtype_np2gdal(dtype_np):
    """
    Convert a NumPy data type to the equivalent GDAL data type code.

    Parameters
    ----------
    dtype_np : numpy.dtype or NumPy scalar type
        NumPy data type to convert.

    Returns
    -------
    dtype_gdal : int
        GDAL data type code equivalent to `dtype_np` (after promotion,
        if promotion was necessary).
    promote_dtype : NumPy scalar type or None
        Type the input array must be cast to before writing with GDAL
        (bool -> uint8, int8 -> int16, float16 -> float32), or None if
        no promotion is necessary.

    Raises
    ------
    InvalidArgumentError
        If `dtype_np` has no GDAL equivalent even after promotion.
    """
    # np.bool_ replaces the np.bool alias removed in NumPy 1.24; comparing
    # against the removed alias raised AttributeError under modern NumPy.
    if dtype_np == np.bool_:
        promote_dtype = np.uint8
    elif dtype_np == np.int8:
        promote_dtype = np.int16
    elif dtype_np == np.float16:
        promote_dtype = np.float32
    else:
        promote_dtype = None
    if promote_dtype is not None:
        warn("NumPy array data type ({}) does not have equivalent GDAL data type and is not "
             "supported, but can be safely promoted to {}".format(dtype_np, promote_dtype(1).dtype))
        dtype_np = promote_dtype
    dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_np)
    if dtype_gdal is None:
        raise InvalidArgumentError("NumPy array data type ({}) does not have equivalent "
                                   "GDAL data type and is not supported".format(dtype_np))
    return dtype_gdal, promote_dtype
def interp_str2gdal(interp_str):
    """
    Map an interpolation method name to its GDAL resampling constant.

    Parameters
    ----------
    interp_str : str
        One of 'nearest', 'linear'/'bilinear', 'cubic'/'bicubic',
        'spline', 'lanczos', 'average', 'mode'.

    Returns
    -------
    int
        Corresponding gdal.GRA_* resampling-algorithm constant.

    Raises
    ------
    UnsupportedMethodError
        If `interp_str` is not a recognized method name.
    """
    # List the accepted aliases too, so the error message matches what the
    # lookup table actually supports (it previously omitted 'bilinear' and
    # 'bicubic' even though they were accepted).
    interp_choices = ('nearest', 'linear', 'bilinear', 'cubic', 'bicubic', 'spline', 'lanczos', 'average', 'mode')
    interp_dict = {
        'nearest'  : gdal.GRA_NearestNeighbour,
        'linear'   : gdal.GRA_Bilinear,
        'bilinear' : gdal.GRA_Bilinear,
        'cubic'    : gdal.GRA_Cubic,
        'bicubic'  : gdal.GRA_Cubic,
        'spline'   : gdal.GRA_CubicSpline,
        'lanczos'  : gdal.GRA_Lanczos,
        'average'  : gdal.GRA_Average,
        'mode'     : gdal.GRA_Mode,
    }
    if interp_str not in interp_dict:
        raise UnsupportedMethodError("`interp` must be one of {}, but was '{}'".format(interp_choices, interp_str))
    return interp_dict[interp_str]
def saveArrayAsTiff(array, dest,
                    X=None, Y=None, proj_ref=None, geotrans_rot_tup=(0, 0),
                    nodata_val='like_raster', dtype_out=None, nbits=None, co_args='compress',
                    like_raster=None):
    """
    Save a NumPy 2D array as a single-band raster image in GeoTiff format.

    Parameters
    ----------
    array : ndarray, 2D
        Array containing the values of pixels to be saved in the image,
        one value per pixel.
    dest : str (file path)
        File path where the raster image will be saved.
        If a file already exists at this path, it will be overwritten.
    X : None or (ndarray, 1D)
        Grid coordinates corresponding to all columns in the raster image,
        from left to right, such that `X[j]` specifies the x-coordinate for
        all pixels in `array[:, j]`.
        If None, `like_raster` must be provided.
    Y : None or (ndarray, 1D)
        Grid coordinates corresponding to all rows in the raster image,
        from top to bottom, such that `Y[i]` specifies the y-coordinate for
        all pixels in `array[i, :]`
        If None, `like_raster` must be provided.
    proj_ref : None, str (WKT or Proj4), or osr.SpatialReference
        Projection reference of the raster image to be saved, specified as
        either a WKT/Proj4 string or an osr.SpatialReference object.
        If None, `like_raster` must be provided.
    geotrans_rot_tup : None or tuple (2 floats)
        The third and fifth elements of the geometric transformation tuple
        that specify rotation from north-up of the raster image to be saved.
        If a north-up output is desired, let both elements be zero.
        See documentation for `getCornerCoords` for more information on the
        geometric transformation tuple.
        If None, `like_raster` must be provided.
    nodata_val : 'like_raster', None, or int/float
        Non-NaN value in `array` that will be classified as "no data" in the
        output raster image.
        If 'like_raster', allow this value to be set equal to the nodata value
        of `like_raster`.
    dtype_out : data type as str (e.g. 'uint16'), NumPy data type
        (e.g. np.uint16), or numpy.dtype object (e.g. from arr.dtype)
        Numeric type of values in the output raster image.
        If 'nbits', write output raster image in an unsigned integer GDAL
        data type with ['NBITS=n'] option in driver, where n is set to `nbits`
        if `nbits` is not None. If `nbits` is None, n is calculated to be only
        as large as necessary to capture the maximum value of `array`, and the
        output array data type is unsigned integer of minimal bitdepth.
    nbits : None or 1 <= int <= 32
        Only applies when `dtype_out='nbits'`.
    co_args : None, 'compress', or list of '[ARG_NAME]=[ARG_VALUE]' strings
        Creation Option arguments to pass to the `Create` method of the GDAL
        Geotiff driver that instantiates the output raster dataset.
        If 'compress', the following default arguments are used:
          'BIGTIFF=IF_SAFER'
          'COMPRESS=LZW'
          'TILED=YES'
        The 'NBITS=X' argument may not be used -- that is set by the `nbits`
        argument for this function.
        A list of Creation Option arguments may be found here: [1].
    like_raster : None, str (file path), or osgeo.gdal.Dataset
        File path or GDAL dataset for a raster image of identical dimensions,
        geographic location/extent, spatial reference, and nodata value as
        the raster image that will be saved.
        If provided, `X`, `Y`, `proj_ref`, and `geotrans_rot_tup` should not
        be provided, as these metrics will be taken from the like raster.

    Returns
    -------
    None

    Notes
    -----
    The image is written directly with the GDAL GTiff driver; no external
    programs are invoked.

    References
    ----------
    .. [1] https://www.gdal.org/frmt_gtiff.html
    """
    # Resolve the projection argument into an osr.SpatialReference plus,
    # when available, its WKT and/or Proj4 string forms.
    spat_ref = None
    projstr_wkt = None
    projstr_proj4 = None
    if proj_ref is None:
        pass
    elif type(proj_ref) == osr.SpatialReference:
        spat_ref = proj_ref
    elif isinstance(proj_ref, str):
        spat_ref = osr.SpatialReference()
        if proj_ref.lstrip().startswith('PROJCS'):
            projstr_wkt = proj_ref
            spat_ref.ImportFromWkt(projstr_wkt)
        elif proj_ref.lstrip().startswith('+proj='):
            projstr_proj4 = proj_ref
            spat_ref.ImportFromProj4(projstr_proj4)
        else:
            raise InvalidArgumentError("`proj_ref` of string type has unknown format: '{}'".format(proj_ref))
    else:
        raise InvalidArgumentError("`proj_ref` must be a string or osr.SpatialReference object, "
                                   "but was of type {}".format(type(proj_ref)))
    dtype_is_nbits = (dtype_out is not None and type(dtype_out) is str and dtype_out == 'nbits')
    if co_args is not None and co_args != 'compress':
        if type(co_args) != list:
            raise InvalidArgumentError("`co_args` must be a list of strings, but was {}".format(co_args))
        if dtype_is_nbits:
            for arg in co_args:
                if arg.startswith('NBITS='):
                    raise InvalidArgumentError("`co_args` cannot include 'NBITS=X' argument. "
                                               "Please use this function's `nbits` argument.")
    shape = array.shape
    dtype_gdal = None
    if like_raster is not None:
        # Georeferencing (and possibly nodata/dtype) come from the template raster.
        ds_like = openRaster(like_raster)
        if shape[0] != ds_like.RasterYSize or shape[1] != ds_like.RasterXSize:
            raise InvalidArgumentError("Shape of `like_rasterFile` '{}' ({}, {}) does not match "
                                       "the shape of `array` {}".format(
                like_raster, ds_like.RasterYSize, ds_like.RasterXSize, shape)
            )
        geo_trans = extractRasterData(ds_like, 'geo_trans')
        if proj_ref is None:
            spat_ref = extractRasterData(ds_like, 'spat_ref')
        if nodata_val == 'like_raster':
            nodata_val = extractRasterData(ds_like, 'nodata_val')
        if dtype_out is None:
            dtype_gdal = extractRasterData(ds_like, 'dtype_val')
    else:
        # Georeferencing is built from the X/Y coordinate vectors.
        if shape[0] != Y.size or shape[1] != X.size:
            raise InvalidArgumentError("Lengths of [`Y`, `X`] grid coordinates ({}, {}) do not match "
                                       "the shape of `array` ({})".format(Y.size, X.size, shape))
        geo_trans = (X[0], X[1]-X[0], geotrans_rot_tup[0],
                     Y[0], geotrans_rot_tup[1], Y[1]-Y[0])
        if nodata_val == 'like_raster':
            nodata_val = None
    if dtype_out is not None:
        if dtype_is_nbits:
            # Determine (or validate) the bit depth, then pick the smallest
            # unsigned GDAL type that can hold it.
            if nbits is None:
                nbits = int(math.floor(math.log(float(max(1, np.max(array))), 2)) + 1)
            elif type(nbits) != int or nbits < 1:
                raise InvalidArgumentError("`nbits` must be an integer in the range [1,32]")
            if nbits <= 8:
                dtype_gdal = gdal.GDT_Byte
            elif nbits <= 16:
                dtype_gdal = gdal.GDT_UInt16
            elif nbits <= 32:
                dtype_gdal = gdal.GDT_UInt32
            else:
                raise InvalidArgumentError("Output array requires {} bits of precision, "
                                           "but GDAL supports a maximum of 32 bits")
        else:
            if type(dtype_out) is str:
                # NOTE(review): eval of 'np.<name>' assumes `dtype_out` names a
                # real NumPy type; an arbitrary string here would execute as
                # code -- confirm callers only pass trusted dtype names.
                dtype_out = eval('np.{}'.format(dtype_out.lower()))
            dtype_gdal = gdal_array.NumericTypeCodeToGDALTypeCode(dtype_out)
            if dtype_gdal is None:
                raise InvalidArgumentError("Output array data type ({}) does not have equivalent "
                                           "GDAL data type and is not supported".format(dtype_out))
    # Promote input dtypes that GDAL cannot store directly (bool/int8/float16).
    dtype_in = array.dtype
    dtype_in_gdal, promote_dtype = dtype_np2gdal(dtype_in)
    if promote_dtype is not None:
        array = array.astype(promote_dtype)
        dtype_in = promote_dtype(1).dtype
    # Warn (but do not fail) on input/output data type mismatches.
    if dtype_out is not None:
        if dtype_is_nbits:
            if not np.issubdtype(dtype_in, np.unsignedinteger):
                warn("Input array data type ({}) is not unsigned and may be incorrectly saved "
                     "with n-bit precision".format(dtype_in))
        elif dtype_in != dtype_out:
            warn("Input array NumPy data type ({}) differs from output "
                 "NumPy data type ({})".format(dtype_in, dtype_out(1).dtype))
    elif dtype_gdal is not None and dtype_gdal != dtype_in_gdal:
        warn("Input array GDAL data type ({}) differs from output "
             "GDAL data type ({})".format(gdal.GetDataTypeName(dtype_in_gdal),
                                          gdal.GetDataTypeName(dtype_gdal)))
    if dtype_gdal is None:
        dtype_gdal = dtype_in_gdal
    sys.stdout.write("Saving Geotiff {} ...".format(dest))
    sys.stdout.flush()
    # Create the output raster dataset in memory.
    if co_args is None:
        co_args = []
    if co_args == 'compress':
        co_args = []
        co_args.extend(['BIGTIFF=IF_SAFER'])  # Will create BigTIFF
                                              # if the resulting file *might* exceed 4GB.
        co_args.extend(['COMPRESS=LZW'])      # Do LZW compression on output image.
        co_args.extend(['TILED=YES'])         # Force creation of tiled TIFF files.
    if dtype_is_nbits:
        co_args.extend(['NBITS={}'.format(nbits)])
    if spat_ref is not None:
        if projstr_wkt is None:
            projstr_wkt = spat_ref.ExportToWkt()
        if projstr_proj4 is None:
            projstr_proj4 = spat_ref.ExportToProj4()
    # NOTE(review): if neither `proj_ref` nor `like_raster` supplies a spatial
    # reference, `projstr_proj4` is still None here and `.strip()` raises
    # AttributeError -- confirm callers always provide a projection.
    sys.stdout.write(" GDAL data type: {}, NoData value: {}, Creation Options: {}, Projection (Proj4): {} ...".format(
        gdal.GetDataTypeName(dtype_gdal), nodata_val, ' '.join(co_args) if co_args else None, projstr_proj4.strip())
    )
    sys.stdout.flush()
    sys.stdout.write(" creating file ...")
    sys.stdout.flush()
    driver = gdal.GetDriverByName('GTiff')
    ds_out = driver.Create(dest, shape[1], shape[0], 1, dtype_gdal, co_args)
    ds_out.SetGeoTransform(geo_trans)
    if projstr_wkt is not None:
        ds_out.SetProjection(projstr_wkt)
    band = ds_out.GetRasterBand(1)
    if nodata_val is not None:
        band.SetNoDataValue(nodata_val)
    sys.stdout.write(" writing array values ...")
    sys.stdout.flush()
    band.WriteArray(array)
    # Write the output raster dataset to disk.
    sys.stdout.write(" finishing file ...")
    sys.stdout.flush()
    ds_out = None  # Dereference dataset to initiate write to disk of intermediate image.
    sys.stdout.write(" done!\n")
    sys.stdout.flush()
|
nilq/baby-python
|
python
|
import random

# Simple number-guessing game: the player keeps guessing a secret number
# between 0 and 10 until they hit it, with higher/lower hints each round.
print('====================== \033[35mBEM-VINDO AO JOGO DA ADIVINHAÇÃO\033[m ======================')
print('Tente adivinhar o número entre 0 e 10 que eu estou pensando')
secret = random.randint(0, 10)
tries = 0
while True:
    guess = int(input('Qual é a sua tentativa? '))
    tries += 1
    if guess == secret:
        break
    # Hint at the direction of the secret number.
    if guess < secret:
        print('Mais... Tente mais uma vez.')
    else:
        print('Menos... Tente mais uma vez.')
print('\033[32mVocê venceu, PARABÉNS\033[m')
print('Foram necessárias \033[37m{}\033[m tentativas para me vencer '.format(tries))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Imports
import json
import discord
import random
import datetime
import asyncio
# Single shared Discord client; every event handler below registers on it.
client = discord.Client()
# Readiness Indicator
@client.event
async def on_ready():
    """Log readiness and set the bot's 'playing' status once connected."""
    print("The bot is ready!")
    await client.change_presence(game=discord.Game(name="roulette with your money"))
# Reminder Message
# CURRENTLY NOT WORKING AS INTENDED
# Intended to send the message every Monday
@client.event
async def background_loop():
    """Post a weekly downtime reminder to the configured channel on Mondays.

    Waits until the client is ready, then checks the current weekday each
    iteration: on a Monday it sends the reminder and sleeps a full week;
    otherwise it sleeps a day before checking again.
    """
    await client.wait_until_ready()
    while not client.is_closed:
        # The original compared the unbound `datetime.weekday` (a module has
        # no such attribute) to 0, which raised AttributeError; the weekday
        # must be read from the current date (Monday == 0).
        if datetime.datetime.now().weekday() == 0:
            channel = client.get_channel("397349083318059010")
            message = "Don't forget downtime!"
            await client.send_message(channel, message)
            await asyncio.sleep(604800)  # one week: skip ahead to next Monday
        else:
            # Without this sleep the non-Monday path would spin in a tight loop.
            await asyncio.sleep(86400)  # check again tomorrow
# Main Functionality
@client.event
async def on_message(message):
with open('banks2.txt') as bankin:
bank = json.load(bankin)
# Banking Functions
if message.content.startswith('/bank'):
user = str(message.author)
operation = message.content.split()[1]
if operation in ['add', 'subtract']:
metal = message.content.split()[2]
amount = message.content.split()[3]
if user in bank.keys():
if operation == 'add':
if metal == 'gold':
bank[user][metal] += int(amount)
elif metal == 'silver':
bank[user][metal] += int(amount)
elif metal == 'copper':
bank[user][metal] += int(amount)
await client.send_message(message.channel,
f'You have deposited {amount} {metal}. You now have {bank[user][metal]} '
f'{metal} in your account.')
elif operation == 'subtract':
if metal == 'gold':
bank[user][metal] -= int(amount)
elif metal == 'silver':
bank[user][metal] -= int(amount)
elif metal == 'copper':
bank[user][metal] -= int(amount)
await client.send_message(message.channel,
f'You have withdrawn {amount} {metal}. You now have {bank[user][metal]} '
f'{metal} in your account.')
elif operation == ('balance'):
await client.send_message(message.channel, f'Your balance is {bank[user]["gold"]} gold, '
f'{bank[user]["silver"]} silver, and {bank[user]["copper"]}'
f' copper.')
elif operation == ('clear'):
bank[user]["gold"] = 0
bank[user]["silver"] = 0
bank[user]["copper"] = 0
await client.send_message(message.channel, 'You have cleared your balance.')
elif operation == ('condense'):
silver, copper = divmod(bank[user]["copper"], 10)
bank[user]["silver"] += silver
bank[user]["copper"] = copper
gold, silver = divmod(bank[user]["silver"], 10)
bank[user]["gold"] += gold
bank[user]["silver"] = silver
await client.send_message(message.channel,
f'Your balance has been condensed to {bank[user]["gold"]} gold, '
f'{bank[user]["silver"]} silver, and {bank[user]["copper"]} copper.')
else:
bank.update({user: {'gold': 0, 'silver': 0, 'copper': 0}})
await client.send_message(message.channel,
'You did not have an account. You now have an account with a balance of 0')
with open('banks2.txt', 'w') as bankout:
json.dump(bank, bankout)
# Dice Rolling Functions
if message.content.startswith('/roll'):
if "-" in message.content:
operator = "-"
elif "+" in message.content:
operator = "+"
else:
operator = str()
if "-" in message.content:
bonus = 0 - int(message.content.split('-')[1])
elif "+" in message.content:
bonus = 0 + int(message.content.split('+')[1])
else:
bonus = int(0)
if operator != "":
sidesEnd = message.content.find(operator)
elif operator == "":
sidesEnd = len(message.content)
numberofDice = message.content[message.content.find('/roll') + 5:message.content.find('d')]
numberofSides = message.content[message.content.find('d') + 1:sidesEnd]
rolls = 0
rawrolls = []
bonusRolls = []
dice = 0
try:
dice = int(numberofDice)
except ValueError:
dice = 1
pass
while dice > rolls:
rawrolls.append(random.randint(1, int(numberofSides)))
rolls += 1
for r in rawrolls:
bonusRolls.append(r + bonus)
await client.send_message(message.channel,
f'You rolled **{rawrolls}**. Your bonus of **[{bonus}]** brings that to **{bonusRolls}'
f'**.')
elif message.content.startswith('/r'):
if "-" in message.content:
operator = "-"
elif "+" in message.content:
operator = "+"
else:
operator = str()
if "-" in message.content:
bonus = 0 - int(message.content.split('-')[1])
elif "+" in message.content:
bonus = 0 + int(message.content.split('+')[1])
else:
bonus = int(0)
if operator != "":
sidesEnd = message.content.find(operator)
elif operator == "":
sidesEnd = len(message.content)
numberofDice = message.content[message.content.find('/r') + 2:message.content.find('d')]
numberofSides = message.content[message.content.find('d') + 1:sidesEnd]
rolls = 0
rawrolls = []
bonusRolls = []
dice = 0
try:
dice = int(numberofDice)
except ValueError:
dice = 1
pass
while dice > rolls:
rawrolls.append(random.randint(1, int(numberofSides)))
rolls += 1
for r in rawrolls:
bonusRolls.append(r + bonus)
await client.send_message(message.channel,
f'You rolled **{rawrolls}**. Your bonus of **[{bonus}]** brings that to **{bonusRolls}'
f'**.')
# Help Section
if message.content.startswith('/help'):
with open('helps.txt') as file:
helps = json.load(file)
for h in helps:
await client.send_message(message.channel, f'**{h}** - {helps[h]}\n')
# Command List
if message.content.startswith('/commands'):
with open('commands.txt') as file:
commands = json.load(file)
for c in commands:
await client.send_message(message.channel, f'**{c}** - {commands[c]}\n')
# Communal Banking
if message.content.startswith('/communal'):
with open ('communalbank.txt') as communalIn:
communal = json.load(communalIn)
operation = message.content.split()[1]
if operation in ['add', 'subtract']:
metal = message.content.split()[2]
amount = message.content.split()[3]
if operation == 'add':
if metal == 'gold':
communal[metal] += int(amount)
elif metal == 'silver':
communal[metal] += int(amount)
elif metal == 'copper':
communal[metal] += int(amount)
await client.send_message(message.channel, f'You have deposited {amount} {metal} in the communal account')
elif operation == ('balance'):
await client.send_message(message.channel, f'The Communal Balance is {communal["gold"]} gold, {communal["silver"]} silver, and {communal["copper"]} copper')
elif operation == ('condense'):
silver, copper = divmod(communal["copper"], 10)
communal["silver"] += silver
communal["copper"] = copper
gold, silver = divmod(communal["silver"], 10)
communal["gold"] += gold
communal["silver"] = silver
await client.send_message(message.channel, f'The Communal Balance is {communal["gold"]} gold, {communal["silver"]} silver, and {communal["copper"]} copper')
with open ('communalbank.txt', 'w') as communalOut:
json.dump(communal, communalOut)
# Run the bot
# NOTE(review): placeholder credential -- supply a real bot token here, and
# avoid committing real secrets to source control.
token = 'Your Token'
client.run(token)
|
nilq/baby-python
|
python
|
import re
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from markdown import markdown
register = template.Library()
@register.filter("markdown")
@stringfilter
def markdown_filter(value):
    """Render *value* (a Markdown string) to HTML, marked safe for templates."""
    return mark_safe(markdown(value))
@register.tag(name="markdown")
def do_markdown(parser, token):
    """Compile a {% markdown %}...{% endmarkdown %} block tag.

    An optional trailing ``as NAME`` on the opening tag stores the rendered
    HTML in the context variable ``NAME`` instead of emitting it inline.
    """
    nodelist = parser.parse(("endmarkdown",))
    parser.delete_first_token()
    match = re.search(r"as (?P<var_name>\w+)$", token.contents)
    var_name = match.group("var_name") if match else None
    return MarkdownNode(nodelist, var_name)
class MarkdownNode(template.Node):
    """Template node that renders its child nodes through the markdown filter.

    When constructed with a ``var_name``, the rendered HTML is stored in the
    template context under that name and nothing is emitted inline.
    """

    def __init__(self, nodelist, var_name=None):
        self.nodelist = nodelist
        self.var_name = var_name

    def render(self, context):
        rendered = markdown_filter(self.nodelist.render(context))
        # No capture variable: emit the HTML inline.
        if not self.var_name:
            return rendered
        # Capture variable given: stash the HTML and emit nothing.
        context[self.var_name] = rendered
        return ""
|
nilq/baby-python
|
python
|
"""
This file is part of Advent of Code 2019.
Coded by: Samuel Michaels (samuel.michaels@protonmail.com)
11 December 2019
NO COPYRIGHT
This work is dedicated to the public domain. All rights have been
waived worldwide under copyright law, including all related and
neighboring rights, to the extent allowed by law.
You may copy, modify, distribute, and perform the work, even for
commercial purposes, all without asking permission. See the
accompanying COPYRIGHT document.
"""
import io
import sys
import unittest
from day9 import ElfCPU, InvalidInstructionError, ProtectionFaultError, InputInterrupt, OutputInterrupt, OutputOverflow
from day9 import InputOverflow
from unittest.mock import patch
class TestElfCPU(unittest.TestCase):
    """Unit tests for the day-9 Intcode ``ElfCPU``.

    Covers program loading, peek/poke bounds checking, the arithmetic,
    comparison and jump opcodes in positional, immediate and relative
    parameter modes, the input/output interrupt machinery, halting,
    reset, and protection faults.
    """

    def test_load_string_types(self):
        """
        Checks for TypeError
        """
        e = ElfCPU()
        with self.assertRaises(TypeError):
            # noinspection PyTypeChecker
            e.load_string(0)
        e.load_string('1,2,3,4')

    def test_peek(self):
        """
        Tests address range for peek
        """
        e = ElfCPU()
        e.load_string('0,1,2,3,4,5,6,7,8,9')
        # TypeError
        with self.assertRaises(TypeError):
            # noinspection PyTypeChecker
            e.peek('x')
        # Above memory range
        with self.assertRaises(ValueError):
            e.peek(2**65)
        # Below memory range
        with self.assertRaises(ValueError):
            e.peek(-1)
        self.assertEqual(e.peek(0), 0)
        self.assertEqual(e.peek(9), 9)

    def test_poke(self):
        """
        Tests address range and data for poke
        """
        e = ElfCPU()
        e.load_string('0,1,2,3,4,5,6,7,8,9')
        # TypeError
        with self.assertRaises(TypeError):
            # noinspection PyTypeChecker
            e.poke('x', 2)
        # Above memory range
        with self.assertRaises(ValueError):
            e.poke(2**65, 2)
        # Below memory range
        with self.assertRaises(ValueError):
            e.poke(-1, 2)
        # Value
        with self.assertRaises(ValueError):
            e.poke(0, 2**64+1)
        self.assertEqual(e.poke(0, 99), 99)
        self.assertEqual(e.poke(9, 88), 88)
        self.assertEqual(e.peek(0), 99)
        self.assertEqual(e.peek(9), 88)

    def test_invalid_instr(self):
        """
        Tests for invalid op code
        """
        e = ElfCPU()
        e.load_string('123456789')
        with self.assertRaises(InvalidInstructionError):
            e.execute()

    def test_op_add(self):
        """
        Tests ADD op code
        [dst]:=[a]+[b]
        """
        e = ElfCPU()
        # Invalid address 123456789 for a
        e.load_string('1,123456789,0,0')
        with self.assertRaises(ProtectionFaultError):
            e.step()
        # Invalid address 123456789 for b
        e.load_string('1,0,123456789,0')
        with self.assertRaises(ProtectionFaultError):
            e.step()
        # Invalid address 123456789 for dst
        e.load_string('1,0,0,123456789')
        with self.assertRaises(ProtectionFaultError):
            e.step()
        # 1 + 1 = 2 @ address 0
        e.load_string('1,0,0,0,99')
        e.step()
        self.assertEqual(e.peek(0), 2)
        # 2**64 + 1 = 1 @ address 0 (overflow and wrap)
        #e.load_string('1,5,6,0,99,'+str(2**64)+',1')
        #e.step()
        #self.assertEqual(e.peek(0), 1)
        # [dst]:=a+[b]
        e.load_string('101,44,5,6,99,2,6')
        e.execute()
        self.assertEqual(e.peek(6), 46)
        # [dst]:=[a]+b
        e.load_string('1001,5,50,6,99,2,6')
        e.execute()
        self.assertEqual(e.peek(6), 52)
        # [dst]:=a+b
        e.load_string('1101,5,5,6,99,2,6')
        e.execute()
        self.assertEqual(e.peek(6), 10)
        # [dst]:=r[a]+b
        e.load_string('109,10,1201,0,5,7,99,7,3,3,5')
        e.execute()
        self.assertEqual(e.peek(7), 10)
        # [dst]:=a+r[b]
        e.load_string('109,10,2101,20,0,7,99,7,3,3,10')
        e.execute()
        self.assertEqual(e.peek(7), 30)
        # [dst]:=a+r[b]
        e.load_string('109,10,2101,20,0,7,99,7,3,3,10')
        e.execute()
        self.assertEqual(e.peek(7), 30)
        # r[dst]:=a+b
        e.load_string('109,10,21101,16,16,0,99,7,3,3,7')
        e.execute()
        self.assertEqual(e.peek(10), 32)
        # dst:=a+b INVALID
        #e.load_string('11101,32,32,1,99')
        #e.execute()
        #self.assertEqual(e.peek(1), 64)

    def test_op_mul(self):
        """
        Tests MUL op code
        [dst]:=[a]*[b]
        """
        e = ElfCPU()
        # Invalid address 123456789 for a
        e.load_string('2,123456789,0,0')
        with self.assertRaises(ProtectionFaultError):
            e.step()
        # Invalid address 123456789 for b
        e.load_string('2,0,123456789,0')
        with self.assertRaises(ProtectionFaultError):
            e.step()
        # Invalid address 123456789 for dst
        e.load_string('2,0,0,123456789')
        with self.assertRaises(ProtectionFaultError):
            e.step()
        # [dst]:=[a]*[b]
        e.load_string('2,0,0,0,99')
        e.step()
        self.assertEqual(e.peek(0), 4)
        # [dst]:=a*[b]
        e.load_string('102,44,5,6,99,2,6')
        e.execute()
        self.assertEqual(e.peek(6), 88)
        # [dst]:=[a]*b
        e.load_string('1002,5,50,6,99,2,6')
        e.execute()
        self.assertEqual(e.peek(6), 100)
        # [dst]:=a*b
        e.load_string('1102,5,5,6,99,2,6')
        e.execute()
        self.assertEqual(e.peek(6), 25)
        # [dst]:=r[a]*b
        e.load_string('109,10,1202,0,4,7,99,7,3,3,4')
        e.execute()
        self.assertEqual(e.peek(7), 16)
        # [dst]:=a*r[b]
        e.load_string('109,10,2102,7,0,7,99,7,3,3,2')
        e.execute()
        self.assertEqual(e.peek(7), 14)
        # [dst]:=r[a]*r[b]
        e.load_string('109,10,2202,0,1,7,99,7,3,3,2,6')
        e.execute()
        self.assertEqual(e.peek(7), 12)
        # dst:=a*b
        e.load_string('11102,6,6,0,99')
        e.execute()
        self.assertEqual(e.peek(0), 36)
        # r[dst]:=a*b
        e.load_string('109,7,21102,8,3,0,99,1')
        e.execute()
        self.assertEqual(e.peek(7), 24)

    def test_op_input(self):
        """
        Tests input op code
        Use unittest.mock.patch to fake the input value
        """
        e = ElfCPU()
        # Interrupts off
        e.load_string('103,3,99,-1')
        e.interrupts = False
        with patch('builtins.input', return_value='1234'):
            e.execute()
        self.assertEqual(e.peek(3), 1234)
        # Interrupts on IMMEDIATE MODE
        e.load_string('103,5,103,5,99,-1')
        e.interrupts = True
        with self.assertRaises(InputInterrupt):
            e.step()
        # Should be back at pc = 0
        self.assertEqual(e.pc, 0)
        # Load input
        e.input_buffer = 567
        # Loading again overflows
        with self.assertRaises(InputOverflow):
            e.input_buffer = 123
        # Execute the input instruction
        e.step()
        self.assertEqual(e.peek(5), 567)
        # Exec next input instruction
        with self.assertRaises(InputInterrupt):
            e.step()
        e.input_buffer = 987
        # Execute until end
        e.execute()
        self.assertEqual(e.peek(5), 987)
        ######################################################
        # Interrupts on RELATIVE MODE
        e.load_string('109,10,203,0,203,1,203,-1,99,102,100,101')
        e.interrupts = True
        # step past the relative base op code
        e.step()
        with self.assertRaises(InputInterrupt):
            e.step()
        # Should be back at pc = 2 (after relative base op code)
        self.assertEqual(e.pc, 2)
        # Load input
        e.input_buffer = 567
        # Loading again overflows
        with self.assertRaises(InputOverflow):
            e.input_buffer = 123
        # Execute the input instruction
        e.step()
        self.assertEqual(e.peek(10), 567)
        # Exec next input instruction
        with self.assertRaises(InputInterrupt):
            e.step()
        e.input_buffer = 987
        # Step to execute this input
        e.step()
        self.assertEqual(e.peek(11), 987)
        # Exec next input instruction
        with self.assertRaises(InputInterrupt):
            e.step()
        e.input_buffer = 456
        # Execute until end
        e.execute()
        self.assertEqual(e.peek(9), 456)
        ######################################################
        # Interrupts on POSITIONAL MODE
        e.load_string('3,7,3,8,3,9,99,1,3,5')
        e.interrupts = True
        with self.assertRaises(InputInterrupt):
            e.step()
        # Should be back at pc = 0
        self.assertEqual(e.pc, 0)
        # Load input
        e.input_buffer = 345
        # Loading again overflows
        with self.assertRaises(InputOverflow):
            e.input_buffer = 123
        # Execute the input instruction
        e.step()
        self.assertEqual(e.peek(7), 345)
        # Exec next input instruction
        with self.assertRaises(InputInterrupt):
            e.step()
        e.input_buffer = 765
        # Step to execute this input
        e.step()
        self.assertEqual(e.peek(8), 765)
        # Exec next input instruction
        with self.assertRaises(InputInterrupt):
            e.step()
        e.input_buffer = 555
        # Execute until end
        e.execute()
        self.assertEqual(e.peek(9), 555)

    def test_op_output(self):
        """
        Tests output op code
        Use io.StringIO() to capture the output
        """
        e = ElfCPU()
        # Interrupts off
        e.load_string('4,5,104,66,99,55,5')
        e.interrupts = False
        result = None
        with patch('sys.stdout', new=io.StringIO()) as output:
            e.execute()
            result = output.getvalue()
        result = result.splitlines()
        # First is a reference to memory address 5
        self.assertEqual(result[0].strip(), '55')
        # Second is an immediate value
        self.assertEqual(result[1].strip(), '66')
        # Interrupts on
        e.load_string('4,5,104,66,99,55,5')
        e.interrupts = True
        with self.assertRaises(OutputInterrupt):
            e.execute()
        self.assertEqual(e.output_buffer, 55)
        # Don't clear buffer
        with self.assertRaises(OutputOverflow):
            e.execute()
        # Restart test
        e.reset()
        e.load_string('4,5,104,66,99,55,5')
        e.interrupts = True
        with self.assertRaises(OutputInterrupt):
            e.execute()
        self.assertEqual(e.output_buffer, 55)
        # Clear buffer
        del e.output_buffer
        with self.assertRaises(OutputInterrupt):
            e.execute()
        self.assertEqual(e.output_buffer, 66)
        ###############################################
        # Interrupts on RELATIVE MODE
        # Restart test
        e.reset()
        e.load_string('109,5,204,1,99,6,1234')
        e.interrupts = True
        with self.assertRaises(OutputInterrupt):
            e.execute()
        self.assertEqual(e.output_buffer, 1234)

    def test_op_jmp_true(self):
        """
        Tests jump if true op code
        """
        e = ElfCPU()
        """
        Tests address 8 (which is 1) if it is non-zero. Since this is
        true, it jumps to the value of address 9 (which is 7). This
        terminates the program.
        """
        e.load_string('5,8,9,1101,1,1,8,99,1,7')
        e.execute()
        self.assertEqual(e.peek(8), 1)
        """
        Tests immediate value 8 if it is non-zero. Since it is
        true, jump to immediate address 7 which terminates.
        """
        e.load_string('1105,8,7,1101,1,1,8,99,1,7')
        e.execute()
        self.assertEqual(e.peek(8), 1)
        """
        Tests address 8 (which is 0) if it is non-zero. Since this is
        false it does not jump and instead adds 1+1 to address 8.
        """
        e.load_string('5,8,9999,11101,1,1,8,99,0,7')
        e.execute()
        self.assertEqual(e.peek(8), 2)
        """
        Tests immediate value 0 if it is non-zero. Since it is
        false it does not jump and instead adds 1+1 to address 8.
        """
        e.load_string('1105,0,9999,11101,1,1,8,99,0,7')
        e.execute()
        self.assertEqual(e.peek(8), 2)

    def test_op_cmp_lessthan(self):
        """
        Tests compare less than op code
        """
        e = ElfCPU()
        """
        Tests if value of address 8 (5) is less than value of
        address 9 (10). Since this is true write 1 to address 10.
        """
        e.load_string('7,8,9,10,99,5,10,-1,5,10,7')
        e.execute()
        self.assertEqual(e.peek(10), 1)
        """
        Tests if value of address 5 (10) is less than value of
        address 6 (5). Since this is false write 0 to address 10.
        """
        e.load_string('7,8,9,10,99,5,10,-1,10,5,7')
        e.execute()
        self.assertEqual(e.peek(10), 0)
        """
        Tests if immediate value of 5 is less than immediate value of
        10. Since this is true write 1 to address 7.
        """
        e.load_string('1107,5,10,7,99,0,0,-1')
        e.execute()
        self.assertEqual(e.peek(7), 1)
        """
        Tests if immediate value of 10 is less than immediate value of
        5. Since this is false write 0 to address 7.
        """
        e.load_string('11107,10,5,7,99,0,0,-1')
        e.execute()
        self.assertEqual(e.peek(7), 0)
        """
        if r[a] < r[b]
            r[dst]:=1
        else
            r[dst]:=0
        """
        e.load_string('109,10,22207,0,1,2,99,222,222,222,100,50,1')
        e.execute()
        self.assertEqual(e.peek(12), 0)
        """
        if r[a] < r[b]
            r[dst]:=1
        else
            r[dst]:=0
        """
        e.load_string('109,10,22207,0,1,2,99,222,222,222,50,100,1')
        e.execute()
        self.assertEqual(e.peek(12), 1)

    def test_op_eq(self):
        """
        Tests equals op code
        """
        e = ElfCPU()
        """
        Tests if value of address 5 (10) is equal to value
        of address 6 (10). Since this is true, write 1 to
        address 10.
        """
        e.load_string('8,8,9,10,99,10,10,-1,10,10,7')
        e.execute()
        self.assertEqual(e.peek(10), 1)
        """
        Tests if value of address 5 (10) is equal to value
        of address 6 (0). Since this is false, write 0 to
        address 10.
        """
        e.load_string('8,8,9,10,99,10,0,-1,5,6,7')
        e.execute()
        self.assertEqual(e.peek(10), 0)
        """
        Tests if immediate value 10 is equal to immediate value
        10. Since this is true, write 1 to address 7.
        """
        e.load_string('1108,10,10,7,99,2,3,-1')
        e.execute()
        self.assertEqual(e.peek(7), 1)
        """
        Tests if immediate value of 0 is equal to immediate value
        10. Since this is false, write 0 to address 7.
        """
        e.load_string('1108,0,10,7,99,2,3,-1')
        e.execute()
        self.assertEqual(e.peek(7), 0)
        """
        if r[a] = r[b]
            r[dst]:=1
        else
            r[dst]:=0
        """
        e.load_string('109,10,22208,0,1,2,99,222,222,222,555,555,1')
        e.execute()
        self.assertEqual(e.peek(12), 1)
        """
        if r[a] < r[b]
            r[dst]:=1
        else
            r[dst]:=0
        """
        e.load_string('109,10,22208,0,1,2,99,222,222,222,-500,100,1')
        e.execute()
        self.assertEqual(e.peek(12), 0)

    def test_halt(self):
        """
        Tests for the halt op code
        """
        e = ElfCPU()
        e.load_string('1,0,0,0,99')
        e.step()
        self.assertFalse(e.is_halted)
        e.step()
        self.assertTrue(e.is_halted)

    def test_reset(self):
        """
        Tests for CPU reset
        """
        e = ElfCPU()
        e.load_string('1,0,0,0,99')
        e.execute()
        e.reset()
        # Halted gets cleared
        self.assertFalse(e.is_halted)
        # Program counter goes to 0
        self.assertEqual(e.pc, 0)
        # Memory gets wiped so address 1 becomes invalid
        with self.assertRaises(ValueError):
            e.peek(1)

    def test_gpf(self):
        """
        Tests for a general protection fault by allowing the program counter to
        go past the end of the memory.
        """
        e = ElfCPU()
        # Jump to 2**20, the last memory address
        e.load_string('1106,0,1048576')
        with self.assertRaises(ProtectionFaultError):
            e.execute()

    def test_op_relative_base(self):
        """
        Tests the relative base mode op code
        """
        e = ElfCPU()
        # Position
        e.load_string('9,5,204,1,99,6,7,777')
        e.interrupts = True
        # Step over relative mode op
        e.step()
        with self.assertRaises(OutputInterrupt):
            e.execute()
        self.assertEqual(e.output_buffer, 777)
        # Immediate
        e.reset()
        e.load_string('109,5,204,1,99,444,777')
        e.interrupts = True
        # Step over relative mode op
        e.step()
        with self.assertRaises(OutputInterrupt):
            e.execute()
        self.assertEqual(e.output_buffer, 777)
        # Relative
        e.reset()
        e.load_string('209,9,209,6,204,-2,99,5,333,4,6')
        e.interrupts = True
        e.debug = True
        with self.assertRaises(OutputInterrupt):
            e.execute()
        self.assertEqual(e.output_buffer, 333)
# EOF
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from flask import Flask
from flask_restful import Resource, Api
from controller.TestSuit import TestSuit
from model.TestSuit import db as TestSuitDB
# Application and REST API wiring.
app = Flask(__name__)
api = Api(app)
# Expose the TestSuit resource at /testsuit.
api.add_resource(TestSuit, '/testsuit')
# Local SQLite file used by SQLAlchemy.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///example.sqlite'
TestSuitDB.init_app(app)
if __name__ == '__main__':
    # Create the tables on first run, then start the development server.
    with app.app_context():
        TestSuitDB.create_all()
    app.run(debug=True)
|
nilq/baby-python
|
python
|
# Compact version
# ---------------
rule = (('0'*8 + bin(30)[2:])[-8:])[::-1]
cells = list('0'*40 + '1' + '0'*40)
for epoch in range(40):
print(''.join(cells).replace("0"," ").replace("1","█"))
cells = [cells[0]] + [rule[eval('0b' + cells[i-1]+cells[i]+cells[i+1])]
for i in range(1,len(cells)-1)] + [cells[-1]]
"""
# Readable version
# ----------------
# Rule transformation into ascii/binary representation
rule = 30
rule = ('0'*8 + bin(rule)[2:])[-8:]
rule = rule[::-1]
# Cells
p = 40
cells = '0'*p + '1' + '0'*p
# Iteration over epoch
n = 40
for epoch in range(n):
# Display
t = ''.join(cells)
t = t.replace("0", " ")
t = t.replace("1", "█")
print(t)
# Iteration over local neighborood
_cells = []
for i in range(1,len(cells)-1):
code = cells[i-1]+cells[i]+cells[i+1]
code = eval('0b' + code)
_cells.append(rule[code])
cells = [cells[0]] + _cells + [cells[-1]]
"""
|
nilq/baby-python
|
python
|
import io
import os
from google.cloud import vision
from google.cloud.vision import types
from google.protobuf.json_format import MessageToJson
class GoogleVisionApi:
    """Thin wrapper around the Google Vision client that keeps the raw
    responses cached per image path."""

    def __init__(self):
        # Instantiates a client and an empty response cache.
        self.client = vision.ImageAnnotatorClient()
        self.requestsCache = {}

    def request(self, imagePath):
        """Run document text detection on *imagePath*; return the response as JSON."""
        # Load the raw image bytes from disk.
        with io.open(imagePath, 'rb') as image_file:
            content = image_file.read()
        image = types.Image(content=content)
        # Perform detection and remember the raw response under the image path.
        response = self.client.document_text_detection(image=image)
        self.requestsCache[imagePath] = response
        return MessageToJson(response)

    def clear(self, requestName):
        """Drop a single cached response, if present."""
        if requestName in self.requestsCache:
            del self.requestsCache[requestName]

    def clearAll(self):
        """Drop every cached response."""
        self.requestsCache = {}
|
nilq/baby-python
|
python
|
from os import getenv, \
path
class Config(object):
    """Flask/SQLAlchemy configuration, populated from environment variables
    at import time."""

    # External API key; None when API_KEY is unset.
    API_KEY = getenv('API_KEY')
    # NOTE(review): when DEBUG is set in the environment this is a string
    # (always truthy), not a bool — confirm that is acceptable downstream.
    DEBUG = getenv('DEBUG', False)
    # Defaults to a local SQLite file next to this module; the .replace()
    # rewrites 'mysql2:' scheme URLs (Heroku/Rails style) to 'mysql:'.
    SQLALCHEMY_DATABASE_URI = getenv('DATABASE_URL', 'sqlite:///' + path.dirname(__file__) + '/app/app.db').replace('mysql2:', 'mysql:')
    # Same string-vs-bool caveat as DEBUG above.
    SQLALCHEMY_ECHO = getenv('SQLALCHEMY_ECHO', False)
    # Recycle pooled connections after 60 seconds.
    SQLALCHEMY_POOL_RECYCLE = 60
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    STRIP_WWW_PREFIX = True
    TESTING = False
|
nilq/baby-python
|
python
|
import numpy as np
import argparse
# Command-line arguments: dataset name, number of motifs kept per label,
# and the kappa-ratio threshold used to filter candidate terms.
parser = argparse.ArgumentParser(description='main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset', default='mag', choices=['mag', 'amazon'])
parser.add_argument('--num_motif', default=50, type=int)
parser.add_argument('--eta', default=2.0, type=float)
args = parser.parse_args()
dataset = args.dataset
topM = args.num_motif
eta = args.eta

# Read label names; each becomes a 'TERM_'-prefixed vocabulary entry.
labels = []
with open(f'../{dataset}_data/labels.txt') as fin:
    for line in fin:
        data = line.strip()
        labels.append('TERM_'+data)

label2emb = {}
word2idx = {}
idx2word = {}
word2emb = {}
# Load the embeddings (word + 100 floats per line) and L2-normalise each
# vector; lines with a different field count are skipped.
with open(f'{dataset}.emb') as fin:
    idx = 0
    for line in fin:
        data = line.strip().split()
        if len(data) != 101:
            continue
        word = data[0]
        emb = np.array([float(x) for x in data[1:]])
        emb = emb / np.linalg.norm(emb)
        word2idx[word] = idx
        idx2word[idx] = word
        word2emb[word] = emb
        idx += 1
        if word in labels:
            label2emb[word] = emb

# Load the per-word kappa value (one "word kappa" pair per line).
word2kappa = {}
with open(f'{dataset}.kappa') as fin:
    for line in fin:
        data = line.strip().split()
        if len(data) != 2:
            continue
        word = data[0]
        kappa = float(data[1])
        word2kappa[word] = kappa

# Stack all embeddings into one matrix so similarity to a label is a
# single matrix-vector product.
embMat = np.zeros((len(idx2word), 100))
for idx in range(len(idx2word)):
    embMat[idx] = word2emb[idx2word[idx]]

# For each label: rank all words by cosine similarity (vectors are unit
# norm, so the dot product is the cosine), then keep the top words whose
# kappa is at least eta times the label's kappa, up to topM motifs.
with open(f'{dataset}_motifs.txt', 'w') as fout:
    for label in labels:
        l_emb = word2emb[label]
        res = np.dot(embMat, l_emb)
        idx_sorted = list(np.argsort(-res))
        expanded = []
        k = 0
        kappa_l = word2kappa[label]
        while len(expanded) < topM and k < len(idx_sorted):
            word = idx2word[idx_sorted[k]]
            if word2kappa[word] >= eta*kappa_l:
                expanded.append(word)
            k += 1
        # One tab-separated line per label: label followed by its motifs.
        fout.write(label+'\t'+'\t'.join(expanded)+'\n')
|
nilq/baby-python
|
python
|
from infi.pyutils.lazy import cached_method
from ..inquiry import InquiryException
from logging import getLogger
logger = getLogger(__name__)
class InfiniBoxVolumeMixin(object):
    """Volume-related helpers mixed into InfiniBox SCSI device wrappers."""

    @cached_method
    def _is_volume_mapped(self):
        """In race condition between a rescan and volume unmap operation, the device may still exist while the volume
        is already unmapped. This method returns True if a volume is mapped to the device."""
        standard_inquiry = self.device.get_scsi_standard_inquiry()
        # spc4r30 section 6.4.2 tables 140 + 141, peripheral device type 0 is disk, 31 is unknown or no device
        return standard_inquiry.peripheral_device.type == 0

    @cached_method
    def get_volume_id(self):
        """ Returns the volume id within the InfiniBox """
        try:
            # Try the vendor-specific page 0xc6 first ...
            return self._get_key_from_json_page('vol_entity_id', 0xc6)
        except InquiryException:
            # ... and fall back to the default JSON page if that inquiry fails.
            return self._get_key_from_json_page('vol_entity_id')

    @cached_method
    def get_volume_name(self):
        """ Returns the volume name inside the Infinibox, or None if not a volume """
        return self._get_volume_name_from_json_page()

    @cached_method
    def get_volume_type(self):
        """ Returns the volume type, or None if it is not a volume """
        # Concrete subclasses must implement this.
        raise NotImplementedError()

    def _get_volume_name_from_json_page(self):
        # Try the string INQUIRY page 0xc7 first; fall back to the JSON page.
        try:
            return self.get_string_data(0xc7)
        except InquiryException:
            return self._get_key_from_json_page('vol')

    def _send_null_write(self, device):
        # Issue a zero-length WRITE(10); used only to probe write protection.
        from infi.asi.cdb.write import Write10Command
        from infi.asi.coroutines.sync_adapter import sync_wait
        cdb = Write10Command(0, '') # empty write
        with device.asi_context() as asi:
            sync_wait(cdb.execute(asi))

    def _is_null_write_returns_write_protected_check_condition(self, device):
        """Return True if a null write fails with a DATA_PROTECT check condition;
        re-raise any other check condition."""
        from infi.asi.errors import AsiCheckConditionError
        try:
            self._send_null_write(device)
            return False
        except AsiCheckConditionError as error:
            if error.sense_obj.sense_key == "DATA_PROTECT":
                return True
            raise

    def check_if_write_protected(self):
        """Probe whether the device is write-protected by issuing a null write."""
        from infi.storagemodel.linux.native_multipath import LinuxNativeMultipathBlockDevice
        if isinstance(self.device, LinuxNativeMultipathBlockDevice):
            # on linux, device-mapper swallows the I/Os and doesn't pass them to the device, so we bypass it
            return self._is_null_write_returns_write_protected_check_condition(self.device.get_paths()[0])
        else:
            return self._is_null_write_returns_write_protected_check_condition(self.device)
|
nilq/baby-python
|
python
|
import discord
from discord.ext import commands
import random, string
from asyncio import sleep
class Fun(commands.Cog):
    """Joke/novelty commands. Several commands edit the *invoking* message,
    which only works when the bot account itself authored it (selfbot style)."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def ascii(self, ctx, amount: int=1):
        # Delete the invoking message, then send `amount` messages of
        # 2000 random low-codepoint characters each.
        await ctx.message.delete()
        for i in range(amount):
            text=''
            for i in range(2000):
                text=text+chr(random.randrange(13000))
            await ctx.send(content=text)

    @commands.command()
    async def hack(self, ctx, user:discord.User):
        # Fake "hacking" progress animation: three stages, each repeatedly
        # editing the invoking message with a randomly-incremented percentage.
        perc=0
        while(perc < 100):
            await ctx.message.edit(content=f'**Получение почты `{user}`... {perc}%**')
            perc+=random.randint(1, 15)
        await ctx.message.edit(content='**:white_check_mark: Почта получена!**')
        await sleep(5)
        perc=0
        while(perc < 100):
            await ctx.message.edit(content=f'**Получение пароля `{user}`... {perc}%**')
            perc+=random.randint(1, 10)
        await ctx.message.edit(content='**:white_check_mark: Пароль был получен!**')
        await sleep(5)
        perc=0
        while(perc < 100):
            await ctx.message.edit(content=f'**Обход защиты... {perc}%**')
            perc+=random.randint(1, 5)
        await ctx.message.edit(content=f'**:white_check_mark: Успешно вошёл в аккаунт `{user}`**')

    @commands.command()
    async def rainbow(self, ctx):
        # Endlessly re-edit the invoking message with 300 random colour squares.
        emojis=['🟧', '🟦', '🟥', '🟪', '🟩', '🟨']
        while True:
            text=''
            for i in range(300):
                text=text+''.join(random.choice(emojis))
            await ctx.message.edit(content=text)
        # NOTE(review): unreachable — the loop above never exits, so the
        # message is never deleted; confirm whether a break condition is missing.
        await ctx.message.delete()

    @commands.command()
    async def ghoul(self, ctx):
        # Tokyo Ghoul countdown: from 1000, subtract 7 per message until <= 6.
        await ctx.message.edit(content='```Я гуль...```')
        a=1000
        while a>6:
            await ctx.send(f'**{a}-7={a-7}**')
            a-=7

    @commands.command()
    async def boom(self, ctx):
        # Five-second countdown edited into the invoking message, then a GIF
        # and a ~2000-line blank message to push prior history off-screen.
        await ctx.message.edit(content="**Данный чат будет взорван через 5 секунд...**")
        await sleep(1)
        await ctx.message.edit(content="**Данный чат будет взорван через 4 секунды...**")
        await sleep(1)
        await ctx.message.edit(content="**Данный чат будет взорван через 3 секунды...**")
        await sleep(1)
        await ctx.message.edit(content="**Данный чат будет взорван через 2 секунды...**")
        await sleep(1)
        await ctx.message.edit(content="**Данный чат будет взорван через 1 секунду...**")
        await sleep(1)
        await ctx.message.delete()
        message=await ctx.send("**Boom!**", file=discord.File("Resources/boom.gif"))
        await sleep(1)
        await ctx.send("⠀" + "\n"*1998 + "⠀")
        await message.delete()
def setup(bot):
    """discord.py extension entry point: register the Fun cog on *bot*."""
    bot.add_cog(Fun(bot))
|
nilq/baby-python
|
python
|
import requests
import lib.RModule as rmodule
import lib.RAudiostream as raudiostream
import lib.RAtmosphere as ratmosphere
import lib.RMonitoring as rmonitoring
class Project:
    """A Rhapsody project: loads its switchboards and neopixels from the REST
    backend and wires up the audiostream and monitoring services.

    Errors during loading are reported through the monitoring service rather
    than raised, so a partially-available backend still yields a usable object.
    """

    def __init__(self, project_id, broker):
        self.project_id = project_id
        # Instance-level containers. These were previously *class* attributes,
        # which Python shares between instances — every new Project would have
        # accumulated the modules of all earlier Projects.
        self.switchboards = []
        self.neopixels = []
        self.audiostream = None

        # STARTING MONITORING SERVICE
        self.monitoring = rmonitoring.Monitoring(self.project_id, broker)

        # LOADING SWITCHBOARDS
        retrieve_switchboards_request = "http://rhapsody.hestiaworkshop.net/rest/switchboards/get_switchboards/" + self.project_id
        try:
            r = requests.get(retrieve_switchboards_request)
            result = r.json()
            for switchboard in result:
                new_switchboard = rmodule.Module("switchboards", switchboard['switchboard_id'], switchboard['switchboard_mqtt_topic'], project_id, broker)
                self.switchboards.append(new_switchboard)
        except Exception as e:
            self.monitoring.send("ERROR", "project_initialization -> while loading switchboards", str(e))

        # LOADING NEOPIXELS
        retrieve_neopixels_request = "http://rhapsody.hestiaworkshop.net/rest/neopixels/get_neopixels/" + self.project_id
        try:
            r = requests.get(retrieve_neopixels_request)
            result = r.json()
            for neopixel in result:
                new_neopixel = rmodule.Module("neopixels", neopixel['neopixel_id'], neopixel['neopixel_mqtt_topic'], project_id, broker)
                self.neopixels.append(new_neopixel)
        except Exception as e:
            # Was a bare `except:` whose handler referenced an undefined `e`
            # (raising NameError inside the handler); bind the exception so
            # the error report actually works.
            self.monitoring.send("ERROR", "project_initialization -> while loading neopixels", str(e))

        # MODULES
        # LOADING AUDIOSTREAM MODULE
        try:
            self.audiostream = raudiostream.Audiostream("13041983", self.project_id, broker)
        except Exception as e:
            self.monitoring.send("ERROR", "project_initialization -> while loading audiostream module", str(e))

        # LOADING NEOPIXEL MODULE
        # NOTE(review): `rneopixel` is never imported, so this always raises
        # NameError and only logs an error — confirm the intended module
        # (lib.RNeopixel?) and add the import.
        try:
            self.neopixel = rneopixel.Neopixel("01101974", self.project_id, broker)
        except Exception as e:
            self.monitoring.send("ERROR", "project_initialization -> while loading neopixel module", str(e))

        # SERVICES
        # LOADING ATMOSPHERES SERVICE
        #atmosphere = ratmosphere.Atmosphere(self.project_id)

    def begin(self):
        """Start every loaded switchboard and neopixel module, then the audiostream."""
        # Starts switchboard modules
        try:
            for switchboard in self.switchboards:
                switchboard.start()
        except Exception as e:
            self.monitoring.send("ERROR", "starting_project -> starting switchboards", str(e))
        # Starts neopixel modules
        try:
            for neopixel in self.neopixels:
                neopixel.start()
        except Exception as e:
            self.monitoring.send("ERROR", "starting_project -> starting neopixels", str(e))
        # Starts audiostream module
        try:
            self.audiostream.start()
        except Exception as e:
            # Was missing the "ERROR" level argument that every other
            # monitoring.send call passes.
            self.monitoring.send("ERROR", "starting_project -> starting audiostream", str(e))
|
nilq/baby-python
|
python
|
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble, svm,datasets
import brica1
# Randomforest Component Definition
class RandomForestClassifierComponent(brica1.Component):
    """BriCA component wrapping a scikit-learn random-forest classifier."""

    def __init__(self, n_in):
        super(RandomForestClassifierComponent, self).__init__()
        self.classifier = ensemble.RandomForestClassifier()
        self.make_in_port("in0", n_in)
        self.make_out_port("out0", 1)

    def fire(self):
        """Predict the class of the vector on in0 and publish it on out0."""
        sample = self.inputs["in0"]
        self.results["out0"] = self.classifier.predict([sample])

    def fit(self, X, y):
        """Train the wrapped classifier on (X, y)."""
        self.classifier.fit(X, y)
# SVM Component Definition
class SVMComponent(brica1.Component):
    """BriCA component wrapping a scikit-learn linear SVM classifier."""

    def __init__(self, n_in):
        super(SVMComponent, self).__init__()
        self.classifier = svm.LinearSVC(C=1.0)
        self.make_in_port("in0", n_in)
        self.make_out_port("out0", 1)

    def fire(self):
        """Predict the class of the vector on in0 and publish it on out0."""
        sample = self.inputs["in0"]
        self.results["out0"] = self.classifier.predict([sample])

    def fit(self, X, y):
        """Train the wrapped classifier on (X, y)."""
        self.classifier.fit(X, y)
self.classifier.fit(X, y)
# SVM vs RFC Component Definition
class SVMvsRFC_Component(brica1.Component):
    """Compares the predictions arriving on in0 and in1 and emits their equality."""

    def __init__(self, n_in):
        super(SVMvsRFC_Component, self).__init__()
        self.make_in_port("in0", n_in)
        self.make_in_port("in1", n_in)
        self.make_out_port("out0", 1)

    def fire(self):
        """Publish whether the two input predictions agree."""
        left = self.inputs["in0"]
        right = self.inputs["in1"]
        self.results["out0"] = (left == right)
# NOTE(review): this script is Python 2 (xrange and print statements below).
# Load iris dataset
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# Setup data feeder component
feeder = brica1.ConstantComponent()
feeder.make_out_port("out0", 2)
# Setup components
svm = SVMComponent(2)
svm.fit(X, y)
RFC = RandomForestClassifierComponent(2)
RFC.fit(X,y)
SR =SVMvsRFC_Component(1)
# Connect the components
brica1.connect((feeder, "out0"), (svm, "in0"))
brica1.connect((feeder, "out0"), (RFC, "in0"))
brica1.connect((svm, "out0"), (SR, "in0"))
brica1.connect((RFC, "out0"), (SR, "in1"))
# Add components to module
mod = brica1.Module()
mod.add_component("feeder", feeder)
mod.add_component("svm", svm)
mod.add_component("RFC",RFC)
mod.add_component("SR", SR)
# Setup scheduler and agent
a = brica1.Agent()
a.add_submodule("mod", mod)
s = brica1.VirtualTimeSyncScheduler(a)
# Test the classifier
svm_result=[]
RFC_result=[]
svm_vs_RFC=[]
for i in xrange(len(X)):
    feeder.set_state("out0", X[i]) # Set data feeder to training data i
    s.step() # Execute prediction
    svm_result.append(svm.get_out_port("out0").buffer[0])
    RFC_result.append(RFC.get_out_port("out0").buffer[0])
    # presumably the second step propagates the fresh classifier outputs
    # into the comparison component — confirm scheduler semantics.
    s.step()
    svm_vs_RFC.append(SR.get_out_port("out0").buffer[0])
for i in xrange(len(X)):
    print "SVM: {}\tRFC: {}\tRESULT: {}".format(svm_result[i], RFC_result[i], svm_vs_RFC[i])
|
nilq/baby-python
|
python
|
""" Parent class to inception models """
import tensorflow as tf
from . import TFModel
from .layers import conv_block
class Inception(TFModel):
    """ The base class for all inception models
    **Configuration**
    body : dict
        layout : str
            a sequence of blocks in the network:
            - b - inception block for v1 and v3 models
            - r - reduction block
            - f - factorization_block for Inception_v3 model (see :meth:`.factorization_block`)
            - m - mixed_block for Inception_v3 model (see :meth:`.mixed_block`)
            - e - expanded_block for Inception_v3 model (see :meth:`.expanded_block`)
            - A - inception block A for Inception_v4 model (see :meth:`.inception_a_block`)
            - B - inception block B for Inception_v4 model (see :meth:`.inception_b_block`)
            - C - Inception block C for Inception_v4 model (see :meth:`.inception_c_block`)
            - G - grid-reduction block for Inception_v4 model (see :meth:`.reduction_grid_block`)
    arch : dict
        parameters for each block:
        key : str
            block's short name
        value : dict
            specific parameters (e.g. filters)
    """

    @classmethod
    def body(cls, inputs, name='body', **kwargs):
        """ Base layers.
        Parameters
        ----------
        inputs : tf.Tensor
            input tensor
        layout : str
            a sequence of blocks
        arch : dict
            parameters for each block
        name : str
            scope name
        Returns
        -------
        tf.Tensor
        """
        kwargs = cls.fill_params('body', **kwargs)
        arch, layout = cls.pop(['arch', 'layout'], kwargs)
        with tf.variable_scope(name):
            x, inputs = inputs, None
            # Count occurrences of each block letter in the layout;
            # layout_dict maps letter -> [index of current occurrence, total count].
            layout_dict = {}
            for block in layout:
                if block not in layout_dict:
                    layout_dict[block] = [-1, 0]
                layout_dict[block][1] += 1
            for i, block in enumerate(layout):
                layout_dict[block][0] += 1
                block_no = layout_dict[block][0]
                # Per-block parameters from `arch` override the shared kwargs.
                block_args = {**kwargs, **arch[block]}
                filters = block_args.pop('filters', None)
                if isinstance(filters, list):
                    # A list of filters holds one entry per occurrence of this block type.
                    filters = filters[block_no]
                if block == 'b':
                    x = cls.block(x, filters=filters, name='block-%d'%i, **block_args)
                elif block == 'r':
                    x = cls.reduction_block(x, filters=filters, name='reduction_block-%d'%i, **block_args)
                elif block == 'f':
                    x = cls.factorization_block(x, filters=filters, name='factorization_block-%d'%i, **block_args)
                elif block == 'm':
                    x = cls.mixed_block(x, filters=filters, name='mixed_block-%d'%i, **block_args)
                elif block == 'e':
                    x = cls.expanded_block(x, filters=filters, name='expanded_block-%d'%i, **block_args)
                elif block == 'A':
                    x = cls.inception_a_block(x, filters=filters, name='inception_a_block-%d'%i, **block_args)
                elif block == 'B':
                    x = cls.inception_b_block(x, filters=filters, name='inception_b_block-%d'%i, **block_args)
                elif block == 'C':
                    x = cls.inception_c_block(x, filters=filters, name='inception_c_block-%d'%i, **block_args)
                elif block == 'G':
                    x = cls.reduction_grid_block(x, filters=filters, name='reduction_grid_block-%d'%i, **block_args)
        return x

    @classmethod
    def reduction_block(cls, inputs, filters, layout='cna', name='reduction_block', **kwargs):
        """ Reduction block.
        For details see figure 10 in the article.
        Parameters
        ----------
        inputs : tf.Tensor
            input tensor
        filters : tuple of 4 ints
            number of output filters (indices 0..3 are used below)
        name : str
            scope name
        Returns
        -------
        tf.Tensor
        """
        with tf.variable_scope(name):
            # Stride-2 3x3 convolution branch.
            branch_3 = conv_block(inputs, layout, filters[3], 3, name='conv_3', strides=2, padding='valid', **kwargs)
            # 1x1 then 3x3 convolutions, followed by a stride-2 3x3 convolution.
            branch_1_3 = conv_block(inputs, layout*2, [filters[0]]+[filters[1]], [1, 3], name='conv_1_3', **kwargs)
            branch_1_3_3 = conv_block(branch_1_3, layout, filters[2], 3, name='conv_1_3_3', strides=2,
                                      padding='valid', **kwargs)
            # Stride-2 max-pooling branch.
            branch_pool = conv_block(inputs, layout='p', pool_size=3, pool_strides=2, name='max_pooling',
                                     padding='valid', **kwargs)
            # Concatenate the three branches along the channels axis.
            axis = cls.channels_axis(kwargs['data_format'])
            output = tf.concat([branch_3, branch_1_3_3, branch_pool], axis, name='output')
        return output
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.4 on 2020-03-20 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace SubmittedPlace.website with an optional email field."""

    dependencies = [
        ('places', '0024_submittedplace'),
    ]

    operations = [
        # Drop the old website field ...
        migrations.RemoveField(
            model_name='submittedplace',
            name='website',
        ),
        # ... and add an optional email field in its place.
        migrations.AddField(
            model_name='submittedplace',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
    ]
|
nilq/baby-python
|
python
|
"""Tests for claim_line model and the associated functions."""
from claims_to_quality.analyzer.models import claim_line
def test_str_method():
    """Test that claim lines are represented in a readable format."""
    line = claim_line.ClaimLine(
        {'clm_line_hcpcs_cd': 'code', 'mdfr_cds': ['GQ'], 'clm_pos_code': '24', 'clm_line_num': 1}
    )
    # str(line) is the idiomatic spelling of line.__str__().
    assert str(line) == 'ClaimLine - line_number: 1'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Script to preprocess OCR output for Tesseract
Usage:
python3 preprocess.py /path/to/input/dir \
/path/to/output/dir
"""
from glob import glob
import os
import shutil
import sys
import cv2
import numpy as np
def preprocess(img):
    """Takes a given image and returns the preprocessed version for
    tesseract.

    Args:
        img (cv2 image): The image to preprocess

    Returns:
        dict of cv2 image: The preprocessed images with the keys:
                           (`gray`, `gray_inv`, `thresh`) for the respective
                           images.
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    inverted = cv2.bitwise_not(grayscale)

    # The median intensity decides whether digits are light-on-dark or
    # dark-on-light; 128 is the midpoint of the 0-255 intensity scale.
    if np.median(grayscale) > 128:
        print('White Number Detected!')
        _, binarized = cv2.threshold(grayscale, 220, 255, cv2.THRESH_BINARY_INV)
    else:
        print('Black Number Detected!')
        _, binarized = cv2.threshold(grayscale, 128, 255, cv2.THRESH_BINARY)

    return {'gray': grayscale, 'gray_inv': inverted, 'thresh': binarized}
def main():
    """Entry point: validate CLI args, threshold every JPEG in the input
    directory, and copy matching JSON sidecar files to the output directory.

    Raises:
        SystemExit: if fewer than two command-line arguments are supplied.
    """
    # Explicit validation instead of `assert`, which is silently stripped
    # when Python runs with -O.
    if len(sys.argv) - 1 < 2:
        sys.exit("Must provide two arguments (in_dir, out_dir)")
    in_dir = sys.argv[1]
    out_dir = sys.argv[2]

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    for file in glob("%s/*.jpg" % in_dir):
        print("Processing '%s' for thresholding..." % file)
        img = cv2.imread(file)
        image_id = os.path.splitext(os.path.basename(file))[0]
        # Iterate the dict values directly; no need to materialize a list.
        for i, ppimg in enumerate(preprocess(img).values()):
            out_jpeg_file = "%s/%s.pp%s.jpg" % (out_dir, image_id, i)
            cv2.imwrite(out_jpeg_file, ppimg)

    for file in glob("%s/*.json" % in_dir):
        image_id = os.path.splitext(os.path.basename(file))[0]
        shutil.copy(file, "%s/%s.json" % (out_dir, image_id))


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from .davis import vis
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio App RDM is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio Records Permissions API."""
from elasticsearch_dsl.query import Q
from flask import current_app
from invenio_search.api import DefaultFilter, RecordsSearch
from .factories import record_read_permission_factory
def rdm_records_filter():
    """Records filter.

    Builds the OR-combination of the query filters exposed by the configured
    read permission factory; an empty (match-all) Q is returned when the
    factory provides no filters.
    """
    # TODO: Implement with new permissions metadata
    try:
        perm_factory = current_app.config["RECORDS_REST_ENDPOINTS"]["recid"][
            "read_permission_factory_imp"
        ]()  # noqa
    except KeyError:
        perm_factory = record_read_permission_factory

    # FIXME: this might fail if the factory returns None, meaning no
    # "query_filter" was implemented in the generators. However, IfPublic
    # should always be there.
    filters = perm_factory.query_filters
    if not filters:
        return Q()

    combined = None
    for query_filter in filters:
        combined = (combined | query_filter) if combined else query_filter
    return combined
# TODO: Move this to invenio-rdm-records and
# * have it provide the permissions OR
# * rely on app's current_search for tests
class RecordsSearch(RecordsSearch):
    """Search class for RDM records."""

    # NOTE(review): intentionally shadows the imported invenio_search
    # ``RecordsSearch`` so the rest of the module picks up this subclass —
    # confirm this is deliberate before renaming.
    class Meta:
        """Default index and filter for frontpage search."""

        index = "records"
        doc_types = None
        default_filter = DefaultFilter(rdm_records_filter)
|
nilq/baby-python
|
python
|
#! python3
# aoc_13.py
# Advent of code:
# https://adventofcode.com/2021/day/13
# https://adventofcode.com/2021/day/13#part2
#
def part_one(input) -> int:
    """Solve AoC 2021 day 13: read dot coordinates, apply the hard-coded
    fold sequence, print the resulting grid, and return the dot count.

    Args:
        input: path to the puzzle input file (one "x,y" pair per line;
               the "fold along ..." lines must already be stripped out).

    Returns:
        int: number of visible dots after all folds.
    """
    # Coordinates are stored as [row, col] == [y, x].  The original called
    # line.strip() and discarded the result (a no-op); int() tolerates the
    # trailing newline anyway, and blank lines are now skipped explicitly.
    with open(input, 'r') as inp:
        coords = [[int(line.split(',')[1]), int(line.split(',')[0])]
                  for line in inp if line.strip()]

    R = 447 * 2 + 1  # grid height, from the first y-fold at 447
    C = 655 * 2 + 1  # grid width, from the first x-fold at 655
    print('R:', R, 'C:', C)
    dmap = [[0 for columns in range(C)] for rows in range(R)]
    for r, c in coords:
        dmap[r][c] = 1

    # Hard-coded fold sequence taken from the bottom of the puzzle input.
    folds = (('x', 655), ('y', 447), ('x', 327), ('y', 223),
             ('x', 163), ('y', 111), ('x', 81), ('y', 55),
             ('x', 40), ('y', 27), ('y', 13), ('y', 6))
    for axis, pos in folds:
        dmap = xfold(dmap, pos) if axis == 'x' else yfold(dmap, pos)

    for line in dmap:
        print(line)
    return sum(sum(row) for row in dmap)
def yfold(m, yf):
    """Fold the grid upward along horizontal line ``yf``.

    Row ``yf`` itself is discarded; row ``yf + k`` is OR-merged into row
    ``yf - k``.

    Args:
        m: 2D list of 0/1 ints.
        yf: index of the fold row.

    Returns:
        New 2D list with ``yf`` rows.
    """
    # The original indexed ``m[:][y]``, which builds a full shallow copy of
    # the grid just to pick one row; ``m[y]`` is equivalent and copy-free.
    return [[a or b for a, b in zip(m[y], m[-y - 1])] for y in range(yf)]
def xfold(m, xf):
    """Fold the grid leftward along vertical line ``xf``.

    Column ``xf`` itself is dropped; column ``xf + k`` is OR-merged into
    column ``xf - k``.
    """
    folded = []
    for row in m:
        left = row[:xf]
        right_reversed = row[:xf:-1]  # columns after the fold, reversed
        folded.append([a or b for a, b in zip(left, right_reversed)])
    return folded
def part_two(input) -> int:
    """Placeholder: the part-two answer is read off the grid printed by
    part_one, so this stub always returns 0."""
    return 0
if __name__ == "__main__":
# ex_folds =
# inp_folds =
example_path = "./aoc_13_example.txt"
input_path = "./aoc_13_input.txt"
print("---Part One---")
# print(part_one(example_path))
print(part_one(input_path))
print("---Part Two---")
# print(part_two(input_path))
#fold along x=655
#fold along y=447
#fold along x=327
#fold along y=223
#fold along x=163
#fold along y=111
#fold along x=81
#fold along y=55
#fold along x=40
#fold along y=27
#fold along y=13
#fold along y=6
|
nilq/baby-python
|
python
|
# Copyright (C) 2018 Shriram Bhat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Character map for unicode Kannada script with Latin."""
# Parallel transliteration tables: "Knda"[i] maps to "Latn"[i].  Each row
# holds 16 entries; empty strings mark code points with no mapping.
# Presumably the rows follow the Unicode Kannada block layout — TODO confirm
# against the Unicode chart before relying on positional semantics.
charmap_iso15919 = {
    "Knda": [
        u"ಀ", u"ಁ", u"ಂ", u"ಃ", u"಄", u"ಅ", u"ಆ", u"ಇ", u"ಈ", u"ಉ", u"ಊ", u"ಋ", u"ಌ", u"", u"ಎ", u"ಏ",
        u"ಐ", u"", u"ಒ", u"ಓ", u"ಔ", u"ಕ", u"ಖ", u"ಗ", u"ಘ", u"ಙ", u"ಚ", u"ಛ", u"ಜ", u"ಝ", u"ಞ", u"ಟ",
        u"ಠ", u"ಡ", u"ಢ", u"ಣ", u"ತ", u"ಥ", u"ದ", u"ಧ", u"ನ", u"", u"ಪ", u"ಫ", u"ಬ", u"ಭ", u"ಮ", u"ಯ",
        u"ರ", u"ಱ", u"ಲ", u"ಳ", u"", u"ವ", u"ಶ", u"ಷ", u"ಸ", u"ಹ", u"", u"", u"಼", u"ಽ", u"ಾ", u"ಿ",
        u"ೀ", u"ು", u"ೂ", u"ೃ", u"ೄ", u"", u"ೆ", u"ೇ", u"ೈ", u"", u"ೊ", u"ೋ", u"ೌ", u"್", u"", u"",
        u"", u"", u"", u"", u"", u"ೕ", u"ೖ", u"", u"", u"", u"", u"", u"", u"ೝ", u"ೞ", u"",
        u"ೠ", u"ೡ", u"ೢ", u"ೣ", u"", u"", u"೦", u"೧", u"೨", u"೩", u"೪", u"೫", u"೬", u"೭", u"೮", u"೯",
        u"", u"ೱ", u"ೲ", u"ೳ", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u""
    ],
    "Latn": [
        u"", u"m̐", u"ṁ", u"ḥ", u"", u"a", u"ā", u"i", u"ī", u"u", u"ū", u"ṛ", u"ḷ", u"ê", u"e", u"ē",
        u"ai", u"ô", u"o", u"ō", u"au", u"ka", u"kha", u"ga", u"gha", u"ṅa", u"ca", u"cha", u"ja", u"jha", u"ña", u"ṭa",
        u"ṭha", u"ḍa", u"ḍha", u"ṇa", u"ta", u"tha", u"da", u"dha", u"na", u"ṉa", u"pa", u"pha", u"ba", u"bha", u"ma", u"ya",
        u"ra", u"ṟa", u"la", u"ḷa", u"ḻa", u"va", u"śa", u"ṣa", u"sa", u"ha", u"", u"", u"", u"'", u"ā", u"i",
        u"ī", u"u", u"ū", u"ṛ", u"ṝ", u"ê", u"e", u"ē", u"ai", u"ô", u"o", u"ō", u"au", u"", u"", u"",
        u"oṃ", u"", u"", u"", u"", u"", u"", u"", u"qa", u"ḵẖa", u"ġ", u"za", u"ṛa", u"ṛha", u"fa", u"ẏa",
        u"ṝ", u"ḹ", u"ḷ", u"ḹ", u".", u"..", u"0", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9",
        u"…", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u"", u""
    ],
}
|
nilq/baby-python
|
python
|
"""Implements the Projection extension.
https://github.com/stac-extensions/projection
"""
from typing import Any, Dict, Generic, List, Optional, Set, TypeVar, cast
import pystac
from pystac.extensions.hooks import ExtensionHooks
from pystac.extensions.base import (
ExtensionManagementMixin,
PropertiesExtension,
)
T = TypeVar("T", pystac.Item, pystac.Asset)
SCHEMA_URI = "https://stac-extensions.github.io/projection/v1.0.0/schema.json"
EPSG_PROP = "proj:epsg"
WKT2_PROP = "proj:wkt2"
PROJJSON_PROP = "proj:projjson"
GEOM_PROP = "proj:geometry"
BBOX_PROP = "proj:bbox"
CENTROID_PROP = "proj:centroid"
SHAPE_PROP = "proj:shape"
TRANSFORM_PROP = "proj:transform"
class ProjectionExtension(
    Generic[T], PropertiesExtension, ExtensionManagementMixin[pystac.Item]
):
    """ProjectionExtension extends an Item (or Asset) with the Projection
    Extension.

    The Projection extension adds projection information to STAC Items.

    Args:
        item : The item to be extended.

    Attributes:
        item : The Item that is being extended.

    Note:
        Using ProjectionExtension to directly wrap an item will add the 'proj'
        extension ID to the item's stac_extensions.
    """

    def __init__(self, item: pystac.Item) -> None:
        self.item = item

    def apply(
        self,
        epsg: Optional[int],
        wkt2: Optional[str] = None,
        projjson: Optional[Dict[str, Any]] = None,
        geometry: Optional[Dict[str, Any]] = None,
        bbox: Optional[List[float]] = None,
        centroid: Optional[Dict[str, float]] = None,
        shape: Optional[List[int]] = None,
        transform: Optional[List[float]] = None,
    ) -> None:
        """Applies Projection extension properties to the extended Item.

        Args:
            epsg : REQUIRED. EPSG code of the datasource.
            wkt2 : WKT2 string representing the Coordinate Reference
                System (CRS) that the ``geometry`` and ``bbox`` fields represent
            projjson : PROJJSON dict representing the
                Coordinate Reference System (CRS) that the ``geometry`` and ``bbox``
                fields represent
            geometry : GeoJSON Polygon dict that defines the footprint of
                this Item.
            bbox : Bounding box of the Item in the asset CRS in
                2 or 3 dimensions.
            centroid : A dict with members 'lat' and 'lon' that defines
                coordinates representing the centroid of the item in the asset data CRS.
                Coordinates are defined in latitude and longitude, even if the data
                coordinate system may not use lat/long.
            shape : Number of pixels in Y and X directions for the
                default grid.
            transform : The affine transformation coefficients for
                the default grid
        """
        self.epsg = epsg
        self.wkt2 = wkt2
        self.projjson = projjson
        self.geometry = geometry
        self.bbox = bbox
        self.centroid = centroid
        self.shape = shape
        self.transform = transform

    @property
    def epsg(self) -> Optional[int]:
        """Get or sets the EPSG code of the datasource.

        A Coordinate Reference System (CRS) is the data reference system (sometimes
        called a 'projection') used by the asset data, and can usually be referenced
        using an `EPSG code <http://epsg.io/>`_.

        If the asset data does not have a CRS, such as in the case of non-rectified
        imagery with Ground Control Points, epsg should be set to None.
        It should also be set to null if a CRS exists, but for which there is no valid
        EPSG code.

        Returns:
            int
        """
        return self._get_property(EPSG_PROP, int)

    @epsg.setter
    def epsg(self, v: Optional[int]) -> None:
        # epsg is REQUIRED by the spec, so keep the key present even when None.
        self._set_property(EPSG_PROP, v, pop_if_none=False)

    @property
    def wkt2(self) -> Optional[str]:
        """Get or sets the WKT2 string representing the Coordinate Reference System (CRS)
        that the proj:geometry and proj:bbox fields represent

        This value is a
        `WKT2 string <http://docs.opengeospatial.org/is/12-063r5/12-063r5.html>`_.

        If the data does not have a CRS, such as in the case of non-rectified imagery
        with Ground Control Points, wkt2 should be set to null. It should also be set
        to null if a CRS exists, but for which a WKT2 string does not exist.

        Returns:
            str
        """
        return self._get_property(WKT2_PROP, str)

    @wkt2.setter
    def wkt2(self, v: Optional[str]) -> None:
        self._set_property(WKT2_PROP, v)

    @property
    def projjson(self) -> Optional[Dict[str, Any]]:
        """Get or sets the PROJJSON string representing the Coordinate Reference System (CRS)
        that the proj:geometry and proj:bbox fields represent

        This value is a
        `PROJJSON object <https://proj.org/specifications/projjson.html>`_.

        If the data does not have a CRS, such as in the case of non-rectified imagery
        with Ground Control Points, projjson should be set to null. It should also be
        set to null if a CRS exists, but for which a PROJJSON string does not exist.

        The schema for this object can be found
        `here <https://proj.org/schemas/v0.2/projjson.schema.json>`_.

        Returns:
            dict
        """
        return self._get_property(PROJJSON_PROP, Dict[str, Any])

    @projjson.setter
    def projjson(self, v: Optional[Dict[str, Any]]) -> None:
        self._set_property(PROJJSON_PROP, v)

    @property
    def geometry(self) -> Optional[Dict[str, Any]]:
        """Get or sets a Polygon GeoJSON dict representing the footprint of this item.

        This dict should be formatted according the Polygon object format specified in
        `RFC 7946, sections 3.1.6 <https://tools.ietf.org/html/rfc7946>`_,
        except not necessarily in EPSG:4326 as required by RFC7946. Specified based on
        the ``epsg``, ``projjson`` or ``wkt2`` fields (not necessarily EPSG:4326).
        Ideally, this will be represented by a Polygon with five coordinates, as the
        item in the asset data CRS should be a square aligned to the original CRS grid.

        Returns:
            dict
        """
        return self._get_property(GEOM_PROP, Dict[str, Any])

    @geometry.setter
    def geometry(self, v: Optional[Dict[str, Any]]) -> None:
        self._set_property(GEOM_PROP, v)

    @property
    def bbox(self) -> Optional[List[float]]:
        """Get or sets the bounding box of the assets represented by this item in the asset
        data CRS.

        Specified as 4 or 6 coordinates based on the CRS defined in the ``epsg``,
        ``projjson`` or ``wkt2`` properties. First two numbers are coordinates of the
        lower left corner, followed by coordinates of upper right corner, e.g.,
        [west, south, east, north], [xmin, ymin, xmax, ymax], [left, down, right, up],
        or [west, south, lowest, east, north, highest]. The length of the array
        must be 2*n where n is the number of dimensions.

        Returns:
            List[float]
        """
        return self._get_property(BBOX_PROP, List[float])

    @bbox.setter
    def bbox(self, v: Optional[List[float]]) -> None:
        self._set_property(BBOX_PROP, v)

    @property
    def centroid(self) -> Optional[Dict[str, float]]:
        """Get or sets coordinates representing the centroid of the item in the asset data CRS.

        Coordinates are defined in latitude and longitude, even if the data coordinate
        system does not use lat/long.

        Example::

            item.ext.proj.centroid = { 'lat': 0.0, 'lon': 0.0 }

        Returns:
            dict
        """
        return self._get_property(CENTROID_PROP, Dict[str, float])

    @centroid.setter
    def centroid(self, v: Optional[Dict[str, float]]) -> None:
        self._set_property(CENTROID_PROP, v)

    @property
    def shape(self) -> Optional[List[int]]:
        """Get or sets the number of pixels in Y and X directions for the default grid.

        The shape is an array of integers that represents the number of pixels in the
        most common pixel grid used by the item's assets. The number of pixels should
        be specified in Y, X order. If the shape is defined in an item's properties it
        is used as the default shape for all assets that don't have an overriding shape.

        Returns:
            List[int]
        """
        return self._get_property(SHAPE_PROP, List[int])

    @shape.setter
    def shape(self, v: Optional[List[int]]) -> None:
        self._set_property(SHAPE_PROP, v)

    @property
    def transform(self) -> Optional[List[float]]:
        """Get or sets the the affine transformation coefficients for the default grid.

        The transform is a linear mapping from pixel coordinate space (Pixel, Line) to
        projection coordinate space (Xp, Yp). It is a 3x3 matrix stored as a flat array of 9
        elements in row major order. Since the last row is always 0,0,1 it can be omitted, in
        which case only 6 elements are recorded. This mapping can be obtained from
        GDAL `GetGeoTransform <https://gdal.org/api/gdaldataset_cpp.html#_CPPv4N11GDALDataset15GetGeoTransformEPd>`_
        or the
        Rasterio `Transform <https://rasterio.readthedocs.io/en/stable/api/rasterio.io.html#rasterio.io.BufferedDatasetWriter.transform>`_.

        Returns:
            List[float]
        """  # noqa: E501
        return self._get_property(TRANSFORM_PROP, List[float])

    @transform.setter
    def transform(self, v: Optional[List[float]]) -> None:
        self._set_property(TRANSFORM_PROP, v)

    @classmethod
    def get_schema_uri(cls) -> str:
        return SCHEMA_URI

    @staticmethod
    def ext(obj: T) -> "ProjectionExtension[T]":
        """Return the concrete extension wrapper for *obj* (Item or Asset).

        Raises:
            pystac.ExtensionTypeError: if *obj* is neither an Item nor an Asset.
        """
        if isinstance(obj, pystac.Item):
            return cast(ProjectionExtension[T], ItemProjectionExtension(obj))
        elif isinstance(obj, pystac.Asset):
            return cast(ProjectionExtension[T], AssetProjectionExtension(obj))
        else:
            # Fixed copy-paste error: the message previously referenced
            # "File extension" (from the File extension module).
            raise pystac.ExtensionTypeError(
                f"ProjectionExtension does not apply to type {type(obj)}"
            )
class ItemProjectionExtension(ProjectionExtension[pystac.Item]):
    """Concrete ProjectionExtension wrapper for a :class:`pystac.Item`;
    reads and writes the proj:* fields in the item's properties dict."""

    def __init__(self, item: pystac.Item):
        self.item = item
        self.properties = item.properties

    def __repr__(self) -> str:
        return "<ItemProjectionExtension Item id={}>".format(self.item.id)
class AssetProjectionExtension(ProjectionExtension[pystac.Asset]):
    """Concrete ProjectionExtension wrapper for a :class:`pystac.Asset`."""

    def __init__(self, asset: pystac.Asset):
        self.asset_href = asset.href
        self.properties = asset.properties
        # Only an owning Item contributes fallback read properties;
        # other owner types (e.g. Collection) are ignored.
        if asset.owner and isinstance(asset.owner, pystac.Item):
            self.additional_read_properties = [asset.owner.properties]

    def __repr__(self) -> str:
        return "<AssetProjectionExtension Asset href={}>".format(self.asset_href)
class ProjectionExtensionHooks(ExtensionHooks):
    """Registration/migration hooks for the Projection extension."""

    schema_uri: str = SCHEMA_URI
    # Set literals are the idiomatic (and faster) spelling of set([...]).
    prev_extension_ids: Set[str] = {"proj", "projection"}
    stac_object_types: Set[pystac.STACObjectType] = {pystac.STACObjectType.ITEM}


PROJECTION_EXTENSION_HOOKS: ExtensionHooks = ProjectionExtensionHooks()
|
nilq/baby-python
|
python
|
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def home_view(request, *args, **kwargs):
    """Render the site home page.

    Args:
        request: the incoming HttpRequest.

    Returns:
        HttpResponse with the rendered ``home.html`` template.
    """
    # Removed the commented-out HttpResponse experiment that shadowed this.
    return render(request, "home.html", {})
|
nilq/baby-python
|
python
|
import re
import utils as u
# Load the puzzle input that sits next to this script.
with open(__file__ + ".input.txt", "r+") as file:
    input_str = file.read()

# One policy line looks like "1-3 a: abcde".
regex = re.compile(r"(?P<from>\d+)-(?P<to>\d+)\s(?P<letter>\w):\s(?P<password>\w+)")
def is_valid_password(input_str):
    """Part 1 rule: the letter count must lie within the from-to range."""
    match = regex.search(input_str)
    lo = int(match.group("from"))
    hi = int(match.group("to"))
    occurrences = match.group("password").count(match.group("letter"))
    return lo <= occurrences <= hi
def is_valid_password_for_part_two(input_str):
    """Part 2 rule: exactly one of the two 1-based positions holds the letter."""
    first, second, letter, password = regex.search(input_str).groups()
    positions = (password[int(first) - 1], password[int(second) - 1])
    return positions.count(letter) == 1
# part 1 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_
# Sanity checks against the worked examples from the puzzle statement.
u.assert_equals(is_valid_password("1-3 a: abcde"), True)
u.assert_equals(is_valid_password("1-3 b: cdefg"), False)
u.assert_equals(is_valid_password("2-9 c: ccccccccc"), True)
u.answer_part_1(sum(1 for string in input_str.split("\n") if is_valid_password(string)))
# 347 too low
# 519 OK
# part 2 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_
u.assert_equals(is_valid_password_for_part_two("1-3 a: abcde"), True)
u.assert_equals(is_valid_password_for_part_two("1-3 b: cdefg"), False)
u.assert_equals(is_valid_password_for_part_two("2-9 c: ccccccccc"), False)
u.answer_part_2(
    sum(1 for string in input_str.split("\n") if is_valid_password_for_part_two(string))
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# libfv.py────────────────────────────────────────────────────────────────┐
# │ │
# │ A Python library module that supports read/modification/write of .otf │
# │ and .ttf font version strings │
# │ │
# │ Copyright 2018 Christopher Simpkins │
# │ MIT License │
# │ │
# │ Source: https://github.com/source-foundry/font-v │
# │ │
# └───────────────────────────────────────────────────────────────────────┘
from __future__ import unicode_literals
import os
import re
from fontTools import ttLib
from git import Repo
from fontv.utilities import get_git_root_path
class FontVersion(object):
"""
FontVersion is a ttf and otf font version string class that provides support for font version string reads,
reporting, modification, & writes. It provides full support for the OpenFV font versioning specification
(https://github.com/openfv/openfv). Support is provided for instantiation from ttf and otf fonts, as well
as from fontTools.ttLib.ttFont objects (https://github.com/fonttools/fonttools).
The class works on Python "strings". String types indicated below refer to the Python2 unicode type and Python3
string type.
PUBLIC ATTRIBUTES:
contains_metadata: (boolean) boolean for presence of metadata in version string
contains_state: (boolean) boolean for presence of state substring metadata in the version string
contains_status: (boolean) boolean for presence of development/release status substring in the version string
develop_string: (string) The string to use for development builds in the absence of git commit SHA1 string
fontpath: (string) The path to the font file
is_development: (boolean) boolean for presence of development status substring at version_string_parts[1]
is_release: (boolean) boolean for presence of release status status substring at version_string_parts[1]
metadata: (list) A list of metadata substrings in the version string. Either version_string_parts[1:] or empty list
release_string: (string) The string to use for release builds in the absence of git commit SHA1 string
sha1_develop: (string) The string to append to the git SHA1 hash string for development builds
sha1_release: (string) The string to append to the git SHA1 hash string for release builds
state: (string) The state metadata substring
ttf: (fontTools.ttLib.TTFont) for font file
version_string_parts: (list) List that maintains in memory semicolon parsed substrings of font version string
version: (string) The version number substring formatted as "Version X.XXX"
PRIVATE ATTRIBUTES
_nameID_5_dict: (dictionary) {(platformID, platEncID,langID) : fontTools.ttLib.TTFont name record ID 5 object } map
:parameter font: (string) file path to the .otf or .ttf font file OR (ttLib.TTFont) object for appropriate font file
:parameter develop: (string) the string to use for development builds in the absence of git commit SHA1 string
:parameter release: (string) the string to use for release builds in the absence of a git commit SHA1 string
:parameter sha1_develop: (string) the string to append to the git SHA1 hash string for development builds
:parameter sha1_release: (string) the string to append to the git SHA1 hash string for release builds
:raises: fontTools.ttLib.TTLibError if fontpath is not a ttf or otf font
:raises: IndexError if there are no nameID 5 records in the font name table
:raises: IOError if fontpath does not exist
"""
def __init__(
    self,
    font,
    develop="DEV",
    release="RELEASE",
    sha1_develop="-dev",
    sha1_release="-release",
):
    """Instantiate from a font file path or a ttLib.TTFont object, then read
    the nameID 5 and head.fontRevision version data into the object
    (see the class docstring for full parameter documentation)."""
    try:
        # assume that it is a ttLib.TTFont object and attempt to call object attributes
        self.fontpath = font.reader.file.name
        # if it does not raise AttributeError, we guessed correctly, can set the ttf attr here
        self.ttf = font
    except AttributeError:
        # if above attempt to call TTFont attribute raises AttributeError (as it would with string file path)
        # then instantiate a ttLib.TTFont object and define the fontpath attribute with the file path string
        self.ttf = ttLib.TTFont(file=font, recalcTimestamp=False)
        self.fontpath = font
    self.develop_string = develop
    self.release_string = release
    self.sha1_develop = sha1_develop
    self.sha1_release = sha1_release
    # name.ID = 5 version string substring data
    self.name_ID5_dict = {}
    self.version_string_parts = (
        []
    )  # list of substring items in version string (; delimited parse to list)
    self.version = ""
    self.state = ""
    self.metadata = []
    # truth test values for version string contents, updated with self._parse() method calls following updates to
    # in memory version string data with methods in this library
    self.contains_metadata = False
    self.contains_state = False
    self.contains_status = False
    self.is_development = False
    self.is_release = False
    # head.fontRevision data. float type
    self.head_fontRevision = 0.0
    # object instantiation method call (truth test values updated in the following method)
    self._read_version_string()
def __eq__(self, otherfont):
    """
    Equality comparison between FontVersion objects

    :param otherfont: fontv.libfv.FontVersion object for comparison

    :return: (boolean) True = versions are the same; False = versions are not the same
    """
    if type(otherfont) is not type(self):
        return False
    return self.version_string_parts == otherfont.version_string_parts
def __ne__(self, otherfont):
    """
    Inequality comparison between FontVersion objects

    :param otherfont: fontv.libfv.FontVersion object for comparison

    :return: (boolean) True = versions differ; False = versions are the same
    """
    versions_equal = self.__eq__(otherfont)
    return not versions_equal
def __str__(self):
    """
    Human readable string formatting

    :return: (string)
    """
    lines = [
        "<fontv.libfv.FontVersion> ",
        self.get_name_id5_version_string(),
        "file path: " + self.fontpath,
    ]
    return os.linesep.join(lines)
# TODO: confirm comparisons of version numbers like "Version 1.001", "Version 1.01", "Version 1.1" as not the same
# TODO: before this is released. Will need to be documented as such because this is not obvious behavior
# def __gt__(self, otherfont):
# """
#
# :param otherfont:
#
# :return:
# """
# return self.get_version_number_tuple() > otherfont.get_version_number_tuple()
#
# def __lt__(self, otherfont):
# """
#
# :param otherfont:
#
# :return:
# """
# return self.get_version_number_tuple() < otherfont.get_version_number_tuple()
def _parse(self):
    """
    Private method that refreshes the FontVersion attributes derived from the
    in-memory version string.  Called on instantiation and at the end of every
    setter in this library so the truth-test attributes stay current.

    :return: None
    """
    self._parse_metadata()   # metadata substrings
    self._parse_state()      # state substring data
    self._parse_status()     # development / release status indicator data
def _read_version_string(self):
    """
    Private method that reads OpenType name ID 5 and head.fontRevision record data from a fontTools.ttLib.ttFont
    object and sets FontVersion object properties. The method is called on instantiation of a FontVersion object

    :raises: IndexError if the font contains no nameID 5 records

    :return: None
    """
    # Read the name.ID=5 record
    namerecord_list = self.ttf["name"].names
    # read in name records
    for record in namerecord_list:
        if record.nameID == 5:
            # map dictionary as {(platformID, platEncID, langID) : version string}
            recordkey = (record.platformID, record.platEncID, record.langID)
            self.name_ID5_dict[recordkey] = record.toUnicode()
    # assert that at least one nameID 5 record was obtained from the font in order to instantiate
    # a FontVersion object
    if len(self.name_ID5_dict) == 0:
        raise IndexError(
            "Unable to read nameID 5 version records from the font " + self.fontpath
        )
    # define the version string from the dictionary
    # NOTE(review): dict iteration order decides which record wins here —
    # presumably the first nameID 5 record encountered in the font; confirm
    # this is the intended record when fonts carry several platforms.
    for vs in self.name_ID5_dict.values():
        version_string = vs
        break  # take the first value that dictionary serves up
    # parse version string into substrings
    self._parse_version_substrings(version_string)
    # define version as first substring
    self.version = self.version_string_parts[0]
    # Read the head.fontRevision record (stored as a float)
    self.head_fontRevision = self.ttf["head"].fontRevision
    self._parse()  # update FontVersion object attributes based upon the data read in
def _get_repo_commit(self):
    """
    Private method that makes a system git call via the GitPython library and returns a short git commit
    SHA1 hash string for the commit at HEAD using `git rev-list`.

    :return: (string) short git commit SHA1 hash string
    """
    repo = Repo(get_git_root_path(self.fontpath))
    gitpy = repo.git
    # git rev-list --abbrev-commit --max-count=1 --format="%h" HEAD - abbreviated unique sha1 for the repository
    # number of sha1 hex characters determined by git (addresses https://github.com/source-foundry/font-v/issues/2)
    full_git_sha_string = gitpy.rev_list(
        "--abbrev-commit", "--max-count=1", '--format="%h"', "HEAD"
    )
    # Output has the form 'commit <sha>\n"<short sha>"': the short hash sits
    # on the second line, quoted.  (The redundant unicode alias variable in
    # the original implementation has been removed — it was a no-op.)
    sha_string_list = full_git_sha_string.split("\n")
    return sha_string_list[1].replace('"', "")
def _parse_metadata(self):
    """
    Private method that parses a font version string for semicolon delimited font version
    string metadata. Metadata are defined as anything beyond the first substring item of a version string.

    See OpenFV specification for version substring definition details (https://github.com/openfv/openfv)

    :return: None
    """
    if len(self.version_string_parts) > 1:
        # set to True if there are > 1 sub strings as others are defined as metadata
        self.contains_metadata = True
        # A slice copy replaces the original element-by-element append loop;
        # a fresh list is built either way, never an alias of the parts list.
        self.metadata = list(self.version_string_parts[1:])
    else:
        self.metadata = []
        self.contains_metadata = False
def _parse_state(self):
    """
    Private method that parses a font version string for [ ... ] delimited data that represents the State
    substring as defined by the OpenFV specification, and updates the State data on this object.

    See OpenFV specification for the state substring metadata definition (https://github.com/openfv/openfv)

    :return: None
    """
    # Default to "no state" and let a successful match below override it.
    self.contains_state = False
    self.state = ""
    if len(self.version_string_parts) < 2:
        return
    # The state substring, when present, sits at list position 1 per the
    # OpenFV specification.
    matched, state_value = self._is_state_substring_return_state_match(
        self.version_string_parts[1]
    )
    if matched:
        self.contains_state = True
        self.state = state_value
def _parse_status(self):
    """
    Private method that parses a font version string to determine whether it contains development/release
    Status substring metadata as defined by the OpenFV specification, and updates the Status data on this object.

    See OpenFV specification for the Status substring metadata definition (https://github.com/openfv/openfv)

    :return: None
    """
    if len(self.version_string_parts) < 2:
        self.contains_status = False
        self.is_development = False
        self.is_release = False
        return
    # Status metadata, when present, is list item 1 per the OpenFV spec.
    status_needle = self.version_string_parts[1]
    self.is_development = self._is_development_substring(status_needle)
    self.is_release = self._is_release_substring(status_needle)
    self.contains_status = self.is_development or self.is_release
def _parse_version_substrings(self, version_string):
    """
    Private method that splits a full semicolon delimited version string on semicolon characters to a Python list.

    :param version_string: (string) the semicolon delimited version string to split

    :return: None
    """
    # str.split already returns a single-element list when the delimiter is
    # absent, so the original `if ";" in version_string` branch was redundant.
    self.version_string_parts = version_string.split(";")
    self.version = self.version_string_parts[0]
def _set_state_status_substring(self, state_status_string):
    """
    Private method that sets the State/Status substring in the FontVersion.version_string_parts[1] list position.
    The method preserves Other metadata when present in the version string.

    See OpenFV specification for State/Status substring and Other metdata definition details
    (https://github.com/openfv/openfv)

    :param state_status_string: (string) the string value to insert at the status substring position of the
                                self.version_string_parts list

    :return: None
    """
    if len(self.version_string_parts) > 1:
        prestring = self.version_string_parts[1]
        state_response = self._is_state_substring_return_state_match(prestring)
        is_state_substring = state_response[0]
        if (
            self._is_release_substring(prestring)
            or self._is_development_substring(prestring)
            or is_state_substring
        ):
            # directly replace when existing status substring
            self.version_string_parts[1] = state_status_string
        else:
            # if the second item of the substring list is not a status string, save it and all subsequent list items
            # then create a new list with inserted status string value
            # NOTE: self.metadata still holds the pre-insertion substrings at
            # this point; it is refreshed by the _parse() call below.
            self.version_string_parts = [
                self.version_string_parts[0]
            ]  # redefine list as list with version number
            self.version_string_parts.append(
                state_status_string
            )  # define the status substring as next item
            for (
                item
            ) in (
                self.metadata
            ):  # iterate through all previous metadata substrings and append to list
                self.version_string_parts.append(item)
    else:
        # if the version string is defined as only a version number substring (i.e. list size = 1),
        # write the new status substring to the list. Nothing else required
        self.version_string_parts.append(state_status_string)
    # update FontVersion truth testing properties based upon the new data
    self._parse()
def _is_development_substring(self, needle):
"""
Private method that returns a boolean that indicates whether the needle string meets the OpenFV specification
definition of a Development Status metadata substring.
See OpenFV specification for Status substring definition details (https://github.com/openfv/openfv)
:param needle: (string) test string
:return: boolean True = is development substring and False = is not a development substring
"""
if (
self.develop_string == needle.strip()
or self.sha1_develop in needle[-len(self.sha1_develop) :]
):
return True
else:
return False
def _is_release_substring(self, needle):
"""
Private method that returns a boolean that indicates whether the needle string meets the OpenFV specification
definition of a Release Status metadata substring.
See OpenFV specification for Status substring definition details (https://github.com/openfv/openfv)
:param needle: (string) test string
:return: boolean True = is release substring and False = is not a release substring
"""
if (
self.release_string == needle.strip()
or self.sha1_release in needle[-len(self.sha1_release) :]
):
return True
else:
return False
def _is_state_substring_return_state_match(self, needle):
"""
Private method that returns a tuple of boolean, string. The boolean value reflects the truth test needle is a
State substring. The match value is defined as the contents inside [ and ] delimiters as defined by the
regex pattern. If there is no match, the string item in the tuple is an empty string.
See OpenFV specification for State substring definition details (https://github.com/openfv/openfv)
:param needle: (string) test string to attempt match for state substring
:return: (boolean, string) see full docstring for details re: interpretation of returned values
"""
regex_pattern = r"\s?\[([a-zA-Z0-9_\-\.]{1,50})\]"
p = re.compile(regex_pattern)
m = p.match(needle)
if m:
return True, m.group(1)
else:
return False, ""
def clear_metadata(self):
    """
    Public method that removes every metadata substring from the in-memory
    version string, leaving only the version number substring (list item 0).
    Intended to strip unnecessary version string data from a font binary.
    See the OpenFV specification (https://github.com/openfv/openfv).
    :return: None
    """
    # Rebind to a fresh one-element list (same effect as [parts[0]]).
    self.version_string_parts = self.version_string_parts[:1]
    self._parse()
def get_version_number_string(self):
    """
    Public method that returns the version number in XXX.XXX format, with up to
    three digits on either side of the period (OpenFV version number format,
    https://github.com/openfv/openfv).
    :return: string (Python 3) or unicode (Python 2); '' when no version number
    can be parsed from the version substring
    """
    match = re.search(r"\d{1,3}\.\d{1,3}", self.version)
    return match.group(0) if match else ""
def get_version_number_tuple(self):
    """
    Public method that returns a tuple of integers:
    (major version, minor digit 1, minor digit 2, ...)
    where each decimal position of the minor version string becomes its own
    tuple entry, per the OpenFV version number format
    (https://github.com/openfv/openfv).
    :return: tuple of int, or None when the version number cannot be parsed
    """
    match = re.search(r"\d{1,3}\.\d{1,3}", self.version)
    if match is None:
        return None
    major, _, minor = match.group(0).partition(".")
    # One tuple entry per minor-version digit, matching the OpenFV layout.
    return tuple([int(major)] + [int(digit) for digit in minor])
def get_head_fontrevision_version_number(self):
    """
    Public method that returns the version number that is parsed from the
    OpenType head.fontRevision record as a float value.
    :return: float
    """
    return self.head_fontRevision
# TODO: remove this deprecated method (commented out in v0.7.0, deprecation warnings in v0.6.0)
# def get_version_string(self):
# """
# DEPRECATED: Please convert to use of FontVersion.get_name_id5_version_string() method
# """
# warnings.simplefilter('always')
# warnstring = "[WARNING] FontVersion.get_version_string is a deprecated method. Please convert to " \
# "FontVersion.get_name_id5_version_string."
# warnings.warn(warnstring, DeprecationWarning, stacklevel=2)
# return ";".join(self.version_string_parts)
def get_name_id5_version_string(self):
    """
    Public method that returns the full OpenType name ID 5 version string as the
    semicolon-joined contents of the FontVersion.version_string_parts Python list.
    :return: string (Python 3) or unicode (Python 2)
    """
    return ";".join(self.version_string_parts)
def get_metadata_list(self):
    """
    Public method that returns a Python list containing metadata substring items
    generated by splitting the version string on a semicolon delimiter. Metadata
    are defined according to the OpenFV specification; the version number
    substring (i.e. "Version X.XXX") is not present in this list.
    See the OpenFV specification (https://github.com/openfv/openfv).
    :return: list of string (Python 3) or list of unicode (Python 2)
    """
    return self.metadata
def get_state_status_substring(self):
    """
    Public method that returns the State and/or Status substring found at
    position 2 of the semicolon-delimited version string. Per the OpenFV
    specification this may be "DEV", "RELEASE", "[state]-dev" or
    "[state]-release" (https://github.com/openfv/openfv).
    :return: string (Python 3) or unicode (Python 2); '' when the substring
    is not set in the font
    """
    # Guard clauses: no second substring, or it is not a state/status value.
    if len(self.version_string_parts) < 2:
        return ""
    if not (self.is_development or self.is_release or self.contains_state):
        return ""
    return self.version_string_parts[1]
def set_state_git_commit_sha1(self, development=False, release=False):
    """
    Public method that writes a git commit sha1 hash label into the State
    metadata position of the font version string, optionally suffixed with a
    Development or Release status indicator (mutually exclusive). The font
    source must be under git version control.
    See the OpenFV specification (https://github.com/openfv/openfv).
    :param development: (boolean) True = append the development status suffix
    :param release: (boolean) True = append the release status suffix
    :raises: IOError when the git repository root cannot be located
    :raises: ValueError when both development and release are True
    :return: None
    """
    # The repository lookup happens first (may raise IOError), matching the
    # original call ordering.
    state = "[" + self._get_repo_commit() + "]"
    if development and release:
        raise ValueError(
            "Cannot set both development parameter and release parameter to a value of True in "
            "fontv.libfv.FontVersion.set_state_git_commit_sha1() method. These are mutually "
            "exclusive."
        )
    if development:
        state += self.sha1_develop
    elif release:
        state += self.sha1_release
    self._set_state_status_substring(state)
def set_development_status(self):
    """
    Public method that sets the in-memory Development Status metadata substring
    for the font version string.
    See the OpenFV specification for the Status substring and Development status
    definition (https://github.com/openfv/openfv).
    :return: None
    """
    self._set_state_status_substring(self.develop_string)
def set_release_status(self):
    """
    Public method that sets the in-memory Release Status metadata substring
    for the font version string.
    See the OpenFV specification for the Status substring and Release status
    definition details (https://github.com/openfv/openfv).
    :return: None
    """
    self._set_state_status_substring(self.release_string)
def set_version_number(self, version_number):
    """
    Public method that sets the version number substring from *version_number*,
    which must follow the OpenFV "X.XXX" format (https://github.com/openfv/openfv).
    Raises ValueError if *version_number* cannot be cast to float, as required
    for the head table fontRevision record; metadata may not be embedded here.
    :param version_number: (string) version number in X.XXX format, X integers
    :return: None
    """
    self.version_string_parts[0] = "Version " + version_number
    self.version = self.version_string_parts[0]  # "Version X.XXX"
    # May raise ValueError after the substring assignments, as before.
    self.head_fontRevision = float(version_number)  # X.XXX
    self._parse()
def set_version_string(self, version_string):
    """
    Public method that sets the entire version string (including metadata if
    desired) from *version_string*, which should be formatted according to the
    OpenFV specification (https://github.com/openfv/openfv) for the OpenType
    name table ID 5 record.
    Raises ValueError if the version number embedded in *version_string* cannot
    be cast to float, as required for the head table fontRevision record.
    :param version_string: (string) version string with semicolon-delimited
    metadata (if metadata are included)
    :return: None
    """
    self._parse_version_substrings(version_string)
    self._parse()
    self.head_fontRevision = float(self.get_version_number_string())
def write_version_string(self, fontpath=None):
    """
    Public method that writes the in-memory version data to the font binary:
    (1) every OpenType name table ID 5 record receives the semicolon-joined
        FontVersion.version_string_parts value, and
    (2) the OpenType head table fontRevision record receives the float in
        FontVersion.head_fontRevision.
    By default the write targets the path the object was instantiated from;
    pass *fontpath* to write elsewhere. The output format (.otf/.ttf) follows
    the input binary.
    :param fontpath: (string) optional output file path
    :return: None
    """
    name_id5 = self.get_name_id5_version_string()
    # Update every name ID 5 record found in the font.
    for record in self.ttf["name"].names:
        if record.nameID == 5:
            record.string = name_id5
    self.ttf["head"].fontRevision = self.head_fontRevision
    # Preserve the original is-None check (an empty-string path is honored).
    out_path = self.fontpath if fontpath is None else fontpath
    self.ttf.save(out_path)
|
nilq/baby-python
|
python
|
import sys
def test_python_path():
    """Check that the bestbot workspace directory is on the interpreter path."""
    assert '/workspaces/bestbot' in sys.path
|
nilq/baby-python
|
python
|
# NOTE(review): these definitions look like deliberate lint/analysis fixtures
# (mutable default arguments) — confirm before "fixing" them.
def foo(x=[]):
    # Mutable default: the same list object is shared across calls, and
    # list.append returns None, so foo() always returns None.
    return x.append("x")


def bar(x=[]):
    # Mutable default (never mutated here); returns the list's current length.
    return len(x)


foo()
bar()
class Owner(object):
    """Demonstrates classmethod dispatch through both the class and an instance."""

    @classmethod
    def cm(cls, arg):
        """Return the class object itself; *arg* is ignored."""
        return cls

    @classmethod
    def cm2(cls, arg):
        """Return *arg* unchanged."""
        return arg

    # Normal method
    def m(self):
        """Chain the two classmethods; always evaluates to 1."""
        return self.cm(0).cm2(1)
|
nilq/baby-python
|
python
|
import hqm
import socket
class HQMBot():
    """Minimal HQM UDP client.

    Exchanges packets with the server in a send/receive loop and, once the
    initial sync completes, joins the requested team and dispatches to the
    spectate()/action() hooks that subclasses override.
    """

    def __init__(self, host, port, team, name):
        # team: per the protocol notes in TestBot below, -1 = spectator,
        # 0 = red, 1 = blue.
        self.team = team
        self.host = host
        self.port = port
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.session = hqm.HQMClientSession(name, 55)
        # Cleared in dataReceived() once last_message_num reaches 0.
        self.syncing = True

    def run(self):
        """Blocking send/receive loop; Ctrl-C sends a clean exit message."""
        try:
            while True:
                send = self.session.get_message()
                self.socket.sendto(send, (self.host, self.port))
                data = self.socket.recv(8192)
                self.dataReceived(data)
        except KeyboardInterrupt:
            send = self.session.get_exit_message()
            self.socket.sendto(send, (self.host, self.port))

    def dataReceived(self, data):
        """Parse one server packet, manage team membership, dispatch hooks."""
        self.session.parse_message(data)
        if self.session.last_message_num == 0:
            self.syncing = False
        gamestate = self.session.gamestate
        if not self.syncing and gamestate:
            you = gamestate.you
            you_player = gamestate.players.get(you)
            if you_player["team"] == -1:  # Still spectating
                self.session.join_team(self.team)
                self.spectate()
            else:
                if you_player["team"] != self.team:
                    self.session.join_team(-1)  # Back to spectator so we can switch team
                else:
                    self.session.join_team(None)
                    self.action()  # Let's do stuff

    def spectate(self):
        # Hook: called while the bot is still a spectator.
        pass

    def action(self):
        # Hook: override with bot behaviour; called once per packet while on-team.
        pass
class TestBot(HQMBot):
    """Example bot: categorizes objects every tick and skates forward-left.

    Bug fixed: ``you`` was reassigned to ``None`` just before the player loop,
    so the ``i == you`` self-check never matched and the bot counted itself as
    a teammate. The clobbering assignment has been removed.
    """

    def action(self):
        """Called once per received packet while the bot is on its team."""
        session = self.session
        # Session contains the current gamestate and some other useful
        # functions such as add_chat.
        gamestate = session.gamestate
        # Gamestate contains score, time, and a player and object list.
        # players: dict of all players (one of them is you). Each player dict
        # has keys:
        #   name : the player name
        #   i    : the player index
        #   team : -1 spectating, 0 red, 1 blue
        #   obj  : index of the player's object (-1 while spectating)
        players = gamestate.players
        # Index identifying this bot in the player list.
        you = gamestate.you
        # objects: dict of all objects (players and pucks). Run
        # object.calculate_positions() before using the position data.
        # After that, every object has:
        #   type : "PLAYER" or "PUCK"
        #   pos  : numpy array with 3 elements
        #   rot  : numpy 3x3 rotation matrix
        # Player objects additionally have:
        #   stick_pos : numpy array with 3 elements
        #   stick_rot : numpy 3x3 rotation matrix
        #   head_rot  : head rotation, left (-)/right (+), radians
        #   body_rot  : body rotation, backwards (-)/forwards (+), radians
        objects = gamestate.objects
        pucks = []
        teammates = []
        opponents = []
        for obj in objects.values():
            obj.calculate_positions()
            if obj["type"] == "PUCK":
                pucks.append(obj)
        you_player = players[you]
        you_obj = objects[you_player["obj"]]  # this bot's own player object
        for i, player in players.items():
            if i == you or player["obj"] == -1:  # skip self and spectators
                continue
            player_obj = objects[player["obj"]]
            if player["team"] == you_player["team"]:
                teammates.append(player_obj)
            else:
                opponents.append(player_obj)
        session.move_lr = 1      # turn: -1.0 (left) .. 1.0 (right)
        session.move_fwbw = 1.0  # -1.0 (backwards) .. 1.0 (forwards)
        # Other available controls:
        # session.stick_x      : stick left/right rotation, -pi/2 .. pi/2
        # session.stick_y      : stick up/down rotation, -0.98 (up) .. 0.39 (down)
        # session.stick_angle  : -1 (clockwise) .. 1 (counter-clockwise);
        #                        the standard client steps by .25 per wheel tick
        # session.head_rot     : -2.74 (left) .. 2.74 (right)
        # session.body_rot     : -pi/2 (backwards) .. pi/2 (forwards)
        # session.jump / session.crouch / session.shift : key toggles
        if gamestate.simstep % 2000 == 500:
            session.add_chat("MigoBot")
|
nilq/baby-python
|
python
|
import os
import random

# HTML template for one chord checkbox; placeholders in order are
# (input id, input value, label for-target, label text).
# NOTE(review): `random` is imported but unused in the visible code.
check = """<input type="checkbox" id="{}" name="chord" value="{}">
<label for="{}"> {}</label><br>\n"""
def s(note):
    """Lower-case *note* and rebuild it as char0 + '_' + chars 2..4.

    Effectively replaces the second character (e.g. a '#') with an
    underscore: 'C#maj.wav' -> 'c_maj'.
    """
    lowered = note.lower()
    return "{}_{}".format(lowered[0], lowered[2:5])
to_print = ""
for note in os.listdir("./Chords"):
    # NOTE(review): the input id is note[:-4] but the label's for= is s(note);
    # these never coincide (s() lower-cases and inserts '_'), so the labels do
    # not associate with their checkboxes — confirm which value is intended.
    to_print += check.format(note[:-4], note, s(note), note[:-4])

with open("check.html", "w") as fi:
    fi.write(to_print)
|
nilq/baby-python
|
python
|
import unittest
from app import db
from app.crypto.pw_hashing import global_salt_hash, indiv_salt_hash
from app.data_access.db_model.user import User
from app.data_access.user_controller import create_user, user_exists, delete_user, activate_user, \
store_pdf_and_transfer_ticket, find_user, check_idnr, check_dob
from app.data_access.user_controller_errors import UserAlreadyExistsError
class TestUserExists(unittest.TestCase):
    """Tests for user_controller.user_exists."""

    def setUp(self):
        db.create_all()
        self.existing_idnr = "123"
        create_user(self.existing_idnr, '1985-01-01', '789')

    def test_if_existing_idnr_then_return_true(self):
        response = user_exists(self.existing_idnr)
        self.assertTrue(response)

    def test_if_not_existing_idnr_then_return_false(self):
        # Renamed from *_return_true: this case asserts the negative result.
        response = user_exists('non_existent_user')
        self.assertFalse(response)

    def tearDown(self):
        db.drop_all()
class TestCreateUser(unittest.TestCase):
    """Tests for user_controller.create_user."""

    def setUp(self):
        db.create_all()
        self.existing_idnr = "123"
        create_user(self.existing_idnr, '1985-01-01', '789')

    def test_if_idnr_exists_and_request_id_same_then_raise_error(self):
        with self.assertRaises(UserAlreadyExistsError):
            create_user(self.existing_idnr, '1985-01-01', '789')

    def test_if_idnr_exists_and_request_id_different_then_raise_error(self):
        with self.assertRaises(UserAlreadyExistsError):
            create_user(self.existing_idnr, '1985-01-01', '000')

    def test_if_idnr_exists_and_dob_same_then_raise_error(self):
        with self.assertRaises(UserAlreadyExistsError):
            create_user(self.existing_idnr, '1985-01-01', '789')

    def test_if_idnr_exists_and_dob_different_then_raise_error(self):
        with self.assertRaises(UserAlreadyExistsError):
            create_user(self.existing_idnr, '1999-01-01', '789')

    def test_if_new_idnr_then_save_user(self):
        new_idnr = '33602'
        create_user(new_idnr, '1985-01-01', '000')
        self.assertTrue(user_exists(new_idnr))

    def test_if_new_idnr_then_save_correct_attributes(self):
        new_idnr = '33604'
        dob = '1985-01-01'
        req_id = '000'
        create_user(new_idnr, dob, req_id)
        saved_user = find_user(new_idnr)
        self._assert_user_attributes(saved_user, new_idnr, dob, req_id)

    def test_if_new_idnr_then_return_user_with_correct_attributes(self):
        new_idnr = '33605'
        dob = '1985-01-01'
        req_id = '000'
        returned_user = create_user(new_idnr, dob, req_id)
        self._assert_user_attributes(returned_user, new_idnr, dob, req_id)

    def _assert_user_attributes(self, user, idnr, dob, req_id):
        # Shared checks for both the stored and the returned user object.
        self.assertEqual(global_salt_hash().hash(idnr), user.idnr_hashed)
        self.assertTrue(indiv_salt_hash().verify(dob, user.dob_hashed))
        self.assertEqual(req_id, user.elster_request_id)
        self.assertFalse(user.is_active)

    def tearDown(self):
        db.drop_all()
class TestDeleteUser(unittest.TestCase):
    """Tests for user_controller.delete_user."""

    def setUp(self):
        db.create_all()
        create_user('Added_user', '1985-01-01', '123')

    def test_if_user_is_deleted_then_user_is_removed_from_storage(self):
        delete_user('Added_user')
        # Roll back the session so the assertion only sees committed state.
        db.session.rollback()
        self.assertEqual(0, User.query.count())

    def tearDown(self):
        db.drop_all()
class TestActivateUser(unittest.TestCase):
    """Tests for user_controller.activate_user."""

    def setUp(self):
        db.create_all()
        self.user = create_user('1234', '1985-01-01', '5678')

    def test_activates_user_and_commits_changes(self):
        activate_user('1234', '5678')
        # Roll back so the assertion only sees committed state.
        db.session.rollback()
        self.assertTrue(self.user.is_active)

    def test_activate_user_returns_an_activated_user(self):
        activated_user = activate_user('1234', '5678')
        self.assertTrue(activated_user.is_active)

    def tearDown(self):
        db.drop_all()
class TestStorePdfAndTransferTicket(unittest.TestCase):
    """Tests for user_controller.store_pdf_and_transfer_ticket."""

    def setUp(self):
        db.create_all()

    def test_pdf_is_set_in_user(self):
        pdf_payload = b'thisisagreatPDFforya'
        user = User('123', '123', '123')
        store_pdf_and_transfer_ticket(user, pdf_payload, 'Passierschein A38')
        # Roll back so the assertion only sees committed state.
        db.session.rollback()
        self.assertEqual(pdf_payload, user.pdf)

    def test_transfer_ticket_is_set_in_user(self):
        ticket = 'Passierschein A38'
        user = User('123', '123', '123')
        store_pdf_and_transfer_ticket(user, b'pdf', ticket)
        db.session.rollback()
        self.assertEqual(ticket, user.transfer_ticket)

    def tearDown(self):
        db.drop_all()
class TestCheckIdnr(unittest.TestCase):
    """Tests for user_controller.check_idnr."""

    def setUp(self):
        db.create_all()
        self.correct_idnr = '1234567890'
        self.existing_user = create_user(self.correct_idnr, '1985-01-01', '000')

    def test_if_idnr_correct_return_true(self):
        result = check_idnr(self.existing_user, self.correct_idnr)
        self.assertTrue(result)

    def test_if_idnr_incorrect_return_false(self):
        result = check_idnr(self.existing_user, 'INCORRECT')
        self.assertFalse(result)

    def tearDown(self):
        db.drop_all()
class TestCheckDob(unittest.TestCase):
    """Tests for user_controller.check_dob."""

    def setUp(self):
        db.create_all()
        self.correct_dob = '1985-01-01'
        self.existing_user = create_user('1234', self.correct_dob, '000')

    def test_if_dob_correct_return_true(self):
        result = check_dob(self.existing_user, self.correct_dob)
        self.assertTrue(result)

    def test_if_dob_incorrect_return_false(self):
        result = check_dob(self.existing_user, 'INCORRECT')
        self.assertFalse(result)

    def tearDown(self):
        db.drop_all()
|
nilq/baby-python
|
python
|
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path

from wrappr_backend.detection.api import urlpatterns as api_urls

# Root URL configuration: admin, DRF browsable-API auth, and the detection API.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls')),
    url(r"^api/", include(api_urls)),
]
# Serve user-uploaded media through Django (development-style serving).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # Debug toolbar routes are only mounted in DEBUG mode.
    import debug_toolbar
    urlpatterns = [path('__debug__/', include(debug_toolbar.urls)), ] + urlpatterns
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUI_try.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt builds without QString (API v2 / Python 3): strings pass through.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt builds drop the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(645, 692)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setMinimumSize(QtCore.QSize(644, 691))
Dialog.setMaximumSize(QtCore.QSize(646, 693))
self.comboBox = QtGui.QComboBox(Dialog)
self.comboBox.setEnabled(True)
self.comboBox.setGeometry(QtCore.QRect(10, 10, 211, 21))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
self.comboBox.setSizePolicy(sizePolicy)
self.comboBox.setMinimumSize(QtCore.QSize(0, 0))
self.comboBox.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.comboBox.setSizeIncrement(QtCore.QSize(0, 0))
self.comboBox.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.comboBox.setPalette(palette)
font = QtGui.QFont()
self.comboBox.setFont(font)
self.comboBox.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.comboBox.setMouseTracking(False)
self.comboBox.setFocusPolicy(QtCore.Qt.WheelFocus)
self.comboBox.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.comboBox.setUpdatesEnabled(True)
self.comboBox.setVisible(True)
self.comboBox.setAcceptDrops(False)
self.comboBox.setWindowTitle(_fromUtf8(""))
self.comboBox.setWindowIconText(_fromUtf8(""))
self.comboBox.setWindowOpacity(1.0)
self.comboBox.setWindowModified(False)
self.comboBox.setToolTip(_fromUtf8(""))
self.comboBox.setStatusTip(_fromUtf8(""))
self.comboBox.setWhatsThis(_fromUtf8(""))
self.comboBox.setAccessibleName(_fromUtf8(""))
self.comboBox.setAccessibleDescription(_fromUtf8(""))
self.comboBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.comboBox.setAutoFillBackground(False)
self.comboBox.setStyleSheet(_fromUtf8(""))
self.comboBox.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.comboBox.setWindowFilePath(_fromUtf8(""))
self.comboBox.setInputMethodHints(QtCore.Qt.ImhNone)
self.comboBox.setEditable(False)
self.comboBox.setMaxVisibleItems(10)
self.comboBox.setMaxCount(2147483647)
self.comboBox.setInsertPolicy(QtGui.QComboBox.InsertAtBottom)
self.comboBox.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContentsOnFirstShow)
self.comboBox.setMinimumContentsLength(0)
self.comboBox.setIconSize(QtCore.QSize(16, 16))
self.comboBox.setAutoCompletion(True)
self.comboBox.setAutoCompletionCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.comboBox.setDuplicatesEnabled(False)
self.comboBox.setFrame(True)
self.comboBox.setModelColumn(0)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.tableWidget = QtGui.QTableWidget(Dialog)
self.tableWidget.setGeometry(QtCore.QRect(250, 10, 381, 671))
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.pushButton_2 = QtGui.QPushButton(Dialog)
self.pushButton_2.setEnabled(True)
self.pushButton_2.setGeometry(QtCore.QRect(120, 610, 111, 31))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
self.pushButton_2.setSizePolicy(sizePolicy)
self.pushButton_2.setMinimumSize(QtCore.QSize(0, 0))
self.pushButton_2.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.pushButton_2.setSizeIncrement(QtCore.QSize(0, 0))
self.pushButton_2.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.pushButton_2.setPalette(palette)
font = QtGui.QFont()
self.pushButton_2.setFont(font)
self.pushButton_2.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.pushButton_2.setMouseTracking(False)
self.pushButton_2.setFocusPolicy(QtCore.Qt.StrongFocus)
self.pushButton_2.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.pushButton_2.setUpdatesEnabled(True)
self.pushButton_2.setVisible(True)
self.pushButton_2.setAcceptDrops(False)
self.pushButton_2.setWindowTitle(_fromUtf8(""))
self.pushButton_2.setWindowIconText(_fromUtf8(""))
self.pushButton_2.setWindowOpacity(1.0)
self.pushButton_2.setWindowModified(False)
self.pushButton_2.setToolTip(_fromUtf8(""))
self.pushButton_2.setStatusTip(_fromUtf8(""))
self.pushButton_2.setWhatsThis(_fromUtf8(""))
self.pushButton_2.setAccessibleName(_fromUtf8(""))
self.pushButton_2.setAccessibleDescription(_fromUtf8(""))
self.pushButton_2.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton_2.setAutoFillBackground(False)
self.pushButton_2.setStyleSheet(_fromUtf8(""))
self.pushButton_2.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.pushButton_2.setWindowFilePath(_fromUtf8(""))
self.pushButton_2.setInputMethodHints(QtCore.Qt.ImhNone)
self.pushButton_2.setIconSize(QtCore.QSize(16, 16))
self.pushButton_2.setShortcut(_fromUtf8(""))
self.pushButton_2.setCheckable(False)
self.pushButton_2.setChecked(False)
self.pushButton_2.setAutoRepeat(False)
self.pushButton_2.setAutoExclusive(False)
self.pushButton_2.setAutoRepeatDelay(300)
self.pushButton_2.setAutoRepeatInterval(100)
self.pushButton_2.setDown(False)
self.pushButton_2.setAutoDefault(True)
self.pushButton_2.setDefault(False)
self.pushButton_2.setFlat(False)
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton = QtGui.QPushButton(Dialog)
self.pushButton.setEnabled(True)
self.pushButton.setGeometry(QtCore.QRect(10, 610, 101, 31))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setMinimumSize(QtCore.QSize(0, 0))
self.pushButton.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.pushButton.setSizeIncrement(QtCore.QSize(0, 0))
self.pushButton.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.pushButton.setPalette(palette)
font = QtGui.QFont()
self.pushButton.setFont(font)
self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.pushButton.setMouseTracking(False)
self.pushButton.setFocusPolicy(QtCore.Qt.StrongFocus)
self.pushButton.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.pushButton.setUpdatesEnabled(True)
self.pushButton.setVisible(True)
self.pushButton.setAcceptDrops(False)
self.pushButton.setWindowTitle(_fromUtf8(""))
self.pushButton.setWindowIconText(_fromUtf8(""))
self.pushButton.setWindowOpacity(1.0)
self.pushButton.setWindowModified(False)
self.pushButton.setToolTip(_fromUtf8(""))
self.pushButton.setStatusTip(_fromUtf8(""))
self.pushButton.setWhatsThis(_fromUtf8(""))
self.pushButton.setAccessibleName(_fromUtf8(""))
self.pushButton.setAccessibleDescription(_fromUtf8(""))
self.pushButton.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton.setAutoFillBackground(False)
self.pushButton.setStyleSheet(_fromUtf8(""))
self.pushButton.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.pushButton.setWindowFilePath(_fromUtf8(""))
self.pushButton.setInputMethodHints(QtCore.Qt.ImhNone)
self.pushButton.setIconSize(QtCore.QSize(16, 16))
self.pushButton.setShortcut(_fromUtf8(""))
self.pushButton.setCheckable(False)
self.pushButton.setChecked(False)
self.pushButton.setAutoRepeat(False)
self.pushButton.setAutoExclusive(False)
self.pushButton.setAutoRepeatDelay(300)
self.pushButton.setAutoRepeatInterval(100)
self.pushButton.setDown(False)
self.pushButton.setAutoDefault(True)
self.pushButton.setDefault(True)
self.pushButton.setFlat(False)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton_3 = QtGui.QPushButton(Dialog)
self.pushButton_3.setEnabled(True)
self.pushButton_3.setGeometry(QtCore.QRect(10, 650, 101, 31))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())
self.pushButton_3.setSizePolicy(sizePolicy)
self.pushButton_3.setMinimumSize(QtCore.QSize(0, 0))
self.pushButton_3.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.pushButton_3.setSizeIncrement(QtCore.QSize(0, 0))
self.pushButton_3.setBaseSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
self.pushButton_3.setPalette(palette)
font = QtGui.QFont()
self.pushButton_3.setFont(font)
self.pushButton_3.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.pushButton_3.setMouseTracking(False)
self.pushButton_3.setFocusPolicy(QtCore.Qt.StrongFocus)
self.pushButton_3.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.pushButton_3.setUpdatesEnabled(True)
self.pushButton_3.setVisible(True)
self.pushButton_3.setAcceptDrops(False)
self.pushButton_3.setWindowTitle(_fromUtf8(""))
self.pushButton_3.setWindowIconText(_fromUtf8(""))
self.pushButton_3.setWindowOpacity(1.0)
self.pushButton_3.setWindowModified(False)
self.pushButton_3.setToolTip(_fromUtf8(""))
self.pushButton_3.setStatusTip(_fromUtf8(""))
self.pushButton_3.setWhatsThis(_fromUtf8(""))
self.pushButton_3.setAccessibleName(_fromUtf8(""))
self.pushButton_3.setAccessibleDescription(_fromUtf8(""))
self.pushButton_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.pushButton_3.setAutoFillBackground(False)
self.pushButton_3.setStyleSheet(_fromUtf8(""))
self.pushButton_3.setLocale(QtCore.QLocale(QtCore.QLocale.Russian, QtCore.QLocale.RussianFederation))
self.pushButton_3.setWindowFilePath(_fromUtf8(""))
self.pushButton_3.setInputMethodHints(QtCore.Qt.ImhNone)
self.pushButton_3.setIconSize(QtCore.QSize(16, 16))
self.pushButton_3.setShortcut(_fromUtf8(""))
self.pushButton_3.setCheckable(False)
self.pushButton_3.setChecked(False)
self.pushButton_3.setAutoRepeat(False)
self.pushButton_3.setAutoExclusive(False)
self.pushButton_3.setAutoRepeatDelay(300)
self.pushButton_3.setAutoRepeatInterval(100)
self.pushButton_3.setDown(False)
self.pushButton_3.setAutoDefault(True)
self.pushButton_3.setDefault(False)
self.pushButton_3.setFlat(False)
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton_4 = QtGui.QPushButton(Dialog)
self.pushButton_4.setGeometry(QtCore.QRect(120, 650, 111, 31))
self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
self.comboBox_2 = QtGui.QComboBox(Dialog)
self.comboBox_2.setEnabled(False)
self.comboBox_2.setGeometry(QtCore.QRect(10, 40, 151, 22))
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.spinBox = QtGui.QSpinBox(Dialog)
self.spinBox.setEnabled(False)
self.spinBox.setGeometry(QtCore.QRect(180, 40, 42, 22))
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.retranslateUi(Dialog)
self.comboBox.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
    """Apply the translated (Russian) captions to the dialog's widgets."""
    Dialog.setWindowTitle(_translate("Dialog", "Программа учета деталей", None))
    # Part-type choices shown in the combo box, in index order.
    part_types = ("50НР4", "50НР6.3", "50НР10", "50НР14", "50НР16", "50НР32")
    for index, caption in enumerate(part_types):
        self.comboBox.setItemText(index, _translate("Dialog", caption, None))
    # Column headers of the parts table.
    for column, caption in enumerate(("Детали", "Склад", "Требуется")):
        header_item = self.tableWidget.horizontalHeaderItem(column)
        header_item.setText(_translate("Dialog", caption, None))
    self.pushButton_2.setText(_translate("Dialog", "Выход", None))
    self.pushButton.setText(_translate("Dialog", "Выполнить заказ", None))
    self.pushButton_3.setText(_translate("Dialog", "Пополнить склад", None))
    self.pushButton_4.setText(_translate("Dialog", "Посмотреть склад", None))
# Standalone entry point: build the dialog UI and run the Qt event loop
# until the window is closed.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    Dialog = QtGui.QWidget()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    # exec_() blocks until the app quits; its status code is passed to exit().
    sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
from flask import jsonify, request, Response
from jsonschema import RefResolutionError
from sqlalchemy.orm import Session
from flexget.api import APIResource, api
from flexget.api.app import NotFoundError
from flexget.config_schema import resolve_ref, schema_paths
schema_api = api.namespace('schema', description='Config and plugin schemas')
schema_api_list = api.schema_model(
'schema.list',
{'type': 'object', 'properties': {'schemas': {'type': 'array', 'items': {'type': 'object'}}}},
)
def rewrite_ref(identifier: str, base_url: str) -> str:
    """
    Map an arbitrary internal schema identifier onto a URL served by this endpoint.

    Identifiers that are not under ``/schema/`` are returned unchanged.
    """
    prefix = base_url if base_url.endswith('/') else base_url + '/'
    # Drop the identifier's leading slash so it appends cleanly to the prefix.
    return prefix + identifier[1:] if identifier.startswith('/schema/') else identifier
def rewrite_refs(schema, base_url: str):
    """Recursively rewrite every ``$ref`` in *schema* to point back at this endpoint."""
    if isinstance(schema, list):
        return [rewrite_refs(item, base_url) for item in schema]
    if isinstance(schema, dict):
        # A dict that is itself a reference is replaced wholesale.
        if '$ref' in schema:
            return {'$ref': rewrite_ref(schema['$ref'], base_url)}
        return {key: rewrite_refs(value, base_url) for key, value in schema.items()}
    # Scalars (strings, numbers, booleans, None) pass through untouched.
    return schema
@schema_api.route('/')
class SchemaAllAPI(APIResource):
    @api.response(200, model=schema_api_list)
    def get(self, session: Session = None) -> Response:
        """ List all schema definitions """
        base = request.url_root

        def _prepared(path):
            # Resolve the schema, fix up its internal refs, and stamp its id URL.
            resolved = rewrite_refs(resolve_ref(path), base)
            resolved['id'] = rewrite_ref(path, base)
            return resolved

        return jsonify({'schemas': [_prepared(path) for path in schema_paths]})
@schema_api.route('/<path:path>/')
@api.doc(params={'path': 'Path of schema'})
@api.response(NotFoundError)
class SchemaAPI(APIResource):
    @api.response(200, model=schema_api_list)
    def get(self, path: str, session: Session = None) -> Response:
        """ Get schema definition """
        try:
            resolved = resolve_ref(request.full_path)
        except RefResolutionError:
            raise NotFoundError('invalid schema path')
        else:
            # Stamp the canonical URL of this schema before rewriting its refs.
            resolved['id'] = request.url
            return jsonify(rewrite_refs(resolved, request.url_root))
|
nilq/baby-python
|
python
|
"""
Compare two integers given as strings.
Example
For a = "12" and b = "13", the output should be
compareIntegers(a, b) = "less";
For a = "875" and b = "799", the output should be
compareIntegers(a, b) = "greater";
For a = "1000" and b = "1000", the output should be
compareIntegers(a, b) = "equal".
"""
def compareIntegers(a, b):
    """Compare two non-negative integers given as digit strings, without int().

    A longer digit string is the larger number; for equal lengths,
    lexicographic order of the digits matches numeric order.
    """
    key_a = (len(a), a)
    key_b = (len(b), b)
    if key_a < key_b:
        return 'less'
    if key_a > key_b:
        return 'greater'
    return 'equal'
def compareIntegers(a, b):
    """Compare two integer strings by converting them to ints first."""
    difference = int(a) - int(b)
    if difference < 0:
        return 'less'
    if difference > 0:
        return 'greater'
    return 'equal'
|
nilq/baby-python
|
python
|
# This file is part of the faebryk project
# SPDX-License-Identifier: MIT
import faebryk.library.core
import faebryk.library.kicad
import faebryk.library.library
import faebryk.library.traits
|
nilq/baby-python
|
python
|
from datetime import datetime
from .api import ApiObject
class Trigger(ApiObject):
    """
    https://www.xibbaz.com/documentation/3.4/manual/api/reference/trigger/object
    """
    # Related objects selected by default when fetching triggers.
    DEFAULT_SELECTS = ('Items', 'Functions', 'Dependencies', 'DiscoveryRule', 'LastEvent', 'Tags')
    # Relation names this object supports.
    RELATIONS = ('hosts', 'groups')

    @classmethod
    def _text_field(cls):
        # Field used for free-text search of triggers.
        # (classmethods receive the class; the parameter was misleadingly
        # named `self` before.)
        return 'description'

    # Property metadata for the Trigger API object: per-field docs, value
    # kinds, read-only flags, and enumerated value meanings.
    PROPS = dict(
        triggerid = dict(
            doc = "ID of the trigger.",
            id = True,
            readonly = True,
        ),
        description = dict(
            doc = "Name of the trigger.",
        ),
        expression = dict(
            doc = "Reduced trigger expression.",
        ),
        comments = dict(
            doc = "Additional comments to the trigger.",
        ),
        error = dict(
            doc = "Error text if there have been any problems when updating the state of the trigger.",
            readonly = True,
        ),
        flags = dict(
            doc = "Origin of the trigger.",
            kind = int,
            readonly = True,
            vals = {
                0: 'a plain trigger (default)',
                4: 'a discovered trigger',
            },
        ),
        lastchange = dict(
            doc = "Time when the trigger last changed its state.",
            kind = datetime,
            readonly = True,
        ),
        priority = dict(
            doc = "Severity of the trigger.",
            kind = int,
            vals = {
                0: 'not classified (default)',
                1: 'information',
                2: 'warning',
                3: 'average',
                4: 'high',
                5: 'disaster',
            },
        ),
        state = dict(
            doc = "State of the trigger.",
            kind = int,
            readonly = True,
            vals = {
                0: 'trigger state is up to date (default)',
                1: 'current trigger state is unknown',
            },
        ),
        status = dict(
            doc = "Whether the trigger is enabled or disabled.",
            kind = int,
            vals = {
                0: 'enabled (default)',
                1: 'disabled',
            },
        ),
        templateid = dict(
            doc = "ID of the parent template trigger.",
            readonly = True,
        ),
        type = dict(
            doc = "Whether the trigger can generate multiple problem events.",
            kind = int,
            vals = {
                0: 'do not generate multiple events (default)',
                1: 'generate multiple events',
            },
        ),
        url = dict(
            doc = "URL associated with the trigger.",
        ),
        value = dict(
            doc = "Whether the trigger is in OK or problem state.",
            kind = int,
            readonly = True,
            vals = {
                0: 'ok',
                1: 'problem',
            },
        ),
        recovery_mode = dict(
            doc = "OK event generation mode.",
            kind = int,
            vals = {
                0: 'expression (default)',
                1: 'recovery expression',
                2: 'none',
            },
        ),
        recovery_expression = dict(
            doc = "Reduced trigger recovery expression.",
        ),
        correlation_mode = dict(
            doc = "OK event closes.",
            kind = int,
            vals = {
                0: 'all problems (default)',
                1: 'all problems if tag values match',
            },
        ),
        correlation_tag = dict(
            doc = "Tag for matching.",
        ),
        manual_close = dict(
            doc = "Allow manual close.",
            kind = int,
            vals = {
                0: 'no (default)',
                1: 'yes',
            },
        ),
    )
|
nilq/baby-python
|
python
|
################################################################### #
# Basic plot for two-strain SIR model:
# Bifurcation diagram for one parameter
####################################################################
import sys
import numpy as np
import pylab as plt
from matplotlib.font_manager import FontProperties
from two_strain import *
# Run parameters
run_num = 1  # sys.argv[1]
end_time = 1000*365
output_interval = 365.0  # if not 365., need to adjust strobe interval
step_size = 1.0
sweep_par = "beta[0]"  # e.g., "beta[0]", "a[1]", "alpha[0]"
par_min = 1.0/7.0
par_max = 7.0/7.0
n_points = 40  # number of points in parameter range
n_strobes = 50  # number of years to sample

# Strain parameters, including initial conditions
beta = np.array([5, 5])/7.0
epsilon = 0.1
gamma = np.array([1, 1])/7.0
mu = 1/(10*365.0)
alpha = np.array([1., 1.])
a = np.array([1., 1.])
omega = 2*np.pi/365.
obs_sd = 0.01
NSS = 0.2
NIS = 1e-3
NRS = 0.02
NRI = 0.0
NSI = 1e-3
NSR = 0.02
NIR = 0.0

# Organize and run simulations
SI = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR])
ic = np.array([NSS, NIS, NRS, NRI, NSI, NSR, NIR, 1-np.sum(SI)])
par_vals = np.linspace(par_min, par_max, n_points)
bif_vals = np.zeros((len(par_vals), n_strobes))
for i in range(len(par_vals)):
    print('Running value %d of %d' % (i+1, len(par_vals)))
    # Assign the swept value to the (possibly indexed) parameter named in
    # sweep_par, e.g. "beta[0]".  exec is acceptable here: sweep_par is a
    # hard-coded string set above, not external input.
    exec(sweep_par + " = par_vals[i]")
    params = np.array([gamma, mu, alpha, a, omega, beta, epsilon])
    output = run_two_strain(end_time, output_interval, step_size, params, ic)
    I = output[:, 1] + output[:, 6]  # NIS + NIR
    # Keep the last n_strobes annual samples for the bifurcation plot.
    bif_vals[i, :] = I[-n_strobes:len(I)]

# Plot output.  Save BEFORE show(): show() blocks until the window closes
# and tears the figure down, so savefig() afterwards writes a blank image.
plt.plot(par_vals, bif_vals, '.k')
plt.xlabel(sweep_par)
plt.ylabel("NIS + NIR")
plt.xlim([par_min, par_max])
plt.savefig("bifurcation_" + sweep_par + ".png")
plt.show()
plt.close()
|
nilq/baby-python
|
python
|
import tweepy , tkinter, datetime, os, sys, random, time, pytz
from keys import *
from tweepy import TweepError
# Create the OAuth handler, attach the access token, and build the
# authenticated Twitter API client used throughout this script.
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(key,secret)
api = tweepy.API(auth)
# Text file with one lyric line per row, used to pick random replies.
random_lyrics = 'Lyrics.txt'
# US/Central timezone handle.
time_stamp = pytz.timezone('US/Central')
#Creating a function that scans and stores the last statusID
def test_bot():
    """Verify credentials, then reply to the latest mention with a random lyric line."""
    try:
        api.verify_credentials()
        print('Authentication was successful')
    except tweepy.TweepError:
        # Report and continue; the subsequent API calls will fail loudly anyway.
        print('Error')
    user = api.me()
    print(user.name + ' ' + 'Succesfully Signing In....')
    mentions = api.mentions_timeline(count = 1)
    for tweet in mentions:
        # Re-read the lyric pool each time so the file can be edited while
        # the bot runs; `with` guarantees the handle is closed (the old code
        # leaked one file descriptor per mention).
        with open(random_lyrics, 'r') as filesong:
            lyrics = filesong.readlines()
        #Setting Counter
        random_lyric = random.randrange(0, len(lyrics))
        username_of_person = tweet.user.screen_name
        try:
            # in_reply_to_status_id must be the mention's STATUS id (tweet.id),
            # not the author's user id, for Twitter to thread the reply.
            # The old code also called .append() on the returned Status object,
            # which raised AttributeError past the TweepError handler.
            api.update_status("@" + username_of_person + ' ' + lyrics[random_lyric],
                              in_reply_to_status_id = tweet.id)
            print('Replied to' + ' ' + username_of_person + ' ' + 'with:' + ' ' + lyrics[random_lyric])
        except tweepy.TweepError as e:
            print(e.reason)
# Loop forever, polling for new mentions every 15 seconds.
while True:
    test_bot()
    time.sleep(15)
|
nilq/baby-python
|
python
|
from mycroft import MycroftSkill, intent_file_handler
class Prepararrefeicoes(MycroftSkill):
    """Mycroft skill answering the 'prepararrefeicoes' (meal-preparation) intent."""

    def __init__(self):
        MycroftSkill.__init__(self)

    @intent_file_handler('prepararrefeicoes.intent')
    def handle_prepararrefeicoes(self, message):
        # Speak the canned dialog of the same name.
        self.speak_dialog('prepararrefeicoes')
def create_skill():
    """Entry point used by the Mycroft skill loader."""
    skill = Prepararrefeicoes()
    return skill
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ibc/core/commitment/v1/commitment.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from confio import proofs_pb2 as confio_dot_proofs__pb2
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="ibc/core/commitment/v1/commitment.proto",
package="ibc.core.commitment.v1",
syntax="proto3",
serialized_options=b"Z9github.com/cosmos/ibc-go/modules/core/23-commitment/types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\'ibc/core/commitment/v1/commitment.proto\x12\x16ibc.core.commitment.v1\x1a\x14gogoproto/gogo.proto\x1a\x13\x63onfio/proofs.proto" \n\nMerkleRoot\x12\x0c\n\x04hash\x18\x01 \x01(\x0c:\x04\x88\xa0\x1f\x00"9\n\x0cMerklePrefix\x12)\n\nkey_prefix\x18\x01 \x01(\x0c\x42\x15\xf2\xde\x1f\x11yaml:"key_prefix""9\n\nMerklePath\x12%\n\x08key_path\x18\x01 \x03(\tB\x13\xf2\xde\x1f\x0fyaml:"key_path":\x04\x98\xa0\x1f\x00"5\n\x0bMerkleProof\x12&\n\x06proofs\x18\x01 \x03(\x0b\x32\x16.ics23.CommitmentProofB;Z9github.com/cosmos/ibc-go/modules/core/23-commitment/typesb\x06proto3',
dependencies=[
gogoproto_dot_gogo__pb2.DESCRIPTOR,
confio_dot_proofs__pb2.DESCRIPTOR,
],
)
_MERKLEROOT = _descriptor.Descriptor(
name="MerkleRoot",
full_name="ibc.core.commitment.v1.MerkleRoot",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="hash",
full_name="ibc.core.commitment.v1.MerkleRoot.hash",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\210\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=110,
serialized_end=142,
)
_MERKLEPREFIX = _descriptor.Descriptor(
name="MerklePrefix",
full_name="ibc.core.commitment.v1.MerklePrefix",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key_prefix",
full_name="ibc.core.commitment.v1.MerklePrefix.key_prefix",
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\362\336\037\021yaml:"key_prefix"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=144,
serialized_end=201,
)
_MERKLEPATH = _descriptor.Descriptor(
name="MerklePath",
full_name="ibc.core.commitment.v1.MerklePath",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key_path",
full_name="ibc.core.commitment.v1.MerklePath.key_path",
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b'\362\336\037\017yaml:"key_path"',
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"\230\240\037\000",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=203,
serialized_end=260,
)
_MERKLEPROOF = _descriptor.Descriptor(
name="MerkleProof",
full_name="ibc.core.commitment.v1.MerkleProof",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="proofs",
full_name="ibc.core.commitment.v1.MerkleProof.proofs",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=262,
serialized_end=315,
)
_MERKLEPROOF.fields_by_name[
"proofs"
].message_type = confio_dot_proofs__pb2._COMMITMENTPROOF
DESCRIPTOR.message_types_by_name["MerkleRoot"] = _MERKLEROOT
DESCRIPTOR.message_types_by_name["MerklePrefix"] = _MERKLEPREFIX
DESCRIPTOR.message_types_by_name["MerklePath"] = _MERKLEPATH
DESCRIPTOR.message_types_by_name["MerkleProof"] = _MERKLEPROOF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MerkleRoot = _reflection.GeneratedProtocolMessageType(
"MerkleRoot",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEROOT,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerkleRoot)
},
)
_sym_db.RegisterMessage(MerkleRoot)
MerklePrefix = _reflection.GeneratedProtocolMessageType(
"MerklePrefix",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEPREFIX,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerklePrefix)
},
)
_sym_db.RegisterMessage(MerklePrefix)
MerklePath = _reflection.GeneratedProtocolMessageType(
"MerklePath",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEPATH,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerklePath)
},
)
_sym_db.RegisterMessage(MerklePath)
MerkleProof = _reflection.GeneratedProtocolMessageType(
"MerkleProof",
(_message.Message,),
{
"DESCRIPTOR": _MERKLEPROOF,
"__module__": "ibc.core.commitment.v1.commitment_pb2"
# @@protoc_insertion_point(class_scope:ibc.core.commitment.v1.MerkleProof)
},
)
_sym_db.RegisterMessage(MerkleProof)
DESCRIPTOR._options = None
_MERKLEROOT._options = None
_MERKLEPREFIX.fields_by_name["key_prefix"]._options = None
_MERKLEPATH.fields_by_name["key_path"]._options = None
_MERKLEPATH._options = None
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
import operator
from typing import Any, Callable, List, Optional, Type, Union
from sqlalchemy.inspection import inspect
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.relationships import RelationshipProperty
from sqlalchemy.sql import functions
from sqlalchemy.sql.expression import (
BinaryExpression,
BindParameter,
BooleanClauseList,
ClauseElement,
ColumnClause,
False_,
Null,
True_,
and_,
cast,
extract,
false,
literal,
null,
or_,
true,
)
from sqlalchemy.types import Date, Time
from odata_query import ast, exceptions as ex, typing, utils, visitor
from . import functions_ext
class AstToSqlAlchemyClauseVisitor(visitor.NodeVisitor):
"""
:class:`NodeVisitor` that transforms an :term:`AST` into a SQLAlchemy query
filter clause.
Args:
root_model: The root model of the query.
"""
def __init__(self, root_model: Type[DeclarativeMeta]):
self.root_model = root_model
self.join_relationships: List[InstrumentedAttribute] = []
def visit_Identifier(self, node: ast.Identifier) -> ColumnClause:
":meta private:"
try:
return getattr(self.root_model, node.name)
except AttributeError:
raise ex.InvalidFieldException(node.name)
def visit_Attribute(self, node: ast.Attribute) -> ColumnClause:
":meta private:"
rel_attr = self.visit(node.owner)
# Owner is an InstrumentedAttribute, hopefully of a relationship.
# But we need the model pointed to by the relationship.
prop_inspect = inspect(rel_attr).property
if not isinstance(prop_inspect, RelationshipProperty):
# TODO: new exception:
raise ValueError(f"Not a relationship: {node.owner}")
self.join_relationships.append(rel_attr)
# We'd like to reference the column on the related class:
owner_cls = prop_inspect.entity.class_
try:
return getattr(owner_cls, node.attr)
except AttributeError:
raise ex.InvalidFieldException(node.attr)
def visit_Null(self, node: ast.Null) -> Null:
":meta private:"
return null()
def visit_Integer(self, node: ast.Integer) -> BindParameter:
":meta private:"
return literal(node.py_val)
def visit_Float(self, node: ast.Float) -> BindParameter:
":meta private:"
return literal(node.py_val)
def visit_Boolean(self, node: ast.Boolean) -> Union[True_, False_]:
":meta private:"
if node.val == "true":
return true()
else:
return false()
def visit_String(self, node: ast.String) -> BindParameter:
":meta private:"
return literal(node.py_val)
def visit_Date(self, node: ast.Date) -> BindParameter:
":meta private:"
try:
return literal(node.py_val)
except ValueError:
raise ex.ValueException(node.val)
def visit_DateTime(self, node: ast.DateTime) -> BindParameter:
":meta private:"
try:
return literal(node.py_val)
except ValueError:
raise ex.ValueException(node.val)
def visit_Time(self, node: ast.Time) -> BindParameter:
":meta private:"
try:
return literal(node.py_val)
except ValueError:
raise ex.ValueException(node.val)
def visit_Duration(self, node: ast.Duration) -> BindParameter:
":meta private:"
return literal(node.py_val)
def visit_GUID(self, node: ast.GUID) -> BindParameter:
":meta private:"
return literal(node.val)
def visit_List(self, node: ast.List) -> list:
":meta private:"
return [self.visit(n) for n in node.val]
def visit_Add(self, node: ast.Add) -> Callable[[Any, Any], Any]:
":meta private:"
return operator.add
def visit_Sub(self, node: ast.Sub) -> Callable[[Any, Any], Any]:
":meta private:"
return operator.sub
def visit_Mult(self, node: ast.Mult) -> Callable[[Any, Any], Any]:
":meta private:"
return operator.mul
def visit_Div(self, node: ast.Div) -> Callable[[Any, Any], Any]:
":meta private:"
return operator.truediv
def visit_Mod(self, node: ast.Mod) -> Callable[[Any, Any], Any]:
":meta private:"
return operator.mod
def visit_BinOp(self, node: ast.BinOp) -> Any:
":meta private:"
left = self.visit(node.left)
right = self.visit(node.right)
op = self.visit(node.op)
return op(left, right)
def visit_Eq(
self, node: ast.Eq
) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
":meta private:"
return operator.eq
def visit_NotEq(
self, node: ast.NotEq
) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
":meta private:"
return operator.ne
def visit_Lt(
self, node: ast.Lt
) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
":meta private:"
return operator.lt
def visit_LtE(
self, node: ast.LtE
) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
":meta private:"
return operator.le
def visit_Gt(
self, node: ast.Gt
) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
":meta private:"
return operator.gt
def visit_GtE(
self, node: ast.GtE
) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
":meta private:"
return operator.ge
def visit_In(
self, node: ast.In
) -> Callable[[ClauseElement, ClauseElement], BinaryExpression]:
":meta private:"
return lambda a, b: a.in_(b)
def visit_Compare(self, node: ast.Compare) -> BinaryExpression:
":meta private:"
left = self.visit(node.left)
right = self.visit(node.right)
op = self.visit(node.comparator)
# If a node is a `relationship` representing a single foreign key,
# the client meant to compare the foreign key, not the related object.
# E.g. In "blogpost/author eq 1", left should be "blogpost/author_id"
left = self._maybe_sub_relationship_with_foreign_key(left)
right = self._maybe_sub_relationship_with_foreign_key(right)
return op(left, right)
def visit_And(
self, node: ast.And
) -> Callable[[ClauseElement, ClauseElement], BooleanClauseList]:
":meta private:"
return and_
def visit_Or(
self, node: ast.Or
) -> Callable[[ClauseElement, ClauseElement], BooleanClauseList]:
":meta private:"
return or_
def visit_BoolOp(self, node: ast.BoolOp) -> BooleanClauseList:
":meta private:"
left = self.visit(node.left)
right = self.visit(node.right)
op = self.visit(node.op)
return op(left, right)
def visit_Not(self, node: ast.Not) -> Callable[[ClauseElement], ClauseElement]:
":meta private:"
return operator.invert
def visit_UnaryOp(self, node: ast.UnaryOp) -> ClauseElement:
":meta private:"
mod = self.visit(node.op)
val = self.visit(node.operand)
try:
return mod(val)
except TypeError:
raise ex.TypeException(node.op.__class__.__name__, val)
def visit_Call(self, node: ast.Call) -> ClauseElement:
":meta private:"
try:
handler = getattr(self, "func_" + node.func.name.lower())
except AttributeError:
raise ex.UnsupportedFunctionException(node.func.name)
return handler(*node.args)
def visit_CollectionLambda(self, node: ast.CollectionLambda) -> ClauseElement:
":meta private:"
owner_prop = self.visit(node.owner)
collection_model = inspect(owner_prop).property.entity.class_
if node.lambda_:
# For the lambda, we want to strip the identifier off, because
# we will execute this as a subquery in the wanted model's context.
subq_ast = utils.expression_relative_to_identifier(
node.lambda_.identifier, node.lambda_.expression
)
subq_transformer = self.__class__(collection_model)
subquery_filter = subq_transformer.visit(subq_ast)
else:
subquery_filter = None
if isinstance(node.operator, ast.Any):
return owner_prop.any(subquery_filter)
else:
# For an ALL query, invert both the filter and the EXISTS:
if node.lambda_:
subquery_filter = ~subquery_filter
return ~owner_prop.any(subquery_filter)
def func_contains(self, field: ast._Node, substr: ast._Node) -> ClauseElement:
":meta private:"
return self._substr_function(field, substr, "contains")
def func_startswith(self, field: ast._Node, substr: ast._Node) -> ClauseElement:
":meta private:"
return self._substr_function(field, substr, "startswith")
def func_endswith(self, field: ast._Node, substr: ast._Node) -> ClauseElement:
":meta private:"
return self._substr_function(field, substr, "endswith")
def func_length(self, arg: ast._Node) -> functions.Function:
":meta private:"
return functions.char_length(self.visit(arg))
def func_concat(self, *args: ast._Node) -> functions.Function:
":meta private:"
return functions.concat(*[self.visit(arg) for arg in args])
def func_indexof(self, first: ast._Node, second: ast._Node) -> functions.Function:
":meta private:"
# TODO: Highly dialect dependent, might want to implement in GenericFunction:
# Subtract 1 because OData is 0-indexed while SQL is 1-indexed
return functions_ext.strpos(self.visit(first), self.visit(second)) - 1
def func_substring(
self, fullstr: ast._Node, index: ast._Node, nchars: Optional[ast._Node] = None
) -> functions.Function:
":meta private:"
# Add 1 because OData is 0-indexed while SQL is 1-indexed
if nchars:
return functions_ext.substr(
self.visit(fullstr),
self.visit(index) + 1,
self.visit(nchars),
)
else:
return functions_ext.substr(self.visit(fullstr), self.visit(index) + 1)
def func_matchespattern(
self, field: ast._Node, pattern: ast._Node
) -> functions.Function:
":meta private:"
identifier = self.visit(field)
return identifier.regexp_match(self.visit(pattern))
def func_tolower(self, field: ast._Node) -> functions.Function:
":meta private:"
return functions_ext.lower(self.visit(field))
def func_toupper(self, field: ast._Node) -> functions.Function:
":meta private:"
return functions_ext.upper(self.visit(field))
def func_trim(self, field: ast._Node) -> functions.Function:
":meta private:"
return functions_ext.ltrim(functions_ext.rtrim(self.visit(field)))
def func_date(self, field: ast._Node) -> ClauseElement:
":meta private:"
return cast(self.visit(field), Date)
def func_day(self, field: ast._Node) -> functions.Function:
    ":meta private:"
    # SQLAlchemy's extract() takes the field NAME first: extract("day", expr).
    # The arguments were reversed before, producing invalid SQL.
    return extract("day", self.visit(field))

def func_hour(self, field: ast._Node) -> functions.Function:
    ":meta private:"
    return extract("hour", self.visit(field))

def func_minute(self, field: ast._Node) -> functions.Function:
    ":meta private:"
    return extract("minute", self.visit(field))

def func_month(self, field: ast._Node) -> functions.Function:
    ":meta private:"
    return extract("month", self.visit(field))

def func_now(self) -> functions.Function:
    ":meta private:"
    return functions.now()

def func_second(self, field: ast._Node) -> functions.Function:
    ":meta private:"
    return extract("second", self.visit(field))

def func_time(self, field: ast._Node) -> functions.Function:
    ":meta private:"
    return cast(self.visit(field), Time)

def func_year(self, field: ast._Node) -> functions.Function:
    ":meta private:"
    return extract("year", self.visit(field))
def func_ceiling(self, field: ast._Node) -> functions.Function:
":meta private:"
return functions_ext.ceil(self.visit(field))
def func_floor(self, field: ast._Node) -> functions.Function:
":meta private:"
return functions_ext.floor(self.visit(field))
def func_round(self, field: ast._Node) -> functions.Function:
":meta private:"
return functions_ext.round(self.visit(field))
def _substr_function(
self, field: ast._Node, substr: ast._Node, func: str
) -> ClauseElement:
":meta private:"
typing.typecheck(field, (ast.Identifier, ast.String), "field")
typing.typecheck(substr, ast.String, "substring")
identifier = self.visit(field)
substring = self.visit(substr)
op = getattr(identifier, func)
return op(substring)
def _maybe_sub_relationship_with_foreign_key(
self, elem: ClauseElement
) -> ClauseElement:
"""
If the given ClauseElement is a `relationship` with a single ForeignKey,
replace it with the `ForeignKey` itself.
:meta private:
"""
try:
prop_inspect = inspect(elem).property
if isinstance(prop_inspect, RelationshipProperty):
foreign_key = prop_inspect._calculated_foreign_keys
if len(foreign_key) == 1:
return next(iter(foreign_key))
except Exception:
pass
return elem
|
nilq/baby-python
|
python
|
'''
*File: domain_restriction.py
*Author: Nicholas Mattei (nicholas.mattei@nicta.com.au)
*Date: March 18, 2014
*
* Copyright (c) 2014, Nicholas Mattei and NICTA
* All rights reserved.
*
* Developed by: Nicholas Mattei
* NICTA
* http://www.nickmattei.net
* http://www.preflib.org
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NICTA nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICTA ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NICTA BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
About
--------------------
This file tests a profile for being single-peaked.
'''
import sys
import copy
import glob
from preflibtools import io
from preflibtools import generate_profiles
# Implement of the Single Peaked Consistancy Algorithm detailed in
# B. Escoffier, J. Lang, and M. Ozturk, "Single-peaked consistency and its complexity".
# 2008 European Conference on Artificial Intelligence.
#
# Intuitively, this algorithm finds an axis that is single-peaked with respect to
# the rmaps that are passed in, or it returns an empty axis (vector) of the candidates.
# This is achieved in time O(|rmaps|*|candmap|).
#
# Note that this algorithm only works for STRICT preferences. If a non-strict
# set of rankmaps is passed in, an error is returned.
def is_single_peaked(rmaps, candmap):
    """
    Decide single-peaked consistency for a profile of strict rankmaps.

    Returns an axis (list of candidates) with respect to which every vote in
    `rmaps` is single peaked, or an empty list if no such axis exists.
    Implements the Escoffier/Lang/Ozturk (ECAI 2008) algorithm referenced in
    the module comments above.
    """
    # Strict preferences are required: every vote must rank every candidate.
    for current in rmaps:
        if len(current) != len(candmap):
            # NOTE(review): exit() terminates the whole interpreter; raising
            # ValueError would be friendlier to library callers — confirm.
            print("is_single_peaked called with non-strict preferences")
            exit()
    # `orders` is progressively restricted as candidates are placed;
    # `fullorders` keeps the unrestricted votes for the index() tests below.
    orders = order_vectors(rmaps)
    fullorders = order_vectors(rmaps)
    # Build the order...
    leftside = []
    rightside = []
    last_cands = last_set(orders)
    # Only one last makes no constraints so iterate...
    while (len(last_cands) == 1):
        # Keep the two ends of the axis roughly balanced.
        if len(leftside) < len(rightside):
            leftside.append(last_cands[0])
        else:
            rightside.insert(0,last_cands[0])
        orders = remove_cands(orders, last_cands)
        last_cands = last_set(orders)
    # Only break if we have != 1 last candidate; either we quit, or we put one on each end.
    if len(last_cands) > 2:
        return []
    else:
        leftside.append(last_cands[0])
        rightside.insert(0,last_cands[1])
        orders = remove_cands(orders, last_cands)
    # While there are still unplaced candidates (not removed from every vote)
    while len(orders[0]) > 0:
        last_cands = last_set(orders)
        # Should never have more than 2...
        if len(last_cands) > 2:
            return []
        else:
            # Innermost placed candidate on each side of the partial axis.
            x_i = leftside[len(leftside)-1]
            x_j = rightside[0]
            # Check conditions outlined by Lang.
            # If L={x}, Case 3
            if len(last_cands) == 1:
                x = last_cands[0]
                # if x_i < x < x_j => leftside + x
                if any(o.index(x_j) < o.index(x) and o.index(x) < o.index(x_i) for o in fullorders):
                    leftside.append(x)
                # if x_j < x < x_i => x + right
                elif any(o.index(x) < o.index(x_j) and o.index(x_i) < o.index(x) for o in fullorders):
                    rightside.insert(0, x)
                # Otherwise it doesn't matter and we put it either place...
                else:
                    if len(leftside) < len(rightside):
                        leftside.append(x)
                    else:
                        rightside.insert(0,x)
                # Restrict...
                orders = remove_cands(orders, last_cands)
            # if L = {x, y}, Case 2c and 2d
            # if x_i < x < x_j < y ==> left+x and y+right
            # if x_j < x < x_i < y ==> left+y and x+right
            # if both, then contradiction...
            # if x_i < x < y < x_j ==> then this must be axis...
            # if x_j < y < x < x_i ==> then this must be axis...
            elif len(last_cands) == 2:
                C1 = False
                C2 = False
                x = last_cands[0]
                y = last_cands[1]
                x_i = leftside[len(leftside)-1]
                x_j = rightside[0]
                # Iterate over each of the orders and check for the C1 or C2 conditions or D1 or D2... Switch on these..
                for o in fullorders:
                    # Condition D1:
                    if o.index(x_i) > o.index(x) and o.index(x) > o.index(y) and o.index(y) > o.index(x_j):
                        # The axis is the current voter restricted to the remainder
                        temp_order = copy.copy(o)
                        temp_order = remove_cands([temp_order], list(set(leftside + rightside)))[0]
                        temp_order.reverse() ## Note that this reversed is the "increasing order of voter j"
                        social_axis = leftside + temp_order + rightside
                        if verify_orders_single_peaked_axis_strict(social_axis, fullorders):
                            return social_axis
                        else:
                            return []
                    # Condition D2:
                    if o.index(x_j) > o.index(y) and o.index(y) > o.index(x) and o.index(x) > o.index(x_i):
                        # The axis is the current voter restricted to the remainder
                        temp_order = copy.copy(o)
                        temp_order = remove_cands([temp_order], list(set(leftside + rightside)))[0]
                        social_axis = leftside + temp_order + rightside
                        if verify_orders_single_peaked_axis_strict(social_axis, fullorders):
                            return social_axis
                        else:
                            return []
                    # Condition C1:
                    if o.index(x_i) > o.index(x) and o.index(x) > o.index(x_j) and o.index(x_j) > o.index(y):
                        C1 = True
                    # Condition C2:
                    if o.index(x_j) > o.index(x) and o.index(x) > o.index(x_i) and o.index(x_i) > o.index(y):
                        C2 = True
                    # Short circuit if we have C1 and C2 at any point...
                    if C1 and C2:
                        return []
                # Processing C1 or C2 if necessary:
                if C1:
                    leftside.append(x)
                    rightside.insert(0,y)
                else: # Do C2 or it doesn't matter...
                    leftside.append(y)
                    rightside.insert(0,x)
                orders = remove_cands(orders, last_cands)
    # Leftside + Rightside must be the social axis
    social_axis = leftside+rightside
    if verify_orders_single_peaked_axis_strict(social_axis, fullorders):
        return social_axis
    else:
        return []
# Helper function to find last place candidates
def last_set(orders):
    """
    Return the distinct candidates ranked last in at least one order.

    Each element of `orders` is a list of candidates from most to least
    preferred; the last element of each is collected into a de-duplicated
    list. Returns an empty list when there are no (non-empty) orders.
    """
    if len(orders) > 0 and len(orders[0]) > 0:
        # Make and return the set of last place candidates
        last_cands = set()
        for vote in orders:
            last_cands.add(vote[len(vote)-1])
        return list(last_cands)
    # Fix: previously this path fell through and returned None, which breaks
    # callers that immediately take len() of the result. An empty list is the
    # backward-compatible "no last-place candidates" value.
    return []
# Helper function to compute the result of removing (set) of candidates from a list of orders.
def remove_cands(orders, cands_to_remove):
    """
    Project each order onto the candidates NOT in `cands_to_remove`.

    The input orders are left untouched; a new list of trimmed orders is
    returned. Each candidate in `cands_to_remove` must appear in every
    order (removal of a missing candidate raises ValueError).
    """
    reduced = []
    for vote in orders:
        trimmed = list(vote)
        for cand in cands_to_remove:
            trimmed.remove(cand)
        reduced.append(trimmed)
    return reduced
# Helper Function: Given cands --> rank, return a vector of unique vectors in the profile
# that are just the orders of the candidates with index 0 == most prefered.
def order_vectors(rmaps):
    """
    Convert rankmaps (candidate -> rank) into order vectors.

    Returns one list per rankmap containing the candidates sorted from most
    preferred (index 0) to least preferred.
    """
    rank_to_candidate = io.rankmap_convert_rank_to_candidate(rmaps)
    return [
        [c_map[rank] for rank in sorted(c_map.keys())]
        for c_map in rank_to_candidate
    ]
# Verify that a profile of strict orders is single peaked w.r.t. the passed axis
def verify_orders_single_peaked_axis_strict(axis, orders):
    """
    Verify that a profile of strict orders is single peaked w.r.t. `axis`.

    For each order, preferences must strictly decrease moving away from the
    peak (the order's top candidate) in both directions along the axis.
    Returns True when every order is compatible with the axis; prints the
    first offending order and returns False otherwise.
    """
    if len(orders) < 1 or len(axis) != len(orders[0]):
        return False
    # (Removed an unused `temporders` copy that the original built and never read.)
    for corder in orders:
        # Position of this order's peak (top choice) on the axis.
        split = axis.index(corder[0])
        # Reverse the left side so both sides read outward from the peak.
        left = axis[:split]
        left.reverse()
        right = axis[split:]
        # The order restricted to the left side must equal the left side
        # element for element (i.e. closer to the peak => more preferred).
        restricted = remove_cands([corder], list(set(axis) - set(left)))[0]
        if len(left) > 0 and not all(restricted[i] == left[i] for i in range(len(left))):
            print("Axis is not compatiable with order: " + str(corder))
            return False
        # Same check for the right side (which includes the peak itself).
        restricted = remove_cands([corder], list(set(axis) - set(right)))[0]
        if not all(restricted[i] == right[i] for i in range(len(right))):
            print("Axis is not compatiable with order: " + str(corder))
            return False
    return True
# Generate a random instance and test it for SP -- Output the axis if it is...
if __name__ == '__main__':
    # Build a random single-peaked profile and confirm the test recovers an axis.
    ncand = 3
    nvoters = 100
    candmap = generate_profiles.gen_cand_map(ncand)
    rankmaps, rankmapcounts = generate_profiles.gen_single_peaked_impartial_culture_strict(nvoters, candmap)
    io.pp_profile_toscreen(candmap, rankmaps, rankmapcounts)
    social_axis = is_single_peaked(rankmaps, candmap)
    if social_axis != []:
        print("Single Peaked w.r.t " + str(social_axis))
    else:
        print("Not Single Peaked")
    # Test all the SOC's... for fun....
    files = glob.glob("./soc/*.soc")
    total = 0
    totalSP = 0
    for cfile in sorted(files):
        print("Testing: " + str(cfile))
        # Fix: use a context manager so the file is closed even when parsing
        # raises (the original only closed the handle on the success path).
        with open(cfile, "r") as inf:
            candmap, rankmaps, rankmapcounts, numvoters = io.read_election_file(inf)
        total += 1
        social_axis = is_single_peaked(rankmaps, candmap)
        if social_axis != []:
            print("Single Peaked w.r.t " + str(social_axis))
            totalSP += 1
        else:
            print("Not Single Peaked")
    print("Parsed " + str(total) + " SOC files")
    print("Exactly " + str(totalSP) + " were single peaked")
|
nilq/baby-python
|
python
|
import tensorflow as tf
from storage import run_dir
from train import train
from model import model
from predict import predict
# NOTE(review): rebinding `model` here shadows the imported `model` factory
# function with the constructed instance — presumably intentional for this
# one-shot script, but it prevents building a second model later; confirm.
model = model()
train(model)
|
nilq/baby-python
|
python
|
import os
import re
import codecs
from setuptools import setup, find_packages
current_path = os.path.abspath(os.path.dirname(__file__))
def read_file(*parts):
    """Return the UTF-8 text of the file at ``current_path/*parts``."""
    target = os.path.join(current_path, *parts)
    with codecs.open(target, 'r', 'utf8') as reader:
        return reader.read()
def get_requirements(*parts):
    """Return the stripped lines of the file at ``current_path/*parts``."""
    with codecs.open(os.path.join(current_path, *parts), 'r', 'utf8') as reader:
        return [line.strip() for line in reader.readlines()]
def find_version(*file_paths):
    """Extract ``__version__`` from the given module file, or raise RuntimeError."""
    contents = read_file(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError('Unable to find version string.')
    return match.group(1)
setup(
    name='keras-trans-mask',
    # Version is read from the package's __init__.py rather than duplicated here.
    version=find_version('keras_trans_mask', '__init__.py'),
    packages=find_packages(),
    url='https://github.com/CyberZHG/keras-trans-mask',
    license='MIT',
    author='CyberZHG',
    author_email='CyberZHG@users.noreply.github.com',
    description='Transfer masking in Keras',
    long_description=read_file('README.md'),
    long_description_content_type='text/markdown',
    install_requires=get_requirements('requirements.txt'),
    # NOTE(review): setuptools documents `classifiers` as a list; a tuple
    # works in practice but a list is the conventional form — confirm.
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ),
)
|
nilq/baby-python
|
python
|
##!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import math
import random
from hashlib import sha256
from pathlib import Path
from typing import Optional, Tuple, Union
import aiohttp
import fsspec
from fsspec.core import url_to_fs
###############################################################################
log = logging.getLogger(__name__)

###############################################################################

# Maximum thumbnail dimensions, in pixels; larger frames are scaled down via
# find_proper_resize_ratio to fit within this bounding box.
MAX_THUMBNAIL_HEIGHT = 540
MAX_THUMBNAIL_WIDTH = 960
def get_media_type(uri: str) -> Optional[str]:
    """
    Get the IANA media type for the provided URI.

    If one could not be found, return None.

    Parameters
    ----------
    uri: str
        The URI to get the IANA media type for.

    Returns
    -------
    mtype: Optional[str]:
        The found matching IANA media type.
    """
    import dask.dataframe as dd

    # Media types retrieved from:
    # http://www.iana.org/assignments/media-types/media-types.xhtml
    media_types = dd.read_csv(
        str(Path(__file__).parent / "resources" / "content-types-*.csv")
    )

    # The media type is keyed off the URI's final extension.
    suffix = uri.split(".")[-1]

    # Look the extension up in the table.
    matches = media_types[media_types["Name"] == suffix].compute()

    # A unique match is required; ambiguous or unknown extensions yield None.
    if len(matches) == 1:
        return matches["Template"].values[0]
    return None
def resource_copy(
    uri: str,
    dst: Optional[Union[str, Path]] = None,
    overwrite: bool = False,
) -> str:
    """
    Copy a resource (local or remote) to a local destination on the machine.

    Parameters
    ----------
    uri: str
        The uri for the resource to copy.
    dst: Optional[Union[str, Path]]
        A specific destination to where the copy should be placed. If None provided
        stores the resource in the current working directory.
    overwrite: bool
        Boolean value indicating whether or not to overwrite a local resource with
        the same name if it already exists.

    Returns
    -------
    saved_path: str
        The path of where the resource ended up getting copied to.

    Raises
    ------
    FileExistsError
        The destination file already exists and `overwrite` is False.
    """
    # Default destination: the resource's basename in the current directory.
    if dst is None:
        dst = uri.split("/")[-1]

    # Ensure dst doesn't exist
    dst = Path(dst).resolve()
    if dst.is_dir():
        # Destination is a directory: store under the resource's basename.
        dst = dst / uri.split("/")[-1]
    if dst.is_file() and not overwrite:
        raise FileExistsError(dst)

    # Open requests connection to uri as a stream
    log.info(f"Beginning resource copy from: {uri}")

    # Get file system
    try:
        kwargs = {}

        # Set custom timeout for http resources (30 minutes for large files).
        if uri.startswith("http"):
            kwargs = {"timeout": aiohttp.ClientTimeout(total=1800)}

        # TODO: Add explicit use of GCS credentials until public read is fixed
        fs, remote_path = url_to_fs(uri, **kwargs)
        fs.get(remote_path, str(dst))

        log.info(f"Completed resource copy from: {uri}")
        log.info(f"Stored resource copy: {dst}")
        return str(dst)

    except Exception as e:
        # Log context for the failure, then re-raise the original error unchanged.
        log.error(
            f"Something went wrong during resource copy. "
            f"Attempted copy from: '{uri}', resulted in error."
        )
        raise e
def split_audio(
    video_read_path: str,
    audio_save_path: str,
    overwrite: bool = False,
) -> Tuple[str, str, str]:
    """
    Split and store the audio from a video file using ffmpeg.

    Parameters
    ----------
    video_read_path: str
        Path to the video to split the audio from.
    audio_save_path: str
        Path to where the audio should be stored.
    overwrite: bool
        Whether to overwrite an existing file at audio_save_path.
        Default: False (raise FileExistsError instead)

    Returns
    -------
    resolved_audio_save_path: str
        Path to where the split audio file was saved.
    ffmpeg_stdout_path: str
        Path to the ffmpeg stdout log file.
    ffmpeg stderr path: str
        Path to the ffmpeg stderr log file.
    """
    import ffmpeg

    # Check paths (strict=True raises if the source video does not exist).
    resolved_video_read_path = Path(video_read_path).resolve(strict=True)
    resolved_audio_save_path = Path(audio_save_path).resolve()
    if resolved_audio_save_path.is_file() and not overwrite:
        raise FileExistsError(resolved_audio_save_path)
    if resolved_audio_save_path.is_dir():
        raise IsADirectoryError(resolved_audio_save_path)

    # Construct ffmpeg dag: mono, 16 kHz, 16-bit PCM WAV output.
    stream = ffmpeg.input(resolved_video_read_path)
    stream = ffmpeg.output(
        stream,
        filename=resolved_audio_save_path,
        format="wav",
        acodec="pcm_s16le",
        ac=1,
        ar="16k",
    )

    # Run dag
    log.debug(f"Beginning audio separation for: {video_read_path}")
    out, err = ffmpeg.run(stream, capture_stdout=True, capture_stderr=True)
    log.debug(f"Completed audio separation for: {video_read_path}")
    log.debug(f"Stored audio: {audio_save_path}")

    # Store ffmpeg logs next to the audio file, with .out/.err suffixes.
    ffmpeg_stdout_path = resolved_audio_save_path.with_suffix(".out")
    ffmpeg_stderr_path = resolved_audio_save_path.with_suffix(".err")
    with open(ffmpeg_stdout_path, "wb") as write_out:
        write_out.write(out)
    with open(ffmpeg_stderr_path, "wb") as write_err:
        write_err.write(err)

    return (
        str(resolved_audio_save_path),
        str(ffmpeg_stdout_path),
        str(ffmpeg_stderr_path),
    )
def get_static_thumbnail(
    video_path: str, session_content_hash: str, seconds: int = 30
) -> str:
    """
    A function that produces a png thumbnail image from a video file

    Parameters
    ----------
    video_path: str
        The URL of the video from which the thumbnail will be produced
    session_content_hash: str
        The video content hash. This will be used in the produced image file's name
    seconds: int
        Determines after how many seconds a frame will be selected to produce the
        thumbnail. The default is 30 seconds

    Returns
    -------
    str: cover_name
        The name of the thumbnail file:
        Always session_content_hash + "-static-thumbnail.png"
        (an empty string is returned when the video has no usable frames)
    """
    import imageio
    from PIL import Image

    reader = imageio.get_reader(video_path)
    png_path = ""
    if reader.get_length() > 1:
        png_path = f"{session_content_hash}-static-thumbnail.png"
        image = None
        try:
            # Frame index at the requested timestamp (fps * seconds).
            frame_to_take = math.floor(reader.get_meta_data()["fps"] * seconds)
            image = reader.get_data(frame_to_take)
        except (ValueError, IndexError):
            # Video shorter than `seconds`: re-open the reader and fall back
            # to the first frame.
            reader = imageio.get_reader(video_path)
            image = reader.get_data(0)
        # shape[0] is the frame height, shape[1] the width.
        final_ratio = find_proper_resize_ratio(image.shape[0], image.shape[1])
        if final_ratio < 1:
            # Downscale to fit within MAX_THUMBNAIL_WIDTH x MAX_THUMBNAIL_HEIGHT.
            image = Image.fromarray(image).resize(
                (
                    math.floor(image.shape[1] * final_ratio),
                    math.floor(image.shape[0] * final_ratio),
                )
            )
        imageio.imwrite(png_path, image)
    return png_path
def get_hover_thumbnail(
    video_path: str,
    session_content_hash: str,
    num_frames: int = 10,
    duration: float = 6.0,
) -> str:
    """
    A function that produces a gif hover thumbnail from an mp4 video file

    Parameters
    ----------
    video_path: str
        The URL of the video from which the thumbnail will be produced
    session_content_hash: str
        The video content hash. This will be used in the produced image file's name
    num_frames: int
        Determines the number of frames in the thumbnail
    duration: float
        Runtime of the produced GIF.
        Default: 6.0 seconds

    Returns
    -------
    str: cover_name
        The name of the thumbnail file:
        Always session_content_hash + "-hover-thumbnail.gif"
        (an empty string is returned when the video has no usable frames)
    """
    import imageio
    import numpy as np
    from PIL import Image

    reader = imageio.get_reader(video_path)
    gif_path = ""
    if reader.get_length() > 1:
        gif_path = f"{session_content_hash}-hover-thumbnail.gif"
        # Get first frame to determine the (constant) frame dimensions.
        sample = reader.get_data(0)
        height = sample.shape[0]
        width = sample.shape[1]
        final_ratio = find_proper_resize_ratio(height, width)
        with imageio.get_writer(gif_path, mode="I", fps=(num_frames / duration)) as writer:
            selected_frames = 0
            for frame in reader:
                # 1% chance to use the frame — a cheap random sample spread
                # across the video. NOTE(review): output is non-deterministic,
                # and short videos may yield fewer than num_frames frames.
                if random.random() > 0.99:
                    image = Image.fromarray(frame)
                    if final_ratio < 1:
                        # Downscale to fit within the thumbnail bounding box.
                        image = image.resize(
                            (
                                math.floor(width * final_ratio),
                                math.floor(height * final_ratio),
                            )
                        )
                    final_image = np.asarray(image).astype(np.uint8)
                    writer.append_data(final_image)
                    selected_frames += 1
                    if selected_frames >= num_frames:
                        break
    return gif_path
def find_proper_resize_ratio(height: int, width: int) -> float:
    """
    Return the proper ratio to resize a thumbnail greater than 960 x 540 pixels.

    Parameters
    ----------
    height: int
        The height, in pixels, of the thumbnail to be resized.
    width: int
        The width, in pixels, of the thumbnail to be resized.

    Returns
    -------
    final_ratio: float
        The ratio by which the thumbnail will be resized.
        If the ratio is less than 1, the thumbnail is too large and should be resized
        by a factor of final_ratio.
        If the ratio is greater than or equal to 1, the thumbnail is not too large and
        should not be resized.
    """
    if height > MAX_THUMBNAIL_HEIGHT or width > MAX_THUMBNAIL_WIDTH:
        # Fix: use the SMALLER of the two shrink ratios so that BOTH dimensions
        # fit within the maximums. The previous code picked the larger ratio,
        # which could leave the other dimension oversized (e.g. a 600x1920
        # frame scaled by the height ratio would still be 1728px wide).
        return min(MAX_THUMBNAIL_HEIGHT / height, MAX_THUMBNAIL_WIDTH / width)
    # Sentinel >= 1 meaning "no resize needed" (callers only test `ratio < 1`).
    return 2
def hash_file_contents(uri: str, buffer_size: int = 2 ** 16) -> str:
    """
    Return the SHA256 hash of a file's content.

    Parameters
    ----------
    uri: str
        The uri for the file to hash.
    buffer_size: int
        The number of bytes to read at a time.
        Default: 2^16 (64KB)

    Returns
    -------
    hash: str
        The SHA256 hash for the file contents.
    """
    digest = sha256()
    with fsspec.open(uri, "rb") as open_resource:
        # Stream the file in fixed-size chunks to keep memory bounded;
        # iteration stops when a read returns the empty-bytes sentinel.
        for block in iter(lambda: open_resource.read(buffer_size), b""):
            digest.update(block)
    return digest.hexdigest()
def convert_video_to_mp4(video_filepath: str) -> str:
    """
    Converts a video to an equivalent MP4 file.

    Parameters
    ----------
    video_filepath: str
        The filepath of the video to convert.

    Returns
    -------
    mp4_filepath: str
        The filepath of the converted MP4 video.
    """
    import ffmpeg

    # Same path with an .mp4 extension, alongside the source file.
    mp4_filepath = str(Path(video_filepath).with_suffix(".mp4"))

    # Transcode, replacing any existing output file.
    stream = ffmpeg.input(video_filepath)
    stream = stream.output(mp4_filepath)
    stream.overwrite_output().run()

    log.info("Finished converting {} to mp4".format(video_filepath))
    return mp4_filepath
def generate_file_storage_name(file_uri: str, suffix: str) -> str:
    """
    Generate a filename using the hash of the file contents and some provided suffix.

    Parameters
    ----------
    file_uri: str
        The URI to the file to hash.
    suffix: str
        The suffix to append to the hash as a part of the filename.

    Returns
    -------
    dst: str
        The name of the file as it should be on Google Cloud Storage.
    """
    # Renamed the local from `hash` to avoid shadowing the builtin hash().
    content_hash = hash_file_contents(file_uri)
    return f"{content_hash}-{suffix}"
|
nilq/baby-python
|
python
|
from subprocess import call
import glob
# Emit a markdown-style section header for every sample directory, followed by
# blank lines per contained PNG.
dirnames = glob.glob("samples/*")
for d in dirnames:
    images = glob.glob(d+"/*.png")
    print("")
    print("### "+d)
    images.sort()
    for image in images:
        # NOTE(review): the loop variable `image` is never used — a line
        # printing the image path/link was presumably intended; confirm.
        print("")
        print("")
nilq/baby-python
|
python
|
import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
# from kivy.garden
from kivy.uix.textinput import TextInput
import time
from Code.Scripts.predict import predict
from kivy.properties import StringProperty,ColorProperty,NumericProperty,BooleanProperty
from Code.Scripts.Analysis import count_tweets,count_unique_tweets
kivy.require("1.10.0")
loginids = {"yash":"yash","vivek":"vivek","animesh":"animesh"}
class HomeLogoImage(Image):
    """Logo image on the home screen (no added behavior; presumably styled in the .kv layout)."""
    pass
class FeedbackScreen(Screen):
    """Feedback form screen: four free-text answers saved to a timestamped text file."""

    # Widget state flags bound from the .kv layout.
    button_visible = NumericProperty()
    label_visible = NumericProperty()
    button_disabled = BooleanProperty()
    # Current text of each feedback field (pre-filled with a prompt).
    entered_text_nav = StringProperty()
    entered_text_cost = StringProperty()
    entered_text_response = StringProperty()
    entered_text_others = StringProperty()

    def __init__(self, **kwargs):
        super(FeedbackScreen, self).__init__(**kwargs)
        self.entered_text_nav = "Kindly rate us and let us know your valuable feedback"
        self.entered_text_cost = "Kindly rate us and let us know your valuable feedback"
        self.entered_text_response = "Kindly rate us and let us know your valuable feedback"
        self.entered_text_others = "Kindly rate us and let us know your valuable feedback"
        self.label_visible = 0
        self.button_visible = 1
        self.button_disabled = False

    def submit(self,nav,cost,response,others):
        """Write the four answers to a file named by the current unix time,
        then swap the submit button for a confirmation label.

        NOTE(review): the save directory is a hard-coded absolute Windows
        path — consider deriving it from the project root instead.
        """
        print("Pressed")
        text = ""
        text += "nav : "+nav
        text += "\ncost : "+cost
        text += "\nresponse : "+response
        text += "\nothers : "+others
        file_name = str(int(time.time()))+".txt"
        with open("C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\Feedback\\"+file_name, 'w') as txt_file:
            txt_file.writelines(text)
        self.label_visible = 1
        self.button_visible = 0
        self.button_disabled = True

    # The clear_* methods empty a field; presumably wired to focus events in
    # the .kv layout — confirm.
    def clear_text_others(self):
        self.entered_text_others = ""

    def clear_text_response(self):
        self.entered_text_response = ""

    def clear_text_cost(self):
        self.entered_text_cost = ""

    def clear_text_nav(self):
        self.entered_text_nav = ""
class TextInputFeedbackScreen(TextInput):
    """Text input used on the feedback screen (no added behavior; presumably styled in the .kv layout)."""
    pass
class AnalysisScreen(Screen):
    """Dashboard screen showing tweet-collection statistics."""

    # Kivy StringProperties bound to labels in the .kv layout.
    tweets_collected = StringProperty()
    unique_tweets = StringProperty()
    positives = StringProperty()
    negatives = StringProperty()

    def __init__(self, **kwargs):
        super(AnalysisScreen,self).__init__(**kwargs)
        # Display zeros until the first refresh.
        self.unique_tweets = '0'
        self.tweets_collected = '0'
        self.positives = '0'
        self.negatives = '0'

    def refresh_values(self):
        """Re-query tweet counts; sentiment tallies are not computed yet ('no data')."""
        print("Refreshed")
        self.tweets_collected = str(count_tweets())
        self.unique_tweets = str(count_unique_tweets())
        self.negatives = 'no data'
        self.positives = 'no data'
        print(self.tweets_collected)
        print(self.unique_tweets)
class LoginScreen(Screen):
    """Login screen (no added behavior; presumably defined in the .kv layout)."""
    pass

class LoginScreenTextInput(TextInput):
    """Text input on the login screen (no added behavior; presumably styled in the .kv layout)."""
    pass

class LoginLogoImage(Image):
    """Logo image on the login screen (no added behavior; presumably styled in the .kv layout)."""
    pass

class LoginScreenLabel(Label):
    """Label on the login screen (no added behavior; presumably styled in the .kv layout)."""
    pass
class LoginScreenButton(Button):
    """Login submit button."""

    def __init__(self,**kwargs):
        # NOTE(review): this __init__ only delegates and could be removed.
        super(LoginScreenButton,self).__init__(**kwargs)

    def pressed(self,user_id,password):
        """Click handler.

        NOTE(review): the credentials are ignored — validation against the
        module-level `loginids` dict appears unimplemented; confirm intent.
        """
        print("pressed")
class AboutUsScreenDescriptionLabel(Label):
    """Description label on the About Us screen (no added behavior; presumably styled in the .kv layout)."""
    pass

class AboutUsScreenLabel(Label):
    """Heading label on the About Us screen (no added behavior; presumably styled in the .kv layout)."""
    pass

class AboutUsScreen(Screen):
    """About Us screen (no added behavior; presumably defined in the .kv layout)."""
    pass

class PopularityGraphScreen(Screen):
    """Popularity graph screen (no added behavior; presumably defined in the .kv layout)."""
    pass

class TopicLabel(Label):
    """Topic heading label (no added behavior; presumably styled in the .kv layout)."""
    pass
class WordCloudScreen(Screen):
    """Screen displaying a pre-generated word-cloud image per selected politician."""

    # Absolute path of the currently displayed word-cloud image (bound in .kv).
    # NOTE(review): hard-coded absolute Windows paths — consider deriving from
    # the project root.
    path_to_cloud = StringProperty()

    def __init__(self,**kwargs):
        super(WordCloudScreen,self).__init__(**kwargs)
        self.path_to_cloud = "C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\dummy_cloud.png"

    def on_select(self,text):
        """Swap the displayed cloud based on the selected name (case-insensitive match)."""
        print(text," selected")
        if "rahul" in text.lower():
            self.path_to_cloud = "C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\rahul_cloud.png"
        elif "modi" in text.lower():
            self.path_to_cloud = "C:\\Users\\Vivek Rao\\PycharmProjects\\Campaign-Assistant-master\\Code\\Resources\\modi_cloud.png"
class LabelFeedbackScreen(Label):
    """Label used on the feedback screen (no added behavior; presumably styled in the .kv layout)."""
    pass

class HomeScreen(Screen):
    """Main menu screen (no added behavior; presumably defined in the .kv layout)."""
    pass

class LogoImage(Image):
    """App logo image widget (no added behavior; presumably styled in the .kv layout)."""
    pass
class SentimentTestScreen(Screen):
    """Screen that classifies free-text input with the trained sentiment model."""

    # Result text and its display color (red-ish negative / green-ish positive).
    predicted_sentiment = StringProperty()
    predicted_sentiment_color = ColorProperty()

    def __init__(self, **kwargs):
        super(SentimentTestScreen, self).__init__(**kwargs)
        # White until a prediction has been made.
        self.predicted_sentiment_color = [1,1,1,1]

    def predict_sentiment(self,input_text):
        """Run the classifier: predict() yielding -1 maps to negative, 1 to
        positive, and anything else to "can not classify"."""
        predicted_value = predict(input_text)
        print("For the text input ",input_text,"; result is ",predicted_value)
        if predicted_value == -1:
            self.predicted_sentiment = "negative"
            self.predicted_sentiment_color = [1,0,0.4,1]
        elif predicted_value == 1:
            self.predicted_sentiment = "positive"
            self.predicted_sentiment_color = [0,1,0.6,1]
        else:
            self.predicted_sentiment = "can not classify"
class AnalysisScreenLabel(Label):
    """Label used on the analysis screen (no added behavior; presumably styled in the .kv layout)."""
    pass

class ScreenManagement(ScreenManager):
    """Root screen manager (no added behavior; transitions presumably defined in the .kv layout)."""
    pass

class HomeScreenButton(Button):
    """Button on the home screen (no added behavior; presumably styled/wired in the .kv layout)."""
    pass

class BackButton(Button):
    """Back-navigation button (no added behavior; presumably wired in the .kv layout)."""
    pass
kivy_file = Builder.load_file('CampaignAssistantGui.kv')
class MyApp(App):
    """Application entry point; serves the widget tree loaded from the .kv file."""

    def build(self):
        # The root widget was already constructed at import time by
        # Builder.load_file above.
        return kivy_file
# Start the GUI only when executed as a script (not on import).
if __name__ == "__main__":
    MyApp().run()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.