# -*- coding: utf-8 -*-
import re
import kim, dinle, islem, muhabbet  # local handler modules: who-is, listen, arithmetic, small talk

def __init__(giris):
    # Route the raw user input (giris, Turkish for "input") to the matching handler.
    # "<X> kim(dir)?" / "<X> nedir?" -> who/what question
    pattern_kim = re.match(r'(.+) ((kim(dir)?)|nedir)(\?)?$', giris, re.IGNORECASE)
    # "<X> dinle" -> "listen to <X>" request
    pattern_dinle = re.match(r'(.+) dinle$', giris, re.IGNORECASE)
    # arithmetic expression such as "3+4*2=?"
    pattern_islem = re.match(r'^([\d\(\)\-]+(\+|\-|\*|\/)[\d\(\)\+\-\*\/\.]+)(=)?(\?)?$', giris, re.IGNORECASE)
    if pattern_kim:
        return kim.__init__(pattern_kim.group(1))
    elif pattern_dinle:
        return dinle.__init__(pattern_dinle.group(1))
    elif pattern_islem:
        return islem.__init__(pattern_islem.group(1))
    else:
        # fall back to free-form conversation
        return muhabbet.__init__(giris)
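
# Illustrative usage sketch (assumption: kim/dinle/islem/muhabbet each expose a
# module-level __init__(text) handler that returns a reply string):
if __name__ == '__main__':
    for _giris in ('Ahmet kimdir?', 'Mozart dinle', '3+4*2=?', 'nasilsin'):
        print(__init__(_giris))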
from pirates.minigame import CannonDefenseGlobals
from pirates.pirate.CannonCamera import CannonCamera
from pirates.util.PythonUtilPOD import ParamObj
class CannonDefenseCamera(CannonCamera):
class ParamSet(CannonCamera.ParamSet):
Params = {
'minH': -60.0,
'maxH': 60.0,
'minP': -32.0,
'maxP': 2.0,
'sensitivityH': CannonDefenseGlobals.MOUSE_SENSITIVITY_H,
'sensitivityP': CannonDefenseGlobals.MOUSE_SENSITIVITY_P }
def __init__(self, params = None):
CannonCamera.__init__(self, params)
self.keyboardRate = CannonDefenseGlobals.KEYBOARD_RATE
def enterActive(self):
CannonCamera.enterActive(self)
camera.setPos(0, -20, 15)
camera.setP(-25)
def changeModel(self, prop):
if self.cannonProp:
if prop.ship:
self.reparentTo(prop.ship.avCannonView)
else:
self.reparentTo(prop.hNode)
self.cannonProp = prop
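
# Note (assumption): `camera` in enterActive is Panda3D's global camera NodePath,
# injected into builtins by ShowBase. Since the class follows the ParamObj pattern,
# the tunables above could in principle be overridden per instance, e.g.:
#   cam = CannonDefenseCamera(CannonDefenseCamera.ParamSet(minP=-40.0))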
class Tweet:
    def __repr__(self):
        # __repr__ must return a string; cast in case the tweet id is numeric
        return str(self.id)
def __init__(self, _id, text, created_at, hashtags, retweet_count, favorite_count, username, user_location):
self.id = _id
self.text = text
self.created_at = created_at
self.hashtags = hashtags
self.retweet_count = retweet_count
self.favorite_count = favorite_count
self.username = username
self.user_location = user_location
self.location = (None, None)
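
# Illustrative construction (made-up field values):
#   tweet = Tweet(1, 'hello world', '2020-01-01', ['#hi'], 0, 0, 'alice', 'Berlin')
#   tweet.location stays (None, None) until it is geocoded elsewhere.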
class DimensionError(Exception):
def __init__(self, message="Dimension mismatch"):
super(DimensionError, self).__init__(message)
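
# Illustrative use (hypothetical arrays a and b):
#   if a.shape != b.shape:
#       raise DimensionError(f"cannot combine {a.shape} with {b.shape}")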
import numpy as np
from task.Schema import Schema
# import pdb
class StimSampler():
'''
a sampler of sequences
'''
def __init__(
self,
n_param,
n_branch,
pad_len=0,
max_pad_len=None,
def_path=None,
def_prob=None,
def_tps=None,
key_rep_type='time',
rm_kv=False,
context_onehot=True,
context_dim=1,
context_drift=False,
n_rm_fixed=False,
sampling_mode='enumerative'
):
self.n_param = n_param
self.n_branch = n_branch
self.pad_len = pad_len
        if max_pad_len is None:
            self.max_pad_len = np.max([n_param // 3 - 1, 0])
        else:
            self.max_pad_len = max_pad_len
#
self.def_path = def_path
self.def_prob = def_prob
self.def_tps = def_tps
#
self.context_onehot = context_onehot
self.context_dim = context_dim
self.context_drift = context_drift
#
self.key_rep_type = key_rep_type
self.sampling_mode = sampling_mode
#
self.rm_kv = rm_kv
self.n_rm_fixed = n_rm_fixed
#
self.reset_schema()
def reset_schema(self):
"""re-initialize the schema
"""
self.schema = Schema(
n_param=self.n_param,
n_branch=self.n_branch,
def_path=self.def_path,
def_prob=self.def_prob,
def_tps=self.def_tps,
context_onehot=self.context_onehot,
context_dim=self.context_dim,
context_drift=self.context_drift,
key_rep_type=self.key_rep_type,
sampling_mode=self.sampling_mode,
)
self.k_dim = self.schema.k_dim
self.v_dim = self.schema.v_dim
self.c_dim = self.schema.c_dim
def _sample(self):
"""sample an event sequence, one-hot vector representation
Returns
-------
2d np array, 2d np array; T x (T x B), T x B
sequence of keys / parameter values over time
"""
# sample keys and parameter values, integer representation
keys, vals = self.schema.sample()
# translate to vector representation
keys_vec = np.vstack([self.schema.key_rep[k_t, :] for k_t in keys])
vals_vec = np.vstack([self.schema.val_rep[v_t, :] for v_t in vals])
# ctxs_vec = np.vstack([self.schema.ctx_rep[v_t, :] for v_t in vals])
ctxs_vec = np.vstack([self.schema.ctx_rep])
misc = [keys, vals]
return keys_vec, vals_vec, ctxs_vec, misc
def sample(
self,
n_parts=2, p_rm_ob_enc=0, p_rm_ob_rcl=0,
permute_observations=True, permute_queries=False,
):
"""sample a multi-part "movie", with repetition structure
Parameters
----------
n_parts : int
the number of parts in this event sequence
format: string
the output data format
- 'okv-qkv': human-readble form
- 'xy': nn-readable form
Returns
-------
3d np array, 3d np array; nP x T x (T x B), nP x T x B
sequence of keys / parameter values over time
different parts are consistent
"""
        # sample the state-param associations
keys_vec_, vals_vec_, ctxs_vec_, misc = self._sample()
# sample for the observation phase
o_keys_vec, o_vals_vec = self._sample_permutations_sup(
keys_vec_, vals_vec_, n_parts, permute_observations)
q_keys_vec, q_vals_vec = self._sample_permutations_sup(
keys_vec_, vals_vec_, n_parts, permute_queries)
# corrupt input during encoding
o_keys_vec, o_vals_vec = self._corrupt_observations(
o_keys_vec, o_vals_vec, p_rm_ob_enc, p_rm_ob_rcl)
# context are assumed to repeat across the two phases
o_ctxs_vec = q_ctxs_vec = ctxs_vec_
# pack sample
o_sample_ = [o_keys_vec, o_vals_vec, o_ctxs_vec]
q_sample_ = [q_keys_vec, q_vals_vec, q_ctxs_vec]
# padding, if there is a delay
[o_sample_, q_sample_] = self._delay_pred_demand(o_sample_, q_sample_)
# pack sample
sample_ = [o_sample_, q_sample_]
return sample_, misc
def _sample_permutations_sup(
self, keys_vec_raw, vals_vec_raw, n_parts, permute
):
if permute:
s_keys_vec, s_vals_vec = self._sample_permutations(
keys_vec_raw, vals_vec_raw, n_parts)
else:
s_keys_vec = np.stack([keys_vec_raw for _ in range(n_parts)])
s_vals_vec = np.stack([vals_vec_raw for _ in range(n_parts)])
return s_keys_vec, s_vals_vec
def _sample_permutations(self, keys_vec_raw, vals_vec_raw, n_perms):
"""given some raw key-val pairs, generate temporal permutation sets
"""
T = self.n_param
keys_vec = np.zeros((n_perms, T, self.k_dim))
vals_vec = np.zeros((n_perms, T, self.v_dim))
# ctxs_vec = np.zeros((n_perms, T, self.c_dim))
for ip in range(n_perms):
# unique permutation for each movie part
perm_op = np.random.permutation(T)
keys_vec[ip] = keys_vec_raw[perm_op, :]
vals_vec[ip] = vals_vec_raw[perm_op, :]
# ctxs_vec[ip] = ctxs_vec_raw[perm_op, :]
return keys_vec, vals_vec
def _corrupt_observations(
self,
o_keys_vec, o_vals_vec,
p_rm_ob_enc, p_rm_ob_rcl,
):
"""corrupt observations
        currently only zeroing out random rows is implemented, but this
        function could be made more general
Parameters
----------
o_keys_vec : 3d np array, nP x T x (T x B)
keys, or states
o_vals_vec : 3d np array, nP x T x B
values, or actions
p_rm_ob_enc : float
p(zero-ing out observation at time t) during encoding
p_rm_ob_rcl : float
p(zero-ing out observation at time t) during recall
Returns
-------
3d np array, 3d np array; nP x T x (T x B), nP x T x B
keys,values after corruption
"""
# the 1st part is the encoding phase
# all remaining parts are query phase
n_parts = len(o_keys_vec)
# get a list of p_rm, only the 1st phase is the encoding phase
# the rest of phases are considered as recall phases
p_rms = [p_rm_ob_enc] * (n_parts - 1) + [p_rm_ob_rcl]
# zero out random rows (time steps)
for ip in range(n_parts):
# zero out both key and values
if self.rm_kv:
[o_keys_vec[ip], o_vals_vec[ip]] = _zero_out_random_rows(
[o_keys_vec[ip], o_vals_vec[ip]], p_rms[ip],
n_rm_fixed=self.n_rm_fixed
)
# zero out values only
# in this case the agent know which state is unknown
else:
[o_vals_vec[ip]] = _zero_out_random_rows(
[o_vals_vec[ip]], p_rms[ip],
n_rm_fixed=self.n_rm_fixed
)
return o_keys_vec, o_vals_vec
def _delay_pred_demand(self, o_sample_, q_sample_):
"""apply delay to the queries, and zero pad the end of observations
Parameters
----------
o_sample_ : list
observations
q_sample_ : list
queries
Returns
-------
list, list
padded observations and queries
"""
if self.pad_len == 0 or self.max_pad_len == 0:
return o_sample_, q_sample_
# uniformly sample a padding length
if self.pad_len == 'random':
# high is exclusive so need to add 1
pad_len = np.random.randint(low=0, high=self.max_pad_len + 1)
# fixed padding length
elif self.pad_len > 0:
pad_len = self.pad_len
else:
raise ValueError(f'Invalid delay length: {self.pad_len}')
# padd the data
o_sample_ = _zero_pad_kvc(o_sample_, pad_len, side='bot')
q_sample_ = _zero_pad_kvc(q_sample_, pad_len, side='top')
return o_sample_, q_sample_
def _zero_out_random_rows(matrices, p_rm, n_rm_fixed=True):
"""zero out the same set of (randomly selected) rows for all input matrices
Parameters
----------
matrices : list
a list of 2d arrays
    p_rm : float
        probability of zeroing out a row
    n_rm_fixed : bool
        if True, zero out exactly ceil(p_rm * n_rows) rows; otherwise draw the
        number of rows to remove uniformly from [0, p_rm * n_rows]
Returns
-------
list
a list of 2d arrays
"""
assert 0 <= p_rm <= 1
n_rows, _ = np.shape(matrices[0])
for matrix in matrices:
assert np.shape(matrix)[0] == n_rows
# select # row(s) to zero out
if n_rm_fixed:
n_rows_to0 = np.ceil(p_rm * n_rows)
else:
        # draw the number of removed rows uniformly from [0, p_rm * n_rows],
        # so the expected number removed is p_rm * n_rows / 2
        max_rows_to_remove = p_rm * n_rows
        n_rows_to0 = np.round(np.random.uniform(high=max_rows_to_remove))
# select some rows to zero out
rows_to0 = np.random.choice(
range(n_rows), size=int(n_rows_to0), replace=False
)
# zero out the same rows for all input matrices
for i in range(len(matrices)):
matrices[i][rows_to0, :] = 0
return matrices
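
# Illustrative check (kept as a comment so it does not run at import time):
#   mats = _zero_out_random_rows([np.ones((4, 3)), np.ones((4, 2))], 0.5)
#   both matrices now have the same randomly chosen rows set to zero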
def _zero_pad_kvc(kvc: list, pad_len: int, side: str):
    """delay the prediction demand by shifting the query value to later time
    points
    Parameters
    ----------
    kvc : list
        [keys_vec, vals_vec, ctxs_vec] for one phase
    pad_len : int
        number of zero rows to pad
    side : str
        'top' or 'bot'; which end of the key/value matrices to pad
    Returns
    -------
    list
        [keys_vec, vals_vec, ctxs_vec] after padding
    """
# unpack data
keys_vec, vals_vec, ctxs_vec = kvc
n_parts, n_params, k_dim = np.shape(keys_vec)
_, _, v_dim = np.shape(vals_vec)
_, c_dim = np.shape(ctxs_vec)
# pad to delay prediction time
keys_vec = [_vpad(k_mat, pad_len, side=side) for k_mat in keys_vec]
vals_vec = [_vpad(v_mat, pad_len, side=side) for v_mat in vals_vec]
# TODO here i assumed context is always in sync with the queries
# but probably want to generate additional context for the padding period
ctxs_vec = _vpad(ctxs_vec, pad_len, side='top')
# pack the data
kvc_ = [keys_vec, vals_vec, ctxs_vec]
return kvc_
def _vpad(matrix, pad_len: int, side: str):
'''vertically pad zeros from the top or bot'''
#
n_rows, n_cols = np.shape(matrix)
zero_padding = np.zeros((pad_len, n_cols))
if side == 'top':
padded_matrix = np.vstack([zero_padding, matrix])
elif side == 'bot':
padded_matrix = np.vstack([matrix, zero_padding])
else:
raise ValueError('Unrecognizable padding side')
return padded_matrix
'''test'''
if __name__ == "__main__":
import matplotlib.pyplot as plt
# from task.utils import sample_rand_path,sample_def_tps
# init a graph
n_param, n_branch = 10, 2
n_parts = 2
pad_len = 0
# def_tps = np.array([1, 0] * 5)
# def_tps = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# def_tps = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    # def_tps = np.array([1, 0] * (n_param // 2))
    def_tps = np.array([0, 1] * (n_param // 2))
# def_path = np.tile(np.array([[1, 0], [0, 1]]), (1, 5)).T
# def_path = np.tile(np.array([[1, 0], [0, 1]]), (1, n_param // 2)).T
def_path = np.vstack([[1, 0] for i in range(10)])
def_prob = .9
# pad_len = 'random'
    # p_rm_ob_enc, p_rm_ob_rcl = .5, .5
    p_rm_ob_enc, p_rm_ob_rcl = 0, 0
key_rep_type = 'time'
permute_queries = False
permute_observations = False
# key_rep_type = 'gaussian'
sampler = StimSampler(
n_param, n_branch,
pad_len=pad_len,
key_rep_type=key_rep_type,
def_tps=def_tps, def_path=def_path, def_prob=def_prob
)
sample_, misc = sampler.sample(
n_parts, p_rm_ob_enc=p_rm_ob_enc, p_rm_ob_rcl=p_rm_ob_rcl,
# permute_queries=permute_queries, permute_observations=permute_observations
)
observations, queries = sample_
[o_keys_vec, o_vals_vec, o_ctxs_vec] = observations
[q_keys_vec, q_vals_vec, q_ctxs_vec] = queries
# plot
cmap = 'bone'
n_timesteps = n_param
width_ratios = [sampler.k_dim, sampler.v_dim] * 2 + [sampler.c_dim]
f, axes = plt.subplots(
n_parts, 5, figsize=(8, 5), sharey=True,
gridspec_kw={'width_ratios': width_ratios}
)
for ip in range(n_parts):
axes[ip, 0].imshow(o_keys_vec[ip], cmap=cmap)
axes[ip, 1].imshow(o_vals_vec[ip], cmap=cmap)
axes[ip, 2].imshow(q_keys_vec[ip], cmap=cmap)
axes[ip, 3].imshow(q_vals_vec[ip], cmap=cmap)
axes[0, 4].imshow(o_ctxs_vec, cmap=cmap)
axes[1, 4].imshow(q_ctxs_vec, cmap=cmap)
# label
axes[0, 0].set_title('Observation')
axes[0, 2].set_title('Queries')
axes[-1, 0].set_xlabel('Keys/States')
axes[-1, 1].set_xlabel('Values/Action')
axes[-1, 2].set_xlabel('Keys/States')
axes[-1, 3].set_xlabel('Values/Action')
axes[0, 4].set_title('o, Context')
axes[1, 4].set_title('q, Context')
# modify y ticks/labels
for ip in range(n_parts):
axes[ip, 0].set_yticks(range(n_timesteps))
axes[ip, 0].set_yticklabels(range(n_timesteps))
axes[ip, 0].set_ylabel(f'Time, part {ip+1}')
f.tight_layout()
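    # show the figure when run as a script (assumed intent of this test block)
    plt.show()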
from mwcleric.auth_credentials import AuthCredentials
class AuthCredentials(AuthCredentials):
"""Wrapper class just to make imports nicer to work with"""
pass
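
# With this alias, downstream code can import AuthCredentials from this module
# instead of reaching into mwcleric.auth_credentials directly.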
import tkinter as tk
import view_calc as vc
import model_calc as mc
class CalcController:
def __init__(self):
self.root = tk.Tk()
self.calc = vc.ViewCalc(self.root, self)
self.moc = mc.ModelCalc()
def start(self):
self.root.mainloop()
    def operacao(self, op, n1, n2):
        # Read both operands from the view's entry widgets.
        num1 = n1.get()
        num2 = n2.get()
        if op == "somar":  # add
            resultado = self.moc.somar(num1, num2)
        elif op == "subtrair":  # subtract
            resultado = self.moc.subtrair(num1, num2)
        elif op == "multiplicar":  # multiply
            resultado = self.moc.multiplicar(num1, num2)
        elif op == "dividir":  # divide
            resultado = self.moc.dividir(num1, num2)
        else:
            return
        self.calc.mostrar_resultado(resultado)  # show the result in the view
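
if __name__ == "__main__":
    # Minimal launch sketch; assumes the view_calc and model_calc modules are importable.
    CalcController().start()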
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['DomainConfigArgs', 'DomainConfig']
@pulumi.input_type
class DomainConfigArgs:
def __init__(__self__, *,
domain_name: pulumi.Input[str],
function_args: pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]],
function_name: pulumi.Input[str]):
"""
The set of arguments for constructing a DomainConfig resource.
        :param pulumi.Input[str] domain_name: Name of the accelerated domain. This name (without the suffix) can be 1 to 63 characters long, must contain only alphanumeric characters or "-", must not begin or end with "-", and must not have "-" in both the 3rd and 4th character positions. Suffixes `.sh` and `.tel` are not supported.
:param pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]] function_args: The args of the domain config.
:param pulumi.Input[str] function_name: The name of the domain config.
"""
pulumi.set(__self__, "domain_name", domain_name)
pulumi.set(__self__, "function_args", function_args)
pulumi.set(__self__, "function_name", function_name)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Input[str]:
"""
        Name of the accelerated domain. This name (without the suffix) can be 1 to 63 characters long, must contain only alphanumeric characters or "-", must not begin or end with "-", and must not have "-" in both the 3rd and 4th character positions. Suffixes `.sh` and `.tel` are not supported.
"""
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: pulumi.Input[str]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="functionArgs")
def function_args(self) -> pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]]:
"""
The args of the domain config.
"""
return pulumi.get(self, "function_args")
@function_args.setter
def function_args(self, value: pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]]):
pulumi.set(self, "function_args", value)
@property
@pulumi.getter(name="functionName")
def function_name(self) -> pulumi.Input[str]:
"""
The name of the domain config.
"""
return pulumi.get(self, "function_name")
@function_name.setter
def function_name(self, value: pulumi.Input[str]):
pulumi.set(self, "function_name", value)
@pulumi.input_type
class _DomainConfigState:
def __init__(__self__, *,
config_id: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
function_args: Optional[pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]]] = None,
function_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DomainConfig resources.
:param pulumi.Input[str] config_id: (Available in 1.132.0+) The ID of the domain config function.
        :param pulumi.Input[str] domain_name: Name of the accelerated domain. This name (without the suffix) can be 1 to 63 characters long, must contain only alphanumeric characters or "-", must not begin or end with "-", and must not have "-" in both the 3rd and 4th character positions. Suffixes `.sh` and `.tel` are not supported.
:param pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]] function_args: The args of the domain config.
:param pulumi.Input[str] function_name: The name of the domain config.
:param pulumi.Input[str] status: (Available in 1.132.0+) The Status of the function. Valid values: `success`, `testing`, `failed`, and `configuring`.
"""
if config_id is not None:
pulumi.set(__self__, "config_id", config_id)
if domain_name is not None:
pulumi.set(__self__, "domain_name", domain_name)
if function_args is not None:
pulumi.set(__self__, "function_args", function_args)
if function_name is not None:
pulumi.set(__self__, "function_name", function_name)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="configId")
def config_id(self) -> Optional[pulumi.Input[str]]:
"""
(Available in 1.132.0+) The ID of the domain config function.
"""
return pulumi.get(self, "config_id")
@config_id.setter
def config_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_id", value)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> Optional[pulumi.Input[str]]:
"""
        Name of the accelerated domain. This name (without the suffix) can be 1 to 63 characters long, must contain only alphanumeric characters or "-", must not begin or end with "-", and must not have "-" in both the 3rd and 4th character positions. Suffixes `.sh` and `.tel` are not supported.
"""
return pulumi.get(self, "domain_name")
@domain_name.setter
def domain_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain_name", value)
@property
@pulumi.getter(name="functionArgs")
def function_args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]]]:
"""
The args of the domain config.
"""
return pulumi.get(self, "function_args")
@function_args.setter
def function_args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DomainConfigFunctionArgArgs']]]]):
pulumi.set(self, "function_args", value)
@property
@pulumi.getter(name="functionName")
def function_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the domain config.
"""
return pulumi.get(self, "function_name")
@function_name.setter
def function_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_name", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
(Available in 1.132.0+) The Status of the function. Valid values: `success`, `testing`, `failed`, and `configuring`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
class DomainConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_name: Optional[pulumi.Input[str]] = None,
function_args: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DomainConfigFunctionArgArgs']]]]] = None,
function_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a CDN Accelerated Domain resource.
For information about domain config and how to use it, see [Batch set config](https://www.alibabacloud.com/help/zh/doc-detail/90915.htm)
> **NOTE:** Available in v1.34.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
# Create a new Domain config.
domain = alicloud.cdn.DomainNew("domain",
domain_name="mycdndomain.xiaozhu.com",
cdn_type="web",
scope="overseas",
sources=[alicloud.cdn.DomainNewSourceArgs(
content="1.1.1.1",
type="ipaddr",
priority=20,
port=80,
weight=15,
)])
config = alicloud.cdn.DomainConfig("config",
domain_name=domain.domain_name,
function_name="ip_allow_list_set",
function_args=[alicloud.cdn.DomainConfigFunctionArgArgs(
arg_name="ip_list",
arg_value="110.110.110.110",
)])
```
## Import
CDN domain config can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cdn/domainConfig:DomainConfig example <domain_name>:<function_name>:<config_id>
```
```sh
$ pulumi import alicloud:cdn/domainConfig:DomainConfig example <domain_name>:<function_name>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] domain_name: Name of the accelerated domain. This name (without the suffix) can be 1 to 63 characters long, must contain only alphanumeric characters or "-", must not begin or end with "-", and must not have "-" in both the 3rd and 4th character positions. Suffixes `.sh` and `.tel` are not supported.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DomainConfigFunctionArgArgs']]]] function_args: The args of the domain config.
:param pulumi.Input[str] function_name: The name of the domain config.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DomainConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a CDN Accelerated Domain resource.
For information about domain config and how to use it, see [Batch set config](https://www.alibabacloud.com/help/zh/doc-detail/90915.htm)
> **NOTE:** Available in v1.34.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
# Create a new Domain config.
domain = alicloud.cdn.DomainNew("domain",
domain_name="mycdndomain.xiaozhu.com",
cdn_type="web",
scope="overseas",
sources=[alicloud.cdn.DomainNewSourceArgs(
content="1.1.1.1",
type="ipaddr",
priority=20,
port=80,
weight=15,
)])
config = alicloud.cdn.DomainConfig("config",
domain_name=domain.domain_name,
function_name="ip_allow_list_set",
function_args=[alicloud.cdn.DomainConfigFunctionArgArgs(
arg_name="ip_list",
arg_value="110.110.110.110",
)])
```
## Import
CDN domain config can be imported using the id, e.g.
```sh
$ pulumi import alicloud:cdn/domainConfig:DomainConfig example <domain_name>:<function_name>:<config_id>
```
```sh
$ pulumi import alicloud:cdn/domainConfig:DomainConfig example <domain_name>:<function_name>
```
:param str resource_name: The name of the resource.
:param DomainConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DomainConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_name: Optional[pulumi.Input[str]] = None,
function_args: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DomainConfigFunctionArgArgs']]]]] = None,
function_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DomainConfigArgs.__new__(DomainConfigArgs)
if domain_name is None and not opts.urn:
raise TypeError("Missing required property 'domain_name'")
__props__.__dict__["domain_name"] = domain_name
if function_args is None and not opts.urn:
raise TypeError("Missing required property 'function_args'")
__props__.__dict__["function_args"] = function_args
if function_name is None and not opts.urn:
raise TypeError("Missing required property 'function_name'")
__props__.__dict__["function_name"] = function_name
__props__.__dict__["config_id"] = None
__props__.__dict__["status"] = None
super(DomainConfig, __self__).__init__(
'alicloud:cdn/domainConfig:DomainConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[str]] = None,
domain_name: Optional[pulumi.Input[str]] = None,
function_args: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DomainConfigFunctionArgArgs']]]]] = None,
function_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None) -> 'DomainConfig':
"""
Get an existing DomainConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_id: (Available in 1.132.0+) The ID of the domain config function.
        :param pulumi.Input[str] domain_name: Name of the accelerated domain. This name (without the suffix) can be 1 to 63 characters long, must contain only alphanumeric characters or "-", must not begin or end with "-", and must not have "-" in both the 3rd and 4th character positions. Suffixes `.sh` and `.tel` are not supported.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DomainConfigFunctionArgArgs']]]] function_args: The args of the domain config.
:param pulumi.Input[str] function_name: The name of the domain config.
:param pulumi.Input[str] status: (Available in 1.132.0+) The Status of the function. Valid values: `success`, `testing`, `failed`, and `configuring`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DomainConfigState.__new__(_DomainConfigState)
__props__.__dict__["config_id"] = config_id
__props__.__dict__["domain_name"] = domain_name
__props__.__dict__["function_args"] = function_args
__props__.__dict__["function_name"] = function_name
__props__.__dict__["status"] = status
return DomainConfig(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="configId")
def config_id(self) -> pulumi.Output[str]:
"""
(Available in 1.132.0+) The ID of the domain config function.
"""
return pulumi.get(self, "config_id")
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> pulumi.Output[str]:
"""
        Name of the accelerated domain. This name (without the suffix) can be 1 to 63 characters long, must contain only alphanumeric characters or "-", must not begin or end with "-", and must not have "-" in both the 3rd and 4th character positions. Suffixes `.sh` and `.tel` are not supported.
"""
return pulumi.get(self, "domain_name")
@property
@pulumi.getter(name="functionArgs")
def function_args(self) -> pulumi.Output[Sequence['outputs.DomainConfigFunctionArg']]:
"""
The args of the domain config.
"""
return pulumi.get(self, "function_args")
@property
@pulumi.getter(name="functionName")
def function_name(self) -> pulumi.Output[str]:
"""
The name of the domain config.
"""
return pulumi.get(self, "function_name")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
(Available in 1.132.0+) The Status of the function. Valid values: `success`, `testing`, `failed`, and `configuring`.
"""
return pulumi.get(self, "status")
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: robot_behavior.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import param_pb2 as param__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='robot_behavior.proto',
package='Indriya.Core.Msgs',
#syntax='proto2',
serialized_pb=_b('\n\x14robot_behavior.proto\x12\x11Indriya.Core.Msgs\x1a\x0bparam.proto\"T\n\x11\x42\x65haviorArguments\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x02(\t\x12\x14\n\x0cplace_holder\x18\x03 \x02(\x08\x12\x0c\n\x04type\x18\x04 \x02(\t\"\xdb\x02\n\x13\x42\x65haviorDescription\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x15\n\rfunction_name\x18\x02 \x02(\t\x12\x31\n\x03\x61rg\x18\x03 \x03(\x0b\x32$.Indriya.Core.Msgs.BehaviorArguments\x12\x42\n\x04type\x18\x04 \x02(\x0e\x32\x34.Indriya.Core.Msgs.BehaviorDescription.ExecutionType\x12\x44\n\x05state\x18\x05 \x02(\x0e\x32\x35.Indriya.Core.Msgs.BehaviorDescription.ExecutionState\".\n\rExecutionType\x12\x0c\n\x08\x42locking\x10\x00\x12\x0f\n\x0bNonBlocking\x10\x01\"2\n\x0e\x45xecutionState\x12\x08\n\x04Idle\x10\x00\x12\x0b\n\x07Running\x10\x01\x12\t\n\x05\x45rror\x10\x02\"\x9e\x02\n\x13RobotBehaviorModule\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05robot\x18\x02 \x02(\t\x12\'\n\x05param\x18\x03 \x03(\x0b\x32\x18.Indriya.Core.Msgs.Param\x12\x39\n\tbehaviors\x18\x04 \x03(\x0b\x32&.Indriya.Core.Msgs.BehaviorDescription\x12P\n\tresponder\x18\x05 \x01(\x0b\x32=.Indriya.Core.Msgs.RobotBehaviorModule.RobotBehaviorResponder\x1a\x34\n\x16RobotBehaviorResponder\x12\x0c\n\x04Host\x18\x01 \x02(\t\x12\x0c\n\x04Port\x18\x02 \x02(\x05\"O\n\x14RobotBehaviorModules\x12\x37\n\x07modules\x18\x01 \x03(\x0b\x32&.Indriya.Core.Msgs.RobotBehaviorModule')
,
dependencies=[param__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_BEHAVIORDESCRIPTION_EXECUTIONTYPE = _descriptor.EnumDescriptor(
name='ExecutionType',
full_name='Indriya.Core.Msgs.BehaviorDescription.ExecutionType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Blocking', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NonBlocking', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=392,
serialized_end=438,
)
_sym_db.RegisterEnumDescriptor(_BEHAVIORDESCRIPTION_EXECUTIONTYPE)
_BEHAVIORDESCRIPTION_EXECUTIONSTATE = _descriptor.EnumDescriptor(
name='ExecutionState',
full_name='Indriya.Core.Msgs.BehaviorDescription.ExecutionState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Idle', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Running', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Error', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=440,
serialized_end=490,
)
_sym_db.RegisterEnumDescriptor(_BEHAVIORDESCRIPTION_EXECUTIONSTATE)
_BEHAVIORARGUMENTS = _descriptor.Descriptor(
name='BehaviorArguments',
full_name='Indriya.Core.Msgs.BehaviorArguments',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Indriya.Core.Msgs.BehaviorArguments.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='Indriya.Core.Msgs.BehaviorArguments.value', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='place_holder', full_name='Indriya.Core.Msgs.BehaviorArguments.place_holder', index=2,
number=3, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='Indriya.Core.Msgs.BehaviorArguments.type', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
#syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=140,
)
_BEHAVIORDESCRIPTION = _descriptor.Descriptor(
name='BehaviorDescription',
full_name='Indriya.Core.Msgs.BehaviorDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Indriya.Core.Msgs.BehaviorDescription.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='function_name', full_name='Indriya.Core.Msgs.BehaviorDescription.function_name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='arg', full_name='Indriya.Core.Msgs.BehaviorDescription.arg', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='Indriya.Core.Msgs.BehaviorDescription.type', index=3,
number=4, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='Indriya.Core.Msgs.BehaviorDescription.state', index=4,
number=5, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BEHAVIORDESCRIPTION_EXECUTIONTYPE,
_BEHAVIORDESCRIPTION_EXECUTIONSTATE,
],
options=None,
is_extendable=False,
#syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=490,
)
_ROBOTBEHAVIORMODULE_ROBOTBEHAVIORRESPONDER = _descriptor.Descriptor(
name='RobotBehaviorResponder',
full_name='Indriya.Core.Msgs.RobotBehaviorModule.RobotBehaviorResponder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Host', full_name='Indriya.Core.Msgs.RobotBehaviorModule.RobotBehaviorResponder.Host', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='Port', full_name='Indriya.Core.Msgs.RobotBehaviorModule.RobotBehaviorResponder.Port', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
#syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=727,
serialized_end=779,
)
_ROBOTBEHAVIORMODULE = _descriptor.Descriptor(
name='RobotBehaviorModule',
full_name='Indriya.Core.Msgs.RobotBehaviorModule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Indriya.Core.Msgs.RobotBehaviorModule.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='robot', full_name='Indriya.Core.Msgs.RobotBehaviorModule.robot', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param', full_name='Indriya.Core.Msgs.RobotBehaviorModule.param', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='behaviors', full_name='Indriya.Core.Msgs.RobotBehaviorModule.behaviors', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='responder', full_name='Indriya.Core.Msgs.RobotBehaviorModule.responder', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ROBOTBEHAVIORMODULE_ROBOTBEHAVIORRESPONDER, ],
enum_types=[
],
options=None,
is_extendable=False,
#syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=493,
serialized_end=779,
)
_ROBOTBEHAVIORMODULES = _descriptor.Descriptor(
name='RobotBehaviorModules',
full_name='Indriya.Core.Msgs.RobotBehaviorModules',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='modules', full_name='Indriya.Core.Msgs.RobotBehaviorModules.modules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
#syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=781,
serialized_end=860,
)
_BEHAVIORDESCRIPTION.fields_by_name['arg'].message_type = _BEHAVIORARGUMENTS
_BEHAVIORDESCRIPTION.fields_by_name['type'].enum_type = _BEHAVIORDESCRIPTION_EXECUTIONTYPE
_BEHAVIORDESCRIPTION.fields_by_name['state'].enum_type = _BEHAVIORDESCRIPTION_EXECUTIONSTATE
_BEHAVIORDESCRIPTION_EXECUTIONTYPE.containing_type = _BEHAVIORDESCRIPTION
_BEHAVIORDESCRIPTION_EXECUTIONSTATE.containing_type = _BEHAVIORDESCRIPTION
_ROBOTBEHAVIORMODULE_ROBOTBEHAVIORRESPONDER.containing_type = _ROBOTBEHAVIORMODULE
_ROBOTBEHAVIORMODULE.fields_by_name['param'].message_type = param__pb2._PARAM
_ROBOTBEHAVIORMODULE.fields_by_name['behaviors'].message_type = _BEHAVIORDESCRIPTION
_ROBOTBEHAVIORMODULE.fields_by_name['responder'].message_type = _ROBOTBEHAVIORMODULE_ROBOTBEHAVIORRESPONDER
_ROBOTBEHAVIORMODULES.fields_by_name['modules'].message_type = _ROBOTBEHAVIORMODULE
DESCRIPTOR.message_types_by_name['BehaviorArguments'] = _BEHAVIORARGUMENTS
DESCRIPTOR.message_types_by_name['BehaviorDescription'] = _BEHAVIORDESCRIPTION
DESCRIPTOR.message_types_by_name['RobotBehaviorModule'] = _ROBOTBEHAVIORMODULE
DESCRIPTOR.message_types_by_name['RobotBehaviorModules'] = _ROBOTBEHAVIORMODULES
BehaviorArguments = _reflection.GeneratedProtocolMessageType('BehaviorArguments', (_message.Message,), dict(
DESCRIPTOR = _BEHAVIORARGUMENTS,
__module__ = 'robot_behavior_pb2'
# @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.BehaviorArguments)
))
_sym_db.RegisterMessage(BehaviorArguments)
BehaviorDescription = _reflection.GeneratedProtocolMessageType('BehaviorDescription', (_message.Message,), dict(
DESCRIPTOR = _BEHAVIORDESCRIPTION,
__module__ = 'robot_behavior_pb2'
# @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.BehaviorDescription)
))
_sym_db.RegisterMessage(BehaviorDescription)
RobotBehaviorModule = _reflection.GeneratedProtocolMessageType('RobotBehaviorModule', (_message.Message,), dict(
RobotBehaviorResponder = _reflection.GeneratedProtocolMessageType('RobotBehaviorResponder', (_message.Message,), dict(
DESCRIPTOR = _ROBOTBEHAVIORMODULE_ROBOTBEHAVIORRESPONDER,
__module__ = 'robot_behavior_pb2'
# @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.RobotBehaviorModule.RobotBehaviorResponder)
))
,
DESCRIPTOR = _ROBOTBEHAVIORMODULE,
__module__ = 'robot_behavior_pb2'
# @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.RobotBehaviorModule)
))
_sym_db.RegisterMessage(RobotBehaviorModule)
_sym_db.RegisterMessage(RobotBehaviorModule.RobotBehaviorResponder)
RobotBehaviorModules = _reflection.GeneratedProtocolMessageType('RobotBehaviorModules', (_message.Message,), dict(
DESCRIPTOR = _ROBOTBEHAVIORMODULES,
__module__ = 'robot_behavior_pb2'
# @@protoc_insertion_point(class_scope:Indriya.Core.Msgs.RobotBehaviorModules)
))
_sym_db.RegisterMessage(RobotBehaviorModules)
# @@protoc_insertion_point(module_scope)
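
# Illustrative only: the generated classes behave like ordinary protobuf messages,
# with the required fields declared in the descriptors above, e.g.:
#   arg = BehaviorArguments(name='speed', value='0.5', place_holder=False, type='float')
#   payload = arg.SerializeToString()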
import six
import itertools
import numpy as np
from chainer import serializers
import nutszebra_log2
# import nutszebra_slack
import nutszebra_utility
import nutszebra_log_model
import nutszebra_sampling
import nutszebra_download_cifar100
import nutszebra_preprocess_picture
import nutszebra_basic_print
import nutszebra_data_augmentation
sampling = nutszebra_sampling.Sampling()
preprocess = nutszebra_preprocess_picture.PreprocessPicture()
# slack = nutszebra_slack.Slack()
utility = nutszebra_utility.Utility()
class TrainCifar100(object):
    def __init__(self, model=None, optimizer=None, load_model=None, load_optimizer=None,
                 load_log=None, da=nutszebra_data_augmentation.DataAugmentationCifar10NormalizeSmall,
                 save_path='./', epoch=300, batch=128, gpu=-1, start_epoch=1,
                 train_batch_divide=4, test_batch_divide=4, debug_flag=False):
self.model = model
self.optimizer = optimizer
self.load_model = load_model
self.load_optimizer = load_optimizer
self.load_log = load_log
self.da = da
self.save_path = save_path
self.epoch = epoch
self.batch = batch
self.gpu = gpu
self.start_epoch = start_epoch
self.train_batch_divide = train_batch_divide
self.test_batch_divide = test_batch_divide
dl = nutszebra_download_cifar100.Cifar100()
data = dl.load_cifar100_data()
self.data_init(data['train_x'], data['train_y'], data['test_x'], data['test_y'])
self.log = self.log_init()
self.model_init()
self.save_path = save_path if save_path[-1] == '/' else save_path + '/'
utility.make_dir(self.save_path + 'model')
self.log_model = nutszebra_log_model.LogModel(self.model, save_path=self.save_path)
self.debug_flag = debug_flag
def data_init(self, _train_x, _train_y, _test_x, _test_y):
categories = sorted(list(set(_train_y.tolist())))
train_x, train_y, test_x, test_y = [], [], [], []
picture_number_at_each_categories = []
for i, category in enumerate(categories):
indices = np.where(_train_y == category)[0]
picture_number_at_each_categories.append(indices.shape[0])
train_x += _train_x[indices].tolist()
train_y += [i for _ in six.moves.range(indices.shape[0])]
indices = np.where(_test_y == category)[0]
test_x += _test_x[indices].tolist()
test_y += [i for _ in six.moves.range(indices.shape[0])]
self.train_x, self.train_y = np.array(train_x), np.array(train_y)
self.test_x, self.test_y = np.array(test_x), np.array(test_y)
self.picture_number_at_each_categories = picture_number_at_each_categories
self.categories = categories
print('picture_number_at_each_categories: {}'.format(self.picture_number_at_each_categories))
return (train_x, train_y, test_x, test_y, picture_number_at_each_categories, categories)
def log_init(self):
load_log = self.load_log
log = nutszebra_log2.Log2()
if load_log is not None:
log.load(load_log)
else:
log({'are': self.categories}, 'categories')
log({'parameter': len(self.train_x)}, 'train_parameter')
log({'parameter': len(self.test_x)}, 'test_parameter')
for i in six.moves.range(len(self.categories)):
log({'parameter': float((np.array(self.test_y) == i).sum())}, 'test_parameter_{}'.format(i))
log({'model': str(self.model)}, 'model')
return log
def model_init(self):
load_model = self.load_model
model = self.model
gpu = self.gpu
if load_model is None:
print('ReLU weight initialization')
model.weight_initialization()
else:
print('loading ' + self.load_model)
serializers.load_npz(load_model, model)
model.check_gpu(gpu)
def train_one_epoch(self):
# initialization
log = self.log
log_model = self.log_model
model = self.model
optimizer = self.optimizer
train_x = self.train_x
train_y = self.train_y
batch = self.batch
train_batch_divide = self.train_batch_divide
batch_of_batch = int(batch / train_batch_divide)
sum_loss = 0
yielder = sampling.yield_random_batch_from_category(int(len(train_x) / batch), self.picture_number_at_each_categories, batch, shuffle=True)
progressbar = utility.create_progressbar(int(len(train_x) / batch), desc='train', stride=1)
# train start
for _, indices in six.moves.zip(progressbar, yielder):
model.cleargrads()
for ii in six.moves.range(0, len(indices), batch_of_batch):
x = train_x[indices[ii:ii + batch_of_batch]]
t = train_y[indices[ii:ii + batch_of_batch]]
data_length = len(x)
tmp_x = []
for img in x:
img, info = self.da.train(img)
tmp_x.append(img)
x = model.prepare_input(tmp_x, dtype=np.float32, volatile=False)
y = model(x, train=True)
t = model.prepare_input(t, dtype=np.int32, volatile=False)
loss = model.calc_loss(y, t) / train_batch_divide
loss.backward()
loss.unchain_backward()
loss.to_cpu()
sum_loss += loss.data * data_length
del loss
del x
del y
del t
optimizer.update()
if self.debug_flag:
log_model.save_stat()
log_model.save_grad()
log({'loss': float(sum_loss)}, 'train_loss')
# slack.post(log.train_loss())
print(log.train_loss())
def test_one_epoch(self):
# initialization
log = self.log
model = self.model
test_x = self.test_x
test_y = self.test_y
batch = self.batch
save_path = self.save_path
test_batch_divide = self.test_batch_divide
batch_of_batch = int(batch / test_batch_divide)
categories = self.categories
sum_loss = 0
sum_accuracy = {}
false_accuracy = {}
for ii in six.moves.range(len(categories)):
sum_accuracy[ii] = 0
elements = six.moves.range(len(categories))
for ii, iii in itertools.product(elements, elements):
false_accuracy[(ii, iii)] = 0
progressbar = utility.create_progressbar(len(test_x), desc='test', stride=batch_of_batch)
results = []
for i in progressbar:
x = test_x[i:i + batch_of_batch]
t = test_y[i:i + batch_of_batch]
data_length = len(x)
tmp_x = []
for img in x:
img, info = self.da.test(img)
tmp_x.append(img)
x = model.prepare_input(tmp_x, dtype=np.float32, volatile=True)
y = model(x, train=False)
t = model.prepare_input(t, dtype=np.int32, volatile=True)
loss = model.calc_loss(y, t)
sum_loss += loss.data * data_length
tmp_accuracy, tmp_false_accuracy = model.accuracy(y, t)
for key in tmp_accuracy:
sum_accuracy[key] += tmp_accuracy[key]
for key in tmp_false_accuracy:
false_accuracy[key] += tmp_false_accuracy[key]
y = np.argmax(y.data, axis=1)
for ii in six.moves.range(t.data.shape[0]):
results.append(y[ii] == t.data[ii])
model.save_computational_graph(loss, path=save_path)
del loss
del x
del y
del t
# sum_loss
log({'loss': float(sum_loss)}, 'test_loss')
# sum_accuracy
num = 0
for key in sum_accuracy:
value = sum_accuracy[key]
log({'accuracy': int(value)}, 'test_accuracy_{}'.format(key))
num += value
log({'accuracy': int(num)}, 'test_accuracy')
for key in false_accuracy:
if key[0] == key[1]:
pass
else:
value = false_accuracy[key]
log({'accuracy': int(value)}, 'test_accuracy_{}_{}'.format(key[0], key[1]))
# show logs
sen = [log.test_loss(), log.test_accuracy(max_flag=True), log.test_each_accuracy(max_flag=True)]
# slack.post('\n'.join(sen))
print('\n'.join(sen))
return results
def run(self):
log = self.log
model = self.model
optimizer = self.optimizer
epoch = self.epoch
start_epoch = self.start_epoch
save_path = self.save_path
epoch_progressbar = utility.create_progressbar(epoch + 1, desc='epoch', stride=1, start=start_epoch)
for i in epoch_progressbar:
self.train_one_epoch()
# save graph once
# save model
model.save_model('{}model/{}_{}.model'.format(save_path, model.name, i))
optimizer(i)
self.test_one_epoch()
log.generate_loss_figure('{}loss.jpg'.format(save_path))
log.generate_accuracy_figure('{}accuracy.jpg'.format(save_path))
log.save(save_path + 'log.json')
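
if __name__ == '__main__':
    # Usage sketch (assumes a chainer model and a callable optimizer wrapper
    # from this codebase, built elsewhere and passed in):
    #   trainer = TrainCifar100(model=my_model, optimizer=my_optimizer, gpu=-1)
    #   trainer.run()
    pass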
import torch
import wandb
import numpy as np
from mlearn import base
from tqdm import tqdm, trange
from collections import defaultdict
from mlearn.utils.metrics import Metrics
from mlearn.utils.early_stopping import EarlyStopping
from mlearn.utils.evaluate import eval_torch_model, eval_sklearn_model
from sklearn.model_selection import KFold, StratifiedKFold, GridSearchCV
from mlearn.data.fileio import write_predictions, write_results, mtl_batch_writer
def _singletask_epoch(model: base.ModelType, optimizer: base.Callable, loss_f: base.Callable, metrics: Metrics,
batchers: base.DataType, clip: float = None, gpu: bool = True, **kwargs):
"""
Training procedure for single task pytorch models.
:model (base.ModelType): Untrained model to be trained.
    :optimizer (base.Callable): Optimizer function.
    :loss_f (base.Callable): Loss function to use.
    :metrics (Metrics): Initialized Metrics object, updated in place.
    :batchers (base.DataType): Batched training set.
    :clip (float, default = None): Add gradient clipping to prevent exploding gradients.
    :gpu (bool, default = True): Run on GPU.
    :returns: None; `metrics` is updated in place with epoch scores and loss.
"""
with tqdm(batchers, desc = "Batch", leave = False) as loop:
predictions, labels = [], []
epoch_loss = 0
model.train()
for X, y in loop:
# Zero out Gradients
optimizer.zero_grad()
# Send to GPU, if necessary.
if gpu:
X = X.cuda()
y = y.cuda()
# Compute scores and losses
scores = model(X, **kwargs)
loss = loss_f(scores, y)
if torch.isnan(loss):
raise ValueError
# Backprop
loss.backward()
# Clip gradients
if clip is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
# Store labels and predictions
predictions.extend(torch.argmax(scores, dim = 1).cpu().tolist())
labels.extend(y.cpu().tolist())
# Store loss
lo = loss.data.item()
epoch_loss += lo
loop.set_postfix(batch_loss = f"{lo / len(y) :.4f}")
# Compute metrics and store loss
metrics.compute(labels, predictions)
metrics.loss = epoch_loss / len(labels)
def train_singletask_model(model: base.ModelType, save_path: str, epochs: int, batchers: base.DataType,
loss: base.Callable, optimizer: base.Callable, metrics: Metrics,
dev: base.DataType = None, dev_metrics: Metrics = None, clip: float = 1.0,
early_stopping: int = None, low: bool = False, shuffle: bool = True, gpu: bool = True,
hyperopt: bool = False, **kwargs) -> base.Union[list, int, dict, dict]:
"""
Train a single task pytorch model.
:model (base.ModelType): Untrained model to be trained.
:save_path (str): Path to save models to.
:epochs (int): The number of epochs to run.
:batchers (base.DataType): Batched training set.
:loss (base.Callable): Loss function to use.
    :optimizer (base.Callable): Optimizer function.
:metrics (object): Initialized Metrics object.
:dev (base.DataType, optional): Batched dev set.
:dev_metrics (object): Initialized Metrics object.
:clip (float, default = None): Clip gradients to prevent exploding gradient problem.
    :early_stopping (int, default = None): Number of iterations without improvement to allow before early stopping.
:low (bool, default = False): Lower scores indicate better performance.
:shuffle (bool, default = True): Shuffle the dataset.
:gpu (bool, default = True): Run on GPU
:hyperopt (bool, default = False): Do hyper parameter optimisation.
"""
with trange(epochs, desc = "Training epochs", leave = False) as loop:
if gpu:
model = model.cuda()
if hyperopt:
wandb.watch(model, log = 'all')
if early_stopping is not None:
earlystop = EarlyStopping(save_path, model, early_stopping, low, hyperopt)
for ep in loop:
if shuffle:
batchers.shuffle()
try:
_singletask_epoch(model, optimizer, loss, metrics, batchers, clip, gpu)
except ValueError as e:
tqdm.write(f"QUITTING: NaN loss error occured. Exiting.\nTraceback: {e}")
break
if hyperopt:
epoch_scores = metrics.epoch_scores()
wandb.log({f'train/{key}': epoch_scores[key] for key in epoch_scores.keys()})
try:
eval_torch_model(model, dev, loss, dev_metrics, gpu, store = False, **kwargs)
loop.set_postfix(epoch_loss = f"{metrics.get_last('loss'):.4f}",
dev_loss = f"{dev_metrics.get_last('loss'):.4f}",
**metrics.display(),
dev_score = f"{dev_metrics.last_display():.4f}")
if hyperopt:
scrs = dev_metrics.epoch_scores()
wandb.log({f'dev/{key}': scrs[key] for key in scrs})
if early_stopping is not None:
if earlystop(model, dev_metrics.early_stopping()):
# model = earlystop.best_state
break
                    if ep >= early_stopping:
                        if len(set(metrics.display_score())) < 2 or len(set(dev_metrics.display_score())) < 2:
                            # the model is stuck on a single score and is not learning anything
                            tqdm.write(f"Early Stopping: The model has not learned anything in {ep} epochs.")
                            # model = earlystop.best_state
                            break
except Exception:
# Dev is not set.
loop.set_postfix(epoch_loss = f"{metrics.get_last('loss'):.4f}", **metrics.display())
finally:
loop.refresh()
def run_singletask_model(train: bool, writer: base.Callable, pred_writer: base.Callable = None,
library: str = 'pytorch', **kwargs) -> None:
"""
Train or evaluate model.
:train (bool): Whether it's a train or test run.
:writer (csv.writer): File to output model performance to.
:pred_writer (base.Callable): File to output the model predictions to.
:library (str): Library of the model.
"""
if train:
        func = train_singletask_model if library == 'pytorch' else select_sklearn_training_regiment  # the sklearn training selector is assumed to be in scope
else:
func = eval_torch_model if library == 'pytorch' else eval_sklearn_model
func(**kwargs)
write_results(writer, **kwargs)
if not train and pred_writer is not None:
write_predictions(pred_writer, **kwargs)
def _mtl_epoch(model: base.ModelType, loss_f: base.Callable, loss_weights: base.DataType, optimizer: base.Callable,
metrics: object, batchers: base.List[base.Batch], batch_count: int, dataset_weights: base.List[float],
taskid2name: dict, epoch_no: int, clip: float = None, gpu: bool = True, **kwargs) -> None:
"""
Train one epoch of an MTL training loop.
:model (base.ModelType): Model in the process of being trained.
:loss_f (base.Callable): The loss function being used.
    :loss_weights (base.DataType): Determines relative task importance when using multiple input/output functions.
:optimizer (base.Callable): The optimizer function used.
:metrics (object): Initialized Metrics object.
:batchers (base.List[base.Batch]): A list of batched objects.
:batch_count (int): The number of batchers to go through in each epoch.
:dataset_weights (base.List[float]): The probability with which each dataset is chosen to be trained on.
:taskid2name (dict): Dictionary mapping task ID to dataset name.
:epoch_no (int): The iteration of the epoch.
:clip (float, default = None): Use gradient clipping.
"""
with tqdm(range(batch_count), desc = 'Batch', leave = False) as loop:
label_count = 0
epoch_loss = 0
# Set model to training mode
model.train()
for i, b in enumerate(loop):
# Zero out gradients
optimizer.zero_grad()
# Select task and get batch
task_id = np.random.choice(range(len(batchers)), p = dataset_weights)
X, y = next(iter(batchers[task_id]))
# Send to GPU
if gpu:
model = model.cuda()
X = X.cuda()
y = y.cuda()
# Compute model prediction and loss
scores = model(X, task_id, **kwargs)
loss = loss_f(scores, y) * loss_weights[task_id]
if torch.isnan(loss):
raise ValueError
if loss.item() < 0.0:
loss = loss * -1
# Backprop
loss.backward()
if clip is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip) # Prevent exploding gradients
optimizer.step()
# Compute metrics and store loss information
metrics.compute(torch.argmax(scores, dim = 1).tolist(), y.tolist())
label_count += len(y.cpu().tolist())
epoch_loss += loss.data.item()
metrics.loss = loss.data.item() / len(y)
# Write batch info
task_name = taskid2name[task_id]
mtl_batch_writer(model = model, batch = i, metrics = metrics, task_name = task_name, epoch = epoch_no,
**kwargs)
loop.set_postfix(batch_loss = f"{metrics.get_last('loss'):.4f}",
epoch_loss = f"{epoch_loss / label_count:.4f}",
task_score = f"{metrics.last_display():.4f}",
task = task_id)
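# The task-sampling step above draws one batcher per step with probability
# proportional to dataset_weights. A minimal, self-contained sketch of that
# behaviour (commented out; the three weights are hypothetical):
#
#     import numpy as np
#     dataset_weights = [0.6, 0.3, 0.1]
#     draws = [np.random.choice(range(3), p = dataset_weights) for _ in range(1000)]
#     # task 0 is drawn ~600 times, task 1 ~300 times, task 2 ~100 times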
def train_mtl_model(model: base.ModelType,
batchers: base.List[base.DataType],
optimizer: base.Callable,
loss: base.Callable,
metrics: object,
batch_size: int = 64,
epochs: int = 2,
clip: float = None,
early_stopping: int = None,
save_path: str = None,
dev: base.DataType = None,
dev_metrics: object = None,
dev_task_id: int = 0,
batches_per_epoch: int = None,
low: bool = True,
shuffle: bool = True,
imbalanced: bool = True,
dataset_weights: base.DataType = None,
loss_weights: base.DataType = None,
loss_norm: base.DataType = None,
gpu: bool = True,
hyperopt: bool = False,
**kwargs) -> None:
"""
Train a multi-task learning model.
:model (base.ModelType): Untrained model.
:batchers (base.List[base.DataType]): Batched training data.
:save_path (str): Path to save trained model to.
:optimizer (base.Callable): Pytorch optimizer to train model.
:loss (base.Callable): Loss function.
:metrics (object): Initialized metrics object.
:batch_size (int): Training batch size.
:epochs (int): Maximum number of epochs (if no early stopping).
:clip (float, default = None): Use gradient clipping.
:early_stopping (int, default = None): Number of epochs to observe non-improving dev performance before stopping.
:dev (base.DataType): Batched dev object.
:dev_metrics (object): Initialized dev_metrics object.
:dev_task_id (int, default = 0): Task ID for task to use for early stopping, in case of multitask learning.
    :batches_per_epoch (int, default = None): Set number of batches per epoch. If None, an epoch consists of all
training examples.
:low (bool, default = True): If lower value is to be interpreted as better by EarlyStopping.
:shuffle: Whether to shuffle data at training.
    :imbalanced (bool, default = True): Set to False if the main task contains >= # docs in all aux tasks.
:dataset_weights (base.DataType, default = None): Probability for each dataset to be chosen (must sum to 1.0).
:loss_weights (base.DataType, default = None): Weight the loss by multiplication.
:loss_norm (base.DataType, default = None): Weight the loss.
    :gpu (bool, default = True): Set to True if model runs on GPU.
:hyperopt (bool, default = False): Do hyper parameter optimisation.
"""
with trange(epochs, desc = "Training model", leave = False) as loop:
taskid2name = {i: batchers[i].data.name for i in range(len(batchers))}
scores = defaultdict(list)
if gpu:
model = model.cuda()
if hyperopt:
wandb.watch(model, log = 'all')
# Normalise loss for each task by the number of datapoints in each task
# if loss_norm is None:
# loss_scaling = np.ones(len(batchers)) / [len(dataset) * batch_size for dataset in batchers]
if loss_weights is None:
# loss_scaling = np.ones(len(batchers)) * loss_scaling
loss_scaling = np.ones(len(batchers))
else:
loss_scaling = loss_weights
if dataset_weights is None:
dataset_weights = np.ones(len(batchers)) / len(batchers)
elif np.sum(dataset_weights) != 1.0:
diff = 1 - np.sum(dataset_weights)
dataset_weights[0] += diff
if batches_per_epoch is None:
batches_per_epoch = sum([len(dataset) * batch_size for dataset in batchers]) // batch_size
# Limit batches_per_epoch to the maximum number of batches in the main task data
if imbalanced and batches_per_epoch > len(batchers[0]):
batches_per_epoch = len(batchers[0])
if early_stopping is not None:
earlystop = EarlyStopping(save_path, model, early_stopping, low, hyperopt)
for i, epoch in enumerate(loop):
if shuffle:
for batch in batchers:
batch.shuffle()
try:
_mtl_epoch(model, loss, loss_scaling, optimizer, metrics, batchers, batches_per_epoch, dataset_weights,
taskid2name, i, clip, gpu = gpu, **kwargs)
except ValueError as e:
tqdm.write(f"QUITTING: NaN loss error occured. Exiting.\nTraceback: {e}")
break
for score in metrics.scores: # Compute average value of the scores computed in each epoch.
if score == 'loss':
scores[score].append(sum(metrics.scores[score]))
else:
scores[score].append(np.mean(metrics.scores[score]))
if hyperopt:
scrs = metrics.epoch_scores()
wandb.log({f'train/{key}': scrs[key] for key in scrs})
try:
eval_torch_model(model, dev, loss, dev_metrics, mtl = dev_task_id, store = False, gpu = gpu, **kwargs)
if dev_metrics.get_last('loss') < 0.0:
dev_metrics.scores['loss'][-1] = dev_metrics.scores['loss'][-1] * -1
loop.set_postfix(loss = f"{metrics.get_last('loss'):.4f}",
dev_loss = f"{dev_metrics.get_last('loss'):.4f}",
dev_score = f"{dev_metrics.last_display():.4f}")
if hyperopt:
scrs = dev_metrics.epoch_scores()
wandb.log({f'dev/{key}': scrs[key] for key in scrs})
if early_stopping is not None and earlystop(model, dev_metrics.early_stopping()):
model = earlystop.best_state
break
except Exception as e:
loop.set_postfix(epoch_loss = metrics.get_last('loss'))
tqdm.write(f"EXCEPTION: {e}")
finally:
loop.refresh()
metrics.scores = scores
def run_mtl_model(train: bool, writer: base.Callable, pred_writer: base.Callable = None, library: str = 'pytorch',
**kwargs) -> None:
"""
Train or evaluate model.
:train (bool): Whether it's a train or test run.
:writer (csv.writer): File to output model performance to.
:pred_writer (base.Callable): File to output the model predictions to.
:library (str): Library of the model.
"""
if train:
func = train_mtl_model if library == 'pytorch' else select_sklearn_training_regiment
else:
func = eval_torch_model if library == 'pytorch' else eval_sklearn_model
func(**kwargs, pred_writer = pred_writer)
write_results(writer, **kwargs)
# if not train and pred_writer is not None:
# write_predictions(pred_writer, **kwargs)
def train_sklearn_cv_model(model: base.ModelType, vectorizer: base.VectType, dataset: base.DataType,
cross_validate: int = None, stratified: bool = True, metrics: Metrics = None,
dev: base.DataType = None, dev_metrics: Metrics = None, **kwargs
) -> base.Tuple[Metrics, Metrics]:
"""
Train sklearn cv model.
:model (base.ModelType): An untrained scikit-learn model.
:vectorizer (base.VectType): An unfitted vectorizer.
:dataset (GeneralDataset): The dataset object containing the training set.
:cross_validate (int, default = None): The number of folds for cross-validation.
:stratified (bool, default = True): Stratify data across the folds.
:metrics (Metrics, default = None): An initialized metrics object.
:dev (base.DataType, default = None): The development data.
    :dev_metrics (Metrics, default = None): An initialized metrics object for the dev data.
:returns (model, metrics, dev_metrics): Returns trained model object, metrics and dev_metrics objects.
"""
# Load data
train = dataset.vectorize(dataset.train, dataset, vectorizer)
    labels = np.array([doc.label for doc in dataset.train])  # array so the fold indices below can be applied directly
if stratified:
folds = StratifiedKFold(cross_validate)
else:
folds = KFold(cross_validate)
    # trange expects an int, not a KFold object; iterate the splits through tqdm instead
    with tqdm(folds.split(train, labels), desc = "Training model", total = cross_validate) as loop:
        for train_idx, test_idx in loop:
trainX, trainY = train[train_idx], labels[train_idx]
testX, testY = train[test_idx], labels[test_idx]
model.fit(trainX, trainY)
eval_sklearn_model(model, testX, metrics, testY)
try:
devX = dataset.vectorize(dev, dataset, vectorizer)
devY = [getattr(doc, getattr(f, 'name')) for f in dataset.label_fields for doc in dev]
eval_sklearn_model(model, devX, dev_metrics, devY)
loop.set_postfix(**metrics.display(), **dev_metrics.display())
except Exception:
loop.set_postfix(**metrics.display())
finally:
loop.refresh()
return model, metrics, dev_metrics
def train_sklearn_gridsearch_model(model: base.ModelType, vectorizer: base.VectType, dataset: base.DataType,
grid_search: dict, cross_validate: int = None, metrics: Metrics = None,
dev: base.DataType = None, dev_metrics: Metrics = None, scoring: str = 'f1_weighted',
n_jobs: int = -1, **kwargs) -> base.Tuple[base.ModelType, Metrics, Metrics]:
"""
Train sklearn model using grid-search.
:model (base.ModelType): An untrained scikit-learn model.
:vectorizer (base.VectType): An unfitted vectorizer.
:dataset (base.DataType): The dataset object containing train data.
:grid_search (dict): The parameter grid to explore.
:cross_validate (int, default = None): The number of folds for cross-validation.
:metrics (Metrics, default = None): An initialized metrics object.
:dev (base.DataType, default = None): The development data.
    :dev_metrics (Metrics, default = None): An initialized metrics object for the dev data.
:scoring (str, default = 'f1_weighted'): The scoring metrics used to define best functioning model.
:n_jobs (int, default = -1): The number of processors to use (-1 == all processors).
:returns (model, metrics, dev_metrics): Returns grid-search object, metrics and dev_metrics objects.
"""
train = dataset.vectorize(dataset.train, dataset, vectorizer)
labels = [doc.label for doc in dataset.train]
with trange(1, desc = "Training model") as loop:
model = GridSearchCV(model, grid_search, scoring, n_jobs = n_jobs, cv = cross_validate, refit = True)
model.fit(train, labels)
try:
devX = dataset.vectorize(dev, dataset, vectorizer)
devY = [getattr(doc, getattr(f, 'name')) for f in dataset.label_fields for doc in dev]
eval_sklearn_model(model, devX, dev_metrics, devY)
loop.set_postfix(f1_score = model.best_score_, **dev_metrics.display())
except Exception:
loop.set_postfix(f1_score = model.best_score_)
finally:
loop.refresh()
return model, metrics, dev_metrics
def train_sklearn_model(model: base.ModelType, vectorizer: base.VectType, dataset: base.DataType, metrics: Metrics,
dev: base.DataType = None, dev_metrics: Metrics = None, **kwargs):
"""
Train bare sci-kit learn model.
:model (base.ModelType): An untrained scikit-learn model.
:vectorizer (base.VectType): An unfitted vectorizer.
:dataset (base.DataType): The dataset object containing train data.
    :metrics (Metrics): An initialized metrics object.
    :dev (base.DataType, default = None): The development data.
    :dev_metrics (Metrics, default = None): An initialized metrics object for the dev data.
:returns (model, metrics, dev_metrics): Returns trained model object, metrics and dev_metrics objects.
"""
with trange(1, desc = "Training model") as loop:
trainX = dataset.vectorize(dataset.train, dataset, vectorizer)
trainY = [doc.label for doc in dataset.train]
model.fit(trainX, trainY)
try:
devX = dataset.vectorize(dev, dataset, vectorizer)
devY = [getattr(doc, getattr(f, 'name')) for f in dataset.label_fields for doc in dev]
eval_sklearn_model(model, devX, dev_metrics, devY)
loop.set_postfix(**metrics.display(), **dev_metrics.display())
except Exception:
loop.set_postfix(**metrics.display())
finally:
loop.refresh()
return model, metrics, dev_metrics
def select_sklearn_training_regiment(model: base.ModelType, cross_validate: int = None, grid_search: dict = None,
**kwargs):
"""
Select the type of sklearn training regime.
:model (base.ModelType): The model to be trained.
:cross_validate (int, default = None): The number of folds to use for cross validation.
:grid_search (dict, default = None): The parameters to search over.
"""
if grid_search is not None:
train_sklearn_gridsearch_model(model, cross_validate = cross_validate, grid_search = grid_search, **kwargs)
elif cross_validate is not None:
train_sklearn_cv_model(model, cross_validate = cross_validate, **kwargs)
else:
        train_sklearn_model(model, **kwargs)
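# A minimal dispatch sketch (commented out; the LogisticRegression model, the
# parameter grid and the trailing kwargs are hypothetical -- vectorizer,
# dataset and metrics still have to be passed through **kwargs):
#
#     from sklearn.linear_model import LogisticRegression
#     model = LogisticRegression()
#     # grid_search takes precedence over plain cross-validation:
#     select_sklearn_training_regiment(model, grid_search = {'C': [0.1, 1.0]}, cross_validate = 5, **kwargs)
#     # only cross_validate set -> the k-fold CV trainer is used:
#     select_sklearn_training_regiment(model, cross_validate = 5, **kwargs)
#     # neither set -> the bare fit/eval trainer runs:
#     select_sklearn_training_regiment(model, **kwargs)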
|
nilq/baby-python
|
python
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import Qt, Signal
try:
from python_qt_binding.QtGui import QDialog, QWidget, QVBoxLayout
except Exception:
from python_qt_binding.QtWidgets import QDialog, QWidget, QVBoxLayout
from .detachable_tab_widget import DetachableTabWidget
class DetachableTabDialog(QDialog):
'''
When a tab is detached, the contents are placed into this QDialog. The tab
can be re-attached by closing the dialog or by double clicking on its
window frame.
'''
closed_signal = Signal(QDialog)
def __init__(self, content_widget, parent=None):
QDialog.__init__(self, parent)
self.tab_widget = DetachableTabWidget(self)
layout = QVBoxLayout(self)
layout.setContentsMargins(3, 3, 3, 3)
layout.addWidget(self.tab_widget)
self.setWindowFlags(Qt.Window)
tab_index = self.tab_widget.addTab(content_widget, content_widget.name())
self.tab_widget.setCurrentIndex(tab_index)
self.tab_widget.empty_tabbar_signal.connect(self._close_if_empty)
def _close_if_empty(self):
'''
        Close this dialog if no tabs are inside.
'''
if self.tab_widget.count() == 0:
self.close()
def closeEvent(self, event):
'''
Close TabWidget to remove all tabs.
'''
self.tab_widget.clear()
self.closed_signal.emit(self)
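# A minimal usage sketch (commented out; assumes a running QApplication, a
# `content_widget` QWidget exposing the name() method that addTab() relies on
# above, and hypothetical main_window / on_dialog_closed objects):
#
#     dialog = DetachableTabDialog(content_widget, parent = main_window)
#     dialog.closed_signal.connect(on_dialog_closed)  # hook for re-attaching the tab
#     dialog.show()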
|
nilq/baby-python
|
python
|
import bpy
from bpy.props import *
from .. events import propertyChanged
from .. base_types import AnimationNodeSocket, PythonListSocket
class ColorSocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_ColorSocket"
bl_label = "Color Socket"
dataType = "Color"
drawColor = (0.8, 0.8, 0.2, 1)
storable = True
comparable = False
value: FloatVectorProperty(
default = [0.5, 0.5, 0.5], subtype = "COLOR",
soft_min = 0.0, soft_max = 1.0,
update = propertyChanged)
def drawProperty(self, layout, text, node):
layout.prop(self, "value", text = text)
def getValue(self):
return list(self.value) + [1.0]
def setProperty(self, data):
self.value = data[:3]
def getProperty(self):
return self.value[:]
@classmethod
def getDefaultValue(cls):
return [0, 0, 0, 1]
@classmethod
def getCopyExpression(cls):
return "value[:]"
@classmethod
def correctValue(cls, value):
if isColor(value): return value, 0
else: return cls.getDefaultValue(), 2
class ColorListSocket(bpy.types.NodeSocket, PythonListSocket):
bl_idname = "an_ColorListSocket"
bl_label = "Color List Socket"
dataType = "Color List"
baseType = ColorSocket
drawColor = (0.8, 0.8, 0.2, 0.5)
storable = True
comparable = False
@classmethod
def getCopyExpression(cls):
return "[element[:] for element in value]"
@classmethod
def correctValue(cls, value):
if isinstance(value, list):
if all(isColor(element) for element in value):
return value, 0
return cls.getDefaultValue(), 2
def isColor(value):
if isinstance(value, list):
return len(value) == 4 and all(isinstance(element, (int, float)) for element in value)
return False
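# A minimal illustration of the correctValue contract above (commented out):
# a valid 4-component color passes through with status 0, anything else is
# replaced by the default value with status 2.
#
#     ColorSocket.correctValue([0.1, 0.2, 0.3, 1.0])  # -> ([0.1, 0.2, 0.3, 1.0], 0)
#     ColorSocket.correctValue("red")                 # -> ([0, 0, 0, 1], 2)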
|
nilq/baby-python
|
python
|
import pytest
from src.xmlToData.regexExtractors.parametersExtractor import ParametersExtractor
@pytest.mark.parametrize("xml_string", ["+ get_age(): int",
"# add_weight( ): int",
"- set_height( ): int"])
def test_extract_empty_parameters_string_list(xml_string):
list_of_parameters_string = ParametersExtractor.extract_parameters_string(xml_string)
assert list_of_parameters_string == ['']
@pytest.mark.parametrize("xml_string", ["+ get_age(int number,float real_number): int",
"# add_weight( int number , float real_number): int",
"- set_height( int number, float real_number ): int"])
def test_extract_parameters_string_list_with_and_without_white_spaces(xml_string):
list_of_parameters_string = ParametersExtractor.extract_parameters_string(xml_string)
assert list_of_parameters_string == ["int number", "float real_number"]
|
nilq/baby-python
|
python
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import re
import sys
import time
from math import ceil
from operator import add
from pyspark.sql import SparkSession
import SciNeMCore.utils as utils
def compute_contribs(outgoing_edges, rank):
"""Calculates contributions based on the number of edges between the two nodes."""
for i in range(len(outgoing_edges["cols"])):
yield (outgoing_edges["cols"][i], rank * outgoing_edges["vals"][i] / outgoing_edges["edges_num"])
def pagerank_score(rank, alpha, initial_pagerank):
return alpha * rank + (1 - alpha) * initial_pagerank
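# Worked example of the damping formula above: with alpha = 0.85, rank = 0.2
# and initial_pagerank = 0.1,
#     pagerank_score = 0.85 * 0.2 + 0.15 * 0.1 = 0.185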
def execute(links, alpha, convergence_error):
print("Ranking\t1\tInitializing Ranking Algorithm", flush=True)
    # count the nodes in the graph
node_count = links.count()
# print("--- links count %s %s---" % (time.time() - start_time, links.getNumPartitions()))
# print("Number of nodes: %s" % (node_count))
# print("Convergence Error: %s" % (convergence_error))
# start_time = time.time()
# initialize pagerank score
initial_pagerank = 1 / float(node_count)
ranks = links.map(lambda url_neighbors: (url_neighbors[0], initial_pagerank), preservesPartitioning = True)
# initialize error in a high value
max_error = 100
iteration = 0
print("Ranking\t2\tExecuting Ranking Algorithm", flush=True)
    # Iteratively calculate and update ranks until convergence (PageRank).
while(max_error >= convergence_error):
start_time = time.time()
prev_ranks = ranks
contribs = links.join(ranks, numPartitions = links.getNumPartitions()).flatMap(
lambda outgoing_edges: compute_contribs(outgoing_edges[1][0], outgoing_edges[1][1]))
# re-calculate pagerank score from neighbor contributions
ranks = contribs.reduceByKey(add, numPartitions = links.getNumPartitions()).mapValues(lambda rank: pagerank_score(rank, alpha, initial_pagerank))
# calculate error between consecutive iterations
max_error = ranks.join(prev_ranks).mapValues(lambda rank: abs(rank[0] - rank[1])).values().max()
print("Ranking\t2\tExecuting Ranking Algorithm (iteration %s)" % (iteration+1), flush=True)
iteration += 1
print("Ranking\t3\tSorting Ranking List", flush=True)
# ranks.sortBy(lambda x: - x[1]).coalesce(1).map(utils.toCSVLine).saveAsTextFile(outfile)
return ranks.sortBy(lambda x: - x[1])
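# A minimal driver sketch (commented out; the two-node adjacency data and the
# parameter values are hypothetical). Each RDD element pairs a node id with
# its outgoing-edge record in the shape compute_contribs expects:
#
#     spark = SparkSession.builder.appName("pagerank").getOrCreate()
#     links = spark.sparkContext.parallelize([
#         (0, {"cols": [1], "vals": [1.0], "edges_num": 1.0}),
#         (1, {"cols": [0], "vals": [1.0], "edges_num": 1.0}),
#     ])
#     ranks = execute(links, alpha = 0.85, convergence_error = 1e-6)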
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
nlpapiv2
The powerful Natural Language Processing APIs (v2) let you perform part of speech tagging, entity identification, sentence parsing, and much more to help you understand the meaning of unstructured text. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cloudmersive_nlp_api_client.api_client import ApiClient
class LanguageTranslationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def language_translation_translate_deu_to_eng(self, input, **kwargs): # noqa: E501
"""Translate German to English text with Deep Learning AI # noqa: E501
Automatically translates input text in German to output text in English using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_deu_to_eng(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.language_translation_translate_deu_to_eng_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.language_translation_translate_deu_to_eng_with_http_info(input, **kwargs) # noqa: E501
return data
def language_translation_translate_deu_to_eng_with_http_info(self, input, **kwargs): # noqa: E501
"""Translate German to English text with Deep Learning AI # noqa: E501
Automatically translates input text in German to output text in English using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_deu_to_eng_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method language_translation_translate_deu_to_eng" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `language_translation_translate_deu_to_eng`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/nlp-v2/translate/language/deu/to/eng', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LanguageTranslationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def language_translation_translate_eng_to_deu(self, input, **kwargs): # noqa: E501
"""Translate English to German text with Deep Learning AI # noqa: E501
Automatically translates input text in English to output text in German using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_eng_to_deu(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.language_translation_translate_eng_to_deu_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.language_translation_translate_eng_to_deu_with_http_info(input, **kwargs) # noqa: E501
return data
def language_translation_translate_eng_to_deu_with_http_info(self, input, **kwargs): # noqa: E501
"""Translate English to German text with Deep Learning AI # noqa: E501
Automatically translates input text in English to output text in German using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_eng_to_deu_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method language_translation_translate_eng_to_deu" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `language_translation_translate_eng_to_deu`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/nlp-v2/translate/language/eng/to/deu', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LanguageTranslationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def language_translation_translate_eng_to_fra(self, input, **kwargs): # noqa: E501
"""Translate English to French text with Deep Learning AI # noqa: E501
Automatically translates input text in English to output text in French using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_eng_to_fra(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.language_translation_translate_eng_to_fra_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.language_translation_translate_eng_to_fra_with_http_info(input, **kwargs) # noqa: E501
return data
def language_translation_translate_eng_to_fra_with_http_info(self, input, **kwargs): # noqa: E501
"""Translate English to French text with Deep Learning AI # noqa: E501
Automatically translates input text in English to output text in French using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_eng_to_fra_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method language_translation_translate_eng_to_fra" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `language_translation_translate_eng_to_fra`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/nlp-v2/translate/language/eng/to/fra', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LanguageTranslationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def language_translation_translate_eng_to_rus(self, input, **kwargs): # noqa: E501
"""Translate English to Russian text with Deep Learning AI # noqa: E501
Automatically translates input text in English to output text in Russian using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_eng_to_rus(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.language_translation_translate_eng_to_rus_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.language_translation_translate_eng_to_rus_with_http_info(input, **kwargs) # noqa: E501
return data
def language_translation_translate_eng_to_rus_with_http_info(self, input, **kwargs): # noqa: E501
"""Translate English to Russian text with Deep Learning AI # noqa: E501
Automatically translates input text in English to output text in Russian using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_eng_to_rus_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method language_translation_translate_eng_to_rus" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `language_translation_translate_eng_to_rus`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/nlp-v2/translate/language/eng/to/rus', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LanguageTranslationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def language_translation_translate_fra_to_eng(self, input, **kwargs): # noqa: E501
"""Translate French to English text with Deep Learning AI # noqa: E501
Automatically translates input text in French to output text in English using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_fra_to_eng(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.language_translation_translate_fra_to_eng_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.language_translation_translate_fra_to_eng_with_http_info(input, **kwargs) # noqa: E501
return data
def language_translation_translate_fra_to_eng_with_http_info(self, input, **kwargs): # noqa: E501
"""Translate French to English text with Deep Learning AI # noqa: E501
Automatically translates input text in French to output text in English using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_fra_to_eng_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method language_translation_translate_fra_to_eng" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `language_translation_translate_fra_to_eng`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/nlp-v2/translate/language/fra/to/eng', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LanguageTranslationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def language_translation_translate_rus_to_eng(self, input, **kwargs): # noqa: E501
"""Translate Russian to English text with Deep Learning AI # noqa: E501
Automatically translates input text in Russian to output text in English using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_rus_to_eng(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.language_translation_translate_rus_to_eng_with_http_info(input, **kwargs) # noqa: E501
else:
(data) = self.language_translation_translate_rus_to_eng_with_http_info(input, **kwargs) # noqa: E501
return data
def language_translation_translate_rus_to_eng_with_http_info(self, input, **kwargs): # noqa: E501
"""Translate Russian to English text with Deep Learning AI # noqa: E501
Automatically translates input text in Russian to output text in English using advanced Deep Learning and Neural NLP. Consumes 1-2 API calls per input sentence. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.language_translation_translate_rus_to_eng_with_http_info(input, async_req=True)
>>> result = thread.get()
:param async_req bool
:param LanguageTranslationRequest input: Input translation request (required)
:return: LanguageTranslationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['input'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method language_translation_translate_rus_to_eng" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'input' is set
if ('input' not in params or
params['input'] is None):
raise ValueError("Missing the required parameter `input` when calling `language_translation_translate_rus_to_eng`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input' in params:
body_params = params['input']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'text/json', 'application/xml', 'text/xml', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['Apikey'] # noqa: E501
return self.api_client.call_api(
'/nlp-v2/translate/language/rus/to/eng', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LanguageTranslationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
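# A minimal synchronous usage sketch (commented out). Configuration and
# LanguageTranslationRequest are the companion classes swagger-codegen
# normally generates next to this client; the API key placeholder, the
# request field and the response field names are assumptions here:
#
#     import cloudmersive_nlp_api_client
#     configuration = cloudmersive_nlp_api_client.Configuration()
#     configuration.api_key['Apikey'] = 'YOUR-API-KEY'
#     api = cloudmersive_nlp_api_client.LanguageTranslationApi(
#         cloudmersive_nlp_api_client.ApiClient(configuration))
#     request = cloudmersive_nlp_api_client.LanguageTranslationRequest(
#         text_to_translate = 'Guten Morgen')
#     response = api.language_translation_translate_deu_to_eng(request)
#     print(response.translated_text_result)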
|
nilq/baby-python
|
python
|
import torch
from nff.utils.scatter import compute_grad
from nff.utils import batch_to
from torch.nn import ModuleDict
from ase import Atoms
from ase import units
import numpy as np
from torchmd.topology import generate_nbr_list, get_offsets, generate_angle_list
def check_system(obj):
    # renamed parameter to avoid shadowing the builtin `object`
    import torchmd
    if not isinstance(obj, torchmd.system.System):
        raise TypeError("input should be a torchmd.system.System")
class System(Atoms):
"""Object that contains system information. Inherited from ase.Atoms
Attributes:
device (int or str): torch device "cpu" or an integer
dim (int): dimension of the system
props (dict{}): additional properties
"""
def __init__(
self,
*args,
device,
props={},
**kwargs
):
super().__init__(*args, **kwargs)
self.props = props
self.device = device
self.dim = self.get_cell().shape[0]
def get_nxyz(self):
"""Gets the atomic number and the positions of the atoms
inside the unit cell of the system.
Returns:
nxyz (np.array): atomic numbers + cartesian coordinates
of the atoms.
"""
nxyz = np.concatenate([
self.get_atomic_numbers().reshape(-1, 1),
self.get_positions().reshape(-1, 3)
], axis=1)
return nxyz
def get_cell_len(self):
return np.diag( self.get_cell() )
def get_batch(self):
batch = {
'nxyz': torch.Tensor(self.get_nxyz()),
'num_atoms': torch.LongTensor([self.get_number_of_atoms()]),
'energy': 0.0}
return batch
def set_temperature(self, T):
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
MaxwellBoltzmannDistribution(self, T)
if __name__ == "__main__":
from ase.lattice.cubic import FaceCenteredCubic
    from potentials import PairPot, ExcludedVolume, GNNPotentials  # GNNPotentials (used below) is assumed to live in the same local potentials module
from nff.train import get_model
params = {
'n_atom_basis': 32,
'n_filters': 32,
'n_gaussians': 32,
'n_convolutions': 32,
'cutoff': 5.0,
'trainable_gauss': False
}
# Define prior potential
lj_params = {'epsilon': 0.05,
'sigma': 3.0,
'power': 12}
size = 4
L = 19.73 / size
device = 'cpu'
atoms = FaceCenteredCubic(directions=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
symbol='H',
size=(size, size, size),
latticeconstant= L,
pbc=True)
system = System(atoms, device=device)
pair = PairPot(ExcludedVolume, lj_params,
cell=torch.Tensor(system.get_cell_len()),
device=device,
cutoff=L/2,
).to(device)
model = get_model(params)
PES = GNNPotentials(model, system.get_batch(), system.get_cell_len(), cutoff=5.0, device=system.device)
system.set_temperature(298.0)
# Todo test Pair pot with fixed atom index
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.8
from passlocker import User, user_details
def create_new_user(firstname, lastname, password):
    '''
    Function to create a new user with a username and password
    '''
    new_user = User(firstname, lastname, password)
    return new_user
def save_user(user):
'''
Function to save a new user
'''
user.save_user()
def display_user():
"""
Function to display existing user
"""
return User.display_user()
def login_user(firstname, lastname, password):
    """
    Function that checks whether a user exists and then logs the user in.
    """
    checks_user = user_details.check_user(firstname, lastname, password)
    return checks_user
def create_new_details(account,username,password):
"""
Function that creates new user details for a given user account
"""
new_detail = user_details(account,username,password)
return new_detail
def save_details(user_details):
"""
Function to save Credentials to the credentials list
"""
    user_details.save_details()
def display_accounts_details():
"""
Function that returns all the saved details.
"""
return user_details.display_details()
def delete_detail(user_details):
"""
    Function to delete credentials from the credentials list
"""
user_details.delete_details()
def find_detail(account):
"""
Function that finds a detail by an account name and returns the details that belong to that account
"""
return user_details.find_detail(account)
def check_detail(account):
"""
Function that check if a detail exists with that account name and return true or false
"""
return user_details.details_exist(account)
def copy_password(account):
"""
A funct that copies the password using the pyperclip framework
We import the framework then declare a function that copies the emails.
"""
return user_details.copy_password(account)
def generate_Password():
'''
generates a random password for the user.
'''
auto_password=user_details.generatePassword()
return auto_password
def passLocker():
print("Hello Welcome to your Passwords store. What is your name?")
user_name = input()
print(f"Hello {user_name}. what would you like to do?")
print('\n')
while True:
print("""
Use these short codes : ca - create a new account,
ea - check existing account
""")
short_code = input().lower()
if short_code == "ca":
print("Sign Up")
print('-' * 50)
firstname = input("first_name: ")
lastname = input("last_name: ")
while True:
print(" TP - To type your own pasword:\n GP - To generate random Password")
password_Choice = input().lower().strip()
if password_Choice == 'tp':
password = input("Enter Password\n")
break
elif password_Choice == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try again")
save_user(create_new_user(firstname, lastname ,password))
print("-"*85)
print(f"Hello {firstname}, Your account has been created succesfully! Your password is: {password}")
print("*"*85)
elif short_code == "ea":
print("-"*50)
print("Enter your User name and your Password to log in:")
print('-' * 50)
firstname = input("first name: ")
lastname = input("last name: ")
password = input("password: ")
login = login_user(firstname, lastname ,password)
            # the original compared the function object itself to its return value
            if login:
                print(f"Hello {firstname}. Welcome to Password Locker Manager")
print('\n')
while True:
print("""
                    Use these short codes:\n CD - Create new details \n
DD - Display details \n
FD - Find a detail \n
GP - Generate A random password \n
D - Delete detail \n
EX - Exit the application \n
""")
short_code = input().lower().strip()
if short_code == "cd":
print("Create new details")
print("."*20)
print("Account name ....")
account = input().lower()
print("Your Account username")
username = input()
while True:
print(""" TP - To type your own pasword if you already have an account:\n
GP - To generate random Password
""")
password_Choice = input().lower().strip()
if password_Choice == 'tp':
password = input("Enter Your Own Password\n")
break
elif password_Choice == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try again")
save_details(create_new_details(account,username,password))
print('\n')
print(f"Account details for: {account} - UserName: {username} - Password:{password} created succesfully")
print('\n')
elif short_code == "dd":
if display_accounts_details():
print("Here's a list of your accounts: ")
print('*' * 30)
print('_'* 30)
for account in display_accounts_details():
print(f" Account:{account.account} \n User Name:{username}\n Password:{password}")
print('_'* 30)
print('*' * 30)
else:
print("You don't have any details saved yet..........")
elif short_code == "fd":
print("enter the account you would like to search")
search_name = input().lower()
if find_detail(search_name):
search_detail = find_detail(search_name)
print(f"Account Name : {search_detail.account}")
print('-' * 50)
print(f"User Name: {search_detail.username} Password :{search_detail.password}")
print('-' * 50)
else:
print("That Credential does not exist")
print('\n')
elif short_code == "d":
print("Enter the account name of the details you want to delete")
search_name = input().lower()
if find_detail(search_name):
search_detail = find_detail(search_name)
print("_"*50)
search_detail.delete_details()
print('\n')
print(f"Your stored details for : {search_detail.account} successfully deleted!!!")
print('\n')
else:
print("That detail you want to delete does not exist in your store yet")
elif short_code == 'gp':
password = generate_Password()
print(f"Hooray! {password} Has been generated succesfully.")
elif short_code == 'ex':
print("Thanks for using this app to store your passwords.. See you next time!")
break
else:
print("Wrong entry... Check your entry again and let it match those in the list")
else:
print("Please enter a valid input to continue")
if __name__ == '__main__':
passLocker()
|
nilq/baby-python
|
python
|
import mdk
import requests
class MockRequest(object):
def __init__(self):
self.data = None
self.get = self.repeater
self.post = self.repeater
self.put = self.repeater
self.delete = self.repeater
self.status = 200
def repeater(self, url, params=None, headers=None, body=None):
response = requests.Response()
response.status_code = self.status
response.url = url
response.headers = headers
self.data = {"_request_url": url, "_request_headers": headers, "_request_params": params, "_request_body": body}
response.json = self.json
return response
def json(self):
return self.data
SERVER_ADDRESS = "server_address"
SERVICE_URL = SERVER_ADDRESS + "/alfresco/service"
ORG_ID = "org_id"
ORG_URL = SERVICE_URL + "/orgs/" + ORG_ID
PROJECT_ID = "project_id"
PROJECT_URL = SERVICE_URL + "/projects/" + PROJECT_ID
REF_ID = "ref_id"
REF_URL = PROJECT_URL + "/refs/" + REF_ID
ELEMENT_ID = "element_id"
ELEMENT_URL = REF_URL + "/elements/" + ELEMENT_ID
if __name__ == "__main__":
mock = MockRequest()
mms_connector = mdk.MmsConnection("server_address", "username", "password")
mms_connector._requests = MockRequest()
response_json = mms_connector.get_projects(project_id=PROJECT_ID)
if response_json["_request_url"] != PROJECT_URL:
print("Project url error")
if response_json["_request_body"] is not None:
print("Project get body error")
response_json = mms_connector.get_refs(PROJECT_ID, ref_id=REF_ID)
if response_json["_request_url"] != REF_URL:
print("Ref url error")
if response_json["_request_body"] is not None:
print("Ref get body error")
response_json = mms_connector.get_element(PROJECT_ID, ELEMENT_ID, ref_id=REF_ID)
if response_json["_request_url"] != ELEMENT_URL:
print("Element url error")
if response_json["_request_body"] is not None:
print("Element get body error")
response_json = mms_connector.get_element(PROJECT_ID, ELEMENT_ID)
if response_json["_request_url"] != ELEMENT_URL.replace(REF_ID, "master"):
print("Element url error")
if response_json["_request_body"] is not None:
print("Element get body error")
print("testing complete")
# projectId = raw_input("projectId: ")
# elementIds = [raw_input("elementId %d: " % (x + 1)) for x in range(3)]
# orgId = raw_input("orgId: ")
# element = mms_connector.get_element(projectId, elementIds[0])
# elements = mms_connector.get_elements(projectId, elementIds[1:3])
# element["name"] = 'testNameMon4'
# elements[0]["name"] = 'testNameMon5'
# elements[1]["name"] = 'testNameMon6'
# mms_connector.get_element_history(projectId, elementIds[0])
# mms_connector.get_orgs()
# project = mms_connector.get_project(projectId)
# project["name"] = "brandonWasHere"
# mms_connector.get_project()
# ref = mms_connector.get_refs(projectId)
# ref[0]['name'] = "testRef2"
# mms_connector.get_project_history(projectId)
# mms_connector.get_project_groups(projectId)
# mms_connector.get_project_documents(projectId)
# mms_connector.create_ref(projectId, ref[0])
# mms_connector.update_element(projectId, element)
# mms_connector.update_element(projectId, elements)
# mms_connector.update_project(orgId, project)
# mms_connector.delete_element(projectId, elementIds[0])
# mms_connector.delete_elements(projectId, elementIds[1:3])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class PropertyAuthInfo(object):
def __init__(self):
self._area = None
self._city = None
self._community = None
self._data_id = None
self._latitude = None
self._longitude = None
self._property = None
self._uid = None
@property
def area(self):
return self._area
@area.setter
def area(self, value):
self._area = value
@property
def city(self):
return self._city
@city.setter
def city(self, value):
self._city = value
@property
def community(self):
return self._community
@community.setter
def community(self, value):
self._community = value
@property
def data_id(self):
return self._data_id
@data_id.setter
def data_id(self, value):
self._data_id = value
@property
def latitude(self):
return self._latitude
@latitude.setter
def latitude(self, value):
self._latitude = value
@property
def longitude(self):
return self._longitude
@longitude.setter
def longitude(self, value):
self._longitude = value
@property
def property(self):
return self._property
@property.setter
def property(self, value):
self._property = value
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, value):
self._uid = value
def to_alipay_dict(self):
params = dict()
if self.area:
if hasattr(self.area, 'to_alipay_dict'):
params['area'] = self.area.to_alipay_dict()
else:
params['area'] = self.area
if self.city:
if hasattr(self.city, 'to_alipay_dict'):
params['city'] = self.city.to_alipay_dict()
else:
params['city'] = self.city
if self.community:
if hasattr(self.community, 'to_alipay_dict'):
params['community'] = self.community.to_alipay_dict()
else:
params['community'] = self.community
if self.data_id:
if hasattr(self.data_id, 'to_alipay_dict'):
params['data_id'] = self.data_id.to_alipay_dict()
else:
params['data_id'] = self.data_id
if self.latitude:
if hasattr(self.latitude, 'to_alipay_dict'):
params['latitude'] = self.latitude.to_alipay_dict()
else:
params['latitude'] = self.latitude
if self.longitude:
if hasattr(self.longitude, 'to_alipay_dict'):
params['longitude'] = self.longitude.to_alipay_dict()
else:
params['longitude'] = self.longitude
if self.property:
if hasattr(self.property, 'to_alipay_dict'):
params['property'] = self.property.to_alipay_dict()
else:
params['property'] = self.property
if self.uid:
if hasattr(self.uid, 'to_alipay_dict'):
params['uid'] = self.uid.to_alipay_dict()
else:
params['uid'] = self.uid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PropertyAuthInfo()
if 'area' in d:
o.area = d['area']
if 'city' in d:
o.city = d['city']
if 'community' in d:
o.community = d['community']
if 'data_id' in d:
o.data_id = d['data_id']
if 'latitude' in d:
o.latitude = d['latitude']
if 'longitude' in d:
o.longitude = d['longitude']
if 'property' in d:
o.property = d['property']
if 'uid' in d:
o.uid = d['uid']
return o
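# Usage sketch (illustrative, not part of the generated SDK file): the two
# dict helpers round-trip a model instance.
#   info = PropertyAuthInfo()
#   info.city = "Hangzhou"
#   info.uid = "2088001"
#   assert PropertyAuthInfo.from_alipay_dict(info.to_alipay_dict()).city == "Hangzhou"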
|
nilq/baby-python
|
python
|
def queryUser(feature_names):
print "Please enter changes to features in the format:\n(feature 1 number) (+ for increase, - for decrease)\n(feature 2 number) (+/-)\n...\n(enter -1 -1 to stop)"
for i in range(len(feature_names)):
print str(i+1)+": "+feature_names[i]
print "-------------------------------------"
    input_list = [None] * len(feature_names)
f,sign = raw_input().strip().split(' ')
f = int(f)
while (f!=-1):
input_list[f-1] = sign
f,sign = raw_input().strip().split(' ')
f = int(f)
return input_list
def write_to_file(weights):
    f = open("stored_weights.txt", "w")
    for feature in weights:
        f.write(feature + " " + str(weights[feature]) + "\n")
    f.close()
def read_from_file():
    f = open("stored_weights.txt", "r")
    weights = {}
    for line in f.readlines():
        feature, val = line.strip().split()
        weights[feature] = float(val)
    f.close()
    return weights
if __name__ == "__main__":
    d = {"a": 3, "b": 4, "c": 5, "d": 7}
    write_to_file(d)
    print read_from_file()
|
nilq/baby-python
|
python
|
from ..profiles import models
def user_display(user):
try:
profile = user.profile
except models.Profile.DoesNotExist:
return user.email
return profile.name
|
nilq/baby-python
|
python
|
"""
Contains the CNOT gate
"""
from .quantum_gate import QuantumGate
import numpy as np
class CNOT(QuantumGate):
""" Implements the CNOT gate
Parameters
------------
target : 0 or 1, optional
Specifies the target qubit, default is 0.
"""
def __init__(self, target = 0):
X = np.array([[0,1],[1,0]])
ground = np.diag([1,0])
excited = np.diag([0,1])
qeye = np.diag([1,1])
        if target == 0:
my_unitary = np.kron(qeye, ground) + np.kron(X, excited)
else:
my_unitary = np.kron(ground, qeye) + np.kron(excited, X)
QuantumGate.__init__(self, my_unitary)
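# Minimal sanity check (a sketch, independent of QuantumGate's interface):
# rebuild the unitary directly with numpy so the construction above can be
# verified by eye against the canonical 4x4 CNOT matrix.
if __name__ == "__main__":
    X = np.array([[0, 1], [1, 0]])
    ground, excited, qeye = np.diag([1, 0]), np.diag([0, 1]), np.diag([1, 1])
    print(np.kron(qeye, ground) + np.kron(X, excited))  # same matrix as CNOT(target=0)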
|
nilq/baby-python
|
python
|
import sys
from daqhats import hat_list, HatIDs, mcc118
# get hat list of MCC daqhat boards
board_list = hat_list(filter_by_id = HatIDs.ANY)
if not board_list:
print("No boards found")
sys.exit()
# Read and display every analog input channel (the MCC 152 is an analog
# output/DIO board with no a_in_read, so inputs are read from an MCC 118)
for entry in board_list:
    if entry.id == HatIDs.MCC_118:
        print("Board {}: MCC 118".format(entry.address))
        board = mcc118(entry.address)
        for channel in range(board.info().NUM_AI_CHANNELS):
            value = board.a_in_read(channel)
            print("Ch {0}: {1:.3f}".format(channel, value))
|
nilq/baby-python
|
python
|
"""Calculates the new ROM checksum and writes it back to the binary
"""
from binaryninja import BackgroundTaskThread, BinaryReader, show_message_box
import struct
class GenesisChecksum(BackgroundTaskThread):
def __init__(self, bv):
BackgroundTaskThread.__init__(self, "", True)
self.progress = 'genesis: Fixing up ROM checksum...'
self.rom_start = 0x200
self.checksum_off = 0x18e
self.bv = bv
self.br = BinaryReader(self.bv)
def _calculate_checksum(self):
self.br.seek(self.rom_start)
checksum = self.br.read16be()
while True:
checksum = (checksum + self.br.read16be()) & 0x0000ffff
if self.br.eof:
break
return checksum
def run(self):
checksum = self._calculate_checksum()
self.bv.write(self.checksum_off, struct.pack('>H', checksum))
show_message_box('genesis', 'ROM checksum has been updated')
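# Standalone sketch of the same algorithm (illustrative, not used by the
# plugin): the Genesis checksum is the 16-bit sum of all big-endian words
# from offset 0x200 to the end of the ROM, assuming an even ROM length.
def genesis_checksum(rom_bytes):
    total = 0
    for off in range(0x200, len(rom_bytes), 2):
        total = (total + struct.unpack_from('>H', rom_bytes, off)[0]) & 0xffff
    return total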
if __name__ == '__main__':
print('! this plugin does not run headless')
|
nilq/baby-python
|
python
|
# Direct CAD entry for Mitutoyo calipers via USB HID emulation.
# Assuming a Serpente board, with `req`, `clock`, `data` and `ready` connected to
# D0, D1, D2 and D3, respectively.
import time
import board
import digitalio
import mitutoyo
import usb_hid
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
# I/O
pin_ready = digitalio.DigitalInOut(board.D3)
pin_ready.direction = digitalio.Direction.INPUT
pin_ready.pull = digitalio.Pull.UP
meter = mitutoyo.Digimatic(req=board.D0, clock=board.D1, data=board.D2)
# USB HID keyboard emulation.
time.sleep(1)
kbd = Keyboard(usb_hid.devices)
layout = KeyboardLayoutUS(kbd)
print("Ready.")
while True:
# wait until ready goes low
while pin_ready.value:
time.sleep(0.1)
reading = meter.read()
if reading:
print("Typing %s." % reading)
layout.write(str(reading) + "\n")
# wait until ready goes up again to just get a single reading per press
while not pin_ready.value:
time.sleep(0.1)
|
nilq/baby-python
|
python
|
# calculate the z-score for each individual from the junction reads of the two isoform types
# 20170522 filter on minimum read counts: 25% of samples must have read counts >= 1
#          added postfix
# 20170626 changed the filter method
# 20170705 changed the filter method to leave-one-out
#          the per-individual sum of counts must be > 5
#          retain at least 3 individuals
# 20170818 use the chi2 test instead of the fisher test when the max value in
#          test_array is > 1e7, to reduce memory use and running time
# 20171007 append a '#' at the end of the output file
import scipy.stats as stats
import numpy as np
import os,sys,time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
# from matplotlib.backends.backend_pdf import PdfPages
def leave_one_out(remain_expr_array):
    # leave-one-out method: test each individual against the pooled others
    # input : remain_expr_array
    # [[ 1 3 4 4 4 4 ], S_junction skips
    # [ 2 4 5 5 5 5 ]] N_junction NonSkip
    # output
    # min_p : float
    # p_result_list_srt: list [ p_str, ... ]
    # test_array_list : list [ '[test_array]', ... ]
new_expr_array = np.array(remain_expr_array, dtype=float) # to avoid /0
n_ind = new_expr_array.shape[1]
p_result = []
test_array_list = []
for i in xrange(n_ind):
# is_test = np.arange(n_ind) == i
is_other= np.arange(n_ind) != i
test_ind_array = new_expr_array[:,i]
other_ind_array = np.sum(new_expr_array[:, is_other], axis=1) # sum two rows
test_array = np.array([test_ind_array, other_ind_array], dtype=int)
if np.max(test_array) > 1e7: # change to chi2 test 20170818
try:
chi, p, df, exp = stats.chi2_contingency(test_array) # 20181127 avoid error "frequencies has a zero element at %s." % (zeropos,))
except ValueError:
odd, p = stats.fisher_exact(test_array)
else:
odd, p = stats.fisher_exact(test_array)
p_result.append(p)
test_array_list.append(str(test_array.tolist()))
# p_result = np.array(p_result) * n_ind # correct p-value
# p_result[ p_result >1 ] = 1
p_result = 1 - (1 - np.array(p_result)) ** n_ind # correct p-value
min_p = p_result.min()
p_result_list_srt = map(str, p_result.tolist())
return min_p, p_result_list_srt, test_array_list
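# Worked example for leave_one_out (illustrative): with
# remain_expr_array = [[1, 3, 4], [2, 4, 5]], holding out individual 0 tests
# the 2x2 table [[1, 2], [7, 9]] (row 0: the held-out individual's S/N counts,
# row 1: the pooled S/N counts of the others) with Fisher's exact test; the
# per-individual p-values are then corrected as 1 - (1 - p) ** n_ind.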
def filter_fun(in_addr, postfix):
'''
input_file format:
    LongJunc_ShortJunc1_ShortJunc2 isoform_type(N:normal S:skipping) sample1_counts sample2_counts sample3_counts
1_29206_30976_+_1_29206_30564_+_1_30667_30976_+_M N 0.0 0.0 0.0
1_29206_30976_+_1_29206_30564_+_1_30667_30976_+_M S 0.0 0.0 0.0
'''
f_in = open(in_addr)
path, ext = os.path.splitext(in_addr)
dat = f_in.readlines()
tmp = []
# result_exp = [dat[0]]
exp_header = dat[0]
# result_parameter = ['\t'.join(['id', 'min_p_val', 'n_remian', 'p_val','[[test_S,test_N],[other_S,other_N]\n'])]
    parameter_header = '\t'.join(['id', 'min_p_val', 'n_remain', 'p_val', '[[test_S,test_N],[other_S,other_N]]\n'])
    ### write output incrementally instead of buffering everything
out_addr_exp = '%s_%s.txt' % (path, postfix)
out_addr_para = '%s_%s_para.txt' % (path, postfix)
f_out = open(out_addr_exp, 'w')
f_out_para = open(out_addr_para, 'w')
f_out.write(exp_header)
f_out_para.write(parameter_header)
# header_l = dat[0].split()
# new_header = '\t'.join([header_l[0]] + header_l[2:])
# result = [new_header]
# n_sample = len(header_l[2:])
# n_sample_filter = 0.25 * n_sample # 25% sample
for each_line in dat[1:]:
each_line_l = each_line.strip().split()
line_id = each_line_l[0]
type_l = ['S', 'N']
tmp.append(each_line_l[2:])
if len(tmp) == 2:
            # filter out events with low junction read counts
# n_expr = np.sum(np.array(tmp, dtype=float) > 0, axis=1)
# if (n_expr > n_sample_filter).all():
# if line_id=='22_18604468_18606772_18606928_18606938_18606965_18606970_18609145_+': #outlier is NA
# if line_id in '22_17585700_17586481_17586743_17589197_+_0.0620568992935_83_0.00468140442133': # outlier is error
# if line_id in '22_22930674_23006934_23101221_+': # for test
# print line_id
tmp_array = np.array(tmp, dtype=int) # expression array
ind_expr_sum = np.sum(tmp_array, axis=0)
ind_expr_sum_filter = ind_expr_sum > 5 # individual sum count must > 5
ind_expr_sum_filter_out = ind_expr_sum <= 5
if np.sum(ind_expr_sum_filter) < 3: # remain at least 3 individual
tmp = []
continue
remain_expr_array = tmp_array[:, ind_expr_sum_filter]
            n_remain = np.sum(ind_expr_sum_filter)
# n_expr = np.sum(remain_expr_array > 0, axis=1) # at last 3 individual count > 0 at two isform
# if (n_expr < 3).any():
# tmp = []
# continue
tmp_array[:, ind_expr_sum_filter_out] = -1 # change filtered value to -1
            min_p, p_result_list_srt, test_array_list = leave_one_out(remain_expr_array)
            para_line_l = map(str, [min_p, n_remain]) + p_result_list_srt
            para_line_str = '\t'.join([line_id, '\t'.join(para_line_l)]) + '\n'
            count_line_l = map(str, [min_p, n_remain]) + test_array_list
            count_line_str = '\t'.join([line_id, '\t'.join(count_line_l)]) + '\n'
# result_test_count.append(count_line)
# result_parameter.append(para_line_str)
# result_parameter.append(count_line_str)
f_out_para.write(para_line_str)
f_out_para.write(count_line_str)
i = 0
if min_p < 0.05:
for expr in tmp_array:
ans_str = '\t'.join(map(str, expr))
ans_line_str = '\t'.join([line_id, type_l[i], ans_str]) + '\n'
# result_exp.append(ans_line_str)
f_out.write(ans_line_str)
i += 1
tmp = []
# out_addr_exp = '%s_%s.txt' % (path, postfix)
# out_addr_para = '%s_%s_para.txt' % (path, postfix)
# out_addr_count = '%s_%s_count.txt' % (path, postfix)
# f_out = open(out_addr_exp, 'w')
# f_out.write(''.join(result_exp))
f_out.write('#')
f_out.close()
# f_out_para = open(out_addr_para, 'w')
# f_out_para.write(''.join(result_parameter))
f_out_para.close()
# f_out_count = open(out_addr_count, 'w')
# f_out_count.write(''.join(result_test_count))
# f_out_count.close()
def usage():
    print '''
    zscore_calc.py isfo_juncts.txt postfix
    out: isfo_juncts_postfix.txt
         isfo_juncts_postfix_para.txt
    '''
def main():
    print ' '.join(sys.argv)
    print 'start:', time.ctime()
    if len(sys.argv) < 3:
        usage()
        sys.exit(1)
    in_addr = sys.argv[1]
    postfix = sys.argv[2]
    filter_fun(in_addr, postfix)
    print 'end:', time.ctime()
main()
|
nilq/baby-python
|
python
|
'''Defines blueprints and routes for the app'''
from flask import Blueprint
from flask_restful import Api, Resource
from .views.users import Signup
BLUE = Blueprint('api', __name__, url_prefix="/api/v1")
API = Api(BLUE)
API.add_resource(Signup, '/auth/signup')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
n, s, d = map(int, input().split())
for i in range(n):
xi, yi = map(int, input().split())
if xi < s and yi > d:
print("Yes")
exit()
print("No")
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import komand
from .schema import ListAlertsInput, ListAlertsOutput
# Custom imports below
class ListAlerts(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='list_alerts',
            description='List Carbon Black alerts with given parameters',
input=ListAlertsInput(),
output=ListAlertsOutput())
def run(self, params={}):
query_params = [
("q", params.get("query", "")),
("rows", params.get("rows", 10)),
("start", params.get("start", 0))
]
try:
results = self.connection.carbon_black.get_object("/api/v1/alert", query_parameters=query_params)["results"]
except Exception as ex:
self.logger.error('Failed to get alerts: %s', ex)
raise ex
results = komand.helper.clean(results)
return {'alerts': results}
def test(self):
if self.connection.test():
return {}
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from mock import Mock
from mock import patch
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from urllib2 import HTTPError
from diamond.collector import Collector
from hacheck import HacheckCollector
################################################################################
class TestHacheckCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('HacheckCollector', {})
self.collector = HacheckCollector(config, None)
@patch.object(Collector, 'publish')
@patch('urllib2.urlopen')
def test_works_with_real_data(self, urlopen_mock, publish_mock):
urlopen_mock.return_value = self.getFixture('metrics')
self.collector.collect()
self.assertPublishedMany(
publish_mock,
{
'hacheck.cache.expirations': 2692,
'hacheck.cache.sets': 2713,
'hacheck.cache.gets': 28460,
'hacheck.cache.hits': 25747,
'hacheck.cache.misses': 2713,
'hacheck.outbound_request_queue_size': 12
},
)
@patch.object(Collector, 'publish')
@patch('urllib2.urlopen')
def test_graceful_failure_on_http_error(self, urlopen_mock, publish_mock):
urlopen_mock.side_effect = HTTPError(
Mock(), Mock(), Mock(), Mock(), Mock())
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
@patch.object(Collector, 'publish')
@patch('urllib2.urlopen')
def test_graceful_failure_on_json_error(self, urlopen_mock, publish_mock):
urlopen_mock.return_value = self.getFixture('bad_metrics')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
################################################################################
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
# coding: utf-8
import click
from datetime import datetime
import re
import shortuuid
from werkzeug.security import generate_password_hash
from app import db
from app import get_db_cursor
from grid_id import GridId
from institution import Institution
from package import Package
from permission import Permission
from permission import UserInstitutionPermission
from perpetual_access import PerpetualAccessInput
from ror_id import RorId, RorGridCrosswalk
from saved_scenario import SavedScenario
from user import User
from util import get_sql_answer
def add_institution(institution_name, old_username, ror_id_list, is_consortium=False):
click.echo("initializing institution {}".format(institution_name))
my_institutions = db.session.query(Institution).filter(
Institution.display_name == institution_name,
Institution.id.notlike('%jisc%')).all()
if my_institutions:
my_institution = my_institutions[0]
click.echo(f" *** using existing institution {my_institution} ***")
if is_consortium:
click.echo("Setting up as a consortium account")
else:
my_institution = Institution()
my_institution.display_name = institution_name
my_institution.old_username = old_username
my_institution.is_demo_institution = False
my_institution.is_consortium = is_consortium
db.session.add(my_institution)
click.echo(f" adding {my_institution}")
if not ror_id_list:
return
for ror_id in ror_id_list:
add_ror(ror_id, my_institution.id)
db.session.commit()
def add_ror(ror_id, institution_id):
click.echo("adding ROR IDs, if needed")
if not db.session.query(RorId).filter(RorId.institution_id == institution_id, RorId.ror_id==ror_id).all():
db.session.add(RorId(institution_id=institution_id, ror_id=ror_id))
click.echo(f" adding ROR ID {ror_id} for {institution_id}")
else:
click.echo(" ROR ID already there")
db.session.commit()
# add grid ids
click.echo("adding GRID IDs, if needed")
click.echo(" looking up GRID IDs")
grid_ids = [x.grid_id for x in RorGridCrosswalk.query.filter(RorGridCrosswalk.ror_id == ror_id).all()]
if not grid_ids:
raise ValueError("at least one ror id corresponding to a grid id is required)")
for g_id in grid_ids:
if not db.session.query(GridId).filter(GridId.institution_id == institution_id, GridId.grid_id==g_id).all():
db.session.add(GridId(institution_id=institution_id, grid_id=g_id))
click.echo(f" adding GRID ID {g_id} for {institution_id}")
else:
click.echo(" GRID ID already there")
db.session.commit()
# jump_citing
click.echo(" populating jump_citing for GRID ID {}".format(g_id))
num_citing_rows = get_sql_answer(db, f"select count(*) from jump_citing where grid_id = '{g_id}'")
num_citing_rows_view = get_sql_answer(db, f"select count(*) from jump_citing_view where grid_id = '{g_id}'")
click.echo(f"num_citing_rows: {num_citing_rows}, num_citing_rows_view {num_citing_rows_view}")
if num_citing_rows:
click.echo(f" {num_citing_rows} jump_citing rows already exist for grid id '{g_id}'")
else:
with get_db_cursor() as cursor:
cursor.execute(
f"delete from jump_citing where grid_id = '{g_id}'"
)
cursor.execute(
f"insert into jump_citing (select * from jump_citing_view where grid_id = '{g_id}')"
)
click.echo(f" created jump_citing rows for grid id {g_id}")
# jump_authorship
click.echo(f" populating jump_authorship for GRID ID {g_id}")
num_authorship_rows = get_sql_answer(db, f"select count(*) from jump_authorship where grid_id = '{g_id}'")
num_authorship_rows_view = get_sql_answer(db, f"select count(*) from jump_authorship_view where grid_id = '{g_id}'")
click.echo(f"num_authorship_rows: {num_authorship_rows}, num_authorship_rows_view {num_authorship_rows_view}")
if num_authorship_rows:
click.echo(f" {num_authorship_rows} jump_authorship rows already exist for grid id {g_id}")
else:
with get_db_cursor() as cursor:
cursor.execute(
f"delete from jump_authorship where grid_id = '{g_id}'"
)
cursor.execute(
f"insert into jump_authorship (select * from jump_authorship_view where grid_id = '{g_id}')"
)
click.echo(f" created jump_authorship rows for grid id {g_id}")
my_packages = Package.query.filter(Package.institution_id==institution_id)
for my_package in my_packages:
rows_inserted = my_package.update_apc_authorships()
click.echo(f" inserted apc rows for package {my_package}")
def add_user(user_name, email, institution, permissions=None, password=None, jiscid=None):
email = email.strip()
user_name = user_name.strip()
click.echo(f"initializing user {email}")
if jiscid is not None:
institution_id = "institution-jisc" + jiscid
my_institution = Institution.query.get(institution_id)
click.echo(my_institution)
else:
my_institutions = db.session.query(Institution).filter(
Institution.id == institution,
Institution.id.notlike('%jisc%')).all()
if my_institutions:
my_institution = my_institutions[0]
click.echo(f" *** using existing institution {my_institution} ***")
else:
click.echo(f" *** FAILED: institution {institution} doesn't exist, exiting ***")
return
my_user = db.session.query(User).filter(User.email.ilike(email)).scalar()
if my_user:
click.echo(f" *** user {my_user} already exists. updating display name but not modifying password. ***")
else:
my_user = User()
my_user.email = email
my_user.password_hash = generate_password_hash(password or "")
my_user.display_name = user_name
db.session.merge(my_user)
click.echo(f" saving {my_user}")
click.echo(" updating permissions and linking to institution")
permission_names = permissions
if not permission_names:
permission_names = ["view", "modify", "admin"]
existing_permissions = db.session.query(UserInstitutionPermission).filter(
UserInstitutionPermission.user_id == my_user.id,
UserInstitutionPermission.institution_id == my_institution.id
).all()
for ep in existing_permissions:
click.echo(f" *** removing existing user permission {ep} ***")
db.session.delete(ep)
for permission_name in permission_names:
perm = Permission.get(permission_name)
if not perm:
raise ValueError(f"unknown permission '{permission_name}'")
user_perm = UserInstitutionPermission(
user_id=my_user.id,
institution_id=my_institution.id,
permission_id=perm.id
)
db.session.add(user_perm)
db.session.flush()
click.echo(f" adding {user_perm}")
db.session.commit()
@click.group()
def cli():
"""
Create institutions, users and add ROR IDs.
Note: Quotes are required around any flag inputs with spaces.
Examples:
python init.py --help
python init.py inst --help
python init.py inst --name='Forest College' --shortname=forcoll --ror=05fs6jp91
python init.py user --name='Scott Chamberlain' --email=myrmecocystus@gmail.com --institution='institution-Z9vU94XpmwKF'
python init.py ror --ror=05fs6jp91 --institution=institution-Z9vU94XpmwKF
python init.py ror --ror=005p9kw61 --ror=00hpz7z43 --institution=institution-Z9vU94XpmwKF
"""
@cli.command(short_help='Create an institution Unsub account')
@click.option("--name", help="Name of the institution", type=str)
@click.option("--shortname", help="Shortname of the institution (e.g., amnh)", type=str)
@click.option("--ror", help="One or more ROR IDs (can be passed multiple times)",
type=str, multiple=True, required=True)
@click.option("--is_consortium", help="True if is a consortium", type=bool, default=False)
def inst(name, shortname, ror, is_consortium):
click.echo(f"Creating Unsub account for '{name}' w/ shortname '{shortname}' and ROR ID(s) {ror}")
add_institution(name, shortname, list(ror), is_consortium)
@cli.command(short_help='create a user and associate them with an institution')
@click.option("--name", help="Full name (first last) of the person", type=str, default="Admin", required=True)
@click.option("--email", help="Email for the person", type=str, required=True)
@click.option("--institution", help="An institution ID", type=str, required=True)
@click.option("--permissions", help="Permissions", default="view,modify,admin",
show_default=True, type=str, required=True)
@click.option("--password", help="Password to associate with the user (default: no password set)",
default="", type=str, show_default=True)
@click.option("--jiscid", help="Jisc institution ID", type=str)
def user(name, email, institution, permissions, password, jiscid):
click.echo(f"Adding user {name} ({email}) to {institution}")
permissions = permissions.split(",")
add_user(name, email, institution, permissions, password, jiscid)
@cli.command(short_help='add a ROR ID to an institution/account')
@click.option("--ror", help="One or more ROR IDs (can be passed multiple times)",
type=str, multiple=True, required=True)
@click.option("--institution", help="An institution ID", type=str, required=True)
def ror(ror, institution):
click.echo(f"Adding ROR ID(s) {ror} to {institution}")
for x in ror:
add_ror(x, institution)
if __name__ == "__main__":
cli()
|
nilq/baby-python
|
python
|
import difflib
from bs4 import BeautifulSoup
from selenium import webdriver
from time import sleep
import itertools
import requests
import re
import sqlite3
import jellyfish
conn = sqlite3.connect('bazadanych.db')
c = conn.cursor()
def forbet_teams():
driver = webdriver.Firefox()
driver.get("https://stats.iforbet.pl/pl/soccer/events")
sleep(5)
# on click action
elements = driver.find_elements_by_xpath("//div[@class='leftMenu__item leftMenu__item--nested1' and @data-menu]/div[@class='leftMenu__subheader']")
def get_leagues():
urls = list()
#tests
count = 0
#
for e in elements:
e.click()
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
urls.append(str(soup.findAll('div', {"class": "leftMenu__content leftMenu__content--hidden opened"})))
# tests
if(count==10): break
else: count=count+1
#
return urls
urls = get_leagues()
leagues = list()
for url in urls:
leagues.append(re.findall('href=[\'"]?([^\'" >]+)', url))
teams = list()
for x in itertools.chain.from_iterable(leagues):
page = requests.get(x)
soup = BeautifulSoup(page.content, 'html.parser')
table_body = soup.find_all('body')
name = str(re.findall(r'<a[\s]+[^>]*?href[\s]?=[\s\"\']*(.*?)[\"\']*.*?>([^<]+|.*?)?<\/a>', str(table_body)))
teams_forbet = re.findall("'\w+[\s]\w+'", name, re.UNICODE)
#print(str(x)+": "+str(teams_forbet))
for team in teams_forbet:
teams.append(team)
return teams
def fortuna_teams():
league = list()
countries = list()
teams = dict()
fortuna = requests.get('https://s5.sir.sportradar.com/fortuna2/pl/1')
html = fortuna.content
soup = BeautifulSoup(html, 'html.parser')
urls = soup.findAll("a",{"class":"list-group-item"})
for y in urls:
countries.append(y.attrs['href'])
for leagues in countries:
page = requests.get('https://s5.sir.sportradar.com'+leagues)
html = page.content
soup = BeautifulSoup(html, 'html.parser')
leagues = soup.findAll("a", {"class": "list-group-item"})
#leagues_names = soup.findAll("span",{"class":"vertical-align-middle"})
#for leagues_name in leagues_names:
# print (leagues_name.text)
for y in leagues:
league.append(y.attrs['href'])
# tests
count = 0
#
for link in league:
key = link
page = requests.get('https://s5.sir.sportradar.com' + link)
html = page.content
soup = BeautifulSoup(html, 'html.parser')
names = soup.findAll("div", {"class": "hidden-sm-up wrap"})
for name in names:
if key not in teams:
teams[key]=[]
teams[key].append(name.text)
else:
teams[link].append(name.text)
# tests
if (count == 10):
return teams
else:
count = count + 1
return teams
scrapped_fortuna = list()
for key, values in fortuna_teams().items():
for value in values:
scrapped_fortuna.append(value)
def forbet_insert():
c.execute('CREATE TABLE IF NOT EXISTS Forbet_teams(forbet_id INTEGER PRIMARY KEY,team_name STRING)')
    insert_teams = forbet_teams()
try:
without_duplicates = list(dict.fromkeys(insert_teams))
for team in without_duplicates:
c.execute('INSERT INTO Forbet_teams VALUES (NULL, ?)', (team,))
except sqlite3.IntegrityError as ie:
pass
conn.commit()
def fortuna_insert():
c.execute('CREATE TABLE IF NOT EXISTS Fortuna_teams(fortuna_id INTEGER PRIMARY KEY, team_name STRING)')
    insert_teams = scrapped_fortuna
try:
without_duplicates = list(dict.fromkeys(insert_teams))
for team in without_duplicates:
c.execute('INSERT INTO Fortuna_teams VALUES (NULL, ?)', ("'" +team+ "'",))
except sqlite3.IntegrityError as ie:
pass
conn.commit()
def relationship_database():
c.execute("select * from Fortuna_teams")
records = c.fetchall()
fortuna = list()
for row in records:
fortuna.append(row[1])
c.execute("select * from Forbet_teams")
records = c.fetchall()
forbet= list()
for row in records:
forbet.append(row[1])
c.execute('CREATE TABLE IF NOT EXISTS Relationship_table (team_name STRING, id_fortuna STRING, id_forbet STRING)')
# test
count = 0
#
try:
for team in forbet:
if (count==1000): break
else: count=count+1
if(team in fortuna):
c.execute("INSERT INTO Relationship_table VALUES (?, ?, ?)", (team, fortuna.index(team), forbet.index(team) ))
conn.commit()
else:
name = difflib.get_close_matches(team,fortuna,cutoff=0.95)
if name:
c.execute("INSERT INTO Relationship_table VALUES (?, ?, ?)", (team, fortuna.index(name[0]), forbet.index(team) ))
conn.commit()
else: continue
except sqlite3.IntegrityError as ie:
pass
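    # Note on the fuzzy match above: difflib.get_close_matches('Arsenal FC',
    # ['Arsenal', 'Chelsea'], cutoff=0.95) returns [] because the similarity
    # ratio (about 0.82 here) falls below 0.95; lowering cutoff trades
    # precision for recall.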
#fortuna_insert()
#forbet_insert()
relationship_database()
|
nilq/baby-python
|
python
|
filas = int(input("Enter the number of rows of the matrix: "))
columnas = int(input("Enter the number of columns of the matrix: "))
matriz = []
for m in range(filas):
    matriz.append([])
    for n in range(columnas):
        matriz[m].append(int(input(f"Enter the value for column {n} of row {m}: ")))
print(matriz)
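# Equivalent construction with a nested list comprehension (same prompts, same result):
# matriz = [[int(input(f"Enter the value for column {n} of row {m}: ")) for n in range(columnas)] for m in range(filas)]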
|
nilq/baby-python
|
python
|
import os
from setuptools import setup, find_packages
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, "README.md")) as f:
long_description = f.read()
setup(
name="spire-pipeline",
version="1.2.0",
description="Run software pipelines using doit",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/lamyj/spire",
author="Julien Lamy",
author_email="lamy@unistra.fr",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Build Tools",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
keywords="pipeline, workflow, task execution",
packages=find_packages(exclude=["tests"]),
install_requires=["doit", "jinja2", "numpy"],
)
|
nilq/baby-python
|
python
|
from sensors.range_finder import UltrasonicRangeFinder
from multiprocessing import Queue, Value
import threading
import logging
import unittest
from util.definitions import GPIO_ULTRASONIC_ECHO, GPIO_ULTRASONIC_TRIGGER
# In order to execute this test, an ultrasonic range finder has to be connected to the RPI GPIO ports above.
# We don't test the multiprocessing here, but use the same data structures
class TestStateProvider(unittest.TestCase):
# just sets the stop flag so that the range finder loop will exit
def stop_range_finder(self, stop_flag):
print("Ultrasonic sensor should stop now.")
stop_flag.value = True
def test_values_good(self):
queue = Queue(1000)
stop_flag = Value('i', False)
rangefinder = UltrasonicRangeFinder(GPIO_ULTRASONIC_TRIGGER, GPIO_ULTRASONIC_ECHO)
# this should give us around 50 measurements
t = threading.Timer(0.50, self.stop_range_finder, args=(stop_flag,))
t.start()
# this will loop until the timer kicks in
rangefinder.measure(queue, stop_flag, 0)
# last value in queue should be equal to current distance approximation
last_distance = None
count = 0
while not queue.empty():
count += 1
last_distance = queue.get_nowait()
self.assertGreater(last_distance.value, 0.03)
self.assertIsNotNone(last_distance.speed)
self.assertGreaterEqual(count, 9)
self.assertEqual(rangefinder.distance, last_distance.value)
self.assertAlmostEqual(rangefinder.speed, 0, delta=0.5)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from moviesapp.models import Movies
from moviesapp.forms import MoviesForm
def index(request):
return render(request, 'moviesapp/index.html')
def moviesList(request):
moviesList=Movies.objects.all()
movies_dict={'movies':moviesList}
return render(request, 'moviesapp/moviesList.html',movies_dict)
def addMovies(request):
    moviesForm = MoviesForm()
    movies_dict = {'movies': moviesForm}
    if request.method == 'POST':
        moviesForm = MoviesForm(request.POST)
        movies_dict = {'movies': moviesForm}
        if moviesForm.is_valid():
            moviesForm.save()
            movies_dict['success'] = "Successful Movie Registration"
        return render(request, 'moviesapp/addMovies.html', movies_dict)
    return render(request, 'moviesapp/addMovies.html', movies_dict)
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class DelayConfig(AppConfig):
name = "channels.delay"
label = "channels.delay"
verbose_name = "Channels Delay"
|
nilq/baby-python
|
python
|
from .twitter import TwitterReviews
from .imdb import IMDBReviews
from .ny_times import NYTimesReviews
__all__ = ['TwitterReviews', 'IMDBReviews', 'NYTimesReviews']
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
This tokenizer has been copied from the ``tokenize.py`` standard library
tokenizer. The reason was simple: The standard library tokenizer fails
if the indentation is not right. The fast parser of jedi however requires
"wrong" indentation.
Basically this is a stripped down version of the standard library module, so
you can read the documentation there. Additionally we included some speed and
memory optimizations here.
"""
from __future__ import absolute_import
import string
import re
from collections import namedtuple
import itertools as _itertools
from jedi.parser.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap,
NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
from jedi._compatibility import is_py3, py_version, u
from jedi.common import splitlines
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
if is_py3:
# Python 3 has str.isidentifier() to check if a char is a valid identifier
is_identifier = str.isidentifier
else:
namechars = string.ascii_letters + '_'
is_identifier = lambda s: s in namechars
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
def group(*choices, **kwargs):
capture = kwargs.pop('capture', False) # Python 2, arrghhhhh :(
assert not kwargs
start = '('
if not capture:
start += '?:'
return start + '|'.join(choices) + ')'
def any(*choices):
return group(*choices) + '*'
def maybe(*choices):
return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Name = r'\w+'
if py_version >= 36:
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
else:
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
if is_py3:
Octnumber = r'0[oO][0-7]+'
else:
Octnumber = '0[0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
# The valid string prefixes. Only contain the lower case versions,
    # and don't contain any permutations (include 'fr', but not
# 'rf'). The various permutations will be generated.
_valid_string_prefixes = ['b', 'r', 'u', 'br']
if py_version >= 36:
_valid_string_prefixes += ['f', 'fr']
if py_version <= 27:
# TODO this is actually not 100% valid. ur is valid in Python 2.7,
# while ru is not.
_valid_string_prefixes.append('ur')
# if we add binary f-strings, add: ['fb', 'fbr']
result = set([''])
for prefix in _valid_string_prefixes:
for t in _itertools.permutations(prefix):
# create a list with upper and lower versions of each
# character
for u in _itertools.product(*[(c, c.upper()) for c in t]):
result.add(''.join(u))
return result
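# For example, the returned set contains '', 'b', 'r', 'u', the case variants
# such as 'B' and 'R', and every case/order permutation of the two-character
# prefixes ('br', 'bR', 'Rb', ...); the 'f' and 'fr' variants are added on
# Python 3.6+ and 'ur' only on Python <= 2.7.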
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&@|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, Name, capture=True)
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = group(Whitespace, capture=True) + \
group(PseudoExtras, Number, Funny, ContStr, Name, capture=True)
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
endpats[_prefix + "'"] = _compile(Single)
endpats[_prefix + '"'] = _compile(Double)
endpats[_prefix + "'''"] = _compile(Single3)
endpats[_prefix + '"""'] = _compile(Double3)
# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
for p in (t + '"', t + "'"):
single_quoted.add(p)
for p in (t + '"""', t + "'''"):
triple_quoted.add(p)
# TODO add with?
ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except',
'finally', 'while', 'return')
pseudo_token_compiled = _compile(PseudoToken)
class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])):
def __repr__(self):
return ('TokenInfo(type=%s, string=%r, start=%r, prefix=%r)' %
self._replace(type=self.get_type_name()))
def get_type_name(self, exact=True):
if exact:
typ = self.exact_type
else:
typ = self.type
return tok_name[typ]
@property
def exact_type(self):
if self.type == OP and self.string in opmap:
return opmap[self.string]
else:
return self.type
@property
def end_pos(self):
lines = splitlines(self.string)
if len(lines) > 1:
return self.start_pos[0] + len(lines) - 1, 0
else:
return self.start_pos[0], self.start_pos[1] + len(self.string)
def source_tokens(source, use_exact_op_types=False):
"""Generate tokens from a the source code (string)."""
lines = splitlines(source, keepends=True)
return generate_tokens(lines, use_exact_op_types)
def generate_tokens(lines, use_exact_op_types=False):
"""
A heavily modified Python standard library tokenizer.
Additionally to the default information, yields also the prefix of each
token. This idea comes from lib2to3. The prefix contains all information
that is irrelevant for the parser like newlines in parentheses or comments.
"""
paren_level = 0 # count parentheses
indents = [0]
max = 0
numchars = '0123456789'
contstr = ''
contline = None
# We start with a newline. This makes indent at the first position
# possible. It's not valid Python, but still better than an INDENT in the
# second line (and not in the first). This makes quite a few things in
# Jedi's fast parser possible.
new_line = True
prefix = '' # Should never be required, but here for safety
additional_prefix = ''
for lnum, line in enumerate(lines, 1): # loop over lines in stream
pos, max = 0, len(line)
if contstr: # continued string
endmatch = endprog.match(line)
if endmatch:
pos = endmatch.end(0)
yield TokenInfo(STRING, contstr + line[:pos], contstr_start, prefix)
contstr = ''
contline = None
else:
contstr = contstr + line
contline = contline + line
continue
while pos < max:
pseudomatch = pseudo_token_compiled.match(line, pos)
if not pseudomatch: # scan for tokens
txt = line[pos:]
if txt.endswith('\n'):
new_line = True
yield TokenInfo(ERRORTOKEN, txt, (lnum, pos), prefix)
break
prefix = additional_prefix + pseudomatch.group(1)
additional_prefix = ''
start, pos = pseudomatch.span(2)
spos = (lnum, start)
token = pseudomatch.group(2)
initial = token[0]
if new_line and initial not in '\r\n#':
new_line = False
if paren_level == 0:
i = 0
while line[i] == '\f':
i += 1
start -= 1
if start > indents[-1]:
yield TokenInfo(INDENT, '', spos, '')
indents.append(start)
while start < indents[-1]:
yield TokenInfo(DEDENT, '', spos, '')
indents.pop()
if (initial in numchars or # ordinary number
(initial == '.' and token != '.' and token != '...')):
yield TokenInfo(NUMBER, token, spos, prefix)
elif initial in '\r\n':
if not new_line and paren_level == 0:
yield TokenInfo(NEWLINE, token, spos, prefix)
else:
additional_prefix = prefix + token
new_line = True
elif initial == '#': # Comments
assert not token.endswith("\n")
additional_prefix = prefix + token
elif token in triple_quoted:
endprog = endpats[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield TokenInfo(STRING, token, spos, prefix)
else:
contstr_start = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
contstr_start = lnum, start
endprog = (endpats.get(initial) or endpats.get(token[1])
or endpats.get(token[2]))
contstr = line[start:]
contline = line
break
else: # ordinary string
yield TokenInfo(STRING, token, spos, prefix)
elif is_identifier(initial): # ordinary name
if token in ALWAYS_BREAK_TOKENS:
paren_level = 0
while True:
indent = indents.pop()
if indent > start:
yield TokenInfo(DEDENT, '', spos, '')
else:
indents.append(indent)
break
yield TokenInfo(NAME, token, spos, prefix)
elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'): # continued stmt
additional_prefix += prefix + line[start:]
break
else:
if token in '([{':
paren_level += 1
elif token in ')]}':
paren_level -= 1
try:
# This check is needed in any case to check if it's a valid
# operator or just some random unicode character.
exact_type = opmap[token]
except KeyError:
exact_type = typ = ERRORTOKEN
if use_exact_op_types:
typ = exact_type
else:
typ = OP
yield TokenInfo(typ, token, spos, prefix)
if contstr:
yield TokenInfo(ERRORTOKEN, contstr, contstr_start, prefix)
if contstr.endswith('\n'):
new_line = True
end_pos = lnum, max
# As the last position we just take the maximally possible position. We
# remove -1 for the last new line.
for indent in indents[1:]:
yield TokenInfo(DEDENT, '', end_pos, '')
yield TokenInfo(ENDMARKER, '', end_pos, additional_prefix)
if __name__ == "__main__":
import sys
if len(sys.argv) >= 2:
path = sys.argv[1]
with open(path) as f:
code = u(f.read())
else:
code = u(sys.stdin.read())
for token in source_tokens(code, use_exact_op_types=True):
print(token)
|
nilq/baby-python
|
python
|
import thredo
def func(x, y):
return x + y
def main():
t = thredo.spawn(func, 2, 3)
print("Result:", t.join())
thredo.run(main)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 -Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test service cache."""
import datetime
import os
import time
import uuid
import pytest
def test_service_cache_ensure_user(svc_client_cache):
"""Test service cache user creation."""
client, _, cache = svc_client_cache
expected_id = uuid.uuid4().hex
user = cache.ensure_user({"user_id": expected_id})
assert user
assert expected_id == user.user_id
def test_service_cache_get_users(svc_client_cache):
"""Test getting multiple users."""
client, _, cache = svc_client_cache
expected_users = set([cache.ensure_user({"user_id": uuid.uuid4().hex}).user_id for _ in range(10)])
received_users = set([user.user_id for user in cache.get_users()])
assert expected_users.issubset(received_users)
def test_service_cache_get_user(svc_client_cache):
"""Test getting a single user."""
client, _, cache = svc_client_cache
expected_id = uuid.uuid4().hex
expected_user = cache.ensure_user({"user_id": expected_id})
assert expected_user
received_user = cache.get_user(expected_id)
assert received_user
assert expected_user.user_id == received_user.user_id
assert expected_user.fullname == received_user.fullname
assert expected_user.email == received_user.email
assert expected_user.token == received_user.token
def test_service_cache_make_job(svc_client_cache):
"""Test service cache jobs."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": "testuser"})
job_data = {
"job_id": uuid.uuid4().hex,
}
job = cache.make_job(user, job_data=job_data)
assert job
assert job_data["job_id"] == job.job_id
assert job_data["user_id"] == job.user_id
assert isinstance(job.created_at, datetime.datetime)
assert isinstance(job.updated_at, datetime.datetime)
assert "ENQUEUED" == job.state
assert job.extras is None
def test_service_cache_get_job(svc_client_cache):
"""Test service get user job."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": "testuser"})
job_data = {
"job_id": uuid.uuid4().hex,
}
job = cache.make_job(user, job_data=job_data)
assert job
retrieved_job = cache.get_job(user, job.job_id)
assert retrieved_job
assert job.created_at == retrieved_job.created_at
assert job.updated_at == retrieved_job.updated_at
assert job.job_id == retrieved_job.job_id
assert job.user_id == retrieved_job.user_id
assert job.state == retrieved_job.state
assert job.extras == retrieved_job.extras
def test_service_cache_get_jobs(svc_client_cache):
"""Test service get user jobs."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": "testuser1"})
user2 = cache.ensure_user({"user_id": "testuser2"})
for _ in range(10):
job = cache.make_job(user, {"job_id": uuid.uuid4().hex,})
assert job
job2 = cache.make_job(user2, {"job_id": uuid.uuid4().hex,})
assert job2
retrieved_jobs = cache.get_jobs(user)
assert retrieved_jobs
assert 10 == len([job for job in retrieved_jobs])
assert 1 == len([job for job in cache.get_jobs(user2)])
def test_service_cache_get_job_none(svc_client_cache):
"""Test service get user jobs."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": "testuser"})
job_data = {
"job_id": uuid.uuid4().hex,
}
job = cache.make_job(user, job_data)
assert job
assert cache.get_job(user, None) is None
with pytest.raises(AttributeError):
cache.get_job(None, job_data["job_id"])
def test_service_cache_set_file(svc_client_cache):
"""Test service set user file."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": uuid.uuid4().hex})
file = cache.set_file(
user, {"file_name": uuid.uuid4().hex, "file_size": 0, "relative_path": "/tmp/renku-core", "is_dir": False}
)
assert user.user_id == file.user_id
def test_service_cache_set_files(svc_client_cache):
"""Test service set user files."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": uuid.uuid4().hex})
files = [
{"file_name": uuid.uuid4().hex, "file_size": 0, "relative_path": "/tmp/renku-core", "is_dir": False}
for _ in range(10)
]
expected = set([file_["file_name"] for file_ in files])
received_files = cache.set_files(user, files)
received_names = set([file_.file_name for file_ in received_files])
files_age = set([file_.age for file_ in received_files])
assert {0} == files_age
files_ttl_exp = set([file_.ttl_expired() for file_ in received_files])
assert {False} == files_ttl_exp
time.sleep(2)
os.environ["RENKU_SVC_CLEANUP_TTL_FILES"] = "1"
files_age = set([file_.age for file_ in received_files])
assert {2} == files_age
files_ttl_exp = set([file_.ttl_expired() for file_ in received_files])
assert {True} == files_ttl_exp
received_users = set([file_.user_id for file_ in received_files])
assert expected.issubset(received_names)
assert user.user_id in received_users
def test_service_cache_get_file(svc_client_cache):
"""Test service get user file."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": uuid.uuid4().hex})
file_obj = cache.set_file(
user, {"file_name": uuid.uuid4().hex, "file_size": 0, "relative_path": "/tmp/renku-core", "is_dir": False}
)
file = cache.get_file(user, file_obj.file_id)
assert user.user_id == file.user_id
def test_service_cache_get_files(svc_client_cache):
"""Test service get user files."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": uuid.uuid4().hex})
files_data = [
{"file_name": uuid.uuid4().hex, "file_size": 0, "relative_path": "/tmp/renku-core", "is_dir": False}
for _ in range(10)
]
expected_files = {f.file_id for f in cache.set_files(user, files_data)}
received = [f for f in cache.get_files(user)]
received_files = {f.file_id for f in received}
received_user = {f.user_id for f in received}
assert 10 == len(expected_files)
assert 10 <= len(received_files)
assert received_files.issubset(expected_files)
assert user.user_id in received_user
def test_service_cache_invalidate_file(svc_client_cache):
"""Test service invalidate user file."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": uuid.uuid4().hex})
file_obj = cache.set_file(
user, {"file_name": uuid.uuid4().hex, "file_size": 0, "relative_path": "/tmp/renku-core", "is_dir": False}
)
cache.invalidate_file(user, file_obj.file_id)
file = cache.get_file(user, file_obj.file_id)
assert file is None
def test_service_cache_make_project(svc_client_cache):
"""Test service create project."""
client, _, cache = svc_client_cache
user = cache.ensure_user({"user_id": uuid.uuid4().hex})
project_data = {
"name": "renku-project-template",
"depth": 1,
"git_url": "https://github.com/SwissDataScienceCenter/renku-project-template",
"email": "contact@justsam.io",
"fullname": "renku the frog",
"token": "None",
"owner": "SwissDataScienceCenter",
}
project = cache.make_project(user, project_data)
time.sleep(1)
assert project.age == 1
assert not project.ttl_expired()
os.environ["RENKU_SVC_CLEANUP_TTL_PROJECTS"] = "1"
time.sleep(1)
assert project.age == 2
assert project.ttl_expired()
|
nilq/baby-python
|
python
|
"""
The ``planning`` package contains the planning steps for initial and rolling
wave planning. It creates a ``PlanningCreator`` and calls the methods to set
the Gurobi variables and constraints. It runs the planner afterwards and saves
the plans.
The implementations of the ``PlanningCreator`` (initial and rolling wave
planning) classes are in :py:mod:`tutorplanner.gurobiinterface`.
"""
__author__ = "Alexander Elvers <aelvers AT inet.tu-berlin.de>"
|
nilq/baby-python
|
python
|
'''
/vg/station-specific fixes.
'''
from .base import Matcher,MapFix,RenameProperty,DeclareDependencies,ChangeType
from byond.basetypes import BYONDString, BYONDValue, Atom, PropertyFlags
from byond.directions import *
DeclareDependencies('vgstation',['ss13'])
ATMOSBASE = '/obj/machinery/atmospherics'
@MapFix('vgstation')
class FixNetwork(Matcher):
def __init__(self):
pass
def Matches(self, atom):
if atom.path.startswith('/obj/machinery/camera') and 'network' in atom.properties:
return isinstance(atom.properties['network'], BYONDString) and not atom.properties['network'].value.startswith('list(')
return False
def Fix(self, atom):
fix = atom.properties['network'].value
atom.properties['network'] = BYONDValue('list("{0}")'.format(fix))
return atom
def __str__(self):
return 'Changed network property to list'
'''
@MapFix('vgstation-NET2')
class NetworkingChangeAtmos(ChangeType):
def __init__(self):
ChangeType.__init__(self,'/obj/machinery/atmospherics','/obj/machinery/networked/atmos', fuzzy = True)
@MapFix('vgstation-NET2')
class NetworkingChangePower(ChangeType):
def __init__(self):
ChangeType.__init__(self,'/obj/machinery/power','/obj/machinery/networked/power', fuzzy = True)
@MapFix('vgstation-NET2')
class NetworkingChangeFiber(ChangeType):
def __init__(self):
ChangeType.__init__(self,'/obj/machinery/fiber','/obj/machinery/networked/fiber', fuzzy = True)
'''
@MapFix('vgstation')
class FixIDTags(Matcher):
atomsToFix={}
def __init__(self):
pass
def Matches(self, atom):
if 'id_tag' in atom.properties:
compiled_atom = self.tree.GetAtom(atom.path)
if compiled_atom is None: return False
if 'id_tag' not in compiled_atom.properties:
FixIDTags.atomsToFix[atom.path] = True
return 'id' in atom.properties and 'id' in atom.mapSpecified
# return False
def Fix(self, atom):
_id = atom.properties['id']
id_idx = atom.mapSpecified.index('id')
atom.properties['id_tag'] = _id
del atom.properties['id']
atom.mapSpecified[id_idx] = 'id_tag'
return atom
def __str__(self):
return 'Renamed id to id_tag'
@MapFix('vgstation')
class StandardizeManifolds(Matcher):
STATE_TO_TYPE = {
'manifold-b' :ATMOSBASE+'/pipe/manifold/supply/visible',
'manifold-b-f':ATMOSBASE+'/pipe/manifold/supply/hidden',
'manifold-r' :ATMOSBASE+'/pipe/manifold/scrubbers/visible',
'manifold-r-f':ATMOSBASE+'/pipe/manifold/scrubbers/hidden',
'manifold-c' :ATMOSBASE+'/pipe/manifold/cyan/visible',
'manifold-c-f':ATMOSBASE+'/pipe/manifold/cyan/hidden',
'manifold-y' :ATMOSBASE+'/pipe/manifold/yellow/visible',
'manifold-y-f':ATMOSBASE+'/pipe/manifold/yellow/hidden',
'manifold-g' :ATMOSBASE+'/pipe/manifold/filtering/visible',
'manifold-g-f':ATMOSBASE+'/pipe/manifold/filtering/hidden',
'manifold' :ATMOSBASE+'/pipe/manifold/general/visible',
'manifold-f' :ATMOSBASE+'/pipe/manifold/general/hidden',
}
def __init__(self):
return
def Matches(self, atom):
if atom.path == ATMOSBASE+'/pipe/manifold' and 'icon_state' in atom.mapSpecified:
return atom.getProperty('icon_state') in self.STATE_TO_TYPE
return False
def Fix(self, atom):
icon_state = atom.properties['icon_state'].value
new_atom = Atom(self.STATE_TO_TYPE[icon_state])
if 'dir' in atom.mapSpecified:
new_atom.setProperty('dir', atom.getProperty('dir'), PropertyFlags.MAP_SPECIFIED)
return new_atom
def __str__(self):
return 'Standardized pipe manifold'
@MapFix('vgstation')
class StandardizePiping(Matcher):
TYPE_TRANSLATIONS = {
ATMOSBASE+'/pipe/simple': 'simple',
ATMOSBASE+'/pipe/manifold': 'manifold',
ATMOSBASE+'/pipe/manifold4w': 'manifold4w',
}
COLOR_CODES = {
'b':'supply',
'r':'scrubbers',
'g':'filtering',
'c':'cyan',
'y':'yellow',
'': 'general'
}
def __init__(self):
self.before = None
self.after = None
return
def trans_simple(self, atom):
type_tmpl = ATMOSBASE+'/pipe/simple/{color}/{visibility}'
color_code, visible = self.parseIconState(atom.getProperty('icon_state', ''))
return self.getNewType(type_tmpl, color_code, visible)
def trans_manifold(self, atom):
type_tmpl = ATMOSBASE+'/pipe/manifold/{color}/{visibility}'
color_code, visible = self.parseIconState(atom.getProperty('icon_state', ''))
return self.getNewType(type_tmpl, color_code, visible)
def trans_manifold4w(self, atom):
type_tmpl = ATMOSBASE+'/pipe/manifold4w/{color}/{visibility}'
color_code, visible = self.parseIconState(atom.getProperty('icon_state', ''))
return self.getNewType(type_tmpl, color_code, visible)
def parseIconState(self, state):
parts = state.split('-')
if len(parts) <= 1:
return ('', True)
elif len(parts) == 2:
if parts[1] == 'f':
return ('', True)
return (parts[1], True)
return (parts[1], parts[2] != 'f')
def getNewType(self, tmpl, color_code, visible, color_wheel=COLOR_CODES):
visibility = 'visible'
if not visible:
visibility = 'hidden'
color = color_wheel[color_code]
return Atom(tmpl.format(color=color, visibility=visibility))
def Matches(self, atom):
return atom.path in self.TYPE_TRANSLATIONS
def Fix(self, atom):
self.before = str(atom)
old_dir = None
if 'dir' in atom.mapSpecified:
old_dir = int(atom.getProperty('dir', 2))
atom = getattr(self, 'trans_{0}'.format(self.TYPE_TRANSLATIONS[atom.path]))(atom)
if old_dir is not None and old_dir != 2:
atom.setProperty('dir', old_dir, PropertyFlags.MAP_SPECIFIED)
self.after = str(atom)
return atom
def __str__(self):
if self.before is not None and self.after is not None:
return 'Standardized pipe: {0} -> {1}'.format(self.before, self.after)
else:
return 'Standardize pipes'
@MapFix('vgstation')
class StandardizeInsulatedPipes(Matcher):
STATE_TO_TYPE = {
'intact' :ATMOSBASE+'/pipe/simple/insulated/visible',
'intact-f':ATMOSBASE+'/pipe/simple/insulated/hidden'
}
def __init__(self):
return
def Matches(self, atom):
if atom.path == ATMOSBASE+'/pipe/simple/insulated':
return True
if atom.path.startswith(ATMOSBASE+'/pipe/simple/insulated') and int(atom.getProperty('dir', 0)) in (3, 8, 12):
# print(atom.MapSerialize())
return True
return False
def Fix(self, atom):
newtype = atom.path
if atom.path == ATMOSBASE+'/pipe/simple/insulated':
icon_state = ''
if 'icon_state' in atom.properties:
icon_state = atom.properties['icon_state'].value
newtype = self.STATE_TO_TYPE.get(icon_state, ATMOSBASE+'/pipe/simple/insulated/visible')
new_atom = Atom(newtype)
if 'dir' in atom.mapSpecified:
# Normalize dir
direction = int(atom.getProperty('dir', 2))
if direction == 3:
direction = 1
elif direction == 8: # Breaks things, for some reason
direction = 4
elif direction == 12:
direction = 4
new_atom.setProperty('dir', direction, PropertyFlags.MAP_SPECIFIED)
return new_atom
def __str__(self):
return 'Standardized insulated pipe'
@MapFix('vgstation')
class FixWindows(Matcher):
def __init__(self):
return
def Matches(self, atom):
if atom.path.startswith('/obj/structure/window/full'):
return False
if atom.path.startswith('/obj/structure/window') and int(atom.getProperty('dir', SOUTH)) in (NORTH | WEST, SOUTH | WEST, NORTH | EAST, SOUTH | EAST):
# print(atom.MapSerialize())
return True
return False
def Fix(self, atom):
newtype = atom.path.replace('/obj/structure/window', '/obj/structure/window/full')
atom.path = newtype
atom.properties = {}
atom.mapSpecified = []
return atom
def __str__(self):
return 'Standardized full windows'
@MapFix('vgstation')
class FixVaultFloors(Matcher):
"""
Changes flooring icons to use /vg/'s standardized vault icons.
"""
# state:1
ICON_STATE_CHANGES = {
'vault:1' :{'icon_state':'dark-markings', 'dir':2},
'vault:2' :{'icon_state':'dark vault stripe', 'dir':2},
'vault:4' :{'icon_state':'dark-markings', 'dir':1},
'vault:8' :{'icon_state':'dark-markings', 'dir':8},
'vault:6' :{'icon_state':'dark vault corner', 'dir':2},
'vault:10':{'icon_state':'dark vault corner', 'dir':8},
'vault:5' :{'icon_state':'dark vault full', 'dir':2},
'vault:9' :{'icon_state':'dark loading', 'dir':4},
'vault-border:1' :{'icon_state':'dark vault stripe', 'dir':2},
'vault-border:2' :{'icon_state':'dark vault stripe', 'dir':1},
'vault-border:4' :{'icon_state':'dark vault stripe', 'dir':4},
'vault-border:8' :{'icon_state':'dark vault stripe', 'dir':8},
'vault-border:6' :{'icon_state':'dark vault corner', 'dir':2},
'vault-border:10':{'icon_state':'dark vault stripe', 'dir':5},
'vault-border:5' :{'icon_state':'dark vault stripe', 'dir':5},
'vault-border:9' :{'icon_state':'dark vault stripe', 'dir':6},
}
def __init__(self):
self.stateKey = ''
self.changesMade = []
return
def GetStateKey(self, atom):
icon_state = ''
_dir = '2'
if 'dir' in atom.properties:
_dir = str(atom.getProperty('dir'))
if 'icon_state' in atom.properties:
icon_state = atom.getProperty('icon_state')
return icon_state + ":" + _dir
def Matches(self, atom):
if atom.path.startswith('/turf/') and 'icon_state' in atom.mapSpecified:
sk = self.GetStateKey(atom)
if sk in self.ICON_STATE_CHANGES:
self.stateKey = sk
return True
return False
def Fix(self, atom):
self.changesMade = []
propChanges = self.ICON_STATE_CHANGES[self.stateKey]
if 'tag' in atom.mapSpecified:
atom.mapSpecified.remove('tag')
for key, newval in propChanges.items():
if key not in atom.mapSpecified:
atom.mapSpecified += [key]
oldval = 'NONE'
if key in atom.properties:
oldval = str(atom.properties[key])
if isinstance(newval, str):
atom.properties[key] = BYONDString(newval)
elif isinstance(newval, int):
atom.properties[key] = BYONDValue(newval)
self.changesMade += ['{0}: {1} -> {2}'.format(key, oldval, atom.properties[key])]
return atom
def __str__(self):
return 'Standardized vault flooring (' + ', '.join(self.changesMade) + ')'
@MapFix('vgstation')
class RenameColorVG(RenameProperty):
def __init__(self):
RenameProperty.__init__(self, "color", "_color")
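# A minimal, commented sketch of how StandardizePiping rewrites a pipe atom;
# the icon_state value is an assumption about vgstation's naming scheme:
#
#   fix = StandardizePiping()
#   a = Atom(ATMOSBASE + '/pipe/simple')
#   a.setProperty('icon_state', 'intact-b-f', PropertyFlags.MAP_SPECIFIED)
#   if fix.Matches(a):
#       a = fix.Fix(a)  # path becomes ATMOSBASE + '/pipe/simple/supply/hidden'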
|
nilq/baby-python
|
python
|
import pytest
from bearboto3.iam import (
InstanceProfileExistsWaiter,
UserExistsWaiter,
RoleExistsWaiter,
PolicyExistsWaiter,
)
from beartype import beartype
from beartype.roar import (
BeartypeCallHintPepParamException,
BeartypeCallHintPepReturnException,
BeartypeDecorHintPep484585Exception,
)
# ============================
# InstanceProfileExistsWaiter
# ============================
def test_instance_profile_exists_arg_pass(gen_instance_profile_exists_waiter):
@beartype
def func(param: InstanceProfileExistsWaiter):
pass
func(gen_instance_profile_exists_waiter)
def test_instance_profile_exists_arg_fail(gen_user_exists_waiter):
with pytest.raises(BeartypeCallHintPepParamException):
@beartype
def func(param: InstanceProfileExistsWaiter):
pass
func(gen_user_exists_waiter)
def test_instance_profile_exists_return_pass(gen_instance_profile_exists_waiter):
@beartype
def func() -> InstanceProfileExistsWaiter:
return gen_instance_profile_exists_waiter
func()
def test_instance_profile_exists_return_fail(gen_user_exists_waiter):
with pytest.raises(
(BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
):
@beartype
def func() -> InstanceProfileExistsWaiter:
return gen_user_exists_waiter
func()
# ============================
# UserExistsWaiter
# ============================
def test_user_exists_arg_pass(gen_user_exists_waiter):
@beartype
def func(param: UserExistsWaiter):
pass
func(gen_user_exists_waiter)
def test_user_exists_arg_fail(gen_instance_profile_exists_waiter):
with pytest.raises(BeartypeCallHintPepParamException):
@beartype
def func(param: UserExistsWaiter):
pass
func(gen_instance_profile_exists_waiter)
def test_user_exists_return_pass(gen_user_exists_waiter):
@beartype
def func() -> UserExistsWaiter:
return gen_user_exists_waiter
func()
def test_user_exists_return_fail(gen_instance_profile_exists_waiter):
with pytest.raises(
(BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
):
@beartype
def func() -> UserExistsWaiter:
return gen_instance_profile_exists_waiter
func()
# ============================
# RoleExistsWaiter
# ============================
def test_role_exists_arg_pass(gen_role_exists_waiter):
@beartype
def func(param: RoleExistsWaiter):
pass
func(gen_role_exists_waiter)
def test_role_exists_arg_fail(gen_user_exists_waiter):
with pytest.raises(BeartypeCallHintPepParamException):
@beartype
def func(param: RoleExistsWaiter):
pass
func(gen_user_exists_waiter)
def test_role_exists_return_pass(gen_role_exists_waiter):
@beartype
def func() -> RoleExistsWaiter:
return gen_role_exists_waiter
func()
def test_role_exists_return_fail(gen_user_exists_waiter):
with pytest.raises(
(BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
):
@beartype
def func() -> RoleExistsWaiter:
return gen_user_exists_waiter
func()
# ============================
# PolicyExistsWaiter
# ============================
def test_policy_exists_arg_pass(gen_policy_exists_waiter):
@beartype
def func(param: PolicyExistsWaiter):
pass
func(gen_policy_exists_waiter)
def test_policy_exists_arg_fail(gen_role_exists_waiter):
with pytest.raises(BeartypeCallHintPepParamException):
@beartype
def func(param: PolicyExistsWaiter):
pass
func(gen_role_exists_waiter)
def test_policy_exists_return_pass(gen_policy_exists_waiter):
@beartype
def func() -> PolicyExistsWaiter:
return gen_policy_exists_waiter
func()
def test_policy_exists_return_fail(gen_role_exists_waiter):
with pytest.raises(
(BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
):
@beartype
def func() -> PolicyExistsWaiter:
return gen_role_exists_waiter
func()
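# The gen_*_waiter fixtures above come from the suite's conftest; a minimal
# sketch of one, assuming plain boto3 (the fixture internals are assumptions):
#
# import boto3
# import pytest
#
# @pytest.fixture
# def gen_user_exists_waiter():
#     iam = boto3.client('iam', region_name='us-east-1')
#     return iam.get_waiter('user_exists')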
|
nilq/baby-python
|
python
|
__author__ = 'hunter'
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module Nortel-Magellan-Passport-HdlcTransparentMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-Magellan-Passport-HdlcTransparentMIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:17:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion")
PassportCounter64, Integer32, Unsigned32, Gauge32, InterfaceIndex, StorageType, DisplayString, RowStatus, Counter32 = mibBuilder.importSymbols("Nortel-Magellan-Passport-StandardTextualConventionsMIB", "PassportCounter64", "Integer32", "Unsigned32", "Gauge32", "InterfaceIndex", "StorageType", "DisplayString", "RowStatus", "Counter32")
Link, AsciiString, NonReplicated, EnterpriseDateAndTime = mibBuilder.importSymbols("Nortel-Magellan-Passport-TextualConventionsMIB", "Link", "AsciiString", "NonReplicated", "EnterpriseDateAndTime")
passportMIBs, components = mibBuilder.importSymbols("Nortel-Magellan-Passport-UsefulDefinitionsMIB", "passportMIBs", "components")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, Unsigned32, TimeTicks, Gauge32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, ObjectIdentity, Counter64, ModuleIdentity, Bits, NotificationType, iso, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "Unsigned32", "TimeTicks", "Gauge32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "ObjectIdentity", "Counter64", "ModuleIdentity", "Bits", "NotificationType", "iso", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hdlcTransparentMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47))
htds = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82))
htdsRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 1), )
if mibBuilder.loadTexts: htdsRowStatusTable.setStatus('mandatory')
htdsRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"))
if mibBuilder.loadTexts: htdsRowStatusEntry.setStatus('mandatory')
htdsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsRowStatus.setStatus('mandatory')
htdsComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsComponentName.setStatus('mandatory')
htdsStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsStorageType.setStatus('mandatory')
htdsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: htdsIndex.setStatus('mandatory')
htdsCidDataTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 10), )
if mibBuilder.loadTexts: htdsCidDataTable.setStatus('mandatory')
htdsCidDataEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"))
if mibBuilder.loadTexts: htdsCidDataEntry.setStatus('mandatory')
htdsCustomerIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 10, 1, 1), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 8191), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsCustomerIdentifier.setStatus('mandatory')
htdsIfEntryTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 11), )
if mibBuilder.loadTexts: htdsIfEntryTable.setStatus('mandatory')
htdsIfEntryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"))
if mibBuilder.loadTexts: htdsIfEntryEntry.setStatus('mandatory')
htdsIfAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 11, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3))).clone('up')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsIfAdminStatus.setStatus('mandatory')
htdsIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 11, 1, 2), InterfaceIndex().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsIfIndex.setStatus('mandatory')
htdsOperStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 12), )
if mibBuilder.loadTexts: htdsOperStatusTable.setStatus('mandatory')
htdsOperStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"))
if mibBuilder.loadTexts: htdsOperStatusEntry.setStatus('mandatory')
htdsSnmpOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3))).clone('up')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsSnmpOperStatus.setStatus('mandatory')
htdsStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13), )
if mibBuilder.loadTexts: htdsStateTable.setStatus('mandatory')
htdsStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"))
if mibBuilder.loadTexts: htdsStateEntry.setStatus('mandatory')
htdsAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsAdminState.setStatus('mandatory')
htdsOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsOperationalState.setStatus('mandatory')
htdsUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsUsageState.setStatus('mandatory')
htdsAvailabilityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsAvailabilityStatus.setStatus('mandatory')
htdsProceduralStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsProceduralStatus.setStatus('mandatory')
htdsControlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsControlStatus.setStatus('mandatory')
htdsAlarmStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsAlarmStatus.setStatus('mandatory')
htdsStandbyStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 15))).clone(namedValues=NamedValues(("hotStandby", 0), ("coldStandby", 1), ("providingService", 2), ("notSet", 15))).clone('notSet')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsStandbyStatus.setStatus('mandatory')
htdsUnknownStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 13, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1))).clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsUnknownStatus.setStatus('mandatory')
htdsFramer = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2))
htdsFramerRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 1), )
if mibBuilder.loadTexts: htdsFramerRowStatusTable.setStatus('mandatory')
htdsFramerRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsFramerIndex"))
if mibBuilder.loadTexts: htdsFramerRowStatusEntry.setStatus('mandatory')
htdsFramerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerRowStatus.setStatus('mandatory')
htdsFramerComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerComponentName.setStatus('mandatory')
htdsFramerStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerStorageType.setStatus('mandatory')
htdsFramerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: htdsFramerIndex.setStatus('mandatory')
htdsFramerProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 10), )
if mibBuilder.loadTexts: htdsFramerProvTable.setStatus('mandatory')
htdsFramerProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsFramerIndex"))
if mibBuilder.loadTexts: htdsFramerProvEntry.setStatus('mandatory')
htdsFramerInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 10, 1, 1), Link()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsFramerInterfaceName.setStatus('mandatory')
htdsFramerLinkTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 11), )
if mibBuilder.loadTexts: htdsFramerLinkTable.setStatus('mandatory')
htdsFramerLinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsFramerIndex"))
if mibBuilder.loadTexts: htdsFramerLinkEntry.setStatus('mandatory')
htdsFramerDataInversion = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 16))).clone(namedValues=NamedValues(("off", 0), ("on", 16))).clone('off')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsFramerDataInversion.setStatus('mandatory')
htdsFramerNonOctetData = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 11, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsFramerNonOctetData.setStatus('mandatory')
htdsFramerFrameCrcType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 11, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("crc16", 0), ("crc32", 1), ("noCrc", 2))).clone('crc16')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsFramerFrameCrcType.setStatus('mandatory')
htdsFramerFlagsBetweenFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 11, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsFramerFlagsBetweenFrames.setStatus('mandatory')
htdsFramerLineSignalTransport = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 11, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1))).clone('off')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsFramerLineSignalTransport.setStatus('mandatory')
htdsFramerStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 12), )
if mibBuilder.loadTexts: htdsFramerStateTable.setStatus('mandatory')
htdsFramerStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsFramerIndex"))
if mibBuilder.loadTexts: htdsFramerStateEntry.setStatus('mandatory')
htdsFramerAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerAdminState.setStatus('mandatory')
htdsFramerOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerOperationalState.setStatus('mandatory')
htdsFramerUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerUsageState.setStatus('mandatory')
htdsFramerStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13), )
if mibBuilder.loadTexts: htdsFramerStatsTable.setStatus('mandatory')
htdsFramerStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsFramerIndex"))
if mibBuilder.loadTexts: htdsFramerStatsEntry.setStatus('mandatory')
htdsFramerFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerFrmToIf.setStatus('mandatory')
htdsFramerFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerFrmFromIf.setStatus('mandatory')
htdsFramerOctetFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerOctetFromIf.setStatus('mandatory')
htdsFramerAborts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerAborts.setStatus('mandatory')
htdsFramerCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerCrcErrors.setStatus('mandatory')
htdsFramerLrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerLrcErrors.setStatus('mandatory')
htdsFramerNonOctetErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerNonOctetErrors.setStatus('mandatory')
htdsFramerOverruns = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerOverruns.setStatus('mandatory')
htdsFramerUnderruns = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerUnderruns.setStatus('mandatory')
htdsFramerLargeFrmErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 13, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerLargeFrmErrors.setStatus('mandatory')
htdsFramerUtilTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 14), )
if mibBuilder.loadTexts: htdsFramerUtilTable.setStatus('mandatory')
htdsFramerUtilEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 14, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsFramerIndex"))
if mibBuilder.loadTexts: htdsFramerUtilEntry.setStatus('mandatory')
htdsFramerNormPrioLinkUtilToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 14, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerNormPrioLinkUtilToIf.setStatus('mandatory')
htdsFramerNormPrioLinkUtilFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 2, 14, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsFramerNormPrioLinkUtilFromIf.setStatus('mandatory')
htdsPlc = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3))
htdsPlcRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 1), )
if mibBuilder.loadTexts: htdsPlcRowStatusTable.setStatus('mandatory')
htdsPlcRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsPlcIndex"))
if mibBuilder.loadTexts: htdsPlcRowStatusEntry.setStatus('mandatory')
htdsPlcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsPlcRowStatus.setStatus('mandatory')
htdsPlcComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsPlcComponentName.setStatus('mandatory')
htdsPlcStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsPlcStorageType.setStatus('mandatory')
htdsPlcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: htdsPlcIndex.setStatus('mandatory')
htdsPlcProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10), )
if mibBuilder.loadTexts: htdsPlcProvTable.setStatus('mandatory')
htdsPlcProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsPlcIndex"))
if mibBuilder.loadTexts: htdsPlcProvEntry.setStatus('mandatory')
htdsPlcRemoteName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 2), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 40)).clone(hexValue="")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcRemoteName.setStatus('mandatory')
htdsPlcSetupPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcSetupPriority.setStatus('mandatory')
htdsPlcHoldingPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcHoldingPriority.setStatus('mandatory')
htdsPlcRequiredTxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)).clone(64000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcRequiredTxBandwidth.setStatus('mandatory')
htdsPlcRequiredRxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)).clone(64000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcRequiredRxBandwidth.setStatus('mandatory')
htdsPlcRequiredTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("voice", 0), ("data", 1), ("video", 2), ("trafficType1", 3), ("trafficType2", 4), ("trafficType3", 5), ("trafficType4", 6), ("trafficType5", 7))).clone('data')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcRequiredTrafficType.setStatus('mandatory')
htdsPlcPermittedTrunkTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="f8")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcPermittedTrunkTypes.setStatus('mandatory')
htdsPlcRequiredSecurity = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcRequiredSecurity.setStatus('mandatory')
htdsPlcRequiredCustomerParm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcRequiredCustomerParm.setStatus('mandatory')
htdsPlcPathAttributeToMinimize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cost", 0), ("delay", 1))).clone('cost')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcPathAttributeToMinimize.setStatus('mandatory')
htdsPlcMaximumAcceptableCost = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(1280)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcMaximumAcceptableCost.setStatus('mandatory')
htdsPlcMaximumAcceptableDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000)).clone(100000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcMaximumAcceptableDelay.setStatus('mandatory')
htdsPlcEmissionPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 14), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 2)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcEmissionPriority.setStatus('mandatory')
htdsPlcDiscardPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 15), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 3)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcDiscardPriority.setStatus('mandatory')
htdsPlcPathType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("manual", 1), ("forced", 2))).clone('normal')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcPathType.setStatus('mandatory')
htdsPlcPathFailureAction = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disconnectConnection", 0), ("reRoutePath", 1))).clone('reRoutePath')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcPathFailureAction.setStatus('mandatory')
htdsPlcBumpPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bumpWhenNecessary", 0), ("bumpToObtainBestRoute", 1))).clone('bumpWhenNecessary')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcBumpPreference.setStatus('mandatory')
htdsPlcOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 10, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcOptimization.setStatus('mandatory')
htdsPlcMpathTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 265), )
if mibBuilder.loadTexts: htdsPlcMpathTable.setStatus('mandatory')
htdsPlcMpathEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 265, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsPlcIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsPlcMpathIndex"))
if mibBuilder.loadTexts: htdsPlcMpathEntry.setStatus('mandatory')
htdsPlcMpathIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 265, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9)))
if mibBuilder.loadTexts: htdsPlcMpathIndex.setStatus('mandatory')
htdsPlcMpathValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 3, 265, 1, 2), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsPlcMpathValue.setStatus('mandatory')
htdsLCo = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4))
htdsLCoRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 1), )
if mibBuilder.loadTexts: htdsLCoRowStatusTable.setStatus('mandatory')
htdsLCoRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsLCoIndex"))
if mibBuilder.loadTexts: htdsLCoRowStatusEntry.setStatus('mandatory')
htdsLCoRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRowStatus.setStatus('mandatory')
htdsLCoComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoComponentName.setStatus('mandatory')
htdsLCoStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoStorageType.setStatus('mandatory')
htdsLCoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: htdsLCoIndex.setStatus('mandatory')
htdsLCoPathDataTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10), )
if mibBuilder.loadTexts: htdsLCoPathDataTable.setStatus('mandatory')
htdsLCoPathDataEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsLCoIndex"))
if mibBuilder.loadTexts: htdsLCoPathDataEntry.setStatus('mandatory')
htdsLCoState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("pathDown", 0), ("selectingRoute", 1), ("connecting", 2), ("pathUp", 3), ("pathDownRetrying", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoState.setStatus('mandatory')
htdsLCoOverrideRemoteName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 2), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: htdsLCoOverrideRemoteName.setStatus('mandatory')
htdsLCoEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("calling", 0), ("called", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoEnd.setStatus('mandatory')
htdsLCoCostMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoCostMetric.setStatus('mandatory')
htdsLCoDelayMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoDelayMetric.setStatus('mandatory')
htdsLCoRoundTripDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 200000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRoundTripDelay.setStatus('mandatory')
htdsLCoSetupPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoSetupPriority.setStatus('mandatory')
htdsLCoHoldingPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoHoldingPriority.setStatus('mandatory')
htdsLCoRequiredTxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 9), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRequiredTxBandwidth.setStatus('mandatory')
htdsLCoRequiredRxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 10), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRequiredRxBandwidth.setStatus('mandatory')
htdsLCoRequiredTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("voice", 0), ("data", 1), ("video", 2), ("trafficType1", 3), ("trafficType2", 4), ("trafficType3", 5), ("trafficType4", 6), ("trafficType5", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRequiredTrafficType.setStatus('mandatory')
htdsLCoPermittedTrunkTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPermittedTrunkTypes.setStatus('mandatory')
htdsLCoRequiredSecurity = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRequiredSecurity.setStatus('mandatory')
htdsLCoRequiredCustomerParameter = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 14), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRequiredCustomerParameter.setStatus('mandatory')
htdsLCoEmissionPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 15), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoEmissionPriority.setStatus('mandatory')
htdsLCoDiscardPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoDiscardPriority.setStatus('mandatory')
htdsLCoPathType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("manual", 1), ("forced", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPathType.setStatus('mandatory')
htdsLCoRetryCount = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 18), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoRetryCount.setStatus('mandatory')
htdsLCoPathFailureCount = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 19), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPathFailureCount.setStatus('mandatory')
htdsLCoReasonForNoRoute = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))).clone(namedValues=NamedValues(("none", 0), ("destinationNameTooLong", 1), ("destinationNotSpecified", 2), ("unknownDestinationName", 3), ("incorrectDestination", 4), ("incorrectDestinationEndPoint", 5), ("unknownSource", 6), ("unknownDestination", 7), ("sameNode", 8), ("routeCostTooMuch", 9), ("routesDelayTooLong", 10), ("attributesNotMet", 11), ("anError", 12), ("attributeProfileProblem", 13), ("manualPathIndexProblem", 14))).clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoReasonForNoRoute.setStatus('mandatory')
htdsLCoLastTearDownReason = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))).clone(namedValues=NamedValues(("none", 0), ("normalShutDown", 1), ("insufficientTxLcOrBandwidth", 2), ("insufficientRxLcOrBandwidth", 3), ("trunkFailure", 4), ("trunkCardFailure", 5), ("operatorForced", 6), ("lostLcnClash", 7), ("networkCongestion", 8), ("trunkNotFound", 9), ("farEndNotFound", 10), ("wrongModuleReached", 11), ("farEndBusy", 12), ("callLoopedBack", 13), ("unknownReason", 14), ("farEndNotReady", 15), ("remoteNameMismatch", 16), ("serviceTypeMismatch", 17), ("reconnectFromFarEnd", 18), ("bumped", 19), ("accessCardFailure", 20), ("optimized", 21), ("overrideRemoteName", 22), ("trunkOrFarEndDidNotSupportMode", 23))).clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoLastTearDownReason.setStatus('mandatory')
htdsLCoPathFailureAction = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disconnectConnection", 0), ("reRoutePath", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPathFailureAction.setStatus('mandatory')
htdsLCoBumpPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bumpWhenNecessary", 0), ("bumpToObtainBestRoute", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoBumpPreference.setStatus('mandatory')
htdsLCoOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoOptimization.setStatus('mandatory')
htdsLCoPathUpDateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 10, 1, 25), EnterpriseDateAndTime().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(19, 19), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPathUpDateTime.setStatus('mandatory')
htdsLCoStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 11), )
if mibBuilder.loadTexts: htdsLCoStatsTable.setStatus('mandatory')
htdsLCoStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsLCoIndex"))
if mibBuilder.loadTexts: htdsLCoStatsEntry.setStatus('mandatory')
htdsLCoPktsToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 11, 1, 1), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPktsToNetwork.setStatus('mandatory')
htdsLCoBytesToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 11, 1, 2), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoBytesToNetwork.setStatus('mandatory')
htdsLCoPktsFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 11, 1, 3), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPktsFromNetwork.setStatus('mandatory')
htdsLCoBytesFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 11, 1, 4), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoBytesFromNetwork.setStatus('mandatory')
htdsLCoPathTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 264), )
if mibBuilder.loadTexts: htdsLCoPathTable.setStatus('mandatory')
htdsLCoPathEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 264, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsLCoIndex"), (0, "Nortel-Magellan-Passport-HdlcTransparentMIB", "htdsLCoPathValue"))
if mibBuilder.loadTexts: htdsLCoPathEntry.setStatus('mandatory')
htdsLCoPathValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 82, 4, 264, 1, 1), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: htdsLCoPathValue.setStatus('mandatory')
hdlcTransparentGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 1))
hdlcTransparentGroupBC = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 1, 3))
hdlcTransparentGroupBC03 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 1, 3, 4))
hdlcTransparentGroupBC03A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 1, 3, 4, 2))
hdlcTransparentCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 3))
hdlcTransparentCapabilitiesBC = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 3, 3))
hdlcTransparentCapabilitiesBC03 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 3, 3, 4))
hdlcTransparentCapabilitiesBC03A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 47, 3, 3, 4, 2))
mibBuilder.exportSymbols("Nortel-Magellan-Passport-HdlcTransparentMIB", htdsAvailabilityStatus=htdsAvailabilityStatus, htdsIfIndex=htdsIfIndex, htdsFramerUtilEntry=htdsFramerUtilEntry, htdsLCoSetupPriority=htdsLCoSetupPriority, htdsPlcSetupPriority=htdsPlcSetupPriority, htdsFramerLargeFrmErrors=htdsFramerLargeFrmErrors, htdsPlcProvEntry=htdsPlcProvEntry, htdsLCoRowStatus=htdsLCoRowStatus, htdsLCoPathFailureAction=htdsLCoPathFailureAction, htds=htds, htdsLCoPermittedTrunkTypes=htdsLCoPermittedTrunkTypes, htdsFramerProvEntry=htdsFramerProvEntry, htdsFramerStatsEntry=htdsFramerStatsEntry, htdsPlcIndex=htdsPlcIndex, htdsPlcHoldingPriority=htdsPlcHoldingPriority, htdsLCoStatsEntry=htdsLCoStatsEntry, htdsFramerLrcErrors=htdsFramerLrcErrors, htdsLCoReasonForNoRoute=htdsLCoReasonForNoRoute, htdsFramerAborts=htdsFramerAborts, htdsLCoPathType=htdsLCoPathType, hdlcTransparentCapabilitiesBC03A=hdlcTransparentCapabilitiesBC03A, htdsLCoComponentName=htdsLCoComponentName, htdsFramerStatsTable=htdsFramerStatsTable, htdsStateEntry=htdsStateEntry, htdsOperationalState=htdsOperationalState, htdsLCoPathValue=htdsLCoPathValue, htdsFramerNonOctetErrors=htdsFramerNonOctetErrors, htdsFramerOverruns=htdsFramerOverruns, htdsLCoRequiredSecurity=htdsLCoRequiredSecurity, htdsPlcPathAttributeToMinimize=htdsPlcPathAttributeToMinimize, htdsFramerFrameCrcType=htdsFramerFrameCrcType, htdsRowStatusTable=htdsRowStatusTable, htdsOperStatusTable=htdsOperStatusTable, htdsFramerStorageType=htdsFramerStorageType, htdsProceduralStatus=htdsProceduralStatus, htdsFramer=htdsFramer, htdsPlcRowStatusEntry=htdsPlcRowStatusEntry, htdsLCoRequiredRxBandwidth=htdsLCoRequiredRxBandwidth, htdsCustomerIdentifier=htdsCustomerIdentifier, htdsLCoBytesFromNetwork=htdsLCoBytesFromNetwork, htdsCidDataEntry=htdsCidDataEntry, htdsAlarmStatus=htdsAlarmStatus, htdsFramerRowStatusTable=htdsFramerRowStatusTable, htdsLCoPathUpDateTime=htdsLCoPathUpDateTime, htdsLCoRequiredTrafficType=htdsLCoRequiredTrafficType, htdsPlcDiscardPriority=htdsPlcDiscardPriority, htdsPlcRequiredTxBandwidth=htdsPlcRequiredTxBandwidth, htdsLCoEnd=htdsLCoEnd, hdlcTransparentGroupBC=hdlcTransparentGroupBC, htdsPlcRowStatusTable=htdsPlcRowStatusTable, htdsLCoRetryCount=htdsLCoRetryCount, htdsLCoRequiredTxBandwidth=htdsLCoRequiredTxBandwidth, htdsStandbyStatus=htdsStandbyStatus, hdlcTransparentGroupBC03A=hdlcTransparentGroupBC03A, htdsLCoPathFailureCount=htdsLCoPathFailureCount, htdsFramerAdminState=htdsFramerAdminState, htdsFramerLinkTable=htdsFramerLinkTable, hdlcTransparentMIB=hdlcTransparentMIB, htdsLCoHoldingPriority=htdsLCoHoldingPriority, hdlcTransparentCapabilities=hdlcTransparentCapabilities, htdsPlcMpathIndex=htdsPlcMpathIndex, htdsUsageState=htdsUsageState, htdsLCoIndex=htdsLCoIndex, htdsPlcOptimization=htdsPlcOptimization, htdsStorageType=htdsStorageType, htdsRowStatusEntry=htdsRowStatusEntry, htdsFramerComponentName=htdsFramerComponentName, htdsRowStatus=htdsRowStatus, htdsPlcComponentName=htdsPlcComponentName, htdsLCoLastTearDownReason=htdsLCoLastTearDownReason, htdsLCoPathTable=htdsLCoPathTable, htdsLCoState=htdsLCoState, htdsFramerIndex=htdsFramerIndex, htdsLCoPktsToNetwork=htdsLCoPktsToNetwork, htdsOperStatusEntry=htdsOperStatusEntry, htdsFramerDataInversion=htdsFramerDataInversion, htdsLCoRowStatusEntry=htdsLCoRowStatusEntry, htdsPlcPathFailureAction=htdsPlcPathFailureAction, htdsPlcRequiredRxBandwidth=htdsPlcRequiredRxBandwidth, hdlcTransparentGroup=hdlcTransparentGroup, htdsFramerOctetFromIf=htdsFramerOctetFromIf, htdsLCoPathDataTable=htdsLCoPathDataTable, 
htdsComponentName=htdsComponentName, htdsFramerRowStatusEntry=htdsFramerRowStatusEntry, htdsLCoCostMetric=htdsLCoCostMetric, htdsLCoDiscardPriority=htdsLCoDiscardPriority, htdsPlc=htdsPlc, htdsPlcStorageType=htdsPlcStorageType, htdsPlcMpathTable=htdsPlcMpathTable, htdsIfAdminStatus=htdsIfAdminStatus, htdsLCoRequiredCustomerParameter=htdsLCoRequiredCustomerParameter, htdsPlcMpathValue=htdsPlcMpathValue, htdsStateTable=htdsStateTable, htdsFramerStateEntry=htdsFramerStateEntry, htdsPlcRequiredTrafficType=htdsPlcRequiredTrafficType, htdsFramerFrmToIf=htdsFramerFrmToIf, htdsFramerLineSignalTransport=htdsFramerLineSignalTransport, htdsLCoStorageType=htdsLCoStorageType, htdsSnmpOperStatus=htdsSnmpOperStatus, htdsFramerUsageState=htdsFramerUsageState, htdsFramerUtilTable=htdsFramerUtilTable, htdsPlcRequiredSecurity=htdsPlcRequiredSecurity, htdsLCoBytesToNetwork=htdsLCoBytesToNetwork, htdsLCoPathDataEntry=htdsLCoPathDataEntry, htdsFramerFrmFromIf=htdsFramerFrmFromIf, htdsFramerOperationalState=htdsFramerOperationalState, htdsPlcPathType=htdsPlcPathType, htdsLCoOverrideRemoteName=htdsLCoOverrideRemoteName, htdsFramerNonOctetData=htdsFramerNonOctetData, htdsPlcRequiredCustomerParm=htdsPlcRequiredCustomerParm, htdsLCoPktsFromNetwork=htdsLCoPktsFromNetwork, htdsFramerFlagsBetweenFrames=htdsFramerFlagsBetweenFrames, htdsPlcRemoteName=htdsPlcRemoteName, htdsFramerProvTable=htdsFramerProvTable, htdsLCoDelayMetric=htdsLCoDelayMetric, hdlcTransparentCapabilitiesBC=hdlcTransparentCapabilitiesBC, htdsFramerCrcErrors=htdsFramerCrcErrors, htdsPlcRowStatus=htdsPlcRowStatus, htdsPlcMaximumAcceptableCost=htdsPlcMaximumAcceptableCost, htdsFramerNormPrioLinkUtilToIf=htdsFramerNormPrioLinkUtilToIf, hdlcTransparentCapabilitiesBC03=hdlcTransparentCapabilitiesBC03, htdsLCoRowStatusTable=htdsLCoRowStatusTable, htdsFramerNormPrioLinkUtilFromIf=htdsFramerNormPrioLinkUtilFromIf, htdsFramerLinkEntry=htdsFramerLinkEntry, htdsPlcEmissionPriority=htdsPlcEmissionPriority, htdsFramerStateTable=htdsFramerStateTable, htdsPlcPermittedTrunkTypes=htdsPlcPermittedTrunkTypes, htdsCidDataTable=htdsCidDataTable, htdsIfEntryTable=htdsIfEntryTable, htdsPlcMaximumAcceptableDelay=htdsPlcMaximumAcceptableDelay, htdsLCoStatsTable=htdsLCoStatsTable, hdlcTransparentGroupBC03=hdlcTransparentGroupBC03, htdsPlcMpathEntry=htdsPlcMpathEntry, htdsIfEntryEntry=htdsIfEntryEntry, htdsPlcProvTable=htdsPlcProvTable, htdsLCo=htdsLCo, htdsUnknownStatus=htdsUnknownStatus, htdsLCoBumpPreference=htdsLCoBumpPreference, htdsPlcBumpPreference=htdsPlcBumpPreference, htdsFramerUnderruns=htdsFramerUnderruns, htdsLCoPathEntry=htdsLCoPathEntry, htdsAdminState=htdsAdminState, htdsFramerRowStatus=htdsFramerRowStatus, htdsFramerInterfaceName=htdsFramerInterfaceName, htdsLCoRoundTripDelay=htdsLCoRoundTripDelay, htdsLCoEmissionPriority=htdsLCoEmissionPriority, htdsIndex=htdsIndex, htdsControlStatus=htdsControlStatus, htdsLCoOptimization=htdsLCoOptimization)
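# pysnmp injects the mibBuilder symbol used above when it executes this file;
# a hedged sketch of loading the compiled module (the directory path is an
# assumption):
#
# from pysnmp.smi import builder
# mib_builder = builder.MibBuilder()
# mib_builder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
# mib_builder.loadModules('Nortel-Magellan-Passport-HdlcTransparentMIB')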
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup, Tag
import os
from abc import ABC, abstractmethod
from typing import List, Dict, Optional
class Csskrt(ABC):
    def __init__(self, filename: str, tag_styles: Dict):
        # should be able to handle dirs (for later) todo
        with open(filename) as f:  # close the handle instead of leaking it
            f_data = f.read()
        self.file_path = filename
        self.soup = BeautifulSoup(f_data, 'html.parser')
        self.tag_styles = tag_styles
@abstractmethod
def get_starter_tags(self) -> List[Tag]:
"""
Return a list of the Tags you want to add to the <head>
:return:
"""
pass
@abstractmethod
    def get_wrapper_tag(self) -> Optional[Tag]:
        """
        Return the 'wrapper' tag for your framework, or None if it has none.
        Eg. a 'container' div for Bootstrap
        :return:
        """
        pass
@abstractmethod
    def get_table_styles(self) -> Dict:
        """
        Return a dictionary of the table-specific tag and the corresponding
        css styles
        Eg. {'table': 'my-table-class', 'thead': 'my-thead-class'}
        :return:
        """
@abstractmethod
def version(self) -> str:
"""
:return: The version number
"""
@abstractmethod
    def get_list_styles(self) -> Dict:
        """
        Return a dictionary of list tags ('ul', 'ol', 'li') and the
        corresponding css classes
        :return:
        """
    def add_class_to_element(self, elem, css_class):
        if not elem.get('class'):
            elem['class'] = css_class
        else:
            try:
                # bs4 normally exposes 'class' as a list of class names
                elem['class'].append(css_class)
            except AttributeError:
                # fall back for parsers that keep it as a plain string
                elem['class'] += ' ' + css_class
def initialize_framework(self, head: Tag, tags: List[Tag]):
"""
        Applies the starter tags to the <head>
:param head:
:param tags:
:return:
"""
for tag in tags:
head.append(tag)
def add_wrapper_tag(self, wrapper_tag: Tag):
"""
Add the container tag for the framework
:param wrapper_tag:
:return:
"""
# potentially optimize by using wrap and swapping attributes?
body_children = list(self.soup.body.children)
self.soup.body.clear()
self.soup.body.append(wrapper_tag)
for child in body_children:
wrapper_tag.append(child)
    def add_form_classes(self, tag_dict: dict) -> None:
"""
Adds classes for form fields
:param tag_dict:
:return:
"""
for form in self.soup.find_all('form'):
for elem in form.children:
if elem.name == 'label':
if 'label' in tag_dict:
self.add_class_to_element(elem, tag_dict['label'])
elif elem.name == 'input':
self.add_class_to_element(elem, tag_dict['input'])
if elem.get('type') == 'radio':
if 'radio' in tag_dict:
self.add_class_to_element(elem, tag_dict['radio'])
elif elem.get('type') == 'checkbox':
if 'checkbox' in tag_dict:
self.add_class_to_element(elem, tag_dict['checkbox'])
# elif type(elem) == Tag: # ignore NavigableStrings like /n
# if tag_dict.get(elem.name):
# self.add_class_to_element(elem, tag_dict[elem.name])
    def add_table_classes(self, table_tag_dict: dict) -> None:
"""
Apply the styles to table elements
Supports the following tags:
('table', 'thead', 'tbody', 'tfoot', 'tr', 'th', 'td')
:param table_tag_dict:
:return:
"""
table_keys = ('thead', 'tbody', 'tfoot', 'tr', 'th', 'td')
for table in self.soup.find_all('table'):
if table_tag_dict.get('table'): # Add style to table tag
self.add_class_to_element(table, table_tag_dict['table'])
for tk in table_keys:
if table_tag_dict.get(tk):
all_table_elems = table.find_all(tk)
for elem in all_table_elems:
self.add_class_to_element(elem, table_tag_dict[tk])
    def add_list_classes(self, list_tags: dict) -> None:
"""
Supports the following tags:
('ul', 'ol', 'li')
:param list_tags:
:return:
"""
        for list_elem in self.soup.find_all(['ol', 'ul']):  # avoid shadowing builtin list
            if list_elem.name == 'ul' and list_tags.get('ul'):
                self.add_class_to_element(list_elem, list_tags['ul'])
            elif list_elem.name == 'ol' and list_tags.get('ol'):
                self.add_class_to_element(list_elem, list_tags['ol'])
            if list_tags.get('li'):
                for li in list_elem.find_all('li', recursive=False):
                    # recursive=False to prevent double modifying for nested lists
                    self.add_class_to_element(li, list_tags['li'])
def add_general_classes(self):
"""
Adds styles to single elements
:return:
"""
supported_classes = (
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'button', 'a', 'nav'
)
for tag in self.tag_styles:
if tag in supported_classes:
for elem in self.soup.find_all(tag):
self.add_class_to_element(elem, self.tag_styles[tag])
    def output(self, pretty_print: bool) -> None:
        """
        Outputs a new file next to the original, suffixed with '_csskrt'.
        :return:
        """
        folder = os.path.dirname(self.file_path)
        file = os.path.basename(self.file_path)
        file_name, ext = os.path.splitext(file)
        new_file_name = os.path.join(folder, file_name + '_csskrt' + ext)
        with open(new_file_name, 'w') as out_file:
            if pretty_print:
                # prettify() produces the indented, human-readable output
                out_file.write(self.soup.prettify())
            else:
                out_file.write(str(self.soup))
    def freshify(self) -> BeautifulSoup:
        """
        Main function that applies all the necessary styles
        :return: the modified soup
        """
starter_tags = self.get_starter_tags()
wrapper_tag = self.get_wrapper_tag()
table_styles = self.get_table_styles()
list_styles = self.get_list_styles()
# Modify the head
if self.soup.head:
self.initialize_framework(self.soup.head, starter_tags)
# Add the "wrapper" tag
if wrapper_tag:
self.add_wrapper_tag(wrapper_tag)
# Elements that have children eg. tables, lists, forms have their own
# dedicated function to support complex operations if necessary.
self.add_form_classes(self.tag_styles)
self.add_list_classes(list_styles)
self.add_table_classes(table_styles)
# Add styles for the rest of the elements
self.add_general_classes()
return self.soup
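# A minimal sketch of a concrete subclass, assuming a hypothetical framework
# and a placeholder CDN URL (real implementations live in their own modules):
#
# class BootstrapCsskrt(Csskrt):
#     def version(self) -> str:
#         return '4.0.0'
#     def get_starter_tags(self):
#         css = self.soup.new_tag('link', rel='stylesheet',
#                                 href='https://cdn.example.com/bootstrap.min.css')
#         return [css]
#     def get_wrapper_tag(self):
#         return self.soup.new_tag('div', attrs={'class': 'container'})
#     def get_table_styles(self):
#         return {'table': 'table'}
#     def get_list_styles(self):
#         return {'ul': 'list-group', 'li': 'list-group-item'}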
|
nilq/baby-python
|
python
|
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from nboost.plugins.models.rerank.base import RerankModelPlugin
from nboost import defaults
from typing import List, Tuple
class USERerankModelPlugin(RerankModelPlugin):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.module = hub.load(self.model_dir)
def rank(self, query: str, choices: List[str],
filter_results: type(defaults.filter_results) = defaults.filter_results
) -> Tuple[List[int], List[float]]:
# questions = [query]
# question_embeddings = self.module.signatures['question_encoder'](
# tf.constant(questions))
# response_embeddings = self.module.signatures['response_encoder'](
# input=tf.constant(choices),
# context=tf.constant(choices))
question_embedding = self.module([query])
candidate_embeddings = self.module(choices)
scores = np.inner(question_embedding, candidate_embeddings)
scores = np.reshape(scores, (-1,))
sorted_indices = list(np.argsort(scores)[::-1])
return sorted_indices, scores[sorted_indices]
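
# Hedged sketch (not from nboost): the core of rank() above is just an inner
# product between the query embedding and each candidate embedding, sorted
# descending. Plain numpy is enough to illustrate it; the toy vectors below
# stand in for Universal Sentence Encoder outputs.
def rank_by_inner_product(query_vec, choice_vecs):
    scores = np.inner(query_vec, choice_vecs).reshape(-1)
    order = list(np.argsort(scores)[::-1])  # highest score first
    return order, scores[order]

if __name__ == '__main__':
    _q = np.array([1.0, 0.0])
    _cands = np.array([[0.0, 1.0], [0.9, 0.1], [0.5, 0.5]])
    print(rank_by_inner_product(_q, _cands))  # candidate 1 ranks first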
|
nilq/baby-python
|
python
|
from yolov3.config.defaults import get_default_config
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""ztc.vm.memory tests"""
import unittest
from ztc.vm.memory import Memory
class Test(unittest.TestCase):
def test_get_active(self):
m = Memory()
assert isinstance(m.active, long)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
nilq/baby-python
|
python
|
import cv2
import argparse
from datetime import datetime
import sys
def do_capture(args):
print("Capturing", args.url)
    capture = cv2.VideoCapture(args.url)
i = 0
while True:
if args.nframes is not None and i >= args.nframes:
print("Done")
break
i += 1
        ret, img = capture.read()
        if not ret:
            print('Camera not found')
            break
        if img is None or img.size == 0:
            print('WARNING: empty frame - restarting video capture')
            capture = cv2.VideoCapture(args.url)
            continue
        if args.display:
            cv2.imshow("Display", img)
        if args.skip is not None:
            if i % (args.skip + 1) != 0:
                continue
        path = "%s_%s.%s" % (args.prefix, "{:%Y_%m_%d_%H_%M_%S_%f}".format(datetime.now()), args.fmt)
        print(path)
        cv2.imwrite(path, img)
if cv2.waitKey(22) & 0xFF == ord('q'):
break
capture.release()
    cv2.destroyAllWindows()
print("Exit")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Fetches MJPEG from web and saves to images""")
parser.add_argument('--url', default="http://192.168.1.3:8081/", help="URL to grab")
    parser.add_argument('--nframes', default=None, type=int, help="Number of frames to grab")
parser.add_argument('--skip', default=None, type=int, help="Number of frames to skip")
parser.add_argument('--prefix', default='out', help="Starting name of file")
parser.add_argument('--fmt', default='png', help="File format")
    parser.add_argument('--display', action='store_true', help="Display output in a window")
args = parser.parse_args(sys.argv[1:])
do_capture(args)
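
# Quick illustration (not part of the original script) of the --skip rule
# above: with skip = N, only every (N + 1)-th frame read is written to disk.
def frames_kept(skip, total):
    return [i for i in range(1, total + 1) if i % (skip + 1) == 0]

# frames_kept(2, 12) -> [3, 6, 9, 12]: one frame saved per three frames read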
|
nilq/baby-python
|
python
|
#!/usr/local/bin/python
#==========================================================================
# Ingest a given scripps dataset into the corresponding databases.
# To be invoked by the overall scripps_ingest.py using subprocess,
# destination data directory and temporary working directory are defined in
# properties.
#
# input: path to the original scripps tar file;
# output: corresponding overall sqlite db file with all data ingested;
# as well as duplicate-filled sqlite db file for individual stations
#
# usage:
# scripps_ingest_single.py /path/to/download/scripps_data.tar
#
# output:
# /path/to/rdahmm/scripps_data.sqlite
# /path/to/rdahmm/stationID.sqlite
#===========================================================================
import os, sys, string, re
import sqlite3 as db
from datetime import date
from datetime import timedelta
from properties import properties
numargv = len(sys.argv)
if numargv == 1:
sys.exit("usage: scripps_ingest_single.py /path/to/scripps_data.tar")
elif numargv == 2:
[scripps_path, tarfile] = os.path.split(sys.argv[1])
scripps_path += "/"
else:
sys.exit("Invalid number of parameters!")
#rdahmm_path = "/home/yuma/RDAHMM/Data/"
#temp_path = "/home/yuma/RDAHMM/TEMP/"
data_path = properties('data_path')
temp_path = properties('temp_path')
datadir = data_path + tarfile[:tarfile.rfind("_")] + "/"
#dbfile = datadir + tarfile[:-4] + ".sqlite"
# get rid of timestamp from db file name
dbfile = datadir + tarfile[:-13] + ".sqlite"
workdir = temp_path + tarfile[:tarfile.rfind("_")] + "/"
#print datadir, dbfile
if not os.path.exists(datadir):
cmd = "mkdir -p " + datadir
os.system(cmd)
if not os.path.exists(workdir):
cmd = "mkdir -p " + workdir
os.system(cmd)
#if the same db file exists, drop it
if os.path.isfile(dbfile):
print "deleting old database " + dbfile
os.remove(dbfile)
# creating/connecting the database
conn = db.connect(dbfile)
# creating a Cursor
cur = conn.cursor()
# creating tables
sql ="""CREATE TABLE GPSTimeSeries (
StationID CHAR(4),
North Num,
East Num,
Up Num,
Nsig Num,
Esig Num,
Usig Num,
Timestamp TEXT,
UNIQUE (StationID, Timestamp))"""
cur.execute(sql)
sql ="""CREATE TABLE ReferencePositions (
StationID CHAR(4),
Latitude Num,
Longitude Num,
Height Num,
UNIQUE (StationID))"""
cur.execute(sql)
conn.commit()
# clear working directory
cmd = "rm -f " + workdir + "*"
os.system(cmd)
print "Processing ", tarfile, "..."
# unpack data
cmd = "tar xvf " + scripps_path + tarfile + " -C " + workdir
os.system(cmd)
dirlist = os.listdir(workdir)
dirlist.sort()
for datafile in dirlist:
if datafile[-2:] == ".Z":
if os.stat(workdir+datafile).st_size == 0: # When .Z file is empty
continue
cmd = "unzip " + workdir + datafile + " -d " + workdir
os.system(cmd)
datafile = datafile[:-2]
stationID = datafile[:4]
station_dbfile = datadir + stationID + ".sqlite"
if os.path.isfile(station_dbfile):
print "deleting old station database " + station_dbfile
os.remove(station_dbfile)
station_conn = db.connect(station_dbfile)
station_cur = station_conn.cursor()
station_sql ="""CREATE TABLE StationGPSTimeSeries (
North Num,
East Num,
Up Num,
Nsig Num,
Esig Num,
Usig Num,
Timestamp TEXT,
                Interpolated INT Default 0,
UNIQUE(Timestamp))"""
station_cur.execute(station_sql)
station_conn.commit()
with open(workdir + datafile, 'r') as f:
data = f.readlines()
last_line = ""
for line in data:
if "Reference position" in line:
refs = map(float, re.findall("(-?[0-9.]*[0-9]+)", line))
lat = refs[0] + refs[1]/60.0 + refs[2]/3600.0
                lon = -1.0 * (refs[3] + refs[4]/60.0 + refs[5]/3600.0)
                height = refs[6]
                sql = "INSERT INTO ReferencePositions (StationID, Latitude, Longitude, Height) "
                sql += " VALUES ('%s', '%s', '%s', '%s')" % (stationID, lat, lon, height)
cur.execute(sql)
if not "#" in line:
record = string.split(line)
if len(record) < 9: # When missing white spaces between columns
tmpstr = ' '.join(record[3:])
record[3:9] = re.findall(r"[+-]?\d+\.\d\d", tmpstr)
[year, days] = map(int, record[1:3])
# days is counted starting from 0
timestamp = date.fromordinal(date(year,1,1).toordinal()+days)
[north, east, up, nsig, esig, usig] = record[3:9]
sql = "INSERT INTO GPSTimeSeries (StationID, North, East, Up, Nsig, Esig, Usig, Timestamp) "
sql += " VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')" % (stationID, north, east, up, nsig, esig, usig, timestamp)
cur.execute(sql)
if last_line == "":
last_line = line
else:
last_record = string.split(last_line)
if len(last_record) < 9:
tmpstr = ' '.join(last_record[3:])
last_record[3:9] = re.findall(r"[+-]?\d+\.\d\d", tmpstr)
[year, days] = map(int, last_record[1:3])
# days is counted starting from 0
last_timestamp = date.fromordinal(date(year,1,1).toordinal()+days)
[lnorth, least, lup, lnsig, lesig, lusig] = last_record[3:9]
# if missing days from last to current, fill with last
for i in range(1, (timestamp - last_timestamp).days):
ts = last_timestamp + timedelta(days=i)
                        interpolated = 1
                        station_sql = "INSERT INTO StationGPSTimeSeries (North, East, Up, Nsig, Esig, Usig, Timestamp, Interpolated) "
                        station_sql += " VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')" % (lnorth, least, lup, lnsig, lesig, lusig, ts, interpolated)
station_cur.execute(station_sql)
last_line = line
station_sql = "INSERT INTO StationGPSTimeSeries (North, East, Up, Nsig, Esig, Usig, Timestamp) "
station_sql += " VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s')" % (north, east, up, nsig, esig, usig, timestamp)
station_cur.execute(station_sql)
station_conn.commit()
conn.commit()
    # the with-block closes the file automatically
station_cur.close()
station_conn.close()
#insert duplicates for missing days in the station database
# create index
sql = "CREATE INDEX idx_StationID ON GPSTimeSeries(StationID)"
cur.execute(sql)
sql = "CREATE INDEX idx_Timestamp ON GPSTimeSeries(Timestamp)"
cur.execute(sql)
sql = "CREATE INDEX idx_RefStationID ON ReferencePositions(StationID)"
cur.execute(sql)
conn.commit()
cur.close()
conn.close()
# clear working directory
#cmd = "rm -f " + workdir + "*"
#os.system(cmd)
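
# Standalone sketch (illustrative names, not part of the ingest script) of
# the gap-filling rule used above: when daily records are missing between
# two samples, the previous record is duplicated and flagged as interpolated.
def fill_gaps(records):
    # records: list of (date, value) pairs sorted by date
    filled = []
    for prev, cur in zip(records, records[1:]):
        filled.append((prev[0], prev[1], 0))
        for i in range(1, (cur[0] - prev[0]).days):
            filled.append((prev[0] + timedelta(days=i), prev[1], 1))  # flag = 1
    filled.append((records[-1][0], records[-1][1], 0))
    return filled

# fill_gaps([(date(2020, 1, 1), 1.5), (date(2020, 1, 4), 2.0)]) duplicates
# the 1.5 value for Jan 2 and Jan 3 with the interpolated flag set.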
|
nilq/baby-python
|
python
|
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2009 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Thomas Herchenroeder (thron7)
#
################################################################################
import sys, os
##
# qxenviron.py -- provide PYTHONPATH extension
##
# calculate script path
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
# extend PYTHONPATH with 'pylib'
sys.path.insert(0,
os.path.normpath(
os.path.join( scriptDir, os.pardir, os.pardir, "pylib")))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
import functools
def parse_tile(s):
lines = s.split('\n')
tile_id = int(lines[0].strip(':').split()[1])
return tile_id, list(map(list, lines[1:]))
def read_tiles():
blocks = sys.stdin.read().strip().split("\n\n")
return {tile_id: tile for tile_id, tile in map(parse_tile, blocks)}
def get_borders(tile):
return [''.join(tile[0]),
''.join(map(lambda x: x[0], tile)),
''.join(tile[-1]),
''.join(map(lambda x: x[-1], tile))]
def normalize_border(border):
return min(border, border[::-1])
def main():
tiles = read_tiles()
borders = dict()
for tile_id, tile in tiles.items():
for border in get_borders(tile):
borders.setdefault(normalize_border(border), []).append(tile_id)
print(borders)
print(max(map(len, borders.values()))) # it should be 2
cnt = dict()
for tile_id in [tile_ids[0] for tile_ids in borders.values() if len(tile_ids) == 1]:
cnt[tile_id] = cnt.get(tile_id, 0) + 1
corners = [tile_id for tile_id, uniq_count in cnt.items() if uniq_count == 2]
print(corners) # it should be 4 different numbers
print(functools.reduce(lambda x, y: x * y, corners))
def rotate90(tile):
size = len(tile)
return [[tile[y][size - 1 - x] for y in range(size)] for x in range(size)]
def flip_sym(tile):
size = len(tile)
return [[tile[y][x] for y in range(size)] for x in range(size)]
def all_rotations(tile):
for i in range(2):
for j in range(4):
yield tile
tile = rotate90(tile)
tile = flip_sym(tile)
def tiles_with_border(border):
return borders[normalize_border(border)]
def another_tile_with_border(border, tile_id):
tile_ids = tiles_with_border(border)
if len(tile_ids) != 2 or tile_id not in tile_ids:
raise ValueError('Can not find unique another tile with border')
return sum(tile_ids) - tile_id
def put(tile_id, x, y):
for dx in range(k):
for dy in range(k):
field[x * k + dx][y * k + dy] = tiles[tile_id][dx + 1][dy + 1]
def is_good_with_neighbour(border_num, expected_border):
return lambda tile: get_borders(tile)[border_num] == expected_border
bs = get_borders(tiles[tile_id])
if y + 1 < n:
next_tile_id = another_tile_with_border(bs[3], tile_id)
rotate_and_put(next_tile_id, x, y + 1, is_good_with_neighbour(1, bs[3]))
if y == 0 and x + 1 < n:
next_tile_id = another_tile_with_border(bs[2], tile_id)
rotate_and_put(next_tile_id, x + 1, y, is_good_with_neighbour(0, bs[2]))
def rotate_and_put(tile_id, x, y, is_good_rotation):
for tile in all_rotations(tiles[tile_id]):
if is_good_rotation(tile):
tiles[tile_id] = tile
put(tile_id, x, y)
return
        raise ValueError('Can not put at (%d, %d)' % (x, y))
def put_all_from_corner(tile_id):
def is_good_at_corner(tile):
return all(map(lambda b: len(tiles_with_border(b)) == 1, get_borders(tile)[:2]))
rotate_and_put(tile_id, 0, 0, is_good_at_corner)
n = int(len(tiles) ** 0.5)
k = len(tiles[corners[0]][0]) - 2
field = [[' '] * (n * k) for i in range(n * k)]
put_all_from_corner(corners[0])
def find_monsters(field):
mask = [' # ',
'# ## ## ###',
' # # # # # # ']
monsters_count = 0
for x in range(n * k - len(mask)):
for y in range(n * k - len(mask[0])):
if all([mask[dx][dy] != '#' or field[x + dx][y + dy] != '.' for dy in range(len(mask[0])) for dx in range(len(mask))]):
monsters_count += 1
for dx in range(len(mask)):
for dy in range(len(mask[0])):
if mask[dx][dy] == '#':
field[x + dx][y + dy] = 'O'
return monsters_count
counts = list(map(find_monsters, all_rotations(field)))
print('\n'.join(map(lambda x: ''.join(x), field)))
print(counts)
print(sum(map(lambda s: ''.join(s).count('#'), field)))
if __name__ == "__main__":
main()
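
# Aside (not part of the solution): rotate90 plus the diagonal flip above
# generate all 8 orientations of a square grid (the dihedral group D4),
# which is why all_rotations yields 2 x 4 variants.
def _demo_orientations():
    tile = [list('ab'), list('cd')]
    seen = set()
    for _ in range(2):
        for _ in range(4):
            seen.add(tuple(map(tuple, tile)))
            tile = [[tile[y][1 - x] for y in range(2)] for x in range(2)]  # rotate90
        tile = [[tile[y][x] for y in range(2)] for x in range(2)]          # flip
    assert len(seen) == 8  # 8 distinct orientations for an asymmetric tile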
|
nilq/baby-python
|
python
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import config
import time
module = 'separable_adapter' # specific module type for universal model: series_adapter, parallel_adapter, separable_adapter
trainMode = 'universal'
from sync_batchnorm import SynchronizedBatchNorm3d
'''
Input: (N, C_{in}, D_{in}, H_{in}, W_{in})
Output: (N, C_{out}, D_{out}, H_{out}, W_{out})
'''
def normalization(planes, norm='bn'):
if norm == 'bn':
m = nn.BatchNorm3d(planes)
elif norm == 'gn':
m = nn.GroupNorm(4, planes)
elif norm == 'in':
m = nn.InstanceNorm3d(planes)
elif norm == 'sync_bn':
m = SynchronizedBatchNorm3d(planes)
else:
raise ValueError('normalization type {} is not supported'.format(norm))
return m
class SegSEBlock(nn.Module):
def __init__(self, in_channels, rate=2, net_mode='3d'):
super(SegSEBlock, self).__init__()
self.in_channels = in_channels
self.rate = rate
self.dila_conv = nn.Conv3d(self.in_channels, self.in_channels // self.rate, 3, padding=2, dilation=self.rate)
self.conv1 = nn.Conv3d(self.in_channels // self.rate, self.in_channels, 1)
def forward(self, input):
x = self.dila_conv(input)
x = self.conv1(x)
x = nn.Sigmoid()(x)
return x
class SELayer(nn.Module):
def __init__(self, channel, reduction=2):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1, 1)
return x * y.expand_as(x)
class RecombinationBlock(nn.Module):
def __init__(self, in_channels, out_channels, batch_normalization=True, kernel_size=3, net_mode='3d'):
super(RecombinationBlock, self).__init__()
bn = SynchronizedBatchNorm3d
self.in_channels = in_channels
self.out_channels = out_channels
        self.batch_normalization = batch_normalization
        self.kernel_size = kernel_size
self.rate = 2
self.expan_channels = self.out_channels * self.rate
self.expansion_conv = nn.Conv3d(self.in_channels, self.expan_channels, 1)
self.skip_conv = nn.Conv3d(self.in_channels, self.out_channels, 1)
self.zoom_conv = nn.Conv3d(self.out_channels * self.rate, self.out_channels, 1)
self.bn = bn(self.expan_channels)
        self.norm_conv = nn.Conv3d(self.expan_channels, self.expan_channels, self.kernel_size, padding=1)
self.segse_block = SegSEBlock(self.expan_channels, net_mode=net_mode)
def forward(self, input):
x = self.expansion_conv(input)
for i in range(1):
            if self.batch_normalization:
x = self.bn(x)
            x = nn.ReLU6(inplace=True)(x)
x = self.norm_conv(x)
x = self.zoom_conv(x)
skip_x = self.skip_conv(input)
out = x + skip_x
return out
def num_pool2stride_size(num_pool_per_axis):
max_num = max(num_pool_per_axis)
stride_size_per_pool = list()
for i in range(max_num):
unit = [1,2]
stride_size_per_pool.append([unit[i<num_pool_per_axis[0]], unit[i<num_pool_per_axis[1]], unit[i<num_pool_per_axis[2]]])
return stride_size_per_pool # [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
def norm_act(nchan, only='both', _norm=None, args=None):
if _norm == 'adain':
norm = AdaptiveInstanceNorm3d(nchan)
else:
norm = SynchronizedBatchNorm3d(nchan)
if config.use_dyrelu:
act = nn.LeakyReLU(negative_slope=1e-2,inplace=True)
else:
act = nn.LeakyReLU(negative_slope=1e-2,inplace=True)
if only=='norm':
return norm
elif only=='act':
return act
else:
return nn.Sequential(norm, act)
class conv1x1(nn.Module):
def __init__(self, inChans, outChans=None, stride=1, padding=0, args=None):
super(conv1x1, self).__init__()
if module == 'series_adapter':
self.op1 = nn.Sequential(
norm_act(inChans,only='norm',args=args),
nn.Conv3d(inChans, inChans, kernel_size=1, stride=1)
)
elif module == 'parallel_adapter':
self.op1 = nn.Conv3d(inChans, outChans, kernel_size=1, stride=stride, padding=padding)
else:
self.op1 = nn.Conv3d(inChans, inChans, kernel_size=1, stride=1)
def forward(self, x):
out = self.op1(x)
if module == 'series_adapter':
out += x
return out
class dwise(nn.Module): # 3x3 Conv3d
def __init__(self, inChans, kernel_size=3, stride=1, padding=1 ,args=None):
super(dwise, self).__init__()
self.conv1 = nn.Conv3d(inChans, inChans, kernel_size=kernel_size, stride=stride, padding=padding, groups=inChans)
self.op1 = norm_act(inChans,only='both',args=args)
def forward(self, x):
out = self.conv1(x)
out = self.op1(out)
return out
class pwise(nn.Module): # 1x1 Conv3d
def __init__(self, inChans, outChans, kernel_size=1, stride=1, padding=0):
super(pwise, self).__init__()
self.conv1 = nn.Conv3d(inChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding)
def forward(self, x):
out = self.conv1(x)
return out
class conv_unit(nn.Module): # 2 conv3d layers (3x3+1x1) + bn/act OR 1 conv3d-with stride-2 to downsample + bn/act
'''
variants of conv3d+norm by applying adapter or not.
'''
def __init__(self, nb_tasks, inChans, outChans, kernel_size=3, stride=1, padding=1, second=0 ,args=None):
super(conv_unit, self).__init__()
self.stride = stride
if self.stride != 1 and self.stride != (1,1,1):
self.conv = nn.Conv3d(inChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding) # padding != 0 for stride != 2 if doing padding=SAME.
elif self.stride == 1 or self.stride == (1,1,1):
if trainMode != 'universal': # independent, shared
self.conv = nn.Conv3d(inChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding) # padding != 0 for stride != 2 if doing padding=SAME.
else:
if module in ['series_adapter', 'parallel_adapter']:
self.conv = nn.Conv3d(inChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding) # padding != 0 for stride != 2 if doing padding=SAME.
if module == 'series_adapter':
self.adapOps = nn.ModuleList([conv1x1(outChans, args=args) for i in range(nb_tasks)]) # based on https://github.com/srebuffi/residual_adapters/
elif module == 'parallel_adapter':
self.adapOps = nn.ModuleList([conv1x1(inChans, outChans, args=args) for i in range(nb_tasks)])
else:
pass
elif module == 'separable_adapter':
self.adapOps = nn.ModuleList([dwise(inChans,args=args) for i in range(nb_tasks)])
self.pwise = pwise(inChans, outChans)
else:
pass
self.op = nn.ModuleList([norm_act(outChans, only='norm',args=args) for i in range(nb_tasks)])
def forward(self, x):
task_idx = config.task_idx
if self.stride != 1 and self.stride != (1,1,1):
out = self.conv(x)
out = self.op[task_idx](out)
return out
elif self.stride == 1 or self.stride == (1,1,1):
if trainMode != 'universal': # independent, shared
out = self.conv(x)
out = self.op[task_idx](out)
else:
if module in ['series_adapter', 'parallel_adapter']:
out = self.conv(x)
if module == 'series_adapter':
out = self.adapOps[task_idx](out)
elif module == 'parallel_adapter':
share_map = out
para_map = self.adapOps[task_idx](x)
out = out + para_map
else:
pass
out = self.op[task_idx](out)
if module == 'parallel_adapter':
return out, share_map, para_map # for visualization of feature maps
else:
return out
elif module == 'separable_adapter':
out = self.adapOps[task_idx](x)
para_map = out
out = self.pwise(out)
share_map = out
out = self.op[task_idx](out)
return out, share_map, para_map
else:
pass
class InputTransition(nn.Module): # 1 Conv3d + bn_act
def __init__(self, inChans, base_outChans,args=None):
super(InputTransition, self).__init__()
self.op1 = nn.Sequential(
nn.Conv3d(inChans, base_outChans, kernel_size=3, stride=1, padding=1),
norm_act(base_outChans,args=args))
def forward(self, x):
out = self.op1(x)
return out
class DownSample(nn.Module):
def __init__(self, nb_tasks, inChans, outChans, kernel_size=3, stride=1, padding=1, args=None):
super(DownSample, self).__init__()
self.args = args
self.op1 = conv_unit(nb_tasks, inChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding, args=args)
self.op2 = conv_unit(nb_tasks, inChans, outChans, kernel_size=(kernel_size,kernel_size,1), stride=(stride[0],stride[0],1), padding=(padding,padding,0), args=args)
self.act1 = norm_act(outChans, only="act",args=args)
def forward(self, x):
out = self.op1(x)
out = self.act1(out)
return out
class DownBlock(nn.Module): # 2 conv-unit: i.e.: Conv:[3x3,1x1,3x3,1x1] with 4 bn/act, also + residual connection
def __init__(self, nb_tasks, inChans, outChans, kernel_size=3, stride=1, padding=1 ,args=None):
super(DownBlock, self).__init__()
self.args = args
self.op1 = conv_unit(nb_tasks, inChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding, args=args)
self.act1 = norm_act(outChans, only="act",args=args)
self.op2 = conv_unit(nb_tasks, outChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding, args=args)
self.act2 = norm_act(outChans, only="act",args=args)
def forward(self, x):
if module == 'parallel_adapter' or module == 'separable_adapter':
out, share_map, para_map = self.op1(x)
else:
out = self.op1(x)
out = self.act1(out)
if module == 'parallel_adapter' or module == 'separable_adapter':
out, share_map, para_map = self.op2(out)
else:
out = self.op2(out)
if config.residual:
out = self.act2(x + out)
else:
out = self.act2(out)
return out
def Upsample3D(scale_factor=(2)):
'''
task specific
'''
upsample = nn.Upsample(scale_factor=scale_factor, mode='trilinear', align_corners=True)
return upsample
class UnetUpsample(nn.Module): # Upsample + 1-conv-unit (2-Conv3d)
def __init__(self, nb_tasks, inChans, outChans, up_stride=(2), norm=None, args=None):
super(UnetUpsample, self).__init__()
self.args = args
self.upsamples = nn.ModuleList(
[Upsample3D(scale_factor=up_stride) for i in range(nb_tasks)])
self.op = conv_unit(nb_tasks, inChans, outChans, kernel_size=3,stride=1, padding=1, args=args)
self.act = norm_act(outChans, only='both', _norm=norm, args=args)
def forward(self, x):
task_idx = config.task_idx
out = self.upsamples[task_idx](x)
if module == 'parallel_adapter' or module == 'separable_adapter':
out, share_map, para_map = self.op(out)
else:
out = self.op(out)
out = self.act(out)
if module == 'parallel_adapter' or module == 'separable_adapter':
return out, share_map, para_map
else:
return out
class UpBlock(nn.Module): # 2-conv-unit (4 Conv3d + bn/act), w/o residual-connection
def __init__(self, nb_tasks, inChans, outChans, kernel_size=3, stride=1, padding=1, norm=None, args=None):
super(UpBlock, self).__init__()
self.args = args
self.op1 = conv_unit(nb_tasks, inChans, outChans, kernel_size=kernel_size, stride=stride, padding=padding, args=args)
self.act1 = norm_act(outChans, only="act", _norm=norm, args=args)
self.op2 = conv_unit(nb_tasks, outChans, outChans, kernel_size=1, stride=1, padding=padding, args=args) # ori: padding=0
self.act2 = norm_act(outChans, only="act", _norm=norm, args=args)
self.residual_conv = nn.Conv3d(inChans, outChans, kernel_size=1, stride=stride, padding=0)
def forward(self, x, up_x):
if module == 'parallel_adapter' or module == 'separable_adapter':
out, share_map, para_map = self.op1(x)
else:
out = self.op1(x)
out = self.act1(out)
if module == 'parallel_adapter' or module == 'separable_adapter':
out, share_map, para_map = self.op2(out)
else:
out = self.op2(out)
        if config.residual: # same as in ResNet # New: add residual connections in the upsamples
_x = self.residual_conv(x)
out = self.act2(_x + out)
else:
out = self.act2(out)
return out
class DeepSupervision(nn.Module):
'''
task specific
'''
def __init__(self, inChans, num_class, up_stride=(2,2,2), use_kd=None, args=None):
super(DeepSupervision, self).__init__()
self.op1 = nn.Sequential(
nn.Conv3d(inChans, num_class, kernel_size=1, stride=1, padding=0),
norm_act(num_class,args=args))
self.op2 = Upsample3D(scale_factor=up_stride)
self.re_channel = nn.Conv3d(inChans, 8, kernel_size=1, stride=1, padding=0) ## maybe no use
self.op1_FeaDistill1 = nn.Sequential(
nn.Conv3d(inChans, args.fea_dim, kernel_size=3, stride=1, padding=1),
norm_act(args.fea_dim,args=args))
self.op1_FeaDistill2 = nn.Sequential(
nn.Conv3d(args.fea_dim, num_class, kernel_size=1, stride=1, padding=0),
norm_act(num_class,args=args))
self.args = args
self.use_kd = use_kd
def forward(self, x, deep_supervision):
if self.use_kd:
if deep_supervision is None:
fea = self.op1_FeaDistill1(x)
logit = self.op1_FeaDistill2(fea)
else:
fea = self.op1_FeaDistill1(x)
logit = self.op1_FeaDistill2(fea)
logit = torch.add(logit, deep_supervision) # Add
out = self.op2(logit)
return out, fea, logit
else:
if config.deep_sup_type == 'add':
if deep_supervision is None:
out = self.op1(x)
deep_sup_fea = out
else:
deep_sup_fea = self.op1(x)
out = torch.add(deep_sup_fea, deep_supervision) # Add
out = self.op2(out)
return out, deep_sup_fea
elif config.deep_sup_type == 'concat':
if deep_supervision is None:
out = self.re_channel(x) # [1,32,x,x,x]
else:
out = torch.cat([self.re_channel(x), deep_supervision], axis=1) # concat
out = self.op2(out)
return out
class OutputTransition(nn.Module): # 1 Conv3d
'''
task specific
'''
def __init__(self, inChans, num_class):
super(OutputTransition, self).__init__()
self.conv1 = nn.Conv3d(inChans, num_class, kernel_size=1, stride=1, padding=0)
self.final_conv = nn.Conv3d(4*8, num_class, kernel_size=1, stride=1, padding=0)
def forward(self, x, deep_supervision=None):
if config.deep_sup_type == 'add':
out = self.conv1(x)
if deep_supervision is None:
return out
else:
out = torch.add(out, deep_supervision) # Add deep_sup: [1,32*4,128,128,128]
return out
elif config.deep_sup_type == 'concat':
out = torch.cat([x, deep_supervision], axis=1) # Concat
            out = F.dropout3d(out, p=0.5, training=self.training)  # Dropout3d, active only in training mode
out = self.final_conv(out)
return out
class u2net3d(nn.Module):
def __init__(self, inChans_list=[4], base_outChans=8, num_class_list=[4], args=None, label_downsample=False, multi_branch=False): # base_outChans=16
'''
Args:
One or more tasks could be input at once. So lists of inital model settings are passed.
inChans_list: a list of num_modality for each input task.
base_outChans: outChans of the inputTransition, i.e. inChans of the first layer of the shared backbone of the universal model.
depth: depth of the shared backbone.
'''
super(u2net3d, self).__init__()
nb_tasks = len(num_class_list) # 1
self.depth = max(config.num_pool_per_axis) + 1 # 5 num_pool_per_axis firstly defined in train_xxxx.py or main.py
stride_sizes = num_pool2stride_size(config.num_pool_per_axis)
self.in_tr_list = nn.ModuleList(
[InputTransition(inChans_list[j], base_outChans,args=args) for j in range(nb_tasks)]
) # task-specific input layers
outChans_list = list()
self.down_blocks = nn.ModuleList() # # register modules from regular python list.
self.down_samps = nn.ModuleList()
self.down_pads = list() # used to pad as padding='same' in tensorflow
inChans = base_outChans
for i in range(self.depth):
outChans = base_outChans * (2**i)
outChans_list.append(outChans)
self.down_blocks.append(DownBlock(nb_tasks, inChans, outChans, kernel_size=3, stride=1, padding=1,args=args))
if i != self.depth-1:
# stride for each axis could be 1 or 2, depending on tasks. # to apply padding='SAME' as tensorflow, cal and save pad num to manually pad in forward().
                pads = list() # 6 elements for one 3-D volume, organized from the last dim backward to the first, e.g. w,w,h,h,d,d # required by F.pad.
# pad 1 to the right end if s=2 else pad 1 to both ends (s=1).
for j in stride_sizes[i][::-1]:
if j == 2:
pads.extend([0,1])
elif j == 1:
pads.extend([1,1])
self.down_pads.append(pads)
self.down_samps.append(DownSample(nb_tasks, outChans, outChans*2, kernel_size=3, stride=tuple(stride_sizes[i]), padding=1, args=args)) # padding=0
inChans = outChans*2
else:
inChans = outChans
self.up_samps = nn.ModuleList([None] * (self.depth-1))
self.up_blocks = nn.ModuleList([None] * (self.depth-1))
self.dSupers = nn.ModuleList() # 1 elements if self.depth =2, or 2 elements if self.depth >= 3
for i in range(self.depth-2, -1, -1): # i=[4,3,2,1,0]
self.up_samps[i] = UnetUpsample(nb_tasks, inChans, outChans_list[i], up_stride=stride_sizes[i][0],args=args)
self.up_blocks[i] = UpBlock(nb_tasks, outChans_list[i]*2, outChans_list[i], kernel_size=3,stride=1, padding=1, args=args)
if config.deep_supervision and i <= (self.depth-3) and i > 0:
self.dSupers.append(nn.ModuleList(
[DeepSupervision(outChans_list[i], num_class_list[j], up_stride=tuple(stride_sizes[i-1])) for j in range(nb_tasks)]))
inChans = outChans_list[i]
self.out_tr_list = nn.ModuleList(
[OutputTransition(inChans, num_class_list[j]) for j in range(nb_tasks)])
self.args = args
def forward(self, x):
task_idx = config.task_idx # 0
deep_supervision = None
        deep_sup_fea = [] # 3 feature maps
out = self.in_tr_list[task_idx](x) # 1st-3x3conv, [1, 8, 128, 128, 128]
down_list = list()
for i in range(self.depth): # 5/6 5/6*(nn.Conv3d+down_sample) --Encoder
out = self.down_blocks[i](out)
if i != self.depth-1: # 5
down_list.append(out) # will not store the deepest, so as to save memory
out = self.down_samps[i](out) #
idx = 0
for i in range(self.depth-2, -1, -1): # i=[(4,) 3,2,1,0] 4/5 * (nn.Conv3d+up_sample+concat) --Decoder
if module == 'parallel_adapter' or module == 'separable_adapter':
out, share_map, para_map = self.up_samps[i](out) # Conduct the true upsample
else:
out = self.up_samps[i](out)
up_x = out
out = torch.cat((out, down_list[i]), dim=1)
            out = self.up_blocks[i](out, up_x) # Actually, there is no residual connection here! (up_x is not used at all! Try?)
if config.deep_supervision and i <= (self.depth-3) and i > 0: # On 3or4-level, expect the final-level (and smallest 4*4 level)
deep_supervision, _deep_sup_fea = self.dSupers[idx][task_idx](out, deep_supervision)
deep_sup_fea.append(_deep_sup_fea) ###
idx += 1
if (not config.deep_supervision):
deep_sup_fea = []
out = self.out_tr_list[task_idx](out, deep_supervision)
if module == 'parallel_adapter' or module == 'separable_adapter':
return out, share_map, para_map, deep_sup_fea
else:
return out
class StyleEncoder(nn.Module): ### split/group from the start time: (2d-conv)
def __init__(self, inChans_list=[1], base_outChans=4, style_dim=8, in_dim_2d=128): # No bn
super(StyleEncoder, self).__init__()
self.model = []
# self.model_freq = []
self.model += [nn.Conv2d(inChans_list[0], base_outChans, kernel_size=3, stride=1, padding=1)]
self.model += [nn.ReLU(inplace=True)]
dim = base_outChans
for i in range(1):
self.model += [nn.Conv2d(dim, 2 * dim, kernel_size=3, stride=1, padding=1)]
self.model += [nn.ReLU(inplace=True)]
dim *= 2
self.model += [nn.Conv2d(dim, 2 * dim, kernel_size=3, stride=[2,2], padding=1)]
self.model += [nn.ReLU(inplace=True)]
dim *= 2
self.model += [nn.Conv2d(dim, style_dim, kernel_size=1, stride=1, padding=0)]
self.model += [nn.ReLU(inplace=True)]
self.model = nn.Sequential(*self.model)
def forward(self, x): # parallel version: [b*4,1,128,128,128] --> [bx4x128,1,128,128]
if x.shape[1] == 1:
x = torch.squeeze(x,1) # [b*4,128,128,128]
b,d,w,h = x.shape[:4]
x = x.permute(0,3,2,1) ###
x_parallel = torch.unsqueeze(torch.cat([x[:,i,...] for i in range(x.shape[1])],0), 1) # [bx4x128,1,128,128]
freq_fea_out = self.model(x_parallel)
out = nn.AdaptiveAvgPool2d(1)(freq_fea_out)
return out, freq_fea_out
class ContentEncoder(nn.Module):
def __init__(self, inChans_list=[1], base_outChans=8, args=None): # base_outChans=16
super(ContentEncoder, self).__init__()
self.depth = max(config.num_pool_per_axis) + 1 # 5 num_pool_per_axis firstly defined in train_xxxx.py or main.py
stride_sizes = num_pool2stride_size(config.num_pool_per_axis)
self.in_tr_list = nn.ModuleList([InputTransition(inChans_list[j], base_outChans) for j in range(1)]) # task-specific input layers
self.outChans_list = list()
self.down_blocks = nn.ModuleList() # # register modules from regular python list.
self.down_samps = nn.ModuleList()
self.inChans = base_outChans
for i in range(self.depth):
outChans = base_outChans * (2**i)
self.outChans_list.append(outChans)
self.down_blocks.append(DownBlock(1, self.inChans, outChans, kernel_size=3, stride=1, padding=1, args=args))
if i != self.depth-1:
self.down_samps.append(DownSample(1, outChans, outChans*2, kernel_size=3, stride=tuple(stride_sizes[i]), padding=1, args=args)) # padding=0
self.inChans = outChans*2
else:
self.inChans = outChans
def forward(self, x): # x: [N, C, D, H, W]
out = self.in_tr_list[0](x) # 1st-3x3conv, [1, 8, 128, 128, 128]
down_list = list()
for i in range(self.depth):
out = self.down_blocks[i](out)
if i != self.depth-1:
down_list.append(out) # will not store the deepest, so as to save memory
out = self.down_samps[i](out) #
return out, down_list, self.inChans, self.outChans_list
class Decoder(nn.Module):
def __init__(self, inChans, outChans_list, concatChan_list=[None], num_class_list=[4], norm='adain', use_distill=False, use_kd=False, args=None): # base_outChans=16, norm=['in','adain']
super(Decoder, self).__init__()
nb_tasks = len(num_class_list) # 1
self.depth = max(config.num_pool_per_axis) + 1
stride_sizes = num_pool2stride_size(config.num_pool_per_axis)
self.up_samps = nn.ModuleList([None] * (self.depth-1))
self.up_blocks = nn.ModuleList([None] * (self.depth-1))
self.dSupers = nn.ModuleList() # 1 elements if self.depth =2, or 2 elements if self.depth >= 3
self.dSupers_bin = nn.ModuleList()
self.use_distill = use_distill
for i in range(self.depth-2, -1, -1):
if i == self.depth-2 and norm == 'adain':
_norm = 'adain'
else:
_norm = None
self.up_samps[i] = UnetUpsample(nb_tasks, inChans, outChans_list[i], up_stride=stride_sizes[i][0], norm=_norm, args=args)
self.up_blocks[i] = UpBlock(nb_tasks, outChans_list[i]+concatChan_list[i], outChans_list[i], kernel_size=3, stride=1, padding=1, norm=_norm, args=args)
if config.deep_supervision and i <= (self.depth-3) and i > 0:
self.dSupers.append(nn.ModuleList([DeepSupervision(outChans_list[i], num_class_list[j], up_stride=tuple(stride_sizes[i-1]),use_kd=use_kd,args=args) for j in range(nb_tasks)]))
self.dSupers_bin.append(nn.ModuleList([DeepSupervision(outChans_list[i], 1, up_stride=tuple(stride_sizes[i-1]),use_kd=use_kd,args=args) for j in range(nb_tasks)]))
inChans = outChans_list[i]
self.out_tr_list_all = nn.ModuleList([OutputTransition(inChans, num_class_list[j]) for j in range(nb_tasks)])
self.out_tr_list_binary = nn.ModuleList([OutputTransition(inChans, 1)])
self.use_kd = use_kd
def forward(self, x, down_list, is_binary=False):
# x: [N, C, D, H, W]
task_idx = config.task_idx
deep_supervision = None
deep_sup_fea = []
distill_kd_fea = []
idx = 0
out = x
for i in range(self.depth-2, -1, -1): # i=[(4,) 3,2,1,0] 4/5 * (nn.Conv3d+up_sample+concat) --Decoder
out, share_map, para_map = self.up_samps[i](out) # Conduct the true upsample
up_x = out
out = torch.cat((out, down_list[i]), dim=1)
out = self.up_blocks[i](out, up_x)
if config.deep_supervision and i <= (self.depth-3) and i > 0: # On 3 or 4-level, expect the final-level (and smallest 4*4 level)
if self.use_kd:
if is_binary:
deep_supervision, _distill_fea, _logit = self.dSupers_bin[idx][task_idx](out, deep_supervision)
else:
deep_supervision, _distill_fea, _logit = self.dSupers[idx][task_idx](out, deep_supervision)
_deep_sup_fea = _logit
distill_kd_fea.append(_distill_fea)
else:
if is_binary:
deep_supervision, _deep_sup_fea = self.dSupers_bin[idx][task_idx](out, deep_supervision)
else:
deep_supervision, _deep_sup_fea = self.dSupers[idx][task_idx](out, deep_supervision)
deep_sup_fea.append(_deep_sup_fea)
idx += 1
if (not config.deep_supervision):
deep_sup_fea = []
if is_binary:
out = self.out_tr_list_binary[0](out, None)
else:
out = self.out_tr_list_all[task_idx](out, deep_supervision)
if self.use_distill:
distill_fea = out
if self.use_kd:
return out, deep_sup_fea, distill_kd_fea, distill_fea
else:
return out, deep_sup_fea, distill_fea
else:
return out, deep_sup_fea
class AdaINGen(nn.Module):
# AdaIN auto-encoder architecture
def __init__(self, input_dim=4, dim=2, decode_indim=16, style_dim=8, mlp_dim=256, num_class_list=[1], norm='adain', in_dim_2d=128, auxdec_dim=1, modal_num=4, args=None): #in_dim_2d=128):
super(AdaINGen, self).__init__()
# style encoder
self.enc_style = StyleEncoder(inChans_list=[input_dim], base_outChans=dim, style_dim=style_dim, in_dim_2d=in_dim_2d)
# content encoder
self.enc_content = ContentEncoder(inChans_list=[input_dim*modal_num], base_outChans=dim, args=args) # out, down_list, self.inChans, self.outChans_list
_outChans_list = [dim*pow(2,i) for i in range(1+max(config.num_pool_per_axis))] # [8, 16, 32, 64] ##
_outChans_list_small = [auxdec_dim*pow(2,i) for i in range(1+max(config.num_pool_per_axis))]
self.dec = Decoder(decode_indim, _outChans_list_small, _outChans_list, num_class_list=num_class_list, norm=norm,args=args) # [8, 16, 32, 64]
# MLP to generate AdaIN parameters
self.mlp = MLP(style_dim, self.get_num_adain_params(self.dec), mlp_dim, 3, norm='none', activ='relu')
self.args = args
def forward(self, images, x):
# reconstruct an image
content, style_fake, enc_list = self.encode(images, x)
images_recon = self.decode(content, style_fake, enc_list)
return images_recon
def encode(self, images, x):
# encode an image to its content and style codes
t=time.time()
style_fake, freq_fea_out = self.enc_style(images)
content, enc_list = self.enc_content(x)[:2]
if self.args.use_freq_map == True:
style_fea_map = freq_fea_out
else:
style_fea_map = torch.Tensor([0.0]).cuda()
return content, style_fake, style_fea_map, enc_list
def decode(self, content, style, enc_list):
# decode content and style codes to an image
        if style is not None:
adain_params = self.mlp(style)
self.assign_adain_params(adain_params, self.dec)
images, deep_sup_fea = self.dec(content, enc_list)[:2]
return images, deep_sup_fea
def assign_adain_params(self, adain_params, model):
'''
Adopt AdaIN layer to fuse the style-aware information and feature maps
assign the adain_params to the AdaIN layers in model
'''
for m in model.modules():
if m.__class__.__name__ == "AdaptiveInstanceNorm3d":
mean = adain_params[:, :m.num_features]
std = adain_params[:, m.num_features:2*m.num_features]
m.bias = mean.contiguous().view(-1)
m.weight = std.contiguous().view(-1)
if adain_params.size(1) > 2*m.num_features:
adain_params = adain_params[:, 2*m.num_features:]
def get_num_adain_params(self, model):
# return the number of AdaIN parameters needed by the model
num_adain_params = 0
for m in model.modules():
if m.__class__.__name__ == "AdaptiveInstanceNorm3d":
num_adain_params += 2*m.num_features
return num_adain_params
class MLP(nn.Module):
def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'):
super(MLP, self).__init__()
self.model = []
self.model += [LinearBlock(input_dim, dim, norm=norm, activation=activ)]
for i in range(n_blk - 2):
self.model += [LinearBlock(dim, dim, norm=norm, activation=activ)]
self.model += [LinearBlock(dim, output_dim, norm='none', activation='none')] # no output activations
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x.view(x.size(0), -1))
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# initialize normalization
norm_dim = output_dim
if norm == 'bn':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'none':
self.norm = None
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=True)
elif activation == 'none':
self.activation = None
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
class AdaptiveInstanceNorm3d(nn.Module):
def __init__(self, num_features, eps=1e-5, momentum=0.1):
super(AdaptiveInstanceNorm3d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
# weight and bias are dynamically assigned
self.weight = None
self.bias = None
# just dummy buffers, not used
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!"
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
# Apply instance norm
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
out = F.batch_norm(
x_reshaped, running_mean, running_var, self.weight, self.bias,
True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Target_transform(nn.Module): # n_res x (conv+bn+relu) with residual + (conv+bn+relu)
def __init__(self, n_res, dim, output_dim, res_norm='adain', activ='LeakyReLU', args=None):
super(Target_transform, self).__init__()
self.model = []
# AdaIN residual blocks
self.model += [nn.Conv3d(dim, dim//4, 1, 1, bias=False, padding=0)]
dim = dim//4
self.model += [ResBlocks(n_res, dim, res_norm, activ, args=args)]
self.model += [Conv3dBlock(dim, output_dim, 3, 1, 1, norm='none', activation='LeakyReLU', args=args)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
class ResBlocks(nn.Module): # num_blocks x (conv+bn+relu) with residual
def __init__(self, num_blocks, dim, norm='in', activation='relu', args=None):
super(ResBlocks, self).__init__()
self.model = []
for i in range(num_blocks):
self.model += [ResBlock(dim, norm=norm, activation=activation, args=args)]
self.model = nn.Sequential(*self.model)
def forward(self, x):
return self.model(x)
class ResBlock(nn.Module): # 2x(conv+bn+relu) with residual
def __init__(self, dim, norm='in', activation='relu', args=None):
super(ResBlock, self).__init__()
model = []
model += [Conv3dBlock(dim ,dim, 3, 1, 1, norm=norm, activation=activation, args=args)]
model += [Conv3dBlock(dim ,dim, 3, 1, 1, norm=norm, activation='none', args=args)]
self.model = nn.Sequential(*model)
def forward(self, x):
residual = x
out = self.model(x)
out += residual
return out
class Conv3dBlock(nn.Module): # 1x(conv+bn+relu)
def __init__(self, input_dim ,output_dim, kernel_size, stride,
padding=0, norm='none', activation='relu', args=None):
super(Conv3dBlock, self).__init__()
self.args = args
self.use_bias = True
# initialize normalization
norm_dim = output_dim
if norm == 'bn':
self.norm = SynchronizedBatchNorm3d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm3d(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm3d(norm_dim)
elif norm == 'none' :
self.norm = None
# initialize activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'LeakyReLU':
self.activation = nn.LeakyReLU(negative_slope=1e-2,inplace=True)
elif activation == 'none':
self.activation = None
# initialize convolution
        self.conv = nn.Conv3d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias, padding=padding)
self.conv_1x3 = nn.Conv3d(input_dim, output_dim, kernel_size=(kernel_size,kernel_size,1), stride=(stride,stride,1), bias=self.use_bias,padding=(padding,padding,0))
def forward(self, x):
x = self.conv(x)
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
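
# Minimal sketch (illustrative, with made-up sizes) of the AdaIN plumbing
# used by AdaINGen above: the MLP emits one (mean, std) pair per channel,
# and assign_adain_params slices that flat vector into each AdaIN layer.
def _adain_slice_demo():
    num_features = 4                                 # channels of one AdaIN layer
    adain_params = torch.randn(1, 2 * num_features)  # output of the MLP
    bias = adain_params[:, :num_features].contiguous().view(-1)                     # means
    weight = adain_params[:, num_features:2 * num_features].contiguous().view(-1)   # stds
    assert bias.shape == weight.shape == (num_features,)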
|
nilq/baby-python
|
python
|
"""
@author: Arpit Somani
"""
#Import the required modules
import numpy
#Creating the platform, where we play the game.
board= numpy.array([['_','_','_'],['_','_','_'],['_','_','_']])
#We have 2 symbols , as its a 2 player game
p1s= 'X'
p2s= 'O'
#Check whether the symbol completes any full row
def check_rows(symbol):
for r in range (3):
count=0
for c in range(3):
if board[r][c] == symbol:
count=count+1
if count==3:
print(symbol,"Won")
return True
return False
#Check whether the symbol completes any full column
def check_cols(symbol):
for c in range (3):
count=0
for r in range(3):
if board[r][c] == symbol:
count=count+1
if count==3:
print(symbol,"Won")
return True
return False
#Check whether the symbol completes either diagonal
def check_diagonals(symbol):
if board[0][2]==board[1][1] and board[1][1]==board[2][0] and board[1][1]==symbol:
print(symbol,"Won")
return True
if board[0][0]==board[1][1] and board[1][1]==board[2][2] and board[1][1]==symbol:
print(symbol,"Won")
return True
return False
#A player wins when their symbol forms a straight line along a row, column, or diagonal.
def won(symbol):
return check_rows(symbol) or check_cols(symbol) or check_diagonals(symbol)
#Placing of players symbol as desired empty position
def place(symbol):
print(numpy.matrix(board))
while(1):
row=int(input('Enter row: 1 or 2 or 3: '))
col=int(input('Enter col: 1 or 2 or 3: '))
if row>0 and row<4 and col>0 and col<4 and board[row-1][col-1]=='_':
break
else:
            print('Invalid input. Please enter again!')
board[row-1][col-1]=symbol
#The play function: player 1 starts, and turns alternate between the two players until someone wins or the board fills up.
def play():
    winner = None
    for turn in range(9):
        if turn % 2 == 0:
            print("X's turn")
            place(p1s)
            if won(p1s):
                winner = p1s
        else:
            print("O's turn")
            place(p2s)
            if won(p2s):
                winner = p2s
        if winner:
            break
    if winner is None:
        print("Draw!")
#Calling play function
play()
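
#Aside (illustration only): because the board is a numpy array, the three
#win checks above can also be written in a few vectorized lines.
def won_numpy(symbol):
    hits = (board == symbol)
    return (hits.all(axis=0).any()                   # any full column
            or hits.all(axis=1).any()                # any full row
            or hits.diagonal().all()                 # main diagonal
            or numpy.fliplr(hits).diagonal().all())  # anti-diagonal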
|
nilq/baby-python
|
python
|
# segmented sieve
from __future__ import annotations
import math
def sieve(n: int) -> tuple:
"""
>>> sieve(2 **3)
(2, 3, 5, 7)
>>> sieve(3 ** 3)
(2, 3, 5, 7, 11, 13, 17, 19, 23)
>>> sieve(4)
(2, 3)
"""
in_prime = []
start = 2
end = int(math.sqrt(n))
temp = [True] * (end + 1)
prime = []
while start <= end:
if temp[start] is True:
in_prime.append(start)
for i in range(start * start, end + 1, start):
if temp[i] is True:
temp[i] = False
start += 1
prime += in_prime
low = end + 1
high = low + end - 1
if high > n:
high = n
while low <= n:
temp = [True] * (high - low + 1)
for each in in_prime:
t = math.floor(low / each) * each
if t < low:
t += each
for j in range(t, high + 1, each):
temp[j - low] = False
for j in range(len(temp)):
if temp[j] is True:
prime.append(low + j)
low = high + 1
high = low + end - 1
if high > n:
high = n
return tuple(prime)
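
# Cross-check sketch (not in the original): the segmented result should
# match a plain one-pass sieve of Eratosthenes for small n.
def naive_sieve(n: int) -> tuple:
    flags = [True] * (n + 1)
    primes = []
    for p in range(2, n + 1):
        if flags[p]:
            primes.append(p)
            for m in range(p * p, n + 1, p):
                flags[m] = False
    return tuple(primes)

assert sieve(100) == naive_sieve(100)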
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
# print(sieve(4))
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.5 on 2020-04-23 10:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='payment',
name='payment_type',
field=models.CharField(default='Rent', max_length=50),
),
]
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.functional as F
import torch.optim as optim
from random import randint
import argparse
from datetime import datetime
import os
import numpy as np
from time import time
from collections import defaultdict
from gnn_cnn_model.deepwalks import *
from gnn_cnn_model.model import *
from gnn_cnn_model.utils import *
from params import args
from utils import load_data, accuracy, load_data_wiki
if args.cuda != -1:
device = torch.device("cuda:" + str(args.cuda))
else:
device = torch.device("cpu")
print(os.getcwd())
def subgraph_tensor(walks, indices):
a = []
# may have duplicated keys so use i as the key
for i, target_node in enumerate(indices):
paths = walks[int(target_node)]
b = []
for path in paths:
p = [int(n) for n in path]
b.append(p)
b_tensor = torch.tensor(b, dtype=torch.long)
a.append(b_tensor)
out = torch.stack(a).to(device)
return out
# train
def train():
print('********************* start training *********************')
best_value = 1000000
stopping_step = 0
idx_all = torch.tensor(range(n_nodes), dtype=torch.long).to(device)
best_epoch = 0
for epoch in np.arange(args.n_epoch) + 1:
# st = time()
# walks_train = deepwalks(edges, undirected=True, number_walks=args.number_walks,
# walk_length=args.walk_length, seed=randint(0, 99999))
walks_train = deepwalks(edges, undirected=True, number_walks=args.number_walks,
walk_length=args.walk_length, seed=args.seed)
# print('sample subgraph time:', time()-st)
model.train()
start_time = time()
# n_train_nodes = len(idx_train)
# shuffled_indices = torch.randperm(n_nodes)
# scores = []
# losses_train, losses_val = 0, 0
# for batch, count in enumerate(range(0, n_train_nodes, args.batch_size)):
optimizer.zero_grad()
# train for users nodes
# indices = shuffled_indices[count:min(count+args.batch_size, n_train_nodes)]
# indices = idx_all[shuffled_indices]
path_tensor = subgraph_tensor(walks_train, idx_all)
# target_emb = features[indices]
# loss
# item_empty = torch.tensor(item_empty, dtype=torch.long).to(device)
score = model(idx_all, path_tensor)
# idx_train_shuffled = torch.zeros_like(idx_train)
# for i, idx in enumerate(idx_train):
# idx_train_shuffled[i] = torch.nonzero(indices == idx)
score_train = score[idx_train]
label_train = labels[idx_train]
loss_train = criterion(score_train, label_train)
loss_train.backward()
optimizer.step()
# losses_train += loss_train
# scores.append(score)
end_time = time()
# label = labels[shuffled_indices]
# scores = torch.cat(scores, dim=0)
acc_train = accuracy(score_train, label_train)
# print('********************* start evaluation *********************')
# idx_val_shuffled = torch.zeros_like(idx_val)
# for i, idx in enumerate(idx_val):
# idx_val_shuffled[i] = torch.nonzero(indices == idx)
loss_val = criterion(score[idx_val], labels[idx_val])
acc_val = accuracy(score[idx_val], labels[idx_val])
# loss_val, acc_val = eval_on_test_data(idx_val, walks_train)
print('Epoch: {:04d}'.format(epoch),
'loss_train: {:.4f}'.format(loss_train),
'acc_train: {:.4f}'.format(acc_train),
'loss_val: {:.4f}'.format(loss_val.data.item()),
'acc_val: {:.4f}'.format(acc_val.data.item()),
'time: {:.4f}s'.format(end_time-start_time))
# print('********************* end evaluation *********************')
loss_val = loss_val.detach().cpu().item()
if loss_val < best_value:
# print('update!')
torch.save(model.state_dict(), 'output/{}.pkl'.format(args.best_model))
best_epoch = epoch
best_value, stopping_step, should_stop = early_stopping(loss_val, best_value, stopping_step, flag_step=150)
if should_stop:
break
return best_epoch
def eval_on_test_data(test_data, seed):
walks = deepwalks(edges, undirected=True, number_walks=args.number_walks,
walk_length=args.walk_length, seed=seed)
model.eval()
with torch.no_grad():
# indices = test_data
# path_tensor = sample_subgraph(walks, indices)
# # target_emb = features[indices]
#
# # loss
# score = model(path_tensor, indices)
# label = labels[indices]
# loss = criterion(score, label)
#
# acc_val = accuracy(score, label)
path_tensor = subgraph_tensor(walks, idx_all)
score = model(idx_all, path_tensor)
loss = criterion(score[test_data], labels[test_data])
acc = accuracy(score[test_data], labels[test_data])
return loss, acc
if __name__ == '__main__':
adj, features, labels, idx_train, idx_val, idx_test, edges = load_data()
# adj, features, labels, idx_train, idx_val, idx_test, edges = load_data_wiki(dataset='chameleon')
n_nodes = adj.shape[0]
# tmp = torch.unique(labels)
n_labels = int(torch.unique(labels).size(0))
fea_dim = features.shape[1]
# deepwalk
# print('********************* walk for train val test *********************')
# walks_train = deepwalks(edges, undirected=True, number_walks=args.number_walks,
# walk_length=args.walk_length, seed=args.seed)
# walks_val = deepwalks(edges, idx_val, undirected=True, number_walks=args.number_walks,
# walk_length=args.walk_length, seed=args.seed)
# walks_test = deepwalks(edges, idx_test, undirected=True, number_walks=args.number_walks,
# walk_length=args.walk_length, seed=args.seed)
# check_walk_length(walks_train, args.walk_length)
# check_walk_length(walks_val, args.walk_length)
# check_walk_length(walks_test, args.walk_length)
# some nodes dont have paths since their edges are not in train
# idx_train = torch.tensor(list(walks_train.keys()), dtype=torch.long).to(device)
# idx_val = torch.tensor(list(walks_val.keys()), dtype=torch.long).to(device)
# idx_test = torch.tensor(list(walks_test.keys()), dtype=torch.long).to(device)
idx_all = torch.tensor(range(n_nodes), dtype=torch.long).to(device)
idx_train = idx_train.to(device)
idx_val = idx_val.to(device)
idx_test = idx_test.to(device)
features = features.to(device)
labels = labels.to(device)
tmp = labels[:int(n_nodes*0.5)].detach().cpu().numpy()
unique, counts = np.unique(tmp, return_counts=True)
print(np.asarray((unique, counts)).T)
# conv model
# n_nodes, n_paths, dim, feature, d_model, d_inner, d_k, d_v, n_head, n_layers
model = Transformer(n_nodes, args.number_walks, args.walk_length, n_labels, fea_dim, features.to(device),
100, 100, 100, 100, 8, args.n_layers, True).to(device)
# model = recomm(transformer, 2, fea_dim, n_labels).to(device)
optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-3, lr=args.lr)
criterion = nn.CrossEntropyLoss()
best_epoch = train()
# start test
model.load_state_dict(torch.load('output/{}.pkl'.format(args.best_model)))
loss_test, acc_test = eval_on_test_data(idx_test, args.seed)
print("best epoch:", best_epoch)
print("Test set results:",
"loss= {:.4f}".format(loss_test.detach().cpu().item()),
"accuracy= {:.4f}".format(acc_test.detach().cpu().item()))
loss_test, acc_test = eval_on_test_data(idx_test, args.seed+100)
print("best epoch:", best_epoch)
print("Test set results:",
"loss= {:.4f}".format(loss_test.detach().cpu().item()),
"accuracy= {:.4f}".format(acc_test.detach().cpu().item()))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Compute the DSigma profiles for different lenses
"""
import os
import pickle
from astropy.table import Table
from jianbing import scatter
from jianbing import wlensing
TOPN_DIR = '/tigress/sh19/work/topn'
# Lensing data using medium photo-z quality cut
s16a_lensing = os.path.join(TOPN_DIR, 'prepare', 's16a_weak_lensing_medium.hdf5')
# Random
s16a_rand = Table.read(s16a_lensing, path='random')
# Pre-compute results using medium photo-z quality cut
s16a_precompute_med = os.path.join(
TOPN_DIR, 'precompute', 'topn_public_s16a_medium_precompute.hdf5')
# S16A HSC redMaPPer catalog
redm_hsc = Table.read(s16a_precompute_med, path='redm_hsc_specz')
redm_hsc_photoz = Table.read(s16a_precompute_med, path='redm_hsc')
# SDSS DR8 redMaPPer catalog
redm_sdss = Table.read(s16a_precompute_med, path='redm_sdss_specz')
# S16A HSC CAMIRA catalog
cam_s16a = Table.read(s16a_precompute_med, path='cam_s16a_specz')
cam_s16a_photoz = Table.read(s16a_precompute_med, path='cam_s16a')
# TopN bins
topn_bins = Table.read(
os.path.join(TOPN_DIR, 'precompute', 'topn_bins.fits'))
# Tablulated simulation results
sim_cat = Table.read(
os.path.join(TOPN_DIR, 'precompute', 'sim_merge_all_dsig.fits'))
n_rand = 200000
n_boot = 1000
n_jobs = 8
topn_clusters = {}
topn_clusters_sum = {}
# CAMIRA clusters; n_mem; use spec-z
mask = cam_s16a['flag'] > 0
topn_clusters['cam_s16a_n_mem'] = wlensing.gather_topn_dsigma_profiles(
cam_s16a, s16a_rand, topn_bins, 'n_mem', mask=mask, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['cam_s16a_n_mem'] = scatter.compare_model_dsigma(
topn_clusters['cam_s16a_n_mem'], sim_cat, model_err=False, poly=True, verbose=True)
topn_clusters['cam_s16a_n_mem_all'] = wlensing.gather_topn_dsigma_profiles(
cam_s16a, s16a_rand, topn_bins, 'n_mem', mask=None, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['cam_s16a_n_mem_all'] = scatter.compare_model_dsigma(
topn_clusters['cam_s16a_n_mem_all'], sim_cat, model_err=False, poly=True, verbose=True)
# CAMIRA clusters; logms; use spec-z
topn_clusters['cam_s16a_logms'] = wlensing.gather_topn_dsigma_profiles(
cam_s16a, s16a_rand, topn_bins, 'logms', mask=mask, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['cam_s16a_logms'] = scatter.compare_model_dsigma(
topn_clusters['cam_s16a_logms'], sim_cat, model_err=False, poly=True, verbose=True)
# CAMIRA clusters; n_mem; use photo-z
mask = cam_s16a_photoz['flag'] > 0
topn_clusters['cam_s16a_photoz_n_mem'] = wlensing.gather_topn_dsigma_profiles(
cam_s16a_photoz, s16a_rand, topn_bins, 'n_mem', mask=mask, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['cam_s16a_photoz_n_mem'] = scatter.compare_model_dsigma(
topn_clusters['cam_s16a_photoz_n_mem'], sim_cat, model_err=False, poly=True, verbose=True)
topn_clusters['cam_s16a_photoz_n_mem_all'] = wlensing.gather_topn_dsigma_profiles(
cam_s16a_photoz, s16a_rand, topn_bins, 'n_mem', mask=None, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['cam_s16a_photoz_n_mem_all'] = scatter.compare_model_dsigma(
topn_clusters['cam_s16a_photoz_n_mem_all'], sim_cat, model_err=False, poly=True, verbose=True)
# CAMIRA clusters; logms; use photo-z
topn_clusters['cam_s16a_photoz_logms'] = wlensing.gather_topn_dsigma_profiles(
cam_s16a_photoz, s16a_rand, topn_bins, 'logms', mask=mask, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['cam_s16a_photoz_logms'] = scatter.compare_model_dsigma(
topn_clusters['cam_s16a_photoz_logms'], sim_cat, model_err=False, poly=True, verbose=True)
# SDSS redMaPPer clusters; lambda; using spec-z
mask = redm_sdss['flag'] > 0
topn_clusters['redm_sdss_lambda'] = wlensing.gather_topn_dsigma_profiles(
redm_sdss, s16a_rand, topn_bins[0:2], 'lambda_cluster_redm', mask=mask, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['redm_sdss_lambda'] = scatter.compare_model_dsigma(
topn_clusters['redm_sdss_lambda'], sim_cat, model_err=False, poly=True, verbose=True)
topn_clusters['redm_sdss_lambda_all'] = wlensing.gather_topn_dsigma_profiles(
redm_sdss, s16a_rand, topn_bins[0:2], 'lambda_cluster_redm', mask=None, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['redm_sdss_lambda_all'] = scatter.compare_model_dsigma(
topn_clusters['redm_sdss_lambda_all'], sim_cat, model_err=False, poly=True, verbose=True)
# HSC redMaPPer clusters; lambda; using spec-z
mask = redm_hsc['flag'] > 0
topn_clusters['redm_hsc_lambda'] = wlensing.gather_topn_dsigma_profiles(
redm_hsc, s16a_rand, topn_bins, 'lambda', mask=mask, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['redm_hsc_lambda'] = scatter.compare_model_dsigma(
topn_clusters['redm_hsc_lambda'], sim_cat, model_err=False, poly=True, verbose=True)
topn_clusters['redm_hsc_lambda_all'] = wlensing.gather_topn_dsigma_profiles(
redm_hsc, s16a_rand, topn_bins, 'lambda', mask=None, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['redm_hsc_lambda_all'] = scatter.compare_model_dsigma(
topn_clusters['redm_hsc_lambda_all'], sim_cat, model_err=False, poly=True, verbose=True)
# HSC redMaPPer clusters; lambda; using photo-z
mask = redm_hsc_photoz['flag'] > 0
topn_clusters['redm_hsc_photoz_lambda'] = wlensing.gather_topn_dsigma_profiles(
redm_hsc_photoz, s16a_rand, topn_bins, 'lambda', mask=mask, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['redm_hsc_photoz_lambda'] = scatter.compare_model_dsigma(
topn_clusters['redm_hsc_photoz_lambda'], sim_cat, model_err=False, poly=True, verbose=True)
topn_clusters['redm_hsc_photoz_lambda_all'] = wlensing.gather_topn_dsigma_profiles(
redm_hsc_photoz, s16a_rand, topn_bins, 'lambda', mask=None, n_rand=n_rand, n_boot=n_boot,
verbose=True, n_jobs=n_jobs)
topn_clusters_sum['redm_hsc_photoz_lambda_all'] = scatter.compare_model_dsigma(
topn_clusters['redm_hsc_photoz_lambda_all'], sim_cat, model_err=False, poly=True, verbose=True)
pickle.dump(
topn_clusters, open(os.path.join(TOPN_DIR, 'topn_clusters.pkl'), "wb"))
pickle.dump(
topn_clusters_sum, open(os.path.join(TOPN_DIR, 'topn_clusters_sum.pkl'), "wb"))
|
nilq/baby-python
|
python
|
"""
File name: __init__.py
Author: Lukas Müller
Python Version: 3.6
"""
from .lfsr import LFSR
from .fsr_function import FSRFunction
from .nlfsr import NLFSR
from .tools import logical_and, logical_xor
name = "pyfsr"
version = "1.0"
|
nilq/baby-python
|
python
|
from unittest import TestCase
from hamcrest import assert_that, is_
from util.string_util import replace_not_alphanumeric, normalize, remove_multi_spaces, create_filename, replace_numeric, \
contains_numeric
class TestStringUtils(TestCase):
def test_remove_multi_spaces(self):
assert_that(remove_multi_spaces('foo bar'), is_('foo bar'))
assert_that(remove_multi_spaces(' foo bar '), is_('foo bar'))
assert_that(remove_multi_spaces(' foo bar '), is_('foo bar'))
def test_replace_non_alphanumeric(self):
assert_that(replace_not_alphanumeric('a$b€c?d!e. fG'), is_('abcde fG'))
def test_replace_numeric(self):
assert_that(replace_numeric('foo 123 bar'), is_('foo ### bar'))
def test_contains_numeric(self):
assert_that(contains_numeric('abc'), is_(False))
assert_that(contains_numeric('ab3c'), is_(True))
def test_create_filename(self):
assert_that(create_filename('a$b€c?d!e. fG'), is_('abcde_fg'))
def test_normalize(self):
assert_that(normalize(' Mäßigung! Please 123 '), is_('massigung please 123'))
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from django.urls import include
from django.views.generic import TemplateView
import django_cas_ng.views
urlpatterns = [
path("admin/", admin.site.urls),
path(
"operations/",
include(("operations.urls", "operations"), namespace="operations"),
),
path("", include(("users.urls", "users"), namespace="users")),
path(
"accounts/cerbere-login",
django_cas_ng.views.LoginView.as_view(),
name="cas_ng_login",
),
path(
"accounts/cerbere-logout",
django_cas_ng.views.LogoutView.as_view(),
name="cas_ng_logout",
),
path("cgu", TemplateView.as_view(template_name="editorial/cgu.html"), name="cgu"),
path(
"accessibilite",
TemplateView.as_view(template_name="editorial/accessibilite.html"),
name="accessibilite",
),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
nilq/baby-python
|
python
|
from flask import Flask, g, render_template, flash, redirect, url_for, abort, send_file, request, jsonify
from flask_bcrypt import check_password_hash
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
from flask_admin import Admin, AdminIndexView, BaseView, expose
from flask_admin.contrib.peewee import ModelView
import forms
import models
class MyHomeView(AdminIndexView):
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
class MyModelView(ModelView):
def is_accessible(self):
return current_user.is_authenticated and current_user.is_admin
DEBUG = True
PORT = 8000
HOST = '0.0.0.0'
app = Flask(__name__)
app.config.from_object(__name__)
app.secret_key = 'joimnasdf*&@)JOINSf*@ih89f2n'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
counter = 0
newcounter = 0
@login_manager.user_loader
def load_user(userid):
try:
return models.User.get(models.User.id == int(userid))
except models.DoesNotExist:
return None
@app.before_request
def before_request():
g.db = models.DATABASE
g.db.connect()
g.user = current_user
@app.after_request
def after_request(response):
g.db.close()
return response
@app.route('/')
@login_required
def index():
flash("people checked in")
flash(counter)
flash("new people regisetered")
flash(newcounter)
return render_template('index.html')
@app.route('/login', methods=('GET', 'POST'))
def login():
form = forms.LoginForm()
if form.validate_on_submit():
try:
user = models.User.select().where(
models.User.email ** form.email.data
).get()
if check_password_hash(user.password, form.password.data):
login_user(user)
flash("You've been logged in!", "success")
return redirect(url_for('index'))
else:
flash("Email or password is invalid")
except models.DoesNotExist:
flash("Email or password is invalid")
return render_template('login.html', form=form)
@app.route('/signin', methods=('GET','POST'))
def signIn():
form = forms.SigninForm()
if form.validate_on_submit():
models.Check.create(phoneNumber=form.phoneNumber.data)
try:
user = models.Taco.select().where(
models.Taco.phoneNumber ** form.phoneNumber.data).get()
flash("Welcome to Temple Library", "Success")
global counter
counter += 1
return redirect(url_for('signIn'))
except models.DoesNotExist:
flash("We can't find you on System, Please fill out this form")
return redirect(url_for('new_taco'))
return render_template('signin.html',form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
flash("You've been logged out! Come back soon!", "success")
return redirect(url_for('index'))
@app.route('/register', methods=('GET', 'POST'))
def register():
form = forms.RegisterForm()
if form.validate_on_submit():
flash("Yay, you registered!", "success")
models.User.create_user(
email=form.email.data,
password=form.password.data
)
return redirect(url_for('login'))
return render_template('register.html', form=form)
@app.route('/signup', methods=('GET', 'POST'))
def new_taco():
form = forms.TacoForm()
if form.validate_on_submit():
flash("Welcome to Temple Library", "success")
models.Taco.create(user=g.user._get_current_object(),
phoneNumber=form.phoneNumber.data,
fullName=form.fullName.data,
member=form.member.data)
global newcounter
newcounter += 1
return redirect(url_for('signIn'))
return render_template('signup.html', form=form)
if __name__ == '__main__':
models.initialize()
admin = Admin(app, name="Temple")
admin.add_view(MyModelView(models.User))
admin.add_view(MyModelView(models.Taco, name="Member Info"))
admin.add_view(MyModelView(models.Check))
try:
models.User.create_user(
email='rughaniarpan@gmail.com',
password='password',
admin = True
)
except ValueError:
pass
app.run(debug=DEBUG, host=HOST, port=PORT)
|
nilq/baby-python
|
python
|
'''
Recursion
We're going to take a break from our tour of data structures in order to look at the concept of recursion. Recursion is going to be a useful tool for solving some of the problems we'll be tackling later on, and this is a good place to introduce it and get some practice using it with the data structures we're reviewing.
When you hear the terms recursion or recursive, this might remind you of the terms repetition and repetitive—and this is a good connection, because recursion does indeed involve repetition. However, recursion isn't just about repetition.
With recursion, we solve a problem by first solving smaller instances of the same problem. In practice, this often involves calling a function from within itself—in other words, we feed some input into the function, and the function produces some output—which we then feed back into the same function. And we continue to do this until we arrive at the solution.
https://youtu.be/_aI2Jch6Epk
https://youtu.be/ioDP7ndd40Y
'''
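# A minimal sketch of the idea described above (an illustrative example, not
# part of the original lesson): each call works on a smaller instance of the
# same problem until it reaches a base case.
def sum_list(items):
    if not items:  # base case: the sum of an empty list is 0
        return 0
    # recursive case: first element plus the sum of the smaller remaining list
    return items[0] + sum_list(items[1:])

# sum_list([1, 2, 3]) -> 1 + sum_list([2, 3]) -> 1 + 2 + sum_list([3]) -> 6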
|
nilq/baby-python
|
python
|
"""Tests for the fixes of CNRM-CM6-1."""
import os
import iris
import numpy as np
import pytest
from netCDF4 import Dataset
from esmvalcore.cmor._fixes.cmip6.cnrm_cm6_1 import Cl, Clcalipso, Cli, Clw
from esmvalcore.cmor.fix import Fix
from esmvalcore.cmor.table import get_var_info
@pytest.fixture
def cl_file(tmp_path):
"""Create netcdf file with similar issues as ``cl``."""
nc_path = os.path.join(tmp_path, 'cnrm_cm6_1_cl.nc')
dataset = Dataset(nc_path, mode='w')
dataset.createDimension('time', size=1)
dataset.createDimension('lev', size=3)
dataset.createDimension('lat', size=2)
dataset.createDimension('lon', size=2)
dataset.createDimension('bnds', size=2)
# Dimensional variables
dataset.createVariable('time', np.float64, dimensions=('time',))
dataset.createVariable('lev', np.float64, dimensions=('lev',))
dataset.createVariable('lev_bnds', np.float64, dimensions=('lev', 'bnds'))
dataset.createVariable('lat', np.float64, dimensions=('lat',))
dataset.createVariable('lon', np.float64, dimensions=('lon',))
dataset.variables['time'][:] = [0.0]
dataset.variables['time'].standard_name = 'time'
dataset.variables['time'].units = 'days since 6543-2-1'
dataset.variables['lev'][:] = [1.0, 2.0, 4.0]
dataset.variables['lev'].standard_name = (
'atmosphere_hybrid_sigma_pressure_coordinate')
dataset.variables['lev'].bounds = 'lev_bnds'
dataset.variables['lev'].units = '1'
dataset.variables['lev'].formula_term = (
'ap: ap b: b ps: ps') # Error in attribute intended
dataset.variables['lev_bnds'][:] = [[0.5, 1.5], [1.5, 3.0], [3.0, 5.0]]
dataset.variables['lev_bnds'].standard_name = (
'atmosphere_hybrid_sigma_pressure_coordinate')
dataset.variables['lev_bnds'].units = '1'
dataset.variables['lev_bnds'].formula_term = (
'ap: ap b: b ps: ps') # Error in attribute intended
dataset.variables['lat'][:] = [-30.0, 0.0]
dataset.variables['lat'].standard_name = 'latitude'
dataset.variables['lat'].units = 'degrees_north'
dataset.variables['lon'][:] = [30.0, 60.0]
dataset.variables['lon'].standard_name = 'longitude'
dataset.variables['lon'].units = 'degrees_east'
# Coordinates for derivation of pressure coordinate
# Wrong shape of bounds is intended
dataset.createVariable('ap', np.float64, dimensions=('lev',))
dataset.createVariable('ap_bnds', np.float64, dimensions=('bnds', 'lev'))
dataset.createVariable('b', np.float64, dimensions=('lev',))
dataset.createVariable('b_bnds', np.float64, dimensions=('bnds', 'lev'))
dataset.createVariable('ps', np.float64,
dimensions=('time', 'lat', 'lon'))
dataset.variables['ap'][:] = [1.0, 2.0, 5.0]
dataset.variables['ap_bnds'][:] = [[0.0, 1.5, 1.5], [3.0, 3.0, 6.0]]
dataset.variables['b'][:] = [0.0, 1.0, 3.0]
dataset.variables['b_bnds'][:] = [[-1.0, 0.5, 0.5], [2.0, 2.0, 5.0]]
dataset.variables['ps'][:] = np.arange(1 * 2 * 2).reshape(1, 2, 2)
dataset.variables['ps'].standard_name = 'surface_air_pressure'
dataset.variables['ps'].units = 'Pa'
# Cl variable
dataset.createVariable('cl', np.float32,
dimensions=('time', 'lev', 'lat', 'lon'))
dataset.variables['cl'][:] = np.full((1, 3, 2, 2), 0.0, dtype=np.float32)
dataset.variables['cl'].standard_name = (
'cloud_area_fraction_in_atmosphere_layer')
dataset.variables['cl'].units = '%'
dataset.close()
return nc_path
def test_get_cl_fix():
"""Test getting of fix."""
fix = Fix.get_fixes('CMIP6', 'CNRM-CM6-1', 'Amon', 'cl')
assert fix == [Cl(None)]
AIR_PRESSURE_POINTS = np.array([[[[1.0, 1.0],
[1.0, 1.0]],
[[2.0, 3.0],
[4.0, 5.0]],
[[5.0, 8.0],
[11.0, 14.0]]]])
AIR_PRESSURE_BOUNDS = np.array([[[[[0.0, 1.5],
[-1.0, 2.0]],
[[-2.0, 2.5],
[-3.0, 3.0]]],
[[[1.5, 3.0],
[2.0, 5.0]],
[[2.5, 7.0],
[3.0, 9.0]]],
[[[3.0, 6.0],
[5.0, 11.0]],
[[7.0, 16.0],
[9.0, 21.0]]]]])
def test_cl_fix_metadata(cl_file):
"""Test ``fix_metadata`` for ``cl``."""
cubes = iris.load(cl_file)
# Raw cubes
assert len(cubes) == 6
var_names = [cube.var_name for cube in cubes]
assert 'cl' in var_names
assert 'ap' in var_names
assert 'ap_bnds' in var_names
assert 'b' in var_names
assert 'b_bnds' in var_names
assert 'ps' in var_names
# Raw cl cube
cl_cube = cubes.extract_strict('cloud_area_fraction_in_atmosphere_layer')
assert not cl_cube.coords('air_pressure')
# Apply fix
vardef = get_var_info('CMIP6', 'Amon', 'cl')
fix = Cl(vardef)
fixed_cubes = fix.fix_metadata(cubes)
assert len(fixed_cubes) == 1
fixed_cl_cube = fixed_cubes.extract_strict(
'cloud_area_fraction_in_atmosphere_layer')
fixed_air_pressure_coord = fixed_cl_cube.coord('air_pressure')
assert fixed_air_pressure_coord.points is not None
assert fixed_air_pressure_coord.bounds is not None
assert fixed_air_pressure_coord.points.shape == (1, 3, 2, 2)
assert fixed_air_pressure_coord.bounds.shape == (1, 3, 2, 2, 2)
np.testing.assert_allclose(fixed_air_pressure_coord.points,
AIR_PRESSURE_POINTS)
np.testing.assert_allclose(fixed_air_pressure_coord.bounds,
AIR_PRESSURE_BOUNDS)
lat_coord = fixed_cl_cube.coord('latitude')
lon_coord = fixed_cl_cube.coord('longitude')
assert lat_coord.bounds is not None
assert lon_coord.bounds is not None
np.testing.assert_allclose(lat_coord.bounds,
[[-45.0, -15.0], [-15.0, 15.0]])
np.testing.assert_allclose(lon_coord.bounds,
[[15.0, 45.0], [45.0, 75.0]])
def test_get_clcalipso_fix():
"""Test getting of fix."""
fix = Fix.get_fixes('CMIP6', 'CNRM-CM6-1', 'CFmon', 'clcalipso')
assert fix == [Clcalipso(None)]
@pytest.fixture
def clcalipso_cubes():
"""Cubes to test fix for ``clcalipso``."""
alt_40_coord = iris.coords.DimCoord([0.0], var_name='alt40')
cube = iris.cube.Cube([0.0], var_name='clcalipso',
dim_coords_and_dims=[(alt_40_coord.copy(), 0)])
x_cube = iris.cube.Cube([0.0], var_name='x',
dim_coords_and_dims=[(alt_40_coord.copy(), 0)])
return iris.cube.CubeList([cube, x_cube])
def test_clcalipso_fix_metadata(clcalipso_cubes):
"""Test ``fix_metadata`` for ``clcalipso``."""
vardef = get_var_info('CMIP6', 'CFmon', 'clcalipso')
fix = Clcalipso(vardef)
cubes = fix.fix_metadata(clcalipso_cubes)
assert len(cubes) == 1
cube = cubes[0]
coord = cube.coord('altitude')
assert coord.standard_name == 'altitude'
def test_get_cli_fix():
"""Test getting of fix."""
fix = Fix.get_fixes('CMIP6', 'CNRM-CM6-1', 'Amon', 'cli')
assert fix == [Cli(None)]
def test_cli_fix():
"""Test fix for ``cli``."""
assert Cli is Cl
def test_get_clw_fix():
"""Test getting of fix."""
fix = Fix.get_fixes('CMIP6', 'CNRM-CM6-1', 'Amon', 'clw')
assert fix == [Clw(None)]
def test_clw_fix():
"""Test fix for ``clw``."""
assert Clw is Cl
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Author: Markus Ritschel
# eMail: kontakt@markusritschel.de
# Date: 03/04/2019
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
from __future__ import division, print_function, absolute_import
from mpi_icelab_routines.arduino import read_arduino
from mpi_icelab_routines.ctds import read_ctd
from mpi_icelab_routines.harps.salinity_harps import read as read_harp
from mpi_icelab_routines.licor import read_licor
|
nilq/baby-python
|
python
|
from .code_actions import actions_manager
from .code_actions import CodeActionOrCommand
from .core.logging import debug
from .core.protocol import Diagnostic
from .core.protocol import Request
from .core.registry import LspTextCommand
from .core.registry import windows
from .core.sessions import SessionBufferProtocol
from .core.settings import userprefs
from .core.typing import List, Optional, Any, Dict, Tuple, Sequence
from .core.views import diagnostic_severity
from .core.views import format_diagnostic_for_html
from .core.views import first_selection_region
from .core.views import FORMAT_MARKED_STRING, FORMAT_MARKUP_CONTENT, minihtml
from .core.views import make_command_link
from .core.views import make_link
from .core.views import show_lsp_popup
from .core.views import text_document_position_params
from .core.views import update_lsp_popup
from .core.windows import AbstractViewListener
import functools
import sublime
import webbrowser
SUBLIME_WORD_MASK = 515
_test_contents = [] # type: List[str]
class LinkKind:
__slots__ = ("lsp_name", "label", "subl_cmd_name", "supports_side_by_side")
def __init__(self, lsp_name: str, label: str, subl_cmd_name: str, supports_side_by_side: bool) -> None:
self.lsp_name = lsp_name
self.label = label
self.subl_cmd_name = subl_cmd_name
self.supports_side_by_side = supports_side_by_side
def link(self, point: int, view: sublime.View) -> str:
args = {'point': point}
link = make_command_link(self.subl_cmd_name, self.label, args, None, view)
if self.supports_side_by_side:
args['side_by_side'] = True
link += ' ' + make_command_link(self.subl_cmd_name, '◨', args, 'icon', view)
return link
link_kinds = [
LinkKind("definition", "Definition", "lsp_symbol_definition", True),
LinkKind("typeDefinition", "Type Definition", "lsp_symbol_type_definition", True),
LinkKind("declaration", "Declaration", "lsp_symbol_declaration", True),
LinkKind("implementation", "Implementation", "lsp_symbol_implementation", True),
LinkKind("references", "References", "lsp_symbol_references", False),
LinkKind("rename", "Rename", "lsp_symbol_rename", False),
]
class LspHoverCommand(LspTextCommand):
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self._base_dir = None # type: Optional[str]
def run(
self,
edit: sublime.Edit,
only_diagnostics: bool = False,
point: Optional[int] = None,
event: Optional[dict] = None
) -> None:
temp_point = point
if temp_point is None:
region = first_selection_region(self.view)
if region is not None:
temp_point = region.begin()
if temp_point is None:
return
window = self.view.window()
if not window:
return
hover_point = temp_point
wm = windows.lookup(window)
self._base_dir = wm.get_project_path(self.view.file_name() or "")
self._hover = None # type: Optional[Any]
self._actions_by_config = {} # type: Dict[str, List[CodeActionOrCommand]]
self._diagnostics_by_config = [] # type: Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]]
# TODO: For code actions it makes more sense to use the whole selection under mouse (if available)
# rather than just the hover point.
def run_async() -> None:
listener = wm.listener_for_view(self.view)
if not listener:
return
if not only_diagnostics:
self.request_symbol_hover_async(listener, hover_point)
self._diagnostics_by_config, covering = listener.diagnostics_touching_point_async(hover_point)
if self._diagnostics_by_config:
if not only_diagnostics:
actions_manager.request_with_diagnostics_async(
self.view, covering, self._diagnostics_by_config,
functools.partial(self.handle_code_actions, listener, hover_point))
self.show_hover(listener, hover_point, only_diagnostics)
sublime.set_timeout_async(run_async)
def request_symbol_hover_async(self, listener: AbstractViewListener, point: int) -> None:
session = listener.session('hoverProvider', point)
if session:
document_position = text_document_position_params(self.view, point)
session.send_request_async(
Request("textDocument/hover", document_position, self.view),
lambda response: self.handle_response(listener, response, point))
def handle_code_actions(
self,
listener: AbstractViewListener,
point: int,
responses: Dict[str, List[CodeActionOrCommand]]
) -> None:
self._actions_by_config = responses
self.show_hover(listener, point, only_diagnostics=False)
def handle_response(self, listener: AbstractViewListener, response: Optional[Any], point: int) -> None:
self._hover = response
self.show_hover(listener, point, only_diagnostics=False)
def provider_exists(self, listener: AbstractViewListener, link: LinkKind) -> bool:
return bool(listener.session('{}Provider'.format(link.lsp_name)))
def symbol_actions_content(self, listener: AbstractViewListener, point: int) -> str:
if userprefs().show_symbol_action_links:
actions = [lk.link(point, self.view) for lk in link_kinds if self.provider_exists(listener, lk)]
if actions:
return '<div class="actions">' + " | ".join(actions) + "</div>"
return ""
def diagnostics_content(self) -> str:
formatted = []
for sb, diagnostics in self._diagnostics_by_config:
by_severity = {} # type: Dict[int, List[str]]
formatted.append('<div class="diagnostics">')
for diagnostic in diagnostics:
by_severity.setdefault(diagnostic_severity(diagnostic), []).append(
format_diagnostic_for_html(self.view, diagnostic, self._base_dir))
for items in by_severity.values():
formatted.extend(items)
config_name = sb.session.config.name
if config_name in self._actions_by_config:
action_count = len(self._actions_by_config[config_name])
if action_count > 0:
href = "{}:{}".format('code-actions', config_name)
text = "choose code action ({} available)".format(action_count)
formatted.append('<div class="actions">[{}] {}</div>'.format(config_name, make_link(href, text)))
formatted.append("</div>")
return "".join(formatted)
def hover_content(self) -> str:
content = (self._hover.get('contents') or '') if isinstance(self._hover, dict) else ''
return minihtml(self.view, content, allowed_formats=FORMAT_MARKED_STRING | FORMAT_MARKUP_CONTENT)
def show_hover(self, listener: AbstractViewListener, point: int, only_diagnostics: bool) -> None:
sublime.set_timeout(lambda: self._show_hover(listener, point, only_diagnostics))
def _show_hover(self, listener: AbstractViewListener, point: int, only_diagnostics: bool) -> None:
contents = self.diagnostics_content() + self.hover_content()
if contents and not only_diagnostics:
contents += self.symbol_actions_content(listener, point)
_test_contents.clear()
_test_contents.append(contents) # for testing only
if contents:
if self.view.is_popup_visible():
update_lsp_popup(self.view, contents)
else:
show_lsp_popup(
self.view,
contents,
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point,
on_navigate=lambda href: self._on_navigate(href, point))
def _on_navigate(self, href: str, point: int) -> None:
if href.startswith("subl:"):
pass
elif href.startswith('code-actions:'):
_, config_name = href.split(":")
titles = [command["title"] for command in self._actions_by_config[config_name]]
self.view.run_command("lsp_selection_set", {"regions": [(point, point)]})
window = self.view.window()
if window:
window.show_quick_panel(titles, lambda i: self.handle_code_action_select(config_name, i),
placeholder="Code actions")
elif href.startswith("location:"):
window = self.view.window()
if window:
window.open_file(href[len("location:"):], flags=sublime.ENCODED_POSITION)
else:
# NOTE: Remove this check when on py3.8.
if not (href.lower().startswith("http://") or href.lower().startswith("https://")):
href = "http://" + href
if not webbrowser.open(href):
debug("failed to open:", href)
def handle_code_action_select(self, config_name: str, index: int) -> None:
if index > -1:
def run_async() -> None:
session = self.session_by_name(config_name)
if session:
session.run_code_action_async(self._actions_by_config[config_name][index], progress=True)
sublime.set_timeout_async(run_async)
|
nilq/baby-python
|
python
|
from django import forms
#from pagedown.widgets import PagedownWidget
from apps.saas.models import Offer
class OfferForm(forms.ModelForm):
#content= forms.CharField(widget=PagedownWidget(show_preview=False))
#publish= forms.DateField(widget=forms.SelectDateWidget)
class Meta:
model = Offer
fields= [
"tipo_venta",
"financing",
"hardware",
"empleados",
"modulos",
]
|
nilq/baby-python
|
python
|
from pathlib import Path, PurePath
import re
import json
import copy
import logging
from sc2_tournament_analysis.defaults import standard_player_match
from sc2_tournament_analysis.handle_replay import handle_replay
def recursive_parse(
*,
sub_dir=None,
data_function,
player_match=None,
identifier_rules=[],
multi=False,
):
"""
Function that recurses through directories
to find replay files and then parse them
"""
logging.basicConfig(filename='recursive_parse.log', level=logging.DEBUG)
path = Path().absolute() / sub_dir
match_info_paths = []
global_match_info = []
if player_match is None:
player_match = standard_player_match
elif player_match is False:
player_match = []
def check_dir_name(path_str, player_names, identifiers):
# See if dir is an identifier
try:
for rule_name, rule in identifier_rules:
match = re.search(rule, path_str)
if match and (rule_name, match.group()) not in identifiers:
identifiers.append((rule_name, match.group()))
break
except ValueError as error:
logging.critical('Error: rule does not follow format (<name>, <rule>)')
logging.critical(f'{error}\n')
return
# Regex to parse player names from dir name
current_name_str = path_str
for rule, rule_type in player_match:
if not current_name_str:
break
if rule_type == 'search':
current_name_str = re.search(rule, current_name_str)
if current_name_str:
current_name_str = current_name_str.group()
elif rule_type == 'split':
current_name_str = re.split(rule, current_name_str)
        if current_name_str and isinstance(current_name_str, list):
player_names = current_name_str
else:
player_names = None
return player_names
def recurse(path, player_names=[], identifiers=[]):
if path.is_dir():
logging.debug(f'In dir: {PurePath(path).name}')
logging.debug(f'Path: {path}\n')
current_path_str = PurePath(path).name
result = check_dir_name(
current_path_str, player_names, identifiers
)
if result:
player_names = result
# iterate through subdirectories and recurse
for item in path.iterdir():
item_path_str = PurePath(item).name
item_identifiers = copy.deepcopy(identifiers)
result = check_dir_name(
item_path_str, player_names, item_identifiers
)
if result:
player_names = result
# if dir, recurse
recurse(item, player_names, item_identifiers)
elif path.is_file():
logging.debug(f'Found file: {PurePath(path).name}')
logging.debug(path)
for index, p in enumerate(player_names):
logging.debug(f'Player {index}: {p}\n')
logging.debug('\n')
if multi:
match_info_paths.append((path, player_names, identifiers))
else:
match_info = handle_replay(
path,
player_names,
identifiers,
data_function=data_function,
player_match=player_match,
)
global_match_info.extend(match_info)
else:
logging.error('Error: Not a file or directory')
recurse(path)
if multi:
return match_info_paths
with open('match_info.json', 'w', encoding='utf-8') as output:
json.dump({'match_info': global_match_info}, output)
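# A hypothetical usage sketch (the 'replays' directory name and the
# data_function signature below are illustrative assumptions, not part of
# this module):
if __name__ == '__main__':
    def collect_everything(*args, **kwargs):
        # Assumption: handle_replay forwards parsed replay data here; we
        # simply record whatever arrives so the output JSON can be inspected.
        return [{'args': repr(args), 'kwargs': repr(kwargs)}]

    recursive_parse(sub_dir='replays', data_function=collect_everything)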
|
nilq/baby-python
|
python
|
# Interpretable CNN for Big Five personality traits using audio data #
# Parameter initialization #
NUM_FRAMES = 208 # No. of frames for the summary spectrogram.
NUM_BANDS = 64 # No. of frequency bands for the mel-spectrogram.
# Model hyperparameters.
INIT_STDDEV = 0.01 # Standard deviation used to initialize weights.
LEARNING_RATE = 1e-4 # Learning rate for the Adam optimizer.
ADAM_EPSILON = 1e-8 # Epsilon for the Adam optimizer.
No_of_Epochs = 50 # No. of epochs for training the model.
NUM_CLASSES = 5 # No. of output classes.
kernel_size_x = 26 # x-axis dimension for the GAP layer.
kernel_size_y = 8 # y-axis dimension for the GAP layer.
nilq/baby-python
|
python
|
fichier = open("/run/media/Thytu/TOSHIBA EXT/PoC/Smartshark/DS/test_ds_DDOS.csv", "r")
pos = 13  # index of the column to flatten
def flat_line(line, target):
index = 0
pos = 0
for l in line:
print(l, end="")
pos += 1
if (l == ','):
index += 1
if index == target:
                break
line = line[pos:]
save = pos
pos = 0
index = 0
for l in line:
pos += 1
if (l == ',' or l == '\0' or l == '\n'):
            break
    ret = line[:pos - 1]
    # Print the parsed value, capping oversized numbers and defaulting to 0.0
    if len(ret) > 1 and float(ret) <= 9999999999999999999.9:
        print(float(ret), end="")
    elif len(ret) > 1:
        print(",", float(99999999999999), end="")
    else:
        print(",", float("0.0"), end="")
print(line[pos - 1:], end="")
for line in fichier:
flat_line(line, pos)
|
nilq/baby-python
|
python
|
class Attachment:
def __init__(
self,
id,
filename,
size,
url,
proxy_url,
**kwargs
):
self.id = id
self.filename = filename
self.size = size
self.url = url
self.proxy_url = proxy_url
        if 'height' in kwargs and 'width' in kwargs:
self.height = kwargs['height']
self.width = kwargs['width']
|
nilq/baby-python
|
python
|
from pathlib import Path
from tempfile import NamedTemporaryFile
import numpy as np
import pandas as pd
import pytest
from etna.datasets import generate_ar_df
@pytest.fixture
def base_pipeline_yaml_path():
tmp = NamedTemporaryFile("w")
tmp.write(
"""
_target_: etna.pipeline.Pipeline
horizon: 4
model:
_target_: etna.models.CatBoostModelMultiSegment
transforms:
- _target_: etna.transforms.LinearTrendTransform
in_column: target
- _target_: etna.transforms.SegmentEncoderTransform
"""
)
tmp.flush()
yield Path(tmp.name)
tmp.close()
@pytest.fixture
def base_pipeline_omegaconf_path():
tmp = NamedTemporaryFile("w")
tmp.write(
"""
_target_: etna.pipeline.Pipeline
horizon: 4
model:
_target_: etna.models.CatBoostModelMultiSegment
transforms:
- _target_: etna.transforms.LinearTrendTransform
in_column: target
- _target_: etna.transforms.SegmentEncoderTransform
- _target_: etna.transforms.LagTransform
in_column: target
lags: "${shift:${horizon},[1, 2, 4]}"
"""
)
tmp.flush()
yield Path(tmp.name)
tmp.close()
@pytest.fixture
def base_timeseries_path():
df = generate_ar_df(periods=100, start_time="2021-06-01", n_segments=2)
tmp = NamedTemporaryFile("w")
df.to_csv(tmp, index=False)
tmp.flush()
yield Path(tmp.name)
tmp.close()
@pytest.fixture
def base_timeseries_exog_path():
df_regressors = pd.DataFrame(
{
"timestamp": list(pd.date_range("2021-06-01", periods=120)) * 2,
"regressor_1": np.arange(240),
"regressor_2": np.arange(240) + 5,
"segment": ["segment_0"] * 120 + ["segment_1"] * 120,
}
)
tmp = NamedTemporaryFile("w")
df_regressors.to_csv(tmp, index=False)
tmp.flush()
yield Path(tmp.name)
tmp.close()
@pytest.fixture
def base_forecast_omegaconf_path():
tmp = NamedTemporaryFile("w")
tmp.write(
"""
prediction_interval: true
quantiles: [0.025, 0.975]
n_folds: 3
"""
)
tmp.flush()
yield Path(tmp.name)
tmp.close()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='message.proto',
package='com.akolar.maxaltitude',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\rmessage.proto\x12\x16\x63om.akolar.maxaltitude\"\xed\x07\n\x0e\x41ircraftBeacon\x12\x14\n\x0cmessage_from\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65stto\x18\x02 \x01(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\t\x12\x10\n\x08latitude\x18\x04 \x01(\x01\x12\x11\n\tlongitude\x18\x05 \x01(\x01\x12\x10\n\x08\x61ltitude\x18\x16 \x01(\x02\x12\x10\n\x08receiver\x18\x06 \x01(\t\x12\x0b\n\x03uid\x18\x07 \x01(\t\x12\x0f\n\x07stealth\x18\x08 \x01(\x08\x12\x14\n\x0c\x64o_not_track\x18\t \x01(\x08\x12\x13\n\x0braw_message\x18\x13 \x01(\t\x12\x0f\n\x07relayer\x18\n \x01(\t\x12\x0f\n\x07heading\x18\x0b \x01(\r\x12\x14\n\x0cground_speed\x18\x0c \x01(\x02\x12\x16\n\x0evertical_speed\x18\r \x01(\x02\x12\x11\n\tturn_rate\x18\x0e \x01(\x02\x12\x1d\n\x15signal_to_noise_ratio\x18\x0f \x01(\x02\x12\x13\n\x0b\x65rror_count\x18\x10 \x01(\r\x12\x13\n\x0b\x66req_offset\x18\x11 \x01(\x02\x12J\n\raircraft_type\x18\x14 \x01(\x0e\x32\x33.com.akolar.maxaltitude.AircraftBeacon.AircraftType\x12H\n\x0c\x61\x64\x64ress_type\x18\x15 \x01(\x0e\x32\x32.com.akolar.maxaltitude.AircraftBeacon.AddressType\x12\x43\n\x0bgps_quality\x18\x12 \x01(\x0b\x32..com.akolar.maxaltitude.AircraftBeacon.Quality\x1a/\n\x07Quality\x12\x12\n\nhorizontal\x18\x01 \x01(\r\x12\x10\n\x08vertical\x18\x02 \x01(\r\"\x80\x02\n\x0c\x41ircraftType\x12\x14\n\x10UNKNOWN_AIRPLANE\x10\x00\x12\n\n\x06GLIDER\x10\x01\x12\r\n\tTOW_PLANE\x10\x02\x12\x19\n\x15HELICOPTER_ROTORCRAFT\x10\x03\x12\r\n\tPARACHUTE\x10\x04\x12\x0e\n\nDROP_PLANE\x10\x05\x12\x0f\n\x0bHANG_GLIDER\x10\x06\x12\x0e\n\nPARAGLIDER\x10\x07\x12\x14\n\x10POWERED_AIRCRAFT\x10\x08\x12\x10\n\x0cJET_AIRCRAFT\x10\t\x12\x07\n\x03UFO\x10\n\x12\n\n\x06\x42\x41LOON\x10\x0b\x12\x0b\n\x07\x41IRSHIP\x10\x0c\x12\x07\n\x03UAV\x10\r\x12\x11\n\rSTATIC_OBJECT\x10\x0f\"U\n\x0b\x41\x64\x64ressType\x12\x13\n\x0fUNKNOWN_ADDRESS\x10\x00\x12\x08\n\x04ICAO\x10\x01\x12\t\n\x05\x46LARM\x10\x02\x12\x0f\n\x0bOGN_TRACKER\x10\x03\x12\x0b\n\x07NAVITER\x10\x04\x62\x06proto3')
)
_AIRCRAFTBEACON_AIRCRAFTTYPE = _descriptor.EnumDescriptor(
name='AircraftType',
full_name='com.akolar.maxaltitude.AircraftBeacon.AircraftType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_AIRPLANE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GLIDER', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOW_PLANE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HELICOPTER_ROTORCRAFT', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PARACHUTE', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DROP_PLANE', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HANG_GLIDER', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PARAGLIDER', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POWERED_AIRCRAFT', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JET_AIRCRAFT', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UFO', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BALOON', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AIRSHIP', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UAV', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATIC_OBJECT', index=14, number=15,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=704,
serialized_end=960,
)
_sym_db.RegisterEnumDescriptor(_AIRCRAFTBEACON_AIRCRAFTTYPE)
_AIRCRAFTBEACON_ADDRESSTYPE = _descriptor.EnumDescriptor(
name='AddressType',
full_name='com.akolar.maxaltitude.AircraftBeacon.AddressType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_ADDRESS', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ICAO', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLARM', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OGN_TRACKER', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NAVITER', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=962,
serialized_end=1047,
)
_sym_db.RegisterEnumDescriptor(_AIRCRAFTBEACON_ADDRESSTYPE)
_AIRCRAFTBEACON_QUALITY = _descriptor.Descriptor(
name='Quality',
full_name='com.akolar.maxaltitude.AircraftBeacon.Quality',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='horizontal', full_name='com.akolar.maxaltitude.AircraftBeacon.Quality.horizontal', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vertical', full_name='com.akolar.maxaltitude.AircraftBeacon.Quality.vertical', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=654,
serialized_end=701,
)
_AIRCRAFTBEACON = _descriptor.Descriptor(
name='AircraftBeacon',
full_name='com.akolar.maxaltitude.AircraftBeacon',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message_from', full_name='com.akolar.maxaltitude.AircraftBeacon.message_from', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='destto', full_name='com.akolar.maxaltitude.AircraftBeacon.destto', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp', full_name='com.akolar.maxaltitude.AircraftBeacon.timestamp', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='latitude', full_name='com.akolar.maxaltitude.AircraftBeacon.latitude', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='longitude', full_name='com.akolar.maxaltitude.AircraftBeacon.longitude', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='altitude', full_name='com.akolar.maxaltitude.AircraftBeacon.altitude', index=5,
number=22, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='receiver', full_name='com.akolar.maxaltitude.AircraftBeacon.receiver', index=6,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uid', full_name='com.akolar.maxaltitude.AircraftBeacon.uid', index=7,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stealth', full_name='com.akolar.maxaltitude.AircraftBeacon.stealth', index=8,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='do_not_track', full_name='com.akolar.maxaltitude.AircraftBeacon.do_not_track', index=9,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='raw_message', full_name='com.akolar.maxaltitude.AircraftBeacon.raw_message', index=10,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relayer', full_name='com.akolar.maxaltitude.AircraftBeacon.relayer', index=11,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='heading', full_name='com.akolar.maxaltitude.AircraftBeacon.heading', index=12,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ground_speed', full_name='com.akolar.maxaltitude.AircraftBeacon.ground_speed', index=13,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vertical_speed', full_name='com.akolar.maxaltitude.AircraftBeacon.vertical_speed', index=14,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='turn_rate', full_name='com.akolar.maxaltitude.AircraftBeacon.turn_rate', index=15,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signal_to_noise_ratio', full_name='com.akolar.maxaltitude.AircraftBeacon.signal_to_noise_ratio', index=16,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error_count', full_name='com.akolar.maxaltitude.AircraftBeacon.error_count', index=17,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freq_offset', full_name='com.akolar.maxaltitude.AircraftBeacon.freq_offset', index=18,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aircraft_type', full_name='com.akolar.maxaltitude.AircraftBeacon.aircraft_type', index=19,
number=20, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='address_type', full_name='com.akolar.maxaltitude.AircraftBeacon.address_type', index=20,
number=21, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gps_quality', full_name='com.akolar.maxaltitude.AircraftBeacon.gps_quality', index=21,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_AIRCRAFTBEACON_QUALITY, ],
enum_types=[
_AIRCRAFTBEACON_AIRCRAFTTYPE,
_AIRCRAFTBEACON_ADDRESSTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=42,
serialized_end=1047,
)
_AIRCRAFTBEACON_QUALITY.containing_type = _AIRCRAFTBEACON
_AIRCRAFTBEACON.fields_by_name['aircraft_type'].enum_type = _AIRCRAFTBEACON_AIRCRAFTTYPE
_AIRCRAFTBEACON.fields_by_name['address_type'].enum_type = _AIRCRAFTBEACON_ADDRESSTYPE
_AIRCRAFTBEACON.fields_by_name['gps_quality'].message_type = _AIRCRAFTBEACON_QUALITY
_AIRCRAFTBEACON_AIRCRAFTTYPE.containing_type = _AIRCRAFTBEACON
_AIRCRAFTBEACON_ADDRESSTYPE.containing_type = _AIRCRAFTBEACON
DESCRIPTOR.message_types_by_name['AircraftBeacon'] = _AIRCRAFTBEACON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AircraftBeacon = _reflection.GeneratedProtocolMessageType('AircraftBeacon', (_message.Message,), dict(
Quality = _reflection.GeneratedProtocolMessageType('Quality', (_message.Message,), dict(
DESCRIPTOR = _AIRCRAFTBEACON_QUALITY,
__module__ = 'message_pb2'
# @@protoc_insertion_point(class_scope:com.akolar.maxaltitude.AircraftBeacon.Quality)
))
,
DESCRIPTOR = _AIRCRAFTBEACON,
__module__ = 'message_pb2'
# @@protoc_insertion_point(class_scope:com.akolar.maxaltitude.AircraftBeacon)
))
_sym_db.RegisterMessage(AircraftBeacon)
_sym_db.RegisterMessage(AircraftBeacon.Quality)
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
##########################################################################
# MediPy - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
def enum(name, *members, **named):
""" Create an enumerated type called ``name`` with given ``members``: ::
>>> Colors = medipy.base.enum("Colors", "red", "green", "blue")
>>> color = Colors.red
By default, the members are integers, but their value can be specified
using named parameters. In this case, the values must be storable in a
dictionary: ::
>>> Colors = medipy.base.enum("Colors", "red", green="green", blue=3.14)
>>> Colors.red
0
>>> Colors.green
'green'
>>> Colors.blue
3.14
The name of members can be retrieved either in bulk or using their
values: ::
>>> Colors.members
['red', 'green', 'blue']
>>> Colors[0]
'red'
>>> Colors["green"]
'green'
>>> Colors[3.14]
'blue'
The enumerated type is derived of :class:`~medipy.base.Enum`: ::
>>> isinstance(Colors, medipy.base.Enum)
True
"""
enums = dict(zip(members, range(len(members))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
new_type = Enum(name, (), enums)
new_type._reverse = reverse
return new_type
class Enum(type):
""" Base class for all enumerated types. This class should not be used
directly
"""
def __init__(cls, name, bases, dct):
type.__init__(cls, name, bases, dct)
@property
    def members(self):
return self._reverse.values()
    def __getitem__(self, item):
""" Return the name of a member given its value.
"""
return self._reverse[item]
|
nilq/baby-python
|
python
|
# # # # # # # # # # # # #
# #
# #
# #
# Pi Radio #
# By #
# Jackson #
# Hoggard #
# (c)2018 #
# #
# #
# #
# # # # # # # # # # # # #
import os, argparse, random, keyboard  # keyboard is needed by checkForQuit()
from colorama import Fore, Back, Style
from multiprocessing import Process
parser = argparse.ArgumentParser(prog='python PiRadio.py', description='Broadcasts WAV/MP3 file over FM using RPI GPIO #4 pin.')
parser.add_argument("-s", "--song_file", help="Set song to play")
parser.add_argument("-f", "--frequency", help="Set TX frequency. Acceptable range 87.1-108.2", type=float)
arg = parser.parse_args()
playlist = []
frequency = 0
def start():
    global frequency  # play() reads the chosen frequency later
    os.system("clear")
    print("Starting Pi Radio")
    if arg.frequency is None:
        frequency = 0 #raw_input("Enter the frequency (Press Enter/Return for 99.9MHz): ")
        if frequency == 0:
            frequency = '99.9'
    elif not (87.1 <= arg.frequency <= 108.2):  # reject out-of-band frequencies
        print "Frequency is out of range.";exit()
    else:
        frequency = str(arg.frequency)
print ("\nFrequency set to " + frequency)
makePlaylist()
os.system("clear")
begin()
print "Songs in Playlist:\n" + Fore.GREEN + "______________________________\n"
i = 0
while i < len(playlist):
print Style.RESET_ALL + playlist[i]
i += 1
print Fore.GREEN + "______________________________"
print Fore.WHITE + "Type Choice Number:\n1. Shuffle Play\n2. Talk\n3. Exit\n\n\n"
userInput()
def begin():
print(Fore.RED + Back.WHITE + '# PiRadio Station v1.1 #')
print(Style.RESET_ALL)
def play(song):
print Style.RESET_ALL + "\n"
arg.song_file = song
try:
if ".mp3" in arg.song_file.lower():
os.system("ffmpeg -i "+arg.song_file+" "+"-f s16le -ar 22.05k -ac 1 - | sudo ./fm_transmitter -f"+" "+frequency+" "+" - ")
elif ".wav" in arg.song_file.lower():
os.system("sudo ./fm_transmitter -f"+" "+ "99.9" +" " + "/home/pi/fm_transmitter/music/" + arg.song_file)
else:
print "That file extension is not supported."
print "File name provided: %s" %arg.song_file
raise IOError
except Exception:
print "Something went wrong. Halting."; exit()
except IOError:
print "There was an error regarding file selection. Halting."; exit()
def makePlaylist():
for root, dirs, files, in os.walk("/home/pi/fm_transmitter/music"):
for file in files:
if file.endswith(".wav"):
#print(file)
playlist.append(file)
def playSongs():
    global p1  # checkForQuit() needs to be able to terminate the player
    print Style.RESET_ALL + "\n"
    run = True
    while run == True:
        i = random.randint(0, len(playlist) - 1)
        print Fore.RED + Back.WHITE + "Now Playing: " + playlist[i] + "\n"
        print Style.RESET_ALL
        # Pass the target and its argument separately so play() runs in the
        # child process instead of being called immediately.
        p1 = Process(target = play, args = (playlist[i],))
        p1.start()
        p2 = Process(target = checkForQuit)
        p2.start()
        p1.join()  # wait for the current song to finish before the next pick
def talk():
print("Still testing. Please choose a different option")
userInput()
def userInput():
choice = input(" > ")
processInput(choice)
def processInput(c):
if(c == 1): playSongs()
if(c == 2): talk()
if(c == 3): exit()
else:
userInput()
def checkForQuit():
    if(keyboard.is_pressed('q')):
        p1.terminate()  # multiprocessing.Process has no stop(); use terminate()
        start()
|
nilq/baby-python
|
python
|
import struct, base64
import numpy as np
mdmr_dtypes = { 0: 's', 1: 'I', 2: 'q', 3: 'f', 4: 'd'}
# output of test_encode
b64data = """TURNUjAwMDECAAAABAAAAKUAAAAAAAAABAAAADcAAAAAAAAAAAAAAAAAAAB47j1cctzBv2t9kdCWc8G/fSJPkq6ZwL/PSe8bX3u+v6TC2EKQg7q/6iEa3UHstL8YQznRrkKqv4NuL2mM1oG/GEM50a5Cqj96Nqs+V1vBPxR5knTN5M8/yol2FVJ+2D+hSs0eaAXgPzaTb7a5MeI/+5EiMqzi4j9YkGYsms7iP4WUn1T7dOI/Arfu5qkO4j+OQLyuX7DhP8dLN4lBYOE/5pZWQ+Ie4T8IyQImcOvgP065wrtcxOA/K6T8pNqn4D9OtKuQ8pPgP/fHe9XKhOA/ba0vEtpy4D9cOBCSBUzgP/TDCOHRxt8/8S4X8Z2Y3T/MYmLzcW3YP4j029eBc84/IHu9++O9sj9XW7G/7J6sv/SmIhXGFsK/CoUIOIQqyb+6g9iZQufNv+o+AKlNnNC/38Mlx53S0b/njCjtDb7Sv9mZQuc1dtO/YDyDhv4J1L8XnwJgPIPUv7lTOlj/59S/eCgK9Ik81b9sBOJ1/YLVv3lA2ZQrvNW/2CrB4nDm1b/M0eP3Nv3Vv6DDfHkB9tW/YFlpUgq61b9+Oh4zUBnVv+XVOQZkr9O/AAAAAAAAAAAAAAAAAAAAABcrajANw+6/xty1hHzQ7r8IyQImcOvuv/lmmxvTE++/fm/Tn/1I778OT6+UZYjvv88sCVBTy++/AAAAAAAA8L+XcymuKvvvvyvB4nDmV++/28TJ/Q5F7b+fWRKgppbov5CDEmba/uC/OWItPgXA0L/e5SK+E7Oev0ZfQZqxaMI/SOF6FK5H0T9VwaikTkDXPxPyQc9m1ds//Yf029eB3z9eaK7TSEvhP76HS447peI/3xrYKsHi4z/BqKROQBPlP4e/JmvUQ+Y/csRafAqA5z/jUwCMZ9DoP6OvIM1YNOo/7ncoCvSJ6z/ye5v+7EfsP49TdCSX/+o/FjCBW3fz5T/rxVBOtKvcP2Hgufdwyc0/V5V9VwT/sz+NeohGdxCbv9Sa5h2n6Li/NXugFRiywr+HM7+aAwTHv0ku/yH99sm//vFetTLhy7/a4a/JGvXMv482jliLT82/ecxAZfz7zL9GzsKedvjLv/a0w1+TNcq/NJ2dDI6Sx7+bVZ+rrdjDv+KS407pYL2/HQOy17s/rr+pwTQMHxGTP/0wQni0ccA/QkP/BBcr0j8AAAAAAAAAAAAAAAAAAAAAN2xblNkgz78R34lZL4bOv9vEyf0ORc2/+GuyRj1Ey7+ZDTLJyFnIv3E486s5QMS/YJM16iEavb/ZQpCDEmaqvw3DR8SUSKI/Cty6m6c6xD9rgqj7AKTUP8cpOpLLf+A/qkNuhhvw5T+l2qfjMQPpP61M+KV+3uk/Ksb5m1CI6T/RBfUtc7roPw7bFmU2yOc/5bM8D+7O5j8vaYzWUdXlP2JnCp3X2OQ/io7k8h/S4z+7fsFu2LbiP9UJaCJseOE/UtUEUfcB4D+CrRIsDmfcP4YgByXMtNc/MlpHVRNE0T+CHJQw0/a/P5BJRs7Cnra/+Um1T8dj2L825QrvchHmv8eA7PXuj+y/zH9Iv30d778dPX5v05/vv2LWi6GcaO+/xuHMr+YA77/V52or9pfuvz86deWzPO6/bcX+snvy7b9FEr2MYrntv1WkwthCkO2/S3ZsBOJ17b9+HThnRGntv7bbLjTXae2/u/JZngd37b+OklfnGJDtv6TH7236s+2/HcnlP6Tf7b+XytsRTgvuvxY1mIbhI+6/CoDxDBr67b+V8e8zLhztvwAAAAAAAAAAAAAAAAAAAAAXK2owDcPuv8bctYR80O6/CMkCJnDr7r/5Zpsb0xPvv35v05/9SO+/Dk+vlGWI77/PLAlQU8vvvwAAAAAAAPC/l3Mprir7778rweJw5lfvv9vEyf0ORe2/n1kSoKaW6L+QgxJm2v7gvzliLT4FwNC/3uUivhOznr9GX0GasWjCP0jhehSuR9E/VcGopE5A1z8T8kHPZtXbP/2H9NvXgd8/Xmiu00hL4T++h0uOO6XiP98a2CrB4uM/waikTkAT5T+HvyZr1EPmP3LEWnwKgOc/41MAjGfQ6D+jryDNWDTqP+53KAr0ies/8nub/uxH7D+PU3Qkl//qPxYwgVt38+U/68VQTrSr3D9h4Ln3cMnNP1eVfVcE/7M/jXqIRncQm7/UmuYdp+i4vzV7oBUYssK/hzO/mgMEx79JLv8h/fbJv/7xXrUy4cu/2uGvyRr1zL+PNo5Yi0/Nv3nMQGX8+8y/Rs7Cnnb4y7/2tMNfkzXKvzSdnQyOkse/m1Wfq63Yw7/ikuNO6WC9vx0Dste7P66/qcE0DB8Rkz/9MEJ4tHHAP0JD/wQXK9I/AAAAAAAAAAA="""
strdata = base64.b64decode(b64data)
def parse(data):
ptr = 0
hdr_st = '=' + ''.join(('4s', '4s', 'I'))
hdr_size = struct.calcsize(hdr_st)
magic,version,n_blocks = struct.unpack(hdr_st, data[:hdr_size])
ptr += hdr_size
blocks = []
for block_i in range(n_blocks):
block_fmt = '=iq'
block_size = struct.calcsize(block_fmt)
dtype, length = struct.unpack(block_fmt, data[ptr:ptr+block_size])
blocks.append((dtype,length))
base_offset = hdr_size + (struct.calcsize(block_fmt) * n_blocks)
    output_data = []
    ptr = base_offset  # data blocks are laid out back-to-back after the block headers
    for block_i, blockinfo in enumerate(blocks):
        dtype, length = blockinfo
        dtype_fmt = mdmr_dtypes[dtype]
        format = '{}{}'.format(length, dtype_fmt)
        block_size = struct.calcsize(format)
        block_data = struct.unpack(format, data[ptr:ptr+block_size])
        output_data.append(np.array(block_data))
        ptr += block_size  # advance past this block instead of resetting to base_offset
return (magic, version, n_blocks, output_data)
if __name__ == '__main__':
(magic, version, n_blocks, data) = parse(strdata)
print("magic: ", magic)
print("version: ", version)
print("n_blocks: ", n_blocks)
print("data (arrays): ")
for i,array in enumerate(data):
print("### array number:", i)
print(array)
|
nilq/baby-python
|
python
|
from app import app
from config import MONGO_URI, client
from flask import session, request, jsonify
import pymongo
import requests
import json
from datetime import datetime
# Connect to MongoDB
db = client['aiboxdb']
@app.route('/api/android/getRemind', methods=['GET'])
def android_get_remind():
    '''Get reminders that have no user_nickname (not logged in), filtering out expired entries.
    Returns:
        {
            'status': '200' -> success; '404' -> failure
            'result': the not-logged-in reminders (empty list if none); or the error message
            'msg': status message
        }
    '''
try:
remind_collect = db['reminder']
user_remind_doc = remind_collect.find({'user_nickname': ''})
except Exception as err:
resp = {
'status': '404',
            'result': str(err),  # str(): exception objects are not JSON-serializable
            'msg': 'Failed to get not-logged-in reminders'
}
return jsonify(resp)
result_list = []
for item in user_remind_doc:
if datetime.strptime(item['remind_time'], '%Y-%m-%d %H:%M:%S') > datetime.today():
obj = {
'remind_time': item['remind_time'],
'dosomething': item['dosomething']
}
result_list.append(obj)
resp = {
'status': '200',
'result': result_list,
        'msg': 'Got not-logged-in reminders successfully'
}
return jsonify(resp)
@app.route('/api/android/getAllLocation', methods=['GET'])
def android_get_all_location():
    '''Get all queried locations.
    Returns:
        {
            'status': '200' -> success; '404' -> failure
            'result': all queried locations (empty list if none); or the error message
            'msg': status message
        }
    '''
try:
location_collect = db['location']
location_doc = location_collect.find().sort("_id", 1)
except Exception as err:
resp = {
'status': '404',
            'result': str(err),  # str(): exception objects are not JSON-serializable
            'msg': 'Failed to get all queried locations'
}
return jsonify(resp)
result_list = []
for item in location_doc:
obj = {
'location': item['location'],
'region': item['region'],
'number': str(item['number']),
'unit': item['unit'],
'date': item['date']
}
result_list.append(obj)
resp = {
'status': '200',
'result': result_list,
        'msg': 'Got all queried locations successfully'
}
return jsonify(resp)
@app.route('/api/android/getLastLocation', methods=['GET'])
def android_get_last_location():
    '''Get the last (most recent) queried location.
    Returns:
        {
            'status': '200' -> success; '404' -> failure
            'result': the most recent queried location (empty object {} if none); or the error message
            'msg': status message
        }
    '''
try:
location_collect = db['location']
        location_doc = location_collect.find().sort("_id", -1).limit(1) # take the largest _id, i.e. the newest record
except Exception as err:
resp = {
'status': '404',
            'result': str(err),  # str(): exception objects are not JSON-serializable
            'msg': 'Failed to get the most recent queried location'
}
return jsonify(resp)
obj= {}
for item in location_doc:
obj = {
'location': item['location'],
'region': item['region'],
'number': str(item['number']),
'unit': item['unit'],
'date': item['date']
}
resp = {
'status': '200',
'result': obj,
        'msg': 'Got the last (most recent) queried location successfully'
}
return jsonify(resp)
@app.route('/api/android/getWeather', methods=['GET'])
def android_get_weather():
    '''Get the weather conditions for a city.
    Params:
        city: city name
    Returns:
        {
            'status': '200' -> success; '404' -> failure
            'result': the city's weather conditions; or the error message
            'msg': status message
        }
    '''
city = request.args.get('city')
print(city)
has_city = False
city_transfer = {
'新北': '新北市',
'新北市': '新北市',
'台北': '臺北市',
'台北市': '臺北市',
'台中': '臺中市',
'台中市': '臺中市',
'台南': '臺南市',
'台南市': '臺南市'
}
for key, values in city_transfer.items():
if city == key:
city = values
    weather = {
        'Wx': '',   # weather phenomenon
        'MaxT': '', # maximum temperature
        'MinT': '', # minimum temperature
        'CI': '',   # comfort index
        'PoP': '',  # probability of precipitation
        'info': ''  # lets the app pick a matching background image
    }
    # Taiwanese open-government weather API
resp = requests.get('https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-C0032-001?Authorization=rdec-key-123-45678-011121314')
data = json.loads(resp.text)
    records = data['records']['location']  # forecast records for each district
for record in records:
if record['locationName'] == city:
has_city = True
elements = record['weatherElement']
for element in elements:
                if element['elementName'] == 'Wx':  # weather phenomenon
                    weather['Wx'] = element['time'][-1]['parameter']['parameterName']
                if element['elementName'] == 'MaxT':  # maximum temperature
                    weather['MaxT'] = element['time'][-1]['parameter']['parameterName']
                if element['elementName'] == 'MinT':  # minimum temperature
                    weather['MinT'] = element['time'][-1]['parameter']['parameterName']
                if element['elementName'] == 'CI':  # comfort index
                    weather['CI'] = element['time'][-1]['parameter']['parameterName']
                if element['elementName'] == 'PoP':  # probability of precipitation
                    weather['PoP'] = element['time'][-1]['parameter']['parameterName']
    # background-image hint for the app
if '雨' in weather['Wx']:
weather['info'] = 'rainy'
    elif '晴' in weather['Wx'] or '熱' in weather['Wx']:  # bug fix: check the forecast text, not the dict's keys
weather['info'] = 'sunny'
elif '雲' in weather['Wx']:
weather['info'] = 'cloudy'
else:
weather['info'] = 'cloudy'
if has_city is True:
resp = {
'status': '200',
'result': weather,
            'msg': 'Got the city weather conditions successfully'
}
return jsonify(resp)
else:
resp = {
'status': '404',
            'result': 'No such city',
            'msg': 'Failed to get the city weather conditions'
}
return jsonify(resp)
@app.route('/api/android/getHospital', methods=['GET'])
def android_get_hospital():
    '''Get information about a hospital.
    Params:
        hospital: hospital name
    Returns:
        {
            'status': '200' -> success; '404' -> failure
            'result': the hospital information; or the error message
            'msg': status message
        }
    '''
hospital = request.args.get('hospital')
db = client['aiboxdb']
hospital_collect = db['hospital']
hospital_doc = hospital_collect.find_one({'機構名稱': {'$regex': hospital}})
    if hospital_doc is not None:
hospital_doc.pop('_id')
resp = {
'status': '200',
'result': hospital_doc,
            'msg': 'Got hospital information successfully'
}
return jsonify(resp)
else:
resp = {
'status': '404',
            'result': 'No such hospital',
            'msg': 'Failed to get hospital information'
}
return jsonify(resp)
@app.route('/api/android/getECPhone', methods=['GET'])
def android_get_ec_phone():
    '''Get the emergency contact phone number.
    Returns:
        {
            'status': '200' -> success; '404' -> failure
            'result': the emergency contact phone number; or the error message
            'msg': status message
        }
    '''
db = client['aiboxdb']
temp_ec_phone_collect = db['temp_ec_phone']
temp_ec_phone_doc = temp_ec_phone_collect.find_one({'_id': 0})
if temp_ec_phone_doc['phone'] != '':
resp = {
'status': '200',
'result': {
'phone': temp_ec_phone_doc['phone']
},
            'msg': 'Got the emergency contact phone number successfully'
}
temp_ec_phone_doc = temp_ec_phone_collect.find_one_and_update({'_id': 0}, {'$set': {'phone': ''}}, upsert=False)
return jsonify(resp)
else:
resp = {
'status': '404',
'result': "null",
            'msg': 'Failed to get the emergency contact phone number'
}
return jsonify(resp)
@app.route('/api/android/getActivity', methods=['GET'])
def android_get_activity():
    '''Get activity information.
    Returns:
        {
            'status': '200' -> success; '404' -> failure
            'result': the activity information; or the error message
            'msg': status message
        }
    '''
db = client['aiboxdb']
open_activity_collect = db['open_activity']
open_activity_doc = open_activity_collect.find({}, {'_id': False})
resp = {
'status': '200',
'result': list(open_activity_doc),
        'msg': 'Got activity information successfully'
}
return jsonify(resp)
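# Usage sketch: the host/port below assume Flask's defaults and are not taken
# from this file; the query values are illustrative only.
#   curl "http://localhost:5000/api/android/getWeather?city=台北"
#   curl "http://localhost:5000/api/android/getHospital?hospital=台大"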
|
nilq/baby-python
|
python
|
from pathlib import Path
from typing import Tuple, Callable, Union
ReadPaths = Union[Tuple[Path], Tuple[Path, Path]]
def _make_paired_paths(
dir_path: Path,
paired: bool,
mkstr: Callable[[int], str]
) -> ReadPaths:
path1 = dir_path/mkstr(1)
return (path1, dir_path/mkstr(2)) if paired else (path1,)
def make_read_paths(
reads_dir_path: Path,
paired: bool
) -> ReadPaths:
return _make_paired_paths(reads_dir_path, paired, lambda n: f"reads_{n}.fq.gz")
def make_legacy_read_paths(
reads_dir_path: Path,
paired: bool
) -> ReadPaths:
return _make_paired_paths(reads_dir_path, paired, lambda n: f"reads_{n}.fastq")
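# Usage sketch (the directory is illustrative, not from this module):
#   make_read_paths(Path("/data/sample1"), paired=True)
#   -> (PosixPath('/data/sample1/reads_1.fq.gz'), PosixPath('/data/sample1/reads_2.fq.gz'))
#   make_legacy_read_paths(Path("/data/sample1"), paired=False)
#   -> (PosixPath('/data/sample1/reads_1.fastq'),)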
|
nilq/baby-python
|
python
|
import json
from lib import action
class VaultWriteAction(action.VaultBaseAction):
def run(self, path, values):
return self.vault.write(path, **json.loads(values))
|
nilq/baby-python
|
python
|
import numpy as np
import operator
def TI_Forward_Neighborhood(D, p, Eps):
""" ."""
seeds = []
forwardThreshold = p.dist + Eps
    # Traverse only the points that come after "p" in the distance-sorted
    # list; by the triangle inequality, every Eps-neighbor ahead of p
    # satisfies q.dist <= p.dist + Eps, so the scan can stop early.
indice = D.index(p)
points_list = D[indice + 1:]
# The newly calculated list is traversed
for q in points_list:
if q.dist > forwardThreshold:
break
if Distance(q.Coords, p.Coords) <= Eps:
seeds.append(q)
# The list with the seeds is returned.
return seeds
def TI_Backward_Neighborhood(D, pto, Eps):
seeds = []
backwardThreshold = pto.dist - Eps
    # Traverse the points that come before "pto" in the distance-sorted
    # list, in reverse order; every Eps-neighbor behind pto satisfies
    # q.dist >= pto.dist - Eps, so the scan can stop early.
indice = D.index(pto)
points_list = D[:indice]
points_list.reverse()
# The newly calculated list is traversed
for q in points_list:
if q.dist < backwardThreshold:
break
if Distance(q.Coords, pto.Coords) <= Eps:
seeds.append(q)
# The list with the seeds is returned.
return seeds
def TI_Neighborhood(D, p, Eps):
part_1 = TI_Backward_Neighborhood(D, p, Eps)
part_2 = TI_Forward_Neighborhood(D, p, Eps)
return part_1 + part_2
def TI_ExpandCluster(D, D_prim,
p, ClId, Eps, MinPts):
"""D is increasingly ordered with respect to the
distances from the reference point"""
# The set of points around point "p" is explored. Note that
# seeds is a set or list of points.
seeds = TI_Neighborhood(D, p, Eps)
# Points around "p" are counted, including itself
p.NeighborsNo += len(seeds)
# "p" can be noise or an edge point
if p.NeighborsNo < MinPts:
# It is initially declared as noise
p.ClusterId = -1 # "NOISE"
# You go through each point of the set of seeds
for q in seeds:
q.Border.append(p)
q.NeighborsNo += 1
# The list of edge points of "p" is declared empty
p.Border = []
# "P" is removed from D to D_prim
D.remove(p)
D_prim.append(p)
return False
else:
# Cluster membership is assigned
p.ClusterId = ClId
# The points found in the seeds are covered
for q in seeds:
q.ClusterId = ClId
q.NeighborsNo += 1
for q in p.Border:
# Identify which element is in the D_prim listing, and
# then modify this.
D_prim[D_prim.index(q)].ClusterId = ClId
# Once again the set is emptied
p.Border = []
# "P" is removed from D to D_prim
D.remove(p)
D_prim.append(p)
# As long as the number of elements in the seed list is
# greater than zero, that is, while finding ONE element, the
# next iteration:
while seeds:
# Somehow in this while the process is repeated
curPoint = seeds[0]
curSeeds = TI_Neighborhood(D, curPoint, Eps)
curPoint.NeighborsNo += len(curSeeds)
# i curPoint is on the edge
if curPoint.NeighborsNo < MinPts:
for q in curSeeds:
q.NeighborsNo += 1
# If curPoint is core
else:
while curSeeds:
q = curSeeds[0]
q.NeighborsNo += 1
if q.ClusterId == "UNCLASSIFIED":
q.ClusterId = ClId
# Remove "p" from D to
# D_prim
curSeeds.remove(q)
seeds.append(q)
else:
curSeeds.remove(q)
# The edge points are traversed
for q in curPoint.Border:
# Identify which element is in the
# listing D_prim, and then this is modified.
D_prim[D_prim.index(q)].ClusterId = ClId
# The content of the variables is modified
curPoint.Border = []
D.remove(curPoint)
D_prim.append(curPoint)
seeds.remove(curPoint)
# The logical value is returned.
return True
def Distance(point, pnt_ref):
"""Function that calculates the distance in two dimensions"""
point = np.array(point[0:2])
pnt_ref = np.array(pnt_ref[0:2])
return np.sqrt(np.sum(np.power(point - pnt_ref, 2)))
class class_point:
"""Class that generates a point with its attributes"""
def __init__(self, point, pnt_ref, metadata=None):
try:
# Metadata
self.metadata = metadata
# The original coordinates are saved
self.Coords = point[0:2]
except:
pass
# p.ClusterId = UNCLASSIFIED;
self.ClusterId = "UNCLASSIFIED"
# p.dist = Distance(p,r)
self.dist = Distance(point[0:2], pnt_ref[0:2])
# p.NeighborsNo = 1
self.NeighborsNo = 1
# p.Border = vacio
self.Border = []
def TI_DBScan(D, eps, MinPts, metadata=None):
"""This class applies the TI-DBScan algorithm to the set
of points delivered.
D = [[coord1, coord2, ...], ...]:
It is a list of tuples or lists, where the two
first items in each list are the coordinates and
the third is METAdata."""
    try:
        # assert: r denotes a reference point
        pnt_ref = D[0]
    except IndexError:
        # an empty input has no reference point, so there is nothing to cluster
        return []
# the number of points cannot be 1.
MinPts = MinPts if MinPts > 1 else 2
# D' = empty set of points;
D_prim = []
#Points are transformed
try:
D = [class_point(
D[indice], pnt_ref, metadata=metadata[indice])
for indice in range(len(D))]
except TypeError:
D = [class_point(
D[indice], pnt_ref)
for indice in range(len(D))]
    # sort all points in D non-decreasingly w.r.t. field dist;
    D = sorted(D, key=operator.attrgetter('dist'))
# ClusterId = label of first cluster;
i = 0
    ClusterId = i
# for each point p in the ordered set D starting from
# the first point until last point in D do
# While the list of points to review is not empty, it iterates
# infinitely.
while D:
p = D[0]
#for p in D:
# if TI-ExpandCluster(D, D', p, ClusterId, Eps, MinPts) then
if TI_ExpandCluster(D, D_prim,
p, ClusterId, eps, MinPts):
# ClusterId = NextId(ClusterId)
i += 1
            ClusterId = i
# endif
# endfor
# return D'// D' is a clustered set of points
return D_prim
# The next line is for testing
if __name__ == "__main__":
set_of_points = [[1.00, 1.00], [1.50, 1.00], [2.00, 1.50],
[5.00, 5.00], [6.00, 5.50], [5.50, 6.00],
[10.00, 11.00], [10.50, 9.50], [10.00, 10.00],
[8.00, 1.00], [1.00, 8.00]]
#set_of_points = [[1.00, 1.00], [1.50, 1.00], [2.00, 1.50],
# [5.00, 5.00], [6.00, 5.50], [5.50, 6.00],
# [8.00, 1.00], [1.00, 8.00]]
#set_of_points = [[1.00, 1.00], [1.50, 1.00], [2.00, 1.50],
# [5.00, 5.00], [8.00, 1.00], [1.00, 8.00]]
result = TI_DBScan(set_of_points, 2, 2)
for element in result:
print (element.ClusterId)
print (element.Coords)
print ("")
|
nilq/baby-python
|
python
|
import time
import os
from coala_utils.decorators import enforce_signature
from coalib.output.printers.LogPrinter import LogPrinterMixin
from coalib.misc.CachingUtilities import (
pickle_load, pickle_dump, delete_files)
class FileCache:
"""
This object is a file cache that helps in collecting only the changed
and new files since the last run. Example/Tutorial:
>>> from pyprint.NullPrinter import NullPrinter
>>> from coalib.output.printers.LogPrinter import LogPrinter
>>> import logging
>>> import copy, time
>>> log_printer = LogPrinter()
>>> log_printer.log_level = logging.CRITICAL
To initialize the cache create an instance for the project:
>>> cache = FileCache(log_printer, "test", flush_cache=True)
Now we can track new files by running:
>>> cache.track_files(["a.c", "b.c"])
Since all cache operations are lazy (for performance), we need to
explicitly write the cache to disk for persistence in future uses:
(Note: The cache will automatically figure out the write location)
>>> cache.write()
Let's go into the future:
>>> time.sleep(1)
Let's create a new instance to simulate a separate run:
>>> cache = FileCache(log_printer, "test", flush_cache=False)
>>> old_data = copy.deepcopy(cache.data)
We can mark a file as changed by doing:
>>> cache.untrack_files({"a.c"})
Again write to disk after calculating the new cache times for each file:
>>> cache.write()
>>> new_data = cache.data
Since we marked 'a.c' as a changed file:
>>> "a.c" not in cache.data
True
>>> "a.c" in old_data
True
Since 'b.c' was untouched after the second run, its time was updated
to the latest value:
>>> old_data["b.c"] < new_data["b.c"]
True
"""
@enforce_signature
def __init__(
self,
log_printer: LogPrinterMixin,
project_dir: str,
flush_cache: bool=False):
"""
Initialize FileCache.
:param log_printer: An object to use for logging.
:param project_dir: The root directory of the project to be used
as a key identifier.
:param flush_cache: Flush the cache and rebuild it.
"""
self.log_printer = log_printer
self.project_dir = project_dir
self.current_time = int(time.time())
cache_data = pickle_load(log_printer, project_dir, {})
last_time = -1
if 'time' in cache_data:
last_time = cache_data['time']
if not flush_cache and last_time > self.current_time:
log_printer.warn('It seems like you went back in time - your '
'system time is behind the last recorded run '
'time on this project. The cache will '
'be force flushed.')
flush_cache = True
self.data = cache_data.get('files', {})
if flush_cache:
self.flush_cache()
# store the files to be untracked and then untrack them in the end
# so that an untracked file is not tracked again by mistake in a
# later section (which will happen if that file doesn't yield a
# result in that section).
self.to_untrack = set()
def flush_cache(self):
"""
Flushes the cache and deletes the relevant file.
"""
self.data = {}
delete_files(self.log_printer, [self.project_dir])
self.log_printer.debug('The file cache was successfully flushed.')
def __enter__(self):
return self
def write(self):
"""
Update the last run time on the project for each file
to the current time. Using this object as a contextmanager is
preferred (that will automatically call this method on exit).
"""
for file in self.to_untrack:
if file in self.data:
del self.data[file]
for file_name in self.data:
self.data[file_name] = self.current_time
pickle_dump(
self.log_printer,
self.project_dir,
{'time': self.current_time, 'files': self.data})
def __exit__(self, type, value, traceback):
"""
Update the last run time on the project for each file
to the current time.
"""
self.write()
def untrack_files(self, files):
"""
Removes the given files from the cache so that they are no longer
considered cached for this and the next run.
:param files: A set of files to remove from cache.
"""
self.to_untrack.update(files)
def track_files(self, files):
"""
Start tracking files given in ``files`` by adding them to the
database.
:param files: A set of files that need to be tracked.
These files are initialized with their last
modified tag as -1.
"""
for file in files:
if file not in self.data:
self.data[file] = -1
def get_uncached_files(self, files):
"""
Returns the set of files that are not in the cache yet or have been
untracked.
:param files: The list of collected files.
:return: A set of files that are uncached.
"""
if self.data == {}:
# The first run on this project. So all files are new
# and must be returned irrespective of whether caching is turned on.
return files
else:
return {file
for file in files
if (file not in self.data or
int(os.path.getmtime(file)) > self.data[file])}
|
nilq/baby-python
|
python
|
from flask import Flask
from flask import render_template, url_for, request
import datetime
from person import Person
from my_lib import get_week
from parser_price import Parser_price
from head_hunter_vacancies import HeadHunter_vacancies
from bd_apartment import Appartment_BD
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, View_apartment, Country, Region, City, Address, Characteristic
pers = Person()
app = Flask( __name__ )
engine = create_engine('sqlite:///apartment.db?check_same_thread=False')
Base.metadata.bind = engine
DBSession = sessionmaker( bind=engine )
session = DBSession()
@app.route("/")
@app.route("/index/")
def main_win(): # Main page
today = datetime.datetime.today()
scw = get_week( int( today.strftime('%w') ) )
return render_template( 'index.html', curdate = today.strftime('%d-%m-%Y'), curweek = scw )
@app.route("/personal/")
def pers_win(): # Personal data -------------------------------------------------------
dic={
'photo' : pers.get_photo(),
'fio' : pers.get_name() + ' ' + pers.get_otch() + ' ' + pers.get_fam(),
'birthday' : pers.get_birthday(),
'attach': pers.get_attach()
}
return render_template( 'personal.html', **dic )
@app.route("/parser/" )
def parser(): # apartment-parser start page - district selection ---------------------------
return render_template( 'parser_form.html' )
@app.route("/price_apartments/", methods=['POST'] )
def price(): # parser results - prices
    region = request.form['region'] # read the request parameter
    parser = Parser_price( region ) # create the parsing object
dicMin = parser.cost_min(rej='dic')
dicMax = parser.cost_max(rej='dic')
    # parameters for the page
dic={}
dic['region'] = region
dic['minprice'] = dicMin['price']
dic['mincity'] = dicMin['city']
dic['mincharact'] = dicMin['address']+'; '+dicMin['region']+'; '+dicMin['characteristic']
dic['maxprice'] = dicMax['price']
dic['maxcity'] = dicMax['city']
dic['maxcharact'] = dicMax['address'] + '; ' + dicMax['region'] + '; ' + dicMax['characteristic']
return render_template( 'price_apartments.html', **dic )
@app.route("/hh_main/" )
def hh_main(): # vacancies API start page ---------------------------------------------
return render_template('hh_city.html')
@app.route("/hh_vacancy/", methods=['POST'] )
def hh_vacancy():
    city = request.form['city'] # which city was selected
vac = request.form['vac']
hh = HeadHunter_vacancies()
lst, num, sum = hh.view_vacancies( city, vac )
dic={}
s = ''
for v in lst:
if v:
s += '* '+v+'\n'
dic['skills'] = s
dic['city'] = city
dic['vac'] = vac
if num == 0:
dic['salary'] = 0.0
else:
dic['salary'] = round( sum/num, 2 )
return render_template('hh_vacancy.html', **dic)
@app.route("/bd_apartment/" )
def bd_apartment():
return render_template('bd_apartment.html')
@app.route("/bd_apartment_view/", methods=['POST'] )
def bd_apartment_view():
    region = request.form['region'] # read the request parameter
    load = request.form.get('load') # read the request parameter
dic = {}
bd = Appartment_BD( )
dic['field'] = []
if bd.is_connect == 'OK':
        lstField = bd.get_title_table() # list of tuples (records) with the fields inside
dic['field']=lstField
    # DB data
    # overwrite if requested
if load:
        parser = Parser_price( ) # create the parsing object for the district
lst_data = parser.data_search( region )
bd.save_data( lst_data )
lst_view_data, update = bd.get_data( region )
dic['data'] = lst_view_data
dic['region'] = region
dic['update'] = update
return render_template('bd_apartment_view.html', **dic)
#----------------------------------------------------------------------------------------- SQLAlchemy
def get_field():
    #print('============ View fields =======================')
lst_field = session.query(View_apartment).all()
lst=[]
for field in lst_field:
#print(field.colname, field.coltitle, sep=', ')
lst.append( {'npp':field.npp, 'name':field.colname, 'title':field.coltitle} )
    lst = sorted( lst, key=lambda x: x['npp'] ) # sort by ordinal number
return lst
def get_country():
query = session.query(City, Country)
query = query.join(City, City.id_country == Country.id)
records = query.all()
lst=[]
for city, country in records:
#print(city, country, sep=', ')
lst.append( {'city':city, 'country':country} )
return lst
def get_region( name, session ):
reg = session.query(Region).filter( Region.name == name).first()
lst=[]
if reg:
lst = [reg.id, reg.name, reg.id_city, reg.date]
return lst
def get_data( region, session ):
reg = get_region( region, session )
records = []
upd=''
if reg:
id_reg = reg[0]
upd = reg[3]
query = session.query( Characteristic, Address ).filter( Address.id_region == id_reg )
query = query.join( Address, Address.id == Characteristic.id_address )
records = query.all()
# for char, address in records:
# print( address, char, sep='; ')
return (records, upd)
def save_data( lst:list, session ):
s1 = lst[0]['region'].replace( ',',' ' )
reg = ' '.join( s1.split(' ')[0:2] )
id_reg = set_region( reg, session )
for v in lst:
id_addr = set_address( id_reg, v['address'], session )
id_charact = set_characteristic( id_addr, v['characteristic'], session)
def set_region( name:str, session:DBSession ):
reg = get_region( name, session )
id_reg = None
if reg:
id_reg = reg[0]
    else: # no such district yet
today = datetime.datetime.today().strftime('%d-%m-%Y')
reg = Region( name=name, id_city=1, date=today)
session.add( reg )
session.commit()
reg = get_region( name, session )
if reg:
id_reg = reg[0]
return id_reg
def get_address( name, id_city, session ):
adr = session.query(Address).filter( Address.name == name).first()
lst=[]
if adr:
lst = [adr.id, adr.name, adr.id_city, adr.id_region]
return lst
def set_address( id_reg, name, session:DBSession ):
adr = get_address( name, 1, session )
id_adr = None
if adr:
id_adr = adr[0]
    else: # no such address yet
adr = Address( name=name, id_city=1, id_region=id_reg )
session.add(adr)
session.commit()
adr = get_address(name, 1, session )
if adr:
id_adr = adr[0]
return id_adr
def get_characteristic( id_addr, session ):
ch = session.query(Characteristic).filter( Characteristic.id_address == id_addr).first()
lst=[]
if ch:
lst = [ch.id, ch.data]
return lst
def set_characteristic( id_addr, data, session ):
ch = get_characteristic( id_addr, session)
id_ch = None
    if ch:
        id_ch = ch[0]  # bug fix: was "id_adr = ch[0]", which left id_ch as None
    else: # no such record yet
ch = Characteristic( data=data, id_address=id_addr )
session.add( ch )
session.commit()
ch = get_characteristic( id_addr, session )
if ch:
id_ch = ch[0]
return id_ch
@app.route("/sqlalchemy_apartment/")
def sqlalchemy_apartment():
return render_template('sqlalchemy_apartment.html')
@app.route("/sqlalchemy_apartment_view/", methods=['POST'])
def sqlalchemy_apartment_view():
    region = request.form['region'] # read the request parameter
    load = request.form.get('load') # read the request parameter
dic = {}
dic['region'] = region
dic['field'] = []
lstField = get_field()
lst = []
for v in lstField:
lst.append(v['title'])
dic['field'] = lst
    # DB data
    # overwrite if requested
if load:
        parser = Parser_price() # create the parsing object for the district
lst_data = parser.data_search(region)
save_data( lst_data, session )
#print( bd.get_id_region( region ) )
lst_view_data, update = get_data( region, session )
dic['data'] = lst_view_data
dic['region'] = region
dic['update'] = update
return render_template('sqlalchemy_apartment_view.html', **dic)
# ********************************************************************
if __name__ == "__main__":
    #print( 'version:', flask.__version__ )
app.run( debug=True )
#Thread(target=app.polling, args=()).start()
|
nilq/baby-python
|
python
|
#from __future__ import absolute_import
from celery import shared_task
#from celery.contrib import rdb #DEBUG
@shared_task
def myflqTaskRequest(analysisID):
from django.conf import settings
from myflq.models import Analysis,AnalysisResults
from django.core.files import File
import subprocess,time,tempfile
#rdb.set_trace() #DEBUG => telnet 127.0.0.1 portnumber
analysis = Analysis.objects.get(id=analysisID)
analysis.progress = 'P'
analysis.save()
tempfigure = tempfile.NamedTemporaryFile(delete=False,suffix='.png')
tempxml = tempfile.NamedTemporaryFile(delete=False,suffix='.xml')
tempfigure.close(), tempxml.close() #Only their filenames need to be passed to the subprocess
command = ['python3','../MyFLq.py', '-p', analysis.configuration.user.password,
'analysis', '--sampleName', analysis.originalFilename,
'--negativeReadsFilter' if analysis.negativeReadsFilter else 'REMOVE',
'--primerBuffer', str(analysis.primerBuffer),
'--kMerAssign' if analysis.kMerAssign else 'REMOVE',
str(analysis.kMerAssign) if analysis.kMerAssign else 'REMOVE',
'--flankOut' if analysis.flankOut else 'REMOVE',
'--stutterBuffer', str(analysis.stutterBuffer),
'--useCompress' if analysis.useCompress else 'REMOVE',
'--withAlignment' if analysis.withAlignment else 'REMOVE',
'--threshold', str(analysis.threshold),
'--clusterInfo' if analysis.clusterInfo else 'REMOVE',
'--randomSubset' if analysis.randomSubset else 'REMOVE',
str(analysis.randomSubset) if analysis.randomSubset else 'REMOVE',
'-r',tempxml.name,'-s', settings.STATIC_URL+'css/resultMyFLq.xsl','-v',tempfigure.name,
analysis.fastq.file.name, analysis.configuration.dbusername(),
analysis.configuration.fulldbname(), 'default']
while 'REMOVE' in command: command.remove('REMOVE')
try:
subprocess.check_output(command,stderr=subprocess.STDOUT)
analysisResult = AnalysisResults(analysis=analysis)
analysisResult.xmlFile.save(tempxml.name,File(open(tempxml.name)))
analysisResult.figFile.save(tempfigure.name,File(open(tempfigure.name,'rb')))
analysisResult.save()
analysis.progress = 'F'
analysis.save()
except subprocess.CalledProcessError as e:
analysis.progress = 'FA'
analysis.save()
print('FAILURE:',e.output.decode())
import os
os.remove(tempxml.name), os.remove(tempfigure.name)
print('Command:\n',' '.join(command))
return 'Executed:\n'+' '.join(command)
@shared_task
def alleleTaskRequest(sequence):
"""
Retrieves the sequence identifier on ENA.
Submits an entry if not already available.
"""
from urllib.request import urlopen
from time import sleep
#urlopen("http://www.ebi.ac.uk/ena/search/showQueryCollections?type=exact") #DEBUG see collection ids
# 20 Human -----Human (EMBL-Bank)
#Submit search for sequence #TODO make work with &type=exact => mail ENA
response = urlopen('http://www.ebi.ac.uk/ena/search/executeSearch?Sequence={seq}&collection_id=20'.format(seq=sequence))
response = response.read().decode().strip()
#Wait for result completion
status = urlopen(response).read().decode()
while not status.startswith('COMPLETE'):
sleep(30)
status = urlopen(response).read().decode()
totalResults = int(status.strip().split('\t')[-1])
#See if there is a full identity match (check first only 10 results)
resultsQuery = response.replace('Status','Results')+'&fields=accession,identity,e_value&offset={offset}&length=10'
for i in range(0,totalResults,10):
        results = urlopen(resultsQuery.format(offset=i))
        results = results.read().decode().strip()  # bug fix: was reading the already-consumed status response
if '\t100\t' in results: break
if '\t100\t' in results:
for result in results.split('\r\n'):
result = result.split('\t')
if result[1] == '100': return result[0] #result[0] is the accession id
#If not returned then sequence has to be submitted
enasubmit = 'https://www-test.ebi.ac.uk/ena/submit/drop-box/submit/'
#https://www.ebi.ac.uk/ena/submit/drop-box/submit/ #TODO for production
|
nilq/baby-python
|
python
|
import json
from importlib import resources
import requests
import explorer
from explorer import configs
from explorer.enums.fields_enum import FieldsEnum as fields
from explorer.utils.parsing import ResponseParser as parser
class BlockchainExplorer:
def __new__(cls, api_key: str, net: str, prefix: str):
with resources.path(configs, f"{net.upper()}-stable.json") as path:
config_path = str(path)
return cls.from_config(api_key=api_key, config_path=config_path, net=net, prefix=prefix)
@staticmethod
def __load_config(config_path: str) -> dict:
with open(config_path, "r") as f:
return json.load(f)
@staticmethod
def __run(func, api_key: str, net: str, prefix: str):
def wrapper(*args, **kwargs):
url = (
f"{prefix.format(net.lower()).replace('-main','')}"
f"{func(*args, **kwargs)}"
f"{fields.API_KEY}"
f"{api_key}"
)
r = requests.get(url, headers={"User-Agent": ""})
return parser.parse(r)
return wrapper
@classmethod
def from_config(cls, api_key: str, config_path: str, net: str, prefix: str):
config = cls.__load_config(config_path)
for func, v in config.items():
if not func.startswith("_"): # disabled if _
attr = getattr(getattr(explorer, v["module"]), func)
setattr(cls, func, cls.__run(attr, api_key, net, prefix))
return cls
class Etherscan(BlockchainExplorer):
def __new__(cls, api_key: str, net: str = "MAIN"):
return BlockchainExplorer(api_key, net, prefix=fields.PREFIX)
class Arbiscan(BlockchainExplorer):
def __new__(cls, api_key: str, net: str = "MAIN"):
return BlockchainExplorer(api_key, net, prefix="https://api.arbiscan.io/api?")
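# Usage sketch: the public methods are attached dynamically from the JSON
# config files at class creation, so the exact method names depend on those
# configs; the API key below is a placeholder.
#
#   eth = Etherscan("YOUR_API_KEY")   # mainnet Etherscan client
#   arb = Arbiscan("YOUR_API_KEY")    # Arbiscan client with its own URL prefix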
|
nilq/baby-python
|
python
|
import feedparser
import justext
import requests
import sys
from database import Database
from bs4 import BeautifulSoup
import re
import mistune
from unidecode import unidecode
def get_text_from_reuters(link):
response = requests.get(link)
resText = response.content.decode("UTF-8", 'ignore')
soup = BeautifulSoup(resText, 'html.parser')
tmp = [x.extract() for x in soup.find_all(class_= "Edition_items_293of")]
for tag in soup.find_all(["script", "meta", "head", "style", "noscript"]):
tag.decompose()
for tag in soup.find_all(True, class_= ["Attribution_content_27_rw", "Image_container_1tVQo"]):
tag.decompose()
paragraphs = justext.justext(soup.prettify(), justext.get_stoplist("English"))
text = "\n\n".join([p.text for p in paragraphs if not p.is_boilerplate])
return text
def get_text_from_cnn(link):
response = requests.get(link)
soup = BeautifulSoup(response.content, 'lxml')
for tag in soup.find_all(["script","img", "meta", "head", "style", "noscript", "h3", "h4"]):
tag.decompose()
for tag in soup.find_all(class_= ["video__end-slate__top-wrapper", "cd__headline", "el__storyelement--standard","el__article--embed", "zn-body__read-more", "el__leafmedia","el__leafmedia--storyhighlights", "zn-body__footer", "el__embedded--standard", "el__storyelement__title", "media__caption"]):
tag.decompose()
title = soup.find("h1", class_ = "pg-headline")
content = soup.find("section", id = "body-text")
return "{}\n\n{}".format(title.get_text(), content.get_text())
def get_text_from_wikipedia(link):
markdown = mistune.Markdown()
response = requests.get(link)
unaccented_string = unidecode(str(response.content)).replace("\\n", " ")
html = unaccented_string
html = markdown(html)
soup = BeautifulSoup(html, 'lxml')
title = soup.find(id = "firstHeading")
content = soup.find("div", class_ = "mw-parser-output")
to_remove = content.find(id = "External_links")
to_remove = content.find(id = "Notes") if content.find(id = "Notes") is not None else to_remove
to_remove = content.find(id = "See_also") if content.find(id = "See_also") is not None else to_remove
to_remove = content.find(id = "Gallery") if content.find(id = "Gallery") is not None else to_remove
to_remove = content.find(id = "Selected_bibliography") if content.find(id = "Selected_bibliography") is not None else to_remove
if to_remove is not None:
parent = list(to_remove.parents)[0]
for tag in parent.find_next_siblings():
tag.decompose()
for tag in content.find_all(["small", "math", "table", "h2", "sup"]):
tag.decompose()
for tag in content.find_all(True, id = ["toc"]):
tag.decompose()
for tag in content.find_all(True, class_ =["mw-headline","IPA","mw-editsection", "quotebox", "infobox", "vertical-navbox", "navbox", "reference", "reflist", "thumb"]):
tag.decompose()
for tag in content.find_all(True, role = "note"):
tag.decompose()
# paren_reg = re.compile("/\(([^()]+)\)/g")
# out = paren_reg.sub('', content.get_text())
out = content.get_text().replace("\\", "")
out = out.replace("'", "")
out = out.replace(";", "")
return "{}\n\n{}".format(title.get_text(), out)
def collect(url, source):
    # `source` is the text-extraction function for the feed's site, e.g.
    # get_text_from_reuters or get_text_from_cnn (it was previously referenced
    # here without being defined anywhere).
    d = feedparser.parse(url)
texts = {}
for entry in d["entries"]:
link = entry["link"]
print("downloading: " + link)
text = source(link)
texts[link] = text
return texts
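# Usage sketch (the feed URL is illustrative): pass one of the extractors
# above as `source`, matched to the feed's site.
#   texts = collect("http://feeds.reuters.com/reuters/topNews", get_text_from_reuters)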
|
nilq/baby-python
|
python
|
from typing import Dict, Type
from .setting import Setting
class SettingRegistry:
def __init__(self):
self._registry: Dict[Type, Type[Setting]] = {}
def register_setting(self, type_hint: Type, setting_cls: Type[Setting]):
self._registry[type_hint] = setting_cls
def get_setting_class_for_type(self, type_hint: Type) -> Type[Setting]:
return self._registry.get(type_hint, Setting)
registry = SettingRegistry()
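# Minimal usage sketch; IntSetting is a hypothetical Setting subclass.
#
#   class IntSetting(Setting):
#       pass
#
#   registry.register_setting(int, IntSetting)
#   assert registry.get_setting_class_for_type(int) is IntSetting
#   assert registry.get_setting_class_for_type(str) is Setting  # falls back to the base class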
|
nilq/baby-python
|
python
|
import boto3
from botocore.exceptions import ClientError
from .config import s3, bucket_name
import logging
log = logging.getLogger("my-logger")
def generate_presigned_url(s3_client, client_method, method_parameters, expires_in):
"""
    Generate a presigned Amazon S3 URL that can be used to perform an action.
"""
try:
url = s3_client.generate_presigned_url(
ClientMethod=client_method,
Params=method_parameters,
ExpiresIn=expires_in
)
log.info("Got presigned URL")
except ClientError:
log.info(
f"Couldn't get a presigned URL for client method {client_method}")
raise
return url
def upload_file(obj):
client_action = 'put_object'
file_path = obj.file_name
url = generate_presigned_url(
s3, client_action, {'Bucket': bucket_name, 'Key': file_path}, 3600)
return {"presigned_url": url, "filename": obj.file_name}
|
nilq/baby-python
|
python
|
#!/usr/bin/env pytest-3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
from minifold.doc_type import DocType
def test_sort():
hdr = DocType.HDR
book = DocType.BOOKS_AND_THESES
assert sorted([hdr, book]) == [book, hdr]
def test_dblp():
from minifold.dblp import DblpConnector
for s in [
"conference and workshop papers",
"conference or workshop",
"journal articles",
"informal publications",
"books and theses",
"editorship"
]:
assert DblpConnector.to_doc_type(s) != DocType.UNKNOWN
def test_hal():
from minifold.hal import HalConnector
for s in ["art", "comm", "report", "hdr", "couv", "patent"]:
assert HalConnector.to_doc_type(s) != DocType.UNKNOWN
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
# Compute e^x for each value in the list
def e_function(my_list):
return [np.exp(val) for val in my_list]
# Plot the function
def plot_func(x, y, farbe, windowName):
plt.figure(windowName)
plt.plot(x, y, color=farbe)
plt.title("My Image")
plt.xlabel("x")
plt.ylabel("e(x)")
plt.show()
a = 1
b = 5
mylist = np.array(range(a, b + 1), dtype=np.int8)
e_list = e_function(mylist)
plot_func(mylist, e_list, "black", "MyWindowName")
|
nilq/baby-python
|
python
|
from recon.core.module import BaseModule
import re
class Module(BaseModule):
meta = {
'name': 'Shodan Hostname Enumerator',
'author': 'Tim Tomes (@LaNMaSteR53)',
'description': 'Harvests hosts from the Shodan API by using the \'hostname\' search operator. Updates the \'hosts\' table with the results.',
'required_keys': ['shodan_api'],
'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
'options': (
('limit', 1, True, 'limit number of api requests per input source (0 = unlimited)'),
),
}
def module_run(self, domains):
limit = self.options['limit']
for domain in domains:
self.heading(domain, level=0)
query = 'hostname:%s' % (domain)
results = self.search_shodan_api(query, limit)
for host in results:
address = host['ip_str']
port = host['port']
if not host['hostnames']:
host['hostnames'] = [None]
for hostname in host['hostnames']:
self.add_ports(ip_address=address, port=port, host=hostname)
self.add_hosts(host=hostname, ip_address=address)
|
nilq/baby-python
|
python
|
from .imports import *
def bn_drop_lin(inp, n_out, bn=True, p=0., actn=None):
    """BatchNorm -> Dropout -> Dense block (Keras functional API)."""
    out = inp
    if bn:
        out = BatchNormalization()(out)
    if p > 0:
        out = Dropout(p)(out)
    use_bias = not bn  # the BatchNorm shift makes a separate Dense bias redundant
    out = Dense(n_out, activation=actn, use_bias=use_bias)(out)
    return out
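# Usage sketch (layer sizes are illustrative; assumes Input is re-exported by
# the star import above, as in Keras):
#   inp = Input(shape=(128,))
#   x = bn_drop_lin(inp, 64, bn=True, p=0.25, actn="relu")
#   out = bn_drop_lin(x, 10, bn=False, actn="softmax")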
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# start from None so the first value always wins, even if every input is negative
highestNumber = None
highestNumberPosition = -1
for i in range(100):
    number = int(input())
    if highestNumber is None or number > highestNumber:
        highestNumber = number
        highestNumberPosition = i + 1
print(highestNumber)
print(highestNumberPosition)
|
nilq/baby-python
|
python
|
import requests, re
def fbvid(url):
    """Extract the HD source URL of a public Facebook video."""
    req = requests.get(url)
    if req.status_code == 200:
        try:
            return {"status": True, "url": re.search('hd_src:"(.+?)"', req.text)[1]}
        except TypeError:
            # re.search returned None: the page exposes no hd_src
            return {"status": False, "msg": "private_video"}
    else:
        return {"status": False, "msg": "invalid link"}
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
import datetime
import numba
import time
import os
def cummul(array):
temp = 1
ret = []
for element in array:
temp *= element
ret.append(temp)
return np.array(ret)
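# Note: for a 1-D input this matches numpy's built-in cumulative product:
#   cummul([1.01, 0.99, 1.02])  ==  np.cumprod([1.01, 0.99, 1.02])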
def listdirs(folder):
return [
d for d in (os.path.join(folder, d1) for d1 in os.listdir(folder))
if os.path.isdir(d)
]
def split_list(alist, wanted_parts=1, slip=2):
length = len(alist)
return [ alist[int(np.clip(i*length// wanted_parts-slip, 0, np.inf)): (i+1)*length // wanted_parts]
for i in range(wanted_parts) ]
def to_timestamp(timestamp, pattern="%Y-%m-%d %H:%M:%S"):
return np.array([time.mktime(datetime.datetime.strptime(t, pattern).timetuple()) for t in
timestamp.tolist()], dtype=np.float64)
def to_datetime(timestamp):
return [datetime.datetime.fromtimestamp(x) for x in timestamp]
def sign(a):
return (a > 0) - (a < 0)
def fillna_arr(array, **kwargs):
df = pd.DataFrame({"v": array})
df = df.fillna(**kwargs)
return df["v"].values
def rolling_window(observations, n, func=lambda x: x):  # bug fix: "func:" made the lambda an annotation, not a default value
ret = []
for i, data in enumerate(observations[n - 1:]):
strip = func(observations[i:i + n])
ret.append(strip)
return np.array(ret)
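# Example: a 3-element rolling mean over a series (values are illustrative):
#   rolling_window(np.array([1., 2., 3., 4.]), 3, np.mean)  ->  array([2., 3.])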
def get_rolling_window_size(timestamp_lst, period):
return len(timestamp_lst[np.where(timestamp_lst <= timestamp_lst[0] + period)[0]])
def filter_array(func, array):
mask = func(array)
index = np.where(mask)[-1]
return array[index]
def append_dict(d, d1):
for key, value in d1.items():
d[key] = value
class LabeledScalarStorage:
def __init__(self, *args):
self.value = {}
for a in args:
self.add_label(a)
def __add__(self, other):
for key, value in other.value.items():
self.value[key] = value
return self
def __getitem__(self, item):
return self.value[item]
def __setitem__(self, key, value):
self.value[key] = value
def keys(self):
return self.value.keys()
def values(self):
return self.value.values()
def items(self):
return self.value.items()
def add_label(self, label):
self.value[label] = []
def add_scalar(self, label, scalar):
self.value[label].append(scalar)
def extend(self, other):
for key in self.value.keys():
self[key].extend(other[key])
@property
def dataframe(self):
return pd.DataFrame(self.value)
class StructureDataset:
def __init__(self):
self.value = {}
def __getitem__(self, item):
return self.value[item]
def __setitem__(self, x, y):
if isinstance(self.value[x], StructureDataset):
raise ValueError("can't __setitem__ to group")
self.value[x] = y
def keys(self):
return self.value.keys()
def values(self):
return self.value.values()
def items(self):
return self.value.items()
def create_group(self, name):
self.value[name] = StructureDataset()
return self.value[name]
def create_dataset(self, name, data):
self.value[name] = np.array(data)
def get(self, *args):
temp = self
for a in args:
temp = temp[a]
return temp
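# Usage sketch for the two containers above (values are illustrative):
#   store = LabeledScalarStorage("pnl", "drawdown")
#   store.add_scalar("pnl", 1.2); store.add_scalar("drawdown", -0.3)
#   store.dataframe  # one-row DataFrame with columns pnl, drawdown
#
#   ds = StructureDataset()
#   grp = ds.create_group("prices")
#   grp.create_dataset("close", [1.0, 1.1])
#   ds.get("prices", "close")  # -> array([1. , 1.1])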
|
nilq/baby-python
|
python
|
text = input("enter a string: ")
isPalindromic = text[0] == text[len(text) - 1]
i, j = 0, len(text)- 1
while i < j and isPalindromic:
isPalindromic = text[i] == text[j]
i, j = i + 1, j - 1
print(isPalindromic)
|
nilq/baby-python
|
python
|
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.properties import ListProperty
from kivy.uix.behaviors import ButtonBehavior
import urllib.request
import urllib.error
import urllib.parse
import json
from widget.ConsulDetails import ConsulDetailsApp
from widget.ConsulDetailsModel import ConsulDetailsModel
from widget.ConsulDetailsModel import ConsulChecksModel
Builder.load_file("widget/template/ConsulWidget.kv")
class DetailButton(ButtonBehavior, Label):
def __init__(self, **kwargs):
super(DetailButton, self).__init__(**kwargs)
def display_details(self, name):
print("Show details for %s" % name)
details_model = self.__make_details_object(name)
popup = ConsulDetailsApp(details_model).build()
popup.open()
@staticmethod
def __make_details_object(name):
details_model = ConsulDetailsModel()
details_model.name = name
req = urllib.request.Request('http://localhost:8500/v1/health/service/%s?dc=dc1&token=' % name)
try:
response = urllib.request.urlopen(req)
data = json.loads(response.read().decode('utf-8'))
details_model.service_id = data[0]['Service']['ID']
details_model.service_name = data[0]['Service']['Service']
details_model.service_adres = data[0]['Service']['Address']
details_model.service_port = data[0]['Service']['Port']
for check in data[0]['Checks']:
checkObject = ConsulChecksModel()
checkObject.check_id = check['CheckID']
checkObject.name = check['Name']
checkObject.status = check['Status']
checkObject.output = check['Output']
checkObject.service_id = check['ServiceID']
checkObject.service_name = check['ServiceName']
checkObject.status_color(check['Status'])
details_model.checks.append(checkObject)
except urllib.error.URLError as e:
print(e.reason)
return details_model
class ConsulWidget(BoxLayout):
state = 'all'
data = ListProperty([])
rv_data = ListProperty([])
def __init__(self, **kwargs):
super(ConsulWidget, self).__init__(**kwargs)
def on_enter(self):
print('on_enter')
self.make_data_request()
self.start()
def on_leave(self):
print('on_leave')
self.stop()
def make_data_request(self):
req = urllib.request.Request('http://localhost:8500/v1/internal/ui/services?dc=dc1&token=')
try:
response = urllib.request.urlopen(req)
self.data = json.loads(response.read().decode('utf-8'))
self.test_subset()
except urllib.error.URLError as e:
print(e.reason)
def test_subset(self, state=state):
if not len(self.data):
self.make_data_request()
else:
self.state = state
self.rv_data = [{'name': str(item['Name']), 'passing' : str(item['ChecksPassing']), 'warning' : str(item['ChecksWarning']), 'critical': str(item['ChecksCritical']), 'statuscolor' : self.__setColor(item)} for item in self.data if self.__match_state(state, item)]
@staticmethod
def __match_state(state, item):
        # bug fixes: use == for string comparison ('is' only works by interning
        # accident), and parenthesize the 'failing' check so a warning alone
        # does not match regardless of the requested state
        if state == 'all':
            return True
        elif state == 'failing' and (item['ChecksCritical'] or item['ChecksWarning']):
            return True
        elif state == 'succes' and not item['ChecksCritical'] and not item['ChecksWarning']:
            return True
@staticmethod
def __setColor(item):
c = [0, 1, 0.3, 0.2]
if item['ChecksCritical']:
c = [1, 0, 0, 0.2]
elif item['ChecksWarning']:
c = [1, 0.6, 0, 0.2]
return c
def start(self):
print('Start data refresh timer')
Clock.schedule_interval(self.refresh_data, 5)
def stop(self):
print('Stop data refresh timer')
Clock.unschedule(self.refresh_data)
def refresh_data(self, dt):
print('refresh consul data')
self.data = []
self.make_data_request()
class Consul(App):
def build(self):
return ConsulWidget()
|
nilq/baby-python
|
python
|
#!/bin/python3
import json
import os
import sys
import io
import time
from specimen import specimen
from growlab_v1_http_client import growlab_v1_http_client
from readingsbuilder import readingsbuilder
from pathbuilder import pathbuilder
if __name__ == "__main__":
print("Starting growlab")
config = {}
try:
with open("./config.json") as f:
config = json.loads(f.read())
except Exception as e:
sys.stderr.write("Error: {}".format(e))
sys.exit(1)
print("Loaded config, saving images to {}".format(
config["images"]["output_directory"]))
http_client = growlab_v1_http_client(config["http"])
thp_readings = http_client.get_thp_readings()
light_intensity_readings = http_client.get_light_intensity_readings()
camera_mode = http_client.get_camera_mode()
timestamp_string = time.strftime("%Y-%m-%d %H:%M:%S")
r_builder = readingsbuilder(
thp_readings, light_intensity_readings, camera_mode, timestamp_string)
readings = r_builder.build_readings_structrue()
# print(readings)
readings_pathbuilder = pathbuilder(config["data"]["output_directory"],
"." + config["data"]["encoding"], timestamp_string)
readings_filepath = readings_pathbuilder.build_file_path()
print("Readings file output path [", readings_filepath, "]")
with open(readings_filepath, 'w') as readings_output_file:
json.dump(readings, readings_output_file)
is_image_taken = False
camera_image = http_client.get_camera_image()
if len(camera_image) != 0:
is_image_taken = True
if is_image_taken:
        frame = io.BytesIO(camera_image)  # reuse the image fetched above instead of requesting it again
pwd = os.getcwd()
output_path = pwd + "/html"
# print("Html page content output path [", output_path, "]")
    os.makedirs(output_path, exist_ok=True)  # replaces a bare try/except around os.mkdir
spec = specimen(config["text"], config["images"])
pb = pathbuilder(config["images"]["output_directory"],
"." + config["images"]["encoding"], timestamp_string)
image_file_path = pb.build_file_path()
print("Image file output path [", image_file_path, "]")
if is_image_taken:
spec.save_image(image_file_path, frame, readings)
spec.save_html(image_file_path, output_path, readings, is_image_taken)
|
nilq/baby-python
|
python
|