content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from targets.firefox.fx_testcase import *
class Test(FirefoxTest):
    @pytest.mark.details(
        description='Browser controls work as expected.',
        locale=['en-US'],
        test_case_id='119481',
        test_suite_id='1998'
    )
    def run(self, firefox):
        """Check hover and click behavior of the window controls
        (minimize, maximize/restore, close) in the browser's upper corner.
        """
        window_controls_minimize_pattern = Pattern('window_controls_minimize.png')
        hover_minimize_control_pattern = Pattern('hover_minimize_control.png')
        window_controls_restore_pattern = Pattern('window_controls_restore.png')
        hover_restore_control_pattern = Pattern('hover_restore_control.png')
        window_controls_maximize_pattern = Pattern('window_controls_maximize.png')
        hover_maximize_control_pattern = Pattern('hover_maximize_control.png')
        window_controls_close_pattern = Pattern('window_controls_close.png')
        hover_close_control_pattern = Pattern('hover_close_control.png')

        navigate(LocalWeb.FIREFOX_TEST_SITE)
        assert exists(LocalWeb.FIREFOX_LOGO, 10), 'Page successfully loaded, firefox logo found.'

        if OSHelper.is_linux():
            # Park the cursor in the corner so it does not cover the controls.
            Mouse().move(Location(0, 0))

        hover(window_controls_minimize_pattern)
        assert exists(hover_minimize_control_pattern, 10), 'Hover over the \'minimize\' button works correctly.'

        if OSHelper.is_windows() or OSHelper.is_linux():
            hover(window_controls_restore_pattern)
            assert exists(hover_restore_control_pattern, 10), 'Hover over the \'restore\' button works correctly.'

        if OSHelper.is_mac():
            # On macOS the controls are tiny; hover by pixel offset from the
            # maximize control instead of separate pattern matches.
            middle = find(hover_maximize_control_pattern)
            Mouse().move(Location(middle.x + 7, middle.y + 5))
            assert exists(hover_maximize_control_pattern, 10), 'Hover over the \'maximize\' button works correctly.'
            Mouse().move(Location(middle.x - 35, middle.y + 5))
            assert exists(hover_close_control_pattern.similar(0.7), 10), \
                'Hover over the \'close\' button works correctly.'
        else:
            hover(window_controls_close_pattern)
            assert exists(hover_close_control_pattern, 10), 'Hover over the \'close\' button works correctly.'

        if OSHelper.is_windows() or OSHelper.is_linux():
            click_window_control('restore', 'main')
            time.sleep(Settings.DEFAULT_UI_DELAY)
            hover(window_controls_maximize_pattern)
            assert exists(hover_maximize_control_pattern, 10), \
                'Hover over the \'maximize\' button works correctly; Window successfully restored.'

        # BUG FIX: previously `if OSHelper:` - always truthy (tests the class
        # object, not a platform). The guard exists to move the cursor away
        # before minimizing; restricted to Linux, mirroring the Linux-only
        # mouse handling above. NOTE(review): confirm intended platform.
        if OSHelper.is_linux():
            hover(Pattern('home_button.png'))

        click_window_control('minimize', 'main')
        time.sleep(Settings.DEFAULT_UI_DELAY)
        try:
            assert wait_vanish(LocalWeb.FIREFOX_LOGO, 10), 'Window successfully minimized.'
        except FindError:
            raise FindError('Window not minimized.')

        restore_window_from_taskbar()

        if OSHelper.is_windows():
            click_window_control('maximize', 'main')
        assert exists(LocalWeb.FIREFOX_LOGO, 10), 'Window successfully opened again.'

        click_window_control('close', 'main')
        try:
            assert wait_vanish(LocalWeb.FIREFOX_LOGO, 10), 'Window successfully closed.'
        except FindError:
            # BUG FIX: the failure message previously claimed success.
            assert False, 'Window could not be closed.'
| tests/firefox/toolbars_window_controls/browser_controls_upper_corner.py | 3,530 | This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. | 192 | en | 0.934305 |
# Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
from __future__ import annotations
import random
import time
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Callable, Dict
import numpy as np
from inferlo.base.factors.discrete_factor import DiscreteFactor
from inferlo.base import InferenceResult
if TYPE_CHECKING:
from inferlo import GraphModel
recordSentMessages = True
class Prob:
    """Equivalent of dai::Prob.

    Wrapper around a numpy vector - represents a (not necessarily
    normalized) probability distribution.
    """

    @staticmethod
    def uniform(n):
        """Creates uniform probability distribution on n elements."""
        return Prob.same_value(n, 1.0 / n)

    @staticmethod
    def same_value(n: int, val: float):
        """Creates vector of length n filled with the same value."""
        return Prob(np.ones(n, dtype=np.float64) * val)

    def __init__(self, p: np.ndarray):
        # Raw probability vector. Not copied - caller retains ownership.
        self.p = p

    def fill(self, x):
        """Sets all entries to x."""
        self.p = np.ones_like(self.p) * x

    def clone(self):
        """Makes an independent copy."""
        return Prob(np.array(self.p))

    def __imul__(self, other):
        # Element-wise in-place product (message multiplication).
        self.p *= other.p
        return self

    def __iadd__(self, other):
        # Element-wise in-place sum (log-domain message "multiplication").
        self.p += other.p
        return self

    def normalize(self):
        """Normalizes distribution in place so entries sum to 1."""
        self.p /= np.sum(self.p)

    def entropy(self) -> float:
        """Calculates entropy of the distribution.

        Zero entries contribute 0 (the limit of x*log(x) as x -> 0),
        matching the zero-guard convention used by dist_kl. Previously
        zeros produced NaN via 0 * log(0).
        """
        return - np.sum(self.p * np.log(self.p + (self.p == 0)))

    def __str__(self):
        return str(self.p)
def dist_kl(p: Prob, q: Prob):
    """Kullback-Leibler divergence between two probability distributions.

    Zero entries of p contribute 0 to the sum: the guard mask replaces 0
    with 1 inside each logarithm exactly where p is zero, so no
    0 * (-inf) products (NaN) appear.

    BUG FIX: the first guard previously tested ``p == 0`` - comparing the
    Prob wrapper itself to 0, which is always False - instead of
    ``p.p == 0``, so any zero entry of p yielded NaN.
    """
    zero_mask = (p.p == 0)
    kl_div = p.p * (np.log(p.p + zero_mask) - np.log(q.p + zero_mask))
    return np.sum(kl_div)
def dist_linf(p: Prob, q: Prob):
    """Distance between two probability distributions in L_infinity norm.

    Returns the largest absolute element-wise difference.
    """
    abs_diff = np.abs(p.p - q.p)
    return np.max(abs_diff)
@dataclass
class Neighbor:
    """Describes the neighbor relationship of two nodes in a factor graph.

    Corresponds to dai::Neighbor. Used both for "factors adjacent to a
    variable" (BP.nbV) and "variables adjacent to a factor" (BP.nbF).
    """
    # Corresponds to the index of this Neighbor entry in the vector of
    # neighbors of the owning node.
    iter: int
    # Contains the absolute index of the neighboring node
    # (a factor id in nbV lists, a variable id in nbF lists).
    node: int
    # Contains the "dual" index (i.e., the index of this node in the Neighbors
    # vector of the neighboring node).
    dual: int
@dataclass
class EdgeProp:
    """Type used for storing edge (variable-factor) properties."""
    index: np.ndarray  # Index cached for this edge.
    message: Prob  # Old message living on this edge.
    new_message: Prob  # New message living on this edge.
    residual: float  # Residual for this edge (consumed by SEQMAX updates).
class LDFactor:
    """Equivalent of dai::Factor.

    Consists of set of variables and flattened values assigned to all var
    combinations. Variables are assigned like in Inferlo, but tensor is
    transposed (variable order reversed) before flattening, to match
    libDAI's value ordering.
    """

    def __init__(self, model: GraphModel, var_idx: List[int], p: Prob):
        self.model = model  # Owning model - provides variable domain sizes.
        self.var_idx = var_idx  # Ids of the variables this factor depends on.
        self.p = p  # Flattened values in libDAI (reversed-axis) order.

    @staticmethod
    def uniform(model: GraphModel, var_idx: List[int]):
        """Creates factor defining uniform distribution."""
        total_domain_size = 1
        for i in var_idx:
            total_domain_size *= model.get_variable(i).domain.size()
        return LDFactor(model, var_idx, Prob.uniform(total_domain_size))

    @staticmethod
    def from_inferlo_factor(f: DiscreteFactor):
        """Converts inferlo.DiscreteFactor to LDFactor."""
        # Reverse the axes, then flatten - produces libDAI's value order.
        rev_perm = list(range(len(f.var_idx)))[::-1]
        prob = f.values.transpose(rev_perm).reshape(-1)
        return LDFactor(f.model, f.var_idx, Prob(prob))

    def to_inferlo_factor(self) -> DiscreteFactor:
        """Converts LDFactor to inferlo.DiscreteFactor."""
        # Undo the axis reversal done in from_inferlo_factor.
        sizes = [self.model.get_variable(i).domain.size()
                 for i in self.var_idx[::-1]]
        libdai_tensor = self.p.p.reshape(sizes)
        rev_perm = list(range(len(self.var_idx)))[::-1]
        inferlo_tensor = libdai_tensor.transpose(rev_perm)
        return DiscreteFactor(self.model, self.var_idx, inferlo_tensor)

    def combine_with_factor(self, other: LDFactor,
                            func: Callable[[float, float], float]):
        """Applies binary function to two factors, updating self in place.

        ``other``'s variables must be a subset of this factor's variables;
        its values are broadcast over the remaining variables.
        Returns self.
        """
        # Check that variables of the other factor are subset of variables of
        # the given factor.
        for i in other.var_idx:
            assert i in self.var_idx
        # Now, update every value of given factor with corresponding value of
        # the other factor.
        for idx in range(len(self.p.p)):
            j = other._encode_value_index(self._decode_value_index(idx))
            self.p.p[idx] = func(self.p.p[idx], other.p.p[j])
        return self

    def __iadd__(self, other: LDFactor):
        return self.combine_with_factor(other, lambda x, y: x + y)

    def __imul__(self, other: LDFactor):
        return self.combine_with_factor(other, lambda x, y: x * y)

    def marginal(self, new_var_idx, normed=True) -> LDFactor:
        """Sums factor over all variables except ``new_var_idx``."""
        result = self.to_inferlo_factor().marginal(new_var_idx)
        result = LDFactor.from_inferlo_factor(result)
        if normed:
            result.p.normalize()
        return result

    def max_marginal(self, new_var_idx, normed=True) -> LDFactor:
        """Eliminates variables not in ``new_var_idx`` by taking maximum."""
        result = self.to_inferlo_factor().max_marginal(new_var_idx)
        result = LDFactor.from_inferlo_factor(result)
        if normed:
            result.p.normalize()
        return result

    def clone(self):
        """Makes a copy of this factor (values copied, model/ids shared)."""
        return LDFactor(self.model, self.var_idx, self.p.clone())

    def _decode_value_index(self, idx):
        """Returns dict from variable id to variable value for flat index."""
        ans = dict()
        for var_id in self.var_idx:
            size = self.model.get_variable(var_id).domain.size()
            ans[var_id] = idx % size
            idx //= size
        return ans

    def _encode_value_index(self, var_values: Dict[int, int]):
        """Inverse of _decode_value_index for this factor's variables."""
        ans = 0
        base = 1
        for var_id in self.var_idx:
            size = self.model.get_variable(var_id).domain.size()
            ans += base * var_values[var_id]
            base *= size
        return ans

    def __str__(self):
        return "%s %s" % (self.var_idx, self.p.p)
class BP:
    """Belief propagation algorithm, equivalent to dai::BP.

    This class is ported from libDAI's dai::BP class. It runs belief
    propagation algorithm for graphical model with discrete variables with
    arbitrary factor graph.

    At the moment MAXPROD algorithm (for finding MAP state) is not supported.

    Use BP.infer() to perform inference.
    """

    @staticmethod
    def infer(model, options=None):
        """Runs inference BP algorithm for given model.

        Supports all options which libdai::BP supports. Refer to libDAI
        documentation for options description.

        :param model: the GraphModel to run inference on.
        :param options: libDAI-style property dict; defaults to sequential
            random updates ('SEQRND') with tolerance 1e-9, linear domain.
        :return: InferenceResult(log partition function, marginals).
        """
        if options is None:
            options = {'tol': 1e-9, 'logdomain': 0, 'updates': 'SEQRND'}
        inf_alg = BP(model, options)
        inf_alg.init()
        inf_alg.run()
        return InferenceResult(inf_alg.log_z(), inf_alg.marg_prob())

    def __init__(self, model: GraphModel, props: Dict[str, str]):
        """Builds BP state for ``model``.

        :param model: the graphical model.
        :param props: libDAI property map. 'tol' and 'updates' are
            required; 'logdomain', 'inference', 'verbose', 'damping',
            'maxiter', 'maxtime' have defaults (see below).
        """
        # Stores all edge properties
        self._edges: List[List[EdgeProp]] = []
        # Maximum difference between variable beliefs encountered so far
        self._maxdiff = 0.0
        # Number of iterations needed
        self._iters = 0
        # The history of message updates (only recorded if
        # recordSentMessages is true)
        self._sentMessages = []
        # Stores variable beliefs of previous iteration
        self._oldBeliefsV: List[LDFactor] = []
        # Stores factor beliefs of previous iteration
        self._old_beliefs_f: List[LDFactor] = []
        # Stores the update schedule
        self._update_seq = []
        self.model = model
        self.factors = [
            LDFactor.from_inferlo_factor(
                DiscreteFactor.from_factor(f)) for f in model.get_factors()]
        self.nrVars = model.num_variables
        self.nrFactors = len(self.factors)
        # Prepare Neighbors.
        # For every variable - factors, referencing it.
        self.nbV: List[List[Neighbor]] = [[] for _ in range(self.nrVars)]
        # For every factor - variables it references.
        self.nbF: List[List[Neighbor]] = [[] for _ in range(self.nrFactors)]
        for factor_id in range(len(self.factors)):
            factor = self.factors[factor_id]
            for var_iter_index in range(len(factor.var_idx)):
                var_id = factor.var_idx[var_iter_index]
                nbv_len = len(self.nbV[var_id])
                nbf_len = len(self.nbF[factor_id])
                # Factor's neighbor list grows in step with its var list.
                assert var_iter_index == nbf_len
                self.nbV[var_id].append(
                    Neighbor(
                        iter=nbv_len,
                        node=factor_id,
                        dual=nbf_len))
                self.nbF[factor_id].append(
                    Neighbor(
                        iter=nbf_len,
                        node=var_id,
                        dual=nbv_len))
        # Parse properties.
        self.logdomain = bool(int(props.get('logdomain', 0)))
        self.updates = props['updates']
        self.inference = props.get('inference', 'SUMPROD')
        self.verbose = int(props.get('verbose', 0))
        self.damping = float(props.get('damping', 0.0))
        self.maxiter = int(props.get('maxiter', 10000))
        self.maxtime = float(props.get('maxtime', np.inf))
        self.tol = float(props['tol'])
        self._construct()

    def _construct(self):
        """Helper function for constructors: allocates edges and beliefs."""
        # Create edge properties: one EdgeProp per (variable, factor) edge.
        self._edges = []
        for i in range(self.nrVars):
            self._edges.append([])
            for _ in self.nbV[i]:
                size = self._var_size(i)
                new_ep = EdgeProp(
                    index=None,
                    message=Prob.uniform(size),
                    new_message=Prob.uniform(size),
                    residual=0.0)
                self._edges[i].append(new_ep)
        # Create old beliefs (uniform initially).
        self._oldBeliefsV = []
        for i in range(self.nrVars):
            self._oldBeliefsV.append(LDFactor.uniform(self.model, [i]))
        self._old_beliefs_f = []
        for ii in range(self.nrFactors):
            self._old_beliefs_f.append(
                LDFactor.uniform(
                    self.model,
                    self.factors[ii].var_idx))
        # Create update sequence: one (variable, edge-iter) pair per edge.
        self._update_seq = []
        for ii in range(self.nrFactors):
            for i in self.nbF[ii]:
                self._update_seq.append((i.node, i.dual))

    def init(self):
        """Initializes messages with default values.

        Uses 0 in log-domain and 1 in linear domain (i.e. the
        multiplicative/additive identity for the respective "product").
        """
        c = 0.0 if self.logdomain else 1.0
        for i in range(self.nrVars):
            for ii in self.nbV[i]:
                self._edges[i][ii.iter].message.fill(c)
                self._edges[i][ii.iter].new_message.fill(c)
                if self.updates == 'SEQMAX':
                    self._update_residual(i, ii.iter, 0.0)
        self._iters = 0

    def find_max_residual(self):
        """Finds the edge (variable index, edge iter) with max residual.

        Returns None if there are no edges at all.
        """
        # TODO: optimize with a lookup table.
        max_r = -np.inf
        best_edge = None
        for i in range(self.nrVars):
            for _I in range(len(self.nbV[i])):
                if self._edges[i][_I].residual > max_r:
                    max_r = self._edges[i][_I].residual
                    best_edge = i, _I
        return best_edge

    def _calc_incoming_message_product(
            self,
            ii: int,
            without_i: bool,
            i: int) -> Prob:
        """Calculates the product of factor ``ii`` and its incoming messages.

        If without_i == True, the message coming from variable i is omitted
        from the product. In log-domain the "product" is a sum of logs.

        This function is used by _calc_new_message and _calc_belief_f.
        """
        f_prod = self.factors[ii].clone()
        if self.logdomain:
            f_prod.p.p = np.log(f_prod.p.p)
        # Calculate product of incoming messages and factor I
        for j in self.nbF[ii]:
            if without_i and (j.node == i):
                continue
            # prod_j will be the product of messages coming into j
            size = self._var_size(j.node)
            default_val = 0.0 if self.logdomain else 1.0
            prod_j = Prob.same_value(size, default_val)
            for J in self.nbV[j.node]:
                if J.node != ii:  # for all J in nb(j) \ I
                    if self.logdomain:
                        prod_j += self._edges[j.node][J.iter].message
                    else:
                        prod_j *= self._edges[j.node][J.iter].message
            # multiply prod with prod_j (broadcast over factor's values)
            if self.logdomain:
                f_prod += LDFactor(self.model, [j.node], prod_j)
            else:
                f_prod *= LDFactor(self.model, [j.node], prod_j)
        return f_prod.p

    def _calc_new_message(self, i: int, _I: int):
        """Calculates updated message I->i and stores it in new_message."""
        ii = self.nbV[i][_I].node
        if len(self.factors[ii].var_idx) == 1:  # optimization
            # Single-variable factor: message is the factor itself.
            marg = self.factors[ii].p.clone()
        else:
            Fprod = self.factors[ii].clone()
            Fprod.p = self._calc_incoming_message_product(ii, True, i)
            if self.logdomain:
                # Back to linear domain, shifted by max for stability.
                Fprod.p.p = np.exp(Fprod.p.p - np.max(Fprod.p.p))
            # Marginalize onto i
            if self.inference == 'SUMPROD':
                marg = Fprod.marginal([i]).p
            else:
                marg = Fprod.max_marginal([i]).p
        # Store result
        if self.logdomain:
            self._edges[i][_I].new_message = Prob(np.log(marg.p))
        else:
            self._edges[i][_I].new_message = marg
        # Update the residual if necessary
        if self.updates == 'SEQMAX':
            self._update_residual(
                i,
                _I,
                dist_linf(
                    self._edges[i][_I].new_message,
                    self._edges[i][_I].message))

    # BP::run does not check for NANs for performance reasons
    # Somehow NaNs do not often occur in BP...
    def run(self):
        """Runs BP algorithm.

        Iterates until the belief change drops below ``tol``, or
        ``maxiter`` iterations / ``maxtime`` seconds are exceeded.
        Returns the final maximum belief difference.
        """
        tic = time.time()
        # Do several passes over the network until maximum number of iterations
        # has been reached or until the maximum belief difference is smaller
        # than tolerance.
        max_diff = np.inf
        while (self._iters < self.maxiter) and (
                max_diff > self.tol) and (time.time() - tic) < self.maxtime:
            if self.updates == 'SEQMAX':
                if self._iters == 0:
                    # do the first pass
                    for i in range(self.nrVars):
                        for ii in self.nbV[i]:
                            self._calc_new_message(i, ii.iter)
                # Maximum-Residual BP [\ref EMK06]
                for _ in range(len(self._update_seq)):
                    # Update the message with the largest residual.
                    i, _I = self.find_max_residual()
                    self._update_message(i, _I)
                    # I->i has been updated, which means that residuals for all
                    # J->j with J in nb[i]\I and j in nb[J]\i have to be
                    # updated
                    for J in self.nbV[i]:
                        if J.iter != _I:
                            for j in self.nbF[J.node]:
                                _J = j.dual
                                # NOTE(review): j is a Neighbor and i an int,
                                # so `j != i` is always True; libDAI's check
                                # is on the variable index, i.e. likely
                                # `j.node != i` was intended. As written, the
                                # message into i itself is also recomputed
                                # (redundant work). Confirm before changing.
                                if j != i:
                                    self._calc_new_message(j.node, _J)
            elif self.updates == 'PARALL':
                # Parallel updates: compute all new messages first, then
                # commit them all at once.
                for i in range(self.nrVars):
                    for ii in self.nbV[i]:
                        self._calc_new_message(i, ii.iter)
                for i in range(self.nrVars):
                    for ii in self.nbV[i]:
                        self._update_message(i, ii.iter)
            else:
                # Sequential updates (SEQRND shuffles the order each pass).
                if self.updates == 'SEQRND':
                    random.shuffle(self._update_seq)
                for e in self._update_seq:
                    self._calc_new_message(e[0], e[1])
                    self._update_message(e[0], e[1])
            # Calculate new beliefs and compare with old ones
            max_diff = -np.inf
            for i in range(self.nrVars):
                b = self._belief_v(i).clone()
                max_diff = max(max_diff,
                               dist_linf(b.p, self._oldBeliefsV[i].p))
                self._oldBeliefsV[i] = b
            for ii in range(self.nrFactors):
                b = self._belief_f(ii).clone()
                max_diff = max(max_diff,
                               dist_linf(b.p, self._old_beliefs_f[ii].p))
                self._old_beliefs_f[ii] = b
            self._iters += 1
        if max_diff > self._maxdiff:
            self._maxdiff = max_diff
        return max_diff

    def _calc_belief_v(self, i: int) -> Prob:
        """Unnormalized belief of variable i: product of incoming messages."""
        p = Prob.same_value(self.model.get_variable(i).domain.size(),
                            0.0 if self.logdomain else 1.0)
        for ii in self.nbV[i]:
            if self.logdomain:
                p += self._edges[i][ii.iter].new_message
            else:
                p *= self._edges[i][ii.iter].new_message
        return p

    def _belief_v(self, i: int) -> LDFactor:
        """Normalized belief of variable i as a single-variable factor."""
        p = self._calc_belief_v(i)
        if self.logdomain:
            # Convert to linear domain, shifted by max for stability.
            p.p = np.exp(p.p - np.max(p.p))
        p.normalize()
        return LDFactor(self.model, [i], p)

    def _belief_f(self, ii) -> LDFactor:
        """Normalized belief of factor ii over its variables."""
        p = self._calc_belief_f(ii)
        if self.logdomain:
            p.p = np.exp(p.p - np.max(p.p))
        p.normalize()
        return LDFactor(self.model, self.factors[ii].var_idx, p)

    def _calc_belief_f(self, ii: int) -> Prob:
        # without_i=False, so the `i` argument (0) is ignored.
        return self._calc_incoming_message_product(ii, False, 0)

    def log_z(self) -> float:
        """Calculates logarithm of the partition function.

        Computed from current beliefs: variable entropies weighted by
        (1 - degree) minus KL divergences between factor beliefs and
        factors (a Bethe-style free-energy expression).
        """
        ans = 0.0
        for i in range(self.nrVars):
            ans += (1.0 - len(self.nbV[i])) * self._belief_v(i).p.entropy()
        for ii in range(self.nrFactors):
            ans -= dist_kl(self._belief_f(ii).p, self.factors[ii].p)
        return ans

    def marg_prob(self) -> np.ndarray:
        """Calculates marginal probabilities.

        Returns array of shape (num variables, max domain size); rows for
        smaller domains are zero-padded on the right.
        """
        max_domain_size = np.max([self._var_size(i)
                                  for i in range(self.nrVars)])
        ans = np.zeros((self.nrVars, max_domain_size), dtype=np.float64)
        for var_id in range(self.nrVars):
            ans[var_id, 0:self._var_size(var_id)] = self._belief_v(var_id).p.p
        return ans

    def _var_size(self, var_idx):
        # Domain size of the given variable.
        return self.model.get_variable(var_idx).domain.size()

    def _update_message(self, i: int, _I: int):
        """Commits new_message on edge (i, _I), with optional damping."""
        if recordSentMessages:
            self._sentMessages.append((i, _I))
        if self.damping == 0.0:
            self._edges[i][_I].message = self._edges[i][_I].new_message.clone()
            if self.updates == 'SEQMAX':
                self._update_residual(i, _I, 0.0)
        else:
            d = self.damping
            old_msg = self._edges[i][_I].message.p
            new_msg = self._edges[i][_I].new_message.p
            if self.logdomain:
                # Arithmetic mix in log-domain == geometric mix in linear.
                self._edges[i][_I].message.p = (
                    (old_msg * d) + (new_msg * (1.0 - d)))
            else:
                self._edges[i][_I].message.p = (
                    (old_msg ** d) * (new_msg ** (1.0 - d)))
            if self.updates == 'SEQMAX':
                new_res = dist_linf(
                    self._edges[i][_I].new_message,
                    self._edges[i][_I].message)
                self._update_residual(i, _I, new_res)

    def _update_residual(self, i, _I, r):
        # Sets the residual for edge (i, _I); used by SEQMAX scheduling.
        self._edges[i][_I].residual = r
| inferlo/generic/libdai_bp.py | 20,194 | Belief propagation algorithm, equivalent to dai::BP.
This class is ported from libDAI's dai::BP class. It runs belief
propagation algorithm for graphical model with discrete variables with
arbitrary factor graph.
At the moment MAXPROD algorithm (for finding MAP state) is not supported.
Use BP.infer() to perform inference.
Type used for storing edge properties.
Equivalent of dai::Factor.
Consists of set of variables and flattened values assigned to all var
combinations. Variables are assigned like in Inferlo, but tensor is
transposed before flattening.
Describes the neighbor relationship of two nodes in a graph.
Corresponds to dai::Neighbor.
Equivalent of dai::Prob.
Wrapper around a vector - represents probability distribution.
Calculate the product of factor I and the incoming messages.
If without_i == True, the message coming from variable i is omitted
from the product.
This function is used by calc_new_message and calc_belief_f.
Helper function for constructors.
Returns dict from variable id to variable value.
Makes a copy.
Makes a copy of this factor.
Applies binary function to two factors.
Kullback-Leibler divergence between two probability distributions.
Distance between two probability distributions in L_infinity norm.
Calculate entropy of the distribution.
Sets all entries to x.
Find max residual.
Converts inferlo.DiscreteFactor to LDFactor.
Runs inference BP algorithm for given model.
Supports all options which libdai::BP supports. Refer to libDAI
documentation for options description.
Initializes messages with default values.
Calculates logarithm of the partition function.
Calculates marginal probabilities.
Sums factor over some variables.
Eliminates certain variables by finding maximum.
Normalize distribution.
Runs BP algorithm.
Creates vector filled with the same value.
Converts LDFactor to inferlo.DiscreteFactor.
Creates uniform probability distribution.
Creates factor defining uniform distribution.
Copyright (c) 2020, The InferLO authors. All rights reserved. Licensed under the Apache License, Version 2.0 - see LICENSE file. Corresponds to the index of this Neighbor entry in the vector of neighbors. Contains the absolute index of the neighboring node. Contains the "dual" index (i.e., the index of this node in the Neighbors vector of the neighboring node) Index cached for this edge. Old message living on this edge. New message living on this edge Residual for this edge Check that variables of the other factor are subset of variables of the given factor. Now, update every value of given factor with corresponding value of the other factor. Stores all edge properties Maximum difference between variable beliefs encountered so far Number of iterations needed The history of message updates (only recorded if \a recordSentMessages is \c true) Stores variable beliefs of previous iteration Stores factor beliefs of previous iteration Stores the update schedule Prepare Neighbors. For every variable - factors, referencing it. For every factor - variables it references. Parse properties. Create edge properties Create old beliefs Create update sequence TODO: optimize with a lookup table. Calculate product of incoming messages and factor I prod_j will be the product of messages coming into j for all J in nb(j) \ I multiply prod with prod_j calculate updated message I->i optimization Marginalize onto i Store result Update the residual if necessary BP::run does not check for NANs for performance reasons Somehow NaNs do not often occur in BP... Do several passes over the network until maximum number of iterations has been reached or until the maximum belief difference is smaller than tolerance. do the first pass Maximum-Residual BP [\ref EMK06] Update the message with the largest residual. 
I->i has been updated, which means that residuals for all J->j with J in nb[i]\I and j in nb[J]\i have to be updated Parallel updates Sequential updates Calculate new beliefs and compare with old ones | 3,966 | en | 0.82621 |
import os
import shutil
def setup_vscode():
    """Return a jupyter-server-proxy configuration dict for code-server.

    The 'command' entry is a callable producing the code-server argv for a
    given port. Environment variables CODE_WORKINGDIR, CODE_EXTENSIONSDIR
    and CODE_EXTRA_EXTENSIONSDIR customize the launch.
    """

    def _get_vscode_cmd(port):
        """Build the code-server command line for the given port."""
        binary = "code-server"
        if shutil.which(binary) is None:
            raise FileNotFoundError("Can not find code-server in PATH")

        # Start vscode in CODE_WORKINGDIR env variable if set.
        # If not, start in 'current directory', which is $REPO_DIR in
        # mybinder but /home/jovyan (or equivalent) in JupyterHubs.
        workdir = os.getenv("CODE_WORKINGDIR", ".")

        argv = [
            binary,
            "--auth",
            "none",
            "--allow-http",
            "--disable-telemetry",
            "--port=" + str(port),
        ]

        ext_dir = os.getenv("CODE_EXTENSIONSDIR", None)
        if ext_dir:
            argv += ["--extensions-dir", ext_dir]

        extra_ext_dir = os.getenv("CODE_EXTRA_EXTENSIONSDIR", None)
        if extra_ext_dir:
            argv += ["--extra-extensions-dir", extra_ext_dir]

        argv.append(workdir)
        return argv

    icon_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "icons", "vscode.svg"
    )
    return {
        "command": _get_vscode_cmd,
        "timeout": 20,
        "new_browser_tab": True,
        "launcher_entry": {
            "title": "VS Code",
            "icon_path": icon_path,
        },
    }
| jupyter_vscode_proxy/__init__.py | 1,376 | Start vscode in CODE_WORKINGDIR env variable if set If not, start in 'current directory', which is $REPO_DIR in mybinder but /home/jovyan (or equivalent) in JupyterHubs | 168 | en | 0.774063 |
import time
import logging
import os
import openpathsampling as paths
from .path_simulator import PathSimulator, MCStep
from ..ops_logging import initialization_logging
logger = logging.getLogger(__name__)
init_log = logging.getLogger('openpathsampling.initialization')
class PathSampling(PathSimulator):
    """
    General path sampling code.

    Takes a single move_scheme and generates samples from that, keeping one
    per replica after each move.
    """

    # Name used to identify this simulator type (e.g. in storage/logging).
    calc_name = "PathSampling"
    def __init__(self, storage, move_scheme=None, sample_set=None,
                 initialize=True):
        """
        Parameters
        ----------
        storage : :class:`openpathsampling.storage.Storage`
            the storage where all results should be stored in
        move_scheme : :class:`openpathsampling.MoveScheme`
            the move scheme used for the pathsampling cycle
        sample_set : :class:`openpathsampling.SampleSet`
            the initial SampleSet for the Simulator
        initialize : bool
            if `False` the new PathSimulator will continue at the step and
            not create a new SampleSet object to cut the connection to previous
            steps
        """
        super(PathSampling, self).__init__(storage)
        self.move_scheme = move_scheme
        if move_scheme is not None:
            self.root_mover = move_scheme.move_decision_tree()
            self._mover = paths.PathSimulatorMover(self.root_mover, self)
        else:
            self.root_mover = None
            self._mover = None

        initialization_logging(init_log, self,
                               ['move_scheme', 'sample_set'])
        # Optional live visualizer, drawn every `status_update_frequency`
        # cycles during `run`.
        self.live_visualizer = None
        self.status_update_frequency = 1

        if initialize:
            # NOTE: why aren't we using save_initial_step here?
            # Reset each sample so the new simulation starts with no parents.
            samples = []
            if sample_set is not None:
                for sample in sample_set:
                    samples.append(sample.copy_reset())

            self.sample_set = paths.SampleSet(samples)

            mcstep = MCStep(
                simulation=self,
                mccycle=self.step,
                active=self.sample_set,
                change=paths.AcceptedSampleMoveChange(self.sample_set.samples)
            )

            self._current_step = mcstep
        else:
            self.sample_set = sample_set
            self._current_step = None

        self.root = self.sample_set

        if self.storage is not None:
            # NOTE(review): assumes sample_set is non-empty when a storage
            # is given (samples[0] is accessed) - confirm with callers.
            template_trajectory = self.sample_set.samples[0].trajectory
            self.storage.save(template_trajectory)
            self.storage.save([self.move_scheme, self.root_mover,
                               self._mover])
        self.save_current_step()
def to_dict(self):
return {
'root': self.root,
'move_scheme': self.move_scheme,
'root_mover': self.root_mover,
}
@classmethod
def from_dict(cls, dct):
# create empty object
obj = cls(None)
# and correct the content
obj.move_scheme = dct['move_scheme']
obj.root = dct['root']
obj.root_mover = dct['root_mover']
obj._mover = paths.PathSimulatorMover(obj.root_mover, obj)
return obj
    @property
    def current_step(self):
        """:class:`MCStep` : the most recently generated (or loaded) step."""
        return self._current_step
    def save_current_step(self):
        """
        Save the current step to the storage.

        No-op when there is no storage or no current step.
        """
        if self.storage is not None and self._current_step is not None:
            try:
                # new storage does a stash here, not a save
                self.storage.stash(self._current_step)
            except AttributeError:
                # old storage API: fall back to saving via the steps store
                self.storage.steps.save(self._current_step)
@classmethod
def from_step(cls, storage, step, initialize=True):
"""
Parameters
----------
storage : :class:`openpathsampling.storage.Storage`
the storage to be used to hold the simulation results
step : :class:`openpathsampling.MCStep`
the step used to fill the initial parameters
initialize : bool
if `False` the new PathSimulator will continue at the given step and
not create a new SampleSet object to cut the connection to previous
steps.
Returns
-------
:class:`openpathsampling.PathSampling`
the new simulator object
"""
obj = cls(
storage,
step.simulation.move_scheme,
step.sample_set,
initialize=initialize
)
return obj
    def restart_at_step(self, step, storage=None):
        """
        Continue with a loaded pathsampling at a given step

        Notes
        -----
        You can only continue from a step that is compatible in the sense
        that it was previously generated from the pathsampling instance.

        If you want to switch the move scheme you need to create a new
        pathsampling instance. You can do so with the constructor or using
        the classmethod `from_step` which simplifies the setup process

        Parameters
        ----------
        step : :class:`MCStep`
            the step to be continued from. You are always free to chose any step
            which can be used to fork a simulation but for analysis you may
            only use one path of steps.
        storage : :class:`Storage`
            If given this will change the storage used to store the generated
            steps
        """
        if step.simulation is not self:
            # NOTE(review): this *raises* a RuntimeWarning (not issued via
            # warnings.warn), so callers receive an exception - confirm
            # that is intended.
            raise RuntimeWarning(
                'Trying to continue from other step. Please use the '
                '`.from_step` method to create a new PathSampling object '
                'instead.')
        if storage is not None:
            self.storage = storage
        # Adopt the step's state as the current simulation state.
        self.step = step.mccycle
        self.sample_set = step.active
        self.root = step.simulation.root
        self._current_step = step
def run_until(self, n_steps):
# if self.storage is not None:
# if len(self.storage.steps) > 0:
# self.step = len(self.storage.steps)
n_steps_to_run = n_steps - self.step
self.run(n_steps_to_run)
    def run_until_decorrelated(self, time_reversal=True):
        """Run until all trajectories are decorrelated.

        This runs until all the replicas in ``self.sample_set`` have
        decorrelated from their initial conditions. "Decorrelated" here is
        meant in the sense commonly used in one-way shooting: this runs
        until no configurations from the original trajectories remain.

        Parameters
        ----------
        time_reversal : bool
            passed through to the trajectory correlation check.
        """
        originals = {s.replica: s.trajectory for s in self.sample_set}
        current = self.sample_set

        # cache the output stream; force the primary `run` method to not
        # output anything
        # NOTE(review): the devnull handle is never closed (restored stream
        # simply replaces it) - minor resource leak; confirm acceptable.
        original_output_stream = self.output_stream
        self.output_stream = open(os.devnull, 'w')

        def n_correlated(sample_set, originals):
            # Count replicas still sharing configurations with originals.
            return sum([originals[r].is_correlated(sample_set[r],
                                                   time_reversal)
                        for r in originals])

        original_output_stream.write("Decorrelating trajectories....\n")
        to_decorrelate = n_correlated(self.sample_set, originals)
        # walrus in py38!
        while to_decorrelate:
            out_str = "Step {}: {} of {} trajectories still correlated\n"
            paths.tools.refresh_output(
                out_str.format(self.step + 1, to_decorrelate, len(originals)),
                refresh=False,
                output_stream=original_output_stream
            )
            self.run(1)
            to_decorrelate = n_correlated(self.sample_set, originals)

        paths.tools.refresh_output(
            "Step {}: All trajectories decorrelated!\n".format(self.step + 1),
            refresh=False,
            output_stream=original_output_stream
        )
        self.output_stream = original_output_stream
    def run(self, n_steps):
        """Run ``n_steps`` Monte Carlo cycles from the current state.

        Each cycle asks the move scheme for a move, applies the resulting
        samples to the active sample set, wraps the result in an
        :class:`MCStep`, and saves it. Status output is refreshed every
        ``status_update_frequency`` cycles and storage is synced every
        ``save_frequency`` cycles.
        """
        mcstep = None
        # cvs = list()
        # n_samples = 0
        # if self.storage is not None:
        #     n_samples = len(self.storage.snapshots)
        #     cvs = list(self.storage.cvs)
        initial_time = time.time()
        for nn in range(n_steps):
            self.step += 1
            logger.info("Beginning MC cycle " + str(self.step))
            refresh = self.allow_refresh
            if self.step % self.status_update_frequency == 0:
                # time for a status update; redraw the live visualizer (if
                # any) first, in which case we skip the text refresh
                if self.live_visualizer is not None and mcstep is not None:
                    self.live_visualizer.draw_ipynb(mcstep)
                    refresh = False
                elapsed = time.time() - initial_time
                # NOTE(review): time_per_step is computed but never used
                # below -- progress_string only receives ``elapsed``.
                if nn > 0:
                    time_per_step = elapsed / nn
                else:
                    time_per_step = 1.0
                paths.tools.refresh_output(
                    "Working on Monte Carlo cycle number " + str(self.step)
                    + "\n" + paths.tools.progress_string(nn, n_steps,
                                                         elapsed),
                    refresh=refresh,
                    output_stream=self.output_stream
                )
            time_start = time.time()
            movepath = self._mover.move(self.sample_set, step=self.step)
            samples = movepath.results
            new_sampleset = self.sample_set.apply_samples(samples)
            time_elapsed = time.time() - time_start
            # TODO: we can save this with the MC steps for timing? The bit
            # below works, but is only a temporary hack
            setattr(movepath.details, "timing", time_elapsed)
            mcstep = MCStep(
                simulation=self,
                mccycle=self.step,
                previous=self.sample_set,
                active=new_sampleset,
                change=movepath
            )
            self._current_step = mcstep
            self.save_current_step()
            # if self.storage is not None:
            #     # I think this is done automatically when saving snapshots
            #     # for cv in cvs:
            #     #     n_len = len(self.storage.snapshots)
            #     #     cv(self.storage.snapshots[n_samples:n_len])
            #     #     n_samples = n_len
            #
            #     self.storage.steps.save(mcstep)
            if self.step % self.save_frequency == 0:
                # periodic consistency check + flush to storage
                self.sample_set.sanity_check()
                self.sync_storage()
            self.sample_set = new_sampleset
        # final flush and a last visualizer update before reporting done
        self.sync_storage()
        if self.live_visualizer is not None and mcstep is not None:
            self.live_visualizer.draw_ipynb(mcstep)
        paths.tools.refresh_output(
            "DONE! Completed " + str(self.step) + " Monte Carlo cycles.\n",
            refresh=False,
            output_stream=self.output_stream
        )
| openpathsampling/pathsimulators/path_sampling.py | 11,053 | General path sampling code.
Takes a single move_scheme and generates samples from that, keeping one
per replica after each move.
Parameters
----------
storage : :class:`openpathsampling.storage.Storage`
the storage where all results should be stored in
move_scheme : :class:`openpathsampling.MoveScheme`
the move scheme used for the pathsampling cycle
sample_set : :class:`openpathsampling.SampleSet`
the initial SampleSet for the Simulator
initialize : bool
if `False` the new PathSimulator will continue at the step and
not create a new SampleSet object to cut the connection to previous
steps
Parameters
----------
storage : :class:`openpathsampling.storage.Storage`
the storage to be used to hold the simulation results
step : :class:`openpathsampling.MCStep`
the step used to fill the initial parameters
initialize : bool
if `False` the new PathSimulator will continue at the given step and
not create a new SampleSet object to cut the connection to previous
steps.
Returns
-------
:class:`openpathsampling.PathSampling`
the new simulator object
Continue with a loaded pathsampling at a given step
Notes
-----
You can only continue from a step that is compatible in the sense
that it was previously generated from the pathsampling instance.
If you want to switch the move scheme you need to create a new
pathsampling instance. You can do so with the constructor or using
the classmethod `from_step` which simplifies the setup process
Parameters
----------
step : :class:`MCStep`
the step to be continued from. You are always free to chose any step
which can be used to fork a simulation but for analysis you may
only use one path of steps.
storage : :class:`Storage`
If given this will change the storage used to store the generated
steps
Run until all trajectories are decorrelated.
This runs until all the replicas in ``self.sample_set`` have
decorrelated from their initial conditions. "Decorrelated" here is
meant in the sense commonly used in one-way shooting: this runs
until no configurations from the original trajectories remain.
Save the current step to the storage
NOTE: why aren't we using save_initial_step here? create empty object and correct the content new storage does a stash here, not a save if self.storage is not None: if len(self.storage.steps) > 0: self.step = len(self.storage.steps) cache the output stream; force the primary `run` method to not output anything walrus in py38! cvs = list() n_samples = 0 if self.storage is not None: n_samples = len(self.storage.snapshots) cvs = list(self.storage.cvs) do we visualize this step? do we visualize at all? TODO: we can save this with the MC steps for timing? The bit below works, but is only a temporary hack if self.storage is not None: I think this is done automatically when saving snapshots for cv in cvs: n_len = len(self.storage.snapshots) cv(self.storage.snapshots[n_samples:n_len]) n_samples = n_len self.storage.steps.save(mcstep) | 3,055 | en | 0.772745 |
"""
WSGI config for rush00 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rush00.settings")
application = get_wsgi_application()
| rush00/wsgi.py | 389 | WSGI config for rush00 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ | 212 | en | 0.766387 |
"""
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import subprocess
import time
import win32ui
class WptRecord:
  """Drives the external wptRecord capture tool via Win32 window messages.

  ``Prepare`` launches the recorder process and locates its message
  window; the remaining methods send/post custom window messages to
  control the recording life cycle (start, stop, process, done).
  """
  def __init__(self):
    self.proc = None    # subprocess.Popen handle of the recorder process
    self.window = None  # win32ui window wrapper used for messaging
    # Custom window-message IDs (offsets from 0x8000) understood by wptRecord.
    self.UWM_PREPARE = (0x8000 + 0)
    self.UWM_START = (0x8000 + 1)
    self.UWM_STOP = (0x8000 + 2)
    self.UWM_PROCESS = (0x8000 + 3)
    self.UWM_DONE = (0x8000 + 4)
    self.UWM_WAIT_FOR_IDLE = (0x8000 + 5)
  def Prepare(self, test):
    """Launch the recorder configured for ``test`` and find its window.

    Builds the recorder command line from the test's settings, starts the
    process, then polls up to 30 seconds for the "wptRecord" window before
    sending it the PREPARE message.
    """
    recorder = test.GetRecorder()
    file_base = test.GetFileBase()
    if recorder is not None and file_base is not None:
      args = [recorder, '--filebase', file_base, '--histograms']
      if test.TcpDump():
        args.append('--tcpdump')
      if test.Video():
        args.append('--video')
      if test.FullSizeVideo():
        args.append('--noresize')
      args.extend(['--quality', str(test.GetImageQuality())])
      try:
        self.proc = subprocess.Popen(args)
      except:
        # NOTE(review): bare except swallows every launch error (including
        # KeyboardInterrupt); consider narrowing to ``except Exception``.
        logging.debug('Error launching recorder "{0}"'.format(recorder))
    # Wait for the recorder window to be available for 30 seconds
    start = time.time()
    while self.window is None and time.time() - start < 30:
      try:
        self.window = win32ui.FindWindow("wptRecord", "wptRecord")
      except:
        # FindWindow raises while the window does not exist yet; retry.
        time.sleep(0.1)
    if self.window is not None:
      try:
        self.window.SendMessage(self.UWM_PREPARE, 0, 0)
      except:
        pass
  def Start(self):
    """Begin recording (fire-and-forget via PostMessage)."""
    if self.window is not None:
      try:
        self.window.PostMessage(self.UWM_START, 0, 0)
      except:
        pass
  def WaitForIdle(self, wait_seconds):
    """Synchronously ask the recorder to wait up to ``wait_seconds`` for idle."""
    if self.window is not None:
      try:
        self.window.SendMessage(self.UWM_WAIT_FOR_IDLE, wait_seconds, 0)
      except:
        pass
  def Stop(self):
    """Stop recording (synchronous SendMessage)."""
    if self.window is not None:
      try:
        self.window.SendMessage(self.UWM_STOP, 0, 0)
      except:
        pass
  def Process(self, start_offset):
    """Ask the recorder to post-process the capture from ``start_offset``."""
    if self.window is not None:
      try:
        self.window.SendMessage(self.UWM_PROCESS, start_offset, 0)
      except:
        pass
  def Done(self):
    """Signal completion and wait for the recorder process to exit."""
    if self.window is not None:
      try:
        self.window.SendMessage(self.UWM_DONE, 0, 0)
      except:
        pass
    if self.proc is not None:
      self.proc.wait() | agent/webdriver/recorder.py | 2,774 | Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Wait for the recorder window to be available for 30 seconds | 634 | en | 0.865276 |
import urllib.request
from urllib.parse import urlencode
import json
import pprint
import socket
import struct
#from src import etri2conll
def getETRI_rest(text):
    """POST ``text`` to the ETRI parser REST endpoint and return its JSON.

    Parameters
    ----------
    text : str
        raw sentence(s) to be analyzed

    Returns
    -------
    dict
        the decoded JSON response from the service
    """
    url = "http://143.248.135.20:31235/etri_parser"
    payload = json.dumps({'text': text}).encode('utf-8')
    request = urllib.request.Request(url, payload)
    # fix: close the HTTP response deterministically (it used to leak)
    with urllib.request.urlopen(request) as response:
        body = response.read().decode('utf-8')
    return json.loads(body)
def read_blob(sock, size):
    """Read exactly ``size`` bytes from ``sock`` and return them.

    Raises
    ------
    Exception
        if the peer closes the connection before ``size`` bytes arrive.
    """
    # fix: the original did ``ret += buf`` (discarding each chunk), so the
    # buffer never grew and the loop never terminated; it also mixed a str
    # accumulator with the bytes returned by recv().
    buf = b''
    while len(buf) != size:
        chunk = sock.recv(size - len(buf))
        if not chunk:
            raise Exception("Socket closed")
        buf += chunk
    return buf
def read_long(sock):
    """Read one native unsigned long from ``sock``.

    Returns the 1-tuple produced by :func:`struct.unpack` (kept as a tuple
    for backward compatibility with existing callers).
    """
    size = struct.calcsize("L")
    # fix: the original called the undefined name ``readblob`` (NameError);
    # the helper in this module is ``read_blob``.
    data = read_blob(sock, size)
    return struct.unpack("L", data)
def getETRI(text):
    """Query the ETRI NLP socket service for an analysis of ``text``.

    Returns
    -------
    list or None
        the ``'sentence'`` entries of the JSON reply, or ``None`` on any
        connection / protocol error (best-effort, as before).
    """
    host = '143.248.135.60'
    port = 33222
    ADDR = (host, port)
    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        clientSocket.connect(ADDR)
    except Exception:
        clientSocket.close()  # fix: the socket used to leak on failure
        return None
    try:
        clientSocket.sendall(str.encode(text))
        # read the whole reply until the server closes the connection
        buffer = bytearray()
        while True:
            data = clientSocket.recv(1024)
            if not data:
                break
            buffer.extend(data)
        result = json.loads(buffer.decode(encoding='utf-8'))
        return result['sentence']
    except Exception:
        return None
    finally:
        clientSocket.close()  # fix: the socket used to leak on every path
def lemmatizer(word, pos):
    """Return the lemma of ``word`` for the given POS tag.

    Scans the WSD entries returned by :func:`getETRI` and returns the
    surface text of the first entry whose type matches ``pos``; falls
    back to ``word`` itself when nothing matches.
    """
    verb_tags = ('v', 'VV')
    noun_tags = ('n', 'NN', 'NNG', 'NNP', 'NNB', 'NR', 'NP')
    adjective_tags = ('adj', 'VA')

    def _matches(wsd_type):
        # mirrors the original branch order: verbs, then nouns, then adjectives
        if pos in verb_tags:
            return wsd_type == 'VV'
        if pos in noun_tags:
            return 'NN' in wsd_type
        if pos in adjective_tags:
            return wsd_type == 'VA'
        return False

    for entry in getETRI(word)[0]['WSD']:
        if _matches(entry['type']):
            return entry['text']
    return word
def getPOS(word):
    """Return a coarse POS tag ('n', 'v' or 'adj') for ``word``.

    Looks at the first WSD entry returned by :func:`getETRI`; anything
    that is not a noun, 'VV' or 'VA' defaults to 'n'.
    """
    etri = getETRI(word)
    pos = etri[0]['WSD'][0]['type']
    if pos.startswith('N'):
        pos = 'n'
    elif pos == 'VV':
        pos = 'v'
    elif pos == 'VA':
        pos = 'adj'
    else:
        # fix: was ``pos == 'n'`` -- a no-op comparison that leaked the raw
        # ETRI tag to callers for any other tag type
        pos = 'n'
    return pos
def getMorpEval(tid, nlp):
    """Return the '+'-joined POS-tag sequence for token ``tid``.

    Splits the token's ``morp_eval`` result (e.g. ``'a/NNG+b/JX'``) into
    segments, keeps the tag part of each ``surface/tag`` segment, and
    joins the tags with '+'. Returns '_' when ``tid`` is not present.
    """
    tag_sequence = '_'
    for entry in nlp[0]['morp_eval']:
        if entry['id'] != tid:
            continue
        tags = [segment.split('/')[1]
                for segment in entry['result'].split('+')
                if '/' in segment]
        tag_sequence = '+'.join(tags)
    return tag_sequence
def getMorhWithWord(tid, nlp):
    """Return the raw morphological-analysis string for token ``tid``.

    Returns '_' when the token id is not found. (The original raised
    ``NameError`` in that case: ``morp`` was only bound inside the match
    branch, and the ``result = '_'`` fallback was never returned.)
    """
    for entry in nlp[0]['morp_eval']:
        if entry['id'] == tid:
            return entry['result']
    return '_'
def getETRI_CoNLL2006(text):
    """Parse ``text`` with ETRI and convert it into CoNLL-2006-style rows.

    Each row is a 9-element list:
    ``[id, form, raw-morph, pos-sequence, '_', head, deprel, '_', '_']``.
    """
    nlp = getETRI(text)
    rows = []
    for dep in nlp[0]['dependency']:
        token_id = dep['id']
        rows.append([
            token_id,
            dep['text'],
            getMorhWithWord(token_id, nlp),
            getMorpEval(token_id, nlp),
            '_',
            dep['head'],
            dep['label'],
            '_',
            '_',
        ])
    return rows
def getETRI_CoNLL2009(text):
    """Parse ``text`` with ETRI and convert it into CoNLL-2009-style rows.

    Each row is a 12-element list; the "predicted" columns (plemma, ppos,
    pfeat, phead, pdeprel) simply duplicate their gold counterparts.
    """
    nlp = getETRI(text)
    rows = []
    for dep in nlp[0]['dependency']:
        token_id = dep['id']
        form = dep['text']
        raw_morph = getMorhWithWord(token_id, nlp)
        tag_sequence = getMorpEval(token_id, nlp)
        head = dep['head']
        label = dep['label']
        rows.append([token_id, form, raw_morph, form, tag_sequence,
                     tag_sequence, '_', '_', head, head, label, label])
    return rows
#def test():
#conll = getETRI_CoNLL2006(text)
#conll = getETRI_CoNLL2009(text)
#pprint.pprint(conll)
#test()
| etri.py | 4,415 | from src import etri2conllclientSocket.sendall(text.encode('unicode-escape'))clientSocket.sendall(text.encode('utf-8'))def test():conll = getETRI_CoNLL2006(text)conll = getETRI_CoNLL2009(text)pprint.pprint(conll)test() | 218 | en | 0.107769 |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Single source of truth for the package version.
__version__ = '4.7.3'
| gitlab/datadog_checks/gitlab/__about__.py | 138 | (C) Datadog, Inc. 2018-present All rights reserved Licensed under a 3-clause BSD style license (see LICENSE) | 108 | en | 0.821201 |
from django.contrib.auth import models as auth_models
from django.core.mail import send_mail
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.translation import gettext_lazy as _
from oscar.core.compat import AUTH_USER_MODEL
class UserManager(auth_models.BaseUserManager):
    """Manager for the email-based user model (no username field)."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular user with the given email and password."""
        if not email:
            raise ValueError('The given email must be set')
        now = timezone.now()
        user = self.model(
            email=UserManager.normalize_email(email),
            is_staff=False,
            is_active=True,
            is_superuser=False,
            last_login=now,
            date_joined=now,
            **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Create a regular user, then promote it to an active superuser."""
        user = self.create_user(email, password, **extra_fields)
        user.is_staff = True
        user.is_active = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class AbstractUser(auth_models.AbstractBaseUser,
                   auth_models.PermissionsMixin):
    """
    An abstract base user suitable for use in Oscar projects.
    This is basically a copy of the core AbstractUser model but without a
    username field
    """
    email = models.EmailField(_('email address'), unique=True)
    first_name = models.CharField(
        _('First name'), max_length=255, blank=True)
    last_name = models.CharField(
        _('Last name'), max_length=255, blank=True)
    is_staff = models.BooleanField(
        _('Staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    is_active = models.BooleanField(
        _('Active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'),
                                       default=timezone.now)
    objects = UserManager()
    # The email address is the login identifier (there is no username).
    USERNAME_FIELD = 'email'
    class Meta:
        abstract = True
        verbose_name = _('User')
        verbose_name_plural = _('Users')
    def clean(self):
        """Normalize the email address via the manager before validation."""
        super().clean()
        self.email = self.__class__.objects.normalize_email(self.email)
    def get_full_name(self):
        """
        Return the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        """
        Return the short name for the user.
        """
        return self.first_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Send an email to this user.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)
    def _migrate_alerts_to_user(self):
        """
        Transfer any active alerts linked to a user's email address to the
        newly registered user.
        """
        ProductAlert = self.alerts.model
        alerts = ProductAlert.objects.filter(
            email=self.email, status=ProductAlert.ACTIVE)
        # Attach the alerts to the user and clear the anonymous fields.
        alerts.update(user=self, key='', email='')
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        # Migrate any "anonymous" product alerts to the registered user
        # Ideally, this would be done via a post-save signal. But we can't
        # use get_user_model to wire up signals to custom user models
        # see Oscar ticket #1127, Django ticket #19218
        self._migrate_alerts_to_user()
class AbstractProductAlert(models.Model):
    """
    An alert for when a product comes back in stock
    """
    product = models.ForeignKey(
        'catalogue.Product',
        on_delete=models.CASCADE)
    # A user is only required if the notification is created by a
    # registered user, anonymous users will only have an email address
    # attached to the notification
    user = models.ForeignKey(
        AUTH_USER_MODEL,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name="alerts",
        verbose_name=_('User'))
    email = models.EmailField(_("Email"), db_index=True, blank=True)
    # This key are used to confirm and cancel alerts for anon users
    key = models.CharField(_("Key"), max_length=128, blank=True, db_index=True)
    # An alert can have two different statuses for authenticated
    # users ``ACTIVE`` and ``CANCELLED`` and anonymous users have an
    # additional status ``UNCONFIRMED``. For anonymous users a confirmation
    # and unsubscription key are generated when an instance is saved for
    # the first time and can be used to confirm and unsubscribe the
    # notifications.
    UNCONFIRMED, ACTIVE, CANCELLED, CLOSED = (
        'Unconfirmed', 'Active', 'Cancelled', 'Closed')
    STATUS_CHOICES = (
        (UNCONFIRMED, _('Not yet confirmed')),
        (ACTIVE, _('Active')),
        (CANCELLED, _('Cancelled')),
        (CLOSED, _('Closed')),
    )
    status = models.CharField(_("Status"), max_length=20,
                              choices=STATUS_CHOICES, default=ACTIVE)
    date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
    date_confirmed = models.DateTimeField(_("Date confirmed"), blank=True,
                                          null=True)
    date_cancelled = models.DateTimeField(_("Date cancelled"), blank=True,
                                          null=True)
    date_closed = models.DateTimeField(_("Date closed"), blank=True, null=True)
    class Meta:
        abstract = True
        app_label = 'customer'
        verbose_name = _('Product alert')
        verbose_name_plural = _('Product alerts')
    @property
    def is_anonymous(self):
        """True when the alert has no linked user (email-only alert)."""
        return self.user is None
    @property
    def can_be_confirmed(self):
        """Only unconfirmed (anonymous) alerts can be confirmed."""
        return self.status == self.UNCONFIRMED
    @property
    def can_be_cancelled(self):
        """Active and not-yet-confirmed alerts may still be cancelled."""
        return self.status in (self.ACTIVE, self.UNCONFIRMED)
    @property
    def is_cancelled(self):
        return self.status == self.CANCELLED
    @property
    def is_active(self):
        return self.status == self.ACTIVE
    def confirm(self):
        """Mark the alert active and stamp the confirmation time."""
        self.status = self.ACTIVE
        self.date_confirmed = timezone.now()
        self.save()
    confirm.alters_data = True
    def cancel(self):
        """Mark the alert cancelled and stamp the cancellation time."""
        self.status = self.CANCELLED
        self.date_cancelled = timezone.now()
        self.save()
    cancel.alters_data = True
    def close(self):
        """Mark the alert closed (e.g. after notification was sent)."""
        self.status = self.CLOSED
        self.date_closed = timezone.now()
        self.save()
    close.alters_data = True
    def get_email_address(self):
        """Return the user's email when linked, else the stored address."""
        if self.user:
            return self.user.email
        else:
            return self.email
    def save(self, *args, **kwargs):
        # New anonymous alerts start unconfirmed and get a confirmation key.
        if not self.id and not self.user:
            self.key = self.get_random_key()
            self.status = self.UNCONFIRMED
        # Ensure date fields get updated when saving from modelform (which just
        # calls save, and doesn't call the methods cancel(), confirm() etc).
        if self.status == self.CANCELLED and self.date_cancelled is None:
            self.date_cancelled = timezone.now()
        if not self.user and self.status == self.ACTIVE \
                and self.date_confirmed is None:
            self.date_confirmed = timezone.now()
        if self.status == self.CLOSED and self.date_closed is None:
            self.date_closed = timezone.now()
        return super().save(*args, **kwargs)
    def get_random_key(self):
        """Return a 40-char lowercase alphanumeric confirmation key."""
        return get_random_string(length=40, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')
    def get_confirm_url(self):
        return reverse('customer:alerts-confirm', kwargs={'key': self.key})
    def get_cancel_url(self):
        return reverse('customer:alerts-cancel-by-key', kwargs={'key': self.key})
| src/oscar/apps/customer/abstract_models.py | 8,184 | An alert for when a product comes back in stock
An abstract base user suitable for use in Oscar projects.
This is basically a copy of the core AbstractUser model but without a
username field
Transfer any active alerts linked to a user's email address to the
newly registered user.
Creates and saves a User with the given email and
password.
Send an email to this user.
Return the first_name plus the last_name, with a space in between.
Return the short name for the user.
Migrate any "anonymous" product alerts to the registered user Ideally, this would be done via a post-save signal. But we can't use get_user_model to wire up signals to custom user models see Oscar ticket 1127, Django ticket 19218 A user is only required if the notification is created by a registered user, anonymous users will only have an email address attached to the notification This key are used to confirm and cancel alerts for anon users An alert can have two different statuses for authenticated users ``ACTIVE`` and ``CANCELLED`` and anonymous users have an additional status ``UNCONFIRMED``. For anonymous users a confirmation and unsubscription key are generated when an instance is saved for the first time and can be used to confirm and unsubscribe the notifications. Ensure date fields get updated when saving from modelform (which just calls save, and doesn't call the methods cancel(), confirm() etc). | 1,393 | en | 0.88757 |
"""A training script of TD3 on OpenAI Gym Mujoco environments.
This script follows the settings of http://arxiv.org/abs/1802.09477 as much
as possible.
"""
import argparse
import logging
import sys
import gym
import gym.wrappers
import numpy as np
import torch
from torch import nn
import pfrl
from pfrl import experiments, explorers, replay_buffers, utils
def main():
    """Parse CLI args, build a TD3 agent, then train or evaluate it."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--outdir",
        type=str,
        default="results",
        help=(
            "Directory path to save output files."
            " If it does not exist, it will be created."
        ),
    )
    parser.add_argument(
        "--env",
        type=str,
        default="Hopper-v2",
        help="OpenAI Gym MuJoCo env to perform algorithm on.",
    )
    parser.add_argument("--seed", type=int, default=0, help="Random seed [0, 2 ** 32)")
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU to use, set to -1 if no GPU."
    )
    parser.add_argument(
        "--load", type=str, default="", help="Directory to load agent from."
    )
    parser.add_argument(
        "--steps",
        type=int,
        default=10**6,
        help="Total number of timesteps to train the agent.",
    )
    parser.add_argument(
        "--eval-n-runs",
        type=int,
        default=10,
        help="Number of episodes run for each evaluation.",
    )
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=5000,
        help="Interval in timesteps between evaluations.",
    )
    parser.add_argument(
        "--replay-start-size",
        type=int,
        default=10000,
        help="Minimum replay buffer size before " + "performing gradient updates.",
    )
    parser.add_argument("--batch-size", type=int, default=100, help="Minibatch size")
    parser.add_argument(
        "--render", action="store_true", help="Render env states in a GUI window."
    )
    parser.add_argument(
        "--demo", action="store_true", help="Just run evaluation, not training."
    )
    parser.add_argument("--load-pretrained", action="store_true", default=False)
    parser.add_argument(
        "--pretrained-type", type=str, default="best", choices=["best", "final"]
    )
    parser.add_argument(
        "--monitor", action="store_true", help="Wrap env with gym.wrappers.Monitor."
    )
    parser.add_argument(
        "--log-level", type=int, default=logging.INFO, help="Level of the root logger."
    )
    args = parser.parse_args()
    logging.basicConfig(level=args.log_level)
    args.outdir = experiments.prepare_output_dir(args, args.outdir, argv=sys.argv)
    print("Output files are saved in {}".format(args.outdir))
    # Set a random seed used in PFRL
    utils.set_random_seed(args.seed)
    def make_env(test):
        # Build one (train or eval) environment with the requested wrappers.
        env = gym.make(args.env)
        # Unwrap TimeLimit wrapper
        assert isinstance(env, gym.wrappers.TimeLimit)
        env = env.env
        # Use different random seeds for train and test envs
        env_seed = 2**32 - 1 - args.seed if test else args.seed
        env.seed(env_seed)
        # Cast observations to float32 because our model uses float32
        env = pfrl.wrappers.CastObservationToFloat32(env)
        if args.monitor:
            env = pfrl.wrappers.Monitor(env, args.outdir)
        if args.render and not test:
            env = pfrl.wrappers.Render(env)
        return env
    env = make_env(test=False)
    timestep_limit = env.spec.max_episode_steps
    obs_space = env.observation_space
    action_space = env.action_space
    print("Observation space:", obs_space)
    print("Action space:", action_space)
    obs_size = obs_space.low.size
    action_size = action_space.low.size
    # Deterministic policy network: 400-300 MLP with tanh-squashed output.
    policy = nn.Sequential(
        nn.Linear(obs_size, 400),
        nn.ReLU(),
        nn.Linear(400, 300),
        nn.ReLU(),
        nn.Linear(300, action_size),
        nn.Tanh(),
        pfrl.policies.DeterministicHead(),
    )
    policy_optimizer = torch.optim.Adam(policy.parameters())
    def make_q_func_with_optimizer():
        # One Q-network (TD3 uses two) over concatenated (obs, action).
        q_func = nn.Sequential(
            pfrl.nn.ConcatObsAndAction(),
            nn.Linear(obs_size + action_size, 400),
            nn.ReLU(),
            nn.Linear(400, 300),
            nn.ReLU(),
            nn.Linear(300, 1),
        )
        q_func_optimizer = torch.optim.Adam(q_func.parameters())
        return q_func, q_func_optimizer
    q_func1, q_func1_optimizer = make_q_func_with_optimizer()
    q_func2, q_func2_optimizer = make_q_func_with_optimizer()
    rbuf = replay_buffers.ReplayBuffer(10**6)
    explorer = explorers.AdditiveGaussian(
        scale=0.1, low=action_space.low, high=action_space.high
    )
    def burnin_action_func():
        """Select random actions until model is updated one or more times."""
        return np.random.uniform(action_space.low, action_space.high).astype(np.float32)
    # Hyperparameters in http://arxiv.org/abs/1802.09477
    agent = pfrl.agents.TD3(
        policy,
        q_func1,
        q_func2,
        policy_optimizer,
        q_func1_optimizer,
        q_func2_optimizer,
        rbuf,
        gamma=0.99,
        soft_update_tau=5e-3,
        explorer=explorer,
        replay_start_size=args.replay_start_size,
        gpu=args.gpu,
        minibatch_size=args.batch_size,
        burnin_action_func=burnin_action_func,
    )
    if len(args.load) > 0 or args.load_pretrained:
        # either load or load_pretrained must be false
        assert not len(args.load) > 0 or not args.load_pretrained
        if len(args.load) > 0:
            agent.load(args.load)
        else:
            agent.load(
                utils.download_model("TD3", args.env, model_type=args.pretrained_type)[
                    0
                ]
            )
    eval_env = make_env(test=True)
    if args.demo:
        # Evaluation-only mode: run eval episodes and dump the statistics.
        eval_stats = experiments.eval_performance(
            env=eval_env,
            agent=agent,
            n_steps=None,
            n_episodes=args.eval_n_runs,
            max_episode_len=timestep_limit,
        )
        print(
            "n_runs: {} mean: {} median: {} stdev {}".format(
                args.eval_n_runs,
                eval_stats["mean"],
                eval_stats["median"],
                eval_stats["stdev"],
            )
        )
        import json
        import os
        with open(os.path.join(args.outdir, "demo_scores.json"), "w") as f:
            json.dump(eval_stats, f)
    else:
        # Training mode with periodic evaluation.
        experiments.train_agent_with_evaluation(
            agent=agent,
            env=env,
            steps=args.steps,
            eval_env=eval_env,
            eval_n_steps=None,
            eval_n_episodes=args.eval_n_runs,
            eval_interval=args.eval_interval,
            outdir=args.outdir,
            train_max_episode_len=timestep_limit,
        )
if __name__ == "__main__":
main()
| examples/mujoco/reproduction/td3/train_td3.py | 6,901 | Select random actions until model is updated one or more times.
A training script of TD3 on OpenAI Gym Mujoco environments.
This script follows the settings of http://arxiv.org/abs/1802.09477 as much
as possible.
Set a random seed used in PFRL Unwrap TimeLimit wrapper Use different random seeds for train and test envs Cast observations to float32 because our model uses float32 Hyperparameters in http://arxiv.org/abs/1802.09477 either load or load_pretrained must be false | 478 | en | 0.806241 |
from django import forms
from django.contrib.auth.models import User
from django.forms import ModelForm
from artapp.models import Artist, Art
from django.template.defaultfilters import slugify
class RegistrationForm(ModelForm):
    """Sign-up form for a new :class:`Artist`.

    Adds username/email/password fields on top of the ``Artist`` model
    fields, and validates that the username (and its slug) is free and
    that both password entries match.
    """
    username = forms.CharField(label=(u'User Name'))
    email = forms.EmailField(label=(u'Email Address'))
    password = forms.CharField(label=(u'Password'), widget=forms.PasswordInput(render_value=False))
    password1 = forms.CharField(label=(u'Verify Password'), widget=forms.PasswordInput(render_value=False))
    class Meta:
        model = Artist
        exclude = ('user',)
    def clean_username(self):
        """Reject usernames that already belong to an existing ``User``."""
        username = self.cleaned_data['username']
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError('That username is already taken, please select another')
    def clean(self):
        """Cross-field validation: matching passwords and a free slug.

        Uses ``.get()`` because keys are absent from ``cleaned_data`` when
        the individual fields failed validation; direct indexing raised
        ``KeyError`` (a 500) in that case.
        """
        password = self.cleaned_data.get('password')
        password1 = self.cleaned_data.get('password1')
        if password != password1:
            raise forms.ValidationError("The passwords did not match. Please try again")
        username = self.cleaned_data.get('username')
        if username is None:
            # field-level validation already reported the username error
            return self.cleaned_data
        myslug = slugify(username)
        try:
            Artist.objects.get(slug=myslug)
        except Artist.DoesNotExist:
            return self.cleaned_data
        raise forms.ValidationError("That username is already taken, please select another")
class LoginForm(forms.Form):
    """Plain (non-model) form for the login view."""
    username = forms.CharField(label=(u'User Name'))
    password = forms.CharField(label=(u'Password'), widget=forms.PasswordInput(render_value=False))
    #next_url = forms.CharField(label=(u'next url'), widget=forms.HiddenInput()) #hidden
class VoteForm(forms.Form):
    """Form for voting on a piece of art, identified by its slug."""
    # NOTE(review): a single ('upvote', 'downvote') pair renders one checkbox
    # whose submitted value is 'upvote' and whose label reads 'downvote'.
    # Confirm whether [('upvote', 'Upvote'), ('downvote', 'Downvote')] was
    # intended.
    VOTECHOICES = [('upvote', 'downvote')]
    vote = forms.MultipleChoiceField(required=False, widget=forms.CheckboxSelectMultiple, choices=VOTECHOICES)
    artslug = forms.CharField()
class SubmitArtForm(ModelForm):
    """Form for submitting a new piece of ascii art.

    Validates that the slug derived from the title does not collide with
    an existing ``Art`` object.
    """
    class Meta:
        model = Art
        fields = ['title', 'drawing']
        #exclude = ('slug','created_at', 'likes',)
    def clean(self):
        """Reject titles whose slug collides with an existing ``Art``.

        Uses ``.get()`` because 'title' is absent from ``cleaned_data``
        when the field itself failed validation; direct indexing raised
        ``KeyError`` (a 500) in that case.
        """
        title = self.cleaned_data.get('title')
        if title is None:
            # field-level validation already reported the title error
            return self.cleaned_data
        myslug = slugify(title)
        try:
            Art.objects.get(slug=myslug)
        except Art.DoesNotExist:
            return self.cleaned_data
        raise forms.ValidationError("Ascii Art with that slug already exists, please pick another title")
"""
def clean_title(self):
title = self.cleaned_data['title']
try:
Art.objects.get(title=title)
except Art.DoesNotExist:
return title
raise forms.ValidationError("Ascii Art with that title already exists, please pick another title")
""" | artapp/forms.py | 2,407 | return self.cleaned_datanext_url = forms.CharField(label=(u'next url'), widget=forms.HiddenInput()) hiddenexclude = ('slug','created_at', 'likes',) | 147 | en | 0.205082 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.discovery_v1alpha1_api import DiscoveryV1alpha1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestDiscoveryV1alpha1Api(unittest.TestCase):
    """DiscoveryV1alpha1Api unit test stubs"""
    # NOTE: auto-generated by openapi-generator; regenerate rather than
    # hand-edit. Every test body is an intentionally empty placeholder.
    def setUp(self):
        # Fresh API client instance per test.
        self.api = kubernetes.client.api.discovery_v1alpha1_api.DiscoveryV1alpha1Api()  # noqa: E501
    def tearDown(self):
        pass
    def test_create_namespaced_endpoint_slice(self):
        """Test case for create_namespaced_endpoint_slice
        """
        pass
    def test_delete_collection_namespaced_endpoint_slice(self):
        """Test case for delete_collection_namespaced_endpoint_slice
        """
        pass
    def test_delete_namespaced_endpoint_slice(self):
        """Test case for delete_namespaced_endpoint_slice
        """
        pass
    def test_get_api_resources(self):
        """Test case for get_api_resources
        """
        pass
    def test_list_endpoint_slice_for_all_namespaces(self):
        """Test case for list_endpoint_slice_for_all_namespaces
        """
        pass
    def test_list_namespaced_endpoint_slice(self):
        """Test case for list_namespaced_endpoint_slice
        """
        pass
    def test_patch_namespaced_endpoint_slice(self):
        """Test case for patch_namespaced_endpoint_slice
        """
        pass
    def test_read_namespaced_endpoint_slice(self):
        """Test case for read_namespaced_endpoint_slice
        """
        pass
    def test_replace_namespaced_endpoint_slice(self):
        """Test case for replace_namespaced_endpoint_slice
        """
        pass
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| kubernetes/test/test_discovery_v1alpha1_api.py | 2,027 | DiscoveryV1alpha1Api unit test stubs
Test case for create_namespaced_endpoint_slice
Test case for delete_collection_namespaced_endpoint_slice
Test case for delete_namespaced_endpoint_slice
Test case for get_api_resources
Test case for list_endpoint_slice_for_all_namespaces
Test case for list_namespaced_endpoint_slice
Test case for patch_namespaced_endpoint_slice
Test case for read_namespaced_endpoint_slice
Test case for replace_namespaced_endpoint_slice
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
coding: utf-8 noqa: E501 noqa: E501 | 799 | en | 0.460375 |
# -*- coding:utf-8 -*-
"""
Sections organize movement between pages in an experiment.
.. moduleauthor:: Johannes Brachem <jbrachem@posteo.de>, Paul Wiemann <paulwiemann@gmail.com>
"""
import time
import typing as t
from ._core import ExpMember
from ._helper import inherit_kwargs
from .page import _PageCore, _DefaultFinalPage
from .exceptions import AlfredError, ValidationError, AbortMove
from random import shuffle
@inherit_kwargs
class Section(ExpMember):
    """
    The basic section, allows forward and backward movements.

    Args:
        shuffle (bool): If True, the order of all members in this
            section will be randomized every time the section is entered.
            Shuffling is not recursive, it only affects direct members
            of a section. That means, if there are subsections,
            their position in the parent section will be randomized,
            but the members within the subsection will not be affected.
            Defaults to False. Can be defined as a class attribute.
        {kwargs}

    Examples:
        Using a basic section and filling it with a page in instance
        style::

            import alfred3 as al
            exp = al.Experiment()

            exp += al.Section(name="main")

            exp.main += al.Page(title="Demo", name="DemoPage")

        Using a basic section and filling it with a page in class style::

            import alfred3 as al
            exp = al.Experiment()

            @exp.member
            class Main(al.Section): pass

            @exp.member(of_section="Main")
            class DemoPage(al.Page):
                title = "Demo"
    """

    # The following flags are class-level defaults; subclasses override
    # them to define their movement policy.

    #: Controls, whether participants can move forward from pages in
    #: this section.
    allow_forward: bool = True

    #: Controls, whether participants can move backward from *and to*
    #: pages in this section.
    allow_backward: bool = True

    #: Controls, whether participants can jump *from* pages in this
    #: section
    allow_jumpfrom: bool = True

    #: Controls, whether participants can jump *to* pages in this
    #: section.
    allow_jumpto: bool = True

    #: If *True*, pages in this section will be closed on leaving
    close_pages_on_leave: bool = False

    #: If True, the members of this section will be randomized every
    #: time the section is entered.
    shuffle: bool = False
def __init__(self, title: str = None, name: str = None, shuffle: bool = None, **kwargs):
    """Initialize the section; see the class docstring for argument details."""
    super().__init__(title=title, name=name, **kwargs)
    # dict: direct members (pages and subsections), keyed by name.
    self._members = {}
    self._should_be_shown = True
    #: bool: Boolean flag, indicating whether the experiment session
    #: is currently operating within this section
    self.active: bool = False
    # An instance-level *shuffle* argument overrides the class attribute.
    if shuffle is not None:
        self.shuffle = shuffle
    # Fail early if a subclass mistakenly defines page-only hook methods.
    self._catch_page_hooks()
def __contains__(self, member):
    # Accepts either a member/element object (checked via its ``name``
    # attribute) or a plain name string.
    try:
        return member.name in self.all_members or member.name in self.all_elements
    except AttributeError:
        return member in self.all_members or member in self.all_elements

def __iadd__(self, other):
    # ``section += member`` is the canonical way to add pages/subsections.
    self.append(other)
    return self

def __getitem__(self, name):
    # Square-bracket access resolves any direct or nested member by name.
    return self.all_members[name]

def __setitem__(self, name, value):
    # Allows re-assignment of an *existing* member only; adding new members
    # must go through ``+=`` / :meth:`.append`.
    # NOTE(review): the guard tests ``"members" in self.__dict__``, but the
    # backing attribute is stored as ``_members`` — so this condition looks
    # like it is always False and every assignment raises KeyError. Confirm
    # whether ``"_members"`` was intended.
    if "members" in self.__dict__ and name in self.members:
        if self.members[name] is value:
            return
        else:
            raise AlfredError(f"{name} is a member of {self}. The name is reserved.")
    else:
        raise KeyError(
            (
                f"{name} not found in members of {self}. "
                "You can use square bracket syntax only for changing existing pages, not for adding "
                "new ones. Please use the augmented assignment operator '+=' for this purpose."
            )
        )

def __getattr__(self, name):
    # Fallback attribute access: unknown attribute names are resolved as
    # member names (e.g. ``exp.main.DemoPage``).
    try:
        return self.all_members[name]
    except KeyError:
        raise AttributeError(f"{self} has no attribute '{name}'.")

def __setattr__(self, name, value):
    # Protects existing member names against being overwritten by ordinary
    # attribute assignment.
    # NOTE(review): same ``"members" in self.__dict__`` guard as in
    # ``__setitem__`` — verify that it can ever be True.
    if "members" in self.__dict__ and name in self.members:
        if self.members[name] is value:
            return
        else:
            raise AlfredError(f"{name} is a member of {self}. The name is reserved.")
    else:
        self.__dict__[name] = value
def _shuffle_members(self):
"""Non-recursive shuffling of this section's members."""
members = list(self.members.items())
shuffle(members)
self._members = dict(members)
@property
def members(self) -> dict:
    """
    Dictionary of the section's members.
    """
    return self._members

@members.setter
def members(self, value):
    # Backing storage lives in ``_members``.
    self._members = value
@property
def empty(self) -> bool:
    """
    True, if there are no pages or subsections in this section.
    """
    # Idiomatic truthiness test replaces the ``False if ... else True``
    # anti-pattern; behavior is unchanged.
    return not self.members
@property
def all_updated_members(self) -> dict:
    """
    Returns a dict of all members that already have exp access.

    Operates recursively, i.e. pages and subsections of subsections
    are included.
    """
    candidates = self.all_members.items()
    return {name: member for name, member in candidates if member.exp is not None}

@property
def all_updated_pages(self) -> dict:
    """
    Returns a dict of all pages in the current section that have
    access to the experiment session. Operates recursively, i.e.
    pages in subsections are included.
    """
    return {
        name: member
        for name, member in self.all_updated_members.items()
        if isinstance(member, _PageCore)
    }

@property
def all_updated_elements(self) -> dict:
    """
    Returns a dict of all elements in the current section that have
    access to the experiment session. Operates recursively, i.e.
    elements on pages in subsections are included.
    """
    collected = {}
    for page in self.all_updated_pages.values():
        collected.update(page.updated_elements)
    return collected
@property
def all_members(self) -> dict:
    """
    Returns a flat dict of all members in this section and its subsections.

    The order is preserved, i.e. members are listed in this dict in
    the same order in which they appear in the experiment.
    """
    flat = {}
    for name, member in self.members.items():
        flat[name] = member
        # Recurse into subsections, flattening their members as well.
        if isinstance(member, Section):
            flat.update(member.all_members)
    return flat
@property
def last_member(self):
    """
    Last direct member of this section (page or subsection), or None
    if the section is empty.
    """
    return next(reversed(self.members.values()), None)

@property
def first_member(self):
    """
    First direct member of this section (page or subsection), or None
    if the section is empty.
    """
    return next(iter(self.members.values()), None)

@property
def first_page(self):
    """
    First page inside this section (recursive), or None if there is none.
    """
    return next(iter(self.all_pages.values()), None)

@property
def last_page(self):
    """
    Last page inside this section (recursive), or None if there is none.
    """
    return next(reversed(self.all_pages.values()), None)
@property
def all_subsections(self) -> dict:
    """
    Returns a flat dict of all sections in this section and its subsections.

    The order is preserved, i.e. sections are listed in this dict in
    the same order in which they appear in the experiment.
    """
    found = {}
    for name, member in self.members.items():
        if isinstance(member, Section):
            found[name] = member
            found.update(member.all_subsections)
    return found

@property
def subsections(self) -> dict:
    """
    Returns a flat dict of the *direct* subsections of this section.

    Subsections of subsections are not included. Use
    :attr:`.all_subsections` for that purpose.
    """
    return {
        name: member
        for name, member in self.members.items()
        if isinstance(member, Section)
    }

@property
def all_pages(self) -> dict:
    """
    Returns a flat dict of all pages in this section and its subsections.

    The order is preserved, i.e. pages are listed in this dict in
    the same order in which they appear in the experiment.
    """
    found = {}
    for name, member in self.members.items():
        if isinstance(member, _PageCore):
            found[name] = member
        elif isinstance(member, Section):
            found.update(member.all_pages)
    return found

@property
def all_closed_pages(self) -> dict:
    """
    Returns a flat dict of all *closed* pages in this section and its
    subsections, in experiment order.
    """
    return {name: page for name, page in self.all_pages.items() if page.is_closed}

@property
def all_shown_pages(self) -> dict:
    """
    Returns a flat dict of all pages in this section and its
    subsections that have already been shown, in experiment order.
    """
    return {name: page for name, page in self.all_pages.items() if page.has_been_shown}

@property
def pages(self) -> dict:
    """
    Returns a flat dict of the *direct* pages of this section.

    Pages in subsections are not included. Use :attr:`.all_pages`
    for that purpose.
    """
    return {
        name: member
        for name, member in self.members.items()
        if isinstance(member, _PageCore)
    }
@property
def all_elements(self) -> dict:
    """
    Returns a flat dict of all elements in this section.

    Recursive: Includes elements from pages in this section and all
    its subsections.
    """
    collected = {}
    for page in self.all_pages.values():
        collected.update(page.elements)
    return collected

@property
def all_input_elements(self) -> dict:
    """
    Returns a flat dict of all input elements in this section.

    Recursive: Includes elements from pages in this section and all
    its subsections.
    """
    collected = {}
    for page in self.all_pages.values():
        collected.update(page.input_elements)
    return collected

@property
def all_shown_input_elements(self) -> dict:
    """
    Returns a flat dict of all shown input elements in this section.

    Recursive: Includes elements from pages in this section and all
    its subsections.
    """
    collected = {}
    shown_pages = (p for p in self.all_pages.values() if p.has_been_shown)
    for page in shown_pages:
        collected.update(page.input_elements)
    return collected

@property
def data(self) -> dict:
    """
    Returns a dictionary of user input data for all pages in this
    section and its subsections.
    """
    merged = {}
    for page in self.all_pages.values():
        merged.update(page.data)
    return merged

@property
def unlinked_data(self) -> dict:
    """
    Returns a dictionary of user input data for all *unlinked* pages
    in this section and its subsections.
    """
    merged = {}
    for page in self.all_pages.values():
        merged.update(page.unlinked_data)
    return merged
def added_to_experiment(self, exp):
    # docstring inherited
    super().added_to_experiment(exp)
    self.log.add_queue_logger(self, __name__)
    # Run the one-time hook, then propagate exp access to the subtree.
    self.on_exp_access()
    self._update_members_recursively()

def _update_members(self):
    # Hands experiment and section references to direct members that do
    # not have them yet.
    for member in self.members.values():
        if not member.experiment:
            member.added_to_experiment(self.exp)
        if not member.section:
            member.added_to_section(self)

def _update_members_recursively(self):
    # Like :meth:`._update_members`, but walks the whole member subtree.
    self._update_members()
    for member in self.members.values():
        member._update_members_recursively()

def _generate_unset_tags_in_subtree(self):
    # Assigns position-based tags ("1", "2", ...) to members without an
    # explicit tag; recurses into subsections.
    for i, member in enumerate(self.members.values(), start=1):
        if member.tag is None:
            member.tag = str(i)
        if isinstance(member, Section):
            member._generate_unset_tags_in_subtree()

def append(self, *items):
    """
    Appends a variable number of pages or subsections to the section.

    In practice, it is recommended to use the augmented assignment
    operator ``+=`` instead in order to add pages or subsections.
    """
    for item in items:
        # Member names must not clash with section attributes or with
        # already-registered members.
        if item.name in dir(self):
            raise ValueError(f"Name of {item} is also an attribute of {self}.")
        if item.name in self.members:
            raise AlfredError(f"Name '{item.name}' is already present in the experiment.")
        item.added_to_section(self)
        self.members[item.name] = item
        # If the section already belongs to an experiment, wire up the
        # new member (and its subtree) immediately.
        if self.experiment is not None:
            item.added_to_experiment(self.experiment)
            item._update_members_recursively()
        # NOTE(review): the item was already inserted above, so
        # ``len(self.members) + 1`` gives the first appended member the
        # tag "2", while :meth:`._generate_unset_tags_in_subtree` starts
        # numbering at "1". Possible off-by-one — confirm intended
        # numbering before changing.
        if not item.tag:
            item.tag = str(len(self.members) + 1)
def on_exp_access(self):
    """
    Hook, executed *once* as soon as the :class:`.ExperimentSession`
    becomes available to the section.

    .. warning:: The four section hooks *on_enter*, *on_hand_over*,
        *on_resume*, and *on_leave* are currently under review and may
        change. Prefer page hooks where possible; the attributes
        :attr:`.Section.first_page` and :attr:`.Section.last_page` can
        help with that. *on_exp_access* itself is here to stay, though
        it may receive an alternative name to avoid confusion with
        :meth:`.Page.on_exp_access`.

    See Also:
        See "How to use hooks" for a how to on using hooks and an
        overview of available hooks.
    """

def on_enter(self):
    """
    Hook, executed *every time* this section is entered.

    .. warning:: Under review — prefer page hooks instead (see
        :meth:`.on_exp_access` for details).

    See Also:
        See "How to use hooks" for a how to on using hooks and an
        overview of available hooks.
    """

def on_leave(self):
    """
    Hook, executed *every time* this section is left.

    .. warning:: Under review — prefer page hooks instead (see
        :meth:`.on_exp_access` for details).

    See Also:
        See "How to use hooks" for a how to on using hooks and an
        overview of available hooks.
    """

def on_resume(self):
    """
    Hook, executed *every time* the experiment resumes from a direct
    subsection to this section.

    Resuming takes place when a child section is left and the next page
    is a direct child of this (parent) section — the parent then becomes
    the primary current section again.

    .. warning:: Under review — prefer page hooks instead (see
        :meth:`.on_exp_access` for details).

    See Also:
        See "How to use hooks" for a how to on using hooks and an
        overview of available hooks.
    """

def on_hand_over(self):
    """
    Hook, executed *every time* a direct subsection of this section is
    entered.

    .. warning:: Under review — prefer page hooks instead (see
        :meth:`.on_exp_access` for details).

    See Also:
        See "How to use hooks" for a how to on using hooks and an
        overview of available hooks.
    """
def _enter(self):
    # Marks the section active, runs the enter hook, wires up members,
    # optionally shuffles, and recursively enters a leading subsection.
    self.active = True
    self.log.debug(f"Entering {self}.")
    self.on_enter()
    self._update_members()
    if self.shuffle:
        self._shuffle_members()
    # If the first member is a not-yet-active subsection, hand over and
    # enter it right away so the experiment starts on an actual page.
    if isinstance(self.first_member, Section) and not self.first_member.active:
        self._hand_over()
        self.first_member._enter()

def _leave(self):
    self.log.debug(f"Leaving {self}.")
    self.on_leave()
    try:
        self.validate_on_leave()
    except ValidationError:
        # Abort the move instead of leaving the section with invalid data.
        raise AbortMove
    if self.close_pages_on_leave:
        for page in self.pages.values():
            page.close()
    # Leaving the last member of the parent section leaves the parent, too.
    if self is self.parent.last_member:
        self.parent._leave()

def _resume(self):
    self.log.debug(f"Resuming to {self}.")
    self.on_resume()

def _hand_over(self):
    self.log.debug(f"{self} handing over to child section.")
    self.on_hand_over()

def _forward(self):
    # No-op move hooks: subclasses override these to add behavior on the
    # respective move type.
    pass

def _backward(self):
    pass

def _jumpfrom(self):
    pass

def _jumpto(self):
    pass

def _move(self, direction, from_page, to_page):
    """
    Conducts a section's part of moving in an alfred experiment.

    Raises:
        ValidationError: If validation of the current page fails.
    """
    # 1) Direction-specific hook on the section that is being moved from.
    if direction == "forward":
        self._forward()
    elif direction == "backward":
        self._backward()
    elif direction == "jumpto":
        self._jumpto()
    elif direction.startswith("jump"):
        self._jumpfrom()
    # 2) Hand over if the target page lies in a subsection of this
    #    section; otherwise leave this section (unless the target is here).
    if to_page.section.name in self.all_members:
        self._hand_over()
    elif not to_page.section is self:
        self._leave()
    if direction.startswith("jump"):
        to_page.section._jumpto()
    # 3) If the target section is an ancestor of this one, it resumes;
    #    otherwise it is freshly entered.
    if self.name in to_page.section.all_members:
        to_page.section._resume()
    elif not to_page.section is self:
        to_page.section._enter()
    if self.exp.aborted:
        raise AbortMove
def _validate(self, direction: str):
    # Dispatches to the direction-specific validation method.
    if direction == "forward":
        self.validate_on_forward()
    elif direction == "backward":
        self.validate_on_backward()
    elif direction.startswith("jump"):
        self.validate_on_jump()

def validate_on_leave(self):
    """
    Validates pages and their input elements within the section.

    Can be overloaded to change the validating behavior of a derived
    section.

    Notes:
        Validation is conducted only for pages that are direct
        children of this section. Pages in subsections are not
        validated.

    Raises:
        ValidationError: If validation fails.
    """
    for page in self.pages.values():
        if not page._validate():
            raise ValidationError()
        if not page._validate_elements():
            # Show a hint to the participant before aborting the move.
            msg = self.exp.config.get("hints", "no_input_section_validation")
            msg = msg.format(n=len(self.pages))
            self.exp.post_message(msg, level="danger")
            raise ValidationError()

def validate_on_move(self):
    """
    Validates the current page and its elements.

    Can be overloaded to change the validating behavior of a derived
    section. By default, this validation method is called on each
    forward and backward move, as well as when participants jump
    *from* the section, but not when they jump *to* the section.

    Raises:
        ValidationError: If validation fails.

    See Also:
        Use the individual methods :meth:`.validate_on_forward`,
        :meth:`.validate_on_backward`, :meth:`.validate_on_jumpfrom`,
        and :meth:`.validate_on_jumpto` if you want even more fine-
        grained control over validation behavior.

    .. versionchanged:: 2.1.0
        Switched the default order of validation. Now, elements are
        validated first, then the page itself is validated.
    """
    # Elements first, then the page itself (order changed in v2.1.0).
    if not self.exp.current_page._validate_elements():
        raise ValidationError()
    if not self.exp.current_page._validate():
        raise ValidationError()
def validate_on_forward(self):
    """
    Called for validation on each forward move.

    Overload this method to customize validation behavior; by default
    it simply delegates to :meth:`.validate_on_move`.
    """
    self.validate_on_move()

def validate_on_backward(self):
    """
    Called for validation on each backward move.

    Overload this method to customize validation behavior; by default
    it simply delegates to :meth:`.validate_on_move`.
    """
    self.validate_on_move()

def validate_on_jump(self):
    """
    Called for validation on jumping from this section.

    Overload this method to customize validation behavior; by default
    it simply delegates to :meth:`.validate_on_move`.
    """
    self.validate_on_move()
def _catch_page_hooks(self):
"""
Raises errors, if users define page hooks on a section.
"""
explanation = " This does not work. Remove the method to continue."
try:
self.on_first_show()
msg = f"You tried to use the page-only hook method 'on_first_show' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
try:
self.on_each_show()
msg = f"You tried to use the page-only hook method 'on_each_show' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
try:
self.on_each_hide()
msg = f"You tried to use the page-only hook method 'on_each_hide' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
try:
self.on_first_hide()
msg = f"You tried to use the page-only hook method 'on_first_hide' on the section {self}."
raise AlfredError(msg + explanation)
except AttributeError:
pass
@inherit_kwargs
class RevisitSection(Section):
    """
    A section that disables all input elements upon moving forward (and
    jumping) from it, but still allows participants to revisit previous
    pages.

    Args:
        {kwargs}

    Examples:
        Instance style::

            import alfred3 as al
            exp = al.Experiment()

            exp += al.RevisitSection(name="main")
            exp.main += al.Page(title="Demo", name="DemoPage")

        Class style::

            import alfred3 as al
            exp = al.Experiment()

            @exp.member
            class Main(al.RevisitSection): pass

            @exp.member(of_section="Main")
            class DemoPage(al.Page):
                title = "Demo"
    """

    allow_forward: bool = True
    allow_backward: bool = True
    allow_jumpfrom: bool = True
    allow_jumpto: bool = True

    def _forward(self):
        # Closing the page freezes its inputs once it has been submitted.
        super()._forward()
        self.exp.movement_manager.current_page.close()

    def _jumpfrom(self):
        # Jumping away counts as submitting the current page, too.
        super()._jumpfrom()
        self.exp.movement_manager.current_page.close()
@inherit_kwargs
class HideOnForwardSection(Section):
    """
    A section that hides pages once they have been submitted.

    Args:
        {kwargs}

    This section enables the following behaviors:

    1. Once a participant has entered their data on a page and submitted
       it by pressing "forward", the participant cannot revisit that
       page – it is hidden.
    2. The participant can, however, go back to pages in previous sections.
       For instance, they may revisit the instructions page. A press on
       "forward" will then ignore the hidden pages and take the participant
       back to the most recent page.

    .. versionadded:: 2.3.0

    Examples:
        You can test the section's behavior with this example::

            import alfred3 as al
            exp = al.Experiment()

            main = al.Section(name="main")
            main += al.Page(name="first")

            hide = al.HideOnForwardSection(name="hide")
            hide += al.Page(name="second")
            hide += al.Page(name="third")

            exp += main
            exp += hide
    """

    allow_forward: bool = True
    allow_backward: bool = True
    allow_jumpfrom: bool = True
    allow_jumpto: bool = True

    def _forward(self):
        # Close the submitted page and hide it from future navigation.
        super()._forward()
        self.exp.movement_manager.current_page.close()
        self.exp.movement_manager.current_page.should_be_shown = False

    def _jumpfrom(self):
        # Jumping away counts as submitting, too.
        super()._jumpfrom()
        self.exp.movement_manager.current_page.close()
        self.exp.movement_manager.current_page.should_be_shown = False
@inherit_kwargs
class ForwardOnlySection(RevisitSection):
    """
    A section that permits only single forward steps: no backward moves
    and no jumping in either direction.

    Args:
        {kwargs}

    Examples:
        Instance style::

            import alfred3 as al
            exp = al.Experiment()

            exp += al.ForwardOnlySection(name="main")
            exp.main += al.Page(title="Demo", name="DemoPage")

        Class style::

            import alfred3 as al
            exp = al.Experiment()

            @exp.member
            class Main(al.ForwardOnlySection): pass

            @exp.member(of_section="Main")
            class DemoPage(al.Page):
                title = "Demo"
    """

    allow_forward: bool = True
    allow_backward: bool = False
    allow_jumpfrom: bool = False
    allow_jumpto: bool = False
@inherit_kwargs
class _FinishedSection(Section):
    """
    A section that finishes the experiment as soon as it is entered.

    Args:
        {kwargs}
    """

    allow_forward: bool = False
    allow_backward: bool = False
    allow_jumpfrom: bool = False
    allow_jumpto: bool = True

    def _enter(self):
        # Entering this section ends the experiment session.
        super()._enter()
        self.experiment.finish()
class _AbortSection(Section):
    # Terminal section used when a session is aborted: no further moves
    # are allowed, but jumping *into* it is permitted.
    allow_forward: bool = False
    allow_backward: bool = False
    allow_jumpfrom: bool = False
    allow_jumpto: bool = True
@inherit_kwargs
class _RootSection(Section):
    """
    A section that serves as parent for all other sections in the
    experiment.

    Args:
        {kwargs}

    Defines the '_content' section and the '__finished_section' as its
    only direct children.
    """

    name = "_root"

    def __init__(self, experiment):
        super().__init__()
        self._experiment = experiment
        self.log.add_queue_logger(self, __name__)
        # Regular experiment content lives in this subsection.
        self.content = Section(name="_content")
        self.admin_section = None
        # Subsection that shows the final page once the experiment finishes.
        self.finished_section = _FinishedSection(name="__finished_section")
        self.finished_section += _DefaultFinalPage(name="_final_page")
        # Cache slots for the (currently deactivated) caching below.
        self._all_pages_list = None
        self._all_page_names = None

    def append_root_sections(self):
        # In admin mode, an admin section takes the place of the regular
        # '_content' section.
        if self.exp.admin_mode:
            from .admin import _AdminSection

            self += _AdminSection(name="_content")
            self += self.finished_section
        else:
            self += self.content
            self += self.finished_section

    @property
    def all_page_names(self):
        """
        Improvised caching mechanism for the list of all page names.

        Danger: The caching is not compatible with sections that shuffle their members.
            The shuffling does not affect this list, which is unexpected
            behavior in most cases.
            For this reason, the caching is currently deactivated.
        """
        return list(self.all_pages.keys())
        # if not self._all_page_names:
        #     self._all_page_names = list(self.all_pages.keys())
        # elif not len(self._all_page_names) == len(self.all_pages):
        #     self._all_page_names = list(self.all_pages.keys())
        # return self._all_page_names

    @property
    def all_pages_list(self):
        """
        Improvised caching mechanism for the list of all pages.

        Danger: The caching is not compatible with sections that shuffle their members.
            The shuffling does not affect this list, which is unexpected
            behavior in most cases.
            For this reason, the caching is currently deactivated.
        """
        return list(self.all_pages.values())
        # if not self._all_pages_list:
        #     self._all_pages_list = list(self.all_pages.values())
        # elif not len(self._all_pages_list) == len(self.all_pages):
        #     self._all_pages_list = list(self.all_pages.values())
        # return self._all_pages_list

    @property
    def final_page(self):
        # Resolved via member-name lookup on the finished section
        # (see Section.__getattr__).
        return self.finished_section._final_page
| src/alfred3/section.py | 33,080 | A section that allows only a single step forward; no jumping and no
backwards steps.
Args:
{kwargs}
Examples:
Using an ForwardOnlySection and filling it with a page in instance
style::
import alfred3 as al
exp = al.Experiment()
exp += al.ForwardOnlySection(name="main")
exp.main += al.Page(title="Demo", name="DemoPage")
Using a basic section and filling it with a page in class style::
import alfred3 as al
exp = al.Experiment()
@exp.member
class Main(al.ForwardOnlySection): pass
@exp.member(of_section="Main")
class DemoPage(al.Page):
title = "Demo"
A section that hides pages once they have been submitted.
Args:
{kwargs}
This section enables the following behaviors:
1. Once a participant has entered their data on a page and submitted
it by pressing "forward", the participant cannot revisit that
page – it is hidden.
2. The participant can, however, go back to pages in previous sections.
For instance, they may revisit the instructions page. A press on
"forward" will then ignore the hidden pages and take the participant
back to the most recent page.
.. versionadded:: 2.3.0
Examples:
You can test the section's behavior with this example::
import alfred3 as al
exp = al.Experiment()
main = al.Section(name="main")
main += al.Page(name="first")
hide = al.HideOnForwardSection(name="hide")
hide += al.Page(name="second")
hide += al.Page(name="third")
exp += main
exp += hide
A section that disables all input elements upon moving forward (and
jumping) from it, but still allows participants to revisit previous
pages.
Args:
{kwargs}
Examples:
Using a RevisitSection and filling it with a page in instance
style::
import alfred3 as al
exp = al.Experiment()
exp += al.RevisitSection(name="main")
exp.main += al.Page(title="Demo", name="DemoPage")
Using a basic section and filling it with a page in class style::
import alfred3 as al
exp = al.Experiment()
@exp.member
class Main(al.RevisitSection): pass
@exp.member(of_section="Main")
class DemoPage(al.Page):
title = "Demo"
The basic section, allows forward and backward movements.
Args:
shuffle (bool): If True, the order of all members in this
section will be randomized every time the section is entered.
Shuffling is not recursive, it only affects direct members
of a section. That means, if there are subsections,
their position in the parent section will be randomized,
but the members within the subsection will not be affected.
Defaults to False. Can be defined as a class attribute.
{kwargs}
Examples:
Using a basic section and filling it with a page in instance
style::
import alfred3 as al
exp = al.Experiment()
exp += al.Section(name="main")
exp.main += al.Page(title="Demo", name="DemoPage")
Using a basic section and filling it with a page in class style::
import alfred3 as al
exp = al.Experiment()
@exp.member
class Main(al.Section): pass
@exp.member(of_section="Main")
class DemoPage(al.Page):
title = "Demo"
A section that finishes the experiment on entering it.
Args:
{kwargs}
A section that serves as parent for all other sections in the
experiment.
Args:
{kwargs}
Defines the '_content' section and the '__finished_section' as its
only direct children.
Raises errors, if users define page hooks on a section.
Conducts a section's part of moving in an alfred experiment.
Raises:
ValidationError: If validation of the current page fails.
Non-recursive shuffling of this section's members.
Returns a flat dict of all *closed* pages in this section and its
subsections.
The order is preserved, i.e. pages are listed in this dict in
the same order in which they appear in the experiment.
Returns a flat dict of all elements in this section.
Recursive: Includes elements from pages in this section and all
its subsections.
Returns a flat dict of all input elements in this section.
Recursive: Includes elements from pages in this section and all
its subsections.
Returns a flat dict of all members in this section and its subsections.
The order is preserved, i.e. members are listed in this dict in
the same order in which they appear in the experiment.
Improvised caching mechanism for the list of all page names.
Danger: The caching is not compatible with sections that shuffle their members.
The shuffling does not affect this list, which is unexpected
behavior in most cases.
For this reason, the caching is currently deactivated.
Returns a flat dict of all pages in this section and its subsections.
The order is preserved, i.e. pages are listed in this dict in
the same order in which they appear in the experiment.
Improvised caching mechanism for the list of all pages.
Danger: The caching is not compatible with sections that shuffle their members.
The shuffling does not affect this list, which is unexpected
behavior in most cases.
For this reason, the caching is currently deactivated.
Returns a flat dict of all shown input elements in this section.
Recursive: Includes elements from pages in this section and all
its subsections.
Returns a flat dict of all pages in this section and its
subsections that have already been shown.
The order is preserved, i.e. pages are listed in this dict in
the same order in which they appear in the experiment.
Returns a flat dict of all sections in this section and its subsections.
The order is preserved, i.e. sections are listed in this dict in
the same order in which they appear in the experiment.
Returns a dict of all elements in the current section that have
access to the experiment session. Operates recursively, i.e.
elements on pages in subsections are included.
Returns a dict of all members that already have exp access.
Operates recursively, i.e. pages and subsections of subsections
are included.
Returns a dict of all pages in the current section that have
access to the experiment session. Operates recursively, i.e.
pages in subsections are included.
Appends a variable number of pages or subsections to the section.
In practice, it is recommended to use the augmented assignment
operator ``+=`` instead in order to add pages or subsections.
Returns a dictionary of user input data for all pages in this
section and its subsections.
True, if there are no pages or subsections in this section.
Returns the first member of the current section. Can be a page
or a subsection.
Returns the first page inside the current section.
Returns the last member of the current section. Can be a page
or a subsection.
Returns the last page inside the current section.
Dictionary of the section's members.
Executed *every time* this section is entered.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
Executed *once*, when the :class:`.ExperimentSession` becomes
available to the section.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
Executed *every time* a direct subsection of this section is entered.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
Executed *every time* this section is left.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
Executed *every time* the experiment resumes from a direct subsection to this section.
Resuming takes place, when a child section is left and the
next page is a direct child of the parent section. Then
the parent section becomes the primary current section again: it
resumes its status.
.. warning:: We are currently questioning the four section hooks *on_enter*,
*on_hand_over*, *on_resume*, and *on_leave*. Everything that you may wish
to accomplish with these hooks can be done in page hooks. The section
versions have some caveats that make them a bit tougher
to use correctly. So, for the meantime, please avoid these hooks and
use page hooks instead. The attributes :attr:`.Section.first_page`
and :attr:`.Section.last_page` may be useful for you in this regard.
The :meth:`.Section.on_exp_access` hook is not going anywhere, although we may
at some point decide to introduce an alternative name for it in order
to avoid confusion with :meth:`.Page.on_exp_access`.
See Also:
See "How to use hooks" for a how to on using hooks and an overview
of available hooks.
Returns a flat dict of all pages in this section.
Pages in subsections are not included. Use :attr:`.all_pages`
for that purpose.
Returns a flat dict of all subsections in this section.
Subsections in subsections are not included. Use
:attr:`.all_subsections` for that purpose.
Returns a dictionary of user input data for all *unlinked* pages
in this section and its subsections.
Called for validation on each backward move.
Overload this method to customize validation behavior.
See Also:
By default, sections use :meth:`.validate_on_move` for
validation on all kinds of moves.
Called for validation on each forward move.
Overload this method to customize validation behavior.
See Also:
By default, sections use :meth:`.validate_on_move` for
validation on all kinds of moves.
Called for validation on jumping from this section.
Overload this method to customize validation behavior.
See Also:
By default, sections use :meth:`.validate_on_move` for
validation on all kinds of moves.
Validates pages and their input elements within the section.
Can be overloaded to change the validating behavior of a derived
section.
Notes:
Validation is conducted only for pages that are direct
children of this section. Pages in subsections are not
validated.
Raises:
ValidationError: If validation fails.
Validates the current page and its elements.
Can be overloaded to change the validating behavior of a derived
section. By default, this validation method is called on each
forward and backward move, as well as when participants jump
*from* the section, but not when they jump *to* the section.
Raises:
ValidationError: If validation fails.
See Also:
Use the individual methods :meth:`.validate_on_forward`,
:meth:`.validate_on_backward`, :meth:`.validate_on_jumpfrom`,
and :meth:`.validate_on_jumpto` if you want even more fine-
grained control over validation behavior.
.. versionchanged:: 2.1.0
Switched the default order of validation. Now, elements are
validated first, then the page itself is validated.
Sections organize movement between pages in an experiment.
.. moduleauthor:: Johannes Brachem <jbrachem@posteo.de>, Paul Wiemann <paulwiemann@gmail.com>
-*- coding:utf-8 -*-: Controls, whether participants can move forward from pages in: this section.: Controls, whether participants can move backward from *and to* : pages in this section.: Controls, whether participants can jump *from* pages in this: section: Controls, whether participants can jump *to* pages in this: section.: If *True*, pages in this section will be closed on leaving: If True, the members of this section will be randomized every: time the section is entered.: bool: Boolean flag, indicating whether the experiment session: is currently operating within this section docstring inherited if not self._all_page_names: self._all_page_names = list(self.all_pages.keys()) elif not len(self._all_page_names) == len(self.all_pages): self._all_page_names = list(self.all_pages.keys()) return self._all_page_names if not self._all_pages_list: self._all_pages_list = list(self.all_pages.values()) elif not len(self._all_pages_list) == len(self.all_pages): self._all_pages_list = list(self.all_pages.values()) return self._all_pages_list | 14,981 | en | 0.845974 |
import bottle, logging, argparse, json, sys
from beaker.middleware import SessionMiddleware
from . import database, processing, routing
logger = logging.getLogger("snuggle.api.server")
def load_config(filename):
    """Load and parse a JSON configuration file.

    :param filename: path to the JSON configuration file
    :return: the parsed configuration (typically a dict)
    :raises Exception: if the file cannot be read or parsed; the original
        error message is included for diagnosis.
    """
    try:
        # Context manager guarantees the handle is closed even on a
        # parse error (the old code leaked the open file object).
        with open(filename) as f:
            return json.load(f)
    except Exception as e:
        raise Exception("Could not load configuration file: %s" % e)
def application(config):
    """Build the WSGI application for the snuggle API.

    Sets up the database and processors, then wraps the default bottle
    app in beaker session middleware.

    :param config: parsed configuration dict; must contain a
        ``sessions`` section with a ``secret`` entry.
    :return: the session-wrapped WSGI application
    """
    # Database first, since the processors are bound to it.
    db = database.DB(config)
    processing.configure(db, config)

    session_opts = {
        'session.type': "memory",
        'session.key': "s_id",
        'session.secret': config['sessions']['secret'],
        'session.timeout': 60 * 30,  # 30 minutes
        'session.auto': True
    }
    return SessionMiddleware(bottle.default_app(), session_opts)
def main():
    """Command-line entry point: parse args, set up logging, start the server.

    Accepts a configuration file path (parsed eagerly by ``load_config``)
    plus ``--profile`` and ``--debug`` flags.
    """
    parser = argparse.ArgumentParser(
        description='Loads a jsop API for snuggle'
    )
    parser.add_argument(
        'config',
        type=load_config,
        help='the path to the configuration file'
    )
    parser.add_argument(
        '-p', "--profile",
        action="store_true",
        default=False,
        help='run in profile mode?'
    )
    parser.add_argument(
        '-d', "--debug",
        action="store_true",
        default=False,
        help='print debugging output?'
    )
    args = parser.parse_args()

    LOGGING_STREAM = sys.stderr
    logging.basicConfig(
        level=logging.DEBUG if args.debug else logging.INFO,
        stream=LOGGING_STREAM,
        format='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%b-%d %H:%M:%S'
    )

    if args.profile:
        try:
            import pstats
            import cProfile as profile
        except ImportError:
            import profile
        # BUG FIX: tempfile was referenced below without ever being
        # imported, so profile mode died with a NameError.
        import tempfile
        f = tempfile.NamedTemporaryFile()
        # Profile the whole server run and show the 10 slowest entries.
        profile.runctx("run(args.config)", globals(), locals(), f.name)
        p = pstats.Stats(f.name)
        p.strip_dirs().sort_stats("time").print_stats(10)
    else:
        run(args.config)
def run(config):
    """Build the application from *config* and serve it with cherrypy."""
    logger.info("Configuring system.")
    app = application(config)

    logger.info("Running server.")
    server_conf = config['server']
    bottle.run(
        app=app,
        host=server_conf['host'],
        port=server_conf['port'],
        server='cherrypy'
    )
if __name__ == "__main__":
    # NOTE(review): this debug call runs before logging.basicConfig()
    # (called inside main()), so it is silently dropped — confirm
    # whether it is still wanted.
    logging.debug("calling main()")
    main()
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from registration.backends.simple.views import RegistrationView
from plan.models import Plan, UserProfile
from plan.forms import UserForm, RegistrationFormDHHD
def index(request):
    """Render the home page with the most viewed and most recent plans.

    Both lists contain at most three active plans, formatted for
    display by ``reformat_plan``.
    """
    # Top three active plans by view count.
    popular = reformat_plan(
        Plan.objects.filter(active=True).order_by('-views')[:3])
    # Three most recently published active plans.
    recent = reformat_plan(
        Plan.objects.filter(active=True).order_by('-pub_date')[:3])

    context = {'popular_plans': popular, 'recent_plans': recent}
    return render(request, 'index.html', context)
def about(request):
    """Render the static "about" page."""
    context = {}
    return render(request, 'about.html', context)
def reformat_plan(plan_list):
    """Normalise plan fields for display; mutates the plans in place.

    For each plan:

    * ``width`` and ``depth`` (decimal feet) become ``FF'-II"`` strings,
    * ``bed`` becomes an int (bedrooms are whole numbers),
    * ``bath`` becomes an int when there is no half-bath.

    :param plan_list: iterable of plan objects
    :return: the same ``plan_list`` for call-chaining convenience
    """
    for plan in plan_list:
        plan.width = _format_feet_inches(plan.width)
        plan.depth = _format_feet_inches(plan.depth)
        plan.bed = int(plan.bed)
        # Keep fractional bathroom counts (e.g. 2.5) as-is.
        if not plan.bath % 1:
            plan.bath = int(plan.bath)
    return plan_list


def _format_feet_inches(value):
    """Convert decimal feet to an ``FF'-II"`` display string.

    Rounds the fractional part to whole inches and carries a rounded
    12" into the feet component (10.999 -> ``11'-0"``). The old inline
    formatting produced the invalid ``10'-12"`` in that case.
    """
    feet = int(value)
    inches = round((value - feet) * 12)
    if inches == 12:  # rounding overflowed into the next foot
        feet += 1
        inches = 0
    return '%d\'-%d"' % (feet, inches)
@login_required
def myplans(request):
    """Render the logged-in user's favourite plans.

    Requires authentication (enforced by ``login_required``).
    """
    # request.user is already the authenticated User instance; the old
    # code re-fetched it by username, costing an extra query.
    profile = UserProfile.objects.get(user=request.user)
    plan_list = profile.fav_plans.all()
    return render(request, 'myplans.html', {'plan_list': plan_list})
class MyRegistrationView(RegistrationView):
    """Registration view using the DHHD form; creates a profile on signup."""

    form_class = RegistrationFormDHHD

    def get_success_url(self, request, user):
        # Send newly registered users back to the home page.
        return '/'

    def register(self, request, **cleaned_data):
        # Let the backend create the User, then attach an empty profile.
        new_user = super(MyRegistrationView, self).register(request, **cleaned_data)
        UserProfile(user=new_user).save()
        return new_user
| dhhd/dhhd/views.py | 2,295 | Query the database for a list of all the plans currently stored. Order the plans by the number of likes in descending order. Retrieve the top 5 only - or all if less than 5. Place the list in the context_dict dictionary which will be passed to the template engine.popular_plan_list = Plan.objects.order_by('-views')[:3] Most recent plans Re-format the width and depth from floats to FF'-II" format. Re-format bedrooms from float to int Re-format bathrooms to int if the number of bathrooms is whole | 498 | en | 0.809724 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore.common import dtype as mstype
from mindspore import nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore import context
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="Ascend")
class ForwardNet(nn.Cell):
    """Network exercising "if after if in while" control flow.

    ``construct`` mixes a ``while`` loop, a nested ``if`` and a second
    ``if`` after the loop, and assigns to a ``Parameter`` from several
    branches — the combination this control-flow test targets.
    """

    def __init__(self, max_cycles=10):
        super(ForwardNet, self).__init__()
        # Upper bound for the while loop in construct().
        self.max_cycles = max_cycles
        self.i = Tensor(np.array(0), mstype.int32)
        self.zero = Tensor(np.array(0), mstype.int32)
        # Written from inside construct(); exercises Parameter
        # assignment under control flow.
        self.weight = Parameter(Tensor(np.array(0), mstype.int32))

    def construct(self, x, y):
        # Accumulate x*y while out stays small, tracking progress in
        # self.weight as a side effect.
        i = self.i
        out = self.zero
        while i < self.max_cycles:
            self.weight = i
            if out <= 20:
                self.weight = i
                out = x * y + out
            i = i + 1
        # Second conditional after the loop ("if after if in while").
        if out >= 30:
            self.weight = out
            out = out - 30
        return out, self.weight
class BackwardNet(nn.Cell):
    """Wraps a forward network and returns gradients w.r.t. all inputs."""

    def __init__(self, net):
        super(BackwardNet, self).__init__(auto_prefix=False)
        self.forward_net = net
        self.grad = C.GradOperation(get_all=True)

    def construct(self, *inputs):
        # Build the gradient function, then evaluate it at *inputs.
        grad_fn = self.grad(self.forward_net)
        return grad_fn(*inputs)
def test_forward():
    """Forward output must match between graph and pynative modes."""
    x = Tensor(np.array(1), mstype.int32)
    y = Tensor(np.array(3), mstype.int32)
    # Run the same network once per execution mode.
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = ForwardNet(max_cycles=10)(x, y)
    context.set_context(mode=context.PYNATIVE_MODE)
    pynative_out = ForwardNet(max_cycles=10)(x, y)
    assert graph_out == pynative_out
def test_backward():
    """Gradients must match between graph and pynative modes."""
    x = Tensor(np.array(1), mstype.int32)
    y = Tensor(np.array(3), mstype.int32)
    # Differentiate the same forward network in both execution modes.
    context.set_context(mode=context.GRAPH_MODE)
    graph_grads = BackwardNet(ForwardNet(max_cycles=10))(x, y)
    context.set_context(mode=context.PYNATIVE_MODE)
    pynative_grads = BackwardNet(ForwardNet(max_cycles=10))(x, y)
    assert graph_grads == pynative_grads
| tests/st/control/inner/test_111_if_after_if_in_while.py | 3,096 | Copyright 2020 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ Graph Mode Pynative Mode Graph Mode Pynative Mode | 688 | en | 0.805959 |
import re
from datetime import date, datetime, time, timedelta, timezone
# Patterns accept both the basic (20210628T123045) and extended
# (2021-06-28T12:30:45) ISO-8601 forms, case-insensitively.
ISO_8601_DATETIME_REGEX = re.compile(
    r"^(\d{4})-?([0-1]\d)-?([0-3]\d)[t\s]?([0-2]\d:?[0-5]\d:?[0-5]\d|23:59:60|235960)(\.\d+)?(z|[+-]\d{2}:\d{2})?$",
    re.I,
)
ISO_8601_DATE_REGEX = re.compile(r"^(\d{4})-?([0-1]\d)-?([0-3]\d)$", re.I)
ISO_8601_TIME_REGEX = re.compile(
    r"^(?P<time>[0-2]\d:?[0-5]\d:?[0-5]\d|23:59:60|235960)(?P<microseconds>\.\d+)?(?P<tzpart>z|[+-]\d{2}:\d{2})?$",
    re.I,
)
ISO_8601_TIME_DURATION_REGEX = re.compile(
    r"^(?P<sign>-?)P(?=\d|T\d)(?:(?P<weeks>\d+)W)?(?:(?P<days>\d+)D)?(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+(?:\.\d+)?)S)?)?$",
    re.I,
)


def parse_iso_datetime(value: str) -> datetime:
    """Parse an ISO-8601 datetime string into a ``datetime``.

    Supports basic and extended forms, optional fractional seconds and
    an optional ``Z`` or ``+HH:MM``/``-HH:MM`` offset.

    :param value: the ISO-8601 datetime string
    :return: a naive or timezone-aware ``datetime``
    :raises ValueError: if *value* is not a valid ISO-8601 datetime
    """
    match = ISO_8601_DATETIME_REGEX.match(value)
    if not match:
        raise ValueError(f"passed value {value!r} is not valid ISO-8601 datetime.")
    date_parts = match.groups()

    time_part = date_parts[3]
    if ":" in time_part:
        hour, minute, second = time_part.split(":")
    else:
        # Basic format: split "123045" into two-character chunks.
        hour, minute, second = time_part[0:2], time_part[2:4], time_part[4:6]

    fraction = date_parts[4]
    # BUG FIX: fractional seconds must be scaled to microseconds
    # (".5" -> 500000), not read as a bare integer (the old code gave 5).
    # Pad/truncate the digits to exactly six places.
    microsecond = int(fraction.lstrip(".").ljust(6, "0")[:6]) if fraction else 0

    tz_part = date_parts[5]
    if tz_part and tz_part.lower() != "z":
        sign = 1 if tz_part[0] == "+" else -1
        hours, minutes = tz_part[1:].split(":")
        tzinfo = timezone(timedelta(hours=int(hours) * sign, minutes=int(minutes) * sign))
    elif tz_part and tz_part.lower() == "z":
        tzinfo = timezone.utc
    else:
        tzinfo = None  # naive datetime: no offset given

    return datetime(
        year=int(date_parts[0]),
        month=int(date_parts[1]),
        day=int(date_parts[2]),
        hour=int(hour),
        minute=int(minute),
        second=int(second),
        microsecond=microsecond,
        tzinfo=tzinfo,
    )
def parse_iso_date(value: str) -> date:
    """Parse an ISO-8601 date string (``YYYY-MM-DD`` or ``YYYYMMDD``).

    :raises ValueError: if *value* is not a valid ISO-8601 date
    """
    match = ISO_8601_DATE_REGEX.match(value)
    if not match:
        raise ValueError("Passed value is not valid ISO-8601 date.")
    year, month, day = (int(part) for part in match.groups())
    return date(year=year, month=month, day=day)
def parse_iso_duration(value: str) -> timedelta:
    """
    Parses duration string according to ISO 8601 and returns timedelta representation (it excludes year and month)
    http://www.datypic.com/sc/xsd/t-xsd_dayTimeDuration.html
    :param str value:
    :return dict:
    """
    match = ISO_8601_TIME_DURATION_REGEX.fullmatch(value)
    if not match:
        raise ValueError(f"Passed value {value} is not valid ISO-8601 duration.")

    # A leading "-" negates every component of the duration.
    sign = -1 if match.group("sign") else 1
    parts = {}
    for name in ("weeks", "days", "hours", "minutes"):
        raw = match.group(name)
        parts[name] = int(raw) * sign if raw else 0
    raw_seconds = match.group("seconds")
    # Seconds may carry a fractional part, hence float.
    parts["seconds"] = float(raw_seconds) * sign if raw_seconds else 0
    return timedelta(**parts)
def parse_iso_time(value: str) -> time:
    """Parse an ISO-8601 time string into a ``time``.

    Supports basic (``123045``) and extended (``12:30:45``) forms,
    optional fractional seconds and an optional ``Z`` or
    ``+HH:MM``/``-HH:MM`` offset.

    :raises ValueError: if *value* is not a valid ISO-8601 time
    """
    time_parts = ISO_8601_TIME_REGEX.fullmatch(value)
    if not time_parts:
        raise ValueError(f"Passed value {value} is not valid ISO-8601 time.")

    hour_parts = time_parts.group("time")
    if ":" in hour_parts:
        hour_parts = hour_parts.split(":")
    else:
        # Basic format: split "123045" into two-character chunks.
        hour_parts = [hour_parts[0:2], hour_parts[2:4], hour_parts[4:6]]

    fraction = time_parts.group("microseconds")
    # BUG FIX: fractional seconds must be scaled to microseconds
    # (".5" -> 500000), not read as a bare integer (the old code gave 5).
    microseconds = int(fraction.lstrip(".").ljust(6, "0")[:6]) if fraction else 0

    tz_part = time_parts.group("tzpart")
    if tz_part and tz_part.lower() != "z":
        sign = 1 if tz_part[0] == "+" else -1
        hours, minutes = tz_part[1:].split(":")
        offset = timezone(timedelta(hours=int(hours) * sign, minutes=int(minutes) * sign))
    elif tz_part and tz_part.lower() == "z":
        offset = timezone.utc
    else:
        offset = None  # naive time: no offset given

    return time(
        hour=int(hour_parts[0]),
        minute=int(hour_parts[1]),
        second=int(hour_parts[2]),
        microsecond=microseconds,
        tzinfo=offset,
    )
def timedelta_to_iso_duration(value: timedelta) -> str:
    """Serialize a ``timedelta`` as an ISO-8601 duration string.

    Components equal to zero are omitted; weeks are the largest unit
    emitted (years/months are not part of day-time durations). Negative
    deltas get a leading ``-``.

    :param value: the timedelta to serialize
    :return: an ISO-8601 duration such as ``P1DT2H`` or ``-PT1M30S``
    """
    total = value.total_seconds()
    sign = "-" if total < 0 else ""
    seconds = abs(total)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    weeks, days, hours, minutes = map(int, (weeks, days, hours, minutes))
    # Limit float noise from total_seconds() to microsecond precision.
    seconds = round(seconds, 6)

    date_part = ""
    if weeks:
        date_part += f"{weeks}W"
    if days:
        date_part += f"{days}D"
    time_part = ""
    if hours:
        time_part += f"{hours}H"
    if minutes:
        time_part += f"{minutes}M"
    if seconds:
        # Drop a trailing ".0" for whole-second values.
        time_part += f"{int(seconds)}S" if seconds.is_integer() else f"{seconds}S"
    if not date_part and not time_part:
        # BUG FIX: a zero duration used to serialize as bare "P",
        # which is not valid ISO-8601; emit "PT0S" instead.
        time_part = "0S"
    return f"{sign}P{date_part}" + (f"T{time_part}" if time_part else "")
# Public API of this module.
__all__ = [
    "parse_iso_datetime",
    "parse_iso_date",
    "parse_iso_duration",
    "parse_iso_time",
    "timedelta_to_iso_duration",
]
| chili/iso_datetime.py | 5,740 | Parses duration string according to ISO 8601 and returns timedelta representation (it excludes year and month)
http://www.datypic.com/sc/xsd/t-xsd_dayTimeDuration.html
:param str value:
:return dict:
type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore | 383 | en | 0.352859 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated schema migration for the ``report`` app: sets
    # Polish verbose names on Report fields, makes ``client`` and the
    # ``resolved_*`` fields optional, and pins on_delete for
    # ``resolved_by``.

    dependencies = [
        ('report', '0003_auto_20151015_1921'),
    ]

    operations = [
        migrations.AlterField(
            model_name='report',
            name='client',
            field=models.CharField(default=None, max_length=40, null=True, verbose_name='Zg\u0142aszaj\u0105cy', blank=True),
        ),
        migrations.AlterField(
            model_name='report',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Utworzone'),
        ),
        migrations.AlterField(
            model_name='report',
            name='description',
            field=models.TextField(verbose_name='Opis'),
        ),
        migrations.AlterField(
            model_name='report',
            name='resolved_at',
            field=models.DateTimeField(null=True, verbose_name='Rozpatrzone', blank=True),
        ),
        migrations.AlterField(
            model_name='report',
            name='resolved_by',
            field=models.ForeignKey(verbose_name='Rozpatrzone przez', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE),
        ),
    ]
| report/migrations/0004_auto_20151031_0721.py | 1,318 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# 导入需要的包
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import sklearn.linear_model
import matplotlib
# Display plots inline and change default figure size
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)  # generate the dataset below and plot it
# Fixed seed so the generated dataset is reproducible.
np.random.seed(0)
X, y = sklearn.datasets.make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
# Train a logistic regression classifier (CV selects regularisation).
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates the contour plot below.
def plot_decision_boundary(pred_func):
    """Plot pred_func's decision regions over the global training data X, y.

    *pred_func* maps an (n, 2) array of points to n class labels.
    """
    # Bounding box of the data, padded so points do not sit on the edge.
    pad = .5
    x_min, x_max = X[:, 0].min() - pad, X[:, 0].max() + pad
    y_min, y_max = X[:, 1].min() - pad, X[:, 1].max() + pad
    h = 0.01  # grid resolution
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Classify every grid point, then shape predictions back into the grid.
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # Filled contours for the regions, scatter for the training points.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
# Plot the decision boundary of the linear logistic-regression model;
# it underfits the two-moons data, motivating the network below.
plot_decision_boundary(lambda x: clf.predict(x))
plt.title("Logistic Regression")
plt.show()

# Network topology and training hyper-parameters.
num_examples = len(X)  # training set size
nn_input_dim = 2  # input layer dimensionality
nn_output_dim = 2  # output layer dimensionality
# Gradient descent parameters (I picked these by hand)
epsilon = 0.01  # learning rate for gradient descent
reg_lambda = 0.01  # regularization strength
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model):
    """Mean cross-entropy loss (plus L2 penalty) of *model* over X, y.

    *model* is a dict with keys 'W1', 'b1', 'W2', 'b2'.
    """
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward pass: tanh hidden layer followed by a softmax output.
    a1 = np.tanh(X.dot(W1) + b1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    # Negative log-probability assigned to each true label.
    data_loss = np.sum(-np.log(probs[range(num_examples), y]))
    # L2 regularisation on the weight matrices (biases excluded).
    data_loss += reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
    return 1. / num_examples * data_loss
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(nn_hdim, num_passes=20000, print_loss=False):
    """Train a one-hidden-layer network on the module-level X, y.

    Full-batch gradient descent with tanh hidden units and a softmax
    output; reads the module globals X, y, num_examples, nn_input_dim,
    nn_output_dim, epsilon and reg_lambda.

    :param nn_hdim: number of nodes in the hidden layer
    :param num_passes: number of passes through the training data
    :param print_loss: if True, print the loss every 1000 iterations
    :return: dict with keys 'W1', 'b1', 'W2', 'b2'
    """
    # Initialize the parameters to random values. We need to learn these.
    # Seeded so repeated runs are reproducible; scaled by 1/sqrt(fan-in).
    np.random.seed(0)
    W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
    b1 = np.zeros((1, nn_hdim))
    W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
    b2 = np.zeros((1, nn_output_dim))
    # This is what we return at the end
    model = {}
    # Gradient descent. For each batch...
    for i in range(0, num_passes):
        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        exp_scores = np.exp(z2)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        # Backpropagation: softmax + cross-entropy gradient is probs - onehot(y).
        delta3 = probs
        delta3[range(num_examples), y] -= 1
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        # (1 - a1^2) is the derivative of tanh.
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)
        # Add regularization terms (b1 and b2 don't have regularization terms)
        dW2 += reg_lambda * W2
        dW1 += reg_lambda * W1
        # Gradient descent parameter update
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2
        # Assign new parameters to the model
        model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
        # Optionally print the loss.
        # This is expensive because it uses the whole dataset, so we don't want to do it too often.
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(model)))
    return model
# Helper function to predict an output (0 or 1)
def predict(model, x):
    """Return the class index (0 or 1) predicted for each row of *x*.

    *model* is a dict with keys 'W1', 'b1', 'W2', 'b2'.
    """
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward pass: tanh hidden layer followed by a softmax output.
    hidden = np.tanh(x.dot(W1) + b1)
    scores = hidden.dot(W2) + b2
    exp_scores = np.exp(scores)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    return np.argmax(probs, axis=1)
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)
# Plot the decision boundary learned by the network; unlike the linear
# model above, it can separate the two moons.
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Decision Boundary for hidden layer size 3")
plt.show()
| artificial_intelligence/experiment_7.py | 5,000 | 导入需要的包 Display plots inline and change default figure size 生成数据集并绘制出来 训练逻辑回归训练器 Helper function to plot a decision boundary. If you don't fully understand this function don't worry, it just generates the contour plot below. Set min and max values and give it some padding Generate a grid of points with distance h between them Predict the function value for the whole gid Plot the contour and training examples Plot the decision boundary training set size input layer dimensionality output layer dimensionality Gradient descent parameters (I picked these by hand) learning rate for gradient descent regularization strength Helper function to evaluate the total loss on the dataset Forward propagation to calculate our predictions Calculating the loss Add regulatization term to loss (optional) This function learns parameters for the neural network and returns the model. - nn_hdim: Number of nodes in the hidden layer - num_passes: Number of passes through the training data for gradient descent - print_loss: If True, print the loss every 1000 iterations Initialize the parameters to random values. We need to learn these. This is what we return at the end Gradient descent. For each batch... Forward propagation Backpropagation Add regularization terms (b1 and b2 don't have regularization terms) Gradient descent parameter update Assign new parameters to the model Optionally print the loss. This is expensive because it uses the whole dataset, so we don't want to do it too often. Helper function to predict an output (0 or 1) Forward propagation Build a model with a 3-dimensional hidden layer Plot the decision boundary | 1,626 | en | 0.73604 |
# Distributed DL Client runs on the master node
# @author: Trung Phan
# @created date: 2021-06-28
# @last modified date:
# @note:
from ddlf.cluster import *
async def main():
    """Connect to the cluster, wipe its data, and show the result.

    show_data() is called before and after clean() so the effect of the
    wipe is visible in the output.
    """
    cluster = Cluster()
    await cluster.connect()
    await cluster.show_data()
    await cluster.clean()
    await cluster.show_data()
    await cluster.close()
# Run the task to completion on a fresh event loop.
asyncio.run(main())
| examples/task-clean.py | 362 | Distributed DL Client runs on the master node @author: Trung Phan @created date: 2021-06-28 @last modified date: @note: | 119 | en | 0.727345 |
from django import forms
from django.db.models.loading import get_model
from django.utils.translation import ugettext_lazy as _
from oscar.forms import widgets
Voucher = get_model('voucher', 'Voucher')
Benefit = get_model('offer', 'Benefit')
Range = get_model('offer', 'Range')
class VoucherForm(forms.Form):
    """
    A specialised form for creating a voucher and offer
    model.

    Pass the voucher being edited as the first constructor argument so
    the name/code uniqueness checks ignore that voucher itself.
    """
    name = forms.CharField(label=_("Name"))
    code = forms.CharField(label=_("Code"))
    start_date = forms.DateField(
        label=_("Start date"), widget=widgets.DatePickerInput())
    end_date = forms.DateField(
        label=_("End date"), widget=widgets.DatePickerInput())
    usage = forms.ChoiceField(choices=Voucher.USAGE_CHOICES, label=_("Usage"))
    # Range of products the voucher's benefit applies to.
    benefit_range = forms.ModelChoiceField(
        label=_('Which products get a discount?'),
        queryset=Range.objects.all(),
    )
    type_choices = (
        (Benefit.PERCENTAGE, _('Percentage off of products in range')),
        (Benefit.FIXED, _('Fixed amount off of products in range')),
    )
    benefit_type = forms.ChoiceField(
        choices=type_choices,
        label=_('Discount type'),
    )
    benefit_value = forms.DecimalField(
        label=_('Discount value'))
    def __init__(self, voucher=None, *args, **kwargs):
        # Voucher being edited, or None when creating a new one.
        self.voucher = voucher
        super(VoucherForm, self).__init__(*args, **kwargs)
    def clean_name(self):
        # Enforce name uniqueness, but let the edited voucher keep its own.
        name = self.cleaned_data['name']
        try:
            voucher = Voucher.objects.get(name=name)
        except Voucher.DoesNotExist:
            pass
        else:
            if (not self.voucher) or (voucher.id != self.voucher.id):
                raise forms.ValidationError(_("The name '%s' is already in"
                                              " use") % name)
        return name
    def clean_code(self):
        # Codes are stored upper-case; normalise before checking uniqueness.
        code = self.cleaned_data['code'].strip().upper()
        if not code:
            raise forms.ValidationError(_("Please enter a voucher code"))
        try:
            voucher = Voucher.objects.get(code=code)
        except Voucher.DoesNotExist:
            pass
        else:
            if (not self.voucher) or (voucher.id != self.voucher.id):
                raise forms.ValidationError(_("The code '%s' is already in"
                                              " use") % code)
        return code
    def clean(self):
        # Cross-field validation: the voucher must start before it ends.
        cleaned_data = super(VoucherForm, self).clean()
        start_date = cleaned_data.get('start_date', None)
        end_date = cleaned_data.get('end_date', None)
        if start_date and end_date and end_date < start_date:
            raise forms.ValidationError(_("The start date must be before the"
                                          " end date"))
        return cleaned_data
class VoucherSearchForm(forms.Form):
    """Search filters for the dashboard voucher list; all optional."""
    name = forms.CharField(required=False, label=_("Name"))
    code = forms.CharField(required=False, label=_("Code"))
    is_active = forms.BooleanField(required=False, label=_("Is Active?"))
    def clean_code(self):
        # Codes are stored upper-case; normalise the search term to match.
        return self.cleaned_data['code'].upper()
| oscar/apps/dashboard/vouchers/forms.py | 3,089 | A specialised form for creating a voucher and offer
model. | 58 | en | 0.878028 |
import collections
import itertools
import string
import unittest
# noinspection PyUnusedLocal
# skus = unicode string
def getItemPrices():
    """Price table: SKU -> {quantity: price}, including multi-buy offers.

    Quantity keys are listed in ascending order; calculateItemCosts
    iterates them in reverse to apply the largest offer first.
    """
    return {
        'A': {1: 50, 3: 130, 5: 200},
        'B': {1: 30, 2: 45},
        'C': {1: 20},
        'D': {1: 15},
        'E': {1: 40},
        'F': {1: 10},
        'G': {1: 20},
        'H': {1: 10, 5: 45, 10: 80},
        'I': {1: 35},
        'J': {1: 60},
        'K': {1: 70, 2: 120},
        'L': {1: 90},
        'M': {1: 15},
        'N': {1: 40},
        'O': {1: 10},
        'P': {1: 50, 5: 200},
        'Q': {1: 30, 3: 80},
        'R': {1: 50},
        'S': {1: 20},
        'T': {1: 20},
        'U': {1: 40},
        'V': {1: 50, 2: 90, 3: 130},
        'W': {1: 20},
        'X': {1: 17},
        'Y': {1: 20},
        'Z': {1: 21},
    }
def getGroupItemPrices():
    """Build the "any 3 of S,T,X,Y,Z for 45" group offers.

    Each key is a sorted 3-letter combination; its value maps quantity 1
    to the 45 group price and quantity 0 to the saving versus buying the
    members individually (0 is a marker, skipped by calculateItemCosts).
    """
    singles = getItemPrices()
    offers = {}
    for combo in itertools.combinations_with_replacement("STXYZ", 3):
        normalCost = sum(singles[sku][1] for sku in combo)
        offers["".join(combo)] = {1: 45, 0: normalCost - 45}
    return offers
def getItemFreebies():
    """Freebie table: SKU -> {quantity required: SKU given away free}."""
    return {
        'E': {2: 'B'},
        'F': {3: 'F'},
        'N': {3: 'M'},
        'R': {3: 'Q'},
        'U': {4: 'U'},
    }
def generateItemCounts(skus):
    """Tally the SKU string into a defaultdict of item -> count.

    Raises ValueError on any character outside A-Z.
    """
    counts = collections.defaultdict(int)
    for ch in skus:
        if ch not in string.ascii_uppercase:
            raise ValueError
        counts[ch] += 1
    return counts
def removeFreeItems(itemCounts):
    """Deduct the items earned for free by the freebie offers, in place.

    itemCounts must behave like a defaultdict(int); counts never drop
    below zero.
    """
    freebieTable = getItemFreebies()
    earned = {}
    for sku, owned in itemCounts.items():
        for needed, freeSku in freebieTable.get(sku, {}).items():
            earned[freeSku] = owned // needed
    for freeSku, giveaway in earned.items():
        itemCounts[freeSku] = max(0, itemCounts[freeSku] - giveaway)
def applyItemGroupings(itemCounts):
    """Greedily bundle S/T/X/Y/Z items into 3-item group-offer entries.

    Mutates itemCounts in place: for each 3-letter group key (largest
    saving first) it repeatedly moves one of each member item out of the
    counts and records one count under the group key, until a full trio
    can no longer be assembled.
    """
    groupItemPrices = getGroupItemPrices()
    # [0] is the saving stored under quantity key 0; try the most
    # valuable bundles first so the customer gets the biggest discount.
    groupsByLargestSaving = sorted(list(groupItemPrices.keys()), key = lambda group: groupItemPrices[group][0], reverse=True)
    for group in groupsByLargestSaving:
        while True:
            # items tentatively taken for this bundle, kept for rollback
            groupCounts = collections.defaultdict(int)
            for groupItem in group:
                if itemCounts[groupItem]:
                    groupCounts[groupItem] += 1
                    itemCounts[groupItem] -= 1
                else:
                    # cannot complete the trio: put the tentatively taken
                    # items back and stop trying this group
                    for item, count in groupCounts.items():
                        itemCounts[item] += count
                    break
            else:
                # for-loop finished without break: a full trio was taken
                itemCounts[group] += 1
                continue
            break
def calculateItemCosts(itemCounts):
    """Price the counted items, applying the largest multi-buy offer first.

    Quantity-0 entries are group-saving markers (see getGroupItemPrices)
    and are never priced directly.
    """
    priceTable = getItemPrices()
    priceTable.update(getGroupItemPrices())
    totalCost = 0
    for item, count in itemCounts.items():
        prices = priceTable[item]
        # Sort the offer quantities explicitly (largest first) instead of
        # relying on the insertion order of the price dicts, which
        # reversed(list(keys())) silently depended on.
        for n in sorted(prices, reverse=True):
            if n == 0:
                continue
            offerCount = count // n
            totalCost += offerCount * prices[n]
            count -= offerCount * n
    return totalCost
def checkout(skus):
    """Price a basket given as a string of SKUs; returns -1 on bad input."""
    try:
        basket = generateItemCounts(skus)
    except ValueError:
        return -1
    removeFreeItems(basket)
    applyItemGroupings(basket)
    return calculateItemCosts(basket)
class TestCheckOut(unittest.TestCase):
    """Behavioural tests for checkout(): input validation, single prices,
    multi-buy offers, freebie offers, and the S/T/X/Y/Z group discount."""
    def test_invalidSKUItemReturnsMinus1(self):
        self.assertEqual(checkout("AB32"), -1)
        self.assertEqual(checkout("ABc"), -1)
        self.assertEqual(checkout("AB!"), -1)
    def test_emptySKUCostsNothing(self):
        self.assertEqual(checkout(""), 0)
    def test_singlePrices(self):
        self.assertEqual(checkout('A'), 50)
        self.assertEqual(checkout('B'), 30)
        self.assertEqual(checkout('C'), 20)
        self.assertEqual(checkout('D'), 15)
        self.assertEqual(checkout('E'), 40)
        self.assertEqual(checkout('F'), 10)
        self.assertEqual(checkout('G'), 20)
        self.assertEqual(checkout('H'), 10)
        self.assertEqual(checkout('I'), 35)
        self.assertEqual(checkout('J'), 60)
        self.assertEqual(checkout('K'), 70)
        self.assertEqual(checkout('L'), 90)
        self.assertEqual(checkout('M'), 15)
        self.assertEqual(checkout('N'), 40)
        self.assertEqual(checkout('O'), 10)
        self.assertEqual(checkout('P'), 50)
        self.assertEqual(checkout('Q'), 30)
        self.assertEqual(checkout('R'), 50)
        self.assertEqual(checkout('S'), 20)
        self.assertEqual(checkout('T'), 20)
        self.assertEqual(checkout('U'), 40)
        self.assertEqual(checkout('V'), 50)
        self.assertEqual(checkout('W'), 20)
        self.assertEqual(checkout('X'), 17)
        self.assertEqual(checkout('Y'), 20)
        self.assertEqual(checkout('Z'), 21)
    def test_multipleItemOffers(self):
        self.assertEqual(checkout('AAA'), 130)
        self.assertEqual(checkout('AAAAA'), 200)
        self.assertEqual(checkout('BB'), 45)
        self.assertEqual(checkout("HHHHH"), 45)
        self.assertEqual(checkout("HHHHHHHHHH"), 80)
        self.assertEqual(checkout("KK"), 120)
        self.assertEqual(checkout("PPPPP"), 200)
        self.assertEqual(checkout("QQQ"), 80)
        self.assertEqual(checkout("VV"), 90)
        self.assertEqual(checkout("VVV"), 130)
    def test_multipleNonOfferItemsAreMultiplesOfSingleItemPrice(self):
        self.assertEqual(checkout('CC'), checkout('C') * 2)
        self.assertEqual(checkout('DD'), checkout('D') * 2)
    def test_mixedSingleItemsAreSumOfIndividualPrices(self):
        self.assertEqual(checkout("BADC"), checkout("A") + checkout("B") + checkout("C") + checkout("D"))
    def test_multipleSpecialOffserAreMultipleOfSpecialOfferPrice(self):
        self.assertEqual(checkout("AAAAAAAAAA"), checkout("AAAAA") * 2)
        self.assertEqual(checkout("BBBB"), checkout("BB") * 2)
    def test_mixedOffersAreSumOfSpecialAndIndividualPrices(self):
        self.assertEqual(checkout("AAAAAAA"), checkout("AAAAA") + checkout("AA"))
        self.assertEqual(checkout("BBB"), checkout("BB") + checkout("B"))
    def test_mixedSpecialOffersAreSumsOfOffers(self):
        self.assertEqual(checkout("ABABA"), checkout("BB") + checkout("AAA"))
    def test_mixedItemsAreSumed(self):
        self.assertEqual(checkout("ABCCABADDA"), checkout("BB") + checkout("AAA") + checkout("A") + checkout("CC") + checkout("DD"))
    def test_specialOfferCombinationsMinimisePrice(self):
        self.assertEqual(checkout("AAAAAAAAA"), checkout("AAAAA") + checkout("AAA") + checkout("A"))
    def test_2ESpecialOfferGivesOneFreeB(self):
        self.assertEqual(checkout("EE"), checkout("E") + checkout("E"))
        self.assertEqual(checkout("EEB"), checkout("E") + checkout("E"))
        self.assertEqual(checkout("EEBEE"), checkout("E") * 4)
        self.assertEqual(checkout("EEBEEB"), checkout("E") * 4)
        self.assertEqual(checkout("EEBEEBB"), checkout("E") * 4 + checkout("B"))
    def test_3FSpecialOfferGivesOneFreeF(self):
        self.assertEqual(checkout("FFF"), checkout("F") * 2)
        self.assertEqual(checkout("FFFFF"), checkout("F") * 4)
        self.assertEqual(checkout("FFFFFF"), checkout("F") * 4)
    def test_3NSpecialOfferGivesOneFreeM(self):
        self.assertEqual(checkout("NNNM"), checkout("NNN"))
    def test_3RSpecialOfferGivesOneFreeQ(self):
        self.assertEqual(checkout("RRRQ"), checkout("RRR"))
    def test_4USpecialOfferGivesOneFreeU(self):
        self.assertEqual(checkout("UUUU"), checkout("UUU"))
    def test_groupDiscount(self):
        for combination in itertools.combinations_with_replacement("STXYZ", 3):
            self.assertEqual(checkout("".join(combination)), 45)
    def test_maximumGroupDiscount(self):
        # The group offer should consume the *most expensive* eligible items.
        self.assertEqual(checkout("STXYZ"), 45 + checkout("XY"))
        self.assertEqual(checkout("SSSX"), 45 + checkout("X"))
    def test_multipleGroupDiscountsAreGiven(self):
        self.assertEqual(checkout("STXYZTYX"), 90 + checkout("XX"))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| lib/solutions/CHK/checkout_solution.py | 8,606 | noinspection PyUnusedLocal skus = unicode string FIXME: Using 0 to denote saving from using group | 97 | en | 0.302203 |
import pdb
if __name__ == "__main__":
    # Advent of Code 2020 day 21: deduce which ingredient carries which
    # allergen from lines like "a b c (contains dairy, fish)".
    with open("21input.txt") as f:
        data = f.read().split("\n")
        data.pop(-1)
    print(data)
    # Parse each line into [ingredients, allergens].
    all_food = []
    for food in data:
        allergens = False
        ings = []
        alle = []
        for ingredient in food.split(" "):
            if "(contains" == ingredient:
                allergens = True
            elif allergens:
                # strip the trailing ',' or ')'
                alle.append(ingredient[:-1])
            else:
                ings.append(ingredient)
        all_food.append([ings, alle])
    print(all_food)
    # For each allergen, intersect the ingredient sets of every food that
    # lists it; the survivors are the candidate carriers.
    alg_dico = {}
    assigned = {}
    for food in all_food:
        for alg in food[1]:
            if alg in alg_dico:
                alg_dico[alg] &= set(food[0])
            else:
                alg_dico[alg] = set(food[0])
    # Split allergens into solved (single candidate) and unsolved.
    solved = []
    unsolved = []
    for alg, val in alg_dico.items():
        if (len(val) == 1):
            solved.append(alg)
        else:
            unsolved.append(alg)
    for alg in alg_dico.keys():
        alg_dico[alg] = list(alg_dico[alg])
    print(alg_dico, solved, unsolved)
    # Constraint propagation: remove each solved ingredient from the other
    # candidate lists until every allergen has exactly one candidate.
    # NOTE(review): `solved` and `unsolved` are mutated while being
    # iterated; the outer while re-scans so this converges, but confirm.
    while (len(unsolved)>0) :
        for alg in solved:
            val = alg_dico[alg][0]
            for algx in unsolved:
                if val in (alg_dico[algx]):
                    alg_dico[algx].remove(val)
                    if len(alg_dico[algx]) == 1:
                        solved.append(algx)
                        unsolved.remove(algx)
    used_ing = list(alg_dico.values())
    used_ing = [x[0] for x in used_ing]
    # for alg, val in alg_dico.items():
    #     if (len(val) == 1):
    #         for valx in alg_dico.values():
    #             if val in valx and valx != val:
    #                 valx.remove(val)
    print(used_ing)
    # Part 1: count occurrences of ingredients that carry no allergen.
    cpt = 0
    for ings, algs in all_food:
        for ing in ings:
            if ing not in used_ing:
                cpt+=1
    print(cpt)
    # Part 2: the canonical dangerous ingredient list, ordered by allergen.
    algs = list(alg_dico.keys())
    algs.sort()
    used_ing_sorted = []
    for alg in algs:
        used_ing_sorted.append(alg_dico[alg][0])
    print(used_ing_sorted, ",".join(used_ing_sorted))
| 2020/21day.py | 2,092 | for alg, val in alg_dico.items(): if (len(val) == 1): for valx in alg_dico.values(): if val in valx and valx != val: valx.remove(val) | 173 | en | 0.215319 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
def update_upload_to_ia_field(apps, schema_editor):
    """Forward data migration: project the old boolean flag onto the new
    internet_archive_upload_status choice field, on Link and its history."""
    for model_name in ('Link', 'HistoricalLink'):
        model = apps.get_model('perma', model_name)
        model.objects.filter(uploaded_to_internet_archive=True).update(
            internet_archive_upload_status='completed')
        model.objects.filter(uploaded_to_internet_archive=False).update(
            internet_archive_upload_status='not_started')
def reverse_update_upload_to_ia_field(apps, schema_editor):
    """Backward data migration: collapse the status field onto the old
    boolean — only 'completed' counts as uploaded."""
    not_uploaded = (
        Q(internet_archive_upload_status='deleted')
        | Q(internet_archive_upload_status='not_started')
        | Q(internet_archive_upload_status='failed')
        | Q(internet_archive_upload_status='failed_permanently')
    )
    for model_name in ('Link', 'HistoricalLink'):
        model = apps.get_model('perma', model_name)
        model.objects.filter(internet_archive_upload_status='completed').update(
            uploaded_to_internet_archive=True)
        model.objects.filter(not_uploaded).update(
            uploaded_to_internet_archive=False)
class Migration(migrations.Migration):
    # Replaces the boolean `uploaded_to_internet_archive` with the richer
    # `internet_archive_upload_status` choice field: add the new column on
    # Link and its history table, copy the data across via RunPython, then
    # drop the old columns.
    dependencies = [
        ('perma', '0005_auto_20160513_2006'),
    ]
    operations = [
        migrations.AddField(
            model_name='historicallink',
            name='internet_archive_upload_status',
            field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
        ),
        migrations.AddField(
            model_name='link',
            name='internet_archive_upload_status',
            field=models.CharField(default=b'not_started', max_length=20, choices=[(b'not_started', b'not_started'), (b'completed', b'completed'), (b'failed', b'failed'), (b'failed_permanently', b'failed_permanently'), (b'deleted', b'deleted')]),
        ),
        migrations.RunPython(update_upload_to_ia_field, reverse_code=reverse_update_upload_to_ia_field),
        migrations.RemoveField(
            model_name='historicallink',
            name='uploaded_to_internet_archive',
        ),
        migrations.RemoveField(
            model_name='link',
            name='uploaded_to_internet_archive',
        ),
    ]
| perma_web/perma/migrations/0006_add_internetarchive_status.py | 2,987 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
class Freestream:
    """Uniform freestream flow conditions."""

    def __init__(self, u_inf=1.0, alpha=0.0):
        """
        Store the freestream speed and the angle of attack.

        Parameters
        ----------
        u_inf: float, optional
            Freestream speed;
            default: 1.0.
        alpha: float, optional
            Angle of attack in degrees (stored internally in radians);
            default 0.0.
        """
        self.u_inf = u_inf
        # convert the incidence from degrees to radians
        self.alpha = alpha*numpy.pi/180.0
| steapy/freestream.py | 651 | Freestream conditions.
Sets the freestream speed and angle (in degrees).
Parameters
----------
u_inf: float, optional
Freestream speed;
default: 1.0.
alpha: float, optional
Angle of attack in degrees;
default 0.0.
degrees to radians | 251 | en | 0.487281 |
#!/usr/bin/python
"""
IO Module
"""
import sys
import logging
from time import time as _time
import threading
import cPickle
from bisect import bisect_left
from collections import deque
from bacpypes.debugging import bacpypes_debugging, DebugContents, ModuleLogger
from bacpypes.core import deferred
from bacpypes.comm import PDU, Client, bind
from bacpypes.task import FunctionTask
from bacpypes.udp import UDPDirector
# some debugging
_debug = 0
_log = ModuleLogger(globals())
_commlog = logging.getLogger(__name__ + "._commlog")
#
#   IOCB States
#
#   Life cycle of an IOCB: IDLE -> PENDING -> ACTIVE, finishing as either
#   COMPLETED or ABORTED.
#
IDLE = 0        # has not been submitted
PENDING = 1     # queued, waiting for processing
ACTIVE = 2      # being processed
COMPLETED = 3   # finished
ABORTED = 4     # finished in a bad way
# human-readable names for the states above, used in debug output
_stateNames = {
    IDLE: 'IDLE',
    PENDING: 'PENDING',
    ACTIVE: 'ACTIVE',
    COMPLETED: 'COMPLETED',
    ABORTED: 'ABORTED',
    }
#
# IOQController States
#
CTRL_IDLE = 0 # nothing happening
CTRL_ACTIVE = 1 # working on an iocb
CTRL_WAITING = 1 # waiting between iocb requests (throttled)
_ctrlStateNames = {
CTRL_IDLE: 'IDLE',
CTRL_ACTIVE: 'ACTIVE',
CTRL_WAITING: 'WAITING',
}
# dictionary of local controllers
_local_controllers = {}
_proxy_server = None
# special abort error
TimeoutError = RuntimeError("timeout")
#
# _strftime
#
def _strftime():
return "%011.6f" % (_time() % 3600,)
#
# IOCB - Input Output Control Block
#
# next IOCB identifier and the lock that guards it (see IOCB.__init__)
_identNext = 1
_identLock = threading.Lock()
@bacpypes_debugging
class IOCB(DebugContents):
    """Input/Output Control Block.

    Carries one request (args/kwargs) through the IDLE -> PENDING ->
    ACTIVE -> COMPLETED/ABORTED life cycle, sets a threading.Event when
    the transaction finishes either way, and fires registered callbacks.
    """
    _debug_contents = \
        ( 'args', 'kwargs'
        , 'ioState', 'ioResponse-', 'ioError'
        , 'ioController', 'ioServerRef', 'ioControllerRef', 'ioClientID', 'ioClientAddr'
        , 'ioComplete', 'ioCallback+', 'ioQueue', 'ioPriority', 'ioTimeout'
        )
    def __init__(self, *args, **kwargs):
        """Allocate a unique ioID and save the request parameters.

        The optional '_priority' keyword is removed from kwargs and kept
        as the queueing priority (default 0, see IOQueue.put).
        """
        global _identNext
        # lock the identity sequence number
        _identLock.acquire()
        # generate a unique identity for this block
        ioID = _identNext
        _identNext += 1
        # release the lock
        _identLock.release()
        # debugging postponed until ID acquired
        if _debug: IOCB._debug("__init__(%d) %r %r", ioID, args, kwargs)
        # save the ID
        self.ioID = ioID
        # save the request parameters
        self.args = args
        self.kwargs = kwargs
        # start with an idle request
        self.ioState = IDLE
        self.ioResponse = None
        self.ioError = None
        # blocks are bound to a controller
        self.ioController = None
        # blocks could reference a local or remote server
        self.ioServerRef = None
        self.ioControllerRef = None
        self.ioClientID = None
        self.ioClientAddr = None
        # each block gets a completion event
        self.ioComplete = threading.Event()
        self.ioComplete.clear()
        # applications can set a callback functions
        self.ioCallback = []
        # request is not currently queued
        self.ioQueue = None
        # extract the priority if it was given
        self.ioPriority = kwargs.get('_priority', 0)
        if '_priority' in kwargs:
            if _debug: IOCB._debug(" - ioPriority: %r", self.ioPriority)
            del kwargs['_priority']
        # request has no timeout
        self.ioTimeout = None
    def add_callback(self, fn, *args, **kwargs):
        """Pass a function to be called when IO is complete."""
        if _debug: IOCB._debug("add_callback(%d) %r %r %r", self.ioID, fn, args, kwargs)
        # store it
        self.ioCallback.append((fn, args, kwargs))
        # already complete?
        if self.ioComplete.isSet():
            self.trigger()
    def wait(self, *args):
        """Wait for the completion event to be set."""
        if _debug: IOCB._debug("wait(%d) %r", self.ioID, args)
        # waiting from a non-daemon thread could be trouble
        self.ioComplete.wait(*args)
    def trigger(self):
        """Set the event and make the callback."""
        if _debug: IOCB._debug("trigger(%d)", self.ioID)
        # if it's queued, remove it from its queue
        if self.ioQueue:
            if _debug: IOCB._debug(" - dequeue")
            self.ioQueue.remove(self)
        # if there's a timer, cancel it
        if self.ioTimeout:
            if _debug: IOCB._debug(" - cancel timeout")
            self.ioTimeout.suspend_task()
        # set the completion event
        self.ioComplete.set()
        # make the callback
        for fn, args, kwargs in self.ioCallback:
            if _debug: IOCB._debug(" - callback fn: %r %r %r", fn, args, kwargs)
            fn(self, *args, **kwargs)
    def complete(self, msg):
        """Called to complete a transaction, usually when process_io has
        shipped the IOCB off to some other thread or function."""
        if _debug: IOCB._debug("complete(%d) %r", self.ioID, msg)
        if self.ioController:
            # pass to controller
            self.ioController.complete_io(self, msg)
        else:
            # just fill in the data
            self.ioState = COMPLETED
            self.ioResponse = msg
            self.trigger()
    def abort(self, err):
        """Called by a client to abort a transaction."""
        if _debug: IOCB._debug("abort(%d) %r", self.ioID, err)
        if self.ioController:
            # pass to controller
            self.ioController.abort_io(self, err)
        elif self.ioState < COMPLETED:
            # just fill in the data
            self.ioState = ABORTED
            self.ioError = err
            self.trigger()
    def set_timeout(self, delay, err=TimeoutError):
        """Called to set a transaction timer.

        When the timer expires the iocb is aborted with err (the module
        TimeoutError by default).  A second call reschedules the timer.
        """
        if _debug: IOCB._debug("set_timeout(%d) %r err=%r", self.ioID, delay, err)
        # if one has already been created, cancel it
        if self.ioTimeout:
            self.ioTimeout.suspend_task()
        else:
            self.ioTimeout = FunctionTask(self.abort, err)
        # (re)schedule it
        self.ioTimeout.install_task(_time() + delay)
    def __repr__(self):
        """Debugging representation including the ioID."""
        xid = id(self)
        if (xid < 0): xid += (1 << 32)
        sname = self.__module__ + '.' + self.__class__.__name__
        desc = "(%d)" % (self.ioID,)
        return '<' + sname + desc + ' instance at 0x%08x' % (xid,) + '>'
#
# IOChainMixIn
#
@bacpypes_debugging
class IOChainMixIn(DebugContents):
    """Mix-in that chains this block to an upstream IOCB.

    On construction, encode() maps the upstream request into this block;
    when this block finishes, decode() maps the result (or error) back and
    the upstream block is triggered.
    """
    # renamed from _debugContents: DebugContents (and IOCB above) use the
    # _debug_contents spelling, so the old name was never picked up
    _debug_contents = ( 'ioChain++', )
    def __init__(self, iocb):
        """Bind to the upstream iocb and encode its request into self."""
        if _debug: IOChainMixIn._debug("__init__ %r", iocb)
        # save a refence back to the iocb
        self.ioChain = iocb
        # set the callback to follow the chain
        self.add_callback(self.chain_callback)
        # if we're not chained, there's no notification to do
        if not self.ioChain:
            return
        # this object becomes its controller
        iocb.ioController = self
        # consider the parent active
        iocb.ioState = ACTIVE
        try:
            if _debug: IOChainMixIn._debug(" - encoding")
            # let the derived class set the args and kwargs
            # (was self.Encode() — the hook is defined as encode() below)
            self.encode()
            if _debug: IOChainMixIn._debug(" - encode complete")
        except Exception as err:
            # narrowed from a bare except; abort the request with the error
            if _debug: IOChainMixIn._exception(" - encoding exception: %r", err)
            iocb.abort(err)
    def chain_callback(self, iocb):
        """Callback when this iocb completes; decodes the result and
        triggers the upstream block."""
        if _debug: IOChainMixIn._debug("chain_callback %r", iocb)
        # if we're not chained, there's no notification to do
        if not self.ioChain:
            return
        # refer to the chained iocb
        iocb = self.ioChain
        try:
            if _debug: IOChainMixIn._debug(" - decoding")
            # let the derived class transform the data
            # (was self.Decode() — the hook is defined as decode() below)
            self.decode()
            if _debug: IOChainMixIn._debug(" - decode complete")
        except Exception as err:
            # narrowed from a bare except; abort the upstream block
            if _debug: IOChainMixIn._exception(" - decoding exception: %r", err)
            iocb.ioState = ABORTED
            iocb.ioError = err
        # break the references
        self.ioChain = None
        iocb.ioController = None
        # notify the client
        iocb.trigger()
    def abort_io(self, iocb, err):
        """Forward the abort downstream."""
        if _debug: IOChainMixIn._debug("abort_io %r %r", iocb, err)
        # make sure we're being notified of an abort request from
        # the iocb we are chained from
        if iocb is not self.ioChain:
            raise RuntimeError("broken chain")
        # call my own abort(), which may forward it to a controller or
        # be overridden by IOGroup
        self.abort(err)
    def encode(self):
        """Hook to transform the request, called when this IOCB is
        chained."""
        if _debug: IOChainMixIn._debug("encode (pass)")
        # by default do nothing, the arguments have already been supplied
    def decode(self):
        """Hook to transform the response, called when this IOCB is
        completed."""
        if _debug: IOChainMixIn._debug("decode")
        # refer to the chained iocb
        iocb = self.ioChain
        # if this has completed successfully, pass it up
        if self.ioState == COMPLETED:
            if _debug: IOChainMixIn._debug(" - completed: %r", self.ioResponse)
            # change the state and transform the content
            iocb.ioState = COMPLETED
            iocb.ioResponse = self.ioResponse
        # if this aborted, pass that up too
        elif self.ioState == ABORTED:
            if _debug: IOChainMixIn._debug(" - aborted: %r", self.ioError)
            # change the state
            iocb.ioState = ABORTED
            iocb.ioError = self.ioError
        else:
            raise RuntimeError("invalid state: %d" % (self.ioState,))
#
# IOChain
#
class IOChain(IOCB, IOChainMixIn):
    """An IOCB chained to another IOCB: completion or abort of this block
    is translated (via encode/decode) and propagated upstream."""
    def __init__(self, chain, *args, **kwargs):
        """Initialize a chained control block."""
        if _debug: IOChain._debug("__init__ %r %r %r", chain, args, kwargs)
        # initialize IOCB part to pick up the ioID
        IOCB.__init__(self, *args, **kwargs)
        IOChainMixIn.__init__(self, chain)
#
# IOGroup
#
@bacpypes_debugging
class IOGroup(IOCB, DebugContents):
    """An IOCB that aggregates member IOCBs.

    The group completes when every member has completed; aborting the
    group aborts all of its members.
    """
    # renamed from _debugContents: DebugContents (and IOCB) use the
    # _debug_contents spelling, so the old name was never picked up
    _debug_contents = ('ioMembers',)
    def __init__(self):
        """Initialize a group."""
        if _debug: IOGroup._debug("__init__")
        IOCB.__init__(self)
        # start with an empty list of members
        self.ioMembers = []
        # start out being done. When an IOCB is added to the
        # group that is not already completed, this state will
        # change to PENDING.
        self.ioState = COMPLETED
        self.ioComplete.set()
    def add(self, iocb):
        """Add an IOCB to the group, you can also add other groups."""
        if _debug: IOGroup._debug("Add %r", iocb)
        # add this to our members
        self.ioMembers.append(iocb)
        # assume all of our members have not completed yet
        self.ioState = PENDING
        self.ioComplete.clear()
        # when this completes, call back to the group. If this
        # has already completed, it will trigger
        iocb.add_callback(self.group_callback)
    def group_callback(self, iocb):
        """Callback when a member iocb completes; completes the group when
        no member is still outstanding."""
        if _debug: IOGroup._debug("group_callback %r", iocb)
        # check all the members (loop variable renamed so it no longer
        # shadows the iocb parameter)
        for member in self.ioMembers:
            if not member.ioComplete.isSet():
                if _debug: IOGroup._debug(" - waiting for child: %r", member)
                break
        else:
            if _debug: IOGroup._debug(" - all children complete")
            # everything complete
            self.ioState = COMPLETED
            self.trigger()
    def abort(self, err):
        """Called by a client to abort all of the member transactions.
        When the last pending member is aborted the group callback
        function will be called."""
        if _debug: IOGroup._debug("abort %r", err)
        # change the state to reflect that it was killed
        self.ioState = ABORTED
        self.ioError = err
        # abort all the members
        for iocb in self.ioMembers:
            iocb.abort(err)
        # notify the client
        self.trigger()
#
# IOQueue - Input Output Queue
#
@bacpypes_debugging
class IOQueue:
    """Priority-ordered queue of pending IOCBs.

    A threading.Event (notempty) lets consumers block until a request is
    available; entries are (priority, iocb) pairs kept sorted by priority
    with FIFO order within a priority level.
    """
    def __init__(self, name):
        if _debug: IOQueue._debug("__init__ %r", name)
        self.queue = []
        self.notempty = threading.Event()
        self.notempty.clear()
    def put(self, iocb):
        """Add an IOCB to a queue.  This is usually called by the function
        that filters requests and passes them out to the correct processing
        thread.  Returns whether the queue was empty beforehand."""
        if _debug: IOQueue._debug("put %r", iocb)
        # requests should be pending before being queued
        if iocb.ioState != PENDING:
            raise RuntimeError("invalid state transition")
        # save that it might have been empty
        wasempty = not self.notempty.isSet()
        # add the request to the end of the list of iocb's at same priority
        priority = iocb.ioPriority
        item = (priority, iocb)
        # bisecting on (priority+1,) finds the slot after every entry of
        # the same priority, preserving FIFO order within a priority
        self.queue.insert(bisect_left(self.queue, (priority+1,)), item)
        # point the iocb back to this queue
        iocb.ioQueue = self
        # set the event, queue is no longer empty
        self.notempty.set()
        return wasempty
    def get(self, block=1, delay=None):
        """Get a request from a queue, optionally block until a request
        is available.  Returns None only when not blocking (or the delay
        expires) and the queue is empty."""
        if _debug: IOQueue._debug("get block=%r delay=%r", block, delay)
        # if the queue is empty and we do not block return None
        if not block and not self.notempty.isSet():
            return None
        # wait for something to be in the queue
        if delay:
            self.notempty.wait(delay)
            if not self.notempty.isSet():
                return None
        else:
            self.notempty.wait()
        # extract the first element
        priority, iocb = self.queue[0]
        del self.queue[0]
        iocb.ioQueue = None
        # if the queue is empty, clear the event
        qlen = len(self.queue)
        if not qlen:
            self.notempty.clear()
        # return the request
        return iocb
    def remove(self, iocb):
        """Remove a control block from the queue, called if the request
        is canceled/aborted."""
        if _debug: IOQueue._debug("remove %r", iocb)
        # remove the request from the queue
        for i, item in enumerate(self.queue):
            if iocb is item[1]:
                if _debug: IOQueue._debug(" - found at %d", i)
                del self.queue[i]
                # if the queue is empty, clear the event
                qlen = len(self.queue)
                if not qlen:
                    self.notempty.clear()
                break
        else:
            if _debug: IOQueue._debug(" - not found")
    def abort(self, err):
        """abort all of the control blocks in the queue."""
        if _debug: IOQueue._debug("abort %r", err)
        # send aborts to all of the members
        try:
            for iocb in self.queue:
                iocb.ioQueue = None
                iocb.abort(err)
            # flush the queue
            self.queue = []
            # the queue is now empty, clear the event
            self.notempty.clear()
        except ValueError:
            pass
#
# IOController
#
@bacpypes_debugging
class IOController:
    """Base class for things that process IOCB requests.

    Clients submit requests via request_io(); a derived class implements
    process_io() and reports progress back through active_io(),
    complete_io(), and abort_io().
    """
    def __init__(self, name=None):
        """Initialize a controller, optionally registering it under a name
        so IOProxy instances in the same process can find it.

        Raises RuntimeError when the name is already registered.
        """
        if _debug: IOController._debug("__init__ name=%r", name)
        # save the name
        self.name = name
        # register the name
        if name is not None:
            if name in _local_controllers:
                raise RuntimeError("already a local controller called '%s': %r" % (name, _local_controllers[name]))
            _local_controllers[name] = self
    def abort(self, err):
        """Abort all requests, no default implementation."""
        pass
    def request_io(self, iocb):
        """Called by a client to start processing a request."""
        if _debug: IOController._debug("request_io %r", iocb)
        # bind the iocb to this controller
        iocb.ioController = self
        try:
            # change the state
            iocb.ioState = PENDING
            # let derived class figure out how to process this
            self.process_io(iocb)
        except Exception as err:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # propagate; a process_io failure aborts the request
            self.abort_io(iocb, err)
    def process_io(self, iocb):
        """Figure out how to respond to this request.  This must be
        provided by the derived class."""
        raise NotImplementedError("IOController must implement process_io()")
    def active_io(self, iocb):
        """Called by a handler to notify the controller that a request is
        being processed; moves the iocb to the ACTIVE state."""
        if _debug: IOController._debug("active_io %r", iocb)
        # requests should be idle or pending before coming active
        if (iocb.ioState != IDLE) and (iocb.ioState != PENDING):
            raise RuntimeError("invalid state transition (currently %d)" % (iocb.ioState,))
        # change the state
        iocb.ioState = ACTIVE
    def complete_io(self, iocb, msg):
        """Called by a handler to return data to the client; a no-op when
        the iocb has already completed or aborted."""
        if _debug: IOController._debug("complete_io %r %r", iocb, msg)
        # if it already finished (either way), leave it alone
        if iocb.ioState in (COMPLETED, ABORTED):
            pass
        else:
            # change the state
            iocb.ioState = COMPLETED
            iocb.ioResponse = msg
            # notify the client
            iocb.trigger()
    def abort_io(self, iocb, err):
        """Called by a handler or a client to abort a transaction; a no-op
        when the iocb has already completed or aborted."""
        if _debug: IOController._debug("abort_io %r %r", iocb, err)
        # if it already finished (either way), leave it alone
        if iocb.ioState in (COMPLETED, ABORTED):
            pass
        else:
            # change the state
            iocb.ioState = ABORTED
            iocb.ioError = err
            # notify the client
            iocb.trigger()
#
# IOQController
#
@bacpypes_debugging
class IOQController(IOController):
    """IOController that serializes its requests.

    Only one iocb is active at a time; requests arriving while busy wait
    in an IOQueue.  An optional wait_time throttles the gap between the
    end of one request and the start of the next.
    """
    # seconds to pause between requests; 0.0 disables throttling
    wait_time = 0.0
    def __init__(self, name=None):
        """Initialize a queue controller."""
        if _debug: IOQController._debug("__init__ name=%r", name)
        # give ourselves a nice name
        if not name:
            name = self.__class__.__name__
        IOController.__init__(self, name)
        # start idle
        self.state = CTRL_IDLE
        # no active iocb
        self.active_iocb = None
        # create an IOQueue for iocb's requested when not idle
        self.ioQueue = IOQueue(str(name) + "/Queue")
    def abort(self, err):
        """Abort all pending requests."""
        if _debug: IOQController._debug("abort %r", err)
        if (self.state == CTRL_IDLE):
            if _debug: IOQController._debug(" - idle")
            return
        while True:
            # non-blocking get: a blocking get() would hang forever on the
            # Event once the queue has been drained, since None is only
            # returned on the non-blocking path
            iocb = self.ioQueue.get(block=0)
            if not iocb:
                break
            if _debug: IOQController._debug(" - iocb: %r", iocb)
            # change the state
            iocb.ioState = ABORTED
            iocb.ioError = err
            # notify the client
            iocb.trigger()
        if (self.state != CTRL_IDLE):
            if _debug: IOQController._debug(" - busy after aborts")
    def request_io(self, iocb):
        """Called by a client to start processing a request."""
        if _debug: IOQController._debug("request_io %r", iocb)
        # bind the iocb to this controller
        iocb.ioController = self
        # if we're busy, queue it
        if (self.state != CTRL_IDLE):
            if _debug: IOQController._debug(" - busy, request queued")
            iocb.ioState = PENDING
            self.ioQueue.put(iocb)
            return
        try:
            # let derived class figure out how to process this
            self.process_io(iocb)
        except Exception as err:
            # narrowed from a bare except; a process_io failure aborts
            self.abort_io(iocb, err)
    def process_io(self, iocb):
        """Figure out how to respond to this request.  This must be
        provided by the derived class."""
        raise NotImplementedError("IOController must implement process_io()")
    def active_io(self, iocb):
        """Called by a handler to notify the controller that a request is
        being processed."""
        if _debug: IOQController._debug("active_io %r", iocb)
        # base class work first, setting iocb state and timer data
        IOController.active_io(self, iocb)
        # change our state
        self.state = CTRL_ACTIVE
        # keep track of the iocb
        self.active_iocb = iocb
    def complete_io(self, iocb, msg):
        """Called by a handler to return data to the client; afterwards,
        either wait out the throttle delay or start the next request."""
        if _debug: IOQController._debug("complete_io %r %r", iocb, msg)
        # check to see if it is completing the active one
        if iocb is not self.active_iocb:
            raise RuntimeError("not the current iocb")
        # normal completion
        IOController.complete_io(self, iocb, msg)
        # no longer an active iocb
        self.active_iocb = None
        # check to see if we should wait a bit
        if self.wait_time:
            # change our state
            self.state = CTRL_WAITING
            # schedule a call in the future
            task = FunctionTask(IOQController._wait_trigger, self)
            task.install_task(_time() + self.wait_time)
        else:
            # change our state
            self.state = CTRL_IDLE
            # look for more to do
            deferred(IOQController._trigger, self)
    def abort_io(self, iocb, err):
        """Called by a handler or a client to abort a transaction."""
        if _debug: IOQController._debug("abort_io %r %r", iocb, err)
        # normal abort
        IOController.abort_io(self, iocb, err)
        # check to see if it is completing the active one
        if iocb is not self.active_iocb:
            if _debug: IOQController._debug(" - not current iocb")
            return
        # no longer an active iocb
        self.active_iocb = None
        # change our state
        self.state = CTRL_IDLE
        # look for more to do
        deferred(IOQController._trigger, self)
    def _trigger(self):
        """Called to launch the next request in the queue."""
        if _debug: IOQController._debug("_trigger")
        # if we are busy, do nothing
        if self.state != CTRL_IDLE:
            if _debug: IOQController._debug(" - not idle")
            return
        # if there is nothing to do, return
        if not self.ioQueue.queue:
            if _debug: IOQController._debug(" - empty queue")
            return
        # get the next iocb
        iocb = self.ioQueue.get()
        try:
            # let derived class figure out how to process this
            self.process_io(iocb)
        except Exception as err:
            # narrowed from a bare except; a process_io failure aborts
            self.abort_io(iocb, err)
        # if we're idle, call again
        if self.state == CTRL_IDLE:
            deferred(IOQController._trigger, self)
    def _wait_trigger(self):
        """Called after the throttle delay to launch the next request."""
        if _debug: IOQController._debug("_wait_trigger")
        # make sure we are waiting
        if (self.state != CTRL_WAITING):
            raise RuntimeError("not waiting")
        # change our state
        self.state = CTRL_IDLE
        # look for more to do
        IOQController._trigger(self)
#
# IOProxy
#
@bacpypes_debugging
class IOProxy:

    """Client-side stand-in for a controller.

    Implements request_io() like a controller, but passes requests on to a
    local controller when one exists in the same process, or on to the
    shared IOProxyServer instance to be forwarded for remote processing.
    """

    def __init__(self, controllerName, serverName=None, requestLimit=None):
        """Create an IO client bound to a (possibly remote) controller."""
        if _debug:
            IOProxy._debug(
                "__init__ %r serverName=%r, requestLimit=%r",
                controllerName, serverName, requestLimit,
            )

        # remember where requests should go
        self.ioControllerRef = controllerName
        self.ioServerRef = serverName

        # throttle state: limit, in-flight set, and overflow queue
        self.ioRequestLimit = requestLimit
        self.ioPending = set()
        self.ioBlocked = deque()

        # bind to a local controller if possible
        if serverName:
            self.ioBind = None
            if _debug:
                IOProxy._debug(" - bind deferred")
        else:
            self.ioBind = _local_controllers.get(controllerName, None)
            if _debug:
                if self.ioBind:
                    IOProxy._debug(" - local bind successful")
                else:
                    IOProxy._debug(" - local bind deferred")

    def request_io(self, iocb, urgent=False):
        """Start processing a request, throttling it unless it is urgent."""
        if _debug:
            IOProxy._debug("request_io %r urgent=%r", iocb, urgent)
        global _proxy_server

        # stamp the block with its destination
        iocb.ioServerRef = self.ioServerRef
        iocb.ioControllerRef = self.ioControllerRef

        # late binding, now that a request is actually being made
        if not self.ioBind:
            if self.ioServerRef:
                # lazily create the shared proxy server and bind to it
                if not _proxy_server:
                    _proxy_server = IOProxyServer()
                self.ioBind = _proxy_server
                if _debug:
                    IOProxy._debug(" - proxy bind successful: %r", self.ioBind)
            else:
                # the server is this process, find a local controller
                self.ioBind = _local_controllers.get(self.ioControllerRef, None)
                if not self.ioBind:
                    iocb.abort("no local controller %s" % (self.ioControllerRef,))
                    return
                if _debug:
                    IOProxy._debug(" - local bind successful")

        # urgent requests, and all requests when unthrottled, go straight out
        if urgent or not self.ioRequestLimit:
            self.ioBind.request_io(iocb)
            return

        # completion of this block may release a blocked one
        iocb.add_callback(self._proxy_trigger)

        if len(self.ioPending) < self.ioRequestLimit:
            if _debug:
                IOProxy._debug(" - cleared for launch")
            self.ioPending.add(iocb)
            self.ioBind.request_io(iocb)
        else:
            # over the limit, park it until something completes
            if _debug:
                IOProxy._debug(" - save for later")
            self.ioBlocked.append(iocb)

    def _proxy_trigger(self, iocb):
        """A throttled request has completed; maybe launch a blocked one."""
        if _debug:
            IOProxy._debug("_proxy_trigger %r", iocb)

        if iocb not in self.ioPending:
            if _debug:
                IOProxy._warning("iocb not pending: %r", iocb)
        else:
            self.ioPending.remove(iocb)

            # if there is room, launch the oldest blocked request
            if (len(self.ioPending) < self.ioRequestLimit) and self.ioBlocked:
                nextio = self.ioBlocked.popleft()
                if _debug:
                    IOProxy._debug(" - cleared for launch: %r", nextio)
                self.ioPending.add(nextio)
                self.ioBind.request_io(nextio)
#
# IOServer
#
# UDP port number the IOServer listens on
PORT = 8002
# default timeout, in seconds, applied to proxied requests
SERVER_TIMEOUT = 60
@bacpypes_debugging
class IOServer(IOController, Client):

    """Server side of remote IO.

    Receives pickled requests over UDP, runs them on a local controller,
    and sends the (pickled) responses back to the requesting client.
    """

    def __init__(self, addr=('', PORT)):
        """Initialize the remote IO handler, listening on addr."""
        if _debug:
            IOServer._debug("__init__ %r", addr)
        IOController.__init__(self)

        # create a UDP director and bind to it
        self.server = UDPDirector(addr)
        bind(self, self.server)

        # map of local IOCB -> (client request id, client address)
        self.remoteIOCB = {}

    def confirmation(self, pdu):
        """Decode an incoming request PDU and dispatch on the message code."""
        if _debug:
            IOServer._debug('confirmation %r', pdu)
        addr = pdu.pduSource
        request = pdu.pduData

        try:
            # parse the request
            request = cPickle.loads(request)
            if _debug:
                _commlog.debug(">>> %s: S %s %r" % (_strftime(), str(addr), request))

            # message codes: 0 = new request, 1 = completion, 2 = abort
            if (request[0] == 0):
                self.new_iocb(addr, *request[1:])
            elif (request[0] == 1):
                self.complete_iocb(addr, *request[1:])
            elif (request[0] == 2):
                self.abort_iocb(addr, *request[1:])
        except:
            # log and carry on; a bad request must not kill the server
            err = sys.exc_info()[1]
            IOServer._exception("error %r processing %r from %r", err, request, addr)

    def callback(self, iocb):
        """Callback when an iocb is completed by a local controller and the
        result needs to be sent back to the client."""
        if _debug:
            IOServer._debug("callback %r", iocb)

        # make sure it's one of ours
        if iocb not in self.remoteIOCB:
            IOServer._warning("IOCB not owned by server: %r", iocb)
            return

        # get the client information and drop our reference
        clientID, clientAddr = self.remoteIOCB.pop(iocb)

        # build a response
        if iocb.ioState == COMPLETED:
            response = (1, clientID, iocb.ioResponse)
        elif iocb.ioState == ABORTED:
            response = (2, clientID, iocb.ioError)
        else:
            raise RuntimeError("IOCB invalid state")

        if _debug:
            _commlog.debug("<<< %s: S %s %r" % (_strftime(), clientAddr, response))
        response = cPickle.dumps(response, 1)

        # send it to the client
        self.request(PDU(response, destination=clientAddr))

    def abort(self, err):
        """Called by a local application to abort all transactions."""
        if _debug:
            IOServer._debug("abort %r", err)

        # iterate over a snapshot: abort_io() mutates remoteIOCB
        for iocb in list(self.remoteIOCB.keys()):
            self.abort_io(iocb, err)

    def abort_io(self, iocb, err):
        """Called by a local client or a local controller to abort a transaction."""
        if _debug:
            IOServer._debug("abort_io %r %r", iocb, err)

        if iocb.ioState == COMPLETED:
            # if it completed, leave it alone
            pass
        elif iocb.ioState == ABORTED:
            # if it already aborted, leave it alone
            pass
        elif iocb in self.remoteIOCB:
            # get the client information and drop our reference
            clientID, clientAddr = self.remoteIOCB.pop(iocb)

            # build an abort response
            response = (2, clientID, err)
            if _debug:
                _commlog.debug("<<< %s: S %s %r" % (_strftime(), clientAddr, response))
            response = cPickle.dumps(response, 1)

            # send it to the client; the original code used
            # self.socket.sendto(), but this class has no 'socket'
            # attribute - every other path sends through the bound director
            self.request(PDU(response, destination=clientAddr))
        else:
            IOServer._error("no reference to aborting iocb: %r", iocb)

        # change the state and notify the client
        iocb.ioState = ABORTED
        iocb.ioError = err
        iocb.trigger()

    def new_iocb(self, clientAddr, iocbid, controllerName, args, kwargs):
        """Called when the server receives a new request."""
        if _debug:
            IOServer._debug("new_iocb %r %r %r %r %r", clientAddr, iocbid, controllerName, args, kwargs)

        # look for a controller
        controller = _local_controllers.get(controllerName, None)
        if not controller:
            # no such controller, build an abort response
            err = RuntimeError("no local controller '%s'" % (controllerName, ))
            response = (2, iocbid, err)
            if _debug:
                _commlog.debug("<<< %s: S %s %r" % (_strftime(), clientAddr, response))
            response = cPickle.dumps(response, 1)

            # send it back to the client
            self.request(PDU(response, destination=clientAddr))
        else:
            # create a local IOCB bound to the remote request id
            iocb = IOCB(*args, **kwargs)
            if _debug:
                IOServer._debug(" - local IOCB %r bound to remote %r", iocb.ioID, iocbid)

            # save a reference, ask to be notified on completion, pass it on
            self.remoteIOCB[iocb] = (iocbid, clientAddr)
            iocb.add_callback(self.callback)
            controller.request_io(iocb)

    def abort_iocb(self, addr, iocbid, err):
        """Called when the server receives an abort request from a client."""
        if _debug:
            IOServer._debug("abort_iocb %r %r %r", addr, iocbid, err)

        # find the IOCB bound to this client address/id pair
        for iocb in list(self.remoteIOCB.keys()):
            clientID, clientAddr = self.remoteIOCB[iocb]
            if (addr == clientAddr) and (clientID == iocbid):
                break
        else:
            IOServer._error("no reference to aborting iocb %r from %r", iocbid, addr)
            return
        if _debug:
            IOServer._debug(" - local IOCB %r bound to remote %r", iocb.ioID, iocbid)

        # we're done with this
        del self.remoteIOCB[iocb]

        # clear the callback (the client already knows) and abort locally
        iocb.ioCallback = []
        iocb.abort(err)
#
# IOProxyServer
#
# default timeout, in seconds, for proxied requests (re-declares the module
# constant defined earlier with the same value)
SERVER_TIMEOUT = 60
@bacpypes_debugging
class IOProxyServer(IOController, Client):

    """Client side of remote IO.

    Forwards local requests to a remote IOServer over UDP and completes
    or aborts the matching local IOCBs when responses arrive.
    """

    def __init__(self, addr=('', 0), name=None):
        """Initialize the remote IO handler on an ephemeral UDP port."""
        if _debug:
            IOProxyServer._debug("__init__")
        IOController.__init__(self, name=name)

        # create a UDP director and bind to it
        self.server = UDPDirector(addr)
        bind(self, self.server)
        if _debug:
            IOProxyServer._debug(" - bound to %r", self.server.socket.getsockname())

        # map of request id -> outstanding local IOCB
        self.localIOCB = {}

    def confirmation(self, pdu):
        """Decode an incoming response PDU and dispatch on the message code."""
        if _debug:
            IOProxyServer._debug('confirmation %r', pdu)
        addr = pdu.pduSource
        request = pdu.pduData

        try:
            # parse the request
            request = cPickle.loads(request)
            if _debug:
                _commlog.debug(">>> %s: P %s %r" % (_strftime(), addr, request))

            # message codes: 1 = completion, 2 = abort
            if (request[0] == 1):
                self.complete_iocb(addr, *request[1:])
            elif (request[0] == 2):
                self.abort_iocb(addr, *request[1:])
        except:
            # log and carry on; a bad response must not kill the proxy
            err = sys.exc_info()[1]
            IOProxyServer._exception("error %r processing %r from %r", err, request, addr)

    def process_io(self, iocb):
        """Package up the local IO request and send it to the server."""
        if _debug:
            IOProxyServer._debug("process_io %r", iocb)

        # save a reference in our dictionary
        self.localIOCB[iocb.ioID] = iocb

        # start a default timer if one hasn't already been set
        if not iocb.ioTimeout:
            iocb.set_timeout(SERVER_TIMEOUT, RuntimeError("no response from " + iocb.ioServerRef))

        # build a message
        request = (0, iocb.ioID, iocb.ioControllerRef, iocb.args, iocb.kwargs)
        if _debug:
            _commlog.debug("<<< %s: P %s %r" % (_strftime(), iocb.ioServerRef, request))
        request = cPickle.dumps(request, 1)

        # send it to the server
        self.request(PDU(request, destination=(iocb.ioServerRef, PORT)))

    def abort(self, err):
        """Called by a local application to abort all transactions, local
        and remote."""
        if _debug:
            IOProxyServer._debug("abort %r", err)

        # iterate over a snapshot: abort_io() mutates localIOCB
        for iocb in list(self.localIOCB.values()):
            self.abort_io(iocb, err)

    def abort_io(self, iocb, err):
        """Called by a local client or a local controller to abort a transaction."""
        if _debug:
            IOProxyServer._debug("abort_io %r %r", iocb, err)

        if iocb.ioState == COMPLETED:
            # if it completed, leave it alone
            pass
        elif iocb.ioState == ABORTED:
            # if it already aborted, leave it alone
            pass
        elif iocb.ioID in self.localIOCB:
            # delete the dictionary reference
            del self.localIOCB[iocb.ioID]

            # build an abort request and send it to the server
            request = (2, iocb.ioID, err)
            if _debug:
                _commlog.debug("<<< %s: P %s %r" % (_strftime(), iocb.ioServerRef, request))
            request = cPickle.dumps(request, 1)
            self.request(PDU(request, destination=(iocb.ioServerRef, PORT)))
        else:
            raise RuntimeError("no reference to aborting iocb: %r" % (iocb.ioID,))

        # change the state and notify the client
        iocb.ioState = ABORTED
        iocb.ioError = err
        iocb.trigger()

    def complete_iocb(self, serverAddr, iocbid, msg):
        """Called when the client receives a response to a request."""
        if _debug:
            IOProxyServer._debug("complete_iocb %r %r %r", serverAddr, iocbid, msg)

        # make sure this is an outstanding local request
        iocb = self.localIOCB.pop(iocbid, None)
        if iocb is None:
            IOProxyServer._error("no reference to IOCB %r", iocbid)
            if _debug:
                IOProxyServer._debug(" - localIOCB: %r", self.localIOCB)
            return

        # change the state and notify the client
        iocb.ioState = COMPLETED
        iocb.ioResponse = msg
        iocb.trigger()

    def abort_iocb(self, addr, iocbid, err):
        """Called when the client receives an abort for a pending request."""
        if _debug:
            IOProxyServer._debug("abort_iocb %r %r %r", addr, iocbid, err)

        if iocbid not in self.localIOCB:
            raise RuntimeError("no reference to aborting iocb: %r" % (iocbid,))

        # get the iocb and delete the dictionary reference
        iocb = self.localIOCB.pop(iocbid)

        # change the state and notify the client
        iocb.ioState = ABORTED
        iocb.ioError = err
        iocb.trigger()
#
# abort
#
@bacpypes_debugging
def abort(err):
    """Abort everything, everywhere."""
    if _debug:
        abort._debug("abort %r", err)

    # abort the server first (the _highlander attribute is presumably set
    # when an IOServer instance is created - not visible here, confirm)
    if IOServer._highlander:
        IOServer._highlander.abort(err)

    # then abort every local controller
    for controller in _local_controllers.values():
        controller.abort(err)
| sandbox/io.py | 39,489 | Initialize a chained control block.
Initialize a group.
Initialize a controller.
Initialize a queue controller.
Create an IO client. It implements request_io like a controller, but
passes requests on to a local controller if it happens to be in the
same process, or the IOProxyServer instance to forward on for processing.
Initialize the remote IO handler.
Initialize the remote IO handler.
This has completed, remove it from the set of pending requests
and see if it's OK to start up the next one.
Called to launch the next request in the queue.
Called to launch the next request in the queue.
Abort everything, everywhere.
Called by a client to abort a transaction.
Called by a client to abort all of the member transactions.
When the last pending member is aborted the group callback
function will be called.
abort all of the control blocks in the queue.
Abort all requests, no default implementation.
Abort all pending requests.
Called by a local application to abort all transactions.
Called by a local application to abort all transactions, local
and remote.
Forward the abort downstream.
Called by a handler or a client to abort a transaction.
Called by a handler or a client to abort a transaction.
Called by a local client or a local controller to abort a transaction.
Called by a local client or a local controller to abort a transaction.
Called when the client or server receives an abort request.
Called when the client or server receives an abort request.
Called by a handler to notify the controller that a request is
being processed.
Called by a handler to notify the controller that a request is
being processed.
Add an IOCB to the group, you can also add other groups.
Pass a function to be called when IO is complete.
Callback when an iocb is completed by a local controller and the
result needs to be sent back to the client.
Callback when this iocb completes.
Called to complete a transaction, usually when process_io has
shipped the IOCB off to some other thread or function.
Called by a handler to return data to the client.
Called by a handler to return data to the client.
Called when the client receives a response to a request.
Hook to transform the response, called when this IOCB is
completed.
Hook to transform the request, called when this IOCB is
chained.
Get a request from a queue, optionally block until a request
is available.
Callback when a child iocb completes.
Called when the server receives a new request.
Figure out how to respond to this request. This must be
provided by the derived class.
Figure out how to respond to this request. This must be
provided by the derived class.
Package up the local IO request and send it to the server.
Add an IOCB to a queue. This is usually called by the function
that filters requests and passes them out to the correct processing
thread.
Remove a control block from the queue, called if the request
is canceled/aborted.
Called by a client to start processing a request.
Called by a client to start processing a request.
Called by a client to start processing a request.
Called to set a transaction timer.
Set the event and make the callback.
Wait for the completion event to be set.
IO Module
!/usr/bin/python some debugging IOCB States has not been submitted queued, waiting for processing being processed finished finished in a bad way IOQController States nothing happening working on an iocb waiting between iocb requests (throttled) dictionary of local controllers special abort error _strftime IOCB - Input Output Control Block lock the identity sequence number generate a unique identity for this block release the lock debugging postponed until ID acquired save the ID save the request parameters start with an idle request blocks are bound to a controller blocks could reference a local or remote server each block gets a completion event applications can set a callback functions request is not currently queued extract the priority if it was given request has no timeout store it already complete? waiting from a non-daemon thread could be trouble if it's queued, remove it from its queue if there's a timer, cancel it set the completion event make the callback pass to controller just fill in the data pass to controller just fill in the data if one has already been created, cancel it (re)schedule it IOChainMixIn save a refence back to the iocb set the callback to follow the chain if we're not chained, there's no notification to do this object becomes its controller consider the parent active let the derived class set the args and kwargs extract the error and abort the request if we're not chained, there's no notification to do refer to the chained iocb let the derived class transform the data extract the error and abort break the references notify the client make sure we're being notified of an abort request from the iocb we are chained from call my own abort(), which may forward it to a controller or be overridden by IOGroup by default do nothing, the arguments have already been supplied refer to the chained iocb if this has completed successfully, pass it up change the state and transform the content if this aborted, pass that up too change the state IOChain 
initialize IOCB part to pick up the ioID IOGroup start with an empty list of members start out being done. When an IOCB is added to the group that is not already completed, this state will change to PENDING. add this to our members assume all of our members have not completed yet when this completes, call back to the group. If this has already completed, it will trigger check all the members everything complete change the state to reflect that it was killed abort all the members notify the client IOQueue - Input Output Queue requests should be pending before being queued save that it might have been empty add the request to the end of the list of iocb's at same priority point the iocb back to this queue set the event, queue is no longer empty if the queue is empty and we do not block return None wait for something to be in the queue extract the first element if the queue is empty, clear the event return the request remove the request from the queue if the queue is empty, clear the event send aborts to all of the members flush the queue the queue is now empty, clear the event IOController save the name register the name bind the iocb to this controller hopefully there won't be an error change the state let derived class figure out how to process this extract the error if there was an error, abort the request requests should be idle or pending before coming active change the state if it completed, leave it alone if it already aborted, leave it alone change the state notify the client if it completed, leave it alone if it already aborted, leave it alone change the state notify the client IOQController give ourselves a nice name start idle no active iocb create an IOQueue for iocb's requested when not idle change the state notify the client bind the iocb to this controller if we're busy, queue it hopefully there won't be an error let derived class figure out how to process this extract the error if there was an error, abort the request base class work first, setting 
iocb state and timer data change our state keep track of the iocb check to see if it is completing the active one normal completion no longer an active iocb check to see if we should wait a bit change our state schedule a call in the future change our state look for more to do normal abort check to see if it is completing the active one no longer an active iocb change our state look for more to do if we are busy, do nothing if there is nothing to do, return get the next iocb hopefully there won't be an error let derived class figure out how to process this extract the error if there was an error, abort the request if we're idle, call again make sure we are waiting change our state look for more to do IOProxy save the server reference set a limit on how many requests can be submitted bind to a local controller if possible save the server and controller reference check to see if it needs binding if the server is us, look for a local controller if this isn't urgent and there is a limit, see if we've reached it call back when this is completed check for the limit save it for later just pass it along check to send another one this one is now pending IOServer create a UDP director dictionary of IOCBs as a server parse the request pick the message extract the error make sure it's one of ours get the client information we're done with this build a response send it to the client if it completed, leave it alone if it already aborted, leave it alone get the client information we're done with this build an abort response send it to the client change the state notify the client look for a controller create a nice error message build an abort response send it to the server create an IOCB save a reference to it make sure we're notified when it completes pass it along see if this came from a client we're done with this clear the callback, we already know tell the local controller about the abort IOProxyServer create a UDP director dictionary of IOCBs as a client parse the request 
pick the message extract the error save a reference in our dictionary start a default timer if one hasn't already been set build a message send it to the server if it completed, leave it alone if it already aborted, leave it alone delete the dictionary reference build an abort request send it to the server change the state notify the client assume nothing make sure this is a local request get the iocb delete the dictionary reference change the state notify the client get the iocb delete the dictionary reference change the state notify the client abort start with the server now do everything local | 9,811 | en | 0.914152 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from cli.output import CLIOutput
from cli.user_input import CLIUserInput
class CLIDay():

    """Command-line flow for a single day of exercises.

    Keeps the current day object and the name of the next section to run,
    and walks through the sections until an exit is requested.
    """

    # constants

    INTRO_TEXT_WIDTH = 60

    CMD_HELP_ALIASES = ["h", "help"]
    CMD_WORDS_ALIASES = ["w", "words"]
    CMD_SKIP_ALIASES = ["s", "skip"]
    CMD_EXIT_ALIASES = ["e", "exit", "q", "quit"]
    CMD_NEXT_ALIASES = ["n", "next"]
    CMD_PREV_ALIASES = ["p", "prev"]

    ACTION_EXIT = "exit"
    ACTION_TITLE = "title"
    ACTION_NEW_WORDS = "new words"
    ACTION_INTRO_TEXT = "intro text"
    ACTION_SAMPLE_SENTENCES = "sample sentences"
    ACTION_DEFINITIONS = "definitions"
    ACTION_MATCHING = "matching"
    ACTION_OTHER_NEW_WORDS = "other new words"

    # General variables #

    # name of the next section to run
    _next_action = None
    # the day object currently being displayed
    _day = None

    @classmethod
    def start(cls, day):
        """Store the day object and enter the main loop."""
        cls._day = day
        cls.mainloop()

    @classmethod
    def mainloop(cls):
        """Step through the day's sections until an exit is requested.

        Each branch first queues the default follow-up section, then runs
        the current one; tasks may overwrite _next_action to jump around.
        """
        cls._next_action = "title"
        while cls._next_action != cls.ACTION_EXIT:

            if cls._next_action == cls.ACTION_TITLE:
                cls._next_action = cls.ACTION_NEW_WORDS
                cls.title()

            elif cls._next_action == cls.ACTION_NEW_WORDS:
                cls._next_action = cls.ACTION_INTRO_TEXT
                cls.new_words()
                CLIOutput.empty_line(1)
                CLIUserInput.wait_for_enter()

            elif cls._next_action == cls.ACTION_INTRO_TEXT:
                cls._next_action = cls.ACTION_SAMPLE_SENTENCES
                cls.intro_text()
                CLIOutput.empty_line(1)
                CLIUserInput.wait_for_enter()

            elif cls._next_action == cls.ACTION_SAMPLE_SENTENCES:
                cls._next_action = cls.ACTION_DEFINITIONS
                cls.sample_sentences()

            elif cls._next_action == cls.ACTION_DEFINITIONS:
                cls._next_action = cls.ACTION_MATCHING
                cls.definitions()

            elif cls._next_action == cls.ACTION_MATCHING:
                cls._next_action = cls.ACTION_OTHER_NEW_WORDS
                cls.matching()

            elif cls._next_action == cls.ACTION_OTHER_NEW_WORDS:
                cls._next_action = cls.ACTION_EXIT
                cls.other_new_words()

            else:
                raise KeyError("Unknown action request.")

    # day displays ------------------------------------------------------- #

    @classmethod
    def title(cls):
        """Display title"""
        CLIOutput.empty_line(1)
        CLIOutput.center(cls._day.get_title())

    @classmethod
    def new_words(cls, display_in_full=True):
        """Display new words section"""
        regular = []
        phonetic = []
        for unit in cls._day.get_new_words():
            regular.append(unit["regular"])
            phonetic.append(unit["phonetic"])

        # the section title is skipped when re-displaying inside a task
        if display_in_full:
            CLIOutput.section_title("NEW WORDS")
            CLIOutput.empty_line(1)

        CLIOutput.empty_line(1)
        CLIOutput.words_table(regular, phonetic)

    @classmethod
    def intro_text(cls):
        """Display intro text"""
        parts = cls._day.get_intro_text()
        CLIOutput.empty_line(2)
        CLIOutput.framed(parts, cls.INTRO_TEXT_WIDTH)

    # task answer cycle -------------------------------------------------- #

    @classmethod
    def _answer_cycle(cls, prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
        """Answer cycle

        Prompt until a correct answer or a flow-control command is given.
        Returns True to continue with the current task, False to leave it
        (after setting _next_action as appropriate).
        """
        while True:

            CLIOutput.empty_line(1)
            a_type, a_content = CLIUserInput.get_answer(prompt)

            if a_type == CLIUserInput.TYPE_ANSWER:
                if a_content in answers:
                    CLIOutput.empty_line(1)
                    l_pr_answer()
                    CLIOutput.empty_line(1)
                    CLIOutput.simple("Correct!")
                    return True
                else:
                    CLIOutput.warning("Incorrect, try again.")

            elif a_type == CLIUserInput.TYPE_COMMAND:
                if a_content in cls.CMD_WORDS_ALIASES:
                    cls.new_words(False)
                    CLIOutput.empty_line(1)
                    l_pr_question()
                elif a_content in cls.CMD_SKIP_ALIASES:
                    return True
                elif a_content in cls.CMD_NEXT_ALIASES:
                    l_next_msg()
                    return False
                elif a_content in cls.CMD_PREV_ALIASES:
                    l_prev_msg()
                    cls._next_action = prev_action
                    return False
                elif a_content in cls.CMD_EXIT_ALIASES:
                    cls._next_action = cls.ACTION_EXIT
                    return False
                elif a_content in cls.CMD_HELP_ALIASES:
                    cls.help_cmd_in_task()
                else:
                    CLIOutput.warning("Invalid command.")

            else:
                raise ValueError("Unknown answer type.")

    # tasks -------------------------------------------------------------- #

    @classmethod
    def sample_sentences(cls):
        """Display 'sample sentences' task"""
        data = cls._day.get_sample_sentences()

        CLIOutput.section_title("SAMPLE SENTENCES")
        CLIOutput.empty_line(1)
        CLIOutput.simple(data["prompt"])
        CLIOutput.empty_line(1)
        for sentence in data["sentences"]:
            CLIOutput.numbered_sentence(sentence["id"], sentence["beginning"] + CLIOutput.BLANK + sentence["end"], CLIOutput.FORMAT_INDENTED)
        new_words_extension = cls._day.get_new_words_extension()
        CLIOutput.new_words_extension(new_words_extension)
        CLIOutput.empty_line(1)

        for sentence in data["sentences"]:

            prompt = "{}. ".format(sentence["id"])
            l_pr_question = lambda : CLIOutput.numbered_sentence(sentence["id"], sentence["beginning"] + CLIOutput.BLANK + sentence["end"], CLIOutput.FORMAT_REGULAR)

            answers = []
            answers.append(sentence['answer'])

            # assemble the full sentence for display on success
            full_answer = sentence['answer']
            if len(sentence["beginning"]) > 0:
                full_answer = sentence["beginning"] + " " + full_answer
            if len(sentence["end"]) > 0:
                # no space before closing punctuation
                if sentence["end"] not in [".", "!", "?", "?!", "!?"]:
                    full_answer += " "
                full_answer += sentence["end"]
            l_pr_answer = lambda : CLIOutput.simple(full_answer)

            prev_action = cls.ACTION_SAMPLE_SENTENCES
            l_prev_msg = lambda : CLIOutput.general_message("This is the first task: Starting from the beginning.")
            l_next_msg = lambda : None

            # answer cycle
            cls.new_words(False)
            CLIOutput.empty_line(1)
            l_pr_question()
            if not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
                # return after answer cycle returns
                return

    @classmethod
    def definitions(cls):
        """Display 'definitions' task"""

        # skip until data files are complete
        return

        data = cls._day.get_definitions()

        CLIOutput.section_title("DEFINITIONS")
        CLIOutput.empty_line(1)
        CLIOutput.simple(data["prompt"])
        CLIOutput.empty_line(1)
        for definition in data["definitions"]:
            CLIOutput.numbered_sentence(definition["id"], definition["text"], CLIOutput.FORMAT_INDENTED)
        l_words = lambda : [CLIOutput.numbered_sentence(word["id"], word["text"], CLIOutput.FORMAT_INDENTED) for word in data["words"]]

        for definition in data["definitions"]:

            prompt = "{}. ".format(definition["id"])
            l_pr_question = lambda : CLIOutput.numbered_sentence(definition["id"], definition["text"], CLIOutput.FORMAT_REGULAR)

            # both the answer's id and its text are accepted
            answers = []
            answer_id = [value for (id, value) in data["answers"] if id == definition["id"]][0]
            answers.append(answer_id)
            answer_text = [item["text"] for item in data["words"] if item["id"] == answer_id][0]
            answers.append(answer_text)
            l_pr_answer = lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR)

            prev_action = cls.ACTION_SAMPLE_SENTENCES
            l_prev_msg = lambda : None
            l_next_msg = lambda : None

            # answer cycle
            CLIOutput.empty_line(2)
            l_words()
            CLIOutput.empty_line(1)
            l_pr_question()
            if not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
                # return after answer cycle returns
                return

    @classmethod
    def matching(cls):
        """Display 'matching' task"""

        # skip until data files are complete
        return

        data = cls._day.get_matching()

        CLIOutput.section_title(data["name"])
        CLIOutput.empty_line(1)
        CLIOutput.simple(data["prompt"])
        CLIOutput.empty_line(1)
        for sentence in data["sentences"]:
            CLIOutput.numbered_sentence(sentence["id"], sentence["text"], CLIOutput.FORMAT_INDENTED)
        l_words = lambda : [CLIOutput.numbered_sentence(word["id"], word["text"], CLIOutput.FORMAT_INDENTED) for word in data["words"]]

        for sentence in data["sentences"]:

            prompt = "{}. ".format(sentence["id"])
            l_pr_question = lambda : CLIOutput.numbered_sentence(sentence["id"], sentence["text"], CLIOutput.FORMAT_REGULAR)

            # both the answer's id and its text are accepted
            answers = []
            answer_id = [value for (id, value) in data["answers"] if id == sentence["id"]][0]
            answers.append(answer_id)
            answer_text = [item["text"] for item in data["words"] if item["id"] == answer_id][0]
            answers.append(answer_text)
            l_pr_answer = lambda : CLIOutput.numbered_sentence(answer_id, answer_text, CLIOutput.FORMAT_REGULAR)

            prev_action = cls.ACTION_SAMPLE_SENTENCES
            l_prev_msg = lambda : None
            l_next_msg = lambda : None

            # answer cycle
            CLIOutput.empty_line(2)
            l_words()
            CLIOutput.empty_line(1)
            l_pr_question()
            if not cls._answer_cycle(prompt, l_pr_question, answers, l_pr_answer, prev_action, l_prev_msg, l_next_msg):
                # return after answer cycle returns
                return

    @classmethod
    def other_new_words(cls):
        """Display other new words section"""
        data = cls._day.get_other_new_words()

        CLIOutput.section_title("OTHER NEW WORDS:")
        CLIOutput.empty_line(1)
        CLIOutput.simple(data["prompt"])
        CLIOutput.empty_line(1)
        # free-form section: the input is read but deliberately unused
        a_type, a_content = CLIUserInput.get_answer("")
        CLIOutput.empty_line(1)

    # helper ------------------------------------------------------------- #

    @classmethod
    def help_cmd_in_task(cls):
        """List the commands available from within a task."""
        collection = [
            ["words", "Display New Words section again."],
            ["skip", "Move on to the next part of the task."],
            ["next", "Leave task and move on to the next one."],
            ["prev", "Leave task and jump back to the previous one."],
            ["exit", "Leave task an exit to top program level."]
        ]
        CLIOutput.empty_line(1)
        CLIOutput.simple("Within the task, the following commands are available:")
        CLIOutput.value_pair_list(collection, CLIOutput.FORMAT_REGULAR, CLIOutput.SPACING_CLOSE)
# END ---------------------------------------------------------------- #
| cli/day.py | 11,932 | Answer cycle
Display 'definitions' task
Display intro text
Display 'matching' task
Display new words section
Display other new words section
Display 'sample sentences' task
Display title
This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. constants General variables day displays ------------------------------------------------------- task answer cycle -------------------------------------------------- tasks -------------------------------------------------------------- answer cycle return after answer cycle returns skip until data files are complete answer cycle return after answer cycle returns skip until data files are complete answer cycle return after answer cycle returns helper ------------------------------------------------------------- END ---------------------------------------------------------------- | 970 | en | 0.555469 |
from __future__ import absolute_import
from desicos.abaqus.abaqus_functions import create_sketch_plane
from desicos.abaqus.utils import cyl2rec
class Imperfection(object):

    """Base class for all imperfections.

    This class should be sub-classed when a new imperfection is created.
    """

    def __init__(self):
        # identification and position data
        self.name = ''
        self.thetadegs = []
        # NOTE zs, rs and pts are the same
        self.pts = []
        self.zs = []
        self.rs = []
        # related objects, filled in elsewhere
        self.cc = None
        self.impconf = None
        self.amplitude = None
        self.sketch_plane = None

    def create_sketch_plane(self):
        """Create and store the sketch plane for this imperfection."""
        self.sketch_plane = create_sketch_plane(self.impconf.conecyl, self)

    def get_xyz(self):
        """Return the rectangular coordinates of this imperfection.

        NOTE(review): reads self.pt and self.thetadeg (singular), which
        __init__ does not define - presumably set by sub-classes; confirm.
        """
        r, z = self.impconf.conecyl.r_z_from_pt(self.pt)
        return cyl2rec(r, self.thetadeg, z)
| desicos/abaqus/imperfections/imperfection.py | 874 | Base class for all imperfections
This class should be sub-classed when a new imperfection is created.
NOTE zs, rs and pts are the same | 136 | en | 0.907883 |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Code to interact with the primersearch program from EMBOSS."""
class InputRecord(object):
    """Represent the input file into the primersearch program.

    Collects primer sets and renders them in the simple
    whitespace-separated primer file format expected by primersearch.
    """

    def __init__(self):
        self.primer_info = []

    def __str__(self):
        return "".join("%s %s %s\n" % primer_set
                       for primer_set in self.primer_info)

    def add_primer_set(self, primer_name, first_primer_seq,
                       second_primer_seq):
        """Add primer information to the record."""
        self.primer_info.append(
            (primer_name, first_primer_seq, second_primer_seq))
class OutputRecord(object):
    """Represent the information from a primersearch job.

    The ``amplifiers`` attribute maps each primer name to the list of
    Amplifier objects found for it.
    """

    def __init__(self):
        # primer name -> list of Amplifier instances
        self.amplifiers = {}
class Amplifier(object):
    """Represent a single amplification from a primer."""

    def __init__(self):
        # Free-text description of the amplified sequence hit.
        self.hit_info = ""
        # Amplimer length in base pairs (0 until parsed).
        self.length = 0
def read(handle):
    """Parse primersearch output from *handle* into an OutputRecord."""
    record = OutputRecord()
    for line in handle:
        if not line.strip():
            continue
        if line.startswith("Primer name"):
            current_name = line.split()[-1]
            record.amplifiers[current_name] = []
        elif line.startswith("Amplimer"):
            current_amplifier = Amplifier()
            record.amplifiers[current_name].append(current_amplifier)
        elif line.startswith("\tSequence: "):
            current_amplifier.hit_info = line.replace("\tSequence: ", "")
        elif line.startswith("\tAmplimer length: "):
            current_amplifier.length = int(line.split()[-2])
        else:
            # Continuation line of the free-text hit description.
            current_amplifier.hit_info += line
    # Trim trailing whitespace accumulated from the raw lines.
    for amplifier_list in record.amplifiers.values():
        for amplifier in amplifier_list:
            amplifier.hit_info = amplifier.hit_info.rstrip()
    return record
| Bio/Emboss/PrimerSearch.py | 2,311 | Represent a single amplification from a primer.
Represent the input file into the primersearch program.
This makes it easy to add primer information and write it out to the
simple primer file format.
Represent the information from a primersearch job.
amplifiers is a dictionary where the keys are the primer names and
the values are a list of PrimerSearchAmplifier objects.
Add primer information to the record.
Get output from primersearch into a PrimerSearchOutputRecord.
Code to interact with the primersearch program from EMBOSS.
This code is part of the Biopython distribution and governed by its license. Please see the LICENSE file that should have been included as part of this package. | 699 | en | 0.844436 |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers a decomposition for the Entangle gate.
Applies a Hadamard gate to the first qubit and then, conditioned on this first
qubit, CNOT gates to all others.
"""
from projectq.cengines import DecompositionRule
from projectq.meta import Control, get_control_count
from projectq.ops import X, H, Entangle, All
def _decompose_entangle(cmd):
    """Decompose Entangle: H on qubit 0, then CNOTs onto the rest."""
    qubits = cmd.qubits[0]
    engine = cmd.engine
    with Control(engine, cmd.control_qubits):
        # Hadamard first, then fan out X gates controlled on qubit 0.
        H | qubits[0]
        with Control(engine, qubits[0]):
            All(X) | qubits[1:]
# Register the Entangle decomposition so decomposer engines can find it.
all_defined_decomposition_rules = [
    DecompositionRule(Entangle.__class__, _decompose_entangle)
]
| projectq/setups/decompositions/entangle.py | 1,282 | Decompose the entangle gate.
Registers a decomposition for the Entangle gate.
Applies a Hadamard gate to the first qubit and then, conditioned on this first
qubit, CNOT gates to all others.
Copyright 2017 ProjectQ-Framework (www.projectq.ch) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 787 | en | 0.847002 |
import re
import json
from math import log, sqrt
from jinja2 import Markup
from sklearn import cluster
from sklearn.decomposition import PCA
from scipy import stats
from sklearn import metrics
import numpy
from db import export_sql
from werkzeug.wrappers import Response
# create higher order transformations
# create higher order transformations
def x2fs(X, fields, type=''):
    """Append higher-order feature transformations in place.

    For every row of ``X`` (a list of lists of numbers), appends products
    of pairs of the original columns, and appends the matching
    "a*b"-style names to ``fields``.

    Parameters
    ----------
    X : list of list of float
        Data rows; mutated in place.
    fields : list of str
        Column names; mutated in place to match the new columns.
    type : str
        'Interaction' (cross terms only), 'Quadratic' (cross terms and
        squares) or 'Purely Quadratic' (squares only). Any other value,
        including the default '', is a no-op.
        (Parameter name shadows the builtin; kept for interface
        compatibility with existing callers.)
    """
    if type == 'Interaction':
        start = lambda j: j + 1
        stop = lambda j, n: n
    elif type == 'Quadratic':
        start = lambda j: j
        stop = lambda j, n: n
    elif type == 'Purely Quadratic':
        start = lambda j: j
        stop = lambda j, n: j + 1
    else:
        return
    # Fix: guard empty input — the original read len(X[0]) and raised
    # IndexError when X had no rows.
    if not X:
        return
    n_cols = len(X[0])
    for row in X:
        # j1/j2 range only over the original columns, so appending while
        # iterating is safe.
        for j1 in range(n_cols):
            for j2 in range(start(j1), stop(j1, n_cols)):
                row.append(row[j1] * row[j2])
    for j1 in range(n_cols):
        for j2 in range(start(j1), stop(j1, n_cols)):
            fields.append(fields[j1] + '*' + fields[j2])
# fit_transform from sklearn doesn't return the loadings V. Here is a hacked version
def fit_transform(pca, X):
    """Run sklearn PCA's internal fit and return (scores, loadings).

    sklearn's ``fit_transform`` does not expose the component matrix V,
    so this helper calls the private ``_fit`` and returns both pieces.

    Parameters
    ----------
    pca : sklearn.decomposition.PCA
        Unfitted PCA instance; its ``whiten`` flag is honoured.
    X : ndarray
        Data matrix with samples in rows.

    Returns
    -------
    (U, V) : tuple of ndarray
        ``U`` are the transformed samples; ``V`` holds the loadings with
        one principal component per row.
    """
    scores, singular_values, components = pca._fit(X)
    if pca.whiten:
        # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
        scores *= sqrt(X.shape[0])
    else:
        # X_new = X * V = U * S * V^T * V = U * S
        scores *= singular_values
    # Transpose so that PCA_1 is in the first row.
    return (scores, components.transpose())
def evaluate(clust_dists, clustidx, X):
    """Summarize K-means assignment quality.

    Parameters
    ----------
    clust_dists : sequence of sequence of float
        For each sample, its distance to every cluster centroid.
    clustidx : sequence of int
        Index of the assigned (nearest) cluster for each sample.
    X : unused
        Kept for interface compatibility with existing callers.

    Returns
    -------
    (results, clustmean) : tuple
        ``results`` is a dict with key ``'meandist'`` — the mean distance
        of samples to their assigned centroid; ``clustmean`` is a list of
        per-cluster mean distances (0.0 for empty clusters).
    """
    n_clusters = len(clust_dists[0]) if len(clust_dists) > 0 else 0
    clustsum = [0.0] * n_clusters
    clustcount = [0] * n_clusters
    total = 0.0
    for dists, assigned in zip(clust_dists, clustidx):
        d = dists[assigned]
        total += d
        clustsum[assigned] += d
        clustcount[assigned] += 1
    count = len(clustidx)
    # Fix: guard divisions — the original raised ZeroDivisionError for
    # empty input or for clusters that received no samples.
    results = {'meandist': total / count if count else 0.0}
    clustmean = [s / c if c else 0.0
                 for s, c in zip(clustsum, clustcount)]
    return results, clustmean
def render(vis, request, info):
    """Render the K-means clustering page.

    Builds a SQL query from request arguments, loads the (cached) CSV
    export, optionally pre-processes features (z-score / PCA), runs
    K-means, writes provenance and PCA side files, and returns the
    rendered explore + K-means HTML as a werkzeug Response.

    ``info`` is mutated in place with messages, results and template
    variables.
    """
    info["message"] = []
    info["results"] = []
    # module independent user inputs
    table = request.args.get("table", '')
    where = request.args.get("where", '1=1')
    limit = request.args.get("limit", '1000')
    start = request.args.get("start", '0') # start at 0
    reload = int(request.args.get("reload", 0))
    view = request.args.get("view", '')
    # module dependent user inputs
    field = request.args.get("field", '')
    pre_process = request.args.get("pre_process", '')
    pre_transform = request.args.get("pre_transform", '')
    orderBy = request.args.get("orderBy", '')
    groupBy = request.args.get("groupBy", '')
    if orderBy and len(orderBy) > 0: orderBy = ' order by %s' % orderBy
    if groupBy and len(groupBy) > 0: groupBy = ' group by %s' % groupBy
    k = int(request.args.get("k", 2))
    pfield = request.args.get("pfield", [])
    # verify essential parameter details - smell test
    if len(table) == 0 or len(field) == 0:
        info["message"].append("Table or field missing")
        info["message_class"] = "failure"
    else:
        # prepare sql query
        # NOTE(review): SQL is built by string interpolation from request
        # arguments — SQL-injection risk; confirm inputs are trusted.
        sql = "select %s from %s where %s %s %s limit %s offset %s" % (
            field, table, where, groupBy, orderBy, limit, start)
        (datfile, reload, result) = export_sql(sql, vis.config, reload, None, view)
        if len(result) > 0:
            info["message"].append(result)
            info["message_class"] = "failure"
        else:
            # Load the exported CSV into a numeric matrix.
            X = []
            with open(datfile, 'r') as f:
                for r in f:
                    row = r.rstrip().split(',')
                    X.append([float(r) for r in row])
            xfield = pfield
            # transform features
            x2fs(X, xfield, pre_transform)
            pfield = xfield
            X = numpy.array(X)
            if pre_process == "Z-Score":
                X = stats.zscore(X, axis=0)
            elif pre_process == "PCA":
                pca = PCA()
                (X, V) = fit_transform(pca, X)
                pfield = ['PCA_%d' % (d + 1) for d in range(len(pfield))]
            elif pre_process == "Whitened PCA":
                pca = PCA(whiten=True)
                (X, V) = fit_transform(pca, X)
                pfield = ['PCA_%d' % (d + 1) for d in range(len(pfield))]
            clust = cluster.KMeans(n_clusters=k)
            cidx = clust.fit_predict(X)
            cdists = clust.transform(X)
            # summary results
            results, clustmeans = evaluate(cdists, cidx, X)
            info["results"].append('Clustering the data using K-means with k=%d' % k)
            info["results"].append('Average distance to centroid: %.4f' % results['meandist'])
            # Cache key used to name the side files written below.
            hashquery = datfile + hex(hash(request.args.get('query', datfile)) & 0xffffffff)
            if pre_process == "PCA" or pre_process == "Whitened PCA":
                #write pca matrix file
                info["datfile_matrix"] = hashquery + '.pca.csv'
                with open(info["datfile_matrix"], 'w') as f:
                    f.write("feature,%s\n" % (','.join(xfield)))
                    for i in range(len(V)):
                        f.write('PCA_%d,%s\n' % (i + 1, ','.join([str(v) for v in V[i]])))
                info["pca_matrix_divs"] = Markup('<h2>PCA Components</h2><div id="svg-pca_matrix"></div>')
            else:
                info["pca_matrix_divs"] = ''
            # preparing within cluster distances into a js array
            f = []
            for i in range(k):
                f.append('{cluster:"%d", distance:%.3f}' % (i, clustmeans[i]))
            info["clust_data"] = Markup('clust_data=[' + ','.join(f) + '];')
            #provenance
            #0:id,1:prediction result (grouping),2:actual label(shape),3:error,4:y,or features
            info["datfile_provenance"] = hashquery + '.provenance.csv'
            RES = ['Cluster %d' % (i + 1) for i in range(k)]
            with open(info["datfile_provenance"], 'w') as f:
                f.write('Cluster,Error,%s\n' % (','.join(pfield)))
                for i in range(len(cidx)):
                    e = cdists[i][cidx[i]]
                    f.write('%s,%.4f,%s\n' % (RES[cidx[i]], e, ','.join([str(r) for r in X[i]])))
            pfield = ['cluster'] + pfield
            # One crossfilter chart div per column plus the distance chart.
            divs = [
                '<div class="chart"><div class="title">%s<a href="javascript:reset(%d)" class="reset" style="display: none;">reset</a></div></div>' % (
                    pfield[d], d + 1) for d in range(len(pfield))]
            divs = ''.join(divs)
            divs = '<div class="chart"><div class="title">Distance to Centroid (<span id="active"></span> of <span id="total"></span> items selected.)<a href="javascript:reset(0)" class="reset" style="display: none;">reset</a></div></div>' + divs
            info['provenance_divs'] = Markup(divs)
            info["message_class"] = "success"
            if reload > 0:
                info["message"].append("Loaded fresh.")
            else:
                info["message"].append("Loading from cache. Use reload=1 to reload.")
            info["datfile"] = info["datfile_provenance"]
    # prepare some messages
    info["title"] = "FIELD_X: <em>%s</em> from <br />TABLE: <em>%s</em>" % (','.join(pfield), table)
    info["title"] = Markup(info["title"])
    info["message"] = Markup(''.join('<p>%s</p>' % m for m in info["message"] if len(m) > 0))
    info["results"] = Markup('<ul>' + ''.join('<li>%s</li>' % m for m in info["results"] if len(m) > 0) + '</ul>')
    # format the message to encode HTML characters
    info['query'] = Markup(request.args.get('query', ''))
    # Render the generic explore page, then splice the K-means specific
    # fragment in before the closing </body></html>.
    t = vis.jinja_env.get_template('explore.html')
    v1 = t.render(**info)
    t = vis.jinja_env.get_template('ml_kmeans.html')
    v2 = t.render(**info)
    v3 = v1[:-7] + v2 + v1[-7:] + '</html>'
    return Response(v3, mimetype='text/html')
| modules/ml_kmeans.py | 7,963 | create higher order transformations fit_transform from sklearn doesn't return the loadings V. Here is a hacked version X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) X_new = X * V = U * S * V^T * V = U * S transposing component matrix such that PCA_1 is in row module independent user inputs start at 0 module dependent user inputs verify essential parameter details - smell test prepare sql query transform features summary resultswrite pca matrix file preparing within cluster distances into a js arrayprovenance0:id,1:prediction result (grouping),2:actual label(shape),3:error,4:y,or features prepare some messages format the message to encode HTML characters | 673 | en | 0.665402 |
# From http://rodp.me/2015/how-to-extract-data-from-the-web.html
import time
import sys
import uuid
import json
import markdown
from collections import Counter
from requests import get
from lxml import html
from unidecode import unidecode
import urllib
import lxml.html
from readability.readability import Document
def getDoc(url):
    """Fetch *url* and heuristically extract the main article text.

    Ranks candidate parent elements by how many children of one tag they
    contain, cleans each candidate's text with a chain of string
    replacements, scores the candidates, and returns a dict with title,
    description, url, timeElapsed (ms), content (HTML) and size (kB).

    NOTE(review): as written this appears to be Python-2-only — it
    writes ``bytes`` to a text-mode file and calls ``.decode`` on the
    final string; confirm the target interpreter.
    """
    t = time.time()
    t2 = time.time()
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
    r = get(url,headers=headers)
    print("*"*30)
    print("Getting url took " + str(time.time()-t2))
    print("*"*30)
    redirectUrl = str(uuid.uuid3(uuid.NAMESPACE_DNS, str(r.url)))[0:5]
    newContent = r.content
    parsed_doc = html.fromstring(newContent)
    with open('doc.html','w') as f:
        f.write(newContent)
    # Rank parent elements by the count of their most common child tag;
    # the article body usually has many sibling <p>-like children.
    parents_with_children_counts = []
    parent_elements = parsed_doc.xpath('//body//*/..')
    for parent in parent_elements:
        children_counts = Counter([child.tag for child in parent.iterchildren()])
        parents_with_children_counts.append((parent, children_counts))
    parents_with_children_counts.sort(key=lambda x: x[1].most_common(1)[0][1], reverse=True)
    docStrings = {}
    # Only consider the top 20 candidates.
    last = len(parents_with_children_counts)
    if last > 20:
        last = 20
    t2 = time.time()
    for i in range(last):
        docString = ""
        numLines = 0
        for child in parents_with_children_counts[i][0]: # Possibly [1][0]
            tag = str(child.tag)
            #print(tag)
            if tag == 'style' or tag == 'iframe':
                continue
            if tag == 'font' or tag == 'div' or tag == 'script':
                tag = 'p'
            try:
                startTag = "<" + tag + ">"
                endTag = "</" + tag + ">"
            except:
                startTag = '<p>'
                endTag = '</p>'
            try:
                # Normalize smart quotes/dashes and strip escape artifacts
                # introduced by the json.dumps round-trip.
                str_text = child.text_content().encode('utf-8')
                #str_text = " ".join(str_text.split())
                str_text = json.dumps(str_text)
                str_text = str_text.replace('\"','').replace('\\n','\n')
                str_text = str_text.replace('\\t','').replace('\\r','')
                str_text = str_text.replace('\u0092',"'").replace('\\u00e2\\u0080\\u0099',"'").replace('\u2019',"'")
                str_text = str_text.replace('\u0093','"').replace('\u00e2\u0080\u009c','"').replace('\u00e2\u0080\u009d','"').replace('\u201c','"').replace('\u201d','"')
                str_text = str_text.replace('\u0094','"').replace('\u00e2\u0080" ','')
                for foo in range(5):
                    str_text = str_text.replace('<br> <br>','<br>')
                str_text = str_text.replace('\u0096','-').replace('\u2014','-').replace('\\u00a0',' ')
                str_text = str_text.replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ')
                str_text = str_text.replace('\\','').replace('u2026 ','').replace('u00c2','')
                newString = startTag + str_text + endTag + "\n"
                newString = str_text + "\n\n"
                # Blocklist of fragments that indicate boilerplate,
                # scripts or ads rather than article text.
                if (len(newString) > 50000 or
                    len(newString)<14 or
                    '{ "' in newString or
                    '{"' in newString or
                    "function()" in newString or
                    'else {' in newString or
                    '.js' in newString or
                    'pic.twitter' in newString or
                    '("' in newString or
                    'ajax' in newString or
                    'var ' in newString or
                    ('Advertisement' in newString and len(newString)<200) or
                    'Continue reading' in newString or
                    ('Photo' in newString and 'Credit' in newString) or
                    'window.' in newString or
                    ');' in newString or
                    '; }' in newString or
                    'CDATA' in newString or
                    '()' in newString):
                    continue
                #print(len(newString))
                if len(newString) > 50 and ':' not in newString:
                    numLines += 1
                docString += newString
            except:
                #print('error')
                pass
        # Score the candidate: lines kept times average words per <p>.
        docStrings[i] = {}
        docStrings[i]['docString'] = markdown.markdown(docString)
        docStrings[i]['word_per_p'] = float(len(docString.split())) / float(len(docStrings[i]['docString'].split('<p>')))
        docStrings[i]['numLines'] = numLines
        docStrings[i]['docString_length'] = len(docString)
        try:
            docStrings[i]['score']=numLines*docStrings[i]['word_per_p']
            #docStrings[i]['score']=1000*numLines / sum(1 for c in docString if c.isupper())
        except:
            docStrings[i]['score'] = 0
    print("*"*30)
    print("Looping took " + str(time.time()-t2))
    print("*"*30)
    with open('test.json','w') as f:
        f.write(json.dumps(docStrings,indent=2))
    # Pick the best-scoring candidate that is long and dense enough.
    bestI = 0
    bestNumLines = 0
    for i in range(len(docStrings)):
        if (docStrings[i]['word_per_p']>12 and
            docStrings[i]['score'] > bestNumLines and
            docStrings[i]['docString_length'] > 300):
            bestI = i
            bestNumLines = docStrings[i]['score']
    print("*"*24)
    print(bestI)
    print(bestNumLines)
    print("*"*24)
    docString = docStrings[bestI]['docString']
    if len(docString)<100:
        docString="<h1>There is no content on this page.</h1>"
    title = parsed_doc.xpath(".//title")[0].text_content().strip()
    try:
        description = parsed_doc.xpath(".//meta[@name='description']")[0].get('content')
    except:
        description = ""
    url = r.url
    timeElapsed = int((time.time()-t)*1000)
    docString = docString.decode('utf-8')
    for s in docString.split('\n'):
        print(len(s))
    # Approximate payload size in kB (0.7 kB fixed overhead).
    fileSize = 0.7 + float(sys.getsizeof(docString)/1000.0)
    fileSize = round(fileSize,1)
    return {'title':title,'description':description,'url':url,'timeElapsed':timeElapsed,'content':docString,'size':fileSize}
def getDoc2(url):
    """Fetch *url* and extract the main article via python-readability.

    Returns the same dict shape as getDoc: title, description (always
    empty here), url, timeElapsed (ms), content (HTML) and size (kB).
    """
    started = time.time()
    request_headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
    response = get(url, headers=request_headers)
    document = Document(response.content, url=url)
    article_html = document.summary()
    article_title = document.short_title()
    # Rewrite links so they route back through this proxy.
    article_html = article_html.replace("http", "/?url=http")
    elapsed_ms = int((time.time() - started) * 1000)
    # Approximate payload size in kB (0.7 kB fixed overhead).
    size_kb = round(0.7 + float(sys.getsizeof(article_html) / 1000.0), 1)
    return {'title': article_title,
            'description': "",
            'url': url,
            'timeElapsed': elapsed_ms,
            'content': article_html,
            'size': size_kb}
#print(getDoc('http://www.bbc.co.uk/news/entertainment-arts-34768201'))
| parseDoc.py | 7,033 | From http://rodp.me/2015/how-to-extract-data-from-the-web.html Possibly [1][0]print(tag)str_text = " ".join(str_text.split())print(len(newString))print('error')docStrings[i]['score']=1000*numLines / sum(1 for c in docString if c.isupper()) import urllib html = urllib.urlopen(url).read()print(getDoc('http://www.bbc.co.uk/news/entertainment-arts-34768201')) | 357 | en | 0.356288 |
# -*- coding: utf-8 -*-
"""Identity Services Engine deleteDeviceAdminLocalExceptionById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorC7D6Bb4Abf53F6Aa2F40B6986F58A9(object):
    """deleteDeviceAdminLocalExceptionById request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorC7D6Bb4Abf53F6Aa2F40B6986F58A9, self).__init__()
        # Compile the JSON schema once at construction time; the
        # .replace() strips the 16-space source indentation so the
        # literal parses as clean JSON.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                "id": {
                "type": "string"
                }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        # Re-raise schema violations as the SDK's MalformedRequest so
        # callers get a uniform exception type.
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| tests/models/validators/v3_0_0/jsd_c7d6bb4abf53f6aa2f40b6986f58a9.py | 2,280 | deleteDeviceAdminLocalExceptionById request schema definition.
Identity Services Engine deleteDeviceAdminLocalExceptionById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-*- coding: utf-8 -*- | 1,232 | en | 0.8704 |
# -*- coding: utf-8 -*-
u"""
Beta regression for modeling rates and proportions.
References
----------
Grün, Bettina, Ioannis Kosmidis, and Achim Zeileis. Extended beta regression
in R: Shaken, stirred, mixed, and partitioned. No. 2011-22. Working Papers in
Economics and Statistics, 2011.
Smithson, Michael, and Jay Verkuilen. "A better lemon squeezer?
Maximum-likelihood regression with beta-distributed dependent variables."
Psychological methods 11.1 (2006): 54.
"""
import numpy as np
from scipy.special import gammaln as lgamma
import patsy
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.tools.decorators import cache_readonly
from statsmodels.base.model import (
GenericLikelihoodModel, GenericLikelihoodModelResults, _LLRMixin)
from statsmodels.genmod import families
_init_example = """
Beta regression with default of logit-link for exog and log-link
for precision.
>>> mod = BetaModel(endog, exog)
>>> rslt = mod.fit()
>>> print(rslt.summary())
We can also specify a formula and a specific structure and use the
identity-link for precision.
>>> from sm.families.links import identity
>>> Z = patsy.dmatrix('~ temp', dat, return_type='dataframe')
>>> mod = BetaModel.from_formula('iyield ~ C(batch, Treatment(10)) + temp',
... dat, exog_precision=Z,
... link_precision=identity())
In the case of proportion-data, we may think that the precision depends on
the number of measurements. E.g for sequence data, on the number of
sequence reads covering a site:
>>> Z = patsy.dmatrix('~ coverage', df)
>>> formula = 'methylation ~ disease + age + gender + coverage'
>>> mod = BetaModel.from_formula(formula, df, Z)
>>> rslt = mod.fit()
"""
class BetaModel(GenericLikelihoodModel):
__doc__ = """Beta Regression.
The Model is parameterized by mean and precision. Both can depend on
explanatory variables through link functions.
Parameters
----------
endog : array_like
1d array of endogenous response variable.
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user (models specified using a formula
include an intercept by default). See `statsmodels.tools.add_constant`.
exog_precision : array_like
2d array of variables for the precision.
link : link
Any link in sm.families.links for mean, should have range in
interval [0, 1]. Default is logit-link.
link_precision : link
Any link in sm.families.links for precision, should have
range in positive line. Default is log-link.
**kwds : extra keywords
Keyword options that will be handled by super classes.
Not all general keywords will be supported in this class.
Notes
-----
Status: experimental, new in 0.13.
Core results are verified, but api can change and some extra results
specific to Beta regression are missing.
Examples
--------
{example}
See Also
--------
:ref:`links`
""".format(example=_init_example)
    def __init__(self, endog, exog, exog_precision=None,
                 link=families.links.Logit(),
                 link_precision=families.links.Log(), **kwds):
        # NOTE(review): the link defaults are instances created once at
        # class-definition time and shared across models — fine only as
        # long as link objects stay stateless; confirm.
        etmp = np.array(endog)
        # endog must lie strictly inside (0, 1) for the Beta likelihood.
        # NOTE(review): assert is stripped under ``python -O``.
        assert np.all((0 < etmp) & (etmp < 1))
        if exog_precision is None:
            # Constant precision: single column of ones.
            extra_names = ['precision']
            exog_precision = np.ones((len(endog), 1), dtype='f')
        else:
            # One extra parameter name per precision regressor; use the
            # column names when a DataFrame-like object is passed.
            extra_names = ['precision-%s' % zc for zc in
                           (exog_precision.columns
                            if hasattr(exog_precision, 'columns')
                            else range(1, exog_precision.shape[1] + 1))]
        # Must be set before super().__init__ so the precision parameters
        # are registered with the generic likelihood machinery.
        kwds['extra_params_names'] = extra_names
        super(BetaModel, self).__init__(endog, exog,
                                        exog_precision=exog_precision,
                                        **kwds)
        self.link = link
        self.link_precision = link_precision
        # not needed, handled by super:
        # self.exog_precision = exog_precision
        # inherited df do not account for precision params
        self.nobs = self.endog.shape[0]
        self.df_model = self.nparams - 1
        self.df_resid = self.nobs - self.nparams
        assert len(self.exog_precision) == len(self.endog)
        # "oim": use the observed information matrix for the Hessian.
        self.hess_type = "oim"
        if 'exog_precision' not in self._init_keys:
            self._init_keys.extend(['exog_precision'])
        self._init_keys.extend(['link', 'link_precision'])
        # Dropped when constructing the null model (e.g. for LR tests).
        self._null_drop_keys = ['exog_precision']
        self.results_class = BetaResults
        self.results_class_wrapper = BetaResultsWrapper
@classmethod
def from_formula(cls, formula, data, exog_precision_formula=None,
*args, **kwargs):
if exog_precision_formula is not None:
if 'subset' in kwargs:
d = data.ix[kwargs['subset']]
Z = patsy.dmatrix(exog_precision_formula, d)
else:
Z = patsy.dmatrix(exog_precision_formula, data)
kwargs['exog_precision'] = Z
return super(BetaModel, cls).from_formula(formula, data, *args,
**kwargs)
def _get_exogs(self):
return (self.exog, self.exog_precision)
def predict(self, params, exog=None, exog_precision=None, which="mean"):
"""Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision parameter.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linear" : linear predictor for the mean function
- "linear-precision" : linear predictor for the precision parameter
Returns
-------
ndarray, predicted values
"""
# compatibility with old names and misspelling
if which == "linpred":
which = "linear"
if which in ["linpred_precision", "linear_precision"]:
which = "linear-precision"
k_mean = self.exog.shape[1]
if which in ["mean", "linear"]:
if exog is None:
exog = self.exog
params_mean = params[:k_mean]
# Zparams = params[k_mean:]
linpred = np.dot(exog, params_mean)
if which == "mean":
mu = self.link.inverse(linpred)
res = mu
else:
res = linpred
elif which in ["precision", "linear-precision"]:
if exog_precision is None:
exog_precision = self.exog_precision
params_prec = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_prec)
if which == "precision":
phi = self.link_precision.inverse(linpred_prec)
res = phi
else:
res = linpred_prec
elif which == "var":
res = self._predict_var(
params,
exog=exog,
exog_precision=exog_precision
)
else:
raise ValueError('which = %s is not available' % which)
return res
def _predict_precision(self, params, exog_precision=None):
"""Predict values for precision function for given exog_precision.
Parameters
----------
params : array_like
The model parameters.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted precision.
"""
if exog_precision is None:
exog_precision = self.exog_precision
k_mean = self.exog.shape[1]
params_precision = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_precision)
phi = self.link_precision.inverse(linpred_prec)
return phi
def _predict_var(self, params, exog=None, exog_precision=None):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted conditional variance.
"""
mean = self.predict(params, exog=exog)
precision = self._predict_precision(params,
exog_precision=exog_precision)
var_endog = mean * (1 - mean) / (1 + precision)
return var_endog
def loglikeobs(self, params):
"""
Loglikelihood for observations of the Beta regressionmodel.
Parameters
----------
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
return self._llobs(self.endog, self.exog, self.exog_precision, params)
def _llobs(self, endog, exog, exog_precision, params):
"""
Loglikelihood for observations with data arguments.
Parameters
----------
endog : ndarray
1d array of endogenous variable.
exog : ndarray
2d array of explanatory variables.
exog_precision : ndarray
2d array of explanatory variables for precision.
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
y, X, Z = endog, exog, exog_precision
nz = Z.shape[1]
params_mean = params[:-nz]
params_prec = params[-nz:]
linpred = np.dot(X, params_mean)
linpred_prec = np.dot(Z, params_prec)
mu = self.link.inverse(linpred)
phi = self.link_precision.inverse(linpred_prec)
eps_lb = 1e-200
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ll = (lgamma(phi) - lgamma(alpha)
- lgamma(beta)
+ (mu * phi - 1) * np.log(y)
+ (((1 - mu) * phi) - 1) * np.log(1 - y))
return ll
def score(self, params):
"""
Returns the score vector of the log-likelihood.
http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score : ndarray
First derivative of loglikelihood function.
"""
sf1, sf2 = self.score_factor(params)
d1 = np.dot(sf1, self.exog)
d2 = np.dot(sf2, self.exog_precision)
return np.concatenate((d1, d2))
def _score_check(self, params):
"""Inherited score with finite differences
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score based on numerical derivatives
"""
return super(BetaModel, self).score(params)
def score_factor(self, params, endog=None):
"""Derivative of loglikelihood function w.r.t. linear predictors.
This needs to be multiplied with the exog to obtain the score_obs.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
Notes
-----
The score_obs can be obtained from score_factor ``sf`` using
- d1 = sf[:, :1] * exog
- d2 = sf[:, 1:2] * exog_precision
"""
from scipy import special
digamma = special.psi
y = self.endog if endog is None else endog
X, Z = self.exog, self.exog_precision
nz = Z.shape[1]
Xparams = params[:-nz]
Zparams = params[-nz:]
# NO LINKS
mu = self.link.inverse(np.dot(X, Xparams))
phi = self.link_precision.inverse(np.dot(Z, Zparams))
eps_lb = 1e-200 # lower bound for evaluating digamma, avoids -inf
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ystar = np.log(y / (1. - y))
dig_beta = digamma(beta)
mustar = digamma(alpha) - dig_beta
yt = np.log(1 - y)
mut = dig_beta - digamma(phi)
t = 1. / self.link.deriv(mu)
h = 1. / self.link_precision.deriv(phi)
#
sf1 = phi * t * (ystar - mustar)
sf2 = h * (mu * (ystar - mustar) + yt - mut)
return (sf1, sf2)
    def score_hessian_factor(self, params, return_hessian=False,
                             observed=True):
        """Derivatives of loglikelihood function w.r.t. linear predictors.
        This calculates score and hessian factors at the same time, because
        there is a large overlap in calculations.
        Parameters
        ----------
        params : ndarray
            Parameter at which score is evaluated.
        return_hessian : bool
            If False, then only score_factors are returned
            If True, the both score and hessian factors are returned
        observed : bool
            If True, then the observed Hessian is returned (default).
            If False, then the expected information matrix is returned.
        Returns
        -------
        score_factor : ndarray, 2-D
            A 2d weight vector used in the calculation of the score_obs.
        (-jbb, -jbg, -jgg) : tuple
            A tuple with 3 hessian factors, corresponding to the upper
            triangle of the Hessian matrix.
            TODO: check why there are minus
        """
        from scipy import special
        digamma = special.psi
        y, X, Z = self.endog, self.exog, self.exog_precision
        nz = Z.shape[1]
        # joint parameter vector: mean coefficients first, precision last
        Xparams = params[:-nz]
        Zparams = params[-nz:]
        # NO LINKS
        mu = self.link.inverse(np.dot(X, Xparams))
        phi = self.link_precision.inverse(np.dot(Z, Zparams))
        # We need to prevent mu = 0 and (1-mu) = 0 in digamma call
        eps_lb = 1e-200  # lower bound for evaluating digamma, avoids -inf
        alpha = np.clip(mu * phi, eps_lb, np.inf)
        beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
        # ystar is the logit of y; mustar/mut are the digamma-based
        # centering terms paired with ystar and yt below
        ystar = np.log(y / (1. - y))
        dig_beta = digamma(beta)
        mustar = digamma(alpha) - dig_beta
        yt = np.log(1 - y)
        mut = dig_beta - digamma(phi)
        # chain-rule factors for the mean and precision link functions
        t = 1. / self.link.deriv(mu)
        h = 1. / self.link_precision.deriv(phi)
        ymu_star = (ystar - mustar)
        sf1 = phi * t * ymu_star
        sf2 = h * (mu * ymu_star + yt - mut)
        if return_hessian:
            trigamma = lambda x: special.polygamma(1, x)  # noqa
            trig_beta = trigamma(beta)
            var_star = trigamma(alpha) + trig_beta
            var_t = trig_beta - trigamma(phi)
            c = - trig_beta
            # second derivatives of the two links, used only by the
            # observed-information correction terms below
            s = self.link.deriv2(mu)
            q = self.link_precision.deriv2(phi)
            jbb = (phi * t) * var_star
            if observed:
                jbb += s * t**2 * ymu_star
            jbb *= t * phi
            jbg = phi * t * h * (mu * var_star + c)
            if observed:
                jbg -= ymu_star * t * h
            jgg = h**2 * (mu**2 * var_star + 2 * mu * c + var_t)
            if observed:
                jgg += (mu * ymu_star + yt - mut) * q * h**3  # **3 ?
            return (sf1, sf2), (-jbb, -jbg, -jgg)
        else:
            return (sf1, sf2)
def score_obs(self, params):
"""
Score, first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation.
"""
sf1, sf2 = self.score_factor(params)
# elementwise product for each row (observation)
d1 = sf1[:, None] * self.exog
d2 = sf2[:, None] * self.exog_precision
return np.column_stack((d1, d2))
def hessian(self, params, observed=None):
"""Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
Parameter at which Hessian is evaluated.
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix.
"""
if self.hess_type == "eim":
observed = False
else:
observed = True
_, hf = self.score_hessian_factor(params, return_hessian=True,
observed=observed)
hf11, hf12, hf22 = hf
# elementwise product for each row (observation)
d11 = (self.exog.T * hf11).dot(self.exog)
d12 = (self.exog.T * hf12).dot(self.exog_precision)
d22 = (self.exog_precision.T * hf22).dot(self.exog_precision)
return np.block([[d11, d12], [d12.T, d22]])
def hessian_factor(self, params, observed=True):
"""Derivatives of loglikelihood function w.r.t. linear predictors.
"""
_, hf = self.score_hessian_factor(params, return_hessian=True,
observed=observed)
return hf
    def _start_params(self, niter=2, return_intermediate=False):
        """find starting values
        Parameters
        ----------
        niter : int
            Number of iterations of WLS approximation
        return_intermediate : bool
            If False (default), then only the preliminary parameter estimate
            will be returned.
            If True, then also the two results instances of the WLS estimate
            for mean parameters and for the precision parameters will be
            returned.
        Returns
        -------
        sp : ndarray
            start parameters for the optimization
        res_m2 : results instance (optional)
            Results instance for the WLS regression of the mean function.
        res_p2 : results instance (optional)
            Results instance for the WLS regression of the precision function.
        Notes
        -----
        This calculates a few iteration of weighted least squares. This is not
        a full scoring algorithm.
        """
        # WLS of the mean equation uses the implied weights (inverse variance),
        # WLS for the precision equations uses weights that only take
        # account of the link transformation of the precision endog.
        from statsmodels.regression.linear_model import OLS, WLS
        # initial pass: plain OLS on the link-transformed response
        res_m = OLS(self.link(self.endog), self.exog).fit()
        fitted = self.link.inverse(res_m.fittedvalues)
        resid = self.endog - fitted
        # per-observation precision proxy; residuals are floored at 1e-2 to
        # keep the ratio from exploding for near-perfect fits
        prec_i = fitted * (1 - fitted) / np.maximum(np.abs(resid), 1e-2)**2 - 1
        res_p = OLS(self.link_precision(prec_i), self.exog_precision).fit()
        prec_fitted = self.link_precision.inverse(res_p.fittedvalues)
        # sp = np.concatenate((res_m.params, res_p.params))
        # refine both equations, reusing each pass's fitted values to build
        # the next pass's weights
        for _ in range(niter):
            y_var_inv = (1 + prec_fitted) / (fitted * (1 - fitted))
            # y_var = fitted * (1 - fitted) / (1 + prec_fitted)
            ylink_var_inv = y_var_inv / self.link.deriv(fitted)**2
            res_m2 = WLS(self.link(self.endog), self.exog,
                         weights=ylink_var_inv).fit()
            fitted = self.link.inverse(res_m2.fittedvalues)
            resid2 = self.endog - fitted
            prec_i2 = (fitted * (1 - fitted) /
                       np.maximum(np.abs(resid2), 1e-2)**2 - 1)
            # weights only correct for the precision link transformation
            w_p = 1. / self.link_precision.deriv(prec_fitted)**2
            res_p2 = WLS(self.link_precision(prec_i2), self.exog_precision,
                         weights=w_p).fit()
            prec_fitted = self.link_precision.inverse(res_p2.fittedvalues)
        sp2 = np.concatenate((res_m2.params, res_p2.params))
        if return_intermediate:
            return sp2, res_m2, res_p2
        return sp2
def fit(self, start_params=None, maxiter=1000, disp=False,
method='bfgs', **kwds):
"""
Fit the model by maximum likelihood.
Parameters
----------
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
maxiter : integer
The maximum number of iterations
disp : bool
Show convergence stats.
method : str
The optimization method to use.
kwds :
Keyword arguments for the optimizer.
Returns
-------
BetaResults instance.
"""
if start_params is None:
start_params = self._start_params()
# # http://www.ime.usp.br/~sferrari/beta.pdf suggests starting phi
# # on page 8
if "cov_type" in kwds:
# this is a workaround because we cannot tell super to use eim
if kwds["cov_type"].lower() == "eim":
self.hess_type = "eim"
del kwds["cov_type"]
else:
self.hess_type = "oim"
res = super(BetaModel, self).fit(start_params=start_params,
maxiter=maxiter, method=method,
disp=disp, **kwds)
if not isinstance(res, BetaResultsWrapper):
# currently GenericLikelihoodModel doe not add wrapper
res = BetaResultsWrapper(res)
return res
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
not verified yet
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
link = self.link
lin_pred = self.predict(params, which="linear")
idl = link.inverse_deriv(lin_pred)
dmat = self.exog * idl[:, None]
return np.column_stack((dmat, np.zeros(self.exog_precision.shape)))
def _deriv_score_obs_dendog(self, params):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
"""
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
def f(y):
if y.ndim == 2 and y.shape[1] == 1:
y = y[:, 0]
sf = self.score_factor(params, endog=y)
return np.column_stack(sf)
dsf = _approx_fprime_cs_scalar(self.endog[:, None], f)
# deriv is 2d vector
d1 = dsf[:, :1] * self.exog
d2 = dsf[:, 1:2] * self.exog_precision
return np.column_stack((d1, d2))
# code duplication with results class
def get_distribution_params(self, params, exog=None, exog_precision=None):
"""
Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for mean.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
"""
mean = self.predict(params, exog=exog)
precision = self.predict(params, exog_precision=exog_precision,
which="precision")
return precision * mean, precision * (1 - mean)
def get_distribution(self, params, exog=None, exog_precision=None):
"""
Return a instance of the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for mean.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
from scipy import stats
args = self.get_distribution_params(params, exog=exog,
exog_precision=exog_precision)
distr = stats.beta(*args)
return distr
class BetaResults(GenericLikelihoodModelResults, _LLRMixin):
    """Results class for Beta regression
    This class inherits from GenericLikelihoodModelResults and not all
    inherited methods might be appropriate in this case.
    """

    # GenericLikeihoodmodel doesn't define fittedvalues, residuals and similar
    @cache_readonly
    def fittedvalues(self):
        """In-sample predicted mean, conditional expectation."""
        return self.model.predict(self.params)

    @cache_readonly
    def fitted_precision(self):
        """In-sample predicted precision"""
        return self.model.predict(self.params, which="precision")

    @cache_readonly
    def resid(self):
        """Response residual"""
        return self.model.endog - self.fittedvalues

    @cache_readonly
    def resid_pearson(self):
        """Pearson standardized residual, response residual scaled by the
        model-implied conditional standard deviation.
        """
        std = np.sqrt(self.model.predict(self.params, which="var"))
        return self.resid / std

    @cache_readonly
    def prsquared(self):
        """Cox-Snell Likelihood-Ratio pseudo-R-squared.
        1 - exp((llnull - llf) * (2 / nobs))
        """
        return self.pseudo_rsquared(kind="lr")

    def get_distribution_params(self, exog=None, exog_precision=None,
                                transform=True):
        """
        Return distribution parameters converted from model prediction.
        Parameters
        ----------
        exog : array_like
            Array of predictor variables for mean.
        exog_precision : array_like
            Array of predictor variables for precision.
        transform : bool
            If transform is True and formulas have been used, then predictor
            ``exog`` is passed through the formula processing. Default is True.
        Returns
        -------
        (alpha, beta) : tuple of ndarrays
            Parameters for the scipy distribution to evaluate predictive
            distribution.
        """
        mean = self.predict(exog=exog, transform=transform)
        precision = self.predict(exog_precision=exog_precision,
                                 which="precision", transform=transform)
        return precision * mean, precision * (1 - mean)

    def get_distribution(self, exog=None, exog_precision=None, transform=True):
        """
        Return a instance of the predictive distribution.
        Parameters
        ----------
        exog : array_like
            Array of predictor variables for mean.
        exog_precision : array_like
            Array of predictor variables for precision.
        transform : bool
            If transform is True and formulas have been used, then predictor
            ``exog`` is passed through the formula processing. Default is True.
        Returns
        -------
        Instance of a scipy frozen distribution based on estimated
        parameters.
        See Also
        --------
        predict
        Notes
        -----
        This function delegates to the predict method to handle exog and
        exog_precision, which in turn makes any required transformations.
        Due to the behavior of ``scipy.stats.distributions objects``, the
        returned random number generator must be called with ``gen.rvs(n)``
        where ``n`` is the number of observations in the data set used
        to fit the model. If any other value is used for ``n``, misleading
        results will be produced.
        """
        from scipy import stats
        args = self.get_distribution_params(exog=exog,
                                            exog_precision=exog_precision,
                                            transform=transform)
        # hand scipy plain ndarrays; predict may return pandas Series
        args = (np.asarray(arg) for arg in args)
        distr = stats.beta(*args)
        return distr

    def bootstrap(self, *args, **kwargs):
        """Not supported for Beta regression results."""
        raise NotImplementedError
class BetaResultsWrapper(lm.RegressionResultsWrapper):
    # No extra wrapping rules beyond the regression-results defaults.
    pass
# Standard statsmodels wiring: register the wrapper class for BetaResults.
wrap.populate_wrapper(BetaResultsWrapper,
                      BetaResults)
| statsmodels/othermod/betareg.py | 30,504 | Results class for Beta regression
This class inherits from GenericLikelihoodModelResults and not all
inherited methods might be appropriate in this case.
Derivative of the expected endog with respect to the parameters.
not verified yet
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog.
Loglikelihood for observations with data arguments.
Parameters
----------
endog : ndarray
1d array of endogenous variable.
exog : ndarray
2d array of explanatory variables.
exog_precision : ndarray
2d array of explanatory variables for precision.
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
Predict values for precision function for given exog_precision.
Parameters
----------
params : array_like
The model parameters.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted precision.
predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted conditional variance.
Inherited score with finite differences
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score based on numerical derivatives
find starting values
Parameters
----------
niter : int
Number of iterations of WLS approximation
return_intermediate : bool
If False (default), then only the preliminary parameter estimate
will be returned.
If True, then also the two results instances of the WLS estimate
for mean parameters and for the precision parameters will be
returned.
Returns
-------
sp : ndarray
start parameters for the optimization
res_m2 : results instance (optional)
Results instance for the WLS regression of the mean function.
res_p2 : results instance (optional)
Results instance for the WLS regression of the precision function.
Notes
-----
This calculates a few iteration of weighted least squares. This is not
a full scoring algorithm.
Fit the model by maximum likelihood.
Parameters
----------
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
maxiter : integer
The maximum number of iterations
disp : bool
Show convergence stats.
method : str
The optimization method to use.
kwds :
Keyword arguments for the optimizer.
Returns
-------
BetaResults instance.
In-sample predicted precision
In-sample predicted mean, conditional expectation.
Return a instance of the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for mean.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
Return a instance of the predictive distribution.
Parameters
----------
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for mean.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
Instance of a scipy frozen distribution based on estimated
parameters.
See Also
--------
predict
Notes
-----
This function delegates to the predict method to handle exog and
exog_precision, which in turn makes any required transformations.
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for mean.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
Return distribution parameters converted from model prediction.
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
transform : bool
If transform is True and formulas have been used, then predictor
``exog`` is passed through the formula processing. Default is True.
Returns
-------
(alpha, beta) : tuple of ndarrays
Parameters for the scipy distribution to evaluate predictive
distribution.
Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
Parameter at which Hessian is evaluated.
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix.
Derivatives of loglikelihood function w.r.t. linear predictors.
Loglikelihood for observations of the Beta regressionmodel.
Parameters
----------
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision parameter.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linear" : linear predictor for the mean function
- "linear-precision" : linear predictor for the precision parameter
Returns
-------
ndarray, predicted values
Cox-Snell Likelihood-Ratio pseudo-R-squared.
1 - exp((llnull - .llf) * (2 / nobs))
Response residual
Pearson standardize residual
Returns the score vector of the log-likelihood.
http://www.tandfonline.com/doi/pdf/10.1080/00949650903389993
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score : ndarray
First derivative of loglikelihood function.
Derivative of loglikelihood function w.r.t. linear predictors.
This needs to be multiplied with the exog to obtain the score_obs.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
Notes
-----
The score_obs can be obtained from score_factor ``sf`` using
- d1 = sf[:, :1] * exog
- d2 = sf[:, 1:2] * exog_precision
Derivatives of loglikelihood function w.r.t. linear predictors.
This calculates score and hessian factors at the same time, because
there is a large overlap in calculations.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
return_hessian : bool
If False, then only score_factors are returned
If True, the both score and hessian factors are returned
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
score_factor : ndarray, 2-D
A 2d weight vector used in the calculation of the score_obs.
(-jbb, -jbg, -jgg) : tuple
A tuple with 3 hessian factors, corresponding to the upper
triangle of the Hessian matrix.
TODO: check why there are minus
Score, first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation.
Beta regression for modeling rates and proportions.
References
----------
Grün, Bettina, Ioannis Kosmidis, and Achim Zeileis. Extended beta regression
in R: Shaken, stirred, mixed, and partitioned. No. 2011-22. Working Papers in
Economics and Statistics, 2011.
Smithson, Michael, and Jay Verkuilen. "A better lemon squeezer?
Maximum-likelihood regression with beta-distributed dependent variables."
Psychological methods 11.1 (2006): 54.
-*- coding: utf-8 -*- not needed, handled by super: self.exog_precision = exog_precision inherited df do not account for precision params compatibility with old names and misspelling Zparams = params[k_mean:] NO LINKS lower bound for evaluating digamma, avoids -inf NO LINKS We need to prevent mu = 0 and (1-mu) = 0 in digamma call lower bound for evaluating digamma, avoids -inf noqa **3 ? elementwise product for each row (observation) elementwise product for each row (observation) WLS of the mean equation uses the implied weights (inverse variance), WLS for the precision equations uses weights that only take account of the link transformation of the precision endog. sp = np.concatenate((res_m.params, res_p.params)) y_var = fitted * (1 - fitted) / (1 + prec_fitted) http://www.ime.usp.br/~sferrari/beta.pdf suggests starting phi on page 8 this is a workaround because we cannot tell super to use eim currently GenericLikelihoodModel doe not add wrapper deriv is 2d vector code duplication with results class GenericLikeihoodmodel doesn't define fittedvalues, residuals and similar | 10,603 | en | 0.595582 |
import serial
import csv
import os

# Open the Arduino's serial port (adjust COM port/baud rate as needed).
serialPort = serial.Serial("COM10", baudrate=115200)

try:
    # Archive any previous capture so a fresh output.csv is started below.
    os.rename('output.csv', 'ALTERAR_MEU_NOME.csv')
except OSError:
    # No previous output.csv to archive (IOError is an alias of OSError).
    pass

# Log every line arriving from the serial port, forever.
while True:
    arduinoData = serialPort.readline().decode("ascii")
    print(arduinoData)
    # Append the line (already newline-terminated) to the CSV log.  The
    # context manager guarantees the handle is closed each iteration; the
    # original opened and closed the file by hand, leaking the handle if
    # the write raised.
    with open("output.csv", "a") as file:
        file.write(arduinoData)
| receiveGeneratorData.py | 517 | add the data to the fileappend the data to the filewrite data with a newlineclose out the file | 94 | en | 0.71678 |
###############################################################################
#
# DONE:
#
# 1. READ the code below.
# 2. TRACE (by hand) the execution of the code,
# predicting what will get printed.
# 3. Run the code and compare your prediction to what actually was printed.
# 4. Decide whether you are 100% clear on the CONCEPTS and the NOTATIONS for:
# -- DEFINING a function that has PARAMETERS
# -- CALLING a function with actual ARGUMENTS.
#
# *****************************************************************************
# If you are NOT 100% clear on the above concepts,
# ask your instructor or a student assistant about them during class.
# *****************************************************************************
#
# After you have completed the above, mark this _TODO_ as DONE.
#
###############################################################################
def main():
    """Demonstrate the greeting helpers with a fairy-tale cast."""
    hello("Snow White")
    goodbye("Bashful")
    for dwarf in ("Grumpy", "Sleepy"):
        hello(dwarf)
    hello_and_goodbye("Magic Mirror", "Cruel Queen")
def hello(friend):
    """Print a one-line greeting addressed to ``friend``."""
    print(f"Hello, {friend} - how are things?")
def goodbye(friend):
    """Print a farewell to ``friend`` followed by two indented sign-offs."""
    print("Goodbye,", friend, '- see you later!')
    print('   Ciao!')
    print('   Bai bai!')
def hello_and_goodbye(person1, person2):
    """Greet ``person1`` (via hello), then bid ``person2`` farewell (via goodbye)."""
    hello(person1)
    goodbye(person2)
# Run the demonstration when the module is executed.
main()
| src/m1r_functions.py | 1,354 | DONE: 1. READ the code below. 2. TRACE (by hand) the execution of the code, predicting what will get printed. 3. Run the code and compare your prediction to what actually was printed. 4. Decide whether you are 100% clear on the CONCEPTS and the NOTATIONS for: -- DEFINING a function that has PARAMETERS -- CALLING a function with actual ARGUMENTS. ***************************************************************************** If you are NOT 100% clear on the above concepts, ask your instructor or a student assistant about them during class. ***************************************************************************** After you have completed the above, mark this _TODO_ as DONE. | 721 | en | 0.819633 |
"""
Django settings for YoutubeFun project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xb103+!$k5uhqga^9$^g=t^bw-3zo-6j4c+cf8x8mflhq-*qm2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Youtube_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'YoutubeFun.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'YoutubeFun.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
| YoutubeFun/settings.py | 2,870 | Django settings for YoutubeFun project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.8/ref/settings/databases Internationalization https://docs.djangoproject.com/en/1.8/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.8/howto/static-files/ | 895 | en | 0.653791 |
import urllib.request
import os
import random
import socket
def url_open(url):
    """Fetch *url* through a randomly chosen HTTP proxy, spoofing a browser
    User-Agent, and return the raw response bytes."""
    ua = ('Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 '
          '(KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36')
    # proxy pool
    proxy_pool = ['60.251.63.159:8080', '118.180.15.152:8102',
                  '119.6.136.122:80', '183.61.71.112:8888']
    picked = random.choice(proxy_pool)
    print(picked)
    opener = urllib.request.build_opener(
        urllib.request.ProxyHandler({'http': picked}))
    opener.addheaders = [('User-Agent', ua)]
    urllib.request.install_opener(opener)
    # request headers
    request = urllib.request.Request(url, headers={'User-Agent': ua})
    response = urllib.request.urlopen(request)
    content = response.read()
    print(url)
    return content
def get_page(url):
    """Return the current comment-page number scraped from the page markup
    (the digits following the 'current-comment-page' marker)."""
    page_html = url_open(url).decode('utf-8')
    start = page_html.find('current-comment-page') + 23
    end = page_html.find(']', start)
    return page_html[start:end]
def find_imgs(url):
    """Collect the ``.jpg`` addresses referenced by ``img src=`` tags on *url*.

    Returns a list of address strings (possibly empty).  The 255-character
    scan window guards against runaway matches when a tag has no ``.jpg``.
    """
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)
        if b != -1:
            img_addrs.append(html[a + 9:b + 4])
        else:
            # no .jpg nearby: skip past this tag and keep scanning
            b = a + 9
        a = html.find('img src=', b)
    # Bug fix: this return was commented out, so the function returned None
    # and save_imgs() crashed iterating over it.
    return img_addrs
def save_imgs(folder, img_addrs):
    """Download each address in *img_addrs* into the current directory.

    Failures (timeouts, dead proxies, bad URLs) are skipped so one broken
    link does not abort the whole batch.
    """
    socket.setdefaulttimeout(3)  # keep a stalled proxy from hanging forever
    for each in img_addrs:
        try:
            filename = each.split('/')[-1]
            # Bug fix: download first, then open the file -- the original
            # opened the file before fetching, leaving empty files behind
            # whenever the download failed.
            img = url_open(each)
            with open(filename, 'wb') as f:
                f.write(img)
        except Exception:
            # best effort: skip unreachable images
            continue
def download_mm(folder='ooxx', pages=10):
    """Download *pages* consecutive pages of images from jandan.net/ooxx
    into *folder* (created if missing)."""
    os.makedirs(folder, exist_ok=True)  # robustness: tolerate reruns
    os.chdir(folder)
    url = 'http://jandan.net/ooxx/'
    # fetch the newest page number
    latest = int(get_page(url))
    for i in range(pages):
        # Bug fix: the original mutated page_num in place (page_num -= i),
        # so offsets compounded (P, P-1, P-3, P-6, ...) and pages were
        # skipped; compute each page from the fixed starting number.
        page_url = url + 'page-' + str(latest - i) + '#comments'
        # scrape the image addresses on the page
        img_addrs = find_imgs(page_url)
        # save the images
        save_imgs(folder, img_addrs)
if __name__ == '__main__':
    # Script entry point: download the default 10 newest pages into ./ooxx.
    download_mm()
| mmParse.py | 2,181 | 代理头文件 for each in img_addrs: print(each)return img_addrs拿到所在页面查询页面中的图片保存图片 | 77 | zh | 0.623795 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING, Any
# Bokeh imports
from ..models import glyphs
from ._decorators import glyph_method, marker_method
if TYPE_CHECKING:
from ..models.canvas import CoordinateMapping
from ..models.plots import Plot
from ..models.renderers import GlyphRenderer
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
"GlyphAPI",
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class GlyphAPI:
    """ Provides the glyph-adding methods (``line``, ``circle``, ``vbar``,
    etc.) that are mixed into plotting figures.

    Each method below is a thin stub: the ``@glyph_method`` and
    ``@marker_method`` decorators generate the actual implementation (and
    most of the docstring) from the corresponding glyph model class, so the
    bodies here contain only usage examples or ``pass``.

    """

    @property
    def plot(self) -> Plot | None:
        # The plot that renderers created by this API are added to
        # (None when unbound).
        return self._parent

    @property
    def coordinates(self) -> CoordinateMapping | None:
        # Optional coordinate mapping applied to created renderers.
        return self._coordinates

    def __init__(self, parent: Plot | None = None, coordinates: CoordinateMapping | None = None) -> None:
        self._parent = parent
        self._coordinates = coordinates

    @glyph_method(glyphs.AnnularWedge)
    def annular_wedge(self, **kwargs):
        pass

    @glyph_method(glyphs.Annulus)
    def annulus(self, **kwargs):
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
                             inner_radius=0.2, outer_radius=0.5)

                show(plot)

        """

    @glyph_method(glyphs.Arc)
    def arc(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass

    @marker_method()
    def asterisk(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")

                show(plot)

        """

    @glyph_method(glyphs.Bezier)
    def bezier(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass

    @glyph_method(glyphs.Circle)
    def circle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
            defaults to |data units|.

        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)

                show(plot)

        """

    @marker_method()
    def circle_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
                                  color="#FB8072", fill_alpha=0.2, line_width=2)

                show(plot)

        """

    @marker_method()
    def circle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,
                                color="#FB8072", fill_color=None)

                show(plot)

        """

    @marker_method()
    def circle_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
                              color="#DD1C77", fill_alpha=0.2)

                show(plot)

        """

    @marker_method()
    def circle_y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,
                              color="#DD1C77", fill_alpha=0.2)

                show(plot)

        """

    @marker_method()
    def cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
                           color="#E6550D", line_width=2)

                show(plot)

        """

    @marker_method()
    def dash(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                          color="#99D594", line_width=2)

                show(plot)

        """

    @marker_method()
    def diamond(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
                             color="#1C9099", line_width=2)

                show(plot)

        """

    @marker_method()
    def diamond_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
                                   color="#386CB0", fill_color=None, line_width=2)

                show(plot)

        """

    @marker_method()
    def diamond_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
                                 color="#386CB0", fill_color=None)

                show(plot)

        """

    @marker_method()
    def dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")

                show(plot)

        """

    @glyph_method(glyphs.HArea)
    def harea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],
                           fill_color="#99D594")

                show(plot)

        """

    @glyph_method(glyphs.HBar)
    def hbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")

                show(plot)

        """

    @glyph_method(glyphs.Ellipse)
    def ellipse(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
                             color="#386CB0", fill_color=None, line_width=2)

                show(plot)

        """

    @marker_method()
    def hex(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")

                show(plot)

        """

    @marker_method()
    def hex_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],
                             color="#74ADD1", fill_color=None)

                show(plot)

        """

    @glyph_method(glyphs.HexTile)
    def hex_tile(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300, match_aspect=True)
                plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")

                show(plot)

        """

    @glyph_method(glyphs.Image)
    def image(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
            exception will be raised. If neither is passed, then the ``Greys9``
            palette will be used as a default.

        """

    @glyph_method(glyphs.ImageRGBA)
    def image_rgba(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
            values (encoded as 32-bit integers).

        """

    @glyph_method(glyphs.ImageURL)
    def image_url(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass

    @marker_method()
    def inverted_triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")

                show(plot)

        """

    @glyph_method(glyphs.Line)
    def line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                p = figure(title="line", width=300, height=300)
                p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])

                show(p)

        """

    @glyph_method(glyphs.MultiLine)
    def multi_line(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            For this glyph, the data is not simply an array of scalars, it is an
            "array of arrays".

        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                p = figure(width=300, height=300)
                p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
                             color=['red','green'])

                show(p)

        """

    @glyph_method(glyphs.MultiPolygons)
    def multi_polygons(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            For this glyph, the data is not simply an array of scalars, it is a
            nested array.

        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                p = figure(width=300, height=300)
                p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],
                                 ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],
                                 color=['red', 'green'])

                show(p)

        """

    @glyph_method(glyphs.Oval)
    def oval(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,
                          angle=-0.7, color="#1D91C0")

                show(plot)

        """

    @glyph_method(glyphs.Patch)
    def patch(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                p = figure(width=300, height=300)
                p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")

                show(p)

        """

    @glyph_method(glyphs.Patches)
    def patches(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            For this glyph, the data is not simply an array of scalars, it is an
            "array of arrays".

        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                p = figure(width=300, height=300)
                p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
                          color=["#43a2ca", "#a8ddb5"])

                show(p)

        """

    @marker_method()
    def plus(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")

                show(plot)

        """

    @glyph_method(glyphs.Quad)
    def quad(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
                          right=[1.2, 2.5, 3.7], color="#B3DE69")

                show(plot)

        """

    @glyph_method(glyphs.Quadratic)
    def quadratic(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass

    @glyph_method(glyphs.Ray)
    def ray(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
                         line_width=2)

                show(plot)

        """

    @glyph_method(glyphs.Rect)
    def rect(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
                          width_units="screen", height_units="screen")

                show(plot)

        """

    @glyph_method(glyphs.Step)
    def step(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")

                show(plot)

        """

    @glyph_method(glyphs.Segment)
    def segment(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],
                             x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],
                             color="#F4A582", line_width=3)

                show(plot)

        """

    @marker_method()
    def square(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")

                show(plot)

        """

    @marker_method()
    def square_cross(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                  color="#7FC97F",fill_color=None, line_width=2)

                show(plot)

        """

    @marker_method()
    def square_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                color="#7FC97F", fill_color=None)

                show(plot)

        """

    @marker_method()
    def square_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                color="#7FC97F",fill_color=None, line_width=2)

                show(plot)

        """

    @marker_method()
    def square_x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                              color="#FDAE6B",fill_color=None, line_width=2)

                show(plot)

        """

    @marker_method()
    def star(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.star(x=[1, 2, 3], y=[1, 2, 3], size=20,
                          color="#1C9099", line_width=2)

                show(plot)

        """

    @marker_method()
    def star_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.star_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
                              color="#386CB0", fill_color=None, line_width=2)

                show(plot)

        """

    @glyph_method(glyphs.Text)
    def text(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        .. note::
            The location and angle of the text relative to the ``x``, ``y`` coordinates
            is indicated by the alignment and baseline text properties.

        """

    @marker_method()
    def triangle(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                              color="#99D594", line_width=2)

                show(plot)

        """

    @marker_method()
    def triangle_dot(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                  color="#99D594", fill_color=None)

                show(plot)

        """

    @marker_method()
    def triangle_pin(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
                                  color="#99D594", line_width=2)

                show(plot)

        """

    @glyph_method(glyphs.VArea)
    def varea(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],
                           fill_color="#99D594")

                show(plot)

        """

    @glyph_method(glyphs.VBar)
    def vbar(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")

                show(plot)

        """

    @glyph_method(glyphs.Wedge)
    def wedge(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
                           end_angle=4.1, radius_units="screen", color="#2b8cbe")

                show(plot)

        """

    @marker_method()
    def x(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")

                show(plot)

        """

    @marker_method()
    def y(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        """
        Examples:

            .. code-block:: python

                from bokeh.plotting import figure, output_file, show

                plot = figure(width=300, height=300)
                plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")

                show(plot)

        """

    # -------------------------------------------------------------------------

    # Internal decorator-generated scatter; the public scatter() below adds
    # marker-shortcut resolution and circle/radius compatibility on top.
    @glyph_method(glyphs.Scatter)
    def _scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        pass

    def scatter(self, *args: Any, **kwargs: Any) -> GlyphRenderer:
        ''' Creates a scatter plot of the given x and y items.

        Args:
            x (str or seq[float]) : values or field names of center x coordinates

            y (str or seq[float]) : values or field names of center y coordinates

            size (str or list[float]) : values or field names of sizes in |screen units|

            marker (str, or list[str]): values or field names of marker types

            color (color value, optional): shorthand to set both fill and line color

            source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
                An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`
                if needed. If none is supplied, one is created for the user automatically.

            **kwargs: |line properties| and |fill properties|

        Examples:

            >>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")
            >>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)

        .. note::
            When passing ``marker="circle"`` it is also possible to supply a
            ``radius`` value in |data units|. When configuring marker type
            from a data source column, *all* markers including circles may only
            be configured with ``size`` in |screen units|.

        '''
        # Resolve single-character shortcuts (e.g. "*" -> "asterisk").
        marker_type = kwargs.pop("marker", "circle")

        if isinstance(marker_type, str) and marker_type in _MARKER_SHORTCUTS:
            marker_type = _MARKER_SHORTCUTS[marker_type]

        # The original scatter implementation allowed circle scatters to set a
        # radius. We will leave this here for compatibility but note that it
        # only works when the marker type is "circle" (and not referencing a
        # data source column). Consider deprecating in the future.
        if marker_type == "circle" and "radius" in kwargs:
            return self.circle(*args, **kwargs)
        else:
            return self._scatter(*args, marker=marker_type, **kwargs)
# Single-character (and two-character) aliases accepted by the ``marker``
# argument of ``GlyphAPI.scatter``, mapped to canonical marker method names.
_MARKER_SHORTCUTS = {
    "*"  : "asterisk",
    "+"  : "cross",
    "o"  : "circle",
    "o+" : "circle_cross",
    "o." : "circle_dot",
    "ox" : "circle_x",
    "oy" : "circle_y",
    "-"  : "dash",
    "."  : "dot",
    "v"  : "inverted_triangle",
    "^"  : "triangle",
    "^." : "triangle_dot",
}
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bokeh/plotting/glyph_api.py | 24,868 | Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
inner_radius=0.2, outer_radius=0.5)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")
show(plot)
.. note::
Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
defaults to |data units|.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_alpha=0.2, line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_color=None)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#E6550D", line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],
fill_color="#99D594")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],
color="#74ADD1", fill_color=None)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300, match_aspect=True)
plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")
show(plot)
.. note::
If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
exception will be raised. If neither is passed, then the ``Greys9``
palette will be used as a default.
.. note::
The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
values (encoded as 32-bit integers).
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(title="line", width=300, height=300)
p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])
show(p)
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
color=['red','green'])
show(p)
.. note::
For this glyph, the data is not simply an array of scalars, it is a
nested array.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],
ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],
color=['red', 'green'])
show(p)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,
angle=-0.7, color="#1D91C0")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")
show(p)
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
p = figure(width=300, height=300)
p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
color=["#43a2ca", "#a8ddb5"])
show(p)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
right=[1.2, 2.5, 3.7], color="#B3DE69")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
width_units="screen", height_units="screen")
show(plot)
Creates a scatter plot of the given x and y items.
Args:
x (str or seq[float]) : values or field names of center x coordinates
y (str or seq[float]) : values or field names of center y coordinates
size (str or list[float]) : values or field names of sizes in |screen units|
marker (str, or list[str]): values or field names of marker types
color (color value, optional): shorthand to set both fill and line color
source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`
if needed. If none is supplied, one is created for the user automatically.
**kwargs: |line properties| and |fill properties|
Examples:
>>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")
>>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)
.. note::
When passing ``marker="circle"`` it is also possible to supply a
``radius`` value in |data units|. When configuring marker type
from a data source column, *all* markers including circles may only
be configured with ``size`` in |screen units|.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],
x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],
color="#F4A582", line_width=3)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F", fill_color=None)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#FDAE6B",fill_color=None, line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.star(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.star_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")
show(plot)
.. note::
The location and angle of the text relative to the ``x``, ``y`` coordinates
is indicated by the alignment and baseline text properties.
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", fill_color=None)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],
fill_color="#99D594")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
end_angle=4.1, radius_units="screen", color="#2b8cbe")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")
show(plot)
Examples:
.. code-block:: python
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
----------------------------------------------------------------------------- Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors. All rights reserved. The full license is in the file LICENSE.txt, distributed with this software.---------------------------------------------------------------------------------------------------------------------------------------------------------- Boilerplate----------------------------------------------------------------------------- isort:skip----------------------------------------------------------------------------- Imports----------------------------------------------------------------------------- Standard library imports Bokeh imports----------------------------------------------------------------------------- Globals and constants---------------------------------------------------------------------------------------------------------------------------------------------------------- General API---------------------------------------------------------------------------------------------------------------------------------------------------------- Dev API----------------------------------------------------------------------------- ------------------------------------------------------------------------- The original scatter implementation allowed circle scatters to set a radius. We will leave this here for compatibility but note that it only works when the marker type is "circle" (and not referencing a data source column). Consider deprecating in the future.----------------------------------------------------------------------------- Private API---------------------------------------------------------------------------------------------------------------------------------------------------------- Code----------------------------------------------------------------------------- | 16,854 | en | 0.419175 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""init for cli and clize"""
import pathlib
import shlex
from collections import OrderedDict
from functools import partial
import clize
from clize import parameters
from clize.help import ClizeHelp, HelpForAutodetectedDocstring
from clize.parser import value_converter
from clize.runner import Clize
from sigtools.wrappers import decorator
# Imports are done in their functions to make calls to -h quicker.
# selected clize imports/constants
IGNORE = clize.Parameter.IGNORE
LAST_OPTION = clize.Parameter.LAST_OPTION
REQUIRED = clize.Parameter.REQUIRED
UNDOCUMENTED = clize.Parameter.UNDOCUMENTED
# help helpers
def docutilize(obj):
    """Convert Numpy or Google style docstring into reStructuredText format.

    Args:
        obj (str or object):
            Takes an object and changes its docstrings to a reStructuredText
            format.

    Returns:
        str or object:
            A converted string or an object with replaced docstring depending
            on the type of the input.
    """
    from inspect import cleandoc, getdoc
    from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
    # Strings are cleaned directly; objects yield their (dedented) docstring.
    doc = cleandoc(obj) if isinstance(obj, str) else getdoc(obj)
    # Run both Napoleon converters so either docstring style is handled.
    doc = str(GoogleDocstring(str(NumpyDocstring(doc))))
    # Strip/rename Sphinx roles that clize's help renderer cannot display.
    for old, new in ((":exc:", ""), (":data:", ""), (":keyword", ":param"), (":kwtype", ":type")):
        doc = doc.replace(old, new)
    if isinstance(obj, str):
        return doc
    obj.__doc__ = doc
    return obj
class HelpForNapoleonDocstring(HelpForAutodetectedDocstring):
    """Help builder that also understands Google/Numpy (Napoleon) docstrings."""

    def add_docstring(self, docstring, *args, **kwargs):
        """Convert the docstring to reStructuredText, then add it."""
        super().add_docstring(docutilize(docstring), *args, **kwargs)
class DocutilizeClizeHelp(ClizeHelp):
    """Subclass to build Napoleon docstring from subject."""
    def __init__(self, subject, owner, builder=HelpForNapoleonDocstring.from_subject):
        # Identical to ClizeHelp except the default builder is the
        # Napoleon-aware HelpForNapoleonDocstring.
        super().__init__(subject, owner, builder)
# input handling
class ObjectAsStr(str):
    """Hide an arbitrary object behind a string so it survives Clize parsing.

    The wrapped object remains reachable via the ``original_object`` slot.
    """

    __slots__ = ("original_object",)

    def __new__(cls, obj, name=None):
        # Objects that are already wrapped are returned untouched.
        if isinstance(obj, cls):
            return obj
        placeholder = cls.obj_to_name(obj) if name is None else name
        wrapped = str.__new__(cls, placeholder)
        wrapped.original_object = obj
        return wrapped

    @staticmethod
    def obj_to_name(obj, cls=None):
        """Build a placeholder name of the form ``<module.Class@id>``."""
        cls = type(obj) if cls is None else cls
        try:
            obj_id = hash(obj)
        except TypeError:
            # Unhashable objects fall back to their identity.
            obj_id = id(obj)
        return "<%s.%s@%i>" % (cls.__module__, cls.__name__, obj_id)
def maybe_coerce_with(converter, obj, **kwargs):
    """Apply *converter* to strings; pass any other object through unchanged."""
    # Unwrap ObjectAsStr-style wrappers before deciding.
    target = getattr(obj, "original_object", obj)
    if isinstance(target, str):
        return converter(target, **kwargs)
    return target
@value_converter
def inputcube(to_convert):
    """Load a cube from file, or pass an existing cube object through.

    Args:
        to_convert (string or iris.cube.Cube):
            File name or Cube object.

    Returns:
        Loaded cube or passed object.
    """
    # Import deferred so that `improver -h` stays fast.
    from improver.utilities.load import load_cube
    return maybe_coerce_with(load_cube, to_convert)
@value_converter
def inputcube_nolazy(to_convert):
    """Load a cube from file, or pass an existing cube object through.

    Where a load is performed, it will not have lazy data; a cube passed
    in directly has its lazy data realised.

    Args:
        to_convert (string or iris.cube.Cube):
            File name or Cube object.

    Returns:
        Loaded cube or passed object.
    """
    from improver.utilities.load import load_cube
    # `has_lazy_data` is a method on iris cubes; the previous truthiness test
    # on the bound method itself was always True for any cube, so the data
    # was realised unconditionally. Call it so only lazy data is realised.
    has_lazy_data = getattr(to_convert, "has_lazy_data", None)
    if callable(has_lazy_data) and has_lazy_data():
        # Accessing .data realises lazy data in place.
        to_convert.data
    return maybe_coerce_with(load_cube, to_convert, no_lazy_load=True)
@value_converter
def inputcubelist(to_convert):
    """Load a cubelist from file, or pass an existing CubeList through.

    Args:
        to_convert (string or iris.cube.CubeList):
            File name or CubeList object.

    Returns:
        Loaded cubelist or passed object.
    """
    # Import deferred so that `improver -h` stays fast.
    from improver.utilities.load import load_cubelist
    return maybe_coerce_with(load_cubelist, to_convert)
@value_converter
def inputjson(to_convert):
    """Load a JSON dictionary from file, or pass an existing dict through.

    Args:
        to_convert (string or dict):
            File name or json dictionary.

    Returns:
        Loaded json dictionary or passed object.
    """
    # Import deferred so that `improver -h` stays fast.
    from improver.utilities.cli_utilities import load_json_or_none
    return maybe_coerce_with(load_json_or_none, to_convert)
@value_converter
def comma_separated_list(to_convert):
    """Convert a comma separated string to a list, or pass a list through.

    Args:
        to_convert (string or list)
            comma separated string or list

    Returns:
        list
    """
    def split_on_commas(text):
        return text.split(",")
    return maybe_coerce_with(split_on_commas, to_convert)
@value_converter
def comma_separated_list_of_float(to_convert):
    """Convert a comma separated string to a list of floats, or pass through.

    Args:
        to_convert (string or list)
            comma separated string or list

    Returns:
        list
    """
    def split_to_floats(text):
        return [float(item) for item in text.split(",")]
    return maybe_coerce_with(split_to_floats, to_convert)
@value_converter
def inputpath(to_convert):
    """Convert a string path to a pathlib.Path, or pass a Path through.

    Args:
        to_convert (string or pathlib.Path):
            path represented as string

    Returns:
        (pathlib.Path): Path object
    """
    return maybe_coerce_with(pathlib.Path, to_convert)
@value_converter
def inputdatetime(to_convert):
    """Convert a cycletime string to a datetime, or pass a datetime through.

    Args:
        to_convert (string or datetime):
            datetime represented as string of the format YYYYMMDDTHHMMZ

    Returns:
        (datetime): datetime object
    """
    # Import deferred so that `improver -h` stays fast.
    from improver.utilities.temporal import cycletime_to_datetime
    return maybe_coerce_with(cycletime_to_datetime, to_convert)
def create_constrained_inputcubelist_converter(*constraints):
    """Make a value converter that extracts exactly one cube per constraint.

    The returned function is a @value_converter, which clize uses to turn
    command-line strings into objects.
    This sidesteps the IMPROVER load_cube (which tries to merge cubes)
    while still handling metadata properly — e.g. to load an X cube and a
    Y cube from a cubelist of two, call this with both constraints; the
    matching cubes are returned as a CubeList in constraint order.

    Args:
        *constraints (tuple of str or callable or iris.Constraint):
            Constraints to be used in extracting the required cubes.
            Each constraint must match exactly one cube and extracted cubes
            will be sorted to match their order.
            A constraint can be an iris.Constraint object or a callable
            or cube name that can be used to construct one.

    Returns:
        callable:
            A function with the constraints used for a list comprehension.
    """
    @value_converter
    def constrained_inputcubelist_converter(to_convert):
        """Load (if necessary) and extract one cube per configured constraint.

        Args:
            to_convert (str or iris.cube.CubeList):
                A CubeList or a filename to be loaded into a CubeList.

        Returns:
            iris.cube.CubeList:
                The loaded cubelist of constrained cubes.
        """
        from iris import Constraint
        from iris.cube import CubeList
        from improver.utilities.load import load_cubelist
        cubes = maybe_coerce_with(load_cubelist, to_convert)
        extracted = []
        for constraint in constraints:
            if callable(constraint):
                # Callables become cube_func constraints; strings and
                # Constraint objects are usable as-is.
                constraint = Constraint(cube_func=constraint)
            extracted.append(cubes.extract_cube(constraint))
        return CubeList(extracted)
    return constrained_inputcubelist_converter
# output handling
@decorator
def with_output(
    wrapped,
    *args,
    output=None,
    compression_level=1,
    least_significant_digit: int = None,
    **kwargs,
):
    """Add `output` keyword only argument.
    Add `compression_level` option.
    Add `least_significant_digit` option.
    This is used to add extra `output`, `compression_level` and `least_significant_digit` CLI
    options. If `output` is provided, it saves the result of calling `wrapped` to file and returns
    None, otherwise it returns the result. If `compression_level` is provided, it compresses the
    data with the provided compression level (or not, if `compression_level` 0). If
    `least_significant_digit` provided, it will quantize the data to a certain number of
    significant figures.
    Args:
        wrapped (obj):
            The function to be wrapped.
        output (str, optional):
            Output file name. If not supplied, the output object will be
            printed instead.
        compression_level (int):
            Will set the compression level (1 to 9), or disable compression (0).
        least_significant_digit (int):
            If specified will truncate the data to a precision given by
            10**(-least_significant_digit), e.g. if least_significant_digit=2, then the data will
            be quantized to a precision of 0.01 (10**(-2)). See
            http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml
            for details. When used with `compression level`, this will result in lossy
            compression.
    Returns:
        Result of calling `wrapped` or None if `output` is given.
    """
    # Import deferred so that `improver -h` stays fast.
    from improver.utilities.save import save_netcdf
    result = wrapped(*args, **kwargs)
    # NOTE(review): a falsy result (None, empty cubelist) is returned rather
    # than saved even when an output path is given — presumably intentional
    # for commands with nothing to save; confirm.
    if output and result:
        save_netcdf(result, output, compression_level, least_significant_digit)
        return
    return result
# cli object creation
def clizefy(obj=None, helper_class=DocutilizeClizeHelp, **kwargs):
    """Decorator for creating CLI objects.

    Usable bare (``@clizefy``) or with options (``@clizefy(...)``).
    """
    if obj is None:
        # Called with options only; return a decorator awaiting its target.
        return partial(clizefy, helper_class=helper_class, **kwargs)
    if hasattr(obj, "cli"):
        # Already a CLI object; pass through untouched.
        return obj
    if callable(obj):
        return Clize.keep(obj, helper_class=helper_class, **kwargs)
    # Non-callables (e.g. a command table) get a dispatcher built for them.
    return Clize.get_cli(obj, **kwargs)
# help command
@clizefy(help_names=())
def improver_help(prog_name: parameters.pass_name, command=None, *, usage=False):
    """Show command help."""
    prog_name = prog_name.split()[0]
    # Drop falsy entries: command may be None, usage may be False.
    cli_args = [arg for arg in (command, "--help", usage and "--usage") if arg]
    result = execute_command(SUBCOMMANDS_DISPATCHER, prog_name, *cli_args)
    if not command and usage:
        # Trim the generic help entry from the top-level usage listing.
        kept = [
            line
            for line in result.splitlines()
            if not line.endswith("--help [--usage]")
        ]
        result = "\n".join(kept)
    return result
def _cli_items():
    """Dynamically discover CLIs.

    Yields:
        (name, cli_callable) pairs for every module in improver.cli,
        plus the built-in "help" command.
    """
    import importlib
    import pkgutil
    from improver.cli import __path__ as improver_cli_pkg_path
    yield ("help", improver_help)
    for module_info in pkgutil.iter_modules(improver_cli_pkg_path):
        if module_info.name == "__main__":
            continue
        module = importlib.import_module("improver.cli." + module_info.name)
        yield (module_info.name, clizefy(module.process))
# Mapping of subcommand name -> CLI callable, discovered dynamically and
# sorted alphabetically for stable help output.
SUBCOMMANDS_TABLE = OrderedDict(sorted(_cli_items()))
# main CLI object with subcommands
SUBCOMMANDS_DISPATCHER = clizefy(
    SUBCOMMANDS_TABLE,
    description="""IMPROVER NWP post-processing toolbox""",
    footnotes="""See also improver --help for more information.""",
)
# IMPROVER top level main
def unbracket(args):
    """Convert input list with bracketed items into nested lists.

    >>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
    ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']
    """
    mismatch_msg = "Mismatched bracket at position %i."
    current = []
    parents = []
    for position, token in enumerate(args):
        if token == "[":
            # Descend: remember the enclosing list and start a fresh one.
            parents.append(current)
            current = []
        elif token == "]":
            if not parents:
                raise ValueError(mismatch_msg % position)
            # Ascend: attach the finished sublist to its parent.
            closed = current
            current = parents.pop()
            current.append(closed)
        else:
            current.append(token)
    if parents:
        # At least one "[" was never closed.
        raise ValueError(mismatch_msg % len(args))
    return current
def execute_command(dispatcher, prog_name, *args, verbose=False, dry_run=False):
    """Common entry point for command execution.

    Nested list/tuple arguments are executed recursively as sub-commands;
    non-string results are smuggled through the CLI layer via ObjectAsStr.
    """
    prepared = []
    for arg in args:
        if isinstance(arg, (list, tuple)):
            # Bracketed sub-commands run first, innermost outwards.
            arg = execute_command(
                dispatcher, prog_name, *arg, verbose=verbose, dry_run=dry_run
            )
        if isinstance(arg, pathlib.PurePath):
            arg = str(arg)
        elif not isinstance(arg, str):
            arg = ObjectAsStr(arg)
        prepared.append(arg)
    if verbose or dry_run:
        print(" ".join(shlex.quote(token) for token in (prog_name, *prepared)))
    if dry_run:
        return prepared
    result = dispatcher(prog_name, *prepared)
    if verbose and result is not None:
        print(ObjectAsStr.obj_to_name(result))
    return result
@clizefy()
def main(
    prog_name: parameters.pass_name,
    command: LAST_OPTION,
    *args,
    profile: value_converter(lambda _: _, name="FILENAME") = None,  # noqa: F821
    memprofile: value_converter(lambda _: _, name="FILENAME") = None,  # noqa: F821
    verbose=False,
    dry_run=False,
):
    """IMPROVER NWP post-processing toolbox
    Results from commands can be passed into file-like arguments
    of other commands by surrounding them by square brackets::
        improver command [ command ... ] ...
    Spaces around brackets are mandatory.
    Args:
        prog_name:
            The program name from argv[0].
        command (str):
            Command to execute
        args (tuple):
            Command arguments
        profile (str):
            If given, will write profiling to the file given.
            To write to stdout, use a hyphen (-)
        memprofile (str):
            Creates 2 files by adding a suffix to the provided argument -
            a tracemalloc snapshot at the point of highest memory consumption
            of your program (suffixed with _SNAPSHOT)
            and a track of the maximum memory used by your program
            over time (suffixed with _MAX_TRACKER).
        verbose (bool):
            Print executed commands
        dry_run (bool):
            Print commands to be executed
    See improver help [--usage] [command] for more information
    on available command(s).
    """
    # Convert bracketed tokens into nested sub-command lists.
    args = unbracket(args)
    exec_cmd = execute_command
    if profile is not None:
        from improver.profile import profile_hook_enable
        profile_hook_enable(dump_filename=None if profile == "-" else profile)
    if memprofile is not None:
        from improver.memprofile import memory_profile_decorator
        # Wrap the executor so memory statistics cover the whole command.
        exec_cmd = memory_profile_decorator(exec_cmd, memprofile)
    result = exec_cmd(
        SUBCOMMANDS_DISPATCHER,
        prog_name,
        command,
        *args,
        verbose=verbose,
        dry_run=dry_run,
    )
    return result
def run_main(argv=None):
    """Overrides argv[0] to be 'improver' then runs main.

    Args:
        argv (list of str):
            Arguments that were from the command line.
    """
    import sys
    from clize import run
    # clize would otherwise render module execution as `python -m improver.cli`;
    # rewrite argv[0] and pass argv explicitly so help output says `improver`.
    args = sys.argv[:] if argv is None else argv
    args[0] = "improver"
    run(main, args=args)
| improver/cli/__init__.py | 17,599 | Subclass to build Napoleon docstring from subject.
Subclass to add support for google style docstrings
Hide object under a string to pass it through Clize parser.
Dynamically discover CLIs.
Adds the updated docstring.
Decorator for creating CLI objects.
Converts comma separated string to list or returns passed object.
Args:
to_convert (string or list)
comma separated string or list
Returns:
list
Converts comma separated string to list of floats or returns passed object.
Args:
to_convert (string or list)
comma separated string or list
Returns:
list
Passes the cube and constraints onto maybe_coerce_with.
Args:
to_convert (str or iris.cube.CubeList):
A CubeList or a filename to be loaded into a CubeList.
Returns:
iris.cube.CubeList:
The loaded cubelist of constrained cubes.
Makes function that the input constraints are used in a loop.
The function is a @value_converter, this means it is used by clize to convert
strings into objects.
This is a way of not using the IMPROVER load_cube which will try to merge
cubes. Iris load on the other hand won't deal with meta data properly.
So an example is if you wanted to load an X cube and a Y cube from a cubelist
of 2. You call this function with a list of constraints.
These cubes get loaded and returned as a CubeList.
Args:
*constraints (tuple of str or callable or iris.Constraint):
Constraints to be used in extracting the required cubes.
Each constraint must match exactly one cube and extracted cubes
will be sorted to match their order.
A constraint can be an iris.Constraint object or a callable
or cube name that can be used to construct one.
Returns:
callable:
A function with the constraints used for a list comprehension.
Convert Numpy or Google style docstring into reStructuredText format.
Args:
obj (str or object):
Takes an object and changes it's docstrings to a reStructuredText
format.
Returns:
str or object:
A converted string or an object with replaced docstring depending
on the type of the input.
Common entry point for command execution.
Show command help.
Loads cube from file or returns passed object.
Args:
to_convert (string or iris.cube.Cube):
File name or Cube object.
Returns:
Loaded cube or passed object.
Loads cube from file or returns passed object.
Where a load is performed, it will not have lazy data.
Args:
to_convert (string or iris.cube.Cube):
File name or Cube object.
Returns:
Loaded cube or passed object.
Loads a cubelist from file or returns passed object.
Args:
to_convert (string or iris.cube.CubeList):
File name or CubeList object.
Returns:
Loaded cubelist or passed object.
Converts string to datetime or returns passed object.
Args:
to_convert (string or datetime):
datetime represented as string of the format YYYYMMDDTHHMMZ
Returns:
(datetime): datetime object
Loads json from file or returns passed object.
Args:
to_convert (string or dict):
File name or json dictionary.
Returns:
Loaded json dictionary or passed object.
Converts string paths to pathlib Path objects
Args:
to_convert (string or pathlib.Path):
path represented as string
Returns:
(pathlib.Path): Path object
IMPROVER NWP post-processing toolbox
Results from commands can be passed into file-like arguments
of other commands by surrounding them by square brackets::
improver command [ command ... ] ...
Spaces around brackets are mandatory.
Args:
prog_name:
The program name from argv[0].
command (str):
Command to execute
args (tuple):
Command arguments
profile (str):
If given, will write profiling to the file given.
To write to stdout, use a hyphen (-)
memprofile (str):
Creates 2 files by adding a suffix to the provided arguemnt -
a tracemalloc snapshot at the point of highest memory consumption
of your program (suffixed with _SNAPSHOT)
and a track of the maximum memory used by your program
over time (suffixed with _MAX_TRACKER).
verbose (bool):
Print executed commands
dry_run (bool):
Print commands to be executed
See improver help [--usage] [command] for more information
on available command(s).
Apply converter if str, pass through otherwise.
Helper function to create the string.
Overrides argv[0] to be 'improver' then runs main.
Args:
argv (list of str):
Arguments that were from the command line.
Convert input list with bracketed items into nested lists.
>>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']
Add `output` keyword only argument.
Add `compression_level` option.
Add `least_significant_digit` option.
This is used to add extra `output`, `compression_level` and `least_significant_digit` CLI
options. If `output` is provided, it saves the result of calling `wrapped` to file and returns
None, otherwise it returns the result. If `compression_level` is provided, it compresses the
data with the provided compression level (or not, if `compression_level` 0). If
`least_significant_digit` provided, it will quantize the data to a certain number of
significant figures.
Args:
wrapped (obj):
The function to be wrapped.
output (str, optional):
Output file name. If not supplied, the output object will be
printed instead.
compression_level (int):
Will set the compression level (1 to 9), or disable compression (0).
least_significant_digit (int):
If specified will truncate the data to a precision given by
10**(-least_significant_digit), e.g. if least_significant_digit=2, then the data will
be quantized to a precision of 0.01 (10**(-2)). See
http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml
for details. When used with `compression level`, this will result in lossy
compression.
Returns:
Result of calling `wrapped` or None if `output` is given.
init for cli and clize
-*- coding: utf-8 -*- ----------------------------------------------------------------------------- (C) British Crown copyright. The Met Office. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Imports are done in their functions to make calls to -h quicker. 
selected clize imports/constants help helpers input handling pass object through if already wrapped Realise data if lazy output handling cli object creation help command main CLI object with subcommands IMPROVER top level main process nested commands recursively noqa: F821 noqa: F821 clize help shows module execution as `python -m improver.cli` override argv[0] and pass it explicitly in order to avoid this so that the help command reflects the way that we call improver. | 8,318 | en | 0.755215 |
"""
input: image
output: little squares with faces
"""
import face_recognition
image = face_recognition.load_image_file("people.png")
face_locations = face_recognition.face_locations(image)
print(face_locations) | Features/face_extraction.py | 213 | input: image
output: little squares with faces | 46 | en | 0.815414 |
# --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Yi Li
# --------------------------------------------------------
from __future__ import print_function, division
import numpy as np
class Symbol:
    """Base class for network symbol builders.

    Subclasses construct the network via ``get_symbol`` (assigning the result
    to ``self.sym``) and initialise parameters via ``init_weights``; this base
    class provides shape inference and parameter-shape checking utilities.
    """

    def __init__(self):
        # Populated by infer_shape(): name -> shape mappings.
        self.arg_shape_dict = None
        self.out_shape_dict = None
        self.aux_shape_dict = None
        # The generated symbol; subclasses must assign it in get_symbol().
        self.sym = None

    @property
    def symbol(self):
        """The generated symbol (None until get_symbol() has been called)."""
        return self.sym

    def get_symbol(self, cfg, is_train=True):
        """
        return a generated symbol, it also need to be assigned to self.sym
        """
        raise NotImplementedError()

    def init_weights(self, cfg, arg_params, aux_params):
        """Initialise parameters in place; implemented by subclasses."""
        raise NotImplementedError()

    def get_msra_std(self, shape):
        """Return the MSRA (He) initialisation standard deviation for a shape.

        Args:
            shape: weight shape; shape[1] is the fan-in channel count, and any
                trailing dimensions (e.g. kernel height/width) multiply in.

        Returns:
            float: sqrt(2 / fan_in).
        """
        fan_in = float(shape[1])
        if len(shape) > 2:
            fan_in *= np.prod(shape[2:])
        # Leftover debug print removed; callers only need the return value.
        return np.sqrt(2 / fan_in)

    def infer_shape(self, data_shape_dict):
        """Infer and cache argument/output/aux shapes of self.sym.

        Args:
            data_shape_dict: mapping of input name -> shape tuple, forwarded
                to the symbol's infer_shape.
        """
        arg_shape, out_shape, aux_shape = self.sym.infer_shape(**data_shape_dict)
        self.arg_shape_dict = dict(zip(self.sym.list_arguments(), arg_shape))
        self.out_shape_dict = dict(zip(self.sym.list_outputs(), out_shape))
        self.aux_shape_dict = dict(zip(self.sym.list_auxiliary_states(), aux_shape))

    def check_parameter_shapes(
        self, arg_params, aux_params, data_shape_dict, is_train=True
    ):
        """Assert that provided parameters exist and match inferred shapes.

        Args:
            arg_params: mapping of argument name -> array (with .shape).
            aux_params: mapping of auxiliary state name -> array (with .shape).
            data_shape_dict: input shapes; names listed here are skipped.
            is_train (bool): when False, argument names containing "label"
                are skipped as well.
        """
        for k in self.sym.list_arguments():
            if k in data_shape_dict or (False if is_train else "label" in k):
                continue
            assert k in arg_params, k + " not initialized"
            assert arg_params[k].shape == self.arg_shape_dict[k], (
                "shape inconsistent for "
                + k
                + " inferred "
                + str(self.arg_shape_dict[k])
                + " provided "
                + str(arg_params[k].shape)
            )
        for k in self.sym.list_auxiliary_states():
            assert k in aux_params, k + " not initialized"
            assert aux_params[k].shape == self.aux_shape_dict[k], (
                "shape inconsistent for "
                + k
                + " inferred "
                + str(self.aux_shape_dict[k])
                + " provided "
                + str(aux_params[k].shape)
            )
| lib/utils/symbol.py | 2,420 | return a generated symbol, it also need to be assigned to self.sym
-------------------------------------------------------- Deep Iterative Matching Network Licensed under The Apache-2.0 License [see LICENSE for details] Written by Yi Li -------------------------------------------------------- infer shape | 307 | en | 0.617093 |
__author__ = 'Eugene'


class GroupHelper:
    """Page-object helper encapsulating group-management actions."""

    def __init__(self, app):
        self.app = app

    def open_groups_page(self):
        """Navigate to the groups page unless it is already open."""
        wd = self.app.wd
        on_groups_page = (
            wd.current_url.endswith("/group.php")
            and len(wd.find_elements_by_name("new")) > 0
        )
        if not on_groups_page:
            wd.find_element_by_link_text("groups").click()

    def return_to_the_groups_page(self):
        """Follow the link back to the groups listing."""
        self.app.wd.find_element_by_link_text("group page").click()

    def change_field_value(self, field_name, text):
        """Clear and re-type the named field; None means leave unchanged."""
        if text is None:
            return
        wd = self.app.wd
        wd.find_element_by_name(field_name).click()
        wd.find_element_by_name(field_name).clear()
        wd.find_element_by_name(field_name).send_keys(text)

    def select_first_group(self):
        """Tick the checkbox of the first listed group."""
        self.app.wd.find_element_by_name("selected[]").click()

    def fill_group_form(self, group):
        """Populate name/header/footer fields from the given group object."""
        self.change_field_value("group_name", group.name)
        self.change_field_value("group_header", group.header)
        self.change_field_value("group_footer", group.footer)

    def create(self, group):
        """Create a new group and return to the listing."""
        wd = self.app.wd
        self.open_groups_page()
        # Start creation, fill the form, then confirm.
        wd.find_element_by_name("new").click()
        self.fill_group_form(group)
        wd.find_element_by_name("submit").click()
        self.return_to_the_groups_page()

    def delete_first_group(self):
        """Delete the first listed group and return to the listing."""
        self.open_groups_page()
        self.select_first_group()
        # Submit the deletion.
        self.app.wd.find_element_by_name("delete").click()
        self.return_to_the_groups_page()

    def modify_first_group(self, new_group_data):
        """Edit the first listed group with the supplied data."""
        wd = self.app.wd
        self.open_groups_page()
        self.select_first_group()
        # Open the edit form, fill it, then save.
        wd.find_element_by_name("edit").click()
        self.fill_group_form(new_group_data)
        wd.find_element_by_name("update").click()
        self.return_to_the_groups_page()

    def count(self):
        """Return the number of groups currently listed."""
        self.open_groups_page()
        return len(self.app.wd.find_elements_by_name("selected[]"))
| fixture/group.py | 2,209 | init group creation submit group creation submit deletion delete init modify group fill group form submit modify group | 118 | en | 0.720088 |
import pandas as pd
import numpy as np
import os
import json
import requests
from dotenv import load_dotenv
from PIL import Image
from io import BytesIO
from IPython.core.display import display, HTML
def art_search(art):
    '''
    Function to retrieve the information about collections in the Art institute of Chicago

    Parameters:
    -------------
    art : str
        The key word that users want to search,
        for example: the artist's name, the title of the artwork.

    Returns:
    -------------
    Dataframe: df
        includes the related info about the searched artworks
        (a status message is printed as a side effect).

    Example:
    -------------
    >>>art_search('monet')
    '''
    params_search = {'q': art}
    r = requests.get("https://api.artic.edu/api/v1/artworks/search?fields=id,title,date_start,date_end,artist_display,place_of_origin,medium_display,category_titles", params = params_search)
    try:
        # status_code is just an attribute and never raises; raise_for_status()
        # raises requests.exceptions.HTTPError on 4xx/5xx responses. The bare
        # `except HTTPError` in the original referenced an undefined name.
        r.raise_for_status()
    except requests.exceptions.HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
    else:
        print('no error (successfully made request)')
    # Response.json() already returns parsed JSON; the previous
    # json.dumps/json.loads round-trip was a no-op.
    artsearch = r.json()
    artworks = pd.DataFrame(artsearch['data'])
    artworks_info = artworks[['id','title','artist_display','place_of_origin','date_start','date_end','medium_display','category_titles']]
    return artworks_info
def tour_search(tour):
    '''
    Retrieve information about tours at the Art Institute of Chicago.

    Parameters
    ----------
    tour : str
        Keyword to search for, e.g. an artist's name or an artwork title.

    Returns
    -------
    pandas.DataFrame
        Related info about the matching tours (id, title, image,
        description, intro, artwork titles, artist titles). Also prints
        whether the HTTP request succeeded.

    Example
    -------
    >>> tour_search('monet')
    '''
    params_search_tour = {'q': tour}
    rt = requests.get("https://api.artic.edu/api/v1/tours/search?fields=id,image,title,description,intro,artwork_titles,artist_titles", params = params_search_tour)
    try:
        # raise_for_status() raises on 4xx/5xx; reading status_code (as the
        # original did) can never raise, and HTTPError was never imported.
        rt.raise_for_status()
    except requests.exceptions.HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
    else:
        print('no error (successfully made request)')
    # Parse once; no dumps/loads round trip needed.
    toursearch = rt.json()
    ntour = pd.DataFrame(toursearch['data'])
    tour_info = ntour[['id', 'title', 'image', 'description', 'intro',
                       'artwork_titles', 'artist_titles']]
    return tour_info
def pic_search(pic, artist):
    '''
    Retrieve the image of an artwork collected in the Art Institute of Chicago.

    Parameters
    ----------
    pic : str
        The title of the artwork.
    artist : str
        The full name of the artist.

    Returns
    -------
    PIL.Image.Image
        The image of the searched artwork, when an exact title match with a
        matching artist is found.
    pandas.DataFrame
        Related candidate info (with an error message printed) when the
        search does not match exactly.

    Example
    -------
    >>> pic_search('Water Lillies', 'Claude Monet')
    '''
    params_search_pic = {'q': pic}
    rp = requests.get("https://api.artic.edu/api/v1/artworks/search?fields=id,title,artist_display,image_id", params = params_search_pic)
    # IIIF image-server URL pieces; the image_id goes between them.
    linkhead = 'https://www.artic.edu/iiif/2/'
    linktail = '/full/843,/0/default.jpg'
    try:
        # raise_for_status() raises on 4xx/5xx; the original's status_code
        # read never raises and its HTTPError handler was never importable.
        rp.raise_for_status()
    except requests.exceptions.HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
    else:
        print('no error (successfully made request)')
    picsearch = rp.json()  # parse once; no dumps/loads round trip needed
    npic = pd.DataFrame(picsearch['data'])
    pic_info = npic[['id', 'title', 'artist_display', 'image_id']]
    for i in range(len(pic_info)):
        row = pic_info.iloc[i]
        # Match the exact title and the artist as a substring of the
        # display name (which also carries nationality/dates).
        if row['title'] == pic and (artist in row['artist_display']):
            if row['image_id'] is None:
                # Some records have no image; the original would have
                # crashed concatenating None into the URL.
                continue
            image_link = linkhead + row['image_id'] + linktail
            response = requests.get(image_link)
            img = Image.open(BytesIO(response.content))
            return img
    print("Invalid Search! Please find related information below :)")
    return pic_info
def product_search(product_art, product_category):
    '''
    Retrieve information about products sold by the Art Institute of Chicago.

    Parameters
    ----------
    product_art : str
        The artwork name on the product, e.g. 'Rainy Day'.
    product_category : str
        The product category, e.g. 'Mug'.

    Returns
    -------
    Rendered HTML table (via IPython display) with related product info and
    inline product images when a match is found; otherwise an error-message
    string.

    Example
    -------
    >>> product_search('Rainy Day', 'Mug')
    >>> 0 245410 Gustave Caillebotte Paris Street; Rainy Day Mug $9.95...
    '''
    params_search_product = {'q': product_art}
    rpro = requests.get("https://api.artic.edu/api/v1/products?search", params = params_search_product)
    try:
        # raise_for_status() raises on 4xx/5xx; the original's status_code
        # read never raises and its HTTPError handler was never importable.
        rpro.raise_for_status()
    except requests.exceptions.HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
    else:
        print('no error (successfully made request)')
    nproduct = pd.DataFrame(rpro.json()['data'])

    def path_to_image_html(path):
        # Render an image URL as an inline <img> tag in the HTML table.
        return '<img src="' + path + '" width="60" >'

    for i in range(len(nproduct)):
        row = nproduct.iloc[i]
        # Match the artwork in the product title and the category in the
        # description. The original returned the "invalid" message from the
        # loop's first non-matching row (else-branch inside the loop), so any
        # later match was unreachable; keep scanning and only fail after the
        # whole frame has been checked.
        if product_art in row['title'] and (product_category in row['description']):
            product_info = nproduct[['id', 'title', 'image_url', 'price_display', 'description']]
            format_dict = {'image_url': path_to_image_html}
            html = display(HTML(product_info.to_html(escape = False, formatters = format_dict)))
            return html
    return "Invalid Search! Please try other artworks or categories:)"
def product_show(product_art_show):
    '''
    Retrieve the top 10 products sold by the Art Institute of Chicago shop.

    Parameters
    ----------
    product_art_show : str
        Search keyword (any string; the API still returns its first 10
        products).

    Returns
    -------
    Rendered HTML table (via IPython display) with info and inline images
    for the top 10 products.

    Example
    -------
    >>> product_show('')
    >>> 0 250620 The Age of French Impressionism—Softcover $30...
    '''
    params_show_product = {'q': product_art_show}
    rproshow = requests.get("https://api.artic.edu/api/v1/products?limit=10", params = params_show_product)
    try:
        # raise_for_status() raises on 4xx/5xx; the original's status_code
        # read never raises and its HTTPError handler was never importable.
        rproshow.raise_for_status()
    except requests.exceptions.HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')
    except Exception as err:
        print(f'Other error occurred: {err}')
    else:
        print('no error (successfully made request)')
    nproductshow = pd.DataFrame(rproshow.json()['data'])
    product_show_info = nproductshow[['id', 'title', 'image_url', 'price_display', 'description']]

    def path_to_image_html(path):
        # Render an image URL as an inline <img> tag in the HTML table.
        return '<img src="' + path + '" width="60" >'

    # The original built this dict via a shadowed loop variable; a literal
    # dict is equivalent and clearer.
    format_dict = {'image_url': path_to_image_html}
    html1 = display(HTML(product_show_info.to_html(escape = False, formatters = format_dict)))
    return html1
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through
Dataframe: df
includes the related info about the searched artworks.
Example:
-------------
>>>art_search('monet')
0 16568 Water Lilies Claude Monet
French, 1840-1926 France 1906 1906 Oil on canvas [Painting and Sculpture of Europe, Essentials]
1 16571 Arrival of the Normandy Train, Gare Saint-Lazare Claude Monet
French, 1840-1926 France 1877 1877 Oil on canvas [Painting and Sculpture of Europe]
Function to retrieve the images of artworks collected in the Art institute of Chicago
Parameters:
-------------
pic: the title of the artwork
artist: the full name of the artist
Returns:
-------------
Status code: str
if the API request went through
Image: jpg
The image of the searched artwork
Error Message:
Error message if the search is invalid
Example:
-------------
>>>pic_search('Water Lillies', 'Claude Monet')
Function to retrieve the information about products sold in the Art institute of Chicago
Parameters:
-------------
pic: the title of the artwork
artist: the full name of the artist
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe include related info about the products and images of the products
Example:
-------------
>>>product_search('Rainy Day', 'Mug')
>>>0 245410 Gustave Caillebotte Paris Street; Rainy Day Mug $9.95...
Function to retrieve the information about top10 products sold in the Art institute of Chicago
Parameters:
-------------
Type in any random word
Returns:
-------------
Status code: str
if the API request went through
DataFrame: a dataframe include related info about the top 10 products and images of the products
Example:
-------------
>>>product_search('')
>>>0 250620 The Age of French Impressionism—Softcover $30...
Function to retrieve the information about tour in the Art institute of Chicago
Parameters:
-------------
The key word that users want to search,
for example: the artist's name, the title of the artwork.
Returns:
-------------
Status code: str
if the API request went through
Dataframe: df
includes the related info about the searched tour.
Example:
-------------
>>>tour_search('monet')
0 4714 Monet and Chicago http://aic-mobile-tours.artic.edu/sites/defaul... <p>Monet and Chicago presents the city’s uniqu... <p>Monet and Chicago is the first exhibition t... [Cliff Walk at Pourville, Caricature of a Man ... [Claude Monet, Claude Monet, Claude Monet, Cla...
1 4520 Manet and Modern Beauty http://aic-mobile-tours.artic.edu/sites/defaul... <p>Dive deep into the life and mind of one the... <p>Manet is undoubtedly one of the most fascin... [] []
match title and artist with user input match title and artist with user input | 3,220 | en | 0.740117 |
import argparse
import sys
from typing import List, Sequence
from exabel_data_sdk import ExabelClient
from exabel_data_sdk.scripts.base_script import BaseScript
class ListTimeSeries(BaseScript):
    """
    Lists all time series, either for one signal or for one entity.

    Exactly one of --signal / --entity must be provided.
    """

    def __init__(self, argv: Sequence[str], description: str):
        super().__init__(argv, description)
        # Both flags are optional at the argparse level; mutual exclusivity
        # is enforced in run_script instead.
        self.parser.add_argument(
            "--signal",
            required=False,
            type=str,
            help="The resource name of a signal, for example 'signals/ns.signalIdentifier'",
        )
        self.parser.add_argument(
            "--entity",
            required=False,
            type=str,
            help=(
                "The resource name of an entity, "
                "for example 'entityTypes/company/entities/identifier'"
            ),
        )

    def run_script(self, client: ExabelClient, args: argparse.Namespace) -> None:
        """Page through the time series API and print every resource name."""
        if (args.signal is None) == (args.entity is None):
            raise ValueError("Specify either signal or entity, but not both.")
        page_token = None
        all_time_series: List[str] = []
        while True:
            # Fetch one page (up to 1000 results) per iteration, resuming
            # from the token returned by the previous page.
            if args.signal is not None:
                result = client.time_series_api.get_signal_time_series(
                    args.signal, page_size=1000, page_token=page_token
                )
            else:
                result = client.time_series_api.get_entity_time_series(
                    args.entity, page_size=1000, page_token=page_token
                )
            all_time_series.extend(result.results)
            page_token = result.next_page_token
            # NOTE(review): termination assumes the accumulated count will
            # exactly reach result.total_size; if the backend total changes
            # mid-pagination this loop would not terminate — confirm against
            # the API contract.
            if len(all_time_series) == result.total_size:
                break
        if not all_time_series:
            print("No time series.")
        for time_series in all_time_series:
            print(time_series)
if __name__ == "__main__":
    # CLI entry point: parse argv and list time series for a signal or entity.
    ListTimeSeries(sys.argv, "Lists time series.").run()
from typing import List, Dict
import os
import json
import argparse
import sys
from string import Template
from common import get_files, get_links_from_file, get_id_files_dict, get_id_title_dict
FORCE_GRAPH_TEMPLATE_NAME = "force_graph.html"
OUTPUT_FILE_NAME = "output.html"
def generate_force_graph(id_files_dict: Dict, id_title_dict: Dict, dirname: str = "", highlight: List = None) -> None:
    """Render the zettel link graph as an interactive force-directed HTML page.

    Parameters:
        id_files_dict: mapping of zettel id -> markdown file name.
        id_title_dict: mapping of zettel id -> zettel title.
        dirname: directory containing the markdown files.
        highlight: optional list of zettel ids to highlight (rendered as group 2).
    """
    import webbrowser  # local import: only needed when the page is opened

    if not highlight:
        highlight = []

    # Nodes: one per zettel; highlighted ids go into group 2 so the template
    # can color them differently.
    nodes = [
        {"id": title, "group": 2 if uid in highlight else 1}
        for uid, title in id_title_dict.items()
    ]

    # Links: one per outgoing markdown link whose target id resolves to a
    # known zettel title; unknown targets are silently dropped.
    links = []
    for uid, file in id_files_dict.items():
        file_links = get_links_from_file(file, dirname=dirname)
        link_list = [
            {"source": id_title_dict[uid], "target": id_title_dict[link], "value": 2}
            for link in file_links
            if id_title_dict.get(link, None)
        ]
        links.extend(link_list)

    # Fill the HTML template and open the result in the default browser.
    data = json.dumps({"nodes": nodes, "links": links})
    with open(FORCE_GRAPH_TEMPLATE_NAME, "r") as f:
        template = f.read()

    s = Template(template)
    with open(OUTPUT_FILE_NAME, "w") as out:
        out.write(s.substitute(data=data))

    # webbrowser is portable; the original os.system("open ...") shell call
    # only works on macOS.
    webbrowser.open(OUTPUT_FILE_NAME)
if __name__ == "__main__":
    # CLI: positional source directory plus an optional --highlight id list.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "directory", help="Source Directory (Default to current directory)"
    )
    parser.add_argument("--highlight", nargs='*', help="Highlight zettel ID")
    args = parser.parse_args()

    dirname = args.directory
    if not os.path.isdir(dirname):
        print("Invalid directory, please check you directory")
        exit(1)

    # Handle the file
    files = get_files(dirname)
    if not files:
        print("No markdown file found.")
        exit(1)

    # Create title and files map
    id_files_dict = get_id_files_dict(files)
    id_title_dict = get_id_title_dict(files)
    if not id_files_dict or not id_title_dict:
        print("No valid Zettel was found.")
        exit(1)

    highlight = []
    if args.highlight is not None:
        # --highlight with values: use them directly; a bare --highlight flag
        # falls back to reading ids from stdin (supports piping from another
        # tool). NOTE(review): the stdin fallback is assumed to apply only
        # when the flag is present — confirm intended nesting.
        highlight = args.highlight if args.highlight else []
        if not highlight:
            highlight = [line.strip() for line in sys.stdin]

    generate_force_graph(id_files_dict, id_title_dict, dirname, highlight=highlight)
| zettvis.py | 2,447 | Create nodes Dict(id, group) Create links Dict(source, target, value) Create Output and open it Handle the file Create title and files map | 138 | en | 0.491796 |
import os
import sys
import math
import copy
from binary_tree import BinaryTreeNode, BinaryTree, BinarySearchTree
from graph import GraphNode, Graph
# 4.6 find the next node (in-order) of a given node in a Binary Tree
# -> go back to the root and run an in-order traversal until we meet the current node; the next visited node is the answer
def get_next_node(node):
    """Return the in-order successor of *node* in its tree, or None if last."""
    tree_root = get_root_node(node)
    # Mutable single-element flag shared across the recursive traversal:
    # flips to True once *node* has been visited.
    found_flag = [False]
    return get_next_node_in_order_of_node(node, tree_root, found_flag)
def get_next_node_in_order_of_node(node, visit_node, is_next):
    """In-order traversal from *visit_node*; return the node visited right
    after *node*, or None if the traversal ends first.

    is_next is a single-element list used as a mutable flag shared across
    recursive calls: it becomes True once *node* has been visited, so the
    very next visited node is the answer.
    """
    # Flag already set by an earlier visit: this node is the successor.
    if is_next[0]:
        return visit_node
    if visit_node == None:
        return None
    # Left subtree first (in-order).
    node_next = get_next_node_in_order_of_node(node, visit_node.left, is_next)
    if node_next != None:
        return node_next
    # The left subtree may have just set the flag; then this node is next.
    if is_next[0]:
        return visit_node
    if visit_node == node:
        is_next[0] = True
    # Then the right subtree.
    node_next = get_next_node_in_order_of_node(node, visit_node.right, is_next)
    if node_next != None:
        return node_next
    return None
def get_root_node(node):
    """Walk parent links upward and return the tree root (the node whose
    parent is None). The original kept an unused `root` local; removed."""
    while node.parent is not None:
        node = node.parent
    return node
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# node = tree.root.left.right
# next_node = get_next_node(node)
# if next_node != None:
# print(next_node.value)
# else:
# print("None")
# 4.7 build projects
class Project:
    """A build unit for the topological-build exercise (4.7).

    Attributes:
        name: project identifier.
        dependencies: list of Project instances this one depends on.
        state: 0 = waiting, 1 = built.
    """

    def __init__(self, name):
        self.name = name
        self.state = 0
        # Per-instance list; the original also declared mutable class-level
        # attributes, which invites accidental sharing and is removed here.
        self.dependencies = []
def build_projects(projects):
    """Build *projects* in dependency order.

    Repeatedly sweeps the list, building every project whose dependencies
    are all built, until a full sweep makes no progress.

    Returns:
        List of project names in build order, or False if some projects can
        never be built (circular or unsatisfiable dependencies).
    """
    build_queue = []
    made_progress = True
    while made_progress:
        made_progress = False
        for project in projects:
            if project.state == 0 and build_project(project):
                build_queue.append(project.name)
                made_progress = True
    if all(project.state == 1 for project in projects):
        return build_queue
    return False


def build_project(project):
    """Mark *project* built iff all its dependencies are built.

    Returns:
        True when the project was (just) built, False otherwise.
    """
    if all(dep.state == 1 for dep in project.dependencies):
        project.state = 1
        return True
    return False
# a = Project("a")
# b = Project("b")
# c = Project("c")
# d = Project("d")
# e = Project("e")
# f = Project("f")
# d.dependencies.append(a)
# b.dependencies.append(f)
# d.dependencies.append(b)
# a.dependencies.append(f)
# c.dependencies.append(d)
# t = build_projects([a,b,c,d,e,f])
# print(t)
# 4.8 find first common ancestor
# -> get a queue ancestor of node 1 and compare for node 2
def get_common_ancestor(node1, node2):
if node1 == node2:
return node1
node1_parents = list()
parent1 = node1
while parent1 != None:
node1_parents.append(parent1)
parent1 = parent1.parent
node2_parents = list()
parent2 = node2
while parent2 != None:
node2_parents.append(parent2)
parent2 = parent2.parent
common_ancestor = None
for p1 in node1_parents:
for p2 in node2_parents:
if p1 == p2:
common_ancestor = p1
break
if common_ancestor != None:
break
return common_ancestor
# Test
# array = [1,2,3,4,5,6]
# tree = BinaryTree()
# for v in array:
# tree.append(v)
# n1 = tree.root.left.left
# n2 = tree.root.right.left
# common = get_common_ancestor(n1, n2)
# print(common.value)
# 4.9 print all possible array can be create from a binary search tree
def dump_permutation_of_source_array(tree):
    """Print every insertion order that reproduces *tree* as a BST (4.9)."""
    if tree.root is not None:
        _dump_permutation_of_source_array([tree.root], [])
    else:
        print("tree is empty")


def _dump_permutation_of_source_array(candidate_nodes, visited_nodes):
    """Recursive helper: candidate_nodes are nodes whose parents were already
    emitted, so any of them may legally come next in an insertion order."""
    if len(candidate_nodes) == 0:
        dump_nodes(visited_nodes)
        return
    for i in range(len(candidate_nodes)):
        node = candidate_nodes[i]
        # Shallow list copies are enough per branch — we never mutate the
        # nodes themselves, only the lists. The original deep-copied both
        # lists, duplicating every node object on every recursive branch.
        next_candidates = candidate_nodes[:i] + candidate_nodes[i + 1:]
        # Children become eligible once their parent is placed; right is
        # inserted last so it ends up in front of left, matching the
        # original insertion order.
        if node.left is not None:
            next_candidates.insert(0, node.left)
        if node.right is not None:
            next_candidates.insert(0, node.right)
        _dump_permutation_of_source_array(next_candidates, visited_nodes + [node])


def dump_nodes(nodes):
    """Print one insertion order as its list of node values."""
    values = [node.value for node in nodes]
    print("source:", values)
# Test
# values = [2,1,3,4]
# values1 = [10,5,15,4,6,14,16]
# tree = BinarySearchTree()
# for v in values1:
# tree.append(v)
# dump_permutation_of_source_array(tree) | cracking-the-coding-interview/1-chapter4_1.py | 4,673 | 4.6 find the next node (in-order) of a given node in a Binary Tree -> back to root and using in-order travelsal until meet the current node. get the next Test array = [1,2,3,4,5,6] tree = BinaryTree() for v in array: tree.append(v) node = tree.root.left.right next_node = get_next_node(node) if next_node != None: print(next_node.value) else: print("None") 4.7 build projects list of dependency projects 0: waiting, 1: built a = Project("a") b = Project("b") c = Project("c") d = Project("d") e = Project("e") f = Project("f") d.dependencies.append(a) b.dependencies.append(f) d.dependencies.append(b) a.dependencies.append(f) c.dependencies.append(d) t = build_projects([a,b,c,d,e,f]) print(t) 4.8 find first common ancestor -> get a queue ancestor of node 1 and compare for node 2 Test array = [1,2,3,4,5,6] tree = BinaryTree() for v in array: tree.append(v) n1 = tree.root.left.left n2 = tree.root.right.left common = get_common_ancestor(n1, n2) print(common.value) 4.9 print all possible array can be create from a binary search tree Test values = [2,1,3,4] values1 = [10,5,15,4,6,14,16] tree = BinarySearchTree() for v in values1: tree.append(v) dump_permutation_of_source_array(tree) | 1,199 | en | 0.538308 |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
"""
Built-in value transformers.
"""
import datetime as dt
from typing import Any, Sequence
from datadog_checks.base import AgentCheck
from datadog_checks.base.types import ServiceCheckStatus
from datadog_checks.base.utils.db.utils import normalize_datetime
def length(value):
    # type: (Sequence) -> int
    """Return the number of elements in *value*."""
    return len(value)
def to_time_elapsed(datetime):
    # type: (dt.datetime) -> float
    """Return the number of seconds elapsed since *datetime*."""
    normalized = normalize_datetime(datetime)
    delta = dt.datetime.now(normalized.tzinfo) - normalized
    return delta.total_seconds()
def ok_warning(value):
    # type: (Any) -> ServiceCheckStatus
    """Map a truthy value to OK and a falsy one to WARNING."""
    return AgentCheck.WARNING if not value else AgentCheck.OK
| rethinkdb/datadog_checks/rethinkdb/document_db/transformers.py | 775 | Built-in value transformers.
(C) Datadog, Inc. 2020-present All rights reserved Licensed under a 3-clause BSD style license (see LICENSE) type: (Sequence) -> int type: (dt.datetime) -> float type: (Any) -> ServiceCheckStatus | 226 | en | 0.661962 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Performance benchmark tests for MXNet NDArray Unary Operations.
1. Operators are automatically fetched from MXNet operator registry.
2. Default Inputs are generated. See rules/default_params.py. You can override the default values.
Below 54 unary Operators are covered:
['BlockGrad', 'Flatten', 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'argmax_channel', 'cbrt', 'ceil', 'cos', 'cosh', 'degrees', 'erf', 'erfinv', 'exp', 'expm1', 'fix', 'flatten',
'floor', 'gamma', 'gammaln', 'identity', 'log', 'log10', 'log1p', 'log2', 'logical_not', 'make_loss', 'negative',
'ones_like', 'radians', 'rcbrt', 'reciprocal', 'relu', 'rint', 'round', 'rsqrt', 'shuffle', 'sigmoid', 'sign',
'sin', 'sinh', 'size_array', 'softsign', 'sqrt', 'square', 'stop_gradient', 'tan', 'tanh', 'trunc', 'zeros_like']
"""
import mxnet as mx
from benchmark.opperf.utils.op_registry_utils import get_all_unary_operators
from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
def run_mx_unary_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
    """Runs benchmarks with the given context and precision (dtype) for all
    the unary operators in MXNet.

    Parameters
    ----------
    ctx: mx.ctx
        Context to run benchmarks
    dtype: str, default 'float32'
        Precision to use for benchmarks
    warmup: int, default 25
        Number of times to run for warmup
    runs: int, default 100
        Number of runs to capture benchmark results

    Returns
    -------
    Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
    """
    # Fetch every unary operator registered with MXNet, then benchmark each.
    unary_ops = get_all_unary_operators()
    return run_op_benchmarks(unary_ops, dtype, ctx, warmup, runs)
| benchmark/opperf/nd_operations/unary_operators.py | 2,635 | Runs benchmarks with the given context and precision (dtype)for all the unary
operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
Performance benchmark tests for MXNet NDArray Unary Operations.
1. Operators are automatically fetched from MXNet operator registry.
2. Default Inputs are generated. See rules/default_params.py. You can override the default values.
Below 54 unary Operators are covered:
['BlockGrad', 'Flatten', 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'argmax_channel', 'cbrt', 'ceil', 'cos', 'cosh', 'degrees', 'erf', 'erfinv', 'exp', 'expm1', 'fix', 'flatten',
'floor', 'gamma', 'gammaln', 'identity', 'log', 'log10', 'log1p', 'log2', 'logical_not', 'make_loss', 'negative',
'ones_like', 'radians', 'rcbrt', 'reciprocal', 'relu', 'rint', 'round', 'rsqrt', 'shuffle', 'sigmoid', 'sign',
'sin', 'sinh', 'size_array', 'softsign', 'sqrt', 'square', 'stop_gradient', 'tan', 'tanh', 'trunc', 'zeros_like']
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Fetch all Unary Operators Run benchmarks | 2,071 | en | 0.555549 |
from utils.compute import get_landmark_3d, get_vector_intersection
from utils.visualize import HumanPoseVisualizer
from utils.OakRunner import OakRunner
from utils.pose import getKeypoints
from utils.draw import displayFPS
from pathlib import Path
import depthai as dai
import numpy as np
import cv2
fps_limit = 3
frame_width, frame_height = 456, 256
pairs = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13],
[1, 0], [0, 14], [14, 16], [0, 15], [15, 17]]
colors = [[255, 100, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [255, 255, 0], [255, 100, 0], [0, 255, 0],
[100, 200, 255], [255, 0, 255], [0, 255, 0], [100, 200, 255], [255, 0, 255], [255, 0, 0], [0, 0, 255],
[0, 200, 200], [0, 0, 255], [0, 200, 200], [0, 0, 0]]
threshold = 0.3
nb_points = 18
def init(runner, device):
    """One-time setup: read camera intrinsics and start the 3D visualizer."""
    # Factory calibration gives each mono camera's intrinsic matrix at
    # 1280x720: entry [0,0] is the focal length in pixels; entry [0,2] is
    # presumably the principal-point x offset (stored here as "size") —
    # TODO confirm against get_landmark_3d's expectations.
    calibration = device.readCalibration()
    left_intrinsics = np.array(calibration.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, 1280, 720))
    right_intrinsics = np.array(calibration.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, 1280, 720))
    runner.custom_arguments["focal_length_left"] = left_intrinsics[0,0]
    runner.custom_arguments["focal_length_right"] = right_intrinsics[0,0]
    runner.custom_arguments["size_left"] = left_intrinsics[0,2]
    runner.custom_arguments["size_right"] = right_intrinsics[0,2]
    # 3D skeleton visualizer anchored at the two camera positions; started
    # once up front and fed landmark positions from process().
    runner.custom_arguments["visualizer"] = HumanPoseVisualizer(300, 300, [runner.left_camera_location, runner.right_camera_location], colors=colors, pairs=pairs)
    runner.custom_arguments["visualizer"].start()
def process(runner):
    """Per-frame callback: run 2D pose estimation on both mono cameras,
    draw the detected skeletons, and triangulate 3D landmark positions."""
    # For each camera, a unit direction vector per keypoint (or an empty
    # array when the keypoint was not detected).
    spatial_vectors = dict()
    for side in ["left", "right"]:
        frame = runner.output_queues[side+"_cam"].get().getCvFrame()
        nn_current_output = runner.output_queues["nn_"+side].get()
        # OpenPose-style network outputs: 19 confidence heatmaps (L2) and
        # 38 part-affinity-field channels (L1) at 32x57 resolution.
        heatmaps = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L2')).reshape((1, 19, 32, 57)).astype('float32')
        pafs = np.array(nn_current_output.getLayerFp16('Mconv7_stage2_L1')).reshape((1, 38, 32, 57)).astype('float32')
        outputs = np.concatenate((heatmaps, pafs), axis=1)
        spatial_vectors[side] = []
        landmarks = []
        for i in range(nb_points):
            # Upscale the i-th heatmap to frame size and extract peak(s)
            # above the confidence threshold; only the first peak is used.
            probMap = outputs[0, i, :, :]
            probMap = cv2.resize(probMap, (frame_width, frame_height))
            keypoints = getKeypoints(probMap, threshold)
            if(len(keypoints) > 0 and len(keypoints[0]) > 1):
                # Back-project the normalized 2D keypoint into a 3D ray
                # using this camera's calibration.
                spatial_vectors[side].append(np.array(get_landmark_3d((keypoints[0][0]/frame_width, keypoints[0][1]/frame_height), focal_length=runner.custom_arguments["focal_length_"+side], size=runner.custom_arguments["size_"+side])))
                landmarks.append([keypoints[0][0], keypoints[0][1]])
                cv2.circle(frame, (keypoints[0][0], keypoints[0][1]), 5, (colors[i][2], colors[i][1], colors[i][0]), -1, cv2.LINE_AA) # draw keypoint
            else:
                spatial_vectors[side].append(keypoints) # insert empty array if the keypoint is not detected with enough confidence
                landmarks.append(keypoints)
        # Draw the skeleton: connect each detected keypoint pair with a line
        # colored as the average of the two endpoint colors.
        for pair in pairs:
            if(np.alltrue([len(landmarks[i])==2 for i in pair])):
                color = [0, 0, 0]
                for i in range(3):
                    color[i] += colors[pair[0]][i]/2
                    color[i] += colors[pair[1]][i]/2
                cv2.line(frame, (landmarks[pair[0]][0], landmarks[pair[0]][1]), (landmarks[pair[1]][0], landmarks[pair[1]][1]), (color[2], color[1], color[0]), 3, cv2.LINE_AA)
        displayFPS(frame, runner.getFPS())
        cv2.imshow(side, frame)

    # Determined depth to accuratly locate landmarks in space
    # (intersect — or closest-approach — the left/right rays per keypoint).
    landmark_spatial_locations = []
    for i in range(nb_points):
        landmark_spatial_locations.append(np.array(get_vector_intersection(spatial_vectors["left"][i], runner.left_camera_location, spatial_vectors["right"][i], runner.right_camera_location)))
    runner.custom_arguments["visualizer"].setLandmarks(landmark_spatial_locations)
runner = OakRunner()

# Configure both mono cameras identically: capture size, FPS cap, and an
# ImageManip stage that feeds the pose-estimation network.
for side in ["left", "right"]:
    if(side == "left"):
        runner.setLeftCamera(frame_width, frame_height)
        runner.getLeftCamera().setFps(fps_limit)
        manip = runner.getLeftCameraManip()
    else:
        runner.setRightCamera(frame_width, frame_height)
        runner.getRightCamera().setFps(fps_limit)
        manip = runner.getRightCameraManip()
    manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # Switch to BGR (but still grayscaled)
    runner.addNeuralNetworkModel(stream_name="nn_"+side, path=str(Path(__file__).parent) + "/../_models/pose_estimation.blob", handle_mono_depth=False)
    manip.out.link(runner.neural_networks["nn_"+side].input) # link transformed video stream to neural network entry

# Start the pipeline: init() runs once, process() runs per frame.
runner.run(process=process, init=init)
import sys
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_scalar
from dask.dataframe import methods
from dask.dataframe.core import DataFrame, Series, apply_concat_apply, map_partitions
from dask.dataframe.utils import has_known_categories
from dask.utils import M
###############################################################
# Dummies
###############################################################
def get_dummies(
    data,
    prefix=None,
    prefix_sep="_",
    dummy_na=False,
    columns=None,
    sparse=False,
    drop_first=False,
    dtype=np.uint8,
    **kwargs,
):
    """
    Convert categorical variable into dummy/indicator variables.

    Data must have category dtype to infer result's ``columns``.

    Parameters
    ----------
    data : Series, or DataFrame
        For Series, the dtype must be categorical.
        For DataFrame, at least one column must be categorical.
    prefix : string, list of strings, or dict of strings, default None
        String to append DataFrame column names.
        Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
        can be a dictionary mapping column names to prefixes.
    prefix_sep : string, default '_'
        If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix.`
    dummy_na : bool, default False
        Add a column to indicate NaNs, if False NaNs are ignored.
    columns : list-like, default None
        Column names in the DataFrame to be encoded.
        If `columns` is None then all the columns with
        `category` dtype will be converted.
    sparse : bool, default False
        Whether the dummy columns should be sparse or not. Returns
        SparseDataFrame if `data` is a Series or if all columns are included.
        Otherwise returns a DataFrame with some SparseBlocks.

        .. versionadded:: 0.18.2

    drop_first : bool, default False
        Whether to get k-1 dummies out of k categorical levels by removing the
        first level.
    dtype : dtype, default np.uint8
        Data type for new columns. Only a single dtype is allowed.

        .. versionadded:: 0.18.2

    Returns
    -------
    dummies : DataFrame

    Examples
    --------
    Dask's version only works with Categorical data, as this is the only way to
    know the output shape without computing all the data.

    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)
    >>> dd.get_dummies(s)
    Traceback (most recent call last):
    ...
    NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...

    With categorical data:

    >>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)
    >>> dd.get_dummies(s)  # doctest: +NORMALIZE_WHITESPACE
    Dask DataFrame Structure:
                       a      b      c
    npartitions=2
    0              uint8  uint8  uint8
    2                ...    ...    ...
    3                ...    ...    ...
    Dask Name: get_dummies, 4 tasks
    >>> dd.get_dummies(s).compute()  # doctest: +ELLIPSIS
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0

    See Also
    --------
    pandas.get_dummies
    """
    # Already-materialized pandas input: delegate straight to pandas.
    if isinstance(data, (pd.Series, pd.DataFrame)):
        return pd.get_dummies(
            data,
            prefix=prefix,
            prefix_sep=prefix_sep,
            dummy_na=dummy_na,
            columns=columns,
            sparse=sparse,
            drop_first=drop_first,
            dtype=dtype,
            **kwargs,
        )

    not_cat_msg = (
        "`get_dummies` with non-categorical dtypes is not "
        "supported. Please use `df.categorize()` beforehand to "
        "convert to categorical dtype."
    )

    unknown_cat_msg = (
        "`get_dummies` with unknown categories is not "
        "supported. Please use `column.cat.as_known()` or "
        "`df.categorize()` beforehand to ensure known "
        "categories"
    )

    # Lazy (dask) input: categories must be categorical AND known, otherwise
    # the output columns cannot be determined without computing the data.
    if isinstance(data, Series):
        if not methods.is_categorical_dtype(data):
            raise NotImplementedError(not_cat_msg)
        if not has_known_categories(data):
            raise NotImplementedError(unknown_cat_msg)
    elif isinstance(data, DataFrame):
        if columns is None:
            # Any object-dtype column would need categorizing first; default
            # to encoding every categorical column.
            if (data.dtypes == "object").any():
                raise NotImplementedError(not_cat_msg)
            columns = data._meta.select_dtypes(include=["category"]).columns
        else:
            if not all(methods.is_categorical_dtype(data[c]) for c in columns):
                raise NotImplementedError(not_cat_msg)
            if not all(has_known_categories(data[c]) for c in columns):
                raise NotImplementedError(unknown_cat_msg)

    # Dispatch to the get_dummies of whichever backend produced the meta
    # object (e.g. "pandas" for pd.DataFrame meta), applied per partition.
    package_name = data._meta.__class__.__module__.split(".")[0]
    dummies = sys.modules[package_name].get_dummies
    return map_partitions(
        dummies,
        data,
        prefix=prefix,
        prefix_sep=prefix_sep,
        dummy_na=dummy_na,
        columns=columns,
        sparse=sparse,
        drop_first=drop_first,
        dtype=dtype,
        **kwargs,
    )
###############################################################
# Pivot table
###############################################################
def pivot_table(df, index=None, columns=None, values=None, aggfunc="mean"):
    """
    Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
    must have category dtype to infer result's ``columns``.
    ``index``, ``columns``, and ``aggfunc`` must be all scalar.
    ``values`` can be scalar or list-like.

    Parameters
    ----------
    df : DataFrame
    index : scalar
        column to be index
    columns : scalar
        column to be columns
    values : scalar or list(scalar)
        column(s) to aggregate
    aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'

    Returns
    -------
    table : DataFrame

    See Also
    --------
    pandas.DataFrame.pivot_table
    """
    # Validate the scalar arguments up front so users get clear errors instead
    # of failures deep inside the graph construction.
    if not is_scalar(index) or index is None:
        raise ValueError("'index' must be the name of an existing column")
    if not is_scalar(columns) or columns is None:
        raise ValueError("'columns' must be the name of an existing column")
    if not methods.is_categorical_dtype(df[columns]):
        raise ValueError("'columns' must be category dtype")
    if not has_known_categories(df[columns]):
        raise ValueError(
            "'columns' must have known categories. Please use "
            "`df[columns].cat.as_known()` beforehand to ensure "
            "known categories"
        )
    if not (
        is_list_like(values)
        and all([is_scalar(v) for v in values])
        or is_scalar(values)
    ):
        raise ValueError("'values' must refer to an existing column or columns")

    available_aggfuncs = ["mean", "sum", "count", "first", "last"]

    if not is_scalar(aggfunc) or aggfunc not in available_aggfuncs:
        raise ValueError(
            "aggfunc must be either " + ", ".join(f"'{x}'" for x in available_aggfuncs)
        )

    # _emulate can't work for empty data
    # the result must have CategoricalIndex columns
    columns_contents = pd.CategoricalIndex(df[columns].cat.categories, name=columns)
    if is_scalar(values):
        new_columns = columns_contents
    else:
        # Multiple value columns produce a (value, category) MultiIndex.
        new_columns = pd.MultiIndex.from_product(
            (sorted(values), columns_contents), names=[None, columns]
        )

    if aggfunc in ["first", "last"]:
        # Infer datatype as non-numeric values are allowed
        if is_scalar(values):
            meta = pd.DataFrame(
                columns=new_columns,
                dtype=df[values].dtype,
                index=pd.Index(df._meta[index]),
            )
        else:
            meta = pd.DataFrame(
                columns=new_columns,
                index=pd.Index(df._meta[index]),
            )
            for value_col in values:
                meta[value_col] = meta[value_col].astype(df[values].dtypes[value_col])
    else:
        # Use float64 as other aggregate functions require numerical data
        meta = pd.DataFrame(
            columns=new_columns, dtype=np.float64, index=pd.Index(df._meta[index])
        )

    kwargs = {"index": index, "columns": columns, "values": values}

    # 'mean' is computed as sum / count, so build both partial aggregations.
    if aggfunc in ["sum", "mean"]:
        pv_sum = apply_concat_apply(
            [df],
            chunk=methods.pivot_sum,
            aggregate=methods.pivot_agg,
            meta=meta,
            token="pivot_table_sum",
            chunk_kwargs=kwargs,
        )
    if aggfunc in ["count", "mean"]:
        pv_count = apply_concat_apply(
            [df],
            chunk=methods.pivot_count,
            aggregate=methods.pivot_agg,
            meta=meta,
            token="pivot_table_count",
            chunk_kwargs=kwargs,
        )

    if aggfunc == "sum":
        return pv_sum
    elif aggfunc == "count":
        return pv_count
    elif aggfunc == "mean":
        return pv_sum / pv_count
    elif aggfunc == "first":
        return apply_concat_apply(
            [df],
            chunk=methods.pivot_first,
            aggregate=methods.pivot_agg_first,
            meta=meta,
            token="pivot_table_first",
            chunk_kwargs=kwargs,
        )
    elif aggfunc == "last":
        return apply_concat_apply(
            [df],
            chunk=methods.pivot_last,
            aggregate=methods.pivot_agg_last,
            meta=meta,
            token="pivot_table_last",
            chunk_kwargs=kwargs,
        )
    else:
        # Unreachable given the aggfunc validation above; kept as a defensive
        # guard, now with an informative message instead of a bare ValueError.
        raise ValueError(f"Unsupported aggfunc: {aggfunc!r}")
###############################################################
# Melt
###############################################################
def melt(
    frame,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
):
    """
    Unpivot a DataFrame from wide format to long format, optionally keeping
    identifier variables set.

    One or more columns act as identifier variables (``id_vars``); every other
    selected column is treated as a measured variable (``value_vars``) and is
    "unpivoted" onto the row axis, leaving just two non-identifier columns:
    'variable' and 'value'.

    Parameters
    ----------
    frame : DataFrame
    id_vars : tuple, list, or ndarray, optional
        Column(s) to use as identifier variables.
    value_vars : tuple, list, or ndarray, optional
        Column(s) to unpivot. If not specified, uses all columns that
        are not set as `id_vars`.
    var_name : scalar
        Name to use for the 'variable' column. If None it uses
        ``frame.columns.name`` or 'variable'.
    value_name : scalar, default 'value'
        Name to use for the 'value' column.
    col_level : int or string, optional
        If columns are a MultiIndex then use this level to melt.

    Returns
    -------
    DataFrame
        Unpivoted DataFrame.

    See Also
    --------
    pandas.DataFrame.melt
    """
    # Imported here to avoid a circular import at module load time.
    from dask.dataframe.core import no_default

    melt_kwargs = dict(
        id_vars=id_vars,
        value_vars=value_vars,
        var_name=var_name,
        value_name=value_name,
        col_level=col_level,
    )
    # Delegate to pandas' melt on every partition; row-wise unpivoting is
    # embarrassingly parallel across partitions.
    return frame.map_partitions(M.melt, meta=no_default, token="melt", **melt_kwargs)
| dask/dataframe/reshape.py | 11,467 | Convert categorical variable into dummy/indicator variables.
Data must have category dtype to infer result's ``columns``.
Parameters
----------
data : Series, or DataFrame
For Series, the dtype must be categorical.
For DataFrame, at least one column must be categorical.
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.18.2
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.18.2
Returns
-------
dummies : DataFrame
Examples
--------
Dask's version only works with Categorical data, as this is the only way to
know the output shape without computing all the data.
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> s = dd.from_pandas(pd.Series(list('abca')), npartitions=2)
>>> dd.get_dummies(s)
Traceback (most recent call last):
...
NotImplementedError: `get_dummies` with non-categorical dtypes is not supported...
With categorical data:
>>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)
>>> dd.get_dummies(s) # doctest: +NORMALIZE_WHITESPACE
Dask DataFrame Structure:
a b c
npartitions=2
0 uint8 uint8 uint8
2 ... ... ...
3 ... ... ...
Dask Name: get_dummies, 4 tasks
>>> dd.get_dummies(s).compute() # doctest: +ELLIPSIS
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
See Also
--------
pandas.get_dummies
Unpivots a DataFrame from wide format to long format, optionally leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one or more columns are identifier variables
(``id_vars``), while all other columns, considered measured variables (``value_vars``), are "unpivoted" to the row
axis, leaving just two non-identifier columns, 'variable' and 'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
pandas.DataFrame.melt
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, and ``aggfunc`` must be all scalar.
``values`` can be scalar or list-like.
Parameters
----------
df : DataFrame
index : scalar
column to be index
columns : scalar
column to be columns
values : scalar or list(scalar)
column(s) to aggregate
aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'
Returns
-------
table : DataFrame
See Also
--------
pandas.DataFrame.pivot_table
Dummies Pivot table _emulate can't work for empty data the result must have CategoricalIndex columns Infer datatype as non-numeric values are allowed Use float64 as other aggregate functions require numerical data Melt | 4,300 | en | 0.416317 |
import json
from sqlalchemy.orm import subqueryload
from werkzeug.exceptions import BadRequest, NotFound, PreconditionFailed
from rdr_service import clock
from rdr_service.code_constants import PPI_EXTRA_SYSTEM
from rdr_service.dao.base_dao import BaseDao, UpdatableDao
from rdr_service.lib_fhir.fhirclient_1_0_6.models import questionnaire
from rdr_service.model.code import CodeType
from rdr_service.model.questionnaire import (
Questionnaire,
QuestionnaireConcept,
QuestionnaireHistory,
QuestionnaireQuestion,
)
# FHIR extension URLs used to carry HQ-specific metadata on a Questionnaire resource.
_SEMANTIC_DESCRIPTION_EXTENSION = "http://all-of-us.org/fhir/forms/semantic-description"
_IRB_MAPPING_EXTENSION = "http://all-of-us.org/fhir/forms/irb-mapping"
class QuestionnaireDao(UpdatableDao):
    """Data-access object for ``Questionnaire`` rows.

    Every insert and update also writes a ``QuestionnaireHistory`` snapshot
    (with copies of the questionnaire's concepts and questions) so earlier
    versions stay referenceable by questionnaire responses.
    """

    def __init__(self):
        super(QuestionnaireDao, self).__init__(Questionnaire)

    def get_id(self, obj):
        """Return the primary key of a questionnaire."""
        return obj.questionnaireId

    def get_with_children(self, questionnaire_id):
        """Fetch a questionnaire with its concepts and questions eagerly loaded."""
        with self.session() as session:
            query = session.query(Questionnaire).options(
                subqueryload(Questionnaire.concepts), subqueryload(Questionnaire.questions)
            )
            return query.get(questionnaire_id)

    def has_dup_semantic_version(self, session, questionnaire_id, semantic_version):
        """Return True if a history row already exists for this id/semantic-version pair."""
        record = session.query(QuestionnaireHistory)\
            .filter(QuestionnaireHistory.questionnaireId == questionnaire_id,
                    QuestionnaireHistory.semanticVersion == semantic_version)\
            .first()
        return record is not None

    def get_latest_questionnaire_with_concept(self, codeId):
        """Find the questionnaire most recently modified that has the specified concept code."""
        with self.session() as session:
            return (
                session.query(Questionnaire)
                .join(Questionnaire.concepts)
                .filter(QuestionnaireConcept.codeId == codeId)
                .order_by(Questionnaire.lastModified.desc())
                .options(subqueryload(Questionnaire.questions))
                .first()
            )

    def _make_history(self, questionnaire, concepts, questions):
        # pylint: disable=redefined-outer-name
        """Build a QuestionnaireHistory snapshot plus copies of the given children."""
        history = QuestionnaireHistory()
        history.fromdict(questionnaire.asdict(), allow_pk=True)
        for concept in concepts:
            new_concept = QuestionnaireConcept()
            new_concept.fromdict(concept.asdict())
            new_concept.questionnaireId = questionnaire.questionnaireId
            new_concept.questionnaireVersion = questionnaire.version
            history.concepts.append(new_concept)
        for question in questions:
            new_question = QuestionnaireQuestion()
            new_question.fromdict(question.asdict())
            new_question.questionnaireId = questionnaire.questionnaireId
            new_question.questionnaireVersion = questionnaire.version
            history.questions.append(new_question)
        return history

    def insert_with_session(self, session, questionnaire):
        # pylint: disable=redefined-outer-name
        """Insert a questionnaire at version 1 and record its first history row."""
        questionnaire.created = clock.CLOCK.now()
        questionnaire.lastModified = clock.CLOCK.now()
        questionnaire.version = 1
        # SQLAlchemy emits warnings unnecessarily when these collections aren't empty.
        # We don't want these to be cascaded now anyway, so point them at nothing, but save
        # the concepts and questions for use in history.
        concepts = list(questionnaire.concepts)
        questions = list(questionnaire.questions)
        questionnaire.concepts = []
        questionnaire.questions = []
        super(QuestionnaireDao, self).insert_with_session(session, questionnaire)
        # This is needed to assign an ID to the questionnaire, as the client doesn't need to provide
        # one.
        session.flush()
        # Set the ID in the resource JSON
        resource_json = json.loads(questionnaire.resource)
        resource_json["id"] = str(questionnaire.questionnaireId)
        questionnaire.semanticVersion = resource_json['version']
        questionnaire.resource = json.dumps(resource_json)
        history = self._make_history(questionnaire, concepts, questions)
        history.questionnaireId = questionnaire.questionnaireId
        QuestionnaireHistoryDao().insert_with_session(session, history)
        return questionnaire

    def _do_update(self, session, obj, existing_obj):
        """Bump the version, refresh timestamps, and sync the resource JSON before updating."""
        obj.lastModified = clock.CLOCK.now()
        obj.version = existing_obj.version + 1
        obj.created = existing_obj.created
        resource_json = json.loads(obj.resource)
        resource_json["id"] = str(obj.questionnaireId)
        obj.semanticVersion = resource_json['version']
        obj.resource = json.dumps(resource_json)
        super(QuestionnaireDao, self)._do_update(session, obj, existing_obj)

    def update_with_session(self, session, questionnaire):
        # pylint: disable=redefined-outer-name
        """Update a questionnaire and record a new history row for the new version."""
        super(QuestionnaireDao, self).update_with_session(session, questionnaire)
        QuestionnaireHistoryDao().insert_with_session(
            session, self._make_history(questionnaire, questionnaire.concepts, questionnaire.questions)
        )

    @classmethod
    def from_client_json(cls, resource_json, id_=None, expected_version=None, client_id=None):
        # pylint: disable=unused-argument
        """Build a Questionnaire model (with concepts/questions) from a FHIR resource dict.

        Raises BadRequest when the resource has no top-level group or no version.
        """
        # Parse the questionnaire to make sure it's valid, but preserve the original JSON
        # when saving.
        fhir_q = questionnaire.Questionnaire(resource_json)
        if not fhir_q.group:
            raise BadRequest("No top-level group found in questionnaire")
        if 'version' not in resource_json:
            raise BadRequest('No version info found in questionnaire')
        external_id = None
        if fhir_q.identifier and len(fhir_q.identifier) > 0:
            external_id = fhir_q.identifier[0].value
        semantic_desc = None
        irb_mapping = None
        if fhir_q.extension:
            for ext in fhir_q.extension:
                if ext.url == _SEMANTIC_DESCRIPTION_EXTENSION:
                    semantic_desc = ext.valueString
                if ext.url == _IRB_MAPPING_EXTENSION:
                    irb_mapping = ext.valueString
        q = Questionnaire(
            resource=json.dumps(resource_json),
            questionnaireId=id_,
            semanticVersion=expected_version,
            externalId=external_id,
            semanticDesc=semantic_desc,
            irbMapping=irb_mapping
        )
        # Assemble a map of (system, value) -> (display, code_type, parent_id) for passing into CodeDao.
        # Also assemble a list of (system, code) for concepts and (system, code, linkId) for questions,
        # which we'll use later when assembling the child objects.
        code_map, concepts, questions = cls._extract_codes(fhir_q.group)
        # Imported here to avoid a circular import at module load time.
        from rdr_service.dao.code_dao import CodeDao
        # Get or insert codes, and retrieve their database IDs.
        code_id_map = CodeDao().get_internal_id_code_map(code_map)
        # Now add the child objects, using the IDs in code_id_map
        cls._add_concepts(q, code_id_map, concepts)
        cls._add_questions(q, code_id_map, questions)
        return q

    def _validate_update(self, session, obj, existing_obj):
        """Validates that an update is OK before performing it. (Not applied on insert.)

        By default, validates that the object already exists, and if an expected semanticVersion ID
        is provided, that it matches. Also rejects updates that would reuse a semantic version
        already stored in this questionnaire's history.
        """
        if not existing_obj:
            # Fixed: previously interpolated the *builtin* `id`, which rendered
            # as `<built-in function id>` instead of the questionnaire id.
            raise NotFound('%s with id %s does not exist' % (self.model_type.__name__, obj.questionnaireId))
        if self.validate_version_match and existing_obj.semanticVersion != obj.semanticVersion:
            raise PreconditionFailed('Expected semanticVersion was %s; stored semanticVersion was %s' %
                                     (obj.semanticVersion, existing_obj.semanticVersion))
        resource_json = json.loads(obj.resource)
        exist_id = str(obj.questionnaireId)
        new_semantic_version = resource_json['version']
        if self.has_dup_semantic_version(session, exist_id, new_semantic_version):
            raise BadRequest('This semantic version already exist for this questionnaire id.')
        self._validate_model(session, obj)

    @classmethod
    def _add_concepts(cls, q, code_id_map, concepts):
        """Attach QuestionnaireConcept children for every (system, code) pair."""
        # NOTE(review): `.get(system, code)` assumes code_id_map exposes a
        # two-argument get((system, code)) keyed lookup from CodeDao; if it were
        # a plain dict this would treat `code` as a default value — confirm.
        for system, code in concepts:
            q.concepts.append(
                QuestionnaireConcept(
                    questionnaireId=q.questionnaireId,
                    questionnaireVersion=q.version,
                    codeId=code_id_map.get(system, code),
                )
            )

    @classmethod
    def _add_questions(cls, q, code_id_map, questions):
        """Attach QuestionnaireQuestion children for every extracted question tuple."""
        for system, code, linkId, repeats in questions:
            q.questions.append(
                QuestionnaireQuestion(
                    questionnaireId=q.questionnaireId,
                    questionnaireVersion=q.version,
                    linkId=linkId,
                    codeId=code_id_map.get(system, code),
                    repeats=repeats if repeats else False,
                )
            )

    @classmethod
    def _extract_codes(cls, group):
        """Walk the top-level FHIR group, returning (code_map, concepts, questions)."""
        code_map = {}
        concepts = []
        questions = []
        if group.concept:
            for concept in group.concept:
                # PPI_EXTRA_SYSTEM codes are deliberately excluded from the code map.
                if concept.system and concept.code and concept.system != PPI_EXTRA_SYSTEM:
                    code_map[(concept.system, concept.code)] = (concept.display, CodeType.MODULE, None)
                    concepts.append((concept.system, concept.code))
        cls._populate_questions(group, code_map, questions)
        return (code_map, concepts, questions)

    @classmethod
    def _populate_questions(cls, group, code_map, questions):
        """Recursively populate questions under this group."""
        if group.question:
            for question in group.question:
                # Capture any questions that have a link ID and single concept with a system and code
                if question.linkId and question.concept and len(question.concept) == 1:
                    concept = question.concept[0]
                    if concept.system and concept.code and concept.system != PPI_EXTRA_SYSTEM:
                        code_map[(concept.system, concept.code)] = (concept.display, CodeType.QUESTION, None)
                        questions.append((concept.system, concept.code, question.linkId, question.repeats))
                if question.group:
                    for sub_group in question.group:
                        cls._populate_questions(sub_group, code_map, questions)
                if question.option:
                    # Answer options also get registered as codes.
                    for option in question.option:
                        code_map[(option.system, option.code)] = (option.display, CodeType.ANSWER, None)
        if group.group:
            for sub_group in group.group:
                cls._populate_questions(sub_group, code_map, questions)
class QuestionnaireHistoryDao(BaseDao):
    """Read-side DAO for questionnaire version history.

    Every update to a questionnaire produces a new QuestionnaireHistory row
    (same questionnaireId, new version), and each history row owns its own
    copies of the concepts and questions. Old versions therefore remain
    referenceable by questionnaire responses, metrics, and participant
    summaries.

    Do not write through this DAO directly; QuestionnaireDao performs all
    history writes.
    """

    def __init__(self):
        super(QuestionnaireHistoryDao, self).__init__(QuestionnaireHistory)

    def get_id(self, obj):
        """History rows are keyed by the (questionnaireId, version) pair."""
        return [obj.questionnaireId, obj.version]

    def get_with_children_with_session(self, session, questionnaire_id_and_semantic_version):
        """Look up one history row (children eagerly loaded) by id + semantic version."""
        questionnaire_id = questionnaire_id_and_semantic_version[0]
        semantic_version = questionnaire_id_and_semantic_version[1]
        return (
            session.query(QuestionnaireHistory)
            .options(
                subqueryload(QuestionnaireHistory.concepts),
                subqueryload(QuestionnaireHistory.questions),
            )
            .filter(
                QuestionnaireHistory.questionnaireId == questionnaire_id,
                QuestionnaireHistory.semanticVersion == semantic_version,
            )
            .first()
        )

    def get_with_children(self, questionnaire_id_and_semantic_version):
        """Session-managing wrapper around get_with_children_with_session."""
        with self.session() as session:
            return self.get_with_children_with_session(session, questionnaire_id_and_semantic_version)
class QuestionnaireConceptDao(BaseDao):
    """DAO for QuestionnaireConcept rows (concept codes attached to a questionnaire version)."""

    def __init__(self):
        super(QuestionnaireConceptDao, self).__init__(QuestionnaireConcept)

    def get_id(self, obj):
        """Return the row's primary key."""
        return obj.questionnaireConceptId
class QuestionnaireQuestionDao(BaseDao):
    """DAO for QuestionnaireQuestion rows (questions attached to a questionnaire version)."""

    def __init__(self):
        super(QuestionnaireQuestionDao, self).__init__(QuestionnaireQuestion)

    def get_id(self, obj):
        """Return the row's primary key."""
        return obj.questionnaireQuestionId

    def get_all_with_session(self, session, ids):
        """Fetch all questions whose primary keys are in `ids` (empty input short-circuits)."""
        if not ids:
            return []
        query = session.query(QuestionnaireQuestion).filter(
            QuestionnaireQuestion.questionnaireQuestionId.in_(ids)
        )
        return query.all()
| rdr_service/dao/questionnaire_dao.py | 13,444 | Maintains version history for questionnaires.
All previous versions of a questionnaire are maintained (with the same questionnaireId value and
a new version value for each update.)
Old versions of questionnaires and their questions can still be referenced by questionnaire
responses, and are used when generating metrics / participant summaries, and in general
determining what answers participants gave to questions.
Concepts and questions live under a QuestionnaireHistory entry, such that when the questionnaire
gets updated new concepts and questions are created and existing ones are left as they were.
Do not use this DAO for write operations directly; instead use QuestionnaireDao.
Recursively populate questions under this group.
Validates that an update is OK before performing it. (Not applied on insert.)
By default, validates that the object already exists, and if an expected semanticVersion ID is provided,
that it matches.
Find the questionnaire most recently modified that has the specified concept code.
pylint: disable=redefined-outer-name pylint: disable=redefined-outer-name SQLAlchemy emits warnings unnecessarily when these collections aren't empty. We don't want these to be cascaded now anyway, so point them at nothing, but save the concepts and questions for use in history. This is needed to assign an ID to the questionnaire, as the client doesn't need to provide one. Set the ID in the resource JSON If the provider link changes, update the HPO ID on the participant and its summary. pylint: disable=redefined-outer-name pylint: disable=unused-argument Parse the questionnaire to make sure it's valid, but preserve the original JSON when saving. Assemble a map of (system, value) -> (display, code_type, parent_id) for passing into CodeDao. Also assemble a list of (system, code) for concepts and (system, code, linkId) for questions, which we'll use later when assembling the child objects. Get or insert codes, and retrieve their database IDs. Now add the child objects, using the IDs in code_id_map Capture any questions that have a link ID and single concept with a system and code | 2,120 | en | 0.86518 |
import json
import os
from urllib import request
from flask import current_app
from elastichq.model import ClusterDTO
from elastichq.vendor.elasticsearch.exceptions import NotFoundError
from .ConnectionService import ConnectionService
from ..globals import CACHE_REGION, LOG
class HQService:
    """Application-level service: ElasticHQ status plus per-cluster HQ settings CRUD."""

    def get_status(self):
        """Return app status: installed version, latest stable version, and known clusters.

        The stable-version lookup is best-effort: network failures or a bad
        payload are logged and leave ``current_stable_version`` as None.
        """
        stable_version = None
        try:
            fp = request.urlopen("http://www.elastichq.org/currversion.json", timeout=10)
            try:
                version_str = fp.read().decode("utf-8")
            finally:
                fp.close()
            # Parse inside the try: when the fetch fails, version_str would be
            # empty and json.loads("") raises; the original crashed here.
            stable_version = json.loads(version_str).get("version", None)
        except Exception as ex:
            # Lazy %-style logging args; passing `ex` without a placeholder
            # (as before) is a logging misuse and the message was dropped.
            LOG.error("error retrieving version information: %s", ex)

        # Imported here to avoid a circular import at module load time.
        from elastichq.service import ClusterService

        clusters = ClusterService().get_clusters(create_if_missing=False)
        schema = ClusterDTO(many=True)
        result = schema.dump(clusters)

        status = {
            "name": "ElasticHQ",
            "installed_version": current_app.config.get('API_VERSION'),
            "current_stable_version": stable_version,
            "tagline": "You know, for Elasticsearch",
            "clusters": result.data,
            "default_url": os.environ.get('HQ_DEFAULT_URL', current_app.config.get('DEFAULT_URL'))
        }
        return status

    @CACHE_REGION.cache_on_arguments()
    def get_settings(self, cluster_name):
        """Return HQ settings stored in the given cluster, falling back to app defaults.

        Cached per cluster_name; update_settings/delete_settings invalidate the cache.
        """
        try:
            connection = ConnectionService().get_connection(cluster_name)
            settings_doc = connection.get_source(
                index=current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'],
                id=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_id'],
                doc_type=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_type'])
            return settings_doc
        except NotFoundError:
            # No settings doc yet: create one (when metric storage is enabled)
            # and serve the application defaults for this call.
            if current_app.config.get('HQ_CLUSTER_SETTINGS')['store_metrics']:
                self.save_settings(cluster_name)
            return current_app.config.get('HQ_CLUSTER_SETTINGS')

    def save_settings(self, cluster_name, body=None):
        """Index the settings document into the cluster (defaults when body is None)."""
        try:
            if body is None:
                body = current_app.config.get('HQ_CLUSTER_SETTINGS')
            connection = ConnectionService().get_connection(cluster_name)
            connection.index(index=current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'],
                             doc_type=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_type'],
                             id=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_id'],
                             body=body, refresh=True)
        except NotFoundError as nfe:
            LOG.error(
                "Unable to save index. Is action.auto_create_index disabled in the ES configuration file? %s",
                nfe)

    def update_settings(self, cluster_name, body=None):
        """Merge `body` over the stored settings, persist, and return the new settings."""
        if body is None:
            body = current_app.config.get('HQ_CLUSTER_SETTINGS')

        current_settings = self.get_settings(cluster_name)
        # Fields absent from `body` keep their currently stored values.
        new_settings = {
            'doc_id': current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_id'],
            'index_name': current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'],
            'version': 1,
            'doc_type': current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_type'],
            'store_metrics': body.get('store_metrics', current_settings.get('store_metrics')),
            'websocket_interval': body.get('websocket_interval',
                                           current_settings.get('websocket_interval')),
            'historic_poll_interval': body.get('historic_poll_interval',
                                               current_settings.get('historic_poll_interval')),
            'historic_days_to_store': body.get('historic_days_to_store',
                                               current_settings.get('historic_days_to_store')),
            'show_dot_indices': body.get('show_dot_indices',
                                         current_settings.get('show_dot_indices'))
        }
        connection = ConnectionService().get_connection(cluster_name)
        connection.update(index=current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'],
                          doc_type=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_type'],
                          id=current_app.config.get('HQ_CLUSTER_SETTINGS')['doc_id'],
                          body={"doc": new_settings}, refresh=True)
        self.get_settings.invalidate(self, cluster_name)  # alter cache
        return new_settings

    def delete_settings(self, cluster_name):
        """Delete the whole settings index for this cluster and drop the cached value."""
        connection = ConnectionService().get_connection(cluster_name)
        self.get_settings.invalidate(self, cluster_name)  # alter cache
        return connection.indices.delete(index=current_app.config.get('HQ_CLUSTER_SETTINGS')['index_name'])
| elastichq/service/HQService.py | 5,017 | alter cache alter cache | 23 | de | 0.261264 |
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class NoodlesAndCompanySpider(scrapy.Spider):
    """Scrapes Noodles & Company store locations from locations.noodles.com."""
    name = "noodles_and_company"
    item_attributes = { 'brand': "Noodles and Company" }
    allowed_domains = ["locations.noodles.com"]
    start_urls = (
        'https://locations.noodles.com/',
    )

    def store_hours(self, store_hours):
        """Convert the site's JSON hours structure into an opening_hours string.

        Consecutive days with identical hours are collapsed into ranges
        (e.g. ``Mo-Fr 09:00-17:00``); 24/7 schedules are detected specially.
        Returns an empty string when no hour data is present.
        """
        day_groups = []
        this_day_group = None
        for day_info in store_hours:
            day = day_info['day'][:2].title()

            hour_intervals = []
            for interval in day_info['intervals']:
                # Times arrive as integers like 930 / 2130; zero-pad to HHMM.
                f_time = str(interval['start']).zfill(4)
                t_time = str(interval['end']).zfill(4)
                hour_intervals.append('{}:{}-{}:{}'.format(
                    f_time[0:2],
                    f_time[2:4],
                    t_time[0:2],
                    t_time[2:4],
                ))
            hours = ','.join(hour_intervals)

            if not this_day_group:
                this_day_group = {
                    'from_day': day,
                    'to_day': day,
                    'hours': hours
                }
            elif this_day_group['hours'] != hours:
                day_groups.append(this_day_group)
                this_day_group = {
                    'from_day': day,
                    'to_day': day,
                    'hours': hours
                }
            elif this_day_group['hours'] == hours:
                this_day_group['to_day'] = day
        if this_day_group:
            # Guard against empty input: the original appended None here and
            # crashed below when subscripting it.
            day_groups.append(this_day_group)

        opening_hours = ""
        if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
            opening_hours = '24/7'
        else:
            for day_group in day_groups:
                if day_group['from_day'] == day_group['to_day']:
                    opening_hours += '{from_day} {hours}; '.format(**day_group)
                elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
                    opening_hours += '{hours}; '.format(**day_group)
                else:
                    opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
            opening_hours = opening_hours[:-2]

        return opening_hours

    def parse(self, response):
        """Follow state links (single-segment hrefs) and direct location links."""
        for state_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re(r'(^[^\/]+$)'):
            yield scrapy.Request(
                response.urljoin(state_url),
                callback=self.parse_state,
            )

        for location_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re(r'(^[^\/]+\/[^\/]+\/.+$)'):
            yield scrapy.Request(
                response.urljoin(location_url),
                callback=self.parse_location,
            )

    def parse_state(self, response):
        # For counties that have multiple locations, go to a county page listing, and go to each individual location from there.
        # (Regexes made raw strings for consistency with parse() and to avoid invalid-escape warnings.)
        for county_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re(r'(^[^\/]+\/[^\/]+$)'):
            yield scrapy.Request(
                response.urljoin(county_url),
                callback=self.parse_county,
            )

        # For counties that have only one location, go directly to that location page.
        for location_url in response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').re(r'(^[^\/]+\/[^\/]+\/.+$)'):
            yield scrapy.Request(
                response.urljoin(location_url),
                callback=self.parse_location,
            )

    def parse_county(self, response):
        """Follow each location link on a county listing page."""
        for location_block in response.xpath('//div[@class="c-location-grid-item"]'):
            location_url = location_block.xpath('.//a[@class="c-location-grid-item-link"]/@href').extract_first()
            yield scrapy.Request(
                response.urljoin(location_url),
                callback=self.parse_location,
            )

    def parse_location(self, response):
        """Extract one store's coordinates, address, phone, and hours into a GeojsonPointItem."""
        properties = {
            'lon': float(response.xpath('//span/meta[@itemprop="longitude"]/@content').extract_first()),
            'lat': float(response.xpath('//span/meta[@itemprop="latitude"]/@content').extract_first()),
            'addr_full': response.xpath('//span[@class="c-address-street-1"]/text()').extract_first().strip(),
            'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
            'state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
            'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first().strip(),
            'phone': response.xpath('//span[@itemprop="telephone"]/text()').extract_first(),
            'name': response.xpath('//span[@class="location-name-geo"]/text()').extract_first(),
            'ref': response.url,
            'website': response.url,
        }

        hours_elem = response.xpath('//div[@class="c-location-hours-details-wrapper js-location-hours"]/@data-days')
        opening_hours = None
        if hours_elem:
            hours = json.loads(hours_elem.extract_first())
            opening_hours = self.store_hours(hours)
        if opening_hours:
            properties['opening_hours'] = opening_hours

        yield GeojsonPointItem(**properties)
| locations/spiders/noodles_and_company.py | 5,371 | -*- coding: utf-8 -*- For counties that have multiple locations, go to a county page listing, and go to each individual location from there. For counties that have only one location, go directly to that location page. | 217 | en | 0.964171 |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import scipy as sp
from PIL import Image
import six
import networkx
# Print the installed version of each dependency, right-aligned by name.
for m in (np, sp, Image, six, networkx):
    if m is None:  # idiomatic identity check (was: `not m is None`)
        continue
    if m is Image:
        # Pillow 6.0.0 and above have removed the 'VERSION' attribute
        # https://bitbucket.org/rptlab/reportlab/issues/176/incompatibility-with-pillow-600
        try:
            im_ver = Image.__version__
        except AttributeError:
            im_ver = Image.VERSION
        print('PIL'.rjust(10), ' ', im_ver)
    else:
        print(m.__name__.rjust(10), ' ', m.__version__)
| tools/build_versions.py | 662 | !/usr/bin/env python Pillow 6.0.0 and above have removed the 'VERSION' attribute https://bitbucket.org/rptlab/reportlab/issues/176/incompatibility-with-pillow-600 | 162 | en | 0.669261 |
from .base import SimIRExpr
from ... import s_options as o
from ...s_action import SimActionData
class SimIRExpr_RdTmp(SimIRExpr):
    """Evaluates a VEX RdTmp expression: read the value of a temporary."""

    def _execute(self):
        fastpath_missing = (
            o.SUPER_FASTPATH in self.state.options
            and self._expr.tmp not in self.state.scratch.temps
        )
        if fastpath_missing:
            # Under SUPER_FASTPATH an unset temp reads as zero instead of erroring.
            self.expr = self.state.se.BVV(0, self.size_bits())
        else:
            self.expr = self.state.scratch.tmp_expr(self._expr.tmp)

        # finish it and save the tmp reference
        self._post_process()

        if o.TRACK_TMP_ACTIONS in self.state.options:
            action = SimActionData(
                self.state,
                SimActionData.TMP,
                SimActionData.READ,
                tmp=self._expr.tmp,
                size=self.size_bits(),
                data=self.expr,
            )
            self.actions.append(action)
| simuvex/vex/expressions/rdtmp.py | 727 | finish it and save the tmp reference | 36 | en | 0.732176 |
# coding: utf-8
# # Load and preprocess 2012 data
#
# We will, over time, look over other years. Our current goal is to explore the features of a single year.
#
# ---
# In[1]:
# %pylab makes numpy available as `np` (used by the cleaning helpers below).
get_ipython().magic('pylab --no-import-all inline')
import pandas as pd
# ## Load the data.
#
# ---
#
# If this fails, be sure that you've saved your own data in the prescribed location, then retry.
# In[2]:
# Raw 2012 ANES survey data, Stata format.
file = "../data/interim/2012data.dta"
df_rawest = pd.read_stata(file)
# In[7]:
# Quick look at missing survey weights (result is displayed by the notebook).
df_rawest.weight_full.isnull()
# In[8]:
# ANES variable codes to keep; each inline note names the question's topic.
good_columns = [#'campfin_limcorp', # "Should gov be able to limit corporate contributions"
                'pid_x', # Your own party identification
                'abortpre_4point', # Abortion
                'trad_adjust', # Moral Relativism
                'trad_lifestyle', # "Newer" lifetyles
                'trad_tolerant', # Moral tolerance
                'trad_famval', # Traditional Families
                'gayrt_discstd_x', # Gay Job Discrimination
                'gayrt_milstd_x', # Gay Military Service
                'inspre_self', # National health insurance
                'guarpr_self', # Guaranteed Job
                'spsrvpr_ssself', # Services/Spending
                'aa_work_x', # Affirmative Action ( Should this be aapost_hire_x? )
                'resent_workway',
                'resent_slavery',
                'resent_deserve',
                'resent_try',
                ]
df_raw = df_rawest[good_columns]
# ## Clean the data
# ---
# In[9]:
def convert_to_int(s):
    """Turn ANES data entry into an integer.

    >>> convert_to_int("1. Govt should provide many fewer services")
    1
    >>> convert_to_int("2")
    2
    """
    try:
        # Entries look like "1. Some label"; keep the part before the dot.
        return int(s.partition('.')[0])
    except ValueError:
        # Fixed: `warnings` was never imported in this notebook-derived
        # script, so this branch raised NameError instead of warning.
        import warnings
        warnings.warn("Couldn't convert: "+s)
        return np.nan
    except AttributeError:
        # Non-string input (already numeric, or NaN): pass it through.
        return s
def negative_to_nan(value):
    """Convert negative values to missing.

    ANES codes various non-answers as negative numbers.
    For instance, if a question does not pertain to the
    respondent.
    """
    return np.nan if value < 0 else value
def lib1_cons2_neutral3(x):
    """Rearrange questions where 3 is neutral."""
    if x == 1:
        return x
    return x - 3
def liblow_conshigh(x):
    """Reorder questions where the liberal response is low."""
    return x * -1
def dem_edu_special_treatment(x):
    """Eliminate negative numbers and {95. Other}"""
    if x < 0 or x == 95:
        return np.nan
    return x
# Coerce every cell to int where possible, then recode negative ANES
# "non-answer" codes as missing.
df = df_raw.applymap(convert_to_int)
df = df.applymap(negative_to_nan)
# Flip selected scales so response direction is consistent across questions.
df.abortpre_4point = df.abortpre_4point.apply(lambda x: np.nan if x not in {1, 2, 3, 4} else -x)
df.loc[:, 'trad_lifestyle'] = df.trad_lifestyle.apply(lambda x: -x) # 1: moral relativism, 5: no relativism
df.loc[:, 'trad_famval'] = df.trad_famval.apply(lambda x: -x) # Tolerance. 1: tolerance, 7: not
df.loc[:, 'spsrvpr_ssself'] = df.spsrvpr_ssself.apply(lambda x: -x)
df.loc[:, 'resent_workway'] = df.resent_workway.apply(lambda x: -x)
df.loc[:, 'resent_try'] = df.resent_try.apply(lambda x: -x)
# Map ANES variable codes to readable names (order matches good_columns).
df.rename(inplace=True, columns=dict(zip(
    good_columns,
    ["PartyID",
     "Abortion",
     "MoralRelativism",
     "NewerLifestyles",
     "MoralTolerance",
     "TraditionalFamilies",
     "GayJobDiscrimination",
     "GayMilitaryService",
     "NationalHealthInsurance",
     "StandardOfLiving",
     "ServicesVsSpending",
     "AffirmativeAction",
     "RacialWorkWayUp",
     "RacialGenerational",
     "RacialDeserve",
     "RacialTryHarder",
     ]
)))
# In[10]:
print("Variables now available: df")
# In[11]:
df_rawest.pid_x.value_counts()
# In[12]:
df.PartyID.value_counts()
# In[13]:
df.describe()
# In[14]:
df.head()
# In[21]:
df.to_csv("../data/processed/2012.csv")
# In[15]:
df_rawest.weight_full.to_csv("../data/processed/2012_weights.csv")
# In[16]:
# Fixed typo: was `df_rawest.shapee`, which raises AttributeError.
df_rawest.shape
# In[ ]:
| notebooks/as_script/1.0-adm-load-data-2012-Copy1.py | 3,705 | Turn ANES data entry into an integer.
>>> convert_to_int("1. Govt should provide many fewer services")
1
>>> convert_to_int("2")
2
Eliminate negative numbers and {95. Other}
Rearrange questions where 3 is neutral.
Reorder questions where the liberal response is low.
Convert negative values to missing.
ANES codes various non-answers as negative numbers.
For instance, if a question does not pertain to the
respondent.
coding: utf-8 Load and preprocess 2012 data We will, over time, look over other years. Our current goal is to explore the features of a single year. --- In[1]: Load the data. --- If this fails, be sure that you've saved your own data in the prescribed location, then retry. In[2]: In[7]: In[8]:'campfin_limcorp', "Should gov be able to limit corporate contributions" Your own party identification Abortion Moral Relativism "Newer" lifetyles Moral tolerance Traditional Families Gay Job Discrimination Gay Military Service National health insurance Guaranteed Job Services/Spending Affirmative Action ( Should this be aapost_hire_x? ) Clean the data --- In[9]: 1: moral relativism, 5: no relativism Tolerance. 1: tolerance, 7: not In[10]: In[11]: In[12]: In[13]: In[14]: In[21]: In[15]: In[16]: In[ ]: | 1,233 | en | 0.799784 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading machine types."""
from googlecloudsdk.calliope import base
class MachineTypes(base.Group):
  """Read Google Compute Engine virtual machine types."""

  # Help metadata consumed by the calliope CLI framework; declared as a
  # class attribute instead of being assigned after the class definition.
  detailed_help = {
      'brief': 'Read Google Compute Engine virtual machine types',
  }
| google-cloud-sdk/.install/.backup/lib/surface/compute/machine_types/__init__.py | 871 | Read Google Compute Engine virtual machine types.
Commands for reading machine types.
Copyright 2014 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 656 | en | 0.859282 |
"""
WSGI config for rara_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment (e.g. the WSGI
# server's config) has already set a settings module.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rara_api.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| rara_api/wsgi.py | 393 | WSGI config for rara_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ | 214 | en | 0.781004 |
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from codecs import open
from setuptools import setup
VERSION = "2.14.0"
# If we have source, validate that our version numbers match
# This should prevent uploading releases with mismatched versions.
try:
    with open('azure/cli/core/__init__.py', 'r', encoding='utf-8') as f:
        content = f.read()
except OSError:
    # No local source tree (e.g. installing from an sdist/wheel): skip check.
    pass
else:
    import re
    import sys
    m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
    if not m:
        print('Could not find __version__ in azure/cli/core/__init__.py')
        sys.exit(1)
    if m.group(1) != VERSION:
        print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
        sys.exit(1)
# Trove classifiers describing supported Python versions and project maturity.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'License :: OSI Approved :: MIT License',
]
# Runtime dependencies; pins follow the upstream azure-cli release process.
DEPENDENCIES = [
    'adal~=1.2.3',
    'argcomplete~=1.8',
    'azure-cli-telemetry==1.0.6',
    'colorama~=0.4.1',
    'humanfriendly>=4.7,<9.0',
    'jmespath',
    'knack==0.7.2',
    'msal~=1.0.0',
    'msal-extensions~=0.1.3',
    'msrest>=0.4.4',
    'msrestazure>=0.6.3',
    'paramiko>=2.0.8,<3.0.0',
    'PyJWT',
    'pyopenssl>=17.1.0',  # https://github.com/pyca/pyopenssl/pull/612
    'requests~=2.22',
    'six~=1.12',
    'pkginfo>=1.5.0.1',
    'azure-mgmt-resource==10.2.0',
    'azure-mgmt-core==1.2.0'
]
TESTS_REQUIRE = [
    'mock'
]
# Long description shown on PyPI is the README plus the changelog.
with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()
setup(
    name='azure-cli-core',
    version=VERSION,
    description='Microsoft Azure Command-Line Tools Core Module',
    long_description=README + '\n\n' + HISTORY,
    license='MIT',
    author='Microsoft Corporation',
    author_email='azpycli@microsoft.com',
    url='https://github.com/Azure/azure-cli',
    zip_safe=False,
    classifiers=CLASSIFIERS,
    packages=[
        'azure.cli.core',
        'azure.cli.core.commands',
        'azure.cli.core.extension',
        'azure.cli.core.profiles',
    ],
    install_requires=DEPENDENCIES,
    extras_require={
        # Backports for legacy interpreters (kept for historical installs).
        ":python_version<'3.4'": ['enum34'],
        ":python_version<'2.7.9'": ['pyopenssl', 'ndg-httpsclient', 'pyasn1'],
        ':python_version<"3.0"': ['futures'],
        "test": TESTS_REQUIRE,
    },
    tests_require=TESTS_REQUIRE,
    package_data={'azure.cli.core': ['auth_landing_pages/*.html']}
)
| src/azure-cli-core/setup.py | 3,095 | !/usr/bin/env python -------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------------------------- If we have source, validate that our version numbers match This should prevent uploading releases with mismatched versions. https://github.com/pyca/pyopenssl/pull/612 | 524 | en | 0.57808 |
# -*- coding: utf-8 -*-
#
import re
from collections import OrderedDict
from copy import deepcopy
from ._http import HTTPStatus
#copied from sanic router
# Maps a route parameter type name to (python type, regex fragment).
REGEX_TYPES = {
    'string': (str, r'[^/]+'),
    'int': (int, r'\d+'),
    'number': (float, r'[0-9\\.]+'),
    'alpha': (str, r'[A-Za-z]+'),
}
# Used by camel_to_dash() to insert underscores at word boundaries.
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')
# Public API of this module.
__all__ = ('merge', 'camel_to_dash', 'default_id', 'not_none', 'not_none_sorted', 'unpack')
def merge(first, second):
    """
    Recursively merges two dictionaries.

    Second dictionary values will take precedence over those from the first one.
    Nested dictionaries are merged too.

    :param dict first: The first dictionary
    :param dict second: The second dictionary
    :return: the resulting merged dictionary
    :rtype: dict
    """
    # A non-dict second operand simply replaces the first value.
    if not isinstance(second, dict):
        return second
    merged = deepcopy(first)
    for key, new_value in second.items():
        if key in merged and isinstance(merged[key], dict):
            merged[key] = merge(merged[key], new_value)
        else:
            merged[key] = deepcopy(new_value)
    return merged
def camel_to_dash(value):
    '''
    Transform a CamelCase string into a low_dashed one

    :param str value: a CamelCase string to transform
    :return: the low_dashed string
    :rtype: str
    '''
    # Insert an underscore before each capitalized word, then lowercase.
    underscored = ALL_CAP_RE.sub(r'\1_\2', FIRST_CAP_RE.sub(r'\1_\2', value))
    return underscored.lower()
def default_id(resource, method):
    '''Default operation ID generator'''
    # e.g. ("UserList", "get") -> "get_user_list"
    return '{0}_{1}'.format(method, camel_to_dash(resource))
def not_none(data):
    '''
    Remove all keys where value is None

    :param dict data: A dictionary with potentially some values set to None
    :return: The same dictionary without the keys with values to ``None``
    :rtype: dict
    '''
    return {key: value for key, value in data.items() if value is not None}
def not_none_sorted(data):
    '''
    Remove all keys where value is None

    :param OrderedDict data: A dictionary with potentially some values set to None
    :return: The same dictionary without the keys with values to ``None``
    :rtype: OrderedDict
    '''
    ordered_items = sorted(data.items())
    return OrderedDict([(k, v) for k, v in ordered_items if v is not None])
def unpack(response, default_code=HTTPStatus.OK):
    '''
    Unpack a Flask standard response.

    Flask response can be:
    - a single value
    - a 2-tuple ``(value, code)``
    - a 3-tuple ``(value, code, headers)``

    .. warning::

        When using this function, you must ensure that the tuple is not the response data.
        To do so, prefer returning list instead of tuple for listings.

    :param response: A Flask style response
    :param int default_code: The HTTP code to use as default if none is provided
    :return: a 3-tuple ``(data, code, headers)``
    :rtype: tuple
    :raise ValueError: if the response does not have one of the expected format
    '''
    if not isinstance(response, tuple):
        # Bare value: no explicit code or headers were supplied.
        return response, default_code, {}
    length = len(response)
    if length == 1:
        return response[0], default_code, {}
    if length == 2:
        value, status = response
        return value, status, {}
    if length == 3:
        value, status, headers = response
        return value, status or default_code, headers
    raise ValueError('Too many response values')
def get_accept_mimetypes(request):
    '''Return an OrderedDict of the request's ``Accept`` header entries,
    keyed by ``(mimetype, 1)`` tuples, preserving header order.
    Returns an empty dict when no ``Accept`` header is present.
    '''
    header = request.headers.get('accept', None)
    if header is None:
        return {}
    result = OrderedDict()
    # keep the order they appear!
    for mimetype in str(header).split(','):
        result[(mimetype, 1,)] = mimetype
    return result
def best_match_accept_mimetype(request, representations, default=None):
    '''Pick the mimetype from the request's ``Accept`` header that best
    matches the offered *representations*; fall back to *default* when
    nothing matches (implicitly returns ``None`` if *default* is not hit).
    '''
    if representations is None or len(representations) < 1:
        return default
    try:
        accepted = get_accept_mimetypes(request)
        if accepted is None or len(accepted) < 1:
            return default
        # Pass 1: exact matches, in the order they appear in the header.
        for mimetype, _quality in accepted:
            if mimetype in representations:
                return mimetype
        # Pass 2: parametrized types like "application/json;charset=utf8",
        # matched on the part before the ';'.
        for mimetype, _quality in accepted:
            bare_type = str(mimetype).split(';', 1)[0]
            if bare_type in representations:
                return bare_type
        # Pass 3: a wildcard entry means the client accepts the default.
        for mimetype, _quality in accepted:
            if mimetype in ("*", "*/*", "*.*"):
                return default
    except (AttributeError, KeyError):
        return default
def parse_rule(parameter_string):
    """Parse a parameter string into its constituent name, type, and
    pattern

    For example:
        `parse_parameter_string('<param_one:[A-z]>')` ->
            ('param_one', str, '[A-z]')

    :param parameter_string: String to parse
    :return: tuple containing
        (parameter_name, parameter_type, parameter_pattern)
    """
    # We could receive NAME or NAME:PATTERN, with optional "/" prefix and
    # "<...>" wrapping.
    if str(parameter_string).startswith('/'):
        parameter_string = parameter_string[1:]
    stripped = str(parameter_string).strip('<>')
    name, colon, pattern = stripped.partition(':')
    if not colon:
        pattern = 'string'
    # Resolve well-known type names; anything else is a literal regex.
    _type, pattern = REGEX_TYPES.get(pattern, (str, pattern))
    return name, _type, pattern
| sanic_restplus/utils.py | 5,739 | Transform a CamelCase string into a low_dashed one
:param str value: a CamelCase string to transform
:return: the low_dashed string
:rtype: str
Default operation ID generator
Recursively merges two dictionaries.
Second dictionary values will take precedence over those from the first one.
Nested dictionaries are merged too.
:param dict first: The first dictionary
:param dict second: The second dictionary
:return: the resulting merged dictionary
:rtype: dict
Remove all keys where value is None
:param dict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: dict
Remove all keys where value is None
:param OrderedDict data: A dictionary with potentially some values set to None
:return: The same dictionary without the keys with values to ``None``
:rtype: OrderedDict
Parse a parameter string into its constituent name, type, and
pattern
For example:
`parse_parameter_string('<param_one:[A-z]>')` ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern)
Unpack a Flask standard response.
Flask response can be:
- a single value
- a 2-tuple ``(value, code)``
- a 3-tuple ``(value, code, headers)``
.. warning::
When using this function, you must ensure that the tuple is not the response data.
To do so, prefer returning list instead of tuple for listings.
:param response: A Flask style response
:param int default_code: The HTTP code to use as default if none is provided
:return: a 3-tuple ``(data, code, headers)``
:rtype: tuple
:raise ValueError: if the response does not have one of the expected format
-*- coding: utf-8 -*-copied from sanic router data only data only as tuple data and code data, code and headers keep the order they appear! find exact matches, in the order they appear in the `Accept:` header match special types, like "application/json;charset=utf8" where the first half matches. if _none_ of those don't match, then fallback to wildcard matching We could receive NAME or NAME:PATTERN Pull from pre-configured types | 2,151 | en | 0.702361 |
import asyncio
import io
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon.tl.functions.users import GetFullUserRequest
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, LESS_SPAMMY
from userbot.utils import admin_cmd
# Per-chat count of warning messages already sent to an unapproved user.
PM_WARNS = {}
# Last warning message sent per chat, so it can be deleted before re-sending.
PREV_REPLY_MESSAGE = {}
# Cache of resolved Telegram entities, keyed by user id.
CACHE = {}
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "**No name set yet nibba, check pinned message in** @XtraTgBot"
# Final message sent right before blocking a spammer.
USER_BOT_WARN_ZERO = "`You were spamming my peru master's inbox, henceforth your retarded lame ass has been blocked by my master's userbot.` **Now GTFO, i'm playing minecraft** "
# Standard auto-reply sent to unapproved users on each incoming PM.
USER_BOT_NO_WARN = ("[Please wait for my boss's permission](tg://user?id=742506768)\n\n"
                    f"{DEFAULTUSER}'s` inbox.\n\n"
                    "Leave your name, phone number, address and 10k$ and hopefully you'll get a reply within 2 light years.`\n\n"
                    "** Send** `/start` ** so that we can decide why you're here.**")
if Var.PRIVATE_GROUP_ID is not None:
    # NOTE(review): `Var`, `command`, `bot` and `borg` are not imported in this
    # module's visible import block; presumably injected by the userbot
    # framework at plugin-load time -- confirm.
    # .approve [reason] -- whitelist the current private chat and stop warning.
    @command(pattern="^.approve ?(.*)")
    async def approve_p_m(event):
        if event.fwd_from:
            return
        replied_user = await event.client(GetFullUserRequest(event.chat_id))
        firstname = replied_user.user.first_name
        reason = event.pattern_match.group(1)
        chat = await event.get_chat()
        if event.is_private:
            if not pmpermit_sql.is_approved(chat.id):
                # Reset warning state and delete the pending warning message.
                if chat.id in PM_WARNS:
                    del PM_WARNS[chat.id]
                if chat.id in PREV_REPLY_MESSAGE:
                    await PREV_REPLY_MESSAGE[chat.id].delete()
                    del PREV_REPLY_MESSAGE[chat.id]
                pmpermit_sql.approve(chat.id, reason)
                await event.edit("Approved Nibba [{}](tg://user?id={})".format(firstname, chat.id))
            await asyncio.sleep(3)
            await event.delete()
    # Auto-approve anyone the owner messages first.
    @bot.on(events.NewMessage(outgoing=True))
    async def you_dm_niqq(event):
        if event.fwd_from:
            return
        chat = await event.get_chat()
        if event.is_private:
            if not pmpermit_sql.is_approved(chat.id):
                if not chat.id in PM_WARNS:
                    pmpermit_sql.approve(chat.id, "outgoing")
                    bruh = "__Added user to approved pms cuz outgoing message >~<__"
                    rko = await borg.send_message(event.chat_id, bruh)
                    await asyncio.sleep(3)
                    await rko.delete()
    # .block [reason] -- revoke approval and block the user on Telegram.
    @command(pattern="^.block ?(.*)")
    async def block_p_m(event):
        if event.fwd_from:
            return
        replied_user = await event.client(GetFullUserRequest(event.chat_id))
        firstname = replied_user.user.first_name
        reason = event.pattern_match.group(1)
        chat = await event.get_chat()
        if event.is_private:
            if pmpermit_sql.is_approved(chat.id):
                pmpermit_sql.disapprove(chat.id)
                await event.edit("\n Now You Can't Message Me..[{}](tg://user?id={})".format(firstname, chat.id))
                await asyncio.sleep(3)
                await event.client(functions.contacts.BlockRequest(chat.id))
    # .listapproved -- show all approved chats; sends a file if over the
    # Telegram message-length limit.
    # NOTE(review): this handler reuses the name `approve_p_m`, shadowing the
    # earlier handler at module level; registration still works because the
    # decorator captures each function when it runs.
    @command(pattern="^.listapproved")
    async def approve_p_m(event):
        if event.fwd_from:
            return
        approved_users = pmpermit_sql.get_all_approved()
        APPROVED_PMs = "Current Approved PMs\n"
        if len(approved_users) > 0:
            for a_user in approved_users:
                if a_user.reason:
                    APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) for {a_user.reason}\n"
                else:
                    APPROVED_PMs += f"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id})\n"
        else:
            APPROVED_PMs = "no Approved PMs (yet)"
        if len(APPROVED_PMs) > 4095:
            with io.BytesIO(str.encode(APPROVED_PMs)) as out_file:
                out_file.name = "approved.pms.text"
                await event.client.send_file(
                    event.chat_id,
                    out_file,
                    force_document=True,
                    allow_cache=False,
                    caption="Current Approved PMs",
                    reply_to=event
                )
                await event.delete()
        else:
            await event.edit(APPROVED_PMs)
@bot.on(events.NewMessage(incoming=True))
async def on_new_private_message(event):
    """Gate every incoming private message through the PM-permit check,
    skipping self, bots, verified accounts and already-approved chats.
    """
    if event.from_id == bot.uid:
        return
    if Var.PRIVATE_GROUP_ID is None:
        return
    if not event.is_private:
        return
    message_text = event.message.message
    chat_id = event.from_id
    # NOTE(review): current_message_text is never used below -- dead variable.
    current_message_text = message_text.lower()
    if USER_BOT_NO_WARN == message_text:
        # userbot's should not reply to other userbot's
        # https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
        return
    # Resolve the sender entity once and cache it to avoid repeated lookups.
    if event.from_id in CACHE:
        sender = CACHE[event.from_id]
    else:
        sender = await bot.get_entity(event.from_id)
        CACHE[event.from_id] = sender
    if chat_id == bot.uid:
        # don't log Saved Messages
        return
    if sender.bot:
        # don't log bots
        return
    if sender.verified:
        # don't log verified accounts
        return
    # Ignore messages that look like replies to the warning prompt.
    if any([x in event.raw_text for x in ("/start", "1", "2", "3", "4", "5")]):
        return
    if not pmpermit_sql.is_approved(chat_id):
        # pm permit
        await do_pm_permit_action(chat_id, event)
async def do_pm_permit_action(chat_id, event):
    """Warn an unapproved sender; after Config.MAX_FLOOD_IN_P_M_s warnings,
    block them and report the block to the private log group.
    """
    if chat_id not in PM_WARNS:
        PM_WARNS.update({chat_id: 0})
    if PM_WARNS[chat_id] == Config.MAX_FLOOD_IN_P_M_s:
        # Warning limit hit: send the final notice, then block the user.
        r = await event.reply(USER_BOT_WARN_ZERO)
        await asyncio.sleep(3)
        await event.client(functions.contacts.BlockRequest(chat_id))
        if chat_id in PREV_REPLY_MESSAGE:
            await PREV_REPLY_MESSAGE[chat_id].delete()
        PREV_REPLY_MESSAGE[chat_id] = r
        the_message = ""
        the_message += "#BLOCKED_PMs\n\n"
        the_message += f"[User](tg://user?id={chat_id}): {chat_id}\n"
        the_message += f"Message Count: {PM_WARNS[chat_id]}\n"
        # the_message += f"Media: {message_media}"
        # NOTE(review): bare `except` below swallows all errors (including
        # CancelledError); deliberate best-effort logging, but worth narrowing.
        try:
            await event.client.send_message(
                entity=Var.PRIVATE_GROUP_ID,
                message=the_message,
                # reply_to=,
                # parse_mode="html",
                link_preview=False,
                # file=message_media,
                silent=True
            )
            return
        except:
            return
    # Below the limit: (re-)send the standard warning and bump the counter.
    r = await event.reply(USER_BOT_NO_WARN)
    PM_WARNS[chat_id] += 1
    if chat_id in PREV_REPLY_MESSAGE:
        await PREV_REPLY_MESSAGE[chat_id].delete()
    PREV_REPLY_MESSAGE[chat_id] = r
| userbot/plugins/pmpermit.py | 7,096 | userbot's should not reply to other userbot's https://core.telegram.org/bots/faqwhy-doesn-39t-my-bot-see-messages-from-other-bots don't log Saved Messages don't log bots don't log verified accounts pm permit the_message += f"Media: {message_media}" reply_to=, parse_mode="html", file=message_media, | 298 | en | 0.706465 |
''' Frsutum PointNets v1 Model.
'''
from __future__ import print_function
import sys
import os
import tensorflow as tf
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER, NUM_OBJECT_POINT
from model_util import point_cloud_masking, get_center_regression_net
from model_util import placeholder_inputs, parse_output_to_tensors, get_loss
def get_instance_seg_v1_net(point_cloud, one_hot_vec,
                            is_training, bn_decay, end_points):
    ''' 3D instance segmentation PointNet v1 network.
    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
        end_points: dict
    Output:
        logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
        end_points: dict
    '''
    # NOTE(review): batch_size is computed but never used in this function.
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    net = tf.expand_dims(point_cloud, 2)
    # (32, 2048, 1, 4) -- box 1 of the paper's segmentation-network figure
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay)
    # (32, 2048, 1, 64)
    net = tf_util.conv2d(net, 64, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay)
    point_feat = tf_util.conv2d(net, 64, [1,1],
                                padding='VALID', stride=[1,1],
                                bn=True, is_training=is_training,
                                scope='conv3', bn_decay=bn_decay)
    # (32, 2048, 1, 64) -- box 2 of the figure (per-point features),
    # concatenated into box 5 below
    net = tf_util.conv2d(point_feat, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay)
    # (32, 2048, 1, 128)
    net = tf_util.conv2d(net, 1024, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay)
    # (32, 2048, 1, 1024) -- box 3 of the figure
    global_feat = tf_util.max_pool2d(net, [num_point,1],
                                     padding='VALID', scope='maxpool')
    # (32, 1, 1, 1024) -- box 4 of the figure (global feature),
    # concatenated into box 5 below along with the class one-hot vector
    global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
    # (32, 1, 1, 1027)
    global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
    # (32, 2048, 1, 1027) -- tf.tile() replicates the global feature per point
    concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
    # (32, 2048, 1, 1091) -- box 5 of the figure: 2048 x (1024+64+3)
    net = tf_util.conv2d(concat_feat, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay)
    # (32, 2048, 1, 512)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv7', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv8', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv9', bn_decay=bn_decay)
    # (32, 2048, 1, 128)
    net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)
    logits = tf_util.conv2d(net, 2, [1,1],
                            padding='VALID', stride=[1,1], activation_fn=None,
                            scope='conv10')
    # (32, 2048, 1, 2)
    logits = tf.squeeze(logits, [2]) # BxNxC
    # (32, 2048, 2) -- box 6 of the figure: per-point bkg/object scores;
    # consumed by get_model() below
    return logits, end_points
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
                                 is_training, bn_decay, end_points):
    ''' 3D Box Estimation PointNet v1 network.
    Input:
        object_point_cloud: TF tensor in shape (B,M,C)
            point clouds in object coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
    Output:
        output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
            including box centers, heading bin class scores and residuals,
            and size cluster scores and residuals
    '''
    num_point = object_point_cloud.get_shape()[1].value
    net = tf.expand_dims(object_point_cloud, 2)
    # (32, 512, 1, 3) -- box 1 of the paper's box-estimation figure
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 256, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 512, [1,1],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv-reg4', bn_decay=bn_decay)
    # (32, 512, 1, 512) -- box 2 of the figure
    net = tf_util.max_pool2d(net, [num_point,1],
                             padding='VALID', scope='maxpool2')
    # (32, 1, 1, 512)
    net = tf.squeeze(net, axis=[1,2])
    # (32, 512)
    net = tf.concat([net, one_hot_vec], axis=1)
    # (32, 512+3) -- box 3 of the figure: global feature + class one-hot
    net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
                                  is_training=is_training, bn_decay=bn_decay)
    # (32, 256)
    # The first 3 numbers: box center coordinates (cx,cy,cz),
    # the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals
    # next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
    output = tf_util.fully_connected(net,
        3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
    return output, end_points
    # (3+4*8+2*12)
    # (3+4NS+2NH) -- box 4 of the figure; parsed by parse_output_to_tensors()
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
    ''' Frustum PointNets model. The model predict 3D object masks and
    amodel bounding boxes for objects in frustum point clouds.

    Input:
        point_cloud: TF tensor in shape (B,N,4)
            frustum point clouds with XYZ and intensity in point channels
            XYZs are in frustum coordinate
        one_hot_vec: TF tensor in shape (B,3)
            length-3 vectors indicating predicted object type
        is_training: TF boolean scalar
        bn_decay: TF float scalar
    Output:
        end_points: dict (map from name strings to TF tensors)
    '''
    end_points = {}

    # Stage 1: 3D Instance Segmentation PointNet.
    # logits: TF tensor in shape (B, N, 2), scores for bkg/clutter and object
    logits, end_points = get_instance_seg_v1_net(\
        point_cloud, one_hot_vec,
        is_training, bn_decay, end_points)
    # (32, 2048, 2), {}
    end_points['mask_logits'] = logits

    # Masking:
    # select masked (object) points and translate to the mask centroid.
    object_point_cloud_xyz, mask_xyz_mean, end_points = \
        point_cloud_masking(point_cloud, logits, end_points)
    # (32, 512, 3), (32, 3); point_cloud_masking also sets end_points['mask']

    # T-Net: regress a residual center and translate coordinates.
    center_delta, end_points = get_center_regression_net(\
        object_point_cloud_xyz, one_hot_vec,
        is_training, bn_decay, end_points)
    # (32, 3)
    stage1_center = center_delta + mask_xyz_mean # Bx3
    # (32, 3)
    end_points['stage1_center'] = stage1_center
    # Get object point cloud in object coordinate (centered on T-Net output).
    object_point_cloud_xyz_new = \
        object_point_cloud_xyz - tf.expand_dims(center_delta, 1)
    # (32, 512, 3) - (32, 1, 3)

    # Stage 2: Amodal Box Estimation PointNet on the re-centered points.
    output, end_points = get_3d_box_estimation_v1_net(\
        object_point_cloud_xyz_new, one_hot_vec,
        is_training, bn_decay, end_points)

    # Parse the flat regression output into 3D box parameter tensors.
    end_points = parse_output_to_tensors(output, end_points)
    end_points['center'] = end_points['center_boxnet'] + stage1_center # Bx3
    # (32, 3)
    return end_points
if __name__=='__main__':
    # Smoke test: build the graph with dummy inputs and print the resulting
    # endpoint tensors and the loss op (no session is run).
    with tf.Graph().as_default():
        inputs = tf.zeros((32,1024,4))
        outputs = get_model(inputs, tf.ones((32,3)), tf.constant(True))
        for key in outputs:
            print((key, outputs[key]))
        loss = get_loss(tf.zeros((32,1024),dtype=tf.int32),
            tf.zeros((32,3)), tf.zeros((32,),dtype=tf.int32),
            tf.zeros((32,)), tf.zeros((32,),dtype=tf.int32),
            tf.zeros((32,3)), outputs)
        print(loss)
| models/frustum_pointnets_v1.py | 9,914 | 3D Box Estimation PointNet v1 network.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in object coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
including box centers, heading bin class scores and residuals,
and size cluster scores and residuals
3D instance segmentation PointNet v1 network.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
end_points: dict
Output:
logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
end_points: dict
Frustum PointNets model. The model predict 3D object masks and
amodel bounding boxes for objects in frustum point clouds.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
Output:
end_points: dict (map from name strings to TF tensors)
Frsutum PointNets v1 Model.
(32, 2048, 1, 4) 论文第一格 (32, 2048, 1, 64) (32, 2048, 1, 64) 论文第二格,拼接到第五格 (32, 2048, 1, 128) (32, 2048, 1, 1024) 论文第三格 (32, 1, 1, 1024) 论文第四格,拼接到第五格 (32, 1, 1, 1027) (32, 2048, 1, 1027) tf.tile()复制扩展数据 (32, 2048, 1, 1091) 论文第五格 2048*(1024+64+3) (32, 2048, 1, 512) (32, 2048, 1, 128) (32, 2048, 1, 2) BxNxC (32, 2048, 2)论文第六格 to 182 (32, 512,1, 3) 第一格 (32, 512, 1, 512) 第二格 (32, 1, 1, 512) (32, 512) (32, 512+3) 第三格 (32, 256) The first 3 numbers: box center coordinates (cx,cy,cz), the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals next NUM_SIZE_CLUSTER*4: box cluster scores and residuals (3+4*8+2*12) (3+4NS+2NH) 第四格 to 202 3D Instance Segmentation PointNet logits: TF tensor in shape(B, N, 2), scores for bkg / clutter and object end_points: dict to 18 (32, 2048, 2), {} Masking select masked points and translate to masked points' centroid to model_util.py 217 (32, 512, 3) (32, 3) end_points['mask'] = mask T-Net and coordinate translation to model_util.py 295 (32,3) end_points['mask'] = mask Bx3 (32,3) Get object point cloud in object coordinate (32, 512, 3) - (32, 1, 3) in object coordinate Amodel Box Estimation PointNet to 105 Parse output to 3D box parameters Bx3 (32, 3) | 2,685 | en | 0.561355 |
import asyncio
import discord
from discord import Member, Role, TextChannel, DMChannel
from discord.ext import commands
from typing import Union
from profanity_check import predict
class ProfanityFilter:
    """
    A simple filter that checks for profanity in a message and
    then deletes it. Many profanity detection libraries use a hard-coded
    list of bad words to detect and filter profanity, however this
    plugin utilises a library that uses a linear support vector machine
    (SVM) model trained on 200k human-labeled samples of clean and profane
    text strings. ([`profanity-check`](https://github.com/vzhou842/profanity-check)).

    Artificial intelligence in a discord bot? Heck yeah!
    """

    def __init__(self, bot):
        self.bot = bot
        # Per-plugin partition of the shared plugin database.
        self.coll = bot.plugin_db.get_partition(self)
        # Defaults until the persisted config is loaded asynchronously.
        self.enabled = True
        # IDs (users, roles, channels) exempted from filtering.
        self.whitelist = set()
        # __init__ cannot await, so load the stored config in the background.
        asyncio.create_task(self._set_config())

    async def _set_config(self):
        """Restore `enabled` and `whitelist` from the database.

        `find_one` returns None when no config document has ever been
        saved (e.g. first run); fall back to an empty dict so the
        defaults apply instead of raising AttributeError.
        """
        config = await self.coll.find_one({'_id': 'config'}) or {}
        self.enabled = config.get('enabled', True)
        self.whitelist = set(config.get('whitelist', []))

    @commands.group(invoke_without_command=True)
    @commands.is_owner()
    async def profanity(self, ctx, mode: bool):
        """Disable or enable the profanity filter.

        Usage: `profanity enable` / `profanity disable`
        """
        self.enabled = mode
        # Persist so the setting survives restarts.
        await self.coll.update_one(
            {'_id': 'config'},
            {'$set': {'enabled': self.enabled}},
            upsert=True
        )
        await ctx.send(('Enabled' if mode else 'Disabled') + ' the profanity filter.')

    @commands.is_owner()
    @profanity.command()
    async def whitelist(ctx, target: Union[Member, Role, TextChannel]):
        """Whitelist (or un-whitelist) a user, role or channel from the filter.

        Usage: `profanity whitelist @dude`
        """
        # NOTE(review): `self` is deliberately absent from the signature; the
        # original author observed it was not passed at invocation time.
        # Likely related to `self.whitelist = set()` in __init__ shadowing
        # this command attribute on the instance -- confirm before changing.
        self = ctx.bot.get_cog('ProfanityFilter')
        # Toggle semantics: a second invocation on the same target removes it.
        if target.id in self.whitelist:
            self.whitelist.remove(target.id)
            removed = True
        else:
            self.whitelist.add(target.id)
            removed = False
        await self.coll.update_one(
            {'_id': 'config'},
            {'$set': {'whitelist': list(self.whitelist)}},
            upsert=True
        )
        await ctx.send(
            f"{'Un-w' if removed else 'W'}hitelisted "
            f"{target.mention} from the profanity filter."
        )

    async def on_message(self, message):
        """Delete profane messages and post a short self-deleting notice."""
        if not self.enabled:
            return
        channel = message.channel
        author = message.author
        if isinstance(author, discord.User):  # private channel
            return
        # Whitelisted if the author, channel, or any author role matches.
        ids = {author.id, channel.id} | {r.id for r in author.roles}
        if self.whitelist.intersection(ids):  # anything intersects
            return
        # predict() returns an array of 0/1 flags, one per input string.
        profane = bool(predict([message.content])[0])
        if not profane:
            return
        await message.delete()
        temp = await channel.send(
            f'{author.mention} your message has '
            'been deleted for containing profanity.'
        )
        await asyncio.sleep(5)
        await temp.delete()
def setup(bot):
    """Plugin entry point: attach the ProfanityFilter cog to the bot."""
    cog = ProfanityFilter(bot)
    bot.add_cog(cog)
then deletes it. Many profanity detection libraries use a hard-coded
list of bad words to detect and filter profanity, however this
plugin utilises a library that uses a linear support vector machine
(SVM) model trained on 200k human-labeled samples of clean and profane
text strings. ([`profanity-check`](https://github.com/vzhou842/profanity-check)).
Artificial intelligence in a discord bot? Heck yeah!
wtf where did self dissapear private channel anything intersects | 537 | en | 0.832786 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: vs@webdirect.md
Description: Very simple reminder
'''
from core.people.person import Profile, Session
from core.utils.utils import text2int
import re
from crontab import CronTab
from getpass import getuser
from core.config.settings import logger, ROBOT_DIR
class Reaction:
    """'remind me every ...' reaction.

    Parses a natural-language recurrence out of the request (e.g.
    "remind me every monday at 10:00 with message buy milk") and adds a
    matching crontab entry for the current system user.  The installed
    job runs core/cron/cronjob.py, which sends the reminder text back
    to the sender's jabber profile.
    """
    # Last response produced by run().
    response = ''
    # Raw request string for the current call.
    request = ''

    def __str__(self):
        return 'Remind me every ... reaction'

    # NOTE(review): @classmethod on __init__ (and run) is unusual -- the
    # first argument is bound to the class, so state is effectively kept
    # on the class itself rather than per instance.  Confirm intent
    # before refactoring.
    @classmethod
    def __init__(self, *args, **kwargs):
        """ original request string """
        #get request object
        self.req_obj = kwargs.pop('req_obj')
        #request word sequence
        self.request = self.req_obj.get('request', '')
        #request received from (julius, jabber any other resources)
        self.req_from = self.req_obj.get('from', '')
        self.response = ''

    @classmethod
    def run(self):
        """default method: parse the schedule and install the cron job."""
        sess = Session()
        sender = self.req_obj.get('sender', '')
        if sender:
            #extract sender email (drop the jabber resource suffix)
            email = sender.split('/')[0]
            #find user profile by primary email
            profile = sess.query(Profile).filter(Profile.email == email).one()
            cron = CronTab(getuser())
            # Weekday names -> crontab day-of-week abbreviations.
            DAYS = {'sunday': 'SUN'
                , 'monday': 'MON'
                , 'tuesday': 'TUE'
                , 'wednesday': 'WED'
                , 'thursday': 'THU'
                , 'friday': 'FRI'
                , 'saturday': 'SAT'}
            # Drop the command phrase; the remainder is the schedule spec.
            req = self.request.replace('remind me every', '', 1)
            #r = re.compile(re.escape('remind me every'), re.IGNORECASE)
            #req = r.sub('', request)
            # Reminder text: everything after "by/with/to/of [message]".
            m = re.search('\s+?(by|with|to|of)\s+message\s+?(.+)', req)
            if m and m.group(2):
                msg = m.group(2)
            else:
                m = re.search('\s+?(by|with|to|of)\s+?(.+)', req)
                if m and m.group(2):
                    msg = m.group(2)
                else:
                    msg = 'This a reminder. Unfortunatelly I could not parse your message, \
                but I guess you can remember what you wanted to do.'
            # The job calls back into the robot to deliver the message;
            # double quotes are stripped so they cannot break the command.
            job = cron.new(command='/usr/bin/python %s/core/cron/cronjob.py --uuid=%s \
            --cmd="send jabber message" --arguments="%s"' % (ROBOT_DIR, profile.uuid, msg.replace('"', '')))
            # skip_other short-circuits the remaining pattern checks once a
            # schedule has been fixed.
            skip_other = False
            if req.strip().startswith('month'):
                # "every month" -> midnight on the 1st of the month.
                job.minute.on(0)
                job.hour.on(0)
                job.dom.on(1)
                skip_other = True
            if req.strip().startswith('week'):
                # "every week" -> midnight on sunday (dow 0).
                job.minute.on(0)
                job.hour.on(0)
                job.dow.on(0)
                skip_other = True
            if req.strip().startswith('year'):
                # NOTE(review): dom/month 0 looks invalid for cron -- verify.
                job.dom.on(0)
                job.month.on(0)
                skip_other = True
            # Weekday-based schedule: "every monday ...".
            dow = False
            for dw, cron_day in DAYS.items():
                if req.strip().lower().startswith(dw):
                    dow = True
                    break
            if dow:
                job.dow.on(cron_day.upper())
                #req = req.replace(dw, '', 1) - ignore case problem
                # Strip the weekday name case-insensitively.
                r = re.split(r'^' + dw, req.strip(), flags=re.IGNORECASE)
                if r and len(r) == 2:
                    req = r.pop()
                if req.strip().startswith('at '):
                    ################################################
                    # every monday/tuesday/wednesday at 8 o'clock
                    ################################################
                    time = re.search("[^0-9](\d{1,2})\so'clock", req)
                    if time and time.group(1):
                        job.minute.on(0)
                        job.hour.on(time.group(1))
                        skip_other = True
                    if not skip_other:
                        ################################################
                        # every monday/tuesday/wednesday at 00:00
                        ################################################
                        time = re.search('[^0-9](\d{1,2}):(\d{2})[^0-9]', req)
                        if time and time.group(1) and time.group(2):
                            job.minute.on(time.group(2))
                            job.hour.on(time.group(1))
                            skip_other = True
                ################################################
                # every monday/tuesday/wednesday hourly
                ################################################
                if not skip_other and req.strip().startswith('hourly'):
                    #hourly
                    job.minute.on(0)
                    skip_other = True
                ################################################
                # every monday/tuesday/wednesday from 00:00 to 00:00
                ################################################
                elif not skip_other:
                    #@todo
                    #time = re.search('\s?from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+?', req.strip())
                    time = re.search('\s?from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+', req)
                    #@todo
                    if time and time.group(1):
                        job.hour.during(time.group(1), time.group(3))
                        #todo every minute, every 5 minutes
                        job.minute.during(time.group(2), time.group(4)).every(5)
                        skip_other = True
                ################################################
                # every monday/tuesday/wednesday
                ################################################
                elif not skip_other:
                    job.minute.on(0)
                    #by default 10:00
                    job.hour.on(10)
                    skip_other = True
            if not skip_other and req.strip().startswith('day'):
                #cut day word
                req = req.replace('day', '', 1)
                if req.strip().startswith('at '):
                    ################################################
                    # every day at 8 o'clock
                    ################################################
                    time = re.search("[^0-9](\d{1,2})\so'clock", req)
                    if time and time.group(1):
                        job.minute.on(0)
                        job.hour.on(time.group(1))
                        skip_other = True
                    if not skip_other:
                        ################################################
                        # every day at 00:00
                        ################################################
                        time = re.search('[^0-9](\d{1,2}):(\d{2})[^0-9]', req)
                        if time and time.group(1) and time.group(2):
                            job.minute.on(time.group(2))
                            job.hour.on(time.group(1))
                            skip_other = True
                ################################################
                # every day hourly
                ################################################
                if not skip_other and req.strip().startswith('hourly'):
                    #hourly
                    job.minute.on(0)
                    skip_other = True
                ################################################
                # every day every 5 hours
                ################################################
                if not skip_other and req.strip().startswith('every'):
                    req = req.replace('every', '', 1)
                    hour = re.search('\s?(\d+)\s+(hour|hours|hs|h)', req)
                    if hour and hour.group(1):
                        job.hour.every(hour.group(1))
                        skip_other = True
                    else:
                        #if hour presents in human word : one, two etc.
                        hour = re.search('^\s?([a-zA-Z]+?)\s(hours|hour)', req)
                        if hour and hour.group(1):
                            h = text2int(hour.group(1))
                            job.hour.every(h)
                            job.minute.on(0)
                            skip_other = True
                ################################################
                # every day from 00:00 to 00:00
                ################################################
                elif not skip_other and req.strip().startswith('from'):
                    #@todo
                    time = re.search('^from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+', req.strip())
                    #@todo
                    if time and time.group(1):
                        job.hour.during(time.group(1), time.group(3))
                        #todo every minute, every 5 minutes
                        job.minute.during(time.group(2), time.group(4)).every(5)
                        skip_other = True
                ################################################
                # every day
                ################################################
                elif not skip_other:
                    job.minute.on(0)
                    #by default 10:00
                    job.hour.on(10)
                    skip_other = True
                    print(job)
            else:
                pass
            if not skip_other and req.strip().startswith('with message'):
                # No explicit schedule given: default to daily at 10:00.
                job.minute.on(0)
                #by default 10:00
                job.hour.on(10)
                skip_other = True
            if not skip_other and req.strip().startswith('hour'):
                #every hour
                job.minute.on(0)
                skip_other = True
            if not skip_other and req.strip().startswith('minute'):
                #every minute
                job.minute.every(1)
                skip_other = True
            if not skip_other:
                #######################################################################################################
                # hours
                #######################################################################################################
                hour = re.search('^(\d+)\s+(hour|hours|hs|h)', req.strip())
                if hour and hour.group(1):
                    job.hour.every(hour.group(1))
                    skip_other = True
                else:
                    #if hour presents in human word : one, two etc.
                    hour = re.search('^([a-zA-Z]+?)\s(hours|hour)', req.strip())
                    if hour and hour.group(1):
                        h = text2int(hour.group(1))
                        job.hour.every(h)
                        job.minute.on(0)
                        skip_other = True
            if not skip_other:
                #######################################################################################################
                # days
                #######################################################################################################
                day = re.search('^(\d+)\s+(days|day|d)', req.strip())
                if day and day.group(1):
                    #remove the matched part of the string which describes number of days: ex. 10 days
                    req = req.replace(day.group(0), '', 1)
                    ################################################
                    # days at 00:00
                    ################################################
                    if req.strip().startswith('at '):
                        req = req.replace('at', '', 1)
                        ################################################
                        # days at 8 o'clock
                        ################################################
                        time = re.search("^(\d{1,2})\so'clock", req.strip())
                        if time and time.group(1):
                            job.dow.every(day.group(1))
                            job.minute.on(0)
                            job.hour.on(time.group(1))
                            skip_other = True
                    ################################################
                    # days hourly
                    ################################################
                    if not skip_other and req.strip().startswith('hourly'):
                        #hourly
                        job.minute.on(0)
                        job.dow.every(day.group(1))
                        skip_other = True
                    ################################################
                    # days at 00:00
                    ################################################
                    if not skip_other:
                        time = re.search('^(\d{1,2}):(\d{2})[^0-9]', req.strip())
                        if time and time.group(1) and time.group(2):
                            job.dom.every(day.group(1))
                            job.minute.on(time.group(2))
                            job.hour.on(time.group(1))
                            skip_other = True
                    ################################################
                    # 10 days from 00:00 to 00:00
                    ################################################
                    if not skip_other and req.strip().startswith('from'):
                        #@todo
                        req = req.replace('from', '', 1)
                        time = re.search('^(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+?', req.strip())
                        if time and time.group(1):
                            job.hour.during(time.group(1), time.group(3))
                            job.dom.every(day.group(1))
                            #todo every 5 minutes
                            job.minute.during(time.group(2), time.group(4)).every(5)
                            skip_other = True
                #################################################
                # every two days
                #################################################
                elif not skip_other:
                    day = re.search('^\s?([a-zA-Z]+?)\s(days|day)', req)
                    if day and day.group(1):
                        d = text2int(day.group(1))
                        req = req.replace(day.group(0), '', 1)
                        ################################################
                        # ten days from 00:00 to 00:00
                        ################################################
                        if not skip_other and req.strip().startswith('from'):
                            time = re.search('^from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+?', req.strip())
                            if time and time.group(1):
                                job.hour.during(time.group(1), time.group(3))
                                job.dom.every(d)
                                #todo every 5 minutes
                                # remove from .. to and check for "every" 5 minutes
                                req = req.replace(day.group(0), '', 1)
                                req = req.replace(time.group(0), '', 1)
                                if req.strip().startswith('every'):
                                    mins = re.search('^every\s(\d{1,2})[^0-9]+?(min|minute|minutes)', req.strip())
                                    if mins and mins.group(0):
                                        job.minute.during(time.group(2), time.group(4)).every(mins.group(1))
                                        skip_other = True
                                    #check once again but now we expect minutes as word not number
                                    else:
                                        mins = re.search('^every\s([^0-9\s]+)\s?(min|minute|minutes)', req.strip())
                                        #if exists
                                        if mins and mins.group(1):
                                            m = text2int(mins.group(1))
                                            job.minute.during(time.group(2), time.group(4)).every(m)
                                            skip_other = True
                                        else:
                                            # NOTE(review): bare raise with no
                                            # active exception -> RuntimeError.
                                            raise
                                # if not starts with "every"
                                else:
                                    job.minute.during(time.group(2), time.group(4)).every(5)
                                    skip_other = True
                        else:
                            job.dom.every(d)
                            job.minute.on(0)
                            #by default 10:00
                            job.hour.on(10)
                            #print(job)
                            skip_other = True
                    else:
                        print(req)
                        raise
                #job.minute.on(0)
                #job.hour.on(10) #by default 10:00
                #skip_other=True
                #job.dow.every(day.group(1))
                #skip_other = True
            if not skip_other:
                #######################################################################################################
                # minutes
                #######################################################################################################
                min = re.search('\s?(\d+)\s+(minutes|min|minute|m)', req)
                if min and min.group(1):
                    job.minute.every(min.group(1))
                else:
                    #if day presents in human word : one, two etc.
                    min = re.search('^\s?([a-zA-Z]+?)\s(minutes|min|mins)', req)
                    if min and min.group(1):
                        m = text2int(min.group(1))
                        job.minute.every(m)
            # Persist the new crontab entry and report back.
            cron.write()
            logger.info('adding cronjob %s' % cron.render())
            response = 'ok, cronjob added %s' % job.render()
            if self.req_from == 'jabber':
                todo = {'text': response, 'jmsg': response, 'type': 'response'}
                self.response = todo
            if self.req_from == 'julius':
                from core.broadcast import say, bang
                bang()
                todo = {'say': response, 'text': response, 'type': 'response'}
                self.response = say(self.request.replace('say', '').upper())
        return self.response
#n = Reaction(*{'reserved':''}, **{'req_obj':{'from':'jabber', 'request':'remind me every 2 minutes with "hey don\'t forget about pizza"', 'sender': 'vasilii.pascal@gmail.com'}})
#n.run()
| core/brain/remind/me/every/reaction.py | 17,836 | remind me every ... reaction
original request string
default method
Author: vs@webdirect.md
Description: Very simple reminder
!/usr/bin/env python -*- coding: utf-8 -*-get request objectrequest word sequencerequest received from (julius, jabber any other resources)exctract sender emailfind user profile by primary emailr = re.compile(re.escape('remind me every'), re.IGNORECASE)req = r.sub('', request)req = req.replace(dw, '', 1) - ignore case problem every monday/tuesday/wednesday at 00:00 every monday/tuesday/wednesday at 00:00 every monday/tuesday/wednesday hourlyhourly every monday/tuesday/wednesday from 00:00 to 00:00@todotime = re.search('\s?from\s(\d{1,2}):(\d{2})\sto\s(\d{1,2}):(\d{2})[^0-9]+?', req.strip())@todotodo every minute, every 5 minutes every monday/tuesday/wednesdayby default 10:00cut day word every day at 00:00 every day at 00:00 every day hourlyhourly every day every 5 hoursif hour presents in human word : one, two etc. every day from 00:00 to 00:00@todo@todotodo every minute, every 5 minutes every dayby default 10:00by default 10:00every hourevery minute hoursif hour presents in human word : one, two etc. daysremove the matched part of the string which describes number of days: ex. 10 days days at 00:00 days at 8 o'clock days hourlyhourly days at 00:00 10 days from 00:00 to 00:00@todotodo every 5 minutes every two days ten days from 00:00 to 00:00todo every 5 minutes remove from .. to and check for "every" 5 minutescheck once again but now we expect minutes as word not numberif exists if not starts with "every"by default 10:00print(job)job.minute.on(0)job.hour.on(10) by default 10:00skip_other=Truejob.dow.every(day.group(1))skip_other = True minutesif day presents in human word : one, two etc.n = Reaction(*{'reserved':''}, **{'req_obj':{'from':'jabber', 'request':'remind me every 2 minutes with "hey don\'t forget about pizza"', 'sender': 'vasilii.pascal@gmail.com'}})n.run() | 1,973 | en | 0.757046 |
import cv2
class SimplePreprocessor:
    """Resize images to a fixed (width, height), ignoring aspect ratio."""

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        """Remember the target size and the OpenCV interpolation flag."""
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        """Return *image* resized to the stored target dimensions."""
        target_size = (self.width, self.height)
        return cv2.resize(image, target_size, interpolation=self.inter)
"""Define endpoints related to user reports."""
import logging
from typing import Any, Dict
from .helpers.report import Report
_LOGGER: logging.Logger = logging.getLogger(__name__)
class UserReport(Report):
    """Define a user report object."""

    async def status_by_coordinates(
        self, latitude: float, longitude: float
    ) -> Dict[str, Any]:
        """Get symptom data for the location nearest to the user's lat/lon."""
        return await self.nearest_by_coordinates(latitude, longitude)

    async def status_by_zip(self, zip_code: str) -> Dict[str, Any]:
        """Get symptom data for the provided ZIP code."""
        reports = await self.user_reports()
        # Find the first report whose ZIP matches; None when absent.
        match = next((entry for entry in reports if entry["zip"] == zip_code), None)
        if match is None:
            return {}
        return await self.status_by_coordinates(
            float(match["latitude"]), float(match["longitude"])
        )
| pyflunearyou/user.py | 954 | Define a user report object.
Define endpoints related to user reports. | 70 | en | 0.881593 |
import re
from .reports import BaseReport
from .utils import get_pacer_doc_id_from_doc1_url, reverse_goDLS_function
from ..lib.log_tools import make_default_logger
from ..lib.string_utils import force_unicode
logger = make_default_logger()
class AttachmentPage(BaseReport):
    """An object for querying and parsing the attachment page report. """

    PATH = 'doc1/'

    def __init__(self, court_id, pacer_session=None):
        super(AttachmentPage, self).__init__(court_id, pacer_session)
        if self.court_id.endswith('b'):
            # Note that parsing bankruptcy attachment pages does not reveal the
            # document number, only the attachment numbers.
            self.is_bankruptcy = True
        else:
            self.is_bankruptcy = False

    def query(self, document_number):
        """Query the "attachment page" endpoint and set the results to self.response.

        :param document_number: The internal PACER document ID for the item.
        :return: a request response object
        """
        assert self.session is not None, \
            "session attribute of DocketReport cannot be None."
        # coerce the fourth digit of the document number to 0 to ensure we get
        # the attachment page.
        document_number = document_number[:3] + "0" + document_number[4:]
        url = self.url + document_number
        logger.info(u'Querying the attachment page endpoint at URL: %s' % url)
        self.response = self.session.get(url)
        self.parse()

    @property
    def data(self):
        """Get data back from the query for the matching document entry.

        :return: If lookup fails, an empty dict. Else, a dict containing the
        following fields:
            - document_number: The document number we're working with.
            - page_count: The number of pages of the item
            - pacer_doc_id: The doc ID for the main document.
            - pacer_case_id: The internal PACER case ID, parsed from the HTML.
            - attachments: A list of attached items with the following fields:
                - attachment_number: The attachment number.
                - description: A description of the item.
                - page_count: The number of pages.
                - pacer_doc_id: The document ID for the attachment (a str).

        See the JSON objects in the tests for more examples.
        """
        # Every row with a link: the first is the main document, the rest
        # are its attachments.
        rows = self.tree.xpath('//tr[.//a]')
        if not rows:
            logger.info("No documents found on attachment page.")
            return {}

        first_row = rows.pop(0)
        result = {
            'document_number': self._get_document_number(first_row),
            'page_count': self._get_page_count_from_tr(first_row),
            'pacer_doc_id': self._get_pacer_doc_id(first_row),
            'pacer_case_id': self._get_pacer_case_id(),
            'attachments': []
        }
        for row in rows:
            result['attachments'].append({
                'attachment_number': self._get_attachment_number(row),
                'description': self._get_description_from_tr(row),
                'page_count': self._get_page_count_from_tr(row),
                'pacer_doc_id': self._get_pacer_doc_id(row)
            })

        return result

    def _get_document_number(self, row):
        """Return the document number for an item.

        In district court attachment pages, this is easy to extract with an
        XPath. In bankruptcy cases, it's simply not there (returns None).
        """
        if self.is_bankruptcy:
            return None
        else:
            return int(row.xpath('.//a/text()')[0].strip())

    def _get_attachment_number(self, row):
        """Return the attachment number for an item.

        In district courts, this can be easily extracted. In bankruptcy courts,
        you must extract it, then subtract 1 from the value since these are
        tallied and include the main document.
        """
        number = int(row.xpath('.//a/text()')[0].strip())
        if self.is_bankruptcy:
            return number - 1
        else:
            return number

    def _get_description_from_tr(self, row):
        """Get the description from the row.

        The description lives in a different cell for district (3rd column)
        vs. bankruptcy (4th column) pages.
        """
        if not self.is_bankruptcy:
            index = 2
        else:
            index = 3
        description_text_nodes = row.xpath('./td[%s]//text()' % index)
        if len(description_text_nodes) == 0:
            # No text in the cell.
            return u''
        else:
            description = description_text_nodes[0].strip()
            return force_unicode(description)

    @staticmethod
    def _get_page_count_from_tr(tr):
        """Take a row from the attachment table and return the page count as an
        int extracted from the cell specified by index.

        Returns None when no parseable page count is present (restricted
        documents, or every "page" match fails to parse as a number).
        """
        pg_cnt_str_nodes = tr.xpath('./td[contains(., "page")]/text()')
        if len(pg_cnt_str_nodes) == 0:
            # It's a restricted document without page count information.
            return None
        else:
            for pg_cnt_str_node in pg_cnt_str_nodes:
                try:
                    pg_cnt_str = pg_cnt_str_node.strip()
                    return int(pg_cnt_str.split()[0])
                except ValueError:
                    # Happens when the description field contains the word "page"
                    # and gets caught by the xpath. Just press on.
                    continue

    @staticmethod
    def _get_pacer_doc_id(row):
        """Take in a row from the attachment table and return the pacer_doc_id
        for the item in that row. Return None if the ID cannot be found.
        """
        try:
            url = row.xpath(u'.//a')[0]
        except IndexError:
            # Item exists, but cannot download document. Perhaps it's sealed
            # or otherwise unavailable in PACER. This is carried over from the
            # docket report and may not be needed here, but it's a good
            # precaution.
            return None
        else:
            doc1_url = url.xpath('./@href')[0]
            return get_pacer_doc_id_from_doc1_url(doc1_url)

    def _get_pacer_case_id(self):
        """Get the pacer_case_id value by inspecting the HTML

        :returns str: The pacer_case_id value
        """
        # Start by inspecting all the links
        urls = self.tree.xpath('//a')
        for url in urls:
            try:
                onclick = url.xpath('./@onclick')[0]
            except IndexError:
                continue
            else:
                if 'goDLS' not in onclick:
                    # Some other onclick we don't care about.
                    continue
                go_dls_parts = reverse_goDLS_function(onclick)
                return go_dls_parts['caseid']

        # If that fails, try inspecting the input elements
        input_els = self.tree.xpath('//input')
        for input_el in input_els:
            try:
                onclick = input_el.xpath('./@onclick')[0]
            except IndexError:
                continue
            else:
                m = re.search(r'[?&]caseid=(\d+)', onclick, flags=re.I)
                if m:
                    return m.group(1)
| juriscraper/pacer/attachment_page.py | 7,089 | An object for querying and parsing the attachment page report.
Return the attachment number for an item.
In district courts, this can be easily extracted. In bankruptcy courts,
you must extract it, then subtract 1 from the value since these are
tallied and include the main document.
Get the description from the row
Return the document number for an item.
In district court attachment pages, this is easy to extract with an
XPath. In bankruptcy cases, it's simply not there.
Get the pacer_case_id value by inspecting the HTML
:returns str: The pacer_case_id value
Take in a row from the attachment table and return the pacer_doc_id
for the item in that row. Return None if the ID cannot be found.
Take a row from the attachment table and return the page count as an
int extracted from the cell specified by index.
Get data back from the query for the matching document entry.
:return: If lookup fails, an empty dict. Else, a dict containing the
following fields:
- document_number: The document number we're working with.
- page_count: The number of pages of the item
- pacer_doc_id: The doc ID for the main document.
- attachments: A list of attached items with the following fields:
- attachment_number: The attachment number.
- description: A description of the item.
- page_count: The number of pages.
- pacer_doc_id: The document ID for the attachment (a str).
See the JSON objects in the tests for more examples.
Query the "attachment page" endpoint and set the results to self.response.
:param document_number: The internal PACER document ID for the item.
:return: a request response object
Note that parsing bankruptcy attachment pages does not reveal the document number, only the attachment numbers. coerce the fourth digit of the document number to 1 to ensure we get the attachment page. No text in the cell. It's a restricted document without page count information. Happens when the description field contains the word "page" and gets caught by the xpath. Just press on. Item exists, but cannot download document. Perhaps it's sealed or otherwise unavailable in PACER. This is carried over from the docket report and may not be needed here, but it's a good precaution. Start by inspecting all the links Some other onclick we don't care about. If that fails, try inspecting the input elements | 2,363 | en | 0.851504 |
# System
import json
# SBaaS
from .stage02_physiology_pairWiseTest_query import stage02_physiology_pairWiseTest_query
from SBaaS_base.sbaas_template_io import sbaas_template_io
# Resources
from io_utilities.base_importData import base_importData
from io_utilities.base_exportData import base_exportData
from ddt_python.ddt_container import ddt_container
class stage02_physiology_pairWiseTest_io(stage02_physiology_pairWiseTest_query,
sbaas_template_io):
def import_data_stage02_physiology_pairWiseTest_add(self, filename):
'''table adds'''
data = base_importData();
data.read_csv(filename);
data.format_data();
self.add_data_stage02_physiology_pairWiseTest(data.data);
data.clear_data();
def export_dataStage02PhysiologyPairWiseTest_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)'''
#get the data for the analysis
data_O = [];
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTest(analysis_id_I);
# make the data parameters
data1_keys = ['analysis_id','simulation_id_1','simulation_id_2',
'rxn_id','flux_units','test_description'
];
data1_nestkeys = ['analysis_id'];
data1_keymap = {'ydata':'pvalue_negLog10',
'xdata':'fold_change',
'serieslabel':'',
'featureslabel':'rxn_id'};
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 50, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":'Fold Change [geometric]',
"svgy1axislabel":'Probability [-log10(P)]'};
svgtileparameters_O = {'tileheader':'Volcano plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tableclass":"table table-condensed table-hover",
"tablefilters":None,
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'pairWiseTest','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
# dump the data to a json file
filtermenuobject_O = None;
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());
def export_dataStage02PhysiologyPairWiseTestMetabolites_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)'''
#get the data for the analysis
data_O = [];
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestMetabolites(analysis_id_I);
# make the data parameters
data1_keys = ['analysis_id','simulation_id_1','simulation_id_2',
'met_id','flux_units','test_description'
];
data1_nestkeys = ['analysis_id'];
data1_keymap = {'ydata':'pvalue_negLog10',
'xdata':'fold_change',
'serieslabel':'',
'featureslabel':'met_id'};
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 50, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":'Fold Change [geometric]',
"svgy1axislabel":'Probability [-log10(P)]'};
svgtileparameters_O = {'tileheader':'Volcano plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tableclass":"table table-condensed table-hover",
"tablefilters":None,
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'pairWiseTest','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
# dump the data to a json file
filtermenuobject_O = None;
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects());
def export_dataStage02PhysiologyPairWiseTestSubsystems_js(self,analysis_id_I,data_dir_I='tmp'):
'''Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)'''
#get the data for the analysis
data_O = [];
data_O = self.get_rows_analysisID_dataStage02PhysiologyPairWiseTestSubsystems(analysis_id_I);
# make the data parameters
data1_keys = ['analysis_id','simulation_id_1','simulation_id_2',
'subsystem_id','flux_units','test_description'
];
data1_nestkeys = ['analysis_id'];
data1_keymap = {'ydata':'pvalue_negLog10',
'xdata':'fold_change',
'serieslabel':'',
'featureslabel':'subsystem_id'};
# make the data object
dataobject_O = [{"data":data_O,"datakeys":data1_keys,"datanestkeys":data1_nestkeys}];
# make the tile parameter objects
formtileparameters_O = {'tileheader':'Filter menu','tiletype':'html','tileid':"filtermenu1",'rowid':"row1",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-4"};
formparameters_O = {'htmlid':'filtermenuform1',"htmltype":'form_01',"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},"formresetbuttonidtext":{'id':'reset1','text':'reset'},"formupdatebuttonidtext":{'id':'update1','text':'update'}};
formtileparameters_O.update(formparameters_O);
svgparameters_O = {"svgtype":'volcanoplot2d_01',
"svgkeymap":[data1_keymap],
'svgid':'svg1',
"svgmargin":{ 'top': 50, 'right': 50, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":'Fold Change [geometric]',
"svgy1axislabel":'Probability [-log10(P)]'};
svgtileparameters_O = {'tileheader':'Volcano plot','tiletype':'svg','tileid':"tile2",'rowid':"row1",'colid':"col2",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-8"};
svgtileparameters_O.update(svgparameters_O);
tableparameters_O = {"tabletype":'responsivetable_01',
'tableid':'table1',
"tableclass":"table table-condensed table-hover",
"tablefilters":None,
'tableformtileid':'filtermenu1','tableresetbuttonid':'reset1','tablesubmitbuttonid':'submit1'};
tabletileparameters_O = {'tileheader':'pairWiseTest','tiletype':'table','tileid':"tile3",'rowid':"row2",'colid':"col1",
'tileclass':"panel panel-default",'rowclass':"row",'colclass':"col-sm-12"};
tabletileparameters_O.update(tableparameters_O);
parametersobject_O = [formtileparameters_O,svgtileparameters_O,tabletileparameters_O];
tile2datamap_O = {"filtermenu1":[0],"tile2":[0],"tile3":[0]};
# dump the data to a json file
filtermenuobject_O = None;
ddtutilities = ddt_container(parameters_I = parametersobject_O,data_I = dataobject_O,tile2datamap_I = tile2datamap_O,filtermenu_I = filtermenuobject_O);
if data_dir_I=='tmp':
filename_str = self.settings['visualization_data'] + '/tmp/ddt_data.js'
elif data_dir_I=='data_json':
data_json_O = ddtutilities.get_allObjects_js();
return data_json_O;
with open(filename_str,'w') as file:
file.write(ddtutilities.get_allObjects()); | SBaaS_COBRA/stage02_physiology_pairWiseTest_io.py | 11,805 | Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)
Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)
Export data for a volcano plot
Visuals:
1. volcano plot
2. sample vs. sample (FC)
3. sample vs. sample (concentration)
4. sample vs. sample (p-value)
table adds
System SBaaS Resourcesget the data for the analysis make the data parameters make the data object make the tile parameter objects dump the data to a json fileget the data for the analysis make the data parameters make the data object make the tile parameter objects dump the data to a json fileget the data for the analysis make the data parameters make the data object make the tile parameter objects dump the data to a json file | 893 | en | 0.563043 |
"""Euler explicit time advancement routine"""
from .projection import predictor, corrector, divergence
from .stats import stats
def advance_euler(gridc, gridx, gridy, scalars, grid_var_list, predcorr):
    """Fractional-step explicit Euler time advancement of the Navier-Stokes
    equations; runs the stage selected by `predcorr`.

    Arguments
    ---------
    gridc : object
        Grid object for cell centered variables
    gridx : object
        Grid object for x-face variables
    gridy : object
        Grid object for y-face variables
    scalars : object
        Scalars object to access time-step and Reynolds number
    grid_var_list : list
        Variable names for velocity, RHS term from the previous time-step,
        divergence and pressure (in that order)
    predcorr : string
        Stage to run: 'predictor', 'divergence' or 'corrector'
    """
    velc, hvar, divv, pres = grid_var_list[:4]

    if predcorr == 'predictor':
        # Calculate predicted velocity: u* = dt*H(u^n)
        predictor(gridx, gridy, velc, hvar, scalars.variable['Re'],
                  scalars.variable['dt'])
    elif predcorr == 'divergence':
        # Calculate RHS for the pressure Poisson solver div(u)/dt
        divergence(gridc, gridx, gridy, velc, divv,
                   ifac=scalars.variable['dt'])
    elif predcorr == 'corrector':
        # Calculate corrected velocity u^n+1 = u* - dt * grad(P)
        corrector(gridc, gridx, gridy, velc, pres, scalars.variable['dt'])
        # Divergence of the corrected velocity, then refresh the run stats.
        divergence(gridc, gridx, gridy, velc, divv)
        scalars.stats.update(stats(gridc, gridx, gridy, velc, pres, divv))
| flowx/ins/euler.py | 1,797 | Subroutine for the fractional step euler explicit time advancement of Navier Stokes equations
Arguments
---------
gridc : object
Grid object for cell centered variables
gridx : object
Grid object for x-face variables
gridy : object
Grid object for y-face variables
scalars: object
Scalars object to access time-step and Reynold number
grid_var_list : list
List containing variable names for velocity, RHS term from the previous time-step, divergence and pressure
predcorr : string
Flag for the fractional step method equations - 'predictor', 'divergence', 'corrector'
Euler explicit time advancement routine
Calculate predicted velocity: u* = dt*H(u^n) Calculate RHS for the pressure Poission solver div(u)/dt Calculate corrected velocity u^n+1 = u* - dt * grad(P) Calculate divergence of the corrected velocity to display stats Calculate stats | 892 | en | 0.596948 |
"""
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from cycler import cycler
import numpy as np
__all__ = ['stackplot']
def stackplot(axes, x, *args, **kwargs):
    """Draws a stacked area plot.

    *x* : 1d array of dimension N

    *y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension
          1xN. The data is assumed to be unstacked. Each of the following
          calls is legal::

            stackplot(x, y)               # where y is MxN
            stackplot(x, y1, y2, y3, y4)  # where y1, y2, y3, y4, are all 1xNm

    Keyword arguments:

    *baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
                Method used to calculate the baseline. 'zero' is just a
                simple stacked plot. 'sym' is symmetric around zero and
                is sometimes called `ThemeRiver`. 'wiggle' minimizes the
                sum of the squared slopes. 'weighted_wiggle' does the
                same but weights to account for size of each layer.
                It is also called `Streamgraph`-layout. More details
                can be found at http://leebyron.com/streamgraph/.

    *labels* : A list or tuple of labels to assign to each data series.

    *colors* : A list or tuple of colors. These will be cycled through and
               used to colour the stacked areas.

    All other keyword arguments are passed to
    :func:`~matplotlib.Axes.fill_between`

    Returns *r* : A list of
    :class:`~matplotlib.collections.PolyCollection`, one for each
    element in the stacked area plot.
    """
    # NOTE(review): if no y arrays are passed at all, `y` is never bound and
    # the np.cumsum below raises NameError -- callers must supply data.
    if len(args) == 1:
        y = np.atleast_2d(*args)
    elif len(args) > 1:
        y = np.row_stack(args)

    # Exhausted iterator yields None labels via six.next(labels, None) below.
    labels = iter(kwargs.pop('labels', []))

    colors = kwargs.pop('colors', None)
    if colors is not None:
        axes.set_prop_cycle(cycler('color', colors))

    baseline = kwargs.pop('baseline', 'zero')
    # Assume data passed has not been 'stacked', so stack it here.
    stack = np.cumsum(y, axis=0)

    if baseline == 'zero':
        # Plain stacked plot: bottom boundary is the x-axis.
        first_line = 0.

    elif baseline == 'sym':
        # Symmetric around zero: shift every layer down by half the total.
        first_line = -np.sum(y, 0) * 0.5
        stack += first_line[None, :]

    elif baseline == 'wiggle':
        # Baseline minimizing the sum of squared slopes (Byron & Wattenberg).
        m = y.shape[0]
        first_line = (y * (m - 0.5 - np.arange(0, m)[:, None])).sum(0)
        first_line /= -m
        stack += first_line

    elif baseline == 'weighted_wiggle':
        # Streamgraph layout: like 'wiggle' but weighted by layer thickness.
        m, n = y.shape
        center = np.zeros(n)
        total = np.sum(y, 0)
        # multiply by 1/total (or zero) to avoid infinities in the division:
        inv_total = np.zeros_like(total)
        mask = total > 0
        inv_total[mask] = 1.0 / total[mask]
        # Per-layer change between consecutive x positions (first column as-is).
        increase = np.hstack((y[:, 0:1], np.diff(y)))
        below_size = total - stack
        below_size += 0.5 * y
        move_up = below_size * inv_total
        move_up[:, 0] = 0.5
        center = (move_up - 0.5) * increase
        center = np.cumsum(center.sum(0))
        first_line = center - 0.5 * total
        stack += first_line

    else:
        errstr = "Baseline method %s not recognised. " % baseline
        errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
        raise ValueError(errstr)

    # Color between x = 0 and the first array.
    color = axes._get_lines.get_next_color()
    coll = axes.fill_between(x, first_line, stack[0, :],
                             facecolor=color, label=six.next(labels, None),
                             **kwargs)
    # Keep autoscaling from padding away from the zero baseline.
    coll.sticky_edges.y[:] = [0]
    r = [coll]

    # Color between array i-1 and array i
    for i in xrange(len(y) - 1):
        color = axes._get_lines.get_next_color()
        r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
                                   facecolor=color,
                                   label= six.next(labels, None),
                                   **kwargs))
    return r
| lib/matplotlib/stackplot.py | 4,198 | Draws a stacked area plot.
*x* : 1d array of dimension N
*y* : 2d array of dimension MxN, OR any number 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4, are all 1xNm
Keyword arguments:
*baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
Method used to calculate the baseline. 'zero' is just a
simple stacked plot. 'sym' is symmetric around zero and
is sometimes called `ThemeRiver`. 'wiggle' minimizes the
sum of the squared slopes. 'weighted_wiggle' does the
same but weights to account for size of each layer.
It is also called `Streamgraph`-layout. More details
can be found at http://leebyron.com/streamgraph/.
*labels* : A list or tuple of labels to assign to each data series.
*colors* : A list or tuple of colors. These will be cycled through and
used to colour the stacked areas.
All other keyword arguments are passed to
:func:`~matplotlib.Axes.fill_between`
Returns *r* : A list of
:class:`~matplotlib.collections.PolyCollection`, one for each
element in the stacked area plot.
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
Assume data passed has not been 'stacked', so stack it here. multiply by 1/total (or zero) to avoid infinities in the division: Color between x = 0 and the first array. Color between array i-1 and array i | 1,741 | en | 0.786461 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenMiniInnerversionOnlinePublishModel import AlipayOpenMiniInnerversionOnlinePublishModel
class AlipayOpenMiniInnerversionOnlinePublishRequest(object):
    """Request object for the 'alipay.open.mini.innerversion.online.publish' API.

    Holds the business payload (biz_model / biz_content) together with common
    gateway fields (version, terminal info, notify/return URLs, user-defined
    params) and flattens them into a parameter dict via get_params().
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None   # user-defined extra text params (dict), lazily created
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a model instance or a plain dict, which is converted
        # through the model's from_alipay_dict() factory.
        if isinstance(value, AlipayOpenMiniInnerversionOnlinePublishModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayOpenMiniInnerversionOnlinePublishModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (generated-SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Add one user-defined text parameter, creating udf_params on first use."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Assemble the flat request-parameter dict sent to the gateway.

        Only fields that are set (truthy) are included; model payloads are
        JSON-serialized with compact separators and sorted keys.
        """
        params = dict()
        params[P_METHOD] = 'alipay.open.mini.innerversion.online.publish'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Return multipart (file upload) parameters; none for this request."""
        multipart_params = dict()
        return multipart_params
| alipay/aop/api/request/AlipayOpenMiniInnerversionOnlinePublishRequest.py | 4,084 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
import bmesh
from ..com.gltf2_blender_extras import set_extras
from .gltf2_blender_material import BlenderMaterial
from .gltf2_blender_primitive import BlenderPrimitive
class BlenderMesh():
    """Namespace of static helpers that build a Blender mesh from a glTF mesh.

    Not instantiable -- use the static methods directly.
    """
    def __new__(cls, *args, **kwargs):
        raise RuntimeError("%s should not be instantiated" % cls)

    @staticmethod
    def create(gltf, mesh_idx, skin_idx):
        """Create and return a bpy mesh for gltf.data.meshes[mesh_idx].

        skin_idx is forwarded to BlenderPrimitive.add_primitive_to_bmesh for
        each primitive.
        """
        pymesh = gltf.data.meshes[mesh_idx]

        # Create one bmesh, add all primitives to it, and then convert it to a
        # mesh.
        bme = bmesh.new()

        # List of all the materials this mesh will use. The material each
        # primitive uses is set by giving an index into this list.
        materials = []

        # Process all primitives
        for prim in pymesh.primitives:
            if prim.material is None:
                material_idx = None
            else:
                pymaterial = gltf.data.materials[prim.material]

                vertex_color = None
                if 'COLOR_0' in prim.attributes:
                    vertex_color = 'COLOR_0'

                # Create Blender material if needed
                if vertex_color not in pymaterial.blender_material:
                    BlenderMaterial.create(gltf, prim.material, vertex_color)
                material_name = pymaterial.blender_material[vertex_color]
                material = bpy.data.materials[material_name]

                # Reuse the slot if this material is already in the list.
                try:
                    material_idx = materials.index(material.name)
                except ValueError:
                    materials.append(material.name)
                    material_idx = len(materials) - 1

            BlenderPrimitive.add_primitive_to_bmesh(gltf, bme, pymesh, prim, skin_idx, material_idx)

        name = pymesh.name or 'Mesh_' + str(mesh_idx)
        mesh = bpy.data.meshes.new(name)
        BlenderMesh.bmesh_to_mesh(gltf, pymesh, bme, mesh)
        bme.free()
        for name_material in materials:
            mesh.materials.append(bpy.data.materials[name_material])
        mesh.update()

        set_extras(mesh, pymesh.extras, exclude=['targetNames'])

        # Clear accessor cache after all primitives are done
        gltf.accessor_cache = {}

        return mesh

    @staticmethod
    def bmesh_to_mesh(gltf, pymesh, bme, mesh):
        """Convert bme into mesh, then fill in shape keys, smoothing and normals."""
        bme.to_mesh(mesh)

        # Unfortunately need to do shapekeys/normals/smoothing ourselves.

        # Shapekeys
        if len(bme.verts.layers.shape) != 0:
            # The only way I could find to create a shape key was to temporarily
            # parent mesh to an object and use obj.shape_key_add.
            tmp_ob = None
            try:
                tmp_ob = bpy.data.objects.new('##gltf-import:tmp-object##', mesh)
                tmp_ob.shape_key_add(name='Basis')
                mesh.shape_keys.name = mesh.name
                for layer_name in bme.verts.layers.shape.keys():
                    tmp_ob.shape_key_add(name=layer_name)
                    key_block = mesh.shape_keys.key_blocks[layer_name]
                    layer = bme.verts.layers.shape[layer_name]
                    for i, v in enumerate(bme.verts):
                        key_block.data[i].co = v[layer]
            finally:
                if tmp_ob:
                    bpy.data.objects.remove(tmp_ob)

        # Normals
        mesh.update()

        if gltf.import_settings['import_shading'] == "NORMALS":
            mesh.create_normals_split()

        use_smooths = []  # whether to smooth for each poly
        face_idx = 0
        for prim in pymesh.primitives:
            if gltf.import_settings['import_shading'] == "FLAT" or \
                    'NORMAL' not in prim.attributes:
                use_smooths += [False] * prim.num_faces
            elif gltf.import_settings['import_shading'] == "SMOOTH":
                use_smooths += [True] * prim.num_faces
            elif gltf.import_settings['import_shading'] == "NORMALS":
                mesh_loops = mesh.loops
                for fi in range(face_idx, face_idx + prim.num_faces):
                    poly = mesh.polygons[fi]
                    # "Flat normals" are when all the vertices in poly have the
                    # poly's normal. Otherwise, smooth the poly.
                    for loop_idx in range(poly.loop_start, poly.loop_start + poly.loop_total):
                        vi = mesh_loops[loop_idx].vertex_index
                        if poly.normal.dot(bme.verts[vi].normal) <= 0.9999999:
                            use_smooths.append(True)
                            break
                    else:
                        use_smooths.append(False)
            else:
                # shouldn't happen
                assert False

            face_idx += prim.num_faces

        mesh.polygons.foreach_set('use_smooth', use_smooths)

        # Custom normals, now that every update is done
        if gltf.import_settings['import_shading'] == "NORMALS":
            custom_normals = [v.normal for v in bme.verts]
            mesh.normals_split_custom_set_from_vertices(custom_normals)
            mesh.use_auto_smooth = True
| addons/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py | 5,752 | Blender Mesh.
Mesh creation.
Copyright 2018-2019 The glTF-Blender-IO authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Create one bmesh, add all primitives to it, and then convert it to a mesh. List of all the materials this mesh will use. The material each primitive uses is set by giving an index into this list. Process all primitives Create Blender material if needed Clear accessor cache after all primitives are done Unfortunately need to do shapekeys/normals/smoothing ourselves. Shapekeys The only way I could find to create a shape key was to temporarily parent mesh to an object and use obj.shape_key_add. Normals whether to smooth for each poly "Flat normals" are when all the vertices in poly have the poly's normal. Otherwise, smooth the poly. shouldn't happen Custom normals, now that every update is done | 1,302 | en | 0.861917 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import aiokubernetes
from aiokubernetes.models.v1beta1_custom_resource_subresources import V1beta1CustomResourceSubresources # noqa: E501
from aiokubernetes.rest import ApiException
class TestV1beta1CustomResourceSubresources(unittest.TestCase):
    """V1beta1CustomResourceSubresources unit test stubs (swagger-codegen generated)."""

    def setUp(self):
        # No fixtures needed for the stubbed test below.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1beta1CustomResourceSubresources(self):
        """Test V1beta1CustomResourceSubresources"""
        # FIXME: construct object with mandatory attributes with example values
        # model = aiokubernetes.models.v1beta1_custom_resource_subresources.V1beta1CustomResourceSubresources()  # noqa: E501
        pass
# Allow running this test module directly with `python <module>`.
if __name__ == '__main__':
    unittest.main()
| test/test_v1beta1_custom_resource_subresources.py | 1,086 | V1beta1CustomResourceSubresources unit test stubs
Test V1beta1CustomResourceSubresources
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: E501 FIXME: construct object with mandatory attributes with example values model = aiokubernetes.models.v1beta1_custom_resource_subresources.V1beta1CustomResourceSubresources() noqa: E501 | 525 | en | 0.40557 |
# QAP Gemini
#
# adcclib.py
# ------------------------------------------------------------------------------
import os
import sys
import signal
import time
from copy import copy
from threading import Event
from threading import Thread
from recipe_system.adcc.servers import http_proxy
from recipe_system.adcc.servers import eventsManager
from recipe_system.config import globalConf
from recipe_system.config import STANDARD_REDUCTION_CONF
from recipe_system.utils.findexe import findexe
# ------------------------------------------------------------------------------
def get_adcc_dir(dirtitle="adcc"):
    """Return the adcc working directory name, creating it if absent.

    Parameters
    ----------
    dirtitle : str
        Key selecting the directory; only "adcc" is defined, mapping to the
        relative path ".adcc".

    Returns
    -------
    str
        The (relative) directory path.
    """
    dotadcc = {"adcc": ".adcc"}
    # makedirs(..., exist_ok=True) avoids the check-then-create race the
    # original os.path.exists()/os.mkdir() pair was subject to.
    os.makedirs(dotadcc[dirtitle], exist_ok=True)
    return dotadcc[dirtitle]
def write_adcc_sr(srname, vals):
    """Write the adcc startup report `vals` (its repr) to the file `srname`."""
    print(f"adcclib: adcc startup report in {srname}")
    with open(srname, "w+") as report:
        report.write(repr(vals))
class Singleton(type):
    """Metaclass caching exactly one instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class ADCC(metaclass=Singleton):
    """Process-wide adcc service object (one instance via the Singleton metaclass).

    Owns the QAP event managers, starts the HTTP proxy interface on a
    background thread, and manages the adcc lock (race) file so only one adcc
    runs at a time.
    """

    def __init__(self, args=None):
        # NOTE(review): with args=None no attributes are set at all; callers
        # appear expected to pass a parsed-arguments object on first call.
        if args is not None:
            self.dark = args.dark
            self.events = eventsManager.EventsManager()
            self.spec_events = eventsManager.EventsManager()
            self.http_port = args.httpport
            self.sreport = args.adccsrn
            self.racefile = "adccinfo.py"
            self.verbose = args.verbosity
            self.web = None

    def _check_adcc(self, cpid):
        """Return pids of running 'adcc' processes other than `cpid`, printing each."""
        adccproc = findexe('adcc')
        xprocx = copy(adccproc)
        try:
            xprocx.pop(adccproc.index(cpid))
        except ValueError:
            # cpid not among the found adcc processes; nothing to drop.
            pass
        for pid in xprocx:
            print("adcclib: adcc process {} running.".format(pid))
        return xprocx

    def _check_kill_adcc(self, pids):
        """Send SIGKILL to every process id in `pids`."""
        for pid in pids:
            os.kill(int(pid), signal.SIGKILL)

    def _http_interface(self, run_event):
        """Build (but do not start) the web interface thread on self.web."""
        # establish HTTP server and proxy.
        self.web = Thread(group=None, target=http_proxy.main, name="webface",
                          args=(run_event,),
                          kwargs={
                              'port': self.http_port,
                              'dark': self.dark,
                              'events': self.events,
                              'spec_events': self.spec_events,
                              'verbose': self.verbose
                          }
                          )

    def _handle_locks(self):
        """Exit or clean up depending on running adcc processes and the lockfile."""
        curpid = os.getpid()
        adccdir = get_adcc_dir()
        lockf = os.path.join(adccdir, self.racefile)
        lfile = os.path.exists(lockf)
        pids = self._check_adcc(curpid)
        msgs = {
            'lockrun': "adcclib: adcc running and lockfile detected.",
            'portrun': "adcclib: adcc running on port {}",
            'norun': "adcclib: No adcc running but lockfile found.",
            'rupted': "adcclib: adcc config appears corrupted. Clearing ..."
        }
        if pids and lfile:
            sys.exit(msgs['lockrun'])
        elif pids and not lfile:
            sys.exit(msgs['portrun'].format(self.http_port))
        elif lfile and not pids:
            # Stale lockfile from a dead adcc -- remove it and continue.
            print(msgs['norun'])
            print(msgs['rupted'])
            os.unlink(lockf)

    def _write_locks(self):
        """
        Write racefile and ADCC Startup Report
        """
        dotadcc = get_adcc_dir()
        vals = {"http_port": self.http_port, "pid": os.getpid()}
        rfile = os.path.join(dotadcc, self.racefile)
        with open(rfile, "w") as ports:
            ports.write(repr(vals))

        sr = os.path.join(dotadcc, self.sreport)
        write_adcc_sr(sr, vals)

    def main(self):
        """Run the adcc: take locks, start the web thread, idle until Ctrl-C."""
        globalConf.load(STANDARD_REDUCTION_CONF, env_override=True)
        self._handle_locks()
        self._write_locks()

        # start webinterface
        run_event = Event()
        run_event.set()
        self._http_interface(run_event)
        self.web.start()

        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print("\nadcc: exiting due to Ctrl-C")
            run_event.clear()
            self.web.join()

        # Remove the lockfile written by _write_locks(). The original removed
        # the bare self.racefile name, which never matched the file actually
        # written under the .adcc directory, so stale locks were left behind.
        lockf = os.path.join(get_adcc_dir(), self.racefile)
        if os.path.exists(lockf):
            os.remove(lockf)
| recipe_system/adcc/adcclib.py | 4,661 | Write racefile and ADCC Startup Report
QAP Gemini adcclib.py ------------------------------------------------------------------------------ ------------------------------------------------------------------------------ establish HTTP server and proxy. start webinterface | 408 | en | 0.296922 |
from datetime import datetime
from dagster import Out, job, op
from dagster.utils import script_relative_path
from dagster_pandas import PandasColumn, create_dagster_pandas_dataframe_type
from dagster_pandas.constraints import (
ColumnConstraint,
ColumnConstraintViolationException,
ColumnDTypeInSetConstraint,
)
from pandas import DataFrame, read_csv
# start_custom_col
class DivisibleByFiveConstraint(ColumnConstraint):
    """Column constraint: every value in the column must be a multiple of 5."""

    def __init__(self):
        description = "Value must be divisible by 5"
        super(DivisibleByFiveConstraint, self).__init__(
            error_description=description, markdown_description=description
        )

    def validate(self, dataframe, column_name):
        """Raise ColumnConstraintViolationException listing any offending rows."""
        offending_rows = dataframe[dataframe[column_name].apply(lambda value: value % 5 != 0)]
        if offending_rows.empty:
            return
        raise ColumnConstraintViolationException(
            constraint_name=self.name,
            constraint_description=self.error_description,
            column_name=column_name,
            offending_rows=offending_rows,
        )
# DataFrame type whose 'amount_paid' column must be int64 and divisible by 5.
CustomTripDataFrame = create_dagster_pandas_dataframe_type(
    name="CustomTripDataFrame",
    columns=[
        PandasColumn(
            "amount_paid",
            constraints=[ColumnDTypeInSetConstraint({"int64"}), DivisibleByFiveConstraint()],
        )
    ],
)
# end_custom_col
@op(out=Out(CustomTripDataFrame))
def load_custom_trip_dataframe() -> DataFrame:
    """Read ebike_trips.csv with start/end times parsed as datetimes and
    the color column as a pandas category; output validated as
    CustomTripDataFrame."""
    return read_csv(
        script_relative_path("./ebike_trips.csv"),
        parse_dates=["start_time", "end_time"],
        date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
        dtype={"color": "category"},
    )
@job
def custom_column_constraint_trip():
    """Job wiring the single load_custom_trip_dataframe op."""
    load_custom_trip_dataframe()
| examples/docs_snippets/docs_snippets/legacy/dagster_pandas_guide/custom_column_constraint.py | 1,808 | start_custom_col end_custom_col | 31 | en | 0.395284 |
# coding: utf-8
"""
jinja2schema.config
~~~~~~~~~~~~~~~~~~~
"""
from .order_number import OrderNumber
class Config(object):
    """Configuration."""

    TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE = 'dictionary'
    """Possible values: ``"dictionary"`` or ``"list"``.

    For example, in the expression ``xs[a]`` variable ``xs`` may be a list as well as a dictionary.
    This setting is used to choose between a dictionary and a list when the variable is
    being indexed with another variable.
    """

    TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE = 'list'
    """Possible values: ``"dictionary"``, ``"list"`` or ``"tuple"``.

    For example, in the expression ``xs[2]`` variable ``xs`` may be a list as well as a dictionary or a tuple.
    This setting is used to choose between a dictionary, a tuple and a list when the variable is
    being indexed with an integer.
    """

    BOOLEAN_CONDITIONS = False
    """Whether or not to consider conditions in ``if`` statements as boolean.

    If this variable is not set, ``xs`` variable in template ``{% if xs %}{% endif %}`` will have
    unknown structure. If this variable is set, ``xs`` will be a boolean.
    """

    PACKAGE_NAME = ''
    """Name of the package where you want to load templates from.

    This configuration is for if you are using includes in your jinja templates. This tells jinja
    where to look to be able to load the included template from. If you do not plan on using ``includes``
    this configuration is not needed.
    """

    TEMPLATE_DIR = 'templates'
    """Name of the directory where you want to load templates from. Defaulted to ``templates``

    This configuration is for if you are using includes in your jinja templates. This tells jinja
    which directoy to look to be able to load the included template from. If you do not plan on using ``includes``
    this configuration is not needed.
    """

    ORDER_NUMBER = False
    """Add a order number to each node

    Add a order number to make schema sortable.
    """

    ORDER_NUMBER_SUB_COUNTER = True
    """Independent subsection order numbers

    Use a separate counter in subsections as order number creator.
    """

    IGNORE_UNKNOWN_FILTERS = False
    """Ignore unknown filters

    Can be True to ignore all unknown filters, False (by default, to keep the original behavior), or a list
    (set or tuple) containing the filter names to ignore.
    Those filters will always accept Unknown and return Unknown.
    """

    def __init__(self,
                 TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE='dictionary',
                 TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE='list',
                 BOOLEAN_CONDITIONS=False,
                 PACKAGE_NAME='',
                 TEMPLATE_DIR='templates',
                 ORDER_NUMBER=False,
                 ORDER_NUMBER_SUB_COUNTER=True,
                 IGNORE_UNKNOWN_FILTERS=False):
        # Validate the two "indexed variable" settings before storing anything.
        # (The original messages were missing a space due to implicit string
        # concatenation, and the second one named the wrong setting.)
        if TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE not in ('dictionary', 'list'):
            raise ValueError('TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE must be '
                             'either "dictionary" or "list"')
        if TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE not in ('dictionary', 'list', 'tuple'):
            raise ValueError('TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE must be '
                             'either "dictionary", "tuple" or "list"')
        self.TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE = TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE
        self.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE = TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE
        self.BOOLEAN_CONDITIONS = BOOLEAN_CONDITIONS
        self.PACKAGE_NAME = PACKAGE_NAME
        self.TEMPLATE_DIR = TEMPLATE_DIR
        self.ORDER_NUMBER = ORDER_NUMBER
        self.ORDER_OBJECT = OrderNumber(number=1, enabled=self.ORDER_NUMBER,
                                        sub_counter_enabled=ORDER_NUMBER_SUB_COUNTER)
        self.IGNORE_UNKNOWN_FILTERS = IGNORE_UNKNOWN_FILTERS
default_config = Config()
| jinja2schema/config.py | 4,031 | Configuration.
jinja2schema.config
~~~~~~~~~~~~~~~~~~~
coding: utf-8 | 70 | en | 0.204889 |
from typing import Any, List, Literal, TypedDict
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_Element import FHIR_Element
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# A record of a clinical assessment performed to determine what problem(s) may affect the patient and before planning the treatments or management strategies that are best to manage a patient's condition. Assessments are often 1:1 with a clinical consultation / encounter, but this varies greatly depending on the clinical workflow. This resource is called "ClinicalImpression" rather than "ClinicalAssessment" to avoid confusion with the recording of assessment tools such as Apgar score.
class FHIR_ClinicalImpression_Finding(TypedDict, total=False):
    """One finding or diagnosis recorded as part of a ClinicalImpression.

    Class-based equivalent of the functional ``TypedDict(...)`` declaration;
    ``total=False`` keeps every key optional, exactly as before.
    """

    # Unique id for the element within a resource (for internal references).
    # This may be any string value that does not contain spaces.
    id: FHIR_string
    # Additional information that is not part of the basic definition of the
    # element (standard FHIR extension list).
    extension: List[Any]
    # Extensions that modify the understanding of the element in which they
    # are contained; processing applications are required to check for them.
    modifierExtension: List[Any]
    # Specific text or code for finding or diagnosis, which may include
    # ruled-out or resolved conditions.
    itemCodeableConcept: FHIR_CodeableConcept
    # Specific reference for finding or diagnosis, which may include
    # ruled-out or resolved conditions.
    itemReference: FHIR_Reference
    # Which investigations support finding or diagnosis.
    basis: FHIR_string
    # Extensions for basis.
    _basis: FHIR_Element
| src/fhir_types/FHIR_ClinicalImpression_Finding.py | 2,766 | A record of a clinical assessment performed to determine what problem(s) may affect the patient and before planning the treatments or management strategies that are best to manage a patient's condition. Assessments are often 1:1 with a clinical consultation / encounter, but this varies greatly depending on the clinical workflow. This resource is called "ClinicalImpression" rather than "ClinicalAssessment" to avoid confusion with the recording of assessment tools such as Apgar score. Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). Specific text or code for finding or diagnosis, which may include ruled-out or resolved conditions. 
Specific reference for finding or diagnosis, which may include ruled-out or resolved conditions. Which investigations support finding or diagnosis. Extensions for basis | 2,097 | en | 0.920554 |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
'''
ComputeCost computes the cost function
'''
def computeCost(X, y, theta):
    """Return the linear-regression cost J(theta).

    J = (1 / (2m)) * sum((X @ theta - y) ** 2)

    Args:
        X: (m, n) design matrix (first column expected to be all ones).
        y: (m, 1) vector of target values.
        theta: (n, 1) parameter vector.

    Returns:
        The scalar cost J for the given parameters.
    """
    m = len(y)  # number of training examples
    residuals = np.matmul(X, theta) - y  # prediction errors, shape (m, 1)
    # Sum of squared residuals, scaled by 1/(2m).
    return np.sum(np.square(residuals)) / (2.0 * m)
'''
gradientDescent function iterates till it finds a minima
'''
def gradientDescent(X, y, theta, alpha, num_iters):
    #function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
    #GRADIENTDESCENT Performs gradient descent to learn theta
    #   theta = GRADIENTDESENT(X, y, theta, alpha, num_iters) updates theta by
    #   taking num_iters gradient steps with learning rate alpha
    #
    # X         : (m, 2) design matrix (bias column plus one feature).
    # y         : (m, 1) target vector.
    # theta     : (2, 1) initial parameter vector.
    # alpha     : learning rate.
    # num_iters : number of gradient steps to take.
    # Returns the final (2, 1) theta.  J_history is filled per iteration for
    # logging but is not returned.
    #
    # NOTE(review): every 200 iterations this function also plots the current
    # fit using the module-level globals X_data and y_data, shows the window
    # for 3 seconds, then closes it -- a visualization side effect to be
    # aware of when calling this in a non-interactive environment.

    # Initialize some useful values
    m = len(y); # number of training examples
    J_history = np.zeros((num_iters, 1));

    for iter in range(num_iters):
        # Split the design matrix into its two columns so the two partial
        # derivatives can be computed separately.
        X_0 = X[:,0].reshape((m,1));
        X_1 = X[:,1].reshape((m,1));
        X_0_tr = np.transpose(X_0)
        X_1_tr = np.transpose(X_1)
        X_theta_prod = (np.matmul(X,theta)).reshape((m,1))  # predictions X*theta
        X_theta_y_diff = np.subtract(X_theta_prod,y)        # residuals X*theta - y
        # Simultaneous update of both parameters (both use the residuals
        # computed from the *old* theta).
        theta_0 = theta.item(0) - (float(alpha)/float(m))*(np.matmul(X_0_tr, X_theta_y_diff)).item(0)
        theta_1 = theta.item(1) - (float(alpha)/float(m))*(np.matmul(X_1_tr, X_theta_y_diff)).item(0)
        theta = np.array([theta_0, theta_1]).reshape((2,1))

        # Plot the linear fit every 200 iterations (blocks ~3 seconds).
        if(iter%200==0):
            plt.scatter(X_data, y_data, marker='o', color='g', label='orig')
            y_data_predicted = np.matmul(X,theta)
            plt.plot(X_data, y_data_predicted, marker='*', linestyle='-', color='b', label='pred')
            plt.legend(loc='lower right')
            plt.show(block=False)
            time.sleep(3)
            plt.close()

        # Save the cost J in every iteration
        J_history[iter] = computeCost(X, y, theta)
        print "Cost @ iteration: ",iter, " = ", J_history[iter]

    return theta
# Load the comma-separated training data: column 0 is the city population,
# column 1 is the observed profit (see ex1data1.txt).
data = pd.read_csv('ex1data1.txt', header = None, names = ['Population', 'Profits'])
y_data = data.iloc[:,1]
X_data = data.iloc[:,0]
m = len(y_data) #Number of training samples
y = np.array(y_data).reshape(m,1)
X = np.c_[np.ones(m), np.array(X_data)] # Add a column of ones to x

# Set up the scatter plot of the raw data.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Population - Profit Scatter Plot')
ax.set_xlabel('Population in 10000s')
ax.set_ylabel('Profit in 10000$')

theta = np.zeros((2, 1)).reshape((2,1)) # initialize fitting parameters
# NOTE(review): the next line immediately overwrites the zero initialization
# above; only the (40, 40) starting point is actually used.
theta = np.array([40,40]).reshape((2,1))# Try initializing from a different point. The convergence will be seen easily
print "Cost Function Value is:", computeCost(X, y, theta)

# Some gradient descent settings
iterations = 1500;
alpha = 0.01;

# run gradient descent
theta = gradientDescent(X, y, theta, alpha, iterations);

# print theta to screen
print 'Theta found by gradient descent: ', theta.item(0), theta.item(1)

# Plot the linear fit
plt.scatter(X_data, y_data, marker='o', color='g', label='orig')
y_data_predicted = np.matmul(X,theta)
plt.plot(X_data, y_data_predicted, marker='*', linestyle='-', color='b', label='pred')
plt.legend(loc='lower right')

# Predict values for population sizes of 35,000 and 70,000
#predict1 = [1, 3.5] *theta;
#fprintf('For population = 35,000, we predict a profit of %f\n',...
#     predict1*10000);
#predict2 = [1, 7] * theta;
#fprintf('For population = 70,000, we predict a profit of %f\n',...
| Linear_Regression/ex1.py | 4,537 | computeCost Compute cost for linear regression J = computeCost(X, y, theta) computes the cost of using theta as the parameter for linear regression to fit the data points in X and y Initialize some useful values number of training examples You need to return the following variables correctly ====================== YOUR CODE HERE ====================== Instructions: Compute the cost of a particular choice of theta ========================================================================= You should set J to the cost. X*theta X*theta - y Square each element of the matrix computed above Sum all the elements Cost Functionfunction [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)GRADIENTDESCENT Performs gradient descent to learn theta theta = GRADIENTDESENT(X, y, theta, alpha, num_iters) updates theta by taking num_iters gradient steps with learning rate alpha Initialize some useful values number of training examples ====================== YOUR CODE HERE ====================== Instructions: Perform a single gradient step on the parameter vector theta. Hint: While debugging, it can be useful to print out the values of the cost function (computeCost) and gradient here.print X_0.shape, X_0_tr.shape, theta.shape, X.shape, X_theta_prod.shape, y.shape, X_theta_y_diff.shape Plot the linear fit ============================================================ Save the cost J in every iteration Number of training samples Add a column of ones to x initialize fitting parameters Try initializing from a different point. The convergence will be seen easily Some gradient descent settings run gradient descent print theta to screen Plot the linear fit Predict values for population sizes of 35,000 and 70,000predict1 = [1, 3.5] *theta;fprintf('For population = 35,000, we predict a profit of %f\n',... predict1*10000);predict2 = [1, 7] * theta;fprintf('For population = 70,000, we predict a profit of %f\n',... 
predict2*10000); | 1,984 | en | 0.647776 |
# Mobile Verification Toolkit (MVT)
# Copyright (c) 2021-2022 The MVT Project Authors.
# Use of this software is governed by the MVT License 1.1 that can be found at
# https://license.mvt.re/1.1/
import logging
import os
import click
from rich.logging import RichHandler
from mvt.common.help import (HELP_MSG_FAST, HELP_MSG_IOC,
HELP_MSG_LIST_MODULES, HELP_MSG_MODULE,
HELP_MSG_OUTPUT, HELP_MSG_SERIAL)
from mvt.common.indicators import Indicators, download_indicators_files
from mvt.common.logo import logo
from mvt.common.module import run_module, save_timeline
from .download_apks import DownloadAPKs
from .lookups.koodous import koodous_lookup
from .lookups.virustotal import virustotal_lookup
from .modules.adb import ADB_MODULES
from .modules.backup import BACKUP_MODULES
# Setup logging using Rich.
# Log line format: "[logger name] message" (Rich adds the timestamp).
LOG_FORMAT = "[%(name)s] %(message)s"
logging.basicConfig(level="INFO", format=LOG_FORMAT, handlers=[
    RichHandler(show_path=False, log_time_format="%X")])
# Module-level logger shared by the CLI commands below.
log = logging.getLogger(__name__)
#==============================================================================
# Main
#==============================================================================
@click.group(invoke_without_command=False)
def cli():
    """Root command group; runs logo() before dispatching to a subcommand."""
    logo()
#==============================================================================
# Command: version
#==============================================================================
@cli.command("version", help="Show the currently installed version of MVT")
def version():
    # The group callback has already run logo() by the time this executes.
    # NOTE(review): this body is otherwise a no-op -- presumably logo()
    # includes the version string; confirm, since the help text promises
    # version output.
    return
#==============================================================================
# Download APKs
#==============================================================================
@cli.command("download-apks", help="Download all or non-safelisted installed APKs installed on the device")
@click.option("--serial", "-s", type=str, help=HELP_MSG_SERIAL)
@click.option("--all-apks", "-a", is_flag=True,
              help="Extract all packages installed on the phone, including system packages")
@click.option("--virustotal", "-v", is_flag=True, help="Check packages on VirusTotal")
@click.option("--koodous", "-k", is_flag=True, help="Check packages on Koodous")
@click.option("--all-checks", "-A", is_flag=True, help="Run all available checks")
@click.option("--output", "-o", type=click.Path(exists=False),
              help="Specify a path to a folder where you want to store the APKs")
@click.option("--from-file", "-f", type=click.Path(exists=True),
              help="Instead of acquiring from phone, load an existing packages.json file for lookups (mainly for debug purposes)")
@click.pass_context
def download_apks(ctx, all_apks, virustotal, koodous, all_checks, output, from_file, serial):
    """Acquire the package list (from a connected device, or from a
    previously saved packages.json) and optionally check the packages
    against VirusTotal and/or Koodous."""
    try:
        if from_file:
            # Reuse a saved acquisition instead of talking to a phone.
            download = DownloadAPKs.from_json(from_file)
        else:
            # TODO: Do we actually want to be able to run without storing any file?
            if not output:
                log.critical("You need to specify an output folder with --output!")
                ctx.exit(1)

            if not os.path.exists(output):
                try:
                    os.makedirs(output)
                except Exception as e:
                    log.critical("Unable to create output folder %s: %s", output, e)
                    ctx.exit(1)

            download = DownloadAPKs(output_folder=output, all_apks=all_apks,
                                    log=logging.getLogger(DownloadAPKs.__module__))
            if serial:
                download.serial = serial
            download.run()

        packages = download.packages

        # Nothing acquired: no lookups to perform.
        if len(packages) == 0:
            return

        if virustotal or all_checks:
            virustotal_lookup(packages)

        if koodous or all_checks:
            koodous_lookup(packages)
    except KeyboardInterrupt:
        # Move to a fresh line after ^C, then signal failure to the shell.
        print("")
        ctx.exit(1)
#==============================================================================
# Checks through ADB
#==============================================================================
@cli.command("check-adb", help="Check an Android device over adb")
@click.option("--serial", "-s", type=str, help=HELP_MSG_SERIAL)
@click.option("--iocs", "-i", type=click.Path(exists=True), multiple=True,
              default=[], help=HELP_MSG_IOC)
@click.option("--output", "-o", type=click.Path(exists=False),
              help=HELP_MSG_OUTPUT)
@click.option("--fast", "-f", is_flag=True, help=HELP_MSG_FAST)
@click.option("--list-modules", "-l", is_flag=True, help=HELP_MSG_LIST_MODULES)
@click.option("--module", "-m", help=HELP_MSG_MODULE)
@click.pass_context
def check_adb(ctx, iocs, output, fast, list_modules, module, serial):
    """Run every (or one selected) adb acquisition module against a device
    and optionally save the merged timelines to the output folder."""
    # When only a module listing is requested, print it and stop.
    if list_modules:
        log.info("Following is the list of available check-adb modules:")
        for module_cls in ADB_MODULES:
            log.info(" - %s", module_cls.__name__)
        return

    log.info("Checking Android through adb bridge")

    # Create the output folder up front; abort if that is not possible.
    if output and not os.path.exists(output):
        try:
            os.makedirs(output)
        except Exception as e:
            log.critical("Unable to create output folder %s: %s", output, e)
            ctx.exit(1)

    timeline = []
    timeline_detected = []

    indicators = Indicators(log=log)
    indicators.load_indicators_files(iocs)

    for module_cls in ADB_MODULES:
        # Honor the --module filter by class name.
        if module and module_cls.__name__ != module:
            continue

        mod = module_cls(output_folder=output, fast_mode=fast,
                         log=logging.getLogger(module_cls.__module__))
        if indicators.total_ioc_count:
            mod.indicators = indicators
            mod.indicators.log = mod.log

        if serial:
            mod.serial = serial

        run_module(mod)
        timeline.extend(mod.timeline)
        timeline_detected.extend(mod.timeline_detected)

    # Persist the merged timelines only when an output folder was given.
    if output:
        if timeline:
            save_timeline(timeline, os.path.join(output, "timeline.csv"))
        if timeline_detected:
            save_timeline(timeline_detected, os.path.join(output, "timeline_detected.csv"))
#==============================================================================
# Check ADB backup
#==============================================================================
@cli.command("check-backup", help="Check an Android Backup")
@click.option("--serial", "-s", type=str, help=HELP_MSG_SERIAL)
@click.option("--iocs", "-i", type=click.Path(exists=True), multiple=True,
              default=[], help=HELP_MSG_IOC)
@click.option("--output", "-o", type=click.Path(exists=False), help=HELP_MSG_OUTPUT)
@click.argument("BACKUP_PATH", type=click.Path(exists=True))
@click.pass_context
def check_backup(ctx, iocs, output, backup_path, serial):
    """Run all backup modules against an extracted Android backup folder."""
    log.info("Checking ADB backup located at: %s", backup_path)

    # Create the output folder up front; abort if that is not possible.
    if output and not os.path.exists(output):
        try:
            os.makedirs(output)
        except Exception as e:
            log.critical("Unable to create output folder %s: %s", output, e)
            ctx.exit(1)

    indicators = Indicators(log=log)
    indicators.load_indicators_files(iocs)

    if os.path.isfile(backup_path):
        # BUG FIX: message previously read "is a not a folder", and the
        # ctx.exit(1) only fired for the "backup.ab" case, so any other file
        # path fell through and the modules ran against a plain file. Now any
        # file path aborts the command.
        log.critical("The path you specified is not a folder!")

        if os.path.basename(backup_path) == "backup.ab":
            log.info("You can use ABE (https://github.com/nelenkov/android-backup-extractor) "
                     "to extract 'backup.ab' files!")
        ctx.exit(1)

    for module in BACKUP_MODULES:
        m = module(base_folder=backup_path, output_folder=output,
                   log=logging.getLogger(module.__module__))
        if indicators.total_ioc_count:
            m.indicators = indicators
            m.indicators.log = m.log

        if serial:
            m.serial = serial

        run_module(m)
#==============================================================================
# Command: check-iocs
#==============================================================================
@cli.command("check-iocs", help="Compare stored JSON results to provided indicators")
@click.option("--iocs", "-i", type=click.Path(exists=True), multiple=True,
              default=[], help=HELP_MSG_IOC)
@click.option("--list-modules", "-l", is_flag=True, help=HELP_MSG_LIST_MODULES)
@click.option("--module", "-m", help=HELP_MSG_MODULE)
@click.argument("FOLDER", type=click.Path(exists=True))
@click.pass_context
def check_iocs(ctx, iocs, list_modules, module, folder):
    """Re-check previously saved JSON results against indicator files."""
    # Deduplicate while preserving order (some modules may appear in both lists).
    all_modules = list(dict.fromkeys(BACKUP_MODULES + ADB_MODULES))

    if list_modules:
        log.info("Following is the list of available check-iocs modules:")
        for module_cls in all_modules:
            log.info(" - %s", module_cls.__name__)
        return

    log.info("Checking stored results against provided indicators...")

    indicators = Indicators(log=log)
    indicators.load_indicators_files(iocs)

    total_detections = 0
    for file_name in os.listdir(folder):
        name_only, ext = os.path.splitext(file_name)
        # TODO: Skipping processing of result files that are not json.
        #       We might want to revisit this eventually.
        if ext != ".json":
            continue

        file_path = os.path.join(folder, file_name)
        for module_cls in all_modules:
            # Honor the --module filter, then match the file name against the
            # module's slug.
            if module and module_cls.__name__ != module:
                continue
            if module_cls().get_slug() != name_only:
                continue

            log.info("Loading results from \"%s\" with module %s", file_name,
                     module_cls.__name__)

            result = module_cls.from_json(file_path,
                                          log=logging.getLogger(module_cls.__module__))
            if indicators.total_ioc_count > 0:
                result.indicators = indicators
                result.indicators.log = result.log

            try:
                result.check_indicators()
            except NotImplementedError:
                continue
            else:
                total_detections += len(result.detected)

    if total_detections > 0:
        log.warning("The check of the results produced %d detections!",
                    total_detections)
#==============================================================================
# Command: download-iocs
#==============================================================================
@cli.command("download-iocs", help="Download public STIX2 indicators")
def download_indicators():
    """Download the public STIX2 indicator files (delegates to
    mvt.common.indicators.download_indicators_files)."""
    download_indicators_files(log)
| mvt/android/cli.py | 10,563 | Mobile Verification Toolkit (MVT) Copyright (c) 2021-2022 The MVT Project Authors. Use of this software is governed by the MVT License 1.1 that can be found at https://license.mvt.re/1.1/ Setup logging using Rich.============================================================================== Main============================================================================================================================================================ Command: version============================================================================================================================================================ Download APKs============================================================================== TODO: Do we actually want to be able to run without storing any file?============================================================================== Checks through ADB============================================================================================================================================================ Check ADB backup============================================================================================================================================================ Command: check-iocs============================================================================== TODO: Skipping processing of result files that are not json. We might want to revisit this eventually.============================================================================== Command: download-iocs============================================================================== | 1,601 | en | 0.460461 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
import unittest
from tracspamfilter.filters.tests import akismet, bayes, extlinks, regex, \
session
def test_suite():
    """Aggregate the suites of every spam-filter test module into one suite."""
    suite = unittest.TestSuite()
    for test_module in (akismet, bayes, extlinks, regex, session):
        suite.addTest(test_module.test_suite())
    return suite
# Allow running this package's tests directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| files/spam-filter/tracspamfilter/filters/tests/__init__.py | 1,004 | -*- coding: utf-8 -*- Copyright (C) 2006 Edgewall Software All rights reserved. This software is licensed as described in the file COPYING, which you should have received as part of this distribution. The terms are also available at http://trac.edgewall.com/license.html. This software consists of voluntary contributions made by many individuals. For the exact contribution history, see the revision history and logs, available at http://projects.edgewall.com/trac/. | 467 | en | 0.966947 |
#!/usr/bin/python
import re, random, sys, difflib
random.seed(123)
for i, line in enumerate(sys.stdin.readlines()):
if i % 1000 == 0: print >>sys.stderr, i, "..."
if i>0 and re.search(r'^id\tsentiment', line): continue # combined files, ignore multiple header rows
line = re.sub(r'\n$', '', line) # strip trailing newlines
# "0003b8d" <tab>1<tab> }\n \n u32 cik_gfx_get_wptr(struct radeon_device *rdev,\n \t\t struct radeon_ri
fields = line.split('\t', 2)
if fields[2] == '': continue # corruption due to empty commits, i.e. no applicable code...
fields[2] = '\\n'.join(fields[2].split('\\n')[0:25]) # keep <=25 lines
f2 = fields[2] = re.sub(r'[^\x09,\x0A,\x20-\x7E]', '.', fields[2]) # cleanup non-ASCII
r = random.randint(0,99) # augment x% of the time, i.e. don't go crazy
if fields[1] == '0':
# no bug - harmless transforms
res = []
if r % 10 == 0: # 10% of the time
f2 = re.sub(r'/[*].*?[*]/|//.*?(\\n)', '\1', f2)
# inject spaces and newlines
for i in range(len(f2)-1):
c = f2[i]
# lines end in newlines, so no risk of running off the end
if c == '\\':
c2 = f2[i+1]
if c2 == ' ' and r < 3: res.append(' ') # add a space
elif c2 == 'n' and r < 5: res.append('\\n\\') # add a newline
elif c2 == 'n' and r < 7: res.append(' \\') # add extra trailing whitespace
elif c2 == 't' and r < 3: res.append(' \\') # extra space before tab
elif c2 == 't' and r < 5: res.append('\\t ') # extra space after tabs
### your ideas here ###
else: res.append(c)
elif c in '{}[]':
r = random.randint(0,99)
if r < 3: res.append(' ') # add a space
### your ideas here ###
else: res.append(c)
else: res.append(c)
newf2 = ''.join(res)+f2[-1]
else: # fields[1] == '1'
# contains a bug - harmful transform
r = random.randint(0,99)
if r < 50:
# swap if/then clauses - may introduce syntax errors
newf2 = re.sub(r'(if[^(]*[(].+?[)][^{]*){(.+?)}(.*?then.*?){(.*?)}', r'\1{\4}\3{\2}', f2)
# change comparison operators - since ==/!= is used for other datatypes, keep separate from </>
# note: pick random operator to avoid real parsing
newf2 = re.sub(r'([a-zA-Z0-9_] *)(<=?|>=?)( *[a-zA-Z0-9_])', r'\1'+['<','<=','>','>='][r%4]+r'\3', newf2)
newf2 = re.sub(r'([a-zA-Z0-9_] *)(==|!=)( *[a-zA-Z0-9_])', r'\1'+['==','!='][r%2]+r'\3', newf2)
newf2 = re.sub(r'([a-zA-Z0-9_] *)(&&|[|][|])( *[a-zA-Z0-9_])', r'\1'+['==','!='][r%2]+r'\3', newf2)
# muck numbers
# 201 - 99...99 doesn't end in 0, not binary, etc.
newf2 = re.sub(r'([2-9][0-9]+[1-9])', str(r*random.randint(0,99)+200), newf2)
else:
newf2 = f2
print '\t'.join(fields)
if newf2 != fields[2]:
print '\t'.join([re.sub(r'"$', 'beef"', fields[0]), fields[1], newf2])
#print 'diff:\n' + ''.join(difflib.unified_diff(fields[2].replace('\\n','\n'), newf2.replace('\\n','\n')))
| augment-c_and_cpp.py | 3,270 | !/usr/bin/python combined files, ignore multiple header rows strip trailing newlines "0003b8d" <tab>1<tab> }\n \n u32 cik_gfx_get_wptr(struct radeon_device *rdev,\n \t\t struct radeon_ri corruption due to empty commits, i.e. no applicable code... keep <=25 lines cleanup non-ASCII augment x% of the time, i.e. don't go crazy no bug - harmless transforms 10% of the time inject spaces and newlines lines end in newlines, so no risk of running off the end add a space add a newline add extra trailing whitespace extra space before tab extra space after tabs your ideas here add a space your ideas here fields[1] == '1' contains a bug - harmful transform swap if/then clauses - may introduce syntax errors change comparison operators - since ==/!= is used for other datatypes, keep separate from </> note: pick random operator to avoid real parsing muck numbers 201 - 99...99 doesn't end in 0, not binary, etc.print 'diff:\n' + ''.join(difflib.unified_diff(fields[2].replace('\\n','\n'), newf2.replace('\\n','\n'))) | 1,016 | en | 0.670901 |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Event sequence RNN model."""
import collections
import copy
import functools
from magenta.common import beam_search
from magenta.common import state_util
from magenta.models.shared import events_rnn_graph
import magenta.music as mm
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import training as contrib_training
# Model state when generating event sequences, consisting of the next inputs to
# feed the model, the current RNN state, the current control sequence (if
# applicable), and state for the current control sequence (if applicable).
#   inputs:         next model inputs to feed
#   rnn_state:      current RNN state
#   control_events: current control sequence, if any
#   control_state:  state for the control sequence, if any
ModelState = collections.namedtuple(
    'ModelState', ['inputs', 'rnn_state', 'control_events', 'control_state'])
class EventSequenceRnnModelError(Exception):
    """Generic exception type for event sequence RNN model errors."""
    pass
def _extend_control_events_default(control_events, events, state):
    """Default control-sequence extender: repeat the final control event.

    Pads `control_events` in place with copies of its last element until the
    control sequence is exactly one event longer than the generated event
    sequence. Sequences that are already long enough are left untouched.

    Args:
        control_events: The control event sequence to extend (modified in place).
        events: The list of generated events.
        state: State maintained while generating; not used here.

    Returns:
        `state`, unmodified.
    """
    shortfall = len(events) + 1 - len(control_events)
    if shortfall > 0:
        control_events.extend([control_events[-1]] * shortfall)
    return state
class EventSequenceRnnModel(mm.BaseModel):
"""Class for RNN event sequence generation models.
Currently this class only supports generation, of both event sequences and
note sequences (via event sequences). Support for model training will be added
at a later time.
"""
def __init__(self, config):
    """Initialize the EventSequenceRnnModel.

    Args:
      config: An EventSequenceRnnConfig containing the encoder/decoder and
          HParams to use.
    """
    super(EventSequenceRnnModel, self).__init__()
    # The config (encoder/decoder + hparams) is read throughout generation.
    self._config = config
def _build_graph_for_generation(self):
    """Build the TensorFlow graph in 'generate' mode for this model's config."""
    events_rnn_graph.get_build_graph_fn('generate', self._config)()
def _batch_size(self):
    """Extracts the batch size from the graph."""
    # The first tensor in the 'inputs' collection has shape
    # [batch_size, ...]; dimension 0 is the batch size.
    return self._session.graph.get_collection('inputs')[0].shape[0].value
def _generate_step_for_batch(self, event_sequences, inputs, initial_state,
                             temperature):
    """Extends a batch of event sequences by a single step each.

    This method modifies the event sequences in place.

    Args:
      event_sequences: A list of event sequences, each of which is a Python
          list-like object. The list of event sequences should have length equal
          to `self._batch_size()`. These are extended by this method.
      inputs: A Python list of model inputs, with length equal to
          `self._batch_size()`.
      initial_state: A numpy array containing the initial RNN state, where
          `initial_state.shape[0]` is equal to `self._batch_size()`.
      temperature: The softmax temperature.

    Returns:
      final_state: The final RNN state, a numpy array the same size as
          `initial_state`.
      loglik: The log-likelihood of the chosen softmax value for each event
          sequence, a 1-D numpy array of length
          `self._batch_size()`. If `inputs` is a full-length inputs batch, the
          log-likelihood of each entire sequence up to and including the
          generated step will be computed and returned.
    """
    assert len(event_sequences) == self._batch_size()

    # Look up the relevant tensors via the graph's named collections
    # (populated when the generation graph was built).
    graph_inputs = self._session.graph.get_collection('inputs')[0]
    graph_initial_state = self._session.graph.get_collection('initial_state')
    graph_final_state = self._session.graph.get_collection('final_state')
    graph_softmax = self._session.graph.get_collection('softmax')[0]
    graph_temperature = self._session.graph.get_collection('temperature')

    feed_dict = {graph_inputs: inputs,
                 tuple(graph_initial_state): initial_state}
    # For backwards compatibility, we only try to pass temperature if the
    # placeholder exists in the graph.
    if graph_temperature:
        feed_dict[graph_temperature[0]] = temperature
    final_state, softmax = self._session.run(
        [graph_final_state, graph_softmax], feed_dict)

    # A list-valued softmax means the model emits multiple softmax outputs
    # per step (one per sub-distribution) -- presumably one per event
    # component; confirm against the encoder/decoder in use.
    if isinstance(softmax, list):
        if softmax[0].shape[1] > 1:
            # Re-pack the per-output softmaxes into per-sequence, per-step
            # lists so the encoder/decoder can score the existing events.
            softmaxes = []
            for beam in range(softmax[0].shape[0]):
                beam_softmaxes = []
                for event in range(softmax[0].shape[1] - 1):
                    beam_softmaxes.append(
                        [softmax[s][beam, event] for s in range(len(softmax))])
                softmaxes.append(beam_softmaxes)
            loglik = self._config.encoder_decoder.evaluate_log_likelihood(
                event_sequences, softmaxes)
        else:
            loglik = np.zeros(len(event_sequences))
    else:
        if softmax.shape[1] > 1:
            # The inputs batch is longer than a single step, so we also want to
            # compute the log-likelihood of the event sequences up until the step
            # we're generating.
            loglik = self._config.encoder_decoder.evaluate_log_likelihood(
                event_sequences, softmax[:, :-1, :])
        else:
            loglik = np.zeros(len(event_sequences))

    # Sample the next event for every sequence (extends them in place) and
    # record which softmax index was chosen for each.
    indices = np.array(self._config.encoder_decoder.extend_event_sequences(
        event_sequences, softmax))
    # Probability of each chosen event at the final (just-generated) step.
    if isinstance(softmax, list):
        p = 1.0
        for i in range(len(softmax)):
            p *= softmax[i][range(len(event_sequences)), -1, indices[:, i]]
    else:
        p = softmax[range(len(event_sequences)), -1, indices]

    return final_state, loglik + np.log(p)
  def _generate_step(self, event_sequences, model_states, logliks, temperature,
                     extend_control_events_callback=None,
                     modify_events_callback=None):
    """Extends a list of event sequences by a single step each.
    This method modifies the event sequences in place. It also returns the
    modified event sequences and updated model states and log-likelihoods.
    Args:
      event_sequences: A list of event sequence objects, which are extended by
          this method.
      model_states: A list of model states, each of which contains model inputs
          and initial RNN states.
      logliks: A list containing the current log-likelihood for each event
          sequence.
      temperature: The softmax temperature.
      extend_control_events_callback: A function that takes three arguments: a
          current control event sequence, a current generated event sequence,
          and the control state. The function should a) extend the control event
          sequence to be one longer than the generated event sequence (or do
          nothing if it is already at least this long), and b) return the
          resulting control state.
      modify_events_callback: An optional callback for modifying the event list.
          Can be used to inject events rather than having them generated. If not
          None, will be called with 3 arguments after every event: the current
          EventSequenceEncoderDecoder, a list of current EventSequences, and a
          list of current encoded event inputs.
    Returns:
      event_sequences: A list of extended event sequences. These are modified in
          place but also returned.
      final_states: A list of resulting model states, containing model inputs
          for the next step along with RNN states for each event sequence.
      logliks: A list containing the updated log-likelihood for each event
          sequence.
    """
    # Split the sequences to extend into batches matching the model batch size.
    batch_size = self._batch_size()
    num_seqs = len(event_sequences)
    num_batches = int(np.ceil(num_seqs / float(batch_size)))
    # Extract inputs and RNN states from the model states.
    inputs = [model_state.inputs for model_state in model_states]
    initial_states = [model_state.rnn_state for model_state in model_states]
    # Also extract control sequences and states.
    control_sequences = [
        model_state.control_events for model_state in model_states]
    control_states = [
        model_state.control_state for model_state in model_states]
    final_states = []
    logliks = np.array(logliks, dtype=np.float32)
    # Add padding to fill the final batch: duplicate the last sequence, input,
    # and state so every batch fed to the model is exactly `batch_size` wide.
    pad_amt = -len(event_sequences) % batch_size
    padded_event_sequences = event_sequences + [
        copy.deepcopy(event_sequences[-1]) for _ in range(pad_amt)]
    padded_inputs = inputs + [inputs[-1]] * pad_amt
    padded_initial_states = initial_states + [initial_states[-1]] * pad_amt
    for b in range(num_batches):
      i, j = b * batch_size, (b + 1) * batch_size
      # Only the final batch can contain padding, so pad_amt is 0 until then.
      pad_amt = max(0, j - num_seqs)
      # Generate a single step for one batch of event sequences.
      batch_final_state, batch_loglik = self._generate_step_for_batch(
          padded_event_sequences[i:j],
          padded_inputs[i:j],
          state_util.batch(padded_initial_states[i:j], batch_size),
          temperature)
      # Drop results for the padded entries before accumulating.
      final_states += state_util.unbatch(
          batch_final_state, batch_size)[:j - i - pad_amt]
      logliks[i:j - pad_amt] += batch_loglik[:j - i - pad_amt]
    # Construct inputs for next step.
    if extend_control_events_callback is not None:
      # We are conditioning on control sequences.
      for idx in range(len(control_sequences)):
        # Extend each control sequence to ensure that it is longer than the
        # corresponding event sequence.
        control_states[idx] = extend_control_events_callback(
            control_sequences[idx], event_sequences[idx], control_states[idx])
      next_inputs = self._config.encoder_decoder.get_inputs_batch(
          control_sequences, event_sequences)
    else:
      next_inputs = self._config.encoder_decoder.get_inputs_batch(
          event_sequences)
    if modify_events_callback:
      # Modify event sequences and inputs for next step.
      modify_events_callback(
          self._config.encoder_decoder, event_sequences, next_inputs)
    # Repackage per-sequence results into ModelStates for the caller.
    model_states = [ModelState(inputs=inputs, rnn_state=final_state,
                               control_events=control_events,
                               control_state=control_state)
                    for inputs, final_state, control_events, control_state
                    in zip(next_inputs, final_states,
                           control_sequences, control_states)]
    return event_sequences, model_states, logliks
def _generate_events(self, num_steps, primer_events, temperature=1.0,
beam_size=1, branch_factor=1, steps_per_iteration=1,
control_events=None, control_state=None,
extend_control_events_callback=(
_extend_control_events_default),
modify_events_callback=None):
"""Generate an event sequence from a primer sequence.
Args:
num_steps: The integer length in steps of the final event sequence, after
generation. Includes the primer.
primer_events: The primer event sequence, a Python list-like object.
temperature: A float specifying how much to divide the logits by
before computing the softmax. Greater than 1.0 makes events more
random, less than 1.0 makes events less random.
beam_size: An integer, beam size to use when generating event sequences
via beam search.
branch_factor: An integer, beam search branch factor to use.
steps_per_iteration: An integer, number of steps to take per beam search
iteration.
control_events: A sequence of control events upon which to condition the
generation. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the control events will be
used along with the target sequence to generate model inputs. In some
cases, the control event sequence cannot be fully-determined as later
control events depend on earlier generated events; use the
`extend_control_events_callback` argument to provide a function that
extends the control event sequence.
control_state: Initial state used by `extend_control_events_callback`.
extend_control_events_callback: A function that takes three arguments: a
current control event sequence, a current generated event sequence,
and the control state. The function should a) extend the control event
sequence to be one longer than the generated event sequence (or do
nothing if it is already at least this long), and b) return the
resulting control state.
modify_events_callback: An optional callback for modifying the event list.
Can be used to inject events rather than having them generated. If not
None, will be called with 3 arguments after every event: the current
EventSequenceEncoderDecoder, a list of current EventSequences, and a
list of current encoded event inputs.
Returns:
The generated event sequence (which begins with the provided primer).
Raises:
EventSequenceRnnModelError: If the primer sequence has zero length or
is not shorter than num_steps.
"""
if (control_events is not None and
not isinstance(self._config.encoder_decoder,
mm.ConditionalEventSequenceEncoderDecoder)):
raise EventSequenceRnnModelError(
'control sequence provided but encoder/decoder is not a '
'ConditionalEventSequenceEncoderDecoder')
if control_events is not None and extend_control_events_callback is None:
raise EventSequenceRnnModelError(
'must provide callback for extending control sequence (or use'
'default)')
if not primer_events:
raise EventSequenceRnnModelError(
'primer sequence must have non-zero length')
if len(primer_events) >= num_steps:
raise EventSequenceRnnModelError(
'primer sequence must be shorter than `num_steps`')
if len(primer_events) >= num_steps:
# Sequence is already long enough, no need to generate.
return primer_events
event_sequences = [copy.deepcopy(primer_events)]
# Construct inputs for first step after primer.
if control_events is not None:
# We are conditioning on a control sequence. Make sure it is longer than
# the primer sequence.
control_state = extend_control_events_callback(
control_events, primer_events, control_state)
inputs = self._config.encoder_decoder.get_inputs_batch(
[control_events], event_sequences, full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch(
event_sequences, full_length=True)
if modify_events_callback:
# Modify event sequences and inputs for first step after primer.
modify_events_callback(
self._config.encoder_decoder, event_sequences, inputs)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_states = state_util.unbatch(self._session.run(graph_initial_state))
# Beam search will maintain a state for each sequence consisting of the next
# inputs to feed the model, and the current RNN state. We start out with the
# initial full inputs batch and the zero state.
initial_state = ModelState(
inputs=inputs[0], rnn_state=initial_states[0],
control_events=control_events, control_state=control_state)
generate_step_fn = functools.partial(
self._generate_step,
temperature=temperature,
extend_control_events_callback=
extend_control_events_callback if control_events is not None else None,
modify_events_callback=modify_events_callback)
events, _, loglik = beam_search(
initial_sequence=event_sequences[0],
initial_state=initial_state,
generate_step_fn=generate_step_fn,
num_steps=num_steps - len(primer_events),
beam_size=beam_size,
branch_factor=branch_factor,
steps_per_iteration=steps_per_iteration)
tf.logging.info('Beam search yields sequence with log-likelihood: %f ',
loglik)
return events
def _evaluate_batch_log_likelihood(self, event_sequences, inputs,
initial_state):
"""Evaluates the log likelihood of a batch of event sequences.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
Returns:
A Python list containing the log likelihood of each sequence in
`event_sequences`.
"""
graph_inputs = self._session.graph.get_collection('inputs')[0]
graph_initial_state = self._session.graph.get_collection('initial_state')
graph_softmax = self._session.graph.get_collection('softmax')[0]
graph_temperature = self._session.graph.get_collection('temperature')
feed_dict = {graph_inputs: inputs,
tuple(graph_initial_state): initial_state}
# For backwards compatibility, we only try to pass temperature if the
# placeholder exists in the graph.
if graph_temperature:
feed_dict[graph_temperature[0]] = 1.0
softmax = self._session.run(graph_softmax, feed_dict)
return self._config.encoder_decoder.evaluate_log_likelihood(
event_sequences, softmax)
def _evaluate_log_likelihood(self, event_sequences, control_events=None):
"""Evaluate log likelihood for a list of event sequences of the same length.
Args:
event_sequences: A list of event sequences for which to evaluate the log
likelihood.
control_events: A sequence of control events upon which to condition the
event sequences. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the log likelihood of each
event sequence will be computed conditional on the control sequence.
Returns:
The log likelihood of each sequence in `event_sequences`.
Raises:
EventSequenceRnnModelError: If the event sequences are not all the
same length, or if the control sequence is shorter than the event
sequences.
"""
num_steps = len(event_sequences[0])
for events in event_sequences[1:]:
if len(events) != num_steps:
raise EventSequenceRnnModelError(
'log likelihood evaluation requires all event sequences to have '
'the same length')
if control_events is not None and len(control_events) < num_steps:
raise EventSequenceRnnModelError(
'control sequence must be at least as long as the event sequences')
batch_size = self._batch_size()
num_full_batches = len(event_sequences) / batch_size
loglik = np.empty(len(event_sequences))
# Since we're computing log-likelihood and not generating, the inputs batch
# doesn't need to include the final event in each sequence.
if control_events is not None:
# We are conditioning on a control sequence.
inputs = self._config.encoder_decoder.get_inputs_batch(
[control_events] * len(event_sequences),
[events[:-1] for events in event_sequences],
full_length=True)
else:
inputs = self._config.encoder_decoder.get_inputs_batch(
[events[:-1] for events in event_sequences], full_length=True)
graph_initial_state = self._session.graph.get_collection('initial_state')
initial_state = [
self._session.run(graph_initial_state)] * len(event_sequences)
offset = 0
for _ in range(num_full_batches):
# Evaluate a single step for one batch of event sequences.
batch_indices = range(offset, offset + batch_size)
batch_loglik = self._evaluate_batch_log_likelihood(
[event_sequences[i] for i in batch_indices],
[inputs[i] for i in batch_indices],
initial_state[batch_indices])
loglik[batch_indices] = batch_loglik
offset += batch_size
if offset < len(event_sequences):
# There's an extra non-full batch. Pad it with a bunch of copies of the
# final sequence.
num_extra = len(event_sequences) - offset
pad_size = batch_size - num_extra
batch_indices = range(offset, len(event_sequences))
batch_loglik = self._evaluate_batch_log_likelihood(
[event_sequences[i] for i in batch_indices] + [
copy.deepcopy(event_sequences[-1]) for _ in range(pad_size)],
[inputs[i] for i in batch_indices] + inputs[-1] * pad_size,
np.append(initial_state[batch_indices],
np.tile(inputs[-1, :], (pad_size, 1)),
axis=0))
loglik[batch_indices] = batch_loglik[0:num_extra]
return loglik
class EventSequenceRnnConfig(object):
  """Stores a configuration for an event sequence RNN.
  Only one of `steps_per_quarter` or `steps_per_second` will be applicable for
  any particular model.
  Attributes:
    details: The GeneratorDetails message describing the config.
    encoder_decoder: The EventSequenceEncoderDecoder or
        ConditionalEventSequenceEncoderDecoder object to use.
    hparams: The HParams containing hyperparameters to use. Will be merged
        with default hyperparameter values.
    steps_per_quarter: The integer number of quantized time steps per quarter
        note to use.
    steps_per_second: The integer number of quantized time steps per second
        to use.
  """
  def __init__(self, details, encoder_decoder, hparams,
               steps_per_quarter=4, steps_per_second=100):
    # Start from the default hyperparameters and let any caller-supplied
    # values in `hparams` override them.
    merged_hparams = dict(
        batch_size=64,
        rnn_layer_sizes=[128, 128],
        dropout_keep_prob=1.0,
        attn_length=0,
        clip_norm=3,
        learning_rate=0.001,
        residual_connections=False,
        use_cudnn=False)
    merged_hparams.update(hparams.values())
    self.details = details
    self.encoder_decoder = encoder_decoder
    self.steps_per_quarter = steps_per_quarter
    self.steps_per_second = steps_per_second
    self.hparams = contrib_training.HParams(**merged_hparams)
| magenta/models/shared/events_rnn_model.py | 23,210 | Stores a configuration for an event sequence RNN.
Only one of `steps_per_quarter` or `steps_per_second` will be applicable for
any particular model.
Attributes:
details: The GeneratorDetails message describing the config.
encoder_decoder: The EventSequenceEncoderDecoder or
ConditionalEventSequenceEncoderDecoder object to use.
hparams: The HParams containing hyperparameters to use. Will be merged with
default hyperparameter values.
steps_per_quarter: The integer number of quantized time steps per quarter
note to use.
steps_per_second: The integer number of quantized time steps per second to
use.
Class for RNN event sequence generation models.
Currently this class only supports generation, of both event sequences and
note sequences (via event sequences). Support for model training will be added
at a later time.
Initialize the EventSequenceRnnModel.
Args:
config: An EventSequenceRnnConfig containing the encoder/decoder and
HParams to use.
Extracts the batch size from the graph.
Evaluates the log likelihood of a batch of event sequences.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
Returns:
A Python list containing the log likelihood of each sequence in
`event_sequences`.
Evaluate log likelihood for a list of event sequences of the same length.
Args:
event_sequences: A list of event sequences for which to evaluate the log
likelihood.
control_events: A sequence of control events upon which to condition the
event sequences. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the log likelihood of each
event sequence will be computed conditional on the control sequence.
Returns:
The log likelihood of each sequence in `event_sequences`.
Raises:
EventSequenceRnnModelError: If the event sequences are not all the
same length, or if the control sequence is shorter than the event
sequences.
Default function for extending control event sequence.
This function extends a control event sequence by duplicating the final event
in the sequence. The control event sequence will be extended to have length
one longer than the generated event sequence.
Args:
control_events: The control event sequence to extend.
events: The list of generated events.
state: State maintained while generating, unused.
Returns:
The resulting state after extending the control sequence (in this case the
state will be returned unmodified).
Generate an event sequence from a primer sequence.
Args:
num_steps: The integer length in steps of the final event sequence, after
generation. Includes the primer.
primer_events: The primer event sequence, a Python list-like object.
temperature: A float specifying how much to divide the logits by
before computing the softmax. Greater than 1.0 makes events more
random, less than 1.0 makes events less random.
beam_size: An integer, beam size to use when generating event sequences
via beam search.
branch_factor: An integer, beam search branch factor to use.
steps_per_iteration: An integer, number of steps to take per beam search
iteration.
control_events: A sequence of control events upon which to condition the
generation. If not None, the encoder/decoder should be a
ConditionalEventSequenceEncoderDecoder, and the control events will be
used along with the target sequence to generate model inputs. In some
cases, the control event sequence cannot be fully-determined as later
control events depend on earlier generated events; use the
`extend_control_events_callback` argument to provide a function that
extends the control event sequence.
control_state: Initial state used by `extend_control_events_callback`.
extend_control_events_callback: A function that takes three arguments: a
current control event sequence, a current generated event sequence,
and the control state. The function should a) extend the control event
sequence to be one longer than the generated event sequence (or do
nothing if it is already at least this long), and b) return the
resulting control state.
modify_events_callback: An optional callback for modifying the event list.
Can be used to inject events rather than having them generated. If not
None, will be called with 3 arguments after every event: the current
EventSequenceEncoderDecoder, a list of current EventSequences, and a
list of current encoded event inputs.
Returns:
The generated event sequence (which begins with the provided primer).
Raises:
EventSequenceRnnModelError: If the primer sequence has zero length or
is not shorter than num_steps.
Extends a list of event sequences by a single step each.
This method modifies the event sequences in place. It also returns the
modified event sequences and updated model states and log-likelihoods.
Args:
event_sequences: A list of event sequence objects, which are extended by
this method.
model_states: A list of model states, each of which contains model inputs
and initial RNN states.
logliks: A list containing the current log-likelihood for each event
sequence.
temperature: The softmax temperature.
extend_control_events_callback: A function that takes three arguments: a
current control event sequence, a current generated event sequence,
and the control state. The function should a) extend the control event
sequence to be one longer than the generated event sequence (or do
nothing if it is already at least this long), and b) return the
resulting control state.
modify_events_callback: An optional callback for modifying the event list.
Can be used to inject events rather than having them generated. If not
None, will be called with 3 arguments after every event: the current
EventSequenceEncoderDecoder, a list of current EventSequences, and a
list of current encoded event inputs.
Returns:
event_sequences: A list of extended event sequences. These are modified in
place but also returned.
final_states: A list of resulting model states, containing model inputs
for the next step along with RNN states for each event sequence.
logliks: A list containing the updated log-likelihood for each event
sequence.
Extends a batch of event sequences by a single step each.
This method modifies the event sequences in place.
Args:
event_sequences: A list of event sequences, each of which is a Python
list-like object. The list of event sequences should have length equal
to `self._batch_size()`. These are extended by this method.
inputs: A Python list of model inputs, with length equal to
`self._batch_size()`.
initial_state: A numpy array containing the initial RNN state, where
`initial_state.shape[0]` is equal to `self._batch_size()`.
temperature: The softmax temperature.
Returns:
final_state: The final RNN state, a numpy array the same size as
`initial_state`.
loglik: The log-likelihood of the chosen softmax value for each event
sequence, a 1-D numpy array of length
`self._batch_size()`. If `inputs` is a full-length inputs batch, the
log-likelihood of each entire sequence up to and including the
generated step will be computed and returned.
Event sequence RNN model.
Copyright 2019 The Magenta Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=redefined-builtin Model state when generating event sequences, consisting of the next inputs to feed the model, the current RNN state, the current control sequence (if applicable), and state for the current control sequence (if applicable). For backwards compatibility, we only try to pass temperature if the placeholder exists in the graph. The inputs batch is longer than a single step, so we also want to compute the log-likelihood of the event sequences up until the step we're generating. Split the sequences to extend into batches matching the model batch size. Extract inputs and RNN states from the model states. Also extract control sequences and states. Add padding to fill the final batch. Generate a single step for one batch of event sequences. Construct inputs for next step. We are conditioning on control sequences. Extend each control sequence to ensure that it is longer than the corresponding event sequence. Modify event sequences and inputs for next step. Sequence is already long enough, no need to generate. Construct inputs for first step after primer. We are conditioning on a control sequence. Make sure it is longer than the primer sequence. Modify event sequences and inputs for first step after primer. Beam search will maintain a state for each sequence consisting of the next inputs to feed the model, and the current RNN state. We start out with the initial full inputs batch and the zero state. 
For backwards compatibility, we only try to pass temperature if the placeholder exists in the graph. Since we're computing log-likelihood and not generating, the inputs batch doesn't need to include the final event in each sequence. We are conditioning on a control sequence. Evaluate a single step for one batch of event sequences. There's an extra non-full batch. Pad it with a bunch of copies of the final sequence. | 10,193 | en | 0.87389 |
# Functions for visualization
import numpy as np
import networkx as nx
import multinetx as mx
from jinja2 import Environment, FileSystemLoader, Template
import json
from networkx.readwrite import json_graph
def write_mx_to_json(filename, mg, nNodes, pos, nLayers, nodes_to_remove=None):
    """Write a multilayer network to a node-link JSON file for visualization.

    Args:
        filename: complete name of the output file (e.g. "data/slide_x.json").
        mg: the multilayer network, as a multinetx object.
        nNodes: the number of nodes in the first layer.
        pos: dict mapping node -> (x, y) coordinates.
        nLayers: the number of layers in the second aspect.
        nodes_to_remove: list of node ids that should not exist in each layer
            (default None, meaning remove nothing). Was a mutable `[]` default.

    Returns:
        The node-link JSON as a string (also written to `filename`).
    """
    if nodes_to_remove is None:
        nodes_to_remove = []
    # From the sparse adj, make a networkx graph and add node attributes.
    G1 = nx.from_numpy_array(mx.adjacency_matrix(mg, weight='weight').todense())
    # Remove nodes from G, then rebuild so node ids are contiguous again.
    G1.remove_nodes_from(nodes_to_remove)
    G = nx.from_numpy_array(nx.adjacency_matrix(G1).todense())
    # Create coordinate/layer arrays pretending like all nodes exist.
    scalefact = 20
    L2_classes = np.arange(nLayers)
    L2_array_original = np.array([])
    z_shift = 2
    z_array_original = np.array([])
    x_orig = np.array([])
    y_orig = np.array([])
    L1_orig = np.array([])
    for level in L2_classes:
        L2_array_original = np.concatenate(
            (L2_array_original, np.array([float(level) for i in np.arange(nNodes)])))
        z_array_original = np.concatenate(
            (z_array_original, np.array([float(level * z_shift) for i in np.arange(nNodes)])))
        x_orig = np.concatenate((x_orig, [pos[key][0] + scalefact for key in pos]))
        y_orig = np.concatenate((y_orig, [pos[key][1] + scalefact for key in pos]))
        L1_orig = np.concatenate((L1_orig, [i for i in np.arange(nNodes)]))
    # Need to delete removed nodes from our attribute arrays, too.
    L2_array = np.delete(L2_array_original, nodes_to_remove, 0)
    z_array = np.delete(z_array_original, nodes_to_remove, 0)
    x_array = np.delete(x_orig, nodes_to_remove, 0)
    y_array = np.delete(y_orig, nodes_to_remove, 0)
    L1_array = np.delete(L1_orig, nodes_to_remove, 0)
    # Each node gets attributes L1=node id, L2=slice number, x/y/z position,
    # and a name/id string. (Removed dead `x_dict = {}` / `y_dict = {}` inits
    # that were immediately overwritten.)
    id_dict = {i: ("id" + str(i)) for i in np.arange(nNodes * nLayers)}
    L2_dict = {i: l2 for i, l2 in enumerate(L2_array)}
    z_dict = {i: z_val for i, z_val in enumerate(z_array)}
    x_dict = {i: x_val for i, x_val in enumerate(x_array)}
    y_dict = {i: y_val for i, y_val in enumerate(y_array)}
    L1_dict = {i: L1_val for i, L1_val in enumerate(L1_array)}
    nx.set_node_attributes(G, id_dict, name="name")
    nx.set_node_attributes(G, x_dict, name="x")
    nx.set_node_attributes(G, y_dict, name="y")
    nx.set_node_attributes(G, z_dict, name="z")
    nx.set_node_attributes(G, L1_dict, name="L1")
    nx.set_node_attributes(G, L2_dict, name="L2")
    G_json = json_graph.node_link_data(G)
    # String form for the visualization function.
    G_json_viz = json.dumps(G_json, indent=4)
    # Save as a .json file.
    with open(filename, 'w') as fp:
        json.dump(G_json, fp)
    # Fixed: the message previously had no placeholder and printed the literal
    # text "(unknown)" instead of the output path.
    print(f"done writing mx to {filename}")
    return G_json_viz
# Finished defining functions; print a progress marker on import/run.
print("finished defining functions")
def visualize(
    mxgraph,
    theme="dark",
    path_html="visualization_output.html",
    title="MX viz",
    save_file=True,
):
    """Render a multilayer-network visualization to a standalone HTML page.

    Args:
        mxgraph: the network's node-link JSON (string), as produced by
            `write_mx_to_json`, injected into the page template.
        theme: "dark" or "light"; selects the JS/CSS/template asset set.
        path_html: output path for the rendered HTML (when save_file is True).
        title: page title passed to the template.
        save_file: if True, write the rendered page to `path_html`.

    Returns:
        The rendered HTML as a string.

    Raises:
        ValueError: if `theme` is neither "dark" nor "light". (Previously an
            unknown theme left `template` unbound and raised NameError.)
    """
    # Select the per-theme assets once; the rendering path is identical for
    # both themes (the two duplicated branches were merged).
    if theme == "dark":
        js_path = './javascript/mx_viz.js'
        css_path = './style/style.css'
        template_path = "./templates/mx_viz.html"
    elif theme == "light":
        js_path = './javascript/mx_vizlighttheme.js'
        css_path = './style/style_lighttheme.css'
        template_path = "./templates/mx_viz_lighttheme.html"
    else:
        raise ValueError("theme must be 'dark' or 'light', got %r" % (theme,))
    # Find the module absolute path and locate templates.
    module_root = "./"
    env = Environment(loader=FileSystemLoader(module_root))
    # Read in the D3 save pages code and include it in the exported html.
    d3_save_svg_path = "./d3-save-svg-gh-pages/assets/d3-save-svg.min.js"
    with open(d3_save_svg_path, 'r') as f:
        d3_save_svg = f.readlines()
    with open(js_path, "r") as f:
        js_text = f.read()
    with open(css_path, "r") as f:
        css_text = f.read()
    # Jinja: inline everything so the output page is self-contained.
    template = env.get_template(template_path).render(
        title=title,
        js_text=js_text,
        css_text=css_text,
        mxgraph=mxgraph,
        d3_save_svg=d3_save_svg[0],
    )
    if save_file:
        with open(path_html, "wb") as outfile:
            print("Wrote visualization to: %s" % (path_html))
            outfile.write(template.encode("utf-8"))
    return template
def visualize_timeseries(
    mxgraph,
    path_html="visualization_timeseries_output.html",
    title="MX viz",
    save_file=True,
):
    """Render the time-series multilayer-network visualization as HTML.

    Args:
        mxgraph: the network's node-link JSON (string) injected into the page.
        path_html: output path for the rendered HTML (when save_file is True).
        title: page title passed to the template.
        save_file: if True, write the rendered page to `path_html`.

    Returns:
        The rendered HTML as a string.
    """
    # Templates are resolved relative to the current directory.
    env = Environment(loader=FileSystemLoader("./"))
    # The first line of the D3 save-SVG helper is inlined so the exported page
    # can save itself as an SVG.
    with open("./d3-save-svg-gh-pages/assets/d3-save-svg.min.js", 'r') as f:
        d3_save_svg = f.readlines()
    # Read the page's javascript and stylesheet so they can be inlined too.
    with open('./javascript/mx_viz_timeseries.js', "r") as f:
        js_text = f.read()
    with open('./style/style_timeseries.css', "r") as f:
        css_text = f.read()
    rendered_page = env.get_template("./templates/mx_viz_timeseries.html").render(
        title=title,
        js_text=js_text,
        css_text=css_text,
        mxgraph=mxgraph,
        d3_save_svg=d3_save_svg[0],
    )
    if save_file:
        with open(path_html, "wb") as outfile:
            print("Wrote visualization to: %s" % (path_html))
            outfile.write(rendered_page.encode("utf-8"))
    return rendered_page
| mx_viz.py | 6,665 | Functions for visualization filename the complete name of the output file (data/slide_x.json) mx the multilayer network as a multinetx object nNodes the number of nodes in the first layer pos a dictionary of node coordinates nLayers the number of layers in the second aspect. nodes_to_remove is a list of nodes that should not exist in each layer. Default = [] From the sparse adj, make a networkx graph and add node attributes Remove nodes from G Recreate the graph G to make the rest work nicely. Create dictionaries pretending like all nodes exist Need to delete nodes from our attribute dictionaries, too Each node will get attributes L1=node id, L2=slice number, x position, y position, and name/id Write for visualization function To save as a .json fileFinished defining functions Find the module absolute path and locate templates module_root = os.path.join(os.path.dirname('./'), "templates") Read in the D3 save pages code and include in the exported html Jinja Jinja Find the module absolute path and locate templates module_root = os.path.join(os.path.dirname('./'), "templates") Read in the D3 save pages code and include in the exported html Find the absolute module path and the static files js_path = os.path.join(os.path.dirname(__file__), "static", "kmapper.js") Jinja | 1,302 | en | 0.656255 |
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record (local stand-in
    for `logging.NullHandler`, which is stdlib from Python 2.7 onward)."""
    def emit(self, record):
        # Deliberately do nothing: records routed here are dropped.
        pass
# Module-level logger: ERROR level, with a no-op handler attached so using
# this module without configuring logging emits no "no handlers" warning.
log = logging.getLogger('MirrorEngine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import re
import threading
import copy
from pydispatch import dispatcher
from SmartMeshSDK.protocols.DC2126AConverters import DC2126AConverters
from EventBus import EventBusClient
class MirrorEngine(EventBusClient.EventBusClient):
    """Maintains a thread-safe mirror of the latest sample per (mac, type).

    Samples arrive on several 'parsedAppData_*' dispatcher signals, are
    normalized into dicts with keys 'mac', 'type', 'lastvalue',
    'lastupdated' (plus optional extras), stored in self.mirrordata (one
    entry per mac/type pair) and re-sent on the 'newDataMirrored' signal.
    """

    def _extraSubscriptions(self):
        # (signal, handler) pairs connected in __init__ and disconnected in
        # _cleanup.  Single source of truth: previously the two lists were
        # duplicated and could drift apart.
        return [
            ('parsedAppData_DC2126A',     self._addToQueue),
            ('parsedAppData_SPIPressure', self._addToQueue),
            ('parsedAppData_GPIONet',     self._addToQueue),
            ('parsedAppData_LIS331',      self._addToQueue),
            ('parsedAppData_OAPtilt',     self._addToQueue),
            ('getMirrorData',             self.getMirrorData),
            ('calibrateMirrorData',       self.calibrateMirrorData),
            ('clearMirrorData',           self.clearMirrorData),
        ]

    def __init__(self):

        # log
        log.info('creating instance')

        # local state, created before anything can deliver a signal so the
        # handlers never observe a partially initialized instance
        self.dataLock          = threading.Lock()
        self.pressureOffsets   = {}
        self.mirrordata        = []
        self.dc2126Aconverters = DC2126AConverters.DC2126AConverters()

        # initialize parent class (registers _publish / _cleanup)
        EventBusClient.EventBusClient.__init__(self,
            signal      = 'parsedAppData_OAPTemperature',
            cb          = self._publish,
            teardown_cb = self._cleanup,
        )
        self.name = 'DataConnector_MirrorEngine'

        # connect extra applications
        for (signal, handler) in self._extraSubscriptions():
            dispatcher.connect(handler, signal=signal, weak=False)

    #======================== public ==========================================

    def getMirrorData(self, sender, signal, data):
        """Return a deep copy of the mirrored data (dispatcher RPC handler)."""
        with self.dataLock:
            return copy.deepcopy(self.mirrordata)

    def calibrateMirrorData(self, sender, signal, data):
        """When exactly two pressure sensors are mirrored, derive per-mac
        offsets so both report the same calibrated value."""
        with self.dataLock:
            pressures = {}
            for row in self.mirrordata:
                if row['type'] == 'pressure':
                    # 'lastvalue' is "<rawAdc>_<offset>"; calibrate on raw part
                    pressures[row['mac']] = int(row['lastvalue'].split('_')[0])
            if len(pressures) == 2:
                # list() is required on Python 3, where dict.keys() returns a
                # non-indexable view (the original macs[0] raised TypeError)
                macs = list(pressures.keys())
                offset = pressures[macs[0]] - pressures[macs[1]]
                self.pressureOffsets = {
                    macs[0]: -offset,
                    macs[1]: 0,
                }

    def clearMirrorData(self, sender, signal, data):
        """Drop all mirrored data points."""
        with self.dataLock:
            self.mirrordata = []

    #======================== private =========================================

    def _cleanup(self):
        # disconnect the extra subscriptions made in __init__
        for (signal, handler) in self._extraSubscriptions():
            dispatcher.disconnect(handler, signal=signal, weak=False)

    def _publish(self, sender, signal, data):
        """Normalize one parsed sample into mirror entries, store them, and
        re-send each entry on the 'newDataMirrored' signal.

        Raises SystemError on an unknown signal (kept for backward
        compatibility with existing callers; a ValueError would normally be
        the idiomatic choice).
        """
        # format the data to publish
        newData = []
        mac = data['mac']
        timestamp = str(data['timestamp'])

        if signal in ['parsedAppData_OAPTemperature']:
            # temperature reported in 1/100th C, displayed in C
            temperature_C = float(data['fields']['temperature']) / 100.0
            newData += [
                {
                    'mac':            mac,
                    'type':           'temperature',
                    'lastvalue':      str(temperature_C),
                    'lastupdated':    timestamp,
                    'subscribeToLed': True,
                }
            ]
        elif signal in ['parsedAppData_DC2126A']:
            # publish temperature (converter may report it as unavailable)
            temperature = self.dc2126Aconverters.convertTemperature(
                data['fields']['temperature'],
            )
            if temperature is not None:
                newData += [
                    {
                        'mac':         mac,
                        'type':        'temperature',
                        'lastvalue':   str(temperature),
                        'lastupdated': timestamp,
                    }
                ]
            # publish adcValue
            adcValue = self.dc2126Aconverters.convertAdcValue(
                data['fields']['adcValue'],
            )
            newData += [
                {
                    'mac':         mac,
                    'type':        'voltage',
                    'lastvalue':   adcValue,
                    'lastupdated': timestamp,
                }
            ]
            # publish energysource (derived from the adc value)
            energysource = self.dc2126Aconverters.convertEnergySource(
                mac, adcValue,
            )
            newData += [
                {
                    'mac':         mac,
                    'type':        'energysource',
                    'lastvalue':   energysource,
                    'lastupdated': timestamp,
                }
            ]
        elif signal in ['parsedAppData_SPIPressure']:
            with self.dataLock:
                offset = self.pressureOffsets.get(mac, 0)
            # raw reading and calibration offset packed as "<raw>_<offset>"
            newData += [
                {
                    'mac':         mac,
                    'type':        'pressure',
                    'lastvalue':   str(data['fields']['adcPressure']) + "_" + str(offset),
                    'lastupdated': timestamp,
                }
            ]
        elif signal in ['parsedAppData_GPIONet']:
            # convert 'pinVal' field to meaning
            pinVal = data['fields']['pinVal']
            if pinVal == 1:
                energysource = 'solar'
            elif pinVal == 2:
                energysource = 'vibration'
            elif pinVal == 3:
                energysource = 'temperature'
            else:
                energysource = 'battery'
            newData += [
                {
                    'mac':         mac,
                    'type':        'energysource',
                    'lastvalue':   energysource,
                    'lastupdated': timestamp,
                }
            ]
        elif signal in ['parsedAppData_LIS331']:
            # acceleration packed as "<x>_<y>_<z>"
            newData += [
                {
                    'mac':         mac,
                    'type':        'acceleration',
                    'lastvalue':   '{0}_{1}_{2}'.format(
                        data['fields']['x'],
                        data['fields']['y'],
                        data['fields']['z'],
                    ),
                    'lastupdated': timestamp,
                }
            ]
        elif signal in ['parsedAppData_OAPtilt']:
            newData += [
                {
                    'mac':         mac,
                    'type':        'tilt',
                    'lastvalue':   '{0}'.format(data['fields']['status']),
                    'lastupdated': timestamp,
                }
            ]
        else:
            raise SystemError('unexpected signal={0}'.format(signal))

        # store local mirror of data: replace the existing entry for each
        # (mac, type) pair, or append when it is new
        with self.dataLock:
            for nd in newData:
                for i, e in enumerate(self.mirrordata):
                    if e['mac'] == nd['mac'] and e['type'] == nd['type']:
                        self.mirrordata[i] = nd
                        break
                else:
                    self.mirrordata.append(nd)

        # dispatch each new data point (the lock is held while sending, as
        # before; receivers must not call back into getMirrorData)
        with self.dataLock:
            for nd in newData:
                dispatcher.send(
                    signal = 'newDataMirrored',
                    data   = copy.deepcopy(nd),
                )
| DataConnector/MirrorEngine.py | 10,422 | !/usr/bin/python store params log initialize parent class connect extra applications add stats local variables======================== public ================================================================== private ========================================= disconnect extra applications format the data to publish temperature reported in 1/100th C, displayed in C format newData entry publish temperature publish adcValue publish energysource format newData entry convert 'pinVal' field to meaning format newData entry format newData entry format newData entry store local mirror of data dispatch (once even if multiple data points) | 634 | en | 0.568382 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Moves a list of remote media from one media store to another.
The input should be a list of media files to be moved, one per line. Each line
should be formatted::
<origin server>|<file id>
This can be extracted from postgres with::
psql --tuples-only -A -c "select media_origin, filesystem_id from
matrix.remote_media_cache where ..."
To use, pipe the above into::
PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
"""
from __future__ import print_function
import argparse
import logging
import sys
import os
import shutil
from synapse.rest.media.v1.filepath import MediaFilePaths
logger = logging.getLogger()
def main(src_repo, dest_repo):
    """Read `<origin server>|<file id>` lines from stdin and move each media
    file (plus its thumbnails) from src_repo to dest_repo.

    Args:
        src_repo (str): path of the source media store
        dest_repo (str): path of the destination media store

    Exits with status 1 on the first malformed input line.
    """
    src_paths = MediaFilePaths(src_repo)
    dest_paths = MediaFilePaths(dest_repo)

    for line in sys.stdin:
        line = line.strip()
        parts = line.split('|')
        if len(parts) != 2:
            print("Unable to parse input line %s" % line, file=sys.stderr)
            # sys.exit rather than the site-provided exit(): the latter is
            # meant for interactive use and is absent under `python -S`.
            sys.exit(1)
        move_media(parts[0], parts[1], src_paths, dest_paths)
def move_media(origin_server, file_id, src_paths, dest_paths):
    """Move the given file, and any thumbnails, to the dest repo

    A missing original is logged and skipped (its thumbnails are still
    checked and moved if present).

    Args:
        origin_server (str):
        file_id (str):
        src_paths (MediaFilePaths):
        dest_paths (MediaFilePaths):
    """
    logger.info("%s/%s", origin_server, file_id)

    # check that the original exists
    original_file = src_paths.remote_media_filepath(origin_server, file_id)
    if not os.path.exists(original_file):
        # logger.warn is a deprecated alias (removed in Python 3.13);
        # use the canonical warning()
        logger.warning(
            "Original for %s/%s (%s) does not exist",
            origin_server, file_id, original_file,
        )
    else:
        mkdir_and_move(
            original_file,
            dest_paths.remote_media_filepath(origin_server, file_id),
        )

    # now look for thumbnails
    original_thumb_dir = src_paths.remote_media_thumbnail_dir(
        origin_server, file_id,
    )
    if not os.path.exists(original_thumb_dir):
        return

    mkdir_and_move(
        original_thumb_dir,
        dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
    )
def mkdir_and_move(original_file, dest_file):
    """Move original_file to dest_file, creating dest_file's parent
    directories first when they do not exist yet."""
    dest_dir = os.path.dirname(dest_file)
    if not os.path.exists(dest_dir):
        logger.debug("mkdir %s", dest_dir)
        os.makedirs(dest_dir)
    logger.debug("mv %s %s", original_file, dest_file)
    shutil.move(original_file, dest_file)
if __name__ == "__main__":
    # Command line: [-v] <src_repo> <dest_repo>; the module docstring is the
    # long help text.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class = argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-v", action='store_true', help='enable debug logging')
    parser.add_argument(
        "src_repo",
        help="Path to source content repo",
    )
    parser.add_argument(
        "dest_repo",
        # was "Path to source content repo" — copy/paste error in the help
        help="Path to destination content repo",
    )
    args = parser.parse_args()

    logging_config = {
        "level": logging.DEBUG if args.v else logging.INFO,
        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    }
    logging.basicConfig(**logging_config)

    main(args.src_repo, args.dest_repo)
| scripts/move_remote_media_to_new_store.py | 3,800 | Move the given file, and any thumbnails, to the dest repo
Args:
origin_server (str):
file_id (str):
src_paths (MediaFilePaths):
dest_paths (MediaFilePaths):
Moves a list of remote media from one media store to another.
The input should be a list of media files to be moved, one per line. Each line
should be formatted::
<origin server>|<file id>
This can be extracted from postgres with::
psql --tuples-only -A -c "select media_origin, filesystem_id from
matrix.remote_media_cache where ..."
To use, pipe the above into::
PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py <source repo> <dest repo>
!/usr/bin/env python -*- coding: utf-8 -*- Copyright 2017 New Vector Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. check that the original exists now look for thumbnails | 1,299 | en | 0.743094 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client

# Find your credentials at twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'

client = Client(api_key_sid, api_key_secret)

# Single-grid layout mixing every available video source.
layout = {
    'grid': {
        'video_sources': ['*']
    }
}

# Disable the hook and reconfigure it to record all audio plus the grid
# layout above into an mp4 file, notifying our callback URL on status changes.
hook = client.video.compositionHooks('KHXXXX').update(
    friendlyName='MyFirstCompositionHook',
    enabled=False,
    audio_sources='*',
    video_layout=layout,
    status_callback='http://my.server.org/callbacks',
    format='mp4',
)

print('Updated Composition Hook with SID=%s' % (hook.sid))
| video/rest/compositionhooks/update-hook/update-hook.6.x.py | 717 | Download the Python helper library from twilio.com/docs/python/install Find your credentials at twilio.com/console | 114 | en | 0.817787 |
import sqlite3;
import csv;
import sys;
from ordery.db import get_db
from flask import current_app
def order_csv(filename):
    """Load orders from a CSV file into the application's orders table.

    The CSV must have a header row with columns: ord_nbr, prod_nbr,
    ord_qty, ord_date.  Rows whose order number already exists, or whose
    product number is unknown, are skipped with a message.  Each accepted
    row is committed on its own so a bad row does not abort the load.
    """
    ## Connect to the database
    try:
        conn = sqlite3.connect(
            current_app.config['DATABASE'],
            detect_types=sqlite3.PARSE_DECLTYPES)   # Connection for the app database
        conn.execute('PRAGMA foreign_keys = ON;')   # Turn on foreign key constraints
        csr = conn.cursor()
    except Exception as e:
        print("Error connecting to database: ", e)
        sys.exit()                                  # Fatal error

    ## Open the orders csv file
    try:
        f = open(filename, newline='')              # Default mode: read
        r = csv.DictReader(f)                       # Dictionary reader iterator
        print("\n csv file openned successfully")
    except Exception as e:
        print("Error opening csv file: ", e)
        sys.exit()                                  # Fatal error

    ## Loop through the csv rows and insert each one.
    ## NOTE: the previous explicit 'BEGIN TRANSACTION' per row crashed on the
    ## row following any skipped row: the open transaction was never closed,
    ## so the next BEGIN raised "cannot start a transaction within a
    ## transaction".  The sqlite3 module opens a transaction implicitly on
    ## INSERT, so the per-row commit/rollback below keeps the same
    ## one-transaction-per-row semantics without the explicit BEGIN.
    for d_row in r:
        t_row = (int(d_row['ord_nbr']), d_row['prod_nbr'],
                 int(d_row['ord_qty']), d_row['ord_date'])
        try:
            # Skip if the order number already exists
            csr.execute('SELECT id FROM orders WHERE ord_nbr = ?', (t_row[0],))
            t_id = csr.fetchone()
            if t_id is not None:
                # (fixed: the original printed str(t[0]) — an undefined name)
                print("\nOrder number " + str(t_row[0]) + " already exists in orders table")
                continue                            # Next order

            # Look up the product id; skip when the product is unknown
            csr.execute('SELECT id FROM products WHERE prod_nbr = ?', (t_row[1],))
            t_pid = csr.fetchone()
            if t_pid is None:
                print("\nProduct number " + str(t_row[1]) + " does not exist in products table")
                continue                            # Next order

            # Insert the order and commit this row
            csr.execute(
                '''INSERT INTO orders (ord_nbr, ord_qty, ord_date, prod_id)
                          VALUES (?,?,?,?);''',
                (t_row[0], t_row[2], t_row[3], t_pid[0]))
            conn.commit()                           # Commit transaction for this row
        except Exception as e:
            print("Error loading Orders table " + str(e))
            print("Order number: ", t_row[0])       # Identify order number
            conn.rollback()                         # Roll back this transaction

    f.close()
    conn.close()
| ordery/order_csv.py | 3,078 | Connect to the database Get a connection object for the database Turn on foreign key constraints Get a cursor object for the connection Print error message Fatal Error Open the orders csv file Open the file – default for reading Return a dictionary reader iterator for the file Print error message Fatal Error -------------------------------------- Loop through the orders csv file and insert each row in the table File title line: ord_nbr, prod_nbr, ord_qty, ord_date Loop on each row in the file into a list Start transaction Check if order number already exists Get the order id Order number already exists in orderss Get next order Get product number id IF it exists in product table Get the product id Get next order If order number Not Exist and product number Exist then Insert the order Commit transaction for this row Print error message Identify order number Rollback this transaction Close the file | 909 | en | 0.622446 |
# mmdetection config: Mask R-CNN with a ResNeXt-101 32x8d backbone,
# multi-scale 3x training schedule on COCO instance segmentation.
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]

# Replace the base model's ResNet-50 backbone with ResNeXt-101 32x8d,
# initialized from Detectron2's pretrained weights.
model = dict(
    pretrained='open-mmlab://detectron2/resnext101_32x8d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + stage 1 during fine-tuning
        norm_cfg=dict(type='BN', requires_grad=False),  # frozen BN statistics
        style='pytorch'))

dataset_type = 'CocoDataset'
data_root = 'data/coco/'

# NOTE(review): to_rgb=False keeps the BGR channel order — presumably to
# match the Detectron2-pretrained weights above; verify before changing.
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)

# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]

# Single-scale testing, no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

# Use RepeatDataset to speed up training
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
| configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py | 2,474 | In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], multiscale_mode='range' Use RepeatDataset to speed up training | 122 | en | 0.606389 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
from utils import trunc_normal_
from swin_transformer import SwinTransformer
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Stochastic depth: randomly zero entire samples of a residual branch.

    During training each sample survives with probability 1 - drop_prob and
    survivors are rescaled by 1/keep_prob so the expectation is unchanged.
    Outside training (or with drop_prob == 0) the input is returned as-is.
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.floor(keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device))
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Per-sample stochastic depth as an nn.Module wrapper.

    Applies drop_path() with the configured probability; active only while
    the module is in training mode.
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Delegate to the functional form, gated on self.training.
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
    """Transformer feed-forward block: linear -> activation -> linear,
    with the same dropout applied after each projection.

    hidden_features and out_features default to in_features when falsy.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention over a (B, N, C) token sequence.

    forward returns both the attended features and the (post-dropout)
    attention map so callers can inspect per-head attention.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default query scaling is 1/sqrt(head_dim).
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        batch, tokens, channels = x.shape
        head_dim = channels // self.num_heads
        # Project to q, k, v in one pass, then split: (3, B, heads, N, head_dim).
        projected = self.qkv(x).reshape(batch, tokens, 3, self.num_heads, head_dim)
        q, k, v = projected.permute(2, 0, 3, 1, 4).unbind(0)

        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))

        out = (attn @ v).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out)), attn
class Block(nn.Module):
    """Pre-norm transformer encoder block:
    x -> x + DropPath(Attn(LN(x))) -> x + DropPath(MLP(LN(x))).
    """
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on the residual branches; identity when rate is 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        # Attention sub-block (pre-norm); attn is the per-head attention map.
        y, attn = self.attn(self.norm1(x))
        if return_attention:
            # Visualization path: return the attention map, skip the update.
            return attn
        x = x + self.drop_path(y)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """Split an image into non-overlapping patches and linearly embed each
    one, implemented as a single strided convolution."""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        grid = img_size // patch_size
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = grid * grid

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        # (B, C, H, W) -> (B, D, H/ps, W/ps) -> (B, num_patches, D)
        embedded = self.proj(x)
        return embedded.flatten(2).transpose(1, 2)
class VisionTransformer(nn.Module):
    """ Vision Transformer

    DINO-style ViT backbone: patch embedding + [CLS] token + learned
    positional embeddings, followed by `depth` pre-norm encoder blocks.
    With num_classes=0 (the default) the head is an identity and forward
    returns the final [CLS] embedding.
    """
    def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim

        self.patch_embed = PatchEmbed(
            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        # Learned [CLS] token and positional table (num_patches + 1 slots;
        # slot 0 belongs to the [CLS] token).
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        # Classifier head (identity when num_classes == 0)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resize the learned positional embeddings when the input
        resolution differs from the one the table was built for; the [CLS]
        position is passed through unchanged."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_embed.patch_size
        h0 = h // self.patch_embed.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def prepare_tokens(self, x):
        """Patch-embed x, prepend the [CLS] token and add positional encoding."""
        B, nc, w, h = x.shape
        x = self.patch_embed(x)  # patch linear embedding

        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)

        return self.pos_drop(x)

    def forward(self, x):
        # Run the full encoder and return the final [CLS] embedding (token 0).
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]

    def get_last_selfattention(self, x):
        """Return the attention maps of the last block (for visualization)."""
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)

    def get_intermediate_layers(self, x, n=1):
        """Return the normalized token sequences of the last `n` blocks."""
        x = self.prepare_tokens(x)
        # we return the output tokens from the `n` last blocks
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit_tiny(patch_size=16, **kwargs):
    """Build a ViT-Tiny encoder (depth 12, width 192, 3 heads)."""
    return VisionTransformer(
        patch_size=patch_size,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
def vit_small(patch_size=16, **kwargs):
    """Build a ViT-Small encoder (depth 12, width 384, 6 heads)."""
    return VisionTransformer(
        patch_size=patch_size,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
def vit_base(patch_size=16, **kwargs):
    """Build a ViT-Base encoder (depth 12, width 768, 12 heads)."""
    return VisionTransformer(
        patch_size=patch_size,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
def swin_t(**kwargs):
    """Build a Swin-T backbone from the project's Swin config.

    kwargs are accepted for signature parity with the vit_* factories but
    are currently ignored; every setting comes from get_config_swin_t().
    """
    # Imported lazily so the config module is only required when Swin is used.
    from config import get_config_swin_t
    config = get_config_swin_t()
    model = SwinTransformer(img_size=config.DATA.IMG_SIZE,
                            patch_size=config.MODEL.SWIN.PATCH_SIZE,
                            in_chans=config.MODEL.SWIN.IN_CHANS,
                            num_classes=config.MODEL.NUM_CLASSES,
                            embed_dim=config.MODEL.SWIN.EMBED_DIM,
                            depths=config.MODEL.SWIN.DEPTHS,
                            num_heads=config.MODEL.SWIN.NUM_HEADS,
                            window_size=config.MODEL.SWIN.WINDOW_SIZE,
                            mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
                            qkv_bias=config.MODEL.SWIN.QKV_BIAS,
                            qk_scale=config.MODEL.SWIN.QK_SCALE,
                            drop_rate=config.MODEL.DROP_RATE,
                            drop_path_rate=config.MODEL.DROP_PATH_RATE,
                            ape=config.MODEL.SWIN.APE,
                            patch_norm=config.MODEL.SWIN.PATCH_NORM,
                            use_checkpoint=config.TRAIN.USE_CHECKPOINT)
    return model
class DINOHead(nn.Module):
    """Projection head: an MLP down to `bottleneck_dim`, L2 normalization,
    then a weight-normalized linear layer up to `out_dim` prototypes.

    With norm_last_layer=True the weight-norm magnitude (weight_g) is fixed
    at 1, so the last layer only learns directions.
    """
    def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
        super().__init__()
        nlayers = max(nlayers, 1)
        if nlayers == 1:
            # Degenerate case: a single projection straight to the bottleneck.
            self.mlp = nn.Linear(in_dim, bottleneck_dim)
        else:
            # in_dim -> hidden_dim -> ... -> hidden_dim -> bottleneck_dim,
            # with GELU (and optional BatchNorm) between linear layers.
            layers = [nn.Linear(in_dim, hidden_dim)]
            if use_bn:
                layers.append(nn.BatchNorm1d(hidden_dim))
            layers.append(nn.GELU())
            for _ in range(nlayers - 2):
                layers.append(nn.Linear(hidden_dim, hidden_dim))
                if use_bn:
                    layers.append(nn.BatchNorm1d(hidden_dim))
                layers.append(nn.GELU())
            layers.append(nn.Linear(hidden_dim, bottleneck_dim))
            self.mlp = nn.Sequential(*layers)
        self.apply(self._init_weights)
        # Weight-normalized output layer; magnitude fixed to 1.
        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            self.last_layer.weight_g.requires_grad = False

    def _init_weights(self, m):
        # Truncated-normal init for linear weights, zero biases.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.mlp(x)
        # L2-normalize so the final layer operates on unit-length features.
        x = nn.functional.normalize(x, dim=-1, p=2)
        x = self.last_layer(x)
        return x
| vision_transformer.py | 12,421 | Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Image to Patch Embedding
Vision Transformer
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
Copyright (c) Facebook, Inc. and its affiliates. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. work with diff dim tensors, not just 2D ConvNets binarize stochastic depth decay rule Classifier head we add a small number to avoid floating point error in the interpolation see discussion at https://github.com/facebookresearch/dino/issues/8 patch linear embedding add the [CLS] token to the embed patch tokens add positional encoding to each token return attention of the last block we return the output tokens from the `n` last blocks | 1,291 | en | 0.813687 |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputeInstanceGroupSelectorCollection(object):
    """
    A collection of selectors. The combination of instances matching the selectors are included in the instance group.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param items:
            The value to assign to the items property of this ComputeInstanceGroupSelectorCollection.
        :type items: list[oci.devops.models.ComputeInstanceGroupSelector]

        """
        # Maps each attribute to its declared Swagger type, used by the SDK
        # for (de)serialization.
        self.swagger_types = {
            'items': 'list[ComputeInstanceGroupSelector]'
        }

        # Maps each Python attribute name to the wire/JSON field name.
        self.attribute_map = {
            'items': 'items'
        }

        self._items = None

    @property
    def items(self):
        """
        **[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.
        A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.


        :return: The items of this ComputeInstanceGroupSelectorCollection.
        :rtype: list[oci.devops.models.ComputeInstanceGroupSelector]
        """
        return self._items

    @items.setter
    def items(self, items):
        """
        Sets the items of this ComputeInstanceGroupSelectorCollection.
        A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.


        :param items: The items of this ComputeInstanceGroupSelectorCollection.
        :type: list[oci.devops.models.ComputeInstanceGroupSelector]
        """
        self._items = items

    def __repr__(self):
        # Human-readable dump of all attributes (SDK helper).
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # NOTE(review): plain attribute-dict comparison; assumes `other` is a
        # model instance — any object with an equal __dict__ compares equal.
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| src/oci/devops/models/compute_instance_group_selector_collection.py | 2,558 | A collection of selectors. The combination of instances matching the selectors are included in the instance group.
Initializes a new ComputeInstanceGroupSelectorCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this ComputeInstanceGroupSelectorCollection.
:type items: list[oci.devops.models.ComputeInstanceGroupSelector]
**[Required]** Gets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:return: The items of this ComputeInstanceGroupSelectorCollection.
:rtype: list[oci.devops.models.ComputeInstanceGroupSelector]
Sets the items of this ComputeInstanceGroupSelectorCollection.
A list of selectors for the instance group. UNION operator is used for combining the instances selected by each selector.
:param items: The items of this ComputeInstanceGroupSelectorCollection.
:type: list[oci.devops.models.ComputeInstanceGroupSelector]
coding: utf-8 Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. noqa: F401 | 1,487 | en | 0.787627 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Update the location of a adrespositie and and add a terrein koppeling using a shapeFile
import os, sys, codecs, datetime, argparse
import osgeo.ogr as ogr
from pyspatialite import dbapi2 as sqlite3 #import sqlite3
def updateTerrein(cur, TERREINOBJECTID, HUISNUMMERID):
    """Insert a terreinobject<->huisnummer link with a freshly generated ID.

    IFNULL guards the MAX(ID) lookup so the insert also works on an empty
    table; the original produced a NULL ID there (MAX over no rows is NULL).
    """
    cur.execute("""INSERT INTO TERREINOBJECT_HUISNUMMER_RELATIES
               (ID, TERREINOBJECTID , HUISNUMMERID, BEGINDATUM, BEGINORGANISATIE, BEGINBEWERKING, BEGINTIJD )
               VALUES ( (SELECT IFNULL(MAX("ID"), 0) + 1 FROM "TERREINOBJECT_HUISNUMMER_RELATIES"),
               ?, ?, date('now'), 1, 1, strftime('%Y-%m-%dT%H:%M:%S','now')) ;""", (TERREINOBJECTID, HUISNUMMERID))
def updateAdresPosistie(cur, X, Y, herkomst, ADRESID):
    """Reposition adrespositie *ADRESID* to (X, Y).

    herkomst: 2 = perceel (parcel), 3 = gebouw (building).
    """
    statement = """UPDATE ADRESPOSITIES
               SET X=?, Y=?, BEGINORGANISATIE=1, BEGINBEWERKING=3, BEGINTIJD=strftime('%Y-%m-%dT%H:%M:%S','now'),
               HERKOMSTADRESPOSITIE=? WHERE ID=? ;"""
    parameters = (X, Y, herkomst, ADRESID)
    cur.execute(statement, parameters)
def removeDoubleTerreinKoppeling(cur):
#joined twice or more
cmd1 = """DELETE FROM TERREINOBJECT_HUISNUMMER_RELATIES
WHERE BEGINTIJD IS NULL OR BEGINTIJD > DATE('now', '-1 day')
AND EXISTS (
SELECT t2.terreinobjectid , t2.huisnummerid , t2.begindatum
FROM TERREINOBJECT_HUISNUMMER_RELATIES t2
WHERE eindtijd IS NULL
AND TERREINOBJECT_HUISNUMMER_RELATIES.terreinobjectid = t2.terreinobjectid
AND TERREINOBJECT_HUISNUMMER_RELATIES.huisnummerid = t2.huisnummerid
AND TERREINOBJECT_HUISNUMMER_RELATIES.begindatum = t2.begindatum
GROUP BY t2.terreinobjectid, t2.huisnummerid, t2.begindatum
HAVING COUNT(*) > 1
AND MAX(t2.ID) <> TERREINOBJECT_HUISNUMMER_RELATIES.ID
); """
#joined to a adres with an enddate
cmd2 = """DELETE FROM TERREINOBJECT_HUISNUMMER_RELATIES
WHERE BEGINTIJD IS NULL OR BEGINTIJD > DATE('now', '-1 day')
AND EXISTS (
SELECT einddatum FROM HUISNUMMERS
WHERE
ID = TERREINOBJECT_HUISNUMMER_RELATIES.huisnummerid
AND IFNULL(einddatum, '9999-01-01') <
IFNULL(TERREINOBJECT_HUISNUMMER_RELATIES.einddatum, '9999-01-01')
);"""
cur.execute( cmd1 )
cur.execute( cmd2 )
def readShape( shapefile, xgrabDB , koppelType=3 ):
    """Apply positions and terrain links from *shapefile* to the xgrab database.

    Each shapefile feature carries ADRESID, TERREINOBJ and HUISNR_ID fields;
    the feature's point geometry becomes the new adrespositie and a new
    terreinobject<->huisnummer link is inserted.
    koppelType: 2 = parcel, 3 = building (HERKOMSTADRESPOSITIE value).
    """
    driver = ogr.GetDriverByName("ESRI Shapefile")
    dataSource = driver.Open(shapefile, 0)  # 0 = read-only
    layer = dataSource.GetLayer(0)
    con = sqlite3.connect( xgrabDB )  # pyspatialite connection (see import)
    with con:
        cur = con.cursor()
        # Index speeds up the per-feature UPDATE ... WHERE ID=? below.
        cur.execute( "CREATE INDEX IF NOT EXISTS adresID_index ON ADRESPOSITIES (ID);" )
        con.commit()
        for feature in layer:
            geom = feature.GetGeometryRef()
            adresID = feature.GetFieldAsInteger("ADRESID")
            terreinID = feature.GetFieldAsInteger("TERREINOBJ")
            huisnrID = feature.GetFieldAsInteger("HUISNR_ID")
            X, Y = ( geom.GetX() , geom.GetY() )
            updateAdresPosistie(cur, X, Y, koppelType, adresID)
            updateTerrein(cur, terreinID , huisnrID)
        # Drop links that ended up duplicated or attached to ended addresses.
        removeDoubleTerreinKoppeling(cur)
        con.commit()
    if con:
        con.close()
def main():
    # Relies on the module-level ``args`` namespace parsed in the __main__ block.
    readShape(args.shapeFile, args.xgrabDB, int( args.koppelType) )
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='update adresposities in a xgrab-db using a shapefile, requires spatialite and gdal-python')
    parser.add_argument('xgrabDB', help='The input database (.sqlite)' )
    parser.add_argument('shapeFile', help='The path to the shapefile, has a TERREINOBJ, HUISNR_ID and adresID')
    # nargs='?' makes this positional optional so the declared default of '3'
    # can actually take effect (argparse ignores defaults on required
    # positionals, so the original default was dead code).
    parser.add_argument('koppelType', nargs='?', help='2 for parcel and 3 for building', default='3')
    args = parser.parse_args()
    main()
| update_terrein_adrespositie.py | 3,955 | herkomst: 2= perceel, 3= gebouw
!/usr/bin/env python -*- coding: UTF-8 -*- Update the location of a adrespositie and and add a terrein koppeling using a shapeFileimport sqlite3joined twice or morejoined to a adres with an enddate | 230 | en | 0.37787 |
import sqlite3
import datetime
from collections import Counter
import calendar
def return_unique_ID():
    """Return the next free rolodex id as a string ("1" when the table is empty).

    Uses MAX(id) instead of loading and sorting every row; the original
    raised IndexError when the table had no rows.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute('SELECT MAX(id) FROM rolodex')
    highest = c.fetchone()[0]
    return str((highest or 0) + 1)
def return_unique_order_ID():
    """Return the next free orders id as a string ("1" when the table is empty).

    Uses MAX(id) instead of loading and sorting every row; the original
    raised IndexError when the table had no rows.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute('SELECT MAX(id) FROM orders')
    highest = c.fetchone()[0]
    return str((highest or 0) + 1)
def input_entry(customerFirstName, customerLastName, customerPhoneNumber, customerAddress, customerPayMethod):
    """Add a new customer row to the rolodex with a freshly generated id."""
    record = (
        return_unique_ID(),
        customerFirstName,
        customerLastName,
        customerPhoneNumber,
        customerAddress,
        customerPayMethod,
    )
    connection = sqlite3.connect("ORDERM8.db")
    connection.cursor().execute('INSERT INTO rolodex VALUES (?,?,?,?,?,?)', record)
    connection.commit()
def return_all_entries():
    """Return a cursor over all rolodex rows ordered by last name."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM rolodex ORDER BY last_name')
    return cursor
def review_all_entries():
    """Print the first five columns of every rolodex row to stdout."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM rolodex')
    for row in cursor:
        print(row[0], row[1], row[2], row[3], row[4])
def delete_entry_by_id(uniqueID):
    """Delete the rolodex row with the given id.

    Bug fix: the id is wrapped in a one-element tuple.  The original passed
    it bare, so any id that was not a single-character string raised a
    parameter-binding error (a string counts as a sequence of characters).
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute('DELETE FROM rolodex WHERE id = ?', (uniqueID,))
    conn.commit()
def update_rolodex_entry(variable, variable_type, uniqueID):
    """Route a field update to the matching column-specific helper."""
    handlers = {
        "name": update_rolodex_entry_name,
        "phoneNumber": update_rolodex_entry_phoneNumber,
        "address": update_rolodex_entry_address,
        "payMethod": update_rolodex_entry_payMethod,
    }
    handler = handlers.get(variable_type)
    if handler is None:
        print("failed to update anything.")
    else:
        handler(variable, uniqueID)
def _update_rolodex_column(statement, variable, uniqueID):
    """Run one parameterised rolodex UPDATE (shared by the setters below)."""
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute(statement, (variable, uniqueID))
    conn.commit()

def update_rolodex_entry_name(variable, uniqueID):
    """Set the name field of the rolodex row with id *uniqueID*."""
    # NOTE(review): create_rolodex_table defines first_name/last_name, not a
    # "name" column -- confirm this UPDATE targets a real column.
    _update_rolodex_column('''UPDATE rolodex
                SET name = ?
                WHERE id = ?''', variable, uniqueID)

def update_rolodex_entry_phoneNumber(variable, uniqueID):
    """Set the phoneNumber field of the rolodex row with id *uniqueID*."""
    # NOTE(review): create_rolodex_table defines phone_number, not
    # "phoneNumber" -- confirm this UPDATE targets a real column.
    _update_rolodex_column('''UPDATE rolodex
                SET phoneNumber = ?
                WHERE id = ?''', variable, uniqueID)

def update_rolodex_entry_address(variable, uniqueID):
    """Set the address field of the rolodex row with id *uniqueID*."""
    _update_rolodex_column('''UPDATE rolodex
                SET address = ?
                WHERE id = ?''', variable, uniqueID)

def update_rolodex_entry_payMethod(variable, uniqueID):
    """Set the payMethod field of the rolodex row with id *uniqueID*."""
    _update_rolodex_column('''UPDATE rolodex
                SET payMethod = ?
                WHERE id = ?''', variable, uniqueID)
def create_rolodex_table():
    """Create the rolodex table if it does not exist yet."""
    ddl = """CREATE TABLE IF NOT EXISTS rolodex (
                id integer PRIMARY KEY,
                first_name text,
                last_name text,
                phone_number int,
                address text,
                payMethod text)
                """
    connection = sqlite3.connect("ORDERM8.db")
    connection.execute(ddl)
    connection.commit()
def _search_rolodex(column, value):
    """Return a cursor over rolodex rows where *column* equals *value*.

    *column* is always one of the fixed names supplied by the wrappers below,
    never user input, so interpolating it into the SQL is safe.  The four
    public search functions previously repeated this body verbatim.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute('SELECT * FROM rolodex WHERE {} = (?)'.format(column), (value,))
    return c

def search_by_customer_id(customer_id):
    """Look a customer up by primary key."""
    return _search_rolodex('id', customer_id)

def search_by_customer_first_name(customer_name):
    """Look customers up by first name."""
    return _search_rolodex('first_name', customer_name)

def search_by_customer_last_name(customer_name):
    """Look customers up by last name."""
    return _search_rolodex('last_name', customer_name)

def search_by_customer_phone_number(customer_phone_number):
    """Look customers up by phone number."""
    return _search_rolodex('phone_number', customer_phone_number)
def create_orders_table():
    """Create the orders table if it does not exist yet.

    Adds IF NOT EXISTS for consistency with the other create_* helpers; the
    original raised OperationalError when the table was already present.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    create_table = """CREATE TABLE IF NOT EXISTS orders (
                id integer PRIMARY KEY,
                custid SMALLINT,
                orderlist text,
                time_stamp text)
                """
    c.execute(create_table)
    conn.commit()
def create_customerprefs_table():
    """Create the customerprefs table if it does not exist yet."""
    ddl = """CREATE TABLE IF NOT EXISTS customerprefs (
                id integer PRIMARY KEY,
                customer_id integer,
                field_id integer,
                description text)
                """
    connection = sqlite3.connect("ORDERM8.db")
    connection.execute(ddl)
    connection.commit()
def new_customer_delivery_preference(customerID, customer_delivery_preference):
    """Record a delivery preference (field_id 10) for *customerID*."""
    entry = (return_unique_customerpreference_ID(), customerID, 10, customer_delivery_preference)
    connection = sqlite3.connect("ORDERM8.db")
    connection.cursor().execute('INSERT INTO customerprefs VALUES (?,?,?,?)', entry)
    connection.commit()
def return_unique_customerpreference_ID():
    """Return the next free customerprefs id as a string ("1" when empty).

    Uses MAX(id) instead of loading and sorting every row; the original
    raised IndexError when the table had no rows.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute('SELECT MAX(id) FROM customerprefs')
    highest = c.fetchone()[0]
    return str((highest or 0) + 1)
def input_new_order(customerID, order_list):
    """Record a new order for *customerID* with a timestamp.

    Bug fix: the customer id is now stored in the custid column; the
    original hard-coded the value 1 and silently ignored the argument.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    uniqueID = return_unique_order_ID()
    orderEntry = (uniqueID, customerID, order_list, datetime.datetime.now())
    c.execute('INSERT INTO orders VALUES (?,?,?,?)', orderEntry)
    conn.commit()
#def drop_rolodex_table():
# conn = sqlite3.connect("ORDERM8.db")
# c = conn.cursor()
# c.execute('DROP table rolodex')
# for item in c:
# orderlist = item[2].split()
# print item[0], item[1], orderlist, item[3]
def return_all_customerprefs_entries():
    """Return a cursor over every customerprefs row."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM customerprefs')
    return cursor
def get_latest_customerprefs(customer_id):
    """Return the newest delivery-preference row (field_id 10) for a
    customer, or None when the customer has none."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM customerprefs WHERE customer_id=(?) AND field_id = 10 ORDER BY id DESC LIMIT 1',
                   (customer_id,))
    return cursor.fetchone()
def get_latest_foodprefs(customer_id):
    """Return the newest food-preference row (field_id 20) for a customer,
    or None when the customer has none."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM customerprefs WHERE customer_id=(?) AND field_id = 20 ORDER BY id DESC LIMIT 1',
                   (customer_id,))
    return cursor.fetchone()
def new_customer_food_preference(customerID, customer_food_preference):
    """Record a food preference (field_id 20) for *customerID*."""
    entry = (return_unique_customerpreference_ID(), customerID, 20, customer_food_preference)
    connection = sqlite3.connect("ORDERM8.db")
    connection.cursor().execute('INSERT INTO customerprefs VALUES (?,?,?,?)', entry)
    connection.commit()
def delete_customer_and_customer_records(customerID):
    """Remove a customer and every preference row attached to them."""
    key = (customerID,)
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('DELETE FROM rolodex WHERE id=(?)', key)
    cursor.execute('DELETE FROM customerprefs WHERE customer_id=(?)', key)
    connection.commit()
# Day Duties Stuff.
def create_day_duties_table():
    """Create the day_duties table if it does not exist yet."""
    ddl = """CREATE TABLE IF NOT EXISTS day_duties (
                id integer PRIMARY KEY,
                date_of_entry DATE,
                day_of_week text,
                task text)
                """
    connection = sqlite3.connect("ORDERM8.db")
    connection.execute(ddl)
    connection.commit()
def return_unique_day_duty_ID():
    """Return the next free day_duties id as a string ("1" when empty).

    Uses MAX(id) instead of loading and sorting every row; the original
    raised IndexError when the table had no rows.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute('SELECT MAX(id) FROM day_duties')
    highest = c.fetchone()[0]
    return str((highest or 0) + 1)
def new_day_duty(date_of_entry, day_of_week, task):
    """Record a recurring task for the given weekday."""
    entry = (return_unique_day_duty_ID(), date_of_entry, day_of_week, task)
    connection = sqlite3.connect("ORDERM8.db")
    connection.cursor().execute('INSERT INTO day_duties VALUES (?,?,?,?)', entry)
    connection.commit()
def return_all_day_duties():
    """Return a cursor over every day_duties row."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM day_duties')
    return cursor
def search_by_day_of_week(day_of_week):
    """Return a cursor over day_duties rows scheduled for *day_of_week*."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('''SELECT * FROM day_duties WHERE day_of_week = (?)''', (day_of_week,))
    return cursor
# Examples
# new_day_duty(datetime.datetime.now(), "Wednesday", "Condense Recycling")
# for item in return_all_day_duties():
# print item
# DAILY CUSTOMER ENTRIES
def delete_daily_customer_entrys(order_id):
    """Delete one daily_customers row by id (id is stringified for binding)."""
    connection = sqlite3.connect("ORDERM8.db")
    connection.cursor().execute('''DELETE FROM daily_customers WHERE id=(?)''', (str(order_id),))
    connection.commit()
def return_unique_daily_customer_entry_id():
    """Return the next free daily_customers id as a string ("1" when empty).

    Uses MAX(id) instead of loading and sorting every row; the original
    raised IndexError when the table had no rows.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    c.execute('SELECT MAX(id) FROM daily_customers')
    highest = c.fetchone()[0]
    return str((highest or 0) + 1)
def create_daily_customers_table():
    """Create the daily_customers table if it does not exist yet.

    Adds IF NOT EXISTS for consistency with the other create_* helpers; the
    original raised OperationalError when the table was already present.
    """
    conn = sqlite3.connect("ORDERM8.db")
    c = conn.cursor()
    create_table = """CREATE TABLE IF NOT EXISTS daily_customers (
                id integer PRIMARY KEY,
                custid SMALLINT,
                todays_date DATE)
                """
    c.execute(create_table)
    conn.commit()
def new_daily_customer(customer_id):
    """Log a visit from *customer_id* stamped with today's date."""
    entry = (return_unique_daily_customer_entry_id(), customer_id, datetime.date.today())
    connection = sqlite3.connect("ORDERM8.db")
    connection.cursor().execute('INSERT INTO daily_customers VALUES (?,?,?)', entry)
    connection.commit()
def return_all_daily_customer_entries():
    """Return a cursor over today's daily_customers rows."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM daily_customers WHERE todays_date=(?)', (datetime.date.today(),))
    return cursor
def weekly_graph_data():
    """Map weekday index (0=Monday .. 5=Saturday) to this week's visit counts."""
    counts = enumerate_this_weeks_customer_entries(return_this_weeks_customer_entries())
    return {day: counts.get(day, 0) for day in range(6)}
def monthly_graph_data():
    """Map day-of-month to this month's visit counts.

    Calls return_this_months_customer_entries once; the original called it
    twice, re-running the query for no benefit (the second call only re-read
    the start/end dates).
    """
    entries, (month_start, month_end) = return_this_months_customer_entries()
    daycount = enumerate_this_months_customer_entries(entries)
    return {day: daycount.get(day, 0)
            for day in range(month_start.day, month_end.day + 1)}
def yearly_graph_data():
    """Map month number (keys 0-12; 0 is never populated) to this year's visit counts."""
    monthly_counts = enumerate_this_years_customer_entries(return_this_years_customer_entries())
    return {month: monthly_counts.get(month, 0) for month in range(13)}
def return_all_customer_entries_from_daily_customers():
    """Return a cursor over every daily_customers row (no date filter)."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM daily_customers')
    return cursor
def return_this_weeks_customer_entries():
    """Return a cursor over daily_customers rows from Monday through Friday
    of the current week (weekend days are outside the window)."""
    today = datetime.date.today()
    monday = today - datetime.timedelta(days=today.weekday())
    friday = monday + datetime.timedelta(days=4)
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM daily_customers WHERE todays_date BETWEEN (?) and (?)', (monday, friday))
    return cursor
def enumerate_this_weeks_customer_entries(customer_entries):
    """Count entries per weekday (0=Monday); rows must carry an ISO date
    string in their third column."""
    return Counter(
        datetime.datetime.strptime(row[2], "%Y-%m-%d").weekday()
        for row in customer_entries
    )
def return_this_months_customer_entries():
    """Return (cursor over this month's rows, (month_start, month_end))."""
    today = datetime.date.today()
    month_start = datetime.datetime(today.year, today.month, 1)
    last_day = calendar.monthrange(today.year, today.month)[1]
    month_end = datetime.datetime(today.year, today.month, last_day)
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM daily_customers WHERE todays_date BETWEEN (?) and (?)',
                   (month_start, month_end))
    return cursor, (month_start, month_end)
def enumerate_this_months_customer_entries(customer_entries):
    """Count entries per day of month; rows must carry an ISO date string in
    their third column."""
    return Counter(
        datetime.datetime.strptime(row[2], "%Y-%m-%d").day
        for row in customer_entries
    )
def return_this_years_customer_entries():
    """Return (cursor over this year's rows, (year_start, year_end))."""
    year = datetime.date.today().year
    year_start = datetime.datetime(year, 1, 1)
    year_end = datetime.datetime(year, 12, 31)
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute('SELECT * FROM daily_customers WHERE todays_date BETWEEN (?) and (?)',
                   (year_start, year_end))
    return cursor, (year_start, year_end)
def enumerate_this_years_customer_entries(customer_entries):
    """Count entries per month; expects the (rows, start_end) pair returned
    by return_this_years_customer_entries."""
    rows, _ = customer_entries
    return Counter(
        datetime.datetime.strptime(row[2], "%Y-%m-%d").month
        for row in rows
    )
# for messing around with daily customer entries
# for item in return_all_daily_customer_entries():
# print item
# for item in range(0,15):
# delete_daily_customer_entrys(item)
# FOR COPYING ROLODEX AND CUSTOMERPREFS FROM PEPS DB TO NEW DB
# will have to delete all entries on the new db in customerprefs and rolodex for this to work.
def create_test_empty_db():
    """Create (or just touch) the empty ORDERM8_test.db file."""
    sqlite3.connect("ORDERM8_test.db").close()
def copy_table_db_to_db():
    """Copy every rolodex row from ORDERM8.db into ORDERM8_test.db.

    ORDERM8_test.db (the new database) must already contain a rolodex table
    with a matching column layout.
    """
    target = sqlite3.connect("ORDERM8_test.db")  # new db
    cursor = target.cursor()
    cursor.execute("ATTACH 'ORDERM8.db' AS test")  # peps db
    cursor.execute("INSERT INTO rolodex SELECT * FROM test.rolodex")
    target.commit()
# create_test_empty_db()
# create_rolodex_table()
# will only need this function once rolodex and customerprefs are cleared out.
# copy_table_db_to_db()
def select_all_activity(customer_id):
    """Return a cursor over every daily_customers visit for *customer_id*."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute("SELECT * FROM daily_customers WHERE custid=(?)", (customer_id,))
    return cursor
def select_recent_activity(customer_id):
    """Return a cursor over the customer's ten most recent visits,
    newest first."""
    connection = sqlite3.connect("ORDERM8.db")
    cursor = connection.cursor()
    cursor.execute("SELECT * FROM daily_customers WHERE custid=(?) ORDER BY todays_date DESC LIMIT 10",
                   (customer_id,))
    return cursor
| SQL_functions.py | 16,437 | def drop_rolodex_table(): conn = sqlite3.connect("ORDERM8.db") c = conn.cursor() c.execute('DROP table rolodex') for item in c: orderlist = item[2].split() print item[0], item[1], orderlist, item[3] Day Duties Stuff. Examples new_day_duty(datetime.datetime.now(), "Wednesday", "Condense Recycling") for item in return_all_day_duties(): print item DAILY CUSTOMER ENTRIES for messing around with daily customer entries for item in return_all_daily_customer_entries(): print item for item in range(0,15): delete_daily_customer_entrys(item) FOR COPYING ROLODEX AND CUSTOMERPREFS FROM PEPS DB TO NEW DB will have to delete all entries on the new db in customerprefs and rolodex for this to work.newdbpepsdb create_test_empty_db() create_rolodex_table() will only need this function once rolodex and customerprefs are cleared out. copy_table_db_to_db() | 882 | en | 0.575305 |
"""
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Multi-file class combining taken from https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py
import logging
from abc import ABC
from typing import Literal
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.config import Config
from .reactroles import ReactRoles
from .roles import Roles
log = logging.getLogger("red.phenom4n4n.roleutils")  # cog-wide logger
# Requester values accepted by red_delete_data_for_user.
RequestType = Literal["discord_deleted_user", "owner", "user", "user_strict"]
class CompositeMetaClass(type(commands.Cog), type(ABC)):
    """
    Metaclass combining discord.py's Cog metaclass with ABCMeta so a cog
    class can inherit from both commands.Cog and ABC-based mixins without
    a metaclass conflict.
    """
class RoleUtils(
    Roles,
    ReactRoles,
    commands.Cog,
    metaclass=CompositeMetaClass,
):
    """
    Useful role commands.
    Includes massroling, role targeting, and reaction roles.
    """

    __version__ = "1.3.5"  # surfaced in the help footer below

    def format_help_for_context(self, ctx):
        """Append the cog version to Red's standard help text."""
        pre_processed = super().format_help_for_context(ctx)
        # Avoid stacking blank lines when the parent help already ends in one.
        n = "\n" if "\n\n" not in pre_processed else ""
        return f"{pre_processed}{n}\nCog Version: {self.__version__}"

    def __init__(self, bot: Red, *_args) -> None:
        # In-memory cache shared with the Roles/ReactRoles mixins.
        self.cache = {}
        self.bot = bot
        self.config = Config.get_conf(
            self,
            identifier=326235423452394523,
            force_registration=True,
        )
        default_guild = {"reactroles": {"channels": [], "enabled": True}}
        self.config.register_guild(**default_guild)
        # Custom "GuildMessage" group keyed by two identifiers -- presumably
        # (guild id, message id); confirm against the ReactRoles mixin.
        default_guildmessage = {"reactroles": {"react_to_roleid": {}}}
        self.config.init_custom("GuildMessage", 2)
        self.config.register_custom("GuildMessage", **default_guildmessage)
        super().__init__(*_args)

    async def red_delete_data_for_user(self, *, requester: RequestType, user_id: int) -> None:
        # No end-user data is stored by this cog, so there is nothing to delete.
        return

    async def initialize(self):
        """Run the mixins' async setup (chained via super())."""
        log.debug("RoleUtils initialize")
        await super().initialize()
| roleutils/roleutils.py | 3,033 | This allows the metaclass used for proper type detection to
coexist with discord.py's metaclass
Useful role commands.
Includes massroling, role targeting, and reaction roles.
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Multi-file class combining taken from https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/mod/mod.py | 1,373 | en | 0.859512 |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 24 17:59:20 2017
@author: amirbitran
Various functions that serve to compute the contacts matrix for a series of PDB snapshots
"""
import numpy as np
from matplotlib import pyplot as plt
import sklearn
from sklearn import metrics
from dbfold.utils import loopCluster
import joblib
import copy as cp
import matplotlib.colors as cccc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def compute_contacts_matrix(coords, mode='binary', thresh=7.8, min_seq_separation=8):
    """Build a lower-triangular contact map from an (N, 3) coordinate array.

    Parameters
    ----------
    coords : array-like, shape (N, 3)
        Cartesian coordinates, one row per residue.
    mode : str
        'binary'    -> 1 where the inter-residue distance is below thresh
        'distances' -> the distance itself wherever it is nonzero after masking
    thresh : float
        Distance cutoff (same units as coords; only used in 'binary' mode).
    min_seq_separation : int
        Minimum separation along the sequence for a pair to be counted;
        closer pairs are zeroed out by the tril offset.

    Returns
    -------
    numpy.ndarray, shape (N, N)

    Raises
    ------
    ValueError
        For an unknown *mode* (the original died with a NameError instead).
    """
    coords = np.asarray(coords, dtype=float)
    # Pairwise Euclidean distances computed directly with numpy; the original
    # pulled in sklearn's pairwise_distances for the same result.
    deltas = coords[:, None, :] - coords[None, :, :]
    M = np.sqrt((deltas * deltas).sum(axis=-1))
    # -min_seq_separation ensures that we do not count residues that are
    # closer than min_seq_separation in sequence.
    M = np.tril(M, -min_seq_separation)
    contacts = np.zeros(np.shape(M))
    if mode == 'binary':
        contacts[np.where((M < thresh) & (M != 0))] = 1
    elif mode == 'distances':
        contacts[M > 0] = M[M > 0]
    else:
        raise ValueError("mode must be 'binary' or 'distances', got {!r}".format(mode))
    return contacts
def compute_RG(snapshot, atom='CA'):
    """Radius of gyration of the selected atoms in a PDB snapshot."""
    coords, _ = read_PDB(snapshot, atom)
    centered = coords - np.mean(coords, axis=0)
    return np.sqrt(np.mean(np.sum(centered * centered, axis=1)))
def count_contacts(native_file, d_cutoff, min_seq_separation):
    """Number of native contacts in a PDB file at the given cutoff."""
    coords, _ = read_PDB(native_file, 'CA')
    contact_map = compute_contacts_matrix(coords, thresh=d_cutoff,
                                          min_seq_separation=min_seq_separation)
    return int(np.sum(contact_map))
def create_substructure_PML(PDB_path, subs_to_plot, d_cutoff, min_clustersize, contact_sep_thresh,min_seq_separation=8, substructures = [], colours = []):
    """
    Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol
    Ex. Create_substructure_PML('MARR_umbrella3/marr_0.100_Emin.pdb', ['a','b','c','d','e','f'], 7.8, 7, 3)
    You can also pre-enter the substructures as an optional argument
    Otherwise, it will compute the substrucutres using PDB_path and save the file as PDB_path but with .pml instead of .pdb
    You can optinally input the sequence of colors you want to use to paint the substructures (using the fancy British spelling colours)
    Otherwise, it will color things automatically using the usual default sequence
    That optional argument, if used, needs to have len equal to thhat of subs_to_plot: one color per substructure to plot

    NOTE(review): substructures/colours are mutable default arguments; callers
    should not rely on them persisting between calls.
    """
    # Substructures are labelled 'a', 'b', ... in page order.
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    if len(substructures)==0:
        native_contacts, substructures = identify_native_substructures(PDB_path, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize, plot=False)
    # Output script path: same as the input but with a .pml extension.
    prefix = PDB_path.split('pdb')[0]
    PML_path = '{}pml'.format(prefix)
    Nsubs = np.shape(substructures)[2]
    file = open(PML_path, 'w')
    file.write('bg white \n color gray \n')
    if len(colours)==0:
        colors=cm.get_cmap('jet')  # automatic colors sampled from jet
    counter = 0
    for s in range(Nsubs):
        if alphabet[s] in subs_to_plot:
            if len(colours)==0:
                curr_color=colors((s)/(Nsubs-1 ))
            else:
                curr_color = colours[counter]
            # PyMOL wants hex colors in 0xRRGGBB form.
            c_hex = cccc.to_hex(curr_color)
            c_hex = '0x{}'.format(c_hex.split('#')[1])
            sub = substructures[:,:,s]
            contacts = np.where(sub)
            substr = 'sub{}'.format(alphabet[s])
            for z in range(len(contacts[0])):
                # +1: PDB residue numbering is 1-based.
                i = contacts[0][z]+1
                j = contacts[1][z]+1
                lines = "select aa, //resi {}/CA \n select bb, //resi {}/CA \n distance {}, aa, bb \n hide labels, {} \n set dash_color, {}, {} \n ".format(i, j, substr, substr, c_hex, substr)
                file.write(lines)
            file.write('\n set dash_gap, 0.5 \n set dash_radius, 0.2 \n')
            counter+=1
    file.close()
def find_native_contacts(native_file, thresh, min_seq_separation, mode = 'binary'):
    """Contact map of the native structure in *native_file* (alpha carbons)."""
    coords, _ = read_PDB(native_file, atom='CA')
    return compute_contacts_matrix(coords, thresh=thresh,
                                   min_seq_separation=min_seq_separation, mode=mode)
def identify_native_substructures(native_file, d_cutoff, min_seq_separation, contact_sep_thresh, min_clustersize,atom='CA', labelsize = 30, fontsize = 30, max_res = None, plot=True, ax = None, native_contacts=[], verbose=False):
    """
    Identify substructures within native file contact map
    Using the following strategy
    We produce a contact map which is a bunch of dots
    Contacts correspond to pairs of residues that are less than d_cutoff apart
    6 Angstroms is generally a good value, but may want a larger value for helical proteins where residues interact primarily
    via sidechains, and thus the alpha carbons are further apart
    We only count contacts if the residues are separated by min_seq_separation along the primary sequence
    We set min_seq_separation relatively high because we don't care to include intra-helix contacts within our contact map
    Ce can calculate the "Manhattan" distnace between every pair of dots on that contact map
    and build a graph of contacts in which two contacts are connected if they are less than some
    threshold distance, contact_sep_thresh, apart in the contact map
    Then, we find all connected components of this graph, each of which is a substructure
    But we only keep substructures whose number of residues is at least min_clustersize, to avoid sparse contacts here and there that we dont' care about
    Gives you option to input native contacts a priori, but by defualt you don't do this (value is set to None)
    You set Max_res to something other than None if you wish to plot only up to a certain residue number (ex. to depict what substructures can be formed when the first 100 AAs are synthesized)

    Returns (native_contacts, substructures) where substructures is an
    (N, N, n_substructures) array of 0/1 pages, one page per substructure.
    NOTE(review): native_contacts is a mutable default argument ([]).
    """
    if len(native_contacts)==0:
        coords, resis=read_PDB(native_file, atom)
        #we get a contact map with a min seq separation larger than usual to avoid helices
        native_distances=compute_contacts_matrix(coords, mode='distances', min_seq_separation=min_seq_separation)
        native_contacts=np.zeros(np.shape(native_distances))
        native_contacts[np.where((native_distances<d_cutoff) & (native_distances!=0))]=1
    positions=np.where(native_contacts==1) #which residues participate in contacts
    positions=np.transpose(positions)
    M=metrics.pairwise.pairwise_distances(positions, metric='manhattan') #how far is each contact from each other contact?
    #To find connected components, I use my loopCluster function by feeding in the positions ofr the contacts instead of the "files",
    #as well as above matrix M as d_contacts
    clusters, pairs_in_substructures, mean_intercluster, mean_intracluster=loopCluster(contact_sep_thresh, positions, M, sort_orphans=False, min_clustersize=min_clustersize, verbose=verbose)
    #pairs in substructures is a list of sublists, each of which correspodns to a given substructure
    #Within a given sublist, there are a bunch of pairs which tell you which pairs of residues belong to that substructure
    #The above is in a messy form, so we convert it into a form that allows for numpy indexing,
    #where we have a list of sublists, each sublist contains two arrays, the first of which gives the first indices for the interacting residues
    #pairs_in_substructures=[[np.array(C)[:,0], np.array(C)[:,1]] for C in pairs_in_substructures]
    pairs_in_substructures=[(np.array(C)[:,0], np.array(C)[:,1]) for C in pairs_in_substructures]
    nsubstructures=len(pairs_in_substructures)  #we now produce a set of matrices...the ith page tells you which contacts belong to the ith substructure
    substructures=np.zeros((np.shape(native_contacts)[0], np.shape(native_contacts)[1], nsubstructures))
    for n in range(nsubstructures):
        SS=np.zeros(np.shape(native_contacts))
        SS[pairs_in_substructures[n]]=1
        substructures[:,:,n]=SS
    if plot:
        visualize_substructures(native_contacts, substructures, max_res = max_res, ax = ax, labelsize = labelsize, fontsize = fontsize)
        #print(positions)
    return native_contacts, substructures
def PDB_contacts_matrix(PDB_file, thresh=7.8, min_seq_separation=8, plot = True, mode='binary'):
    """Contact map for a PDB file, optionally rendered with matplotlib.

    Delegates to compute_contacts_matrix instead of duplicating the same
    pairwise-distance / threshold logic inline, so the two code paths can
    no longer drift apart.
    """
    coords, resis = read_PDB(PDB_file, 'CA')
    contacts = compute_contacts_matrix(coords, mode=mode, thresh=thresh,
                                       min_seq_separation=min_seq_separation)
    if plot:
        plt.figure()
        plt.imshow(contacts)
        plt.title(PDB_file)
    return contacts
def _split_merged_field(entries, i):
    """
    Repair a whitespace-split PDB line in which the numeric field at
    entries[i] absorbed the following field (e.g. '-50.0-100.0' when a wide
    negative number removed the separating space). Split at the first
    interior minus sign and INSERT the second half after position i, so the
    remaining fields keep their original meaning instead of being clobbered.
    """
    if '-' in entries[i][1:-1]:  # interior '-' means two numbers ran together
        merged = entries[i]
        cut = merged[1:-1].index('-') + 1  # skip a possible leading sign
        entries[i] = merged[:cut]
        entries.insert(i + 1, merged[cut:])


def read_PDB(file, atom):
    """
    Extract the coordinates of one atom type per residue from a PDB file.

    Parameters
    ----------
    file : str
        Path to the PDB file.
    atom : str
        Atom name whose coordinates are collected ('CA' for alpha carbons).
        Glycines always contribute their alpha carbon, since they have no
        side chain.

    Returns
    -------
    coords : np.ndarray
        (n_residues, 3) array of x, y, z coordinates.
    resis : list of str
        Residue names, parallel to coords.

    Notes
    -----
    Fixes over the previous version: the file handle is now closed via a
    context manager, and repairing run-together coordinate fields inserts
    the split-off value instead of overwriting the next token (which used
    to discard the z coordinate when x and y merged).

    TODO: handle chain labels other than 'A'; a different letter in the
    fifth column is treated as the no-chain layout and gives wrong columns.
    """
    resis = []
    coords = []
    with open(file) as openfile:  # ensure the handle is closed on all paths
        for line in openfile:
            entries = line.rstrip('\n').split()
            if not entries or entries[0] != 'ATOM':
                continue
            # Glycine has no side chain, so fall back to its alpha carbon.
            want = 'CA' if entries[3] == 'GLY' else atom
            if len(entries) > 1 and entries[2] == want:
                if entries[4] == 'A':
                    # Chain label present: coordinates live in columns 6-8.
                    resis.append(entries[3])
                    coords.append([float(entries[6]), float(entries[7]), float(entries[8])])
                else:
                    # No chain label: coordinates live in columns 5-7, but
                    # run-together negative numbers must be repaired first.
                    _split_merged_field(entries, 5)  # x may have absorbed y
                    _split_merged_field(entries, 6)  # y may have absorbed z
                    resis.append(entries[3])
                    coords.append([float(entries[5]), float(entries[6]), float(entries[7])])
    coords = np.array(coords)
    return coords, resis
def score_snapshot(snapshot, substructures, atom='CA', min_seq_separation=8 ):
    """
    Score how formed each substructure is in a single PDB snapshot.

    The i-th score is the mean distance between all residue pairs that
    participate in the i-th substructure in this snapshot; a score close to
    the characteristic contact distance means the substructure is mostly
    formed.

    Returns (scores, distances): one score per substructure, plus the
    snapshot's inter-residue distance matrix (possibly truncated to the
    region covered by the substructures).
    """
    xyz, _ = read_PDB(snapshot, atom)
    dist_matrix = compute_contacts_matrix(xyz, mode='distances', min_seq_separation=min_seq_separation)
    n_res = np.shape(dist_matrix)[0]
    n_sub_res = np.shape(substructures)[0]
    if n_res > n_sub_res:
        # The substructures were defined on a smaller protein; analyze only
        # the region of this larger protein that they cover.
        dist_matrix = dist_matrix[0:n_sub_res, 0:n_sub_res]
    n_subs = np.shape(substructures)[2]
    scores = np.zeros(n_subs)
    for idx in range(n_subs):
        # Overlap between this substructure's contacts and the snapshot.
        overlap = np.multiply(dist_matrix, substructures[:, :, idx])
        scores[idx] = np.mean(overlap[np.nonzero(overlap)])
    return scores, dist_matrix
def visualize_nonnatives(nonnatives_path, native_file, d_cutoff=6.5, cmap='Greys', Return = False, cbar = True, filter_natives = True, filter_distance = 2, vmax = 1, alpha = 1,custom_filter = None, ax=None, labelsize = 40):
    """
    Plot a contact map of nonnative contacts, shaded by the frequency with
    which each contact is observed.

    Reads a file of the form Distance_maps.dat (a joblib dump of
    [distance_maps, PDB_files, filescores]).

    d_cutoff is the distance cutoff used to identify NATIVE contacts to
    subtract off from the nonnatives; it should match whatever was used to
    identify the nonnatives in the first place.

    If filter_natives, the native contacts are masked out, along with a
    border of width filter_distance around them. You may also pass
    custom_filter, a matrix with 1's at positions to mask out (None by
    default); a custom filter is padded with the same filter_distance
    border. If filter_natives is set AND a custom filter is provided, the
    two filters are used in conjunction.

    vmax is the strongest value on the colorbar (1 by default); pass None
    to use the maximum value in the map. If Return is True, the matplotlib
    image object is returned.
    """
    native_contacts, substructures = identify_native_substructures(native_file, d_cutoff=d_cutoff, plot=False)
    [distance_maps, PDB_files, filescores] = joblib.load(nonnatives_path)
    if np.shape(distance_maps)[2] > len(PDB_files):  # an extra page at the end of the maps holds mean inter-residue distances
        mean_distances = distance_maps[:, :, -1]
        distance_maps = distance_maps[:, :, 0:-1]
    mean_nonnatives = np.mean(distance_maps, axis=2)  # average over all snapshots
    NN = np.shape(mean_nonnatives)[0]
    if filter_natives or np.shape(custom_filter) != ():  # np.shape(None) == (), so this detects a provided array
        if filter_natives and np.shape(custom_filter) == ():
            Filter = cp.deepcopy(native_contacts)
        elif filter_natives and np.shape(custom_filter) != ():
            # Combine the native mask with the custom one, then re-binarize.
            Filter = cp.deepcopy(native_contacts) + custom_filter
            zz = np.zeros(np.shape(Filter))
            zz[np.where(Filter > 0)] = 1
            Filter = zz
        else:
            Filter = custom_filter
        for d in range(-filter_distance, filter_distance + 1):  # gets rid of register-shifted native contacts
            # Shift the mask by d along each axis, zeroing out the
            # wrapped-around rows/columns introduced by np.roll.
            im1_to_add = np.roll(Filter, d, axis=1)
            if d < 0:
                im1_to_add[:, d:] = 0
            else:
                im1_to_add[:, 0:d] = 0
            im2_to_add = np.roll(Filter, d, axis=0)
            if d < 0:
                im2_to_add[d:, :] = 0
            else:
                im2_to_add[0:d, :] = 0
            Filter = Filter + im1_to_add + im2_to_add
            Filter[np.where(Filter)] = 1  # keep the mask binary
        mean_nonnatives = np.multiply(mean_nonnatives, 1 - Filter)  # zero out everything the mask covers
    if vmax == None:
        vmax = np.max(mean_nonnatives)
    normalize = cccc.Normalize(vmin=0, vmax=vmax)
    if ax == None:
        fig, ax = plt.subplots()
    # Symmetrize before plotting so both triangles of the map are shown.
    if cmap != None:
        im = ax.imshow(mean_nonnatives + np.transpose(mean_nonnatives), cmap=cmap, norm=normalize, alpha=alpha, origin='upper')  # changed to this on 1/10/19
    else:
        im = ax.imshow(mean_nonnatives + np.transpose(mean_nonnatives), norm=normalize, alpha=alpha, origin='upper')  # changed to this on 1/10/19
    if cbar:
        cbar = plt.colorbar(im)
        cbar.ax.tick_params(labelsize=labelsize)
    ax.tick_params(labelsize=labelsize)
    ax.plot(np.arange(0, len(mean_nonnatives)), np.arange(0, len(mean_nonnatives)), color='gray', linestyle=':')  # diagonal guide line, added 1/10/19
    if Return: return im
def visualize_substructures( native_contacts, substructures, max_res = None, ax = None, labelsize = 30, fontsize = 30):
    """
    Visualize substructures on a contact map.

    Contacts belonging to substructure s are drawn with value s+1 (jet
    colormap) and annotated with the letter alphabet[s]; native contacts
    not assigned to any substructure are shown in gray; all non-contacts
    are NaN and appear white.

    History: edited 2/4/19 so substructures are labeled by letter rather
    than number, and unassigned contacts are visualized again; simplified
    2/10/2020, with unassigned contacts now shown in gray.

    Set max_res to something other than None to plot only up to that
    residue number (e.g. to depict which substructures can form when only
    the first 100 residues are synthesized).
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'  # per-substructure labels
    substructure_image = np.zeros(np.shape(native_contacts))
    native_contacts = native_contacts + np.transpose(native_contacts)  # symmetrize so both triangles are drawn
    unassigned_contacts = cp.deepcopy(native_contacts)
    for s in range(np.shape(substructures)[2]):
        # Page s gets value s+1 in the composite image (both triangles);
        # whatever it claims is removed from the unassigned set.
        substructure_image += (s + 1) * (substructures[:, :, s] + substructures[:, :, s].transpose())
        unassigned_contacts -= substructures[:, :, s] + substructures[:, :, s].transpose()
    substructure_image[substructure_image == 0] = np.nan  # set background to white
    colors = cm.get_cmap('jet')
    if ax == None: fig, ax = plt.subplots()
    ax.imshow(substructure_image, cmap='jet')
    ax.tick_params(labelsize=labelsize)
    for s in range(np.shape(substructures)[2]):
        # Annotate near the first contact of substructure s, in both triangles.
        y_pos = np.where(substructures[:, :, s])[0][0] + 4  # 2/4, decreased this from +6
        x_pos = np.where(substructures[:, :, s])[1][0] + 5  # 2/4, increased this from +5
        curr_color = colors((s) / (np.nanmax(substructure_image) - 1))
        ax.annotate('{}'.format(alphabet[s]), (x_pos, y_pos), fontsize=fontsize, color=curr_color)
        ax.annotate('{}'.format(alphabet[s]), (y_pos - 5, x_pos - 8), fontsize=fontsize, color=curr_color)
    nsubstructures = np.shape(substructures)[2]
    nbins = nsubstructures + 1  # number of colors shown; +1 accounts for unassigned contacts
    unassigned_contacts[unassigned_contacts == 0] = np.nan
    ax.imshow(unassigned_contacts, cmap='gray', alpha=0.5)  # unassigned native contacts in gray
    ax.plot(np.arange(0, len(native_contacts)), np.arange(0, len(native_contacts)), color='gray', linestyle=':')  # diagonal guide
    if max_res != None:
        ax.set_xlim((max_res, 0))
        ax.set_ylim((0, max_res))
| dbfold/analyze_structures.py | 22,219 | Input PDB file, plots contacts matrix
Radius of gyration...
much faster computation
min_seq_separation is minimum distnce the two residues must be apart in sequence for them to be counted
You can specify either of two modes:
1. 'binary': Returns 1 at positions where distance is less than or equal to thresh
2. 'distances': Returns inter-residue distance wherever this distances is less than or equal to thresh
Identifies substructures, then creates a pymol .pml script that draws those substructures as colored contacts directly on the pymol
Ex. Create_substructure_PML('MARR_umbrella3/marr_0.100_Emin.pdb', ['a','b','c','d','e','f'], 7.8, 7, 3)
You can also pre-enter the substructures as an optional argument
Otherwise, it will compute the substrucutres using PDB_path and save the file as PDB_path but with .pml instead of .pdb
You can optinally input the sequence of colors you want to use to paint the substructures (using the fancy British spelling colours)
Otherwise, it will color things automatically using the usual default sequence
That optional argument, if used, needs to have len equal to thhat of subs_to_plot: one color per substructure to plot
finds all native contacts from native PDB file
Identify substructures within native file contact map
Using the following strategy
We produce a contact map which is a bunch of dots
Contacts correspond to pairs of residues that are less than d_cutoff apart
6 Angstroms is generally a good value, but may want a larger value for helical proteins where residues interact primarily
via sidechains, and thus the alpha carbons are further apart
We only count contacts if the residues are separated by min_seq_separation along the primary sequence
We set min_seq_separation relatively high because we don't care to include intra-helix contacts within our contact map
Ce can calculate the "Manhattan" distnace between every pair of dots on that contact map
and build a graph of contacts in which two contacts are connected if they are less than some
threshold distance, contact_sep_thresh, apart in the contact map
Then, we find all connected components of this graph, each of which is a substructure
But we only keep substructures whose number of residues is at least min_clustersize, to avoid sparse contacts here and there that we dont' care about
Gives you option to input native contacts a priori, but by defualt you don't do this (value is set to None)
You set Max_res to something other than None if you wish to plot only up to a certain residue number (ex. to depict what substructures can be formed when the first 100 AAs are synthesized)
extracts coordinates for some side chain atom in some PDB file
For instance, atom will have value 'CA' if you care about the alpha carbons
TODO: Fix this so it can deal with chain labels
Right now if the PDB has a chain label in the fifth column, this will give nonsense results
Assigns a set of scores for a snapshot
the ith score tells you what is the average distnace between pairs of residues residues that participate in the ith substructure, in this snapshto
If the score is close to the characteristic contact distnace, then the substructure should be mostly formed
Reads a file of the form Distance_maps.dat and makes a contact map of nonnative contacts with shading according to frequency with whcih
that contact is observed
d_cutoff is distance cutoff with which you identify NATIVE structures to subtract off from the nonnatives...sholud be
the same as whatever was used to identify the nonnatives
if filter_natives, then we ignore the native contacts, as well as a border around them given by filter_distance
You also have the option to enter a Custom filter, which is a matrix of 1's at positions where you want to filter out the contact map...by default this is off and set to none
Note that if a custom_filter is used, you still pad that filter with a border given by filter_distance
If both filter_natives is set to and and you provide a custom filter, then the two filters are used in conjunction
By the way, the variable vmax says what is the strongest value in the colorbar
By default, it's 1, but you can also set it to None in which case it becomes the maximum value in the map
Visualizes substructures as follows
Everything that is a native contact but not part of any substructure will have value -1 on shown image
(Update 10/1/18, actually will only show contacts that are part of substructures)
Meanwhile, everything that is part of substructure i (i ranges from 0 to N_substructures-1) will have value i
Finally, all non-contacts will just be Nans and appear white
Edited this on 2/4/19 so that substructures are labeled by letter rather than number
Also reinstated the feature that contacts unassigned to substructures are visualized
On 2/10/2020, Changed a bit how the script work
Made it a bit simpler
Also made it so unassigned contacts are now shown in gray
Created on Fri Mar 24 17:59:20 2017
@author: amirbitran
Various functions that serve to compute the contacts matrix for a series of PDB snapshots
-*- coding: utf-8 -*--min_seq_separation enures that we do not count residues that are closer than min_seq_separation we get a contact map with a min seq separation larger than usual to avoid heliceswhich residues participate in contactshow far is each contact from each other contact?To find connected components, I use my loopCluster function by feeding in the positions ofr the contacts instead of the "files",as well as above matrix M as d_contactspairs in substructures is a list of sublists, each of which correspodns to a given substructureWithin a given sublist, there are a bunch of pairs which tell you which pairs of residues belong to that substructureThe above is in a messy form, so we convert it into a form that allows for numpy indexing,where we have a list of sublists, each sublist contains two arrays, the first of which gives the first indices for the interacting residuespairs_in_substructures=[[np.array(C)[:,0], np.array(C)[:,1]] for C in pairs_in_substructures]we now produce a set of matrices...the ith page tells you which contacts belong to the ith substructureprint(positions)-min_seq_separation enures that we do not count residues that are closer than min_seq_separationprint(line)So long as the current residue is not a glycine, we append the coordinate for the carbon of interest first, we debug an error that sometimes happensoccasionally, when the y coordinate has a negative sign and three digits or more (ex. -100), the tab between the x and y components dissappears and entry [6] mergies into entry [5] (ex. -50-100)we ignore the first element of enries 6 in case it is a - sign--we don't care about that oneoccasionally, when the z coordinate has a negative sign and three digits or more (ex. -100), the tab between the z and y components dissappears and entry [7] mergies into entry [6] (ex. 
-50-100)we ignore the first element of enries 6 in case it is a - sign--we don't care about that oneBut if the current residue is a glycine, we can only append the alpha carbon since there is no side chainfirst, we debug an error that sometimes happensoccasionally, when the y coordinate has a negative sign and three digits or more (ex. -100), the tab between the x and y components dissappears and entry [6] mergies into entry [5] (ex. -50-100)we ignore the first element of enries 6 in case it is a - sign--we don't care about that oneoccasionally, when the z coordinate has a negative sign and three digits or more (ex. -100), the tab between the z and y components dissappears and entry [7] mergies into entry [6] (ex. -50-100)we ignore the first element of enries 6 in case it is a - sign--we don't care about that oneWe are applying substructures from a smaller protein to analyze a larger protein, so we only keep the part of the larger protein that is encompassed by these substructuresgives the overlap between native substrucutres and this snapshot's contactsthere is an extra page attached to end of the distance maps that tells you mean distances between residuesplt.figure()plt.imshow(Filter)gets rid of register-shifted native contactsplt.figure()plt.imshow(Filter)if filter_natives: mean_nonnatives=np.multiply(mean_nonnatives, 1 - native_contacts)Commented all this out September 3 2019if cmap != 'Greys': for i in range(NN): for j in range(NN): if mean_nonnatives[i,j]==0: mean_nonnatives[i,j] = np.nan makes points without any contact probability show up as white rather than peach redim = ax.imshow(mean_nonnatives, cmap=cmap, norm = normalize, alpha = alpha, origin = 'lower')changed to this on 1/10/19im = ax.imshow(mean_nonnatives, norm = normalize, alpha = alpha, origin = 'lower')changed to this on 1/10/19im.set_clim((0, vmax))added 1/10/19substructure_image+=(s+1)*substructures[:,:,s] Set background to whiteim[im<0]=np.nan 10/1 
im[np.diag_indices(len(native_contacts))]=0 ax.imshow(im, cmap='jet')Let's annotatey_pos=np.where(substructures[:,:,s])[0][0]-32/4, decreased this from +62/4, increased this from +5curr_color=colors((s+1)/(np.max(substructure_image) ))print(np.max(substructure_image)-1)number of colors we are showing...add 1 to accoutn for unassigned contacts | 9,260 | en | 0.887723 |
import os

from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, reverse
from django.views.decorators.csrf import csrf_exempt
from twilio.rest import Client

from conversations.models import Conversation, Message
from .models import TwilioConfig, PhoneOwnership
# @validate_twilio_request
@csrf_exempt
def receive(request):
    """
    Twilio inbound-SMS webhook.

    Looks up (or creates) the Conversation for the (From, To) number pair,
    records the message body on it, and answers with a plain-text status.
    """
    if request.method != "POST":
        return HttpResponse("Method not allowed", status=405)

    sender = request.POST['From']
    recipient = request.POST['To']
    body = request.POST['Body']

    conversation = Conversation.objects.filter(external=sender, internal=recipient).first()
    if conversation is None:
        owner = PhoneOwnership.objects.filter(number=recipient).first()
        if owner is None:
            # Nobody on this service owns the receiving number.
            return HttpResponse("Bad request: phone number not owned", status=400)
        conversation = Conversation(external=sender, internal=recipient, user=owner.user)
        conversation.save()

    # Persist the inbound (outward=False) message on the conversation.
    Message(msg_content=body, outward=False, conversation=conversation).save()
    return HttpResponse("Message received", status=200)
def updateNumbers(request):
    """
    Re-sync the user's Twilio phone numbers into PhoneOwnership.

    Fetches all incoming phone numbers from the user's Twilio account,
    points each number's SMS webhook at this app's /twilio/receive/
    endpoint, and rebuilds the user's PhoneOwnership rows from scratch.
    Renders the configuration page when done.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('account:login'))
    # Assumes a Twilio configuration was already saved for this user;
    # raises IndexError otherwise.
    config = TwilioConfig.objects.filter(user=request.user)[0]
    client = Client(config.sid, config.token)
    number_list = client.incoming_phone_numbers.list()
    # Drop all previously recorded numbers; they are recreated below from
    # Twilio's authoritative answer.
    PhoneOwnership.objects.filter(user=request.user).delete()
    for number in number_list:
        # Set the webhook for the phone number
        incoming_phone_number = client.incoming_phone_numbers(number.sid).update(sms_url='https://hackaway.software/twilio/receive/')
        phone = PhoneOwnership(user=request.user, number=number.phone_number)
        phone.save()
    context = {
        'config': config
    }
    return render(request, 'configure.html', context)
def configure(request):
    """
    Show and save the user's Twilio account configuration (SID + token).

    GET renders the form with the existing configuration, if any. POST
    creates or updates the TwilioConfig row for the current user and then
    delegates to updateNumbers to refresh the owned phone numbers.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('account:login'))
    config = None
    configs = TwilioConfig.objects.filter(user=request.user)
    if configs:  # if some items are found in the database
        config = TwilioConfig.objects.filter(user=request.user)[0]
    if request.method == "POST":
        sid = request.POST['sid']
        token = request.POST['token']
        # incoming_phone_number = client.incoming_phone_numbers.create(
        #     sms_url='https://hackaway.software/twilio/receive',
        #     phone_number='+447700153842'
        # )
        # Obtain information
        # incoming_phone_number = client.incoming_phone_numbers.create(phone_number='+447700153842')
        # print(incoming_phone_number.sid)
        configs = TwilioConfig.objects.filter(user=request.user)
        if configs:  # if some items are found in the database, update in place
            configs.update(sid=sid, token=token)
            config = configs[0]
        else:
            config = TwilioConfig(sid=sid, token=token, user=request.user)
            config.save()
        # Immediately re-sync the owned numbers with the new credentials.
        return updateNumbers(request)
    context = {
        'config': config
    }
    return render(request, 'configure.html', context)
def obtain_number(request):
    """
    Let the user purchase a Twilio phone number.

    GET lists available GB local numbers; POST buys the selected number and
    re-syncs the user's owned numbers. Users without a saved Twilio
    configuration are redirected to the configuration page.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect(reverse('account:login'))

    configs = TwilioConfig.objects.filter(user=request.user)
    if not configs:
        # No credentials on file yet; nothing can be purchased.
        return HttpResponseRedirect(reverse('twilioconfig:configure'))

    config = configs[0]
    client = Client(config.sid, config.token)
    if request.method == "POST":
        # NOTE: the documented 'description' option does not work here.
        client.incoming_phone_numbers.create(phone_number=request.POST['number'])
        return updateNumbers(request)

    available = client.available_phone_numbers('GB').local.list(contains='+44', limit=20)
    return render(request, 'obtain.html', {'options': available})
| twilioconfig/views.py | 4,382 | @validate_twilio_request store message Set the webhook for the phone number if some items are found in the database incoming_phone_number = client.incoming_phone_numbers.create( sms_url='https://hackaway.software/twilio/receive', phone_number='+447700153842' ) Obtain information incoming_phone_number = client.incoming_phone_numbers.create(phone_number='+447700153842') print(incoming_phone_number.sid) if some items are found in the database if some items are found in the database description solution in the documentation does not work. | 594 | en | 0.707313 |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import urllib.parse
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import requests
from airbyte_cdk.sources.streams.http import HttpStream
class CartStream(HttpStream, ABC):
    """Base stream for the cart.com REST API."""

    primary_key = "id"

    def __init__(self, start_date: str, store_name: str, end_date: str = None, **kwargs):
        self._start_date = start_date
        self._end_date = end_date
        self.store_name = store_name
        super().__init__(**kwargs)

    @property
    def url_base(self) -> str:
        # Every endpoint lives under the store's own domain.
        return "https://{}/api/v1/".format(self.store_name)

    @property
    def data_field(self) -> str:
        """
        Name of the response field containing the records.

        When this is empty or None, self.name is used instead.
        """
        return None

    def path(self, **kwargs) -> str:
        return self.name

    def backoff_time(self, response: requests.Response) -> Optional[float]:
        """
        Honor the Retry-After header. It is only present on 429 responses,
        so no status-code check is needed.
        """
        retry_after = response.headers.get("Retry-After")
        return float(retry_after) if retry_after else None

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        next_page = response.json().get("next_page")
        if not next_page:
            return None
        # The API returns a full URL; reuse its query string as the request
        # parameters for the next page.
        query = urllib.parse.urlsplit(next_page).query
        return dict(urllib.parse.parse_qsl(query))

    def request_headers(self, **kwargs) -> Mapping[str, Any]:
        return {"Cache-Control": "no-cache", "Content-Type": "application/json"}

    def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:
        payload = response.json()
        for record in payload.get(self.data_field or self.name, []):
            yield record

    def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
    ) -> MutableMapping[str, Any]:
        params = {"count": 100}
        params.update(next_page_token or {})
        return params
class IncrementalCartStream(CartStream, ABC):
    """Base class for streams that sync incrementally on updated_at."""

    state_checkpoint_interval = 1000
    cursor_field = "updated_at"

    def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
        """
        Generates a query for incremental logic.

        Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md
        """
        params = super().request_params(stream_state=stream_state, **kwargs)
        cursor_value = stream_state.get(self.cursor_field) or self._start_date
        params["sort"] = self.cursor_field
        # Never query earlier than the configured start date, even if the
        # saved state is older.
        start_date = max(cursor_value, self._start_date)
        query = f"gt:{start_date}"
        if self._end_date and self._end_date > start_date:
            query += f" AND lt:{self._end_date}"
        params[self.cursor_field] = query
        return params

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        """
        Return the latest state by comparing the cursor value in the latest
        record with the stream's most recent state object and returning an
        updated state object.

        Records without a cursor value no longer break the comparison: the
        previous implementation evaluated max(None, current_state) and
        raised a TypeError whenever the latest record lacked the cursor
        field while state was already set.
        """
        latest_state = latest_record.get(self.cursor_field)
        current_state = current_stream_state.get(self.cursor_field)
        candidates = [value for value in (latest_state, current_state) if value is not None]
        if not candidates:
            return {}
        return {self.cursor_field: max(candidates)}
class CustomersCart(IncrementalCartStream):
    """
    Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1customers/get
    """

    # The endpoint path differs from the stream name, so both the response
    # field and the URL path are overridden to "customers".
    data_field = "customers"

    def path(self, **kwargs) -> str:
        return self.data_field
class Orders(IncrementalCartStream):
    """
    Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1orders/get
    """
    # Inherits everything: endpoint path and response field default to the
    # stream name ("orders").
class OrderPayments(IncrementalCartStream):
    """
    Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1order_payments/get
    """

    # The response wraps records in a "payments" field rather than the
    # stream name.
    data_field = "payments"
class OrderItems(IncrementalCartStream):
    """
    Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1order_items/get
    """

    # The response wraps records in an "items" field rather than the
    # stream name.
    data_field = "items"
class Products(IncrementalCartStream):
    """
    Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1products/get
    """
    # Inherits everything: endpoint path and response field default to the
    # stream name ("products").
class Addresses(IncrementalCartStream):
    """
    Docs: https://developers.cart.com/docs/rest-api/b3A6MjMzMTc3Njc-get-addresses
    """
    # Inherits everything: endpoint path and response field default to the
    # stream name ("addresses").
| airbyte-integrations/connectors/source-cart/source_cart/streams.py | 4,787 | Docs: https://developers.cart.com/docs/rest-api/b3A6MjMzMTc3Njc-get-addresses
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1customers/get
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1order_items/get
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1order_payments/get
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1orders/get
Docs: https://developers.cart.com/docs/rest-api/restapi.json/paths/~1products/get
We dont need to check the response.status_code == 429 since this header exists only in this case.
Field of the response containing data.
By default the value self.name will be used if this property is empty or None
Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object.
Generates a query for incremental logic
Docs: https://developers.cart.com/docs/rest-api/docs/query_syntax.md
Copyright (c) 2021 Airbyte, Inc., all rights reserved. | 1,034 | en | 0.670341 |
# coding:utf-8
# usr/bin/python3
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
"""
Class Chapter28_1
Class Chapter28_2
Class Chapter28_3
Class Chapter28_4
Class Chapter28_5
"""
from __future__ import absolute_import, division, print_function
import numpy as np
class Chapter28_1:
    """
    Notes for CLRS section 28.1: properties of matrices.
    """

    def __init__(self):
        pass

    def note(self):
        """
        Print the chapter 28.1 study notes (matrix properties, theorems
        28.1-28.6, and exercises 28.1-1 through 28.1-11) to stdout.

        Example
        ====
        ```python
        Chapter28_1().note()
        ```
        """
        print('chapter28.1 note as follow')
        print('28.1 矩阵的性质')
        print('矩阵运算在科学计算中非常重要')
        print('矩阵是数字的一个矩阵阵列,在python中使用np.matrix[[1,2],[3,4]]')
        print('矩阵和向量')
        print('单位矩阵')
        print('零矩阵')
        print('对角矩阵')
        print('三对角矩阵')
        print('上三角矩阵')
        print('下三角矩阵')
        print('置换矩阵')
        print('对称矩阵')
        print('矩阵乘法满足结合律,矩阵乘法对假发满足分配律')
        print('矩阵的F范数和2范数')
        print('向量的2范数')
        print('矩阵的逆,秩和行列式')
        print('定理28.1 一个方阵满秩当且仅当它为非奇异矩阵')
        print('定理28.2 当且仅当A无空向量,矩阵A列满秩')
        print('定理28.3 当且仅当A具有空向量时,方阵A是奇异的')
        print('定理28.4 (行列式的性质) 方阵A的行列式具有如下的性质')
        print(' 如果A的任何行或者列的元素为0,则det(A)=0')
        print(' 用常数l乘A的行列式任意一行(或任意一列)的各元素,等于用l乘A的行列式')
        print(' A的行列式的值与其转置矩阵A^T的行列式的值相等')
        print(' 行列式的任意两行(或者两列)互换,则其值异号')
        print('定理28.5 当且仅当det(A)=0,一个n*n方阵A是奇异的')
        print('正定矩阵')
        print('定理28.6 对任意列满秩矩阵A,矩阵A\'A是正定的')
        print('练习28.1-1 证明:如果A和B是n*n对称矩阵,则A+B和A-B也是对称的')
        print('练习28.1-2 证明:(AB)\'=B\'A\',而且AA\'总是一个对称矩阵')
        print('练习28.1-3 证明:矩阵的逆是唯一的,即如果B和C都是A的逆矩阵,则B=C')
        print('练习28.1-4 证明:两个下三角矩阵的乘积仍然是一个下三角矩阵.',
              '证明:一个下三角(或者上三角矩阵)矩阵的行列式的值是其对角线上的元素之积',
              '证明:一个下三角矩阵如果存在逆矩阵,则逆矩阵也是一个下三角矩阵')
        print('练习28.1-5 证明:如果P是一个n*n置换矩阵,A是一个n*n矩阵,则可以把A的各行进行置换得到PA',
              '而把A的各列进行置换可得到AP。证明:两个置换矩阵的乘积仍然是一个置换矩阵',
              '证明:如果P是一个置换矩阵,则P是可逆矩阵,其逆矩阵是P^T,且P^T也是一个置换矩阵')
        print('练习28.1-6 设A和B是n*n矩阵,且有AB=I.证明:如果把A的第j行加到第i行而得到A‘',
              '则可以通过把B的第j列减去第i列而获得A’的逆矩阵B‘')
        print('练习28.1-7 设A是一个非奇异的n*n复数矩阵.证明:当且仅当A的每个元素都是实数时,',
              'A-1的每个元素都是实数')
        print('练习28.1-8 证明:如果A是一个n*n阶非奇异的对称矩阵,则A-1也是一个对称矩阵.',
              '证明:如果B是一个任意的m*n矩阵,则由乘积BAB^T给出的m*m矩阵是对称的')
        print('练习28.1-9 证明定理28.2。亦即,证明如果矩阵A为列满秩当且仅当若Ax=0,则说明x=0')
        print('练习28.1-10 证明:对任意两个相容矩阵A和B,rank(AB)<=min(rank(A),rank(B))',
              '其中等号仅当A或B是非奇异方阵时成立.(利用矩阵秩的另一种等价定义)')
        print('练习28.1-11 已知数x0,x1,...,xn-1,证明范德蒙德(Vandermonde)矩阵的行列式表达式')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
class Chapter28_2:
    """
    Notes for CLRS section 28.2: Strassen's algorithm for matrix
    multiplication.
    """

    def __init__(self):
        pass

    def note(self):
        """
        Print the chapter 28.2 study notes (Strassen's divide-and-conquer
        matrix multiplication and exercises 28.2-1 through 28.2-6) to
        stdout; also demonstrates a 2x2 matrix product with numpy.

        Example
        ====
        ```python
        Chapter28_2().note()
        ```
        """
        print('chapter28.2 note as follow')
        print('28.2 矩阵乘法的Strassen算法')
        print('两个n*n矩阵乘积的著名的Strassen递归算法,其运行时间为Θ(n^lg7)=Θ(n^2.81)')
        print('对足够大的n,该算法在性能上超过了在25.1节中介绍的运行时间为Θ(n^3)的简易矩阵乘法算法MATRIX-MULTIPLY')
        print('算法概述')
        print(' Strassen算法可以看作是熟知的一种设计技巧--分治法的一种应用')
        print(' 假设希望计算乘积C=AB,其中A、B和C都是n*n方阵.假定n是2的幂,把A、B和C都划分为四个n/2*n/2矩阵')
        print(' 然后作分块矩阵乘法,可以得到递归式T(n)=8T(n/2)+Θ(n^2),但是T(n)=Θ(n^3)')
        print('Strassen发现了另外一种不同的递归方法,该方法只需要执行7次递归的n/2*n/2的矩阵乘法运算和Θ(n^2)次标量加法与减法运算')
        print('从而可以得到递归式T(n)=7T(n/2)+Θ(n^2),但是T(n)=Θ(n^2.81)')
        print('Strassen方法分为以下四个步骤')
        print(' 1) 把输入矩阵A和B划分为n/2*n/2的子矩阵')
        print(' 2) 运用Θ(n^2)次标量加法与减法运算,计算出14个n/2*n/2的矩阵A1,B1,A2,B2,...,A7,B7')
        print(' 3) 递归计算出7个矩阵的乘积Pi=AiBi,i=1,2,...,7')
        print(' 4) 仅使用Θ(n^2)次标量加法与减法运算,对Pi矩阵的各种组合进行求和或求差运算,',
              '从而获得结果矩阵C的四个子矩阵r,s,t,u')
        print('从实用的观点看,Strassen方法通常不是矩阵乘法所选择的方法')
        print(' 1) 在Strassen算法的运行时间中,隐含的常数因子比简单的Θ(n^3)方法中的常数因子要大')
        print(' 2) 当矩阵是稀疏的时候,为系数矩阵设计的方法更快')
        print(' 3) Strassen算法不像简单方法那样具有数值稳定性')
        print(' 4) 在递归层次中生成的子矩阵要消耗空间')
        # ! The key to Strassen's method is divide-and-conquer recursion on the matrix product
        print('练习28.2-1 运用Strassen算法计算矩阵的乘积')
        print('矩阵的乘积为:')
        print(np.matrix([[1, 3], [5, 7]]) * np.matrix([[8, 4], [6, 2]]))
        print('练习28.2-2 如果n不是2的整数幂,应该如何修改Strassen算法,求出两个n*n矩阵的乘积',
              '证明修改后的算法的运行时间为Θ(n^lg7)')
        print('练习28.2-3 如果使用k次乘法(假定乘法不满足交换律)就能计算出两个3*3矩阵的乘积',
              '就能在o(n^lg7)时间内计算出两个n*n矩阵的乘积,满足上述条件的最大的k值是多少')
        print('练习28.2-4 V.Pan发现了一种使用132464次乘法的求68*68矩阵乘积的方法',
              '一种使用143640次乘法的求70*70矩阵乘积的方法',
              '一种使用155424次乘法的求72*72矩阵乘积的方法')
        print('练习28.2-5 用Strassen算法算法作为子程序,能在多长时间内计算出一个kn*n矩阵与一个n*kn矩阵的乘积')
        print('练习28.2-6 说明如何仅用三次实数乘法运算,就可以计复数a+bi与c+di的乘积.该算法应该把a,b,c和d作为输入,',
              '并分别生成实部ac-bd和虚部ad+bc的值')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
class Chapter28_3:
    """
    chapter28.3 note and function: solving linear systems via LUP decomposition.
    """
    def __init__(self):
        pass
    def note(self):
        '''
        Summary
        ====
        Print chapter28.3 note
        Example
        ====
        ```python
        Chapter28_3().note()
        ```
        '''
        print('chapter28.3 note as follow')
        print('28.3 求解线性方程组')
        print('对一组同时成立的线性方程组Ax=b求解时很多应用中都会出现的基本问题。一个线性系统可以表述为一个矩阵方程',
            '其中每个矩阵或者向量元素都属于一个域,如果实数域R')
        print('LUP分解求解线性方程组')
        print('LUP分解的思想就是找出三个n*n矩阵L,U和P,满足PA=LU')
        print(' 其中L是一个单位下三角矩阵,U是一个上三角矩阵,P是一个置换矩阵')
        print('每一个非奇异矩阵A都有这样一种分解')
        print('对矩阵A进行LUP分解的优点是当相应矩阵为三角矩阵(如矩阵L和U),更容易求解线性系统')
        print('在计算出A的LUP分解后,就可以用如下方式对三角线性系统进行求解,也就获得了Ax=b的解')
        print('对Ax=b的两边同时乘以P,就得到等价的方程组PAx=Pb,得到LUx=Pb')
        print('正向替换与逆向替换')
        print(' 如果已知L,P和b,用正向替换可以在Θ(n^2)的时间内求解下三角线性系统',
            '用一个数组pi[1..n]来表示置换P')
        print('LU分解的计算')
        print(' 把执行LU分解的过程称为高斯消元法.先从其他方程中减去第一个方程的倍数',
            '以便把那些方程中的第一个变量消去')
        print(' 继续上述过程,直至系统变为一个上三角矩阵形式,这个矩阵都是U.矩阵L是由使得变量被消去的行的乘数所组成')
        print('LUP分解的计算')
        print(' 一般情况下,为了求线性方程组Ax=b的解,必须在A的非对角线元素中选主元以避免除数为0',
            '除数不仅不能为0,也不能很小(即使A是非奇异的),否则就会在计算中导致数值不稳定.因此,所选的主元必须是一个较大的值')
        print(' LUP分解的数学基础与LU分解相似。已知一个n*n非奇异矩阵A,并希望计算出一个置换矩阵P,一个单位下三角矩阵L和一个上三角矩阵U,并满足条件PA=LU')
        print('练习28.3-1 运用正向替换法求解下列方程组')
        print('练习28.3-2 求出下列矩阵的LU分解')
        print('练习28.3-3 运用LUP分解来求解下列方程组')
        print('练习28.3-4 试描述一个对角矩阵的LUP分解')
        print('练习28.3-5 试描述一个置换矩阵A的LUP分解,并证明它是唯一的')
        print('练习28.3-6 证明:对所有n>=1,存在具有LU分解的奇异的n*n矩阵')
        print('练习28.3-7 在LU-DECOMPOSITION中,当k=n时是否有必要执行最外层的for循环迭代?',
            '在LUP-DECOMPOSITION中的情况又是怎样?')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
class Chapter28_4:
    """
    chapter28.4 note and function: matrix inversion and its equivalence to multiplication.
    """
    def note(self):
        """
        Summary
        ====
        Print chapter28.4 note
        Example
        ====
        ```python
        Chapter28_4().note()
        ```
        """
        print('chapter28.4 note as follow')
        print('28.4 矩阵求逆')
        print('在实际应用中,一般并不使用逆矩阵来求解线性方程组的解,而是运用一些更具数值稳定性的技术,如LUP分解求解线性方程组')
        print('但是,有时仍然需要计算一个矩阵的逆矩阵.可以利用LUP分解来计算逆矩阵')
        print('此外,还将证明矩阵乘法和计算逆矩阵问题是具有相同难度的两个问题,即(在技术条件限制下)可以使用一个算法在相同渐进时间内解决另外一个问题')
        print('可以使用Strassen矩阵乘法算法来求一个矩阵的逆')
        print('确实,正是由于要证明可以用比通常的办法更快的算法来求解线性方程组,才推动了最初的Strassen算法的产生')
        print('根据LUP分解计算逆矩阵')
        print(' 假设有一个矩阵A的LUP分解,包括三个矩阵L,U,P,并满足PA=LU')
        print(' 如果运用LU-SOLVE,则可以在Θ(n^2)的运行时间内,求出形如Ax=b的线性系统的解')
        print(' 由于LUP分解仅取决于A而不取决于b,所以就能够再用Θ(n^2)的运行时间,求出形如Ax=b\'的另一个线性方程组的解')
        print(' 一般地,一旦得到了A的LUP分解,就可以在Θ(kn^2)的运行时间内,求出k个形如Ax=b的线性方程组的解,这k个方程组只有b不相同')
        print('矩阵乘法与逆矩阵')
        print(' 对矩阵乘法可以获得理论上的加速,可以相应地加速求逆矩阵的运算')
        print(' 从下面的意义上说,求逆矩阵运算等价于矩阵乘法运算',
            '如果M(n)表示求两个n*n矩阵乘积所需要的时间,则有在O(M(n))时间内对一个n*n矩阵求逆的方法',
            '如果I(n)表示对一个非奇异的n*n矩阵求逆所需的时间,则有在O(I(n))时间内对两个n*n矩阵相乘的方法')
        print('定理28.7 (矩阵乘法不比求逆矩阵困难) 如果能在I(n)时间内求出一个n*n矩阵的逆矩阵',
            '其中I(n)=Ω(n^2)且满足正则条件I(3n)=O(I(n))时间内求出两个n*n矩阵的乘积')
        print('定理28.8 (求逆矩阵运算并不比矩阵乘法运算更困难) 如果能在M(n)的时间内计算出两个n*n实矩阵的乘积',
            '其中M(n)=Ω(n^2)且M(n)满足两个正则条件:对任意的0<=k<=n有M(n+k)=O(M(n)),以及对某个常数c<1/2有M(n/2)<=cM(n)',
            '则可以在O(M(n))时间内求出任何一个n*n非奇异实矩阵的逆矩阵')
        print('练习28.4-1 设M(n)是求n*n矩阵的乘积所需的时间,S(n)表示求n*n矩阵的平方所需时间',
            '证明:求矩阵乘积运算与求矩阵平方运算实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵平方算法,',
            '一个S(n)时间的矩阵平方算法蕴含着一个O(S(n))时间的矩阵相乘算法')
        print('练习28.4-2 设M(n)是求n*n矩阵乘积所需的时间,L(n)为计算一个n*n矩阵的LUP分解所需要的时间',
            '证明:求矩阵乘积运算与计算矩阵LUP分解实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的矩阵LUP分解算法',
            '一个L(n)时间的矩阵LUP分解算法蕴含着一个O(L(n))时间的矩阵相乘算法')
        print('练习28.4-3 设M(n)是求n*n矩阵的乘积所需的时间,D(n)表示求n*n矩阵的行列式的值所需要的时间',
            '证明:求矩阵乘积运算与求行列式的值实质上难度相同:一个M(n)时间的矩阵相乘算法蕴含着一个O(M(n))时间的行列式算法',
            '一个D(n)时间的行列式算法蕴含着一个O(D(n)时间的矩阵相乘算法')
        print('练习28.4-4 设M(n)是求n*n布尔矩阵的乘积所需的时间,T(n)为找出n*n布尔矩阵的传递闭包所需要的时间',
            '证明:一个M(n)时间的布尔矩阵相乘算法蕴含着一个O(M(n)lgn)时间的传递闭包算法,一个T(n)时间的传递闭包算法蕴含着一个O(T(n))时间的布尔矩阵相乘算法')
        print('练习28.4-5 当矩阵元素属于整数模2所构成的域时,基于定理28.8的求逆矩阵算法的是否能够运行?')
        print('练习28.4-6 推广基于定理28.8的求逆矩阵算法,使之能处理复矩阵的情形,并证明所给出的推广方法是正确的')
        print(' 提示:用A的共轭转置矩阵A*来代替A的转置矩阵A^T,把A^T中的每个元素用其共轭复数代替就得到A*,也就是Hermitian转置')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
class Chapter28_5:
    """
    chapter28.5 note and function: symmetric positive-definite matrices and
    least-squares approximation.
    """
    def note(self):
        """
        Summary
        ====
        Print chapter28.5 note
        Example
        ====
        ```python
        Chapter28_5().note()
        ```
        """
        print('chapter28.5 note as follow')
        print('28.5 对称正定矩阵与最小二乘逼近')
        print('对称正定矩阵有许多有趣而很理想的性质。例如,它们都是非奇异矩阵,并且可以对其进行LU分解而无需担心出现除数为0的情况')
        print('引理28.9 任意对称矩阵都是非奇异矩阵')
        print('引理28.10 如果A是一个对称正定矩阵,则A的每一个主子式都是对称正定的')
        print('设A是一个对称正定矩阵,Ak是A的k*k主子式,矩阵A关于Ak的Schur补定义为S=C-BAk^-1B^T')
        print('引理28.11 (Schur补定理) 如果A是一个对称正定矩阵,Ak是A的k*k主子式.则A关于Ak的Schur补也是对称正定的')
        print('推论28.12 对一个对称正定矩阵进行LU分解不会出现除数为0的情形')
        print('最小二乘逼近')
        print('对给定一组数据的点进行曲线拟合是对称正定矩阵的一个重要应用,假定给定m个数据点(x1,y1),(x2,y2),...,(xm,ym)',
            '其中已知yi受到测量误差的影响。希望找出一个函数F(x),满足对i=1,2,...,m,有yi=F(xi)+qi')
        print('其中近似误差qi是很小的,函数F(x)的形式依赖于所遇到的问题,在此,假定它的形式为线性加权和F(x)=∑cifi(x)')
        print('其中和项的个数和特定的基函数fi取决于对问题的了解,一种选择是fi(x)=x^j-1,这说明F(x)是一个x的n-1次多项式')
        print('这样一个高次函数F尽管容易处理数据,但也容易对数据产生干扰,并且一般在对未预见到的x预测其相应的y值时,其精确性也是很差的')
        print('为了使逼近误差最小,选定使误差向量q的范数最小,就得到一个最小二乘解')
        print('统计学中正态方程A^TAc=A^Ty')
        print('伪逆矩阵A+=(A^TA)^-1A^T')
        print('练习28.5-1 证明:对称正定矩阵的对角线上每一个元素都是正值')
        print('练习28.5-2 设A=[[a,b],[b,c]]是一个2*2对称正定矩阵,证明其行列式的值ac-b^2是正的')
        print('练习28.5-3 证明:一个对称正定矩阵中值最大的元素处于其对角线上')
        print('练习28.5-4 证明:一个对称正定矩阵的每一个主子式的行列式的值都是正的')
        print('练习28.5-5 设Ak表示对称正定矩阵A的第k个主子式。证明在LU分解中,det(Ak)/det(Ak-1)是第k个主元,为方便起见,设det(A0)=1')
        print('练习28.5-6 最小二乘法求')
        print('练习28.5-7 证明:伪逆矩阵A+满足下列四个等式:')
        print(' AA^+A=A')
        print(' A^+AA^+=A^+')
        print(' (AA^+)^T=AA^+')
        print(' (A^+A)^T=A^+A')
        print('思考题28-1 三对角线性方程组')
        print(' 1) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LU分解可以在O(n)的时间内求出方程Ax=b的解',
            '论证在最坏情况下,从渐进意义上看,基于求出A^-1的任何方法都要花费更多的时间')
        # BUG FIX: the original joined the two arguments below with '<' instead
        # of ',', so it printed the boolean result of a string comparison
        # instead of the note text.
        print(' 2) 证明:对任意的n*n对称正定的三对角矩阵和任意n维向量b,通过进行LUP分解,',
            '可以在O(n)的时间内求出方程Ax=b的解')
        print('思考题28-2 三次样条插值')
        print(' 将一个曲线拟合为n个三次多项式组成')
        print(' 用自然三次样条可以在O(n)时间内对一组n+1个点-值对进行插值')
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
# Module-level singletons, one per chapter-28 section; used by printchapter28note().
chapter28_1 = Chapter28_1()
chapter28_2 = Chapter28_2()
chapter28_3 = Chapter28_3()
chapter28_4 = Chapter28_4()
chapter28_5 = Chapter28_5()
def printchapter28note():
    """
    Print every section note of chapter 28, in order.
    """
    print('Run main : single chapter twenty-eight!')
    for section in (chapter28_1, chapter28_2, chapter28_3,
                    chapter28_4, chapter28_5):
        section.note()
# python src/chapter28/chapter28note.py
# python3 src/chapter28/chapter28note.py
if __name__ == '__main__':
    # Print all chapter 28 notes when executed as a script.
    printchapter28note()
else:
    pass
| src/chapter28/chapter28note.py | 20,506 | chapter28.1 note and function
chapter28.2 note and function
chapter28.3 note and function
chapter28.4 note and function
chapter28.5 note and function
Summary
====
Print chapter28.1 note
Example
====
```python
Chapter28_1().note()
```
Summary
====
Print chapter28.2 note
Example
====
```python
Chapter28_2().note()
```
Summary
====
Print chapter28.3 note
Example
====
```python
Chapter28_3().note()
```
Summary
====
Print chapter28.4 note
Example
====
```python
Chapter28_4().note()
```
Summary
====
Print chapter28.5 note
Example
====
```python
Chapter28_5().note()
```
print chapter28 note.
Class Chapter28_1
Class Chapter28_2
Class Chapter28_3
Class Chapter28_4
Class Chapter28_5
coding:utf-8 usr/bin/python3 python src/chapter28/chapter28note.py python3 src/chapter28/chapter28note.py python src/chapter28/chapter28note.py python3 src/chapter28/chapter28note.py ! Strassen方法的关键就是对矩阵乘法作分治递归 python src/chapter28/chapter28note.py python3 src/chapter28/chapter28note.py python src/chapter28/chapter28note.py python3 src/chapter28/chapter28note.py python src/chapter28/chapter28note.py python3 src/chapter28/chapter28note.py python src/chapter28/chapter28note.py python3 src/chapter28/chapter28note.py python src/chapter28/chapter28note.py python3 src/chapter28/chapter28note.py | 1,288 | en | 0.295707 |
#Python 3.X? Could be compatible with small tweaks.
from re import findall
#Tatatat0 2016
#Documentation:
#Virtual Memory Classes:
# Virtual_Memory(max_memory)
# maxmemory: maximum address memory can be allocated to
# chunks: list of virtual memory chunks.
# format: ((chunk1, chunk1.start_address, chunk1.allocated_memory),(chunk2,...,...))
# Functions:
# allocate(address,memory)
# creates a new Virtual_Memory_Chunk instance, allocating memory at address.
# adds new chunk to chunks attribute
# deallocate(address)
# removes allocated memory at address. Must be starting address of allocated memory
# get_memory(address,memory)
# returns the memory amount of bytes at address. Must be allocated.
# set_memory(address,new_memory)
# sets the memory at address equal to new_memory
# Virtual_Memory_Chunk(parent,start_address,memory_input,allocated_memory,architecture_class)
# parent: a pointer to the main virtual memory class instance
# start_address: is the address of the first byte in memory_input, referenceable by opcodes.
# Default is 0.
# allocated_memory: This is the amount of memory that is accessible. The memory that is accessible is equal to start_address + allocated_memory.
# Default is 100 bytes.
# memory_input: is a series of bytes represented in hex string, if its length is less than the amount allocated, extra zeros are added. Becomes Virtual_Memory_Chunk.memory upon initialization
# Default is 0.
# architecture_class: This is an open ended variable that can be used to bind into a family of architecture based encoding,decoding, and operation methods.
# Default is "None".
# Functions:
# get_memory(address,amount)
# gets amount of bytes of memory at the address specified by address
# region specified must be within the allocated memory
# set_memory(address,new_memory)
# sets the memory at address to new_memory
# region specified must be within the allocated memory
# smallest data editable is a nibble
# print_variables()
# prints the useful variables of current instance of Virtual_Memory_Chunk
#Beginning of not yet implemented
#Architecture Classes:
# Powerpc_Architecture()
# registers: The registers are r0-31,f0-31,CR,LR,PC
# Functions:
# get_registers()
# uses a generator to return a register's values.
# Powerpc_Register(value, bits)
# value = value of the register
#           bits = width of the register, in bits
#
# Mask helper: with nonreg=True, truncate the plain integer ``reg`` to ``bits``
# bits; otherwise ``reg`` is a Powerpc_Register and its value is truncated to
# the register's own bit width.
cast = lambda reg, bits=0, nonreg=False: reg&((1<<bits)-1) if nonreg else reg.value&((1<<reg.bits)-1)
class Powerpc_Register():
    """A fixed-width register holding an unsigned integer value."""
    __slots__ = ['value','bits']
    __int__ = lambda this: int(this.value)
    def __init__(self, value, bits):
        self.value = value  # raw stored value (masked on set())
        self.bits = bits    # register width in bits
    def set(self, value, casts=False, bits=16):
        """Store ``value`` truncated to the register width.

        ``value`` must be a plain int, not a register. With ``casts=True`` the
        value is additionally truncated to ``bits`` bits.
        """
        if value.__class__ == Powerpc_Register:  # registers must be unwrapped first
            raise TypeError('value is a register')
        self.value = value
        self.value = cast(self)
        if casts:
            # BUG FIX: the original called cast(self, bits), which ignores
            # ``bits`` because nonreg defaults to False and simply re-masked to
            # self.bits. Cast the raw value to the requested width instead.
            self.value = cast(self.value, bits, True)
class Powerpc_Architecture():
    """Register file of a PowerPC CPU: CR/LR/PC plus r0-r31 and f0-f31."""
    __slots__ = ['NS']
    def __init__(self):
        namespace = {
            'CR': Powerpc_Register(0, 32),
            'LR': Powerpc_Register(0, 32),
            'PC': Powerpc_Register(0, 32),
        }
        for idx in range(32):
            gpr = Powerpc_Register(0, 32)
            fpr = Powerpc_Register(0, 128)
            namespace['r%i' % idx] = namespace['R%i' % idx] = gpr  # r1 == R1
            namespace['f%i' % idx] = namespace['F%i' % idx] = fpr  # f1 == F1
        self.NS = namespace
    def get_registers(self):
        """Yield the register object behind every name in NS (aliases repeat)."""
        for name in list(self.NS):
            yield self.NS[name]
#End of not yet implemented
class Virtual_Memory:
    """A bounded virtual address space of non-overlapping allocated chunks.

    ``chunks`` is a list of (Virtual_Memory_Chunk, start_address, size_in_bytes)
    tuples.
    """
    def __init__(self, max_memory):
        if type(max_memory) != int:
            raise TypeError("Max memory of virtual memory class instance must be type 'int'")
        self.max_memory = max_memory  # highest address that may be allocated
        self.chunks = []
    def allocate(self, address, memory):
        """Allocate ``memory`` bytes starting at ``address``.

        Raises ValueError for a negative address or non-positive size, and
        IndexError when the region exceeds max_memory or overlaps a chunk.
        """
        if (address < 0) or (memory <= 0):
            raise ValueError("Address or memory amount to be allocated in the Virtual Memory instance can not be negative.")
        if address + memory > self.max_memory:#outside of max memory
            raise IndexError("Can not allocate virtual_memory_chunks to an address outside the max_memory range of the Virtual_Memory instance." + "Attempted to allocate at " + str(hex(address)) + " for " + str(hex(memory)) + " bytes. max_memory of the current Virtual_Memory instance is " + str(hex(self.max_memory)))
        for chunk in range(0, len(self.chunks)):
            # Overlap test: the new region either starts before an existing
            # chunk and reaches it, or starts inside (or just past) one.
            if ((address < self.chunks[chunk][1]) and (address + memory >= self.chunks[chunk][1])) or ((address >= self.chunks[chunk][1]) and (address <= (self.chunks[chunk][1] + self.chunks[chunk][2]))):
                raise IndexError("Cannot allocate to an already allocated address. Allocation: Address: " + str(hex(address)) + ", Memory: " + str(hex(memory)) + " Overlaps allocation at " + str(hex(self.chunks[chunk][1])) + " for " + str(hex(self.chunks[chunk][2])) + " Bytes.")
        # The original duplicated this append in both branches of a
        # len(self.chunks) check; a loop over an empty list is a no-op, so a
        # single append suffices.
        self.chunks.append((Virtual_Memory_Chunk(self, address, memory), address, memory))
    def deallocate(self, address):
        """Remove the chunk whose start address is exactly ``address``."""
        if type(address) != int:
            raise TypeError("Address used to dellocate memory in Virtual_Memory instance must be type 'int'. Type: " + str(type(address)))
        for chunk in range(0, len(self.chunks)):
            if self.chunks[chunk][1] == address:
                del self.chunks[chunk]#deletes memory chunk
                return
        raise IndexError("Given address to deallocate memory of Virtual_Memory instance is not a correct Virtual_Memory_Chunk starting address. Address to deallocate is " + str(hex(address)))
    def get_memory(self, address, memory):
        """Return ``memory`` bytes starting at ``address`` from the owning chunk."""
        if memory <= 0:
            raise ValueError("Must get a positive number of memory from the Virtual Memory instance. Attempted to get from " + str(hex(address)) + " for " + str(hex(memory)) + " bytes.")
        if address > self.max_memory:
            raise IndexError("Can't get memory from an address outside the max_memory range of the Virtual_Memory instance. Attempted to get from " + str(hex(address)) + " for " + str(hex(memory)) + " bytes. max_memory of the current Virtual_Memory instance is " + str(hex(self.max_memory)))
        chunk_num = None  # index of the chunk containing the region
        for chunk in range(0, len(self.chunks)):
            # BUG FIX: the original used '<', which rejected reads that end
            # exactly at a chunk's last byte; '<=' allows the full region.
            if self.chunks[chunk][1] <= address and (address + memory <= (self.chunks[chunk][1] + self.chunks[chunk][2])):
                chunk_num = chunk
                break
        if chunk_num is None:#no valid chunk was found
            raise IndexError("No chunk was found that has memory allocated in the memory region to get from the Virtual Memory instance. Attempted to get from " + str(hex(address)) + " for " + str(hex(memory)) + " bytes.")
        current_chunk = self.chunks[chunk_num]
        address = address - current_chunk[1]  # translate to chunk-relative offset
        return current_chunk[0].memory[address:address + memory]
    def set_memory(self, address, new_memory):
        """Overwrite memory at ``address`` with ``new_memory`` (hex string or byte list)."""
        if type(new_memory) == str:
            new_memory = findall('..', new_memory.upper())  # split into byte pairs
        if len(new_memory) == 0:
            raise ValueError("Length of memory to set in the current Virtual Memory instance must be greater than 1 byte. Address to set " + str(hex(address)))
        if address > self.max_memory:
            raise IndexError("Can't set memory from an address outside the max_memory range of the Virtual_Memory instance. Attempted to set at " + str(hex(address)) + ". max_memory of the current Virtual_Memory instance is " + str(hex(self.max_memory)))
        chunk_num = None  # index of the chunk containing the region
        for chunk in range(0, len(self.chunks)):
            # BUG FIX: '<=' instead of '<' (see get_memory).
            if self.chunks[chunk][1] <= address and (address + len(new_memory) <= (self.chunks[chunk][1] + self.chunks[chunk][2])):
                chunk_num = chunk
                break
        if chunk_num is None:#no valid chunk was found
            # BUG FIX: the original formatted an undefined name ``memory``
            # here, raising NameError instead of the intended IndexError.
            raise IndexError("No chunk was found that has memory allocated in the memory region to get from the Virtual Memory instance. Attempted to get from " + str(hex(address)) + " for " + str(hex(len(new_memory))) + " bytes.")
        current_chunk = self.chunks[chunk_num]
        address = address - current_chunk[1]  # translate to chunk-relative offset
        current_chunk[0].memory[address:address + len(new_memory)] = new_memory
class Virtual_Memory_Chunk:
    """One contiguous allocated region of a Virtual_Memory address space.

    ``memory`` is stored as a list of two-character hex strings, one per byte.
    """
    def __init__(self,parent,start_address=0,allocated_memory=100,memory_input="00",architecture_class="None"):
        #Error checking and formatting
        if type(memory_input) != str:#memory input should be in hex, as a string.
            raise TypeError("Incorrect type for memory input to create virtual memory. type: " + str(type(memory_input)))
        if type(start_address) != int:
            if type(start_address) == str:#allows hex strings such as "0x80000000"
                if len(start_address) > 3:#can contain 0x and a number
                    if start_address[0:2] == "0x":
                        start_address = int(start_address,16)#converts the hex to int
                elif len(start_address) <= 2:
                    if "0x" in start_address:
                        raise ValueError("Input for starting address of virtual memory contains no hex after the 0x")
                    else:
                        raise TypeError("Incorrect type for starting address to create virtual memory.")
            else:
                raise TypeError("Incorrect type for starting address to create virtual memory.")
        if "0x" in memory_input: #non intrusive way to check. Allows memory_input to be less than 2 characters by not checking index [0:1]
            if memory_input[0:2] == "0x":#removes "0x" from beginning if included
                memory_input = memory_input[2:]#memory_input stays a string here (faster than a byte array for this use)
        if len(memory_input) > (allocated_memory * 2): #more memory given than allocated (two hex chars per byte)
            raise IndexError("Memory inputted for creation of virtual memory exceeds the length allowed by the allocated memory")
        elif len(memory_input) < (allocated_memory * 2):#less memory given than allocated
            memory_input = memory_input + ("0" * ((allocated_memory * 2) - len(memory_input))) #fills unspecified memory with zeros
        #else: memory given is equal to memory allocated
        #initialization
        self.parent = parent
        self.start_address = start_address #this is the address of the first opcode, relevant to the addresses the opcodes can specify.
        self.memory = findall('..',memory_input) #memory is a list of each individual byte of input
        self.allocated_memory = allocated_memory#amount of memory available, in bytes
        self.architecture_class = architecture_class#where architecture class is used for bindings to directly input into encoding and decoding functions for the given architecture
    def get_memory(self,address,amount):
        """Return ``amount`` bytes starting at ``address``; both may be hex strings."""
        if type(address) == str:
            if "0x" in address:
                address = int(address,16)
        if type(amount) == str:
            if "0x" in amount:
                amount = int(amount,16)
        if address < self.start_address or address > (self.start_address + self.allocated_memory):#is outside allocated memory range
            raise IndexError("Address accessed by get_memory() function of Virtual Memory is outside the range of the allocated memory. Address: " + str(hex(address)) + ", Allocated Memory: " + str(hex(self.start_address)) + "-" + str(hex(self.start_address + self.allocated_memory)))
        #gets amount bytes at address from memory
        memory_start = address - self.start_address#chunk-relative byte offset
        return self.memory[memory_start:memory_start + amount]
    def set_memory(self,address,new_memory):
        """Overwrite memory at ``address`` with the hex string ``new_memory``.

        The smallest editable unit is a nibble: an odd-length input keeps the
        existing low nibble of the final byte touched.
        """
        if type(address) == str:
            if "0x" in address:
                address = int(address,16)
        if type(new_memory) != str:
            raise IndexError("Memory Inputed by set_memory() function of Virtual Memory is not a valid type. Type: " + str(type(new_memory)))
        if new_memory[0:2] == "0x":
            new_memory = new_memory[2:]
        memory_start = address - self.start_address#chunk-relative byte offset
        if (address < self.start_address) or (address > (self.start_address + self.allocated_memory)) or (address + (len(new_memory) / 2) > (self.start_address + self.allocated_memory)): #is outside allocated memory range
            raise IndexError("Address accessed by set_memory() function of Virtual Memory is outside the range of the allocated memory. Address: " + str(hex(address)) + "-" + str(hex(int(address + (len(new_memory) / 2))) + ", Allocated Memory: " + str(hex(self.start_address)) + "-" + str(hex(self.start_address + self.allocated_memory))))
        if len(new_memory) % 2 != 0:#odd nibble count: append the existing low nibble of the last byte written
            new_memory = new_memory + self.memory[int(memory_start + (len(new_memory) / 2))][1]
        self.memory[memory_start:int(memory_start + (len(new_memory) / 2))] = findall('..',new_memory) #updates memory
    def print_variables(self):
        """Dump the chunk's attributes to stdout (debug helper)."""
        print(self.start_address)
        print(self.memory)
        print(self.allocated_memory)
        print(self.architecture_class)
# --- Manual smoke test -----------------------------------------------------
# Earlier single-chunk experiments, kept for reference:
#Memory = Virtual_Memory_Chunk("0x80000000",100,"52AA6FBB52AA60BB52AA60BB52AA60BB")
#print(Memory.get_memory("0x80000000","0xF"))
#Memory.set_memory("0x80000000","0xFFF")
#print(Memory.get_memory("0x80000000","0xF"))
# Allocate two 100-byte chunks, write into the second and read it back.
Memory = Virtual_Memory(0xFFFFFFFF)
Memory.allocate(0x80000200,100)
Memory.allocate(0x80000000,100)
Memory.set_memory(0x80000002,'FAEE00112255')
print(Memory.get_memory(0x80000002,0x10))
newPPC = Powerpc_Architecture();
#for i in newPPC.get_registers():
#    print(bin(int(i)))
| VirtualMemory.py | 15,475 | Python 3.X? Could be compatitible with small tweaks.Tatatat0 2016Documentation:Virtual Memory Classes: Virtual_Memory(max_memory) maxmemory: maximum address memory can be allocated to chunks: list of virtual memory chunks. format: ((chunk1, chunk1.start_address, chunk1.allocated_memory),(chunk2,...,...)) Functions: allocate(address,memory) creates a new Virtual_Memory_Chunk instance, allocating memory at address. adds new chunk to chunks attribute deallocate(address) removes allocated memory at address. Must be starting address of allocated memory get_memory(address,memory) returns the memory amount of bytes at address. Must be allocated. set_memory(address,new_memory) sets the memory at address equal to new_memory Virtual_Memory_Chunk(parent,start_address,memory_input,allocated_memory,architecture_class) parent: a pointer to the main virtual memory class instance start_address: is the address of the first byte in memory_input, referenceable by opcodes. Default is 0. allocated_memory: This is the amount of memory that is accessible. The memory that is accessible is equal to start_address + allocated_memory. Default is 100 bytes. memory_input: is a series of bytes represented in hex string, if its length is less than the amount allocated, extra zeros are added. Becomes Virtual_Memory_Chunk.memory upon initialization Default is 0. architecture_class: This is an open ended variable that can be used to bind into a family of architecture based encoding,decoding, and operation methods. Default is "None". 
Functions: get_memory(address,amount) gets amount of bytes of memory at the address specified by address region specified must be within the allocated memory set_memory(address,new_memory) sets the memory at address to new_memory region specified must be within the allocated memory smallest data editable is a nibble print_variables() prints the useful variables of current instance of Virtual_Memory_ChunkBeginning of not yet implementedArchitecture Classes: Powerpc_Architecture() registers: The registers are r0-31,f0-31,CR,LR,PC Functions: get_registers() uses a generator to return a register's values. Powerpc_Register(value, bits) value = value of the register bits = amount of bytes the value is value is a registerprint (self.value) r1 == R1 f1 == F1generator to return registersreplace with iteritems in 2.X? Nevermind. Still could maybe be a different function in 2.X though.End of not yet implementedoutside of max memorycontains virtual memory chunksprint((hex(memory + address)))print(hex((self.chunks[chunk][1] + self.chunks[chunk][2])))print("statement 1: " , (self.chunks[chunk][1] >= address and (address + memory) < (self.chunks[chunk][1] + self.chunks[chunk][2])))print("statement 2: " , (self.chunks[chunk][1] == address))print("statement 3: " , (address < self.chunks[chunk][1] and (address + memory > self.chunks[chunk][1])))print("statement 4: " , (address > self.chunks[chunk][1] and address < self.chunks[chunk][1] + self.chunks[chunk][2]))if (self.chunks[chunk][1] >= address and (memory + address) > (self.chunks[chunk][1])) or (self.chunks[chunk][1] == address) or (address < self.chunks[chunk][1] and (address + memory > self.chunks[chunk][1])) or (address > self.chunks[chunk][1] and address < self.chunks[chunk][1] + self.chunks[chunk][print(hex(self.chunks[chunk][1]))deletes memory chunkinitialize variable. Virtual Memory chunk to use.no valid chunk was foundinitialize variable. 
Virtual Memory chunk to use.no valid chunk was foundError checking and formattingmemory input should be in hex, as a string.allows hexcan contain 0x and a numberconverts the hex to intnon intrusive way to check. Allows memory_input to be less than 2 characters by not checking index [0:1]removes "0x" from beginning if includedI chose to keep memory_input as a string instead of a byte array because it is faster.more memory given then allocatedless memory given then allocatedfills unspecified memory with zeroselse: memory given is equal to memory allocatedinitializationthis is the address of the first opcode, relevant to the addresses the opcodes can specify.memory is a list of each individual byte of inputamount of memory availablewhere architecture class is used for bindings to directly input into encoding and decoding functions for the given architectureis outside allocated memory rangegets amount bytes at address from memoryinternal memory of virtual memoryinternal memory of virtual memoryis outside allocated memory rangenot evenupdates memoryMemory = Virtual_Memory_Chunk("0x80000000",100,"52AA6FBB52AA60BB52AA60BB52AA60BB")print(Memory.get_memory("0x80000000","0xF"))Memory.set_memory("0x80000000","0xFFF")print(Memory.get_memory("0x80000000","0xF"))for i in newPPC.get_registers(): print(bin(int(i))) | 4,913 | en | 0.738116 |
import asyncio, sys, os
from onvif import ONVIFCamera
import time
# --- Camera connection settings --------------------------------------------
# NOTE(review): credentials are hard-coded; prefer env vars or a config file.
IP="192.168.1.64"   # Camera IP address
PORT=80           # Port
USER="admin"         # Username
PASS="intflow3121"        # Password
# --- Pan/tilt/zoom state, refreshed from the camera by Get_Status() --------
XMAX = 1    # pan range limits (normalized coordinate space)
XMIN = -1
XNOW = 0.5  # current pan position
YMAX = 1    # tilt range limits
YMIN = -1
YNOW = 0.5  # current tilt position
Move = 0.1  # step size applied per movement command
Velocity = 1  # maximum pan/tilt speed reported by the camera
Zoom = 0      # current zoom level (0.0 .. 1.0)
# --- Shared ONVIF objects, populated by setup_move() -----------------------
positionrequest = None  # reusable AbsoluteMove request
ptz = None              # PTZ service proxy
active = False          # True while an AbsoluteMove may still be in progress
ptz_configuration_options = None
media_profile = None
def do_move(ptz, request):
    """Issue an AbsoluteMove, first stopping any motion still flagged active."""
    global active
    if active:
        ptz.Stop({'ProfileToken': request.ProfileToken})
    active = True
    ptz.AbsoluteMove(request)
def move_up(ptz, request):
    """Tilt one step up (y decreases by Move); hold position at the -1 limit."""
    stepped = YNOW - Move
    request.Position.PanTilt.y = YNOW if stepped <= -1 else stepped
    do_move(ptz, request)
def move_down(ptz, request):
    """Tilt one step down (y increases by Move); hold position at the +1 limit."""
    stepped = YNOW + Move
    request.Position.PanTilt.y = YNOW if stepped >= 1 else stepped
    do_move(ptz, request)
def move_right(ptz, request):
    """Pan one step right (x decreases by Move), keeping the current tilt."""
    if XNOW - Move >= -0.99:
        request.Position.PanTilt.x = XNOW - Move
    elif abs(XNOW + Move) >= 0.0:
        # NOTE(review): abs(...) >= 0.0 is always true, so the branch below is
        # unreachable — confirm the intended wrap-around condition at the seam.
        request.Position.PanTilt.x = abs(XNOW) - Move
    elif abs(XNOW) <= 0.01:
        request.Position.PanTilt.x = XNOW
    request.Position.PanTilt.y = YNOW
    do_move(ptz, request)
def move_left(ptz, request):
    """Pan one step left (x increases by Move), keeping the current tilt."""
    if XNOW + Move <= 1.0:
        request.Position.PanTilt.x = XNOW + Move
    elif XNOW <= 1.0 and XNOW > 0.99:
        # Wrap from the +1 edge to the opposite side.
        request.Position.PanTilt.x = -XNOW
    elif XNOW < 0:
        # NOTE(review): unreachable — the first branch already covers all
        # XNOW <= 0.9; confirm intent.
        request.Position.PanTilt.x = XNOW + Move
    elif XNOW <= -0.105556 and XNOW > -0.11:
        # NOTE(review): magic window (-0.11, -0.105556]; purpose unclear —
        # confirm against the camera's pan coordinate seam.
        request.Position.PanTilt.x = XNOW
    request.Position.PanTilt.y = YNOW
    do_move(ptz, request)
def move_upleft(ptz, request):
    """Tilt one step up and pan one step left in a single move."""
    # Tilt: y decreases by Move unless already at the -1 limit.
    if YNOW == -1:
        request.Position.PanTilt.y = YNOW
    else:
        request.Position.PanTilt.y = YNOW - Move
    # Pan: same branch structure as move_left (see NOTE(review) there about
    # the unreachable branches and the magic window).
    if XNOW + Move <= 1.0:
        request.Position.PanTilt.x = XNOW + Move
    elif XNOW <= 1.0 and XNOW > 0.99:
        request.Position.PanTilt.x = -XNOW
    elif XNOW < 0:
        request.Position.PanTilt.x = XNOW + Move
    elif XNOW <= -0.105556 and XNOW > -0.11:
        request.Position.PanTilt.x = XNOW
    do_move(ptz, request)
def move_upright(ptz, request):
    """Tilt one step up and pan one step right in a single move."""
    # Tilt: y decreases by Move unless already at the -1 limit.
    if YNOW == -1:
        request.Position.PanTilt.y = YNOW
    else:
        request.Position.PanTilt.y = YNOW - Move
    # Pan: same branch structure as move_right (see NOTE(review) there about
    # the always-true second condition).
    if XNOW - Move >= -0.99:
        request.Position.PanTilt.x = XNOW - Move
    elif abs(XNOW + Move) >= 0.0:
        request.Position.PanTilt.x = abs(XNOW) - Move
    elif abs(XNOW) <= 0.01:
        request.Position.PanTilt.x = XNOW
    do_move(ptz, request)
def move_downleft(ptz, request):
    """Tilt one step and pan one step left in a single move."""
    # NOTE(review): despite the name, y is decreased by Move here (the same
    # direction as move_up*); the guard also differs from the other diagonal
    # helpers (YNOW - Move == 1 vs YNOW == -1) — confirm intended direction.
    if YNOW - Move == 1:
        request.Position.PanTilt.y = YNOW
    else:
        request.Position.PanTilt.y = YNOW - Move
    # Pan: same branch structure as move_left (see NOTE(review) there).
    if XNOW + Move <= 1.0:
        request.Position.PanTilt.x = XNOW + Move
    elif XNOW <= 1.0 and XNOW > 0.99:
        request.Position.PanTilt.x = -XNOW
    elif XNOW < 0:
        request.Position.PanTilt.x = XNOW + Move
    elif XNOW <= -0.105556 and XNOW > -0.11:
        request.Position.PanTilt.x = XNOW
    do_move(ptz, request)
def move_downright(ptz, request):
    """Tilt one step and pan one step right in a single move."""
    # NOTE(review): despite the name, y is decreased by Move here (the same
    # direction as move_up*) — confirm intended tilt direction.
    if YNOW == -1:
        request.Position.PanTilt.y = YNOW
    else:
        request.Position.PanTilt.y = YNOW - Move
    # Pan: same branch structure as move_right (see NOTE(review) there).
    if XNOW - Move >= -0.99:
        request.Position.PanTilt.x = XNOW - Move
    elif abs(XNOW + Move) >= 0.0:
        request.Position.PanTilt.x = abs(XNOW) - Move
    elif abs(XNOW) <= 0.01:
        request.Position.PanTilt.x = XNOW
    do_move(ptz, request)
def Zoom_in(ptz, request):
    """Zoom one step in, clamping the target level at 1.0."""
    request.Position.Zoom = min(Zoom + Move, 1.0)
    do_move(ptz, request)
def Zoom_out(ptz, request):
    """Zoom one step out, clamping the target level at 0.0."""
    request.Position.Zoom = max(Zoom - Move, 0.0)
    do_move(ptz, request)
def setup_move():
    """Connect to the camera and prepare the global AbsoluteMove request."""
    mycam = ONVIFCamera(IP, PORT, USER, PASS)
    # Create media service object
    media = mycam.create_media_service()
    # Create ptz service object
    global ptz , ptz_configuration_options, media_profile
    ptz = mycam.create_ptz_service()
    # Get target profile
    media_profile = media.GetProfiles()[0]
    # Fetch the PTZ configuration options (coordinate spaces, ranges).
    request = ptz.create_type('GetConfigurationOptions')
    request.ConfigurationToken = media_profile.PTZConfiguration.token
    ptz_configuration_options = ptz.GetConfigurationOptions(request)
    request_configuration = ptz.create_type('GetConfiguration')
    request_configuration.PTZConfigurationToken = media_profile.PTZConfiguration.token
    ptz_configuration = ptz.GetConfiguration(request_configuration)
    # NOTE(review): this SetConfiguration request is built but never sent.
    request_setconfiguration = ptz.create_type('SetConfiguration')
    request_setconfiguration.PTZConfiguration = ptz_configuration
    global positionrequest
    # Build the reusable AbsoluteMove request shared by all move_* helpers.
    positionrequest = ptz.create_type('AbsoluteMove')
    positionrequest.ProfileToken = media_profile.token
    if positionrequest.Position is None :
        # Seed Position from the camera's current status and pin the absolute
        # pan/tilt and zoom coordinate spaces.
        positionrequest.Position = ptz.GetStatus({'ProfileToken': media_profile.token}).Position
        positionrequest.Position.PanTilt.space = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].URI
        positionrequest.Position.Zoom.space = ptz_configuration_options.Spaces.AbsoluteZoomPositionSpace[0].URI
    if positionrequest.Speed is None :
        # NOTE(review): Speed is seeded from the status *Position* vector, not
        # a speed vector; presumably works because only the space URI matters
        # here — confirm against the ONVIF PTZ spec.
        positionrequest.Speed = ptz.GetStatus({'ProfileToken': media_profile.token}).Position
        positionrequest.Speed.PanTilt.space = ptz_configuration_options.Spaces.PanTiltSpeedSpace[0].URI
def Get_Status():
    """Refresh the global pan/tilt/zoom limits and current position from the camera."""
    # Get range of pan and tilt
    global XMAX, XMIN, YMAX, YMIN, XNOW, YNOW, Velocity, Zoom
    XMAX = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].XRange.Max
    XMIN = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].XRange.Min
    YMAX = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].YRange.Max
    YMIN = ptz_configuration_options.Spaces.AbsolutePanTiltPositionSpace[0].YRange.Min
    # Current position and zoom come from a live GetStatus call.
    XNOW = ptz.GetStatus({'ProfileToken': media_profile.token}).Position.PanTilt.x
    YNOW = ptz.GetStatus({'ProfileToken': media_profile.token}).Position.PanTilt.y
    Velocity = ptz_configuration_options.Spaces.PanTiltSpeedSpace[0].XRange.Max
    Zoom = ptz.GetStatus({'ProfileToken': media_profile.token}).Position.Zoom.x
def readin():
    """Read one command from stdin and dispatch the matching PTZ move.

    Accepts abbreviated directions ('u'/'up', 'd'..'down', 'l'..'left',
    'r'..'right'), the diagonals 'ul', 'ur', 'dl', 'dr', and
    's'..'stop'. Unknown input prints a help message. Always re-prints
    the command prompt before returning.
    """
    # 'active' must be declared global: the stop branch assigns it, and the
    # main loop reads the module-level flag. The original omitted it, so the
    # assignment created a dead local and the flag was never cleared.
    global positionrequest, ptz, active
    selection = sys.stdin.readline().strip("\n")
    lov = [x for x in selection.split(" ") if x != ""]
    if lov:
        cmd = lov[0].lower()
        if cmd in ["u", "up"]:
            move_up(ptz, positionrequest)
        elif cmd in ["d", "do", "dow", "down"]:
            move_down(ptz, positionrequest)
        elif cmd in ["l", "le", "lef", "left"]:
            # The original duplicated this branch; the second copy was
            # unreachable and has been removed.
            move_left(ptz, positionrequest)
        elif cmd in ["r", "ri", "rig", "righ", "right"]:
            move_right(ptz, positionrequest)
        elif cmd in ["ul"]:
            move_upleft(ptz, positionrequest)
        elif cmd in ["ur"]:
            move_upright(ptz, positionrequest)
        elif cmd in ["dl"]:
            move_downleft(ptz, positionrequest)
        elif cmd in ["dr"]:
            move_downright(ptz, positionrequest)
        elif cmd in ["s", "st", "sto", "stop"]:
            # Halt any in-flight motion and clear the busy flag.
            ptz.Stop({'ProfileToken': positionrequest.ProfileToken})
            active = False
        else:
            print("What are you asking?\tI only know, 'up','down','left','right', 'ul' (up left), \n\t\t\t'ur' (up right), 'dl' (down left), 'dr' (down right) and 'stop'")
    print("")
    print("Your command: ", end='', flush=True)
# Test Define
# def move(ptz, request):
# request.Position.PanTilt.y = -1
# request.Position.PanTilt.x = 0
# do_move(ptz,request)
if __name__ == '__main__':
    setup_move()
    # Demo loop: while no move is in flight, refresh the cached camera
    # status and issue another upward AbsoluteMove; once 'active' is set
    # (presumably by the move helpers — confirm against do_move), wait a
    # second for the motion to play out, then clear the flag.
    while True:
        if active:  # idiomatic truth test instead of '== True'
            time.sleep(1)
            active = False
        else:
            Get_Status()
            move_up(ptz, positionrequest)
Camera IP address Port Username Password Create media service object Create ptz service object Get target profile Get range of pan and tilt Test Define def move(ptz, request): request.Position.PanTilt.y = -1 request.Position.PanTilt.x = 0 do_move(ptz,request) Get_Status() Zoom_out(ptz,positionrequest) Get_Status() move(ptz,positionrequest) | 401 | en | 0.483882 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.